mirror of https://github.com/raseels-repos/golang-saas-starter-kit.git synced 2025-08-08 22:36:41 +02:00

Added image resizer for responsive web apps

This commit is contained in:
Lee Brown
2019-05-20 22:16:58 -05:00
23 changed files with 1458 additions and 327 deletions

View File

@ -146,3 +146,28 @@ $ curl -H "Authorization: Bearer ${TOKEN}" http://localhost:3000/v1/users
## What's Next
We are in the process of writing more documentation about this code. Classes are being finalized as part of the Ultimate series.
## AWS Permissions
Base permissions required:
```
secretsmanager:CreateSecret
secretsmanager:GetSecretValue
secretsmanager:ListSecretVersionIds
secretsmanager:PutSecretValue
secretsmanager:UpdateSecret
```
If CloudFront is enabled for static files:
```
cloudfront:ListDistributions
```
Additional permissions required for unit tests:
```
secretsmanager:DeleteSecret
```
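For reference, a minimal sketch of how these permissions are exercised at startup to load the JWT signing key from AWS Secrets Manager. The secret name `auth-secret-key` mirrors the `AUTH_AWS_SECRET_ID` default and is only illustrative.
```go
// Sketch: fetch the current auth signing key from AWS Secrets Manager.
// Requires secretsmanager:GetSecretValue on the secret.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/secretsmanager"
)

func main() {
	// Assumes AWS credentials and region are available in the environment.
	sess := session.Must(session.NewSession())
	sm := secretsmanager.New(sess)

	// "auth-secret-key" matches the AUTH_AWS_SECRET_ID default; adjust as needed.
	res, err := sm.GetSecretValue(&secretsmanager.GetSecretValueInput{
		SecretId: aws.String("auth-secret-key"),
	})
	if err != nil {
		log.Fatalf("get secret value: %v", err)
	}
	log.Printf("loaded key version %s (%d bytes)", *res.VersionId, len(res.SecretBinary))
}
```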

View File

@ -2,10 +2,8 @@ package main
import (
"context"
"crypto/rsa"
"encoding/json"
"expvar"
"io/ioutil"
"log"
"net/http"
_ "net/http/pprof"
@ -19,9 +17,12 @@ import (
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/db"
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/flag"
itrace "geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/trace"
jwt "github.com/dgrijalva/jwt-go"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/kelseyhightower/envconfig"
"go.opencensus.io/trace"
awstrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws"
)
/*
@ -49,33 +50,59 @@ func main() {
// =========================================================================
// Configuration
var cfg struct {
Env string `default:"dev" envconfig:"ENV"`
HTTP struct {
Host string `default:"0.0.0.0:3001" envconfig:"HTTP_HOST"`
DebugHost string `default:"0.0.0.0:4000" envconfig:"DEBUG_HOST"`
ReadTimeout time.Duration `default:"5s" envconfig:"READ_TIMEOUT"`
WriteTimeout time.Duration `default:"5s" envconfig:"WRITE_TIMEOUT"`
ShutdownTimeout time.Duration `default:"5s" envconfig:"SHUTDOWN_TIMEOUT"`
DebugHost string `default:"0.0.0.0:4000" envconfig:"HTTP_DEBUG_HOST"`
ReadTimeout time.Duration `default:"5s" envconfig:"HTTP_READ_TIMEOUT"`
WriteTimeout time.Duration `default:"5s" envconfig:"HTTP_WRITE_TIMEOUT"`
ShutdownTimeout time.Duration `default:"5s" envconfig:"HTTP_SHUTDOWN_TIMEOUT"`
}
App struct {
Name string `default:"web-app" envconfig:"APP_NAME"`
}
BuildInfo struct {
CiCommitRefName string `envconfig:"CI_COMMIT_REF_NAME"`
CiCommitRefSlug string `envconfig:"CI_COMMIT_REF_SLUG"`
CiCommitSha string `envconfig:"CI_COMMIT_SHA"`
CiCommitTag string `envconfig:"CI_COMMIT_TAG"`
CiCommitTitle string `envconfig:"CI_COMMIT_TITLE"`
CiCommitDescription string `envconfig:"CI_COMMIT_DESCRIPTION"`
CiJobId string `envconfig:"CI_COMMIT_JOB_ID"`
CiJobUrl string `envconfig:"CI_COMMIT_JOB_URL"`
CiPipelineId string `envconfig:"CI_COMMIT_PIPELINE_ID"`
CiPipelineUrl string `envconfig:"CI_COMMIT_PIPELINE_URL"`
}
DB struct {
DialTimeout time.Duration `default:"5s" envconfig:"DIAL_TIMEOUT"`
Host string `default:"mongo:27017/gotraining" envconfig:"HOST"`
DialTimeout time.Duration `default:"5s" envconfig:"DB_DIAL_TIMEOUT"`
Host string `default:"mongo:27017/gotraining" envconfig:"DB_HOST"`
}
Trace struct {
Host string `default:"http://tracer:3002/v1/publish" envconfig:"HOST"`
BatchSize int `default:"1000" envconfig:"BATCH_SIZE"`
SendInterval time.Duration `default:"15s" envconfig:"SEND_INTERVAL"`
SendTimeout time.Duration `default:"500ms" envconfig:"SEND_TIMEOUT"`
Host string `default:"http://tracer:3002/v1/publish" envconfig:"TRACE_HOST"`
BatchSize int `default:"1000" envconfig:"TRACE_BATCH_SIZE"`
SendInterval time.Duration `default:"15s" envconfig:"TRACE_SEND_INTERVAL"`
SendTimeout time.Duration `default:"500ms" envconfig:"TRACE_SEND_TIMEOUT"`
}
AwsAccount struct {
AccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID"`
SecretAccessKey string `envconfig:"AWS_SECRET_ACCESS_KEY"`
Region string `default:"us-east-1" envconfig:"AWS_REGION"`
// Get an AWS session from an implicit source if no explicit
// configuration is provided. This is useful for taking advantage of
// EC2/ECS instance roles.
UseRole bool `envconfig:"AWS_USE_ROLE"`
}
Auth struct {
KeyID string `envconfig:"KEY_ID"`
PrivateKeyFile string `default:"/app/private.pem" envconfig:"PRIVATE_KEY_FILE"`
Algorithm string `default:"RS256" envconfig:"ALGORITHM"`
AwsSecretID string `default:"auth-secret-key" envconfig:"AUTH_AWS_SECRET_ID"`
KeyExpiration time.Duration `default:"3600s" envconfig:"AUTH_KEY_EXPIRATION"`
}
}
if err := envconfig.Process("WEB_APP", &cfg); err != nil {
// !!! This prefix is easy to miss; if you copy and paste this main.go
// file, it's likely you will forget to update it.
if err := envconfig.Process("WEB_API", &cfg); err != nil {
log.Fatalf("main : Parsing Config : %v", err)
}
@ -104,21 +131,22 @@ func main() {
log.Printf("main : Config : %v\n", string(cfgJSON))
// =========================================================================
// Find auth keys
keyContents, err := ioutil.ReadFile(cfg.Auth.PrivateKeyFile)
if err != nil {
log.Fatalf("main : Reading auth private key : %v", err)
// Init AWS Session
var awsSession *session.Session
if cfg.AwsAccount.UseRole {
// Get an AWS session from an implicit source if no explicit
// configuration is provided. This is useful for taking advantage of
// EC2/ECS instance roles.
awsSession = session.Must(session.NewSession())
} else {
creds := credentials.NewStaticCredentials(cfg.AwsAccount.AccessKeyID, cfg.AwsAccount.SecretAccessKey, "")
awsSession = session.New(&aws.Config{Region: aws.String(cfg.AwsAccount.Region), Credentials: creds})
}
awsSession = awstrace.WrapSession(awsSession)
key, err := jwt.ParseRSAPrivateKeyFromPEM(keyContents)
if err != nil {
log.Fatalf("main : Parsing auth private key : %v", err)
}
publicKeyLookup := auth.NewSingleKeyFunc(cfg.Auth.KeyID, key.Public().(*rsa.PublicKey))
authenticator, err := auth.NewAuthenticator(key, cfg.Auth.KeyID, cfg.Auth.Algorithm, publicKeyLookup)
// =========================================================================
// Load auth keys from AWS and init new Authenticator
authenticator, err := auth.NewAuthenticator(awsSession, cfg.Auth.AwsSecretID, time.Now().UTC(), cfg.Auth.KeyExpiration)
if err != nil {
log.Fatalf("main : Constructing authenticator : %v", err)
}

View File

@ -11,7 +11,7 @@ import (
// User represents the User API method handler set.
type Root struct {
MasterDB *db.DB
MasterDB *db.DB
Renderer web.Renderer
// ADD OTHER STATE LIKE THE LOGGER AND CONFIG HERE.
}
@ -21,5 +21,9 @@ func (u *Root) Index(ctx context.Context, w http.ResponseWriter, r *http.Request
ctx, span := trace.StartSpan(ctx, "handlers.Root.Index")
defer span.End()
return u.Renderer.Render(ctx, w, r, baseLayoutTmpl, "root-index.tmpl", web.MIMETextHTMLCharsetUTF8, http.StatusOK, nil)
data := map[string]interface{}{
"imgSizes": []int{100, 200, 300, 400, 500},
}
return u.Renderer.Render(ctx, w, r, baseLayoutTmpl, "root-index.tmpl", web.MIMETextHTMLCharsetUTF8, http.StatusOK, data)
}

View File

@ -21,14 +21,14 @@ func APP(shutdown chan os.Signal, log *log.Logger, staticDir, templateDir string
// Register health check endpoint. This route is not authenticated.
check := Check{
MasterDB: masterDB,
MasterDB: masterDB,
Renderer: renderer,
}
app.Handle("GET", "/v1/health", check.Health)
// Register user management and authentication endpoints.
u := User{
MasterDB: masterDB,
MasterDB: masterDB,
Renderer: renderer,
}
@ -38,7 +38,7 @@ func APP(shutdown chan os.Signal, log *log.Logger, staticDir, templateDir string
// Register root
r := Root{
MasterDB: masterDB,
MasterDB: masterDB,
Renderer: renderer,
}
// This route is not authenticated
@ -46,7 +46,7 @@ func APP(shutdown chan os.Signal, log *log.Logger, staticDir, templateDir string
app.Handle("GET", "/", r.Index)
// Static file server
app.Handle("GET", "/*", web.Static(staticDir,""))
app.Handle("GET", "/*", web.Static(staticDir, ""))
return app
}

View File

@ -11,7 +11,7 @@ import (
// User represents the User API method handler set.
type User struct {
MasterDB *db.DB
MasterDB *db.DB
Renderer web.Renderer
// ADD OTHER STATE LIKE THE LOGGER AND CONFIG HERE.
}

View File

@ -5,6 +5,7 @@ import (
"encoding/json"
"expvar"
"fmt"
"html/template"
"log"
"net/http"
_ "net/http/pprof"
@ -16,31 +17,27 @@ import (
"strings"
"syscall"
"time"
"html/template"
template_renderer "geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/web/template-renderer"
lru "github.com/hashicorp/golang-lru"
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/web"
"geeks-accelerator/oss/saas-starter-kit/example-project/cmd/web-app/handlers"
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/deploy"
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/flag"
img_resize "geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/img-resize"
itrace "geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/trace"
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/web"
template_renderer "geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/web/template-renderer"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/go-redis/redis"
"github.com/kelseyhightower/envconfig"
"go.opencensus.io/trace"
awstrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws"
redistrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis"
)
// build is the git version of this program. It is set using build flags in the makefile.
var build = "develop"
const LRU_CACHE_ITEMS = 128
var (
localCache *lru.Cache
)
func init() {
localCache, _ = lru.New(LRU_CACHE_ITEMS)
}
func main() {
// =========================================================================
@ -51,53 +48,79 @@ func main() {
// =========================================================================
// Configuration
var cfg struct {
Env string `default:"dev" envconfig:"ENV"`
Env string `default:"dev" envconfig:"ENV"`
HTTP struct {
Host string `default:"0.0.0.0:3000" envconfig:"HTTP_HOST"`
DebugHost string `default:"0.0.0.0:4000" envconfig:"DEBUG_HOST"`
ReadTimeout time.Duration `default:"5s" envconfig:"READ_TIMEOUT"`
WriteTimeout time.Duration `default:"5s" envconfig:"WRITE_TIMEOUT"`
ShutdownTimeout time.Duration `default:"5s" envconfig:"SHUTDOWN_TIMEOUT"`
TemplateDir string `default:"./templates" envconfig:"TEMPLATE_DIR"`
StaticDir string `default:"./static" envconfig:"STATIC_DIR"`
Host string `default:"0.0.0.0:3000" envconfig:"HTTP_HOST"`
ReadTimeout time.Duration `default:"10s" envconfig:"HTTP_READ_TIMEOUT"`
WriteTimeout time.Duration `default:"10s" envconfig:"HTTP_WRITE_TIMEOUT"`
}
HTTPS struct {
Host string `default:"" envconfig:"HTTPS_HOST"`
ReadTimeout time.Duration `default:"5s" envconfig:"HTTPS_READ_TIMEOUT"`
WriteTimeout time.Duration `default:"5s" envconfig:"HTTPS_WRITE_TIMEOUT"`
}
App struct {
Name string `default:"web-app" envconfig:"APP_NAME"`
StaticS3 struct {
S3Bucket string `envconfig:"APP_STATIC_S3_BUCKET"`
S3KeyPrefix string `envconfig:"APP_STATIC_S3_KEY_PREFIX"`
EnableCloudFront bool `envconfig:"APP_STATIC_S3_ENABLE_CLOUDFRONT"`
Name string `default:"web-app" envconfig:"APP_NAME"`
BaseUrl string `default:"" envconfig:"APP_BASE_URL"`
TemplateDir string `default:"./templates" envconfig:"APP_TEMPLATE_DIR"`
StaticDir string `default:"./static" envconfig:"APP_STATIC_DIR"`
StaticS3 struct {
S3Enabled bool `envconfig:"APP_STATIC_S3_ENABLED"`
S3Bucket string `envconfig:"APP_STATIC_S3_BUCKET"`
S3KeyPrefix string `default:"public/web_app/static" envconfig:"APP_STATIC_S3_KEY_PREFIX"`
CloudFrontEnabled bool `envconfig:"APP_STATIC_S3_CLOUDFRONT_ENABLED"`
ImgResizeEnabled bool `envconfig:"APP_STATIC_S3_IMG_RESIZE_ENABLED"`
}
DebugHost string `default:"0.0.0.0:4000" envconfig:"APP_DEBUG_HOST"`
ShutdownTimeout time.Duration `default:"5s" envconfig:"APP_SHUTDOWN_TIMEOUT"`
}
BuildInfo struct {
CiCommitRefName string `envconfig:"CI_COMMIT_REF_NAME"`
CiCommitRefSlug string `envconfig:"CI_COMMIT_REF_SLUG"`
CiCommitSha string `envconfig:"CI_COMMIT_SHA"`
CiCommitTag string `envconfig:"CI_COMMIT_TAG"`
CiCommitTitle string `envconfig:"CI_COMMIT_TITLE"`
CiCommitRefName string `envconfig:"CI_COMMIT_REF_NAME"`
CiCommitRefSlug string `envconfig:"CI_COMMIT_REF_SLUG"`
CiCommitSha string `envconfig:"CI_COMMIT_SHA"`
CiCommitTag string `envconfig:"CI_COMMIT_TAG"`
CiCommitTitle string `envconfig:"CI_COMMIT_TITLE"`
CiCommitDescription string `envconfig:"CI_COMMIT_DESCRIPTION"`
CiJobId string `envconfig:"CI_COMMIT_JOB_ID"`
CiJobUrl string `envconfig:"CI_COMMIT_JOB_URL"`
CiPipelineId string `envconfig:"CI_COMMIT_PIPELINE_ID"`
CiPipelineUrl string `envconfig:"CI_COMMIT_PIPELINE_URL"`
CiJobId string `envconfig:"CI_COMMIT_JOB_ID"`
CiJobUrl string `envconfig:"CI_COMMIT_JOB_URL"`
CiPipelineId string `envconfig:"CI_COMMIT_PIPELINE_ID"`
CiPipelineUrl string `envconfig:"CI_COMMIT_PIPELINE_URL"`
}
Redis struct {
DialTimeout time.Duration `default:"5s" envconfig:"REDIS_DIAL_TIMEOUT"`
Host string `default:":6379" envconfig:"REDIS_HOST"`
DB int `default:"1" envconfig:"REDIS_DB"`
MaxmemoryPolicy string `envconfig:"REDIS_MAXMEMORY_POLICY"`
}
DB struct {
DialTimeout time.Duration `default:"5s" envconfig:"DIAL_TIMEOUT"`
Host string `default:"mongo:27017/gotraining" envconfig:"HOST"`
DialTimeout time.Duration `default:"5s" envconfig:"DB_DIAL_TIMEOUT"`
Host string `default:"mongo:27017/gotraining" envconfig:"DB_HOST"`
}
Trace struct {
Host string `default:"http://tracer:3002/v1/publish" envconfig:"HOST"`
BatchSize int `default:"1000" envconfig:"BATCH_SIZE"`
SendInterval time.Duration `default:"15s" envconfig:"SEND_INTERVAL"`
SendTimeout time.Duration `default:"500ms" envconfig:"SEND_TIMEOUT"`
Host string `default:"http://tracer:3002/v1/publish" envconfig:"TRACE_HOST"`
BatchSize int `default:"1000" envconfig:"TRACE_BATCH_SIZE"`
SendInterval time.Duration `default:"15s" envconfig:"TRACE_SEND_INTERVAL"`
SendTimeout time.Duration `default:"500ms" envconfig:"TRACE_SEND_TIMEOUT"`
}
AwsAccount struct {
AccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID"`
SecretAccessKey string `envconfig:"AWS_SECRET_ACCESS_KEY"`
Region string `default:"us-east-1" envconfig:"AWS_REGION"`
// Get an AWS session from an implicit source if no explicit
// configuration is provided. This is useful for taking advantage of
// EC2/ECS instance roles.
UseRole bool `envconfig:"AWS_USE_ROLE"`
}
Auth struct {
KeyID string `envconfig:"KEY_ID"`
PrivateKeyFile string `default:"/app/private.pem" envconfig:"PRIVATE_KEY_FILE"`
Algorithm string `default:"RS256" envconfig:"ALGORITHM"`
AwsSecretID string `default:"auth-secret-key" envconfig:"AUTH_AWS_SECRET_ID"`
KeyExpiration time.Duration `default:"3600s" envconfig:"AUTH_KEY_EXPIRATION"`
}
CMD string `envconfig:"CMD"`
}
// !!! This prefix is easy to miss; if you copy and paste this main.go
// file, it's likely you will forget to update it.
if err := envconfig.Process("WEB_APP", &cfg); err != nil {
log.Fatalf("main : Parsing Config : %v", err)
}
@ -109,6 +132,22 @@ func main() {
return // We displayed help.
}
// If base URL is empty, set the default value from the HTTP Host
if cfg.App.BaseUrl == "" {
baseUrl := cfg.HTTP.Host
if !strings.HasPrefix(baseUrl, "http") {
if strings.HasPrefix(baseUrl, "0.0.0.0:") {
pts := strings.Split(baseUrl, ":")
pts[0] = "127.0.0.1"
baseUrl = strings.Join(pts, ":")
} else if strings.HasPrefix(baseUrl, ":") {
baseUrl = "127.0.0.1" + baseUrl
}
baseUrl = "http://" + baseUrl
}
cfg.App.BaseUrl = baseUrl
}
// =========================================================================
// App Starting
@ -126,18 +165,113 @@ func main() {
// want to leak credentials or anything that can be a security risk.
log.Printf("main : Config : %v\n", string(cfgJSON))
// =========================================================================
// Init AWS Session
var awsSession *session.Session
if cfg.AwsAccount.UseRole {
// Get an AWS session from an implicit source if no explicit
// configuration is provided. This is useful for taking advantage of
// EC2/ECS instance roles.
awsSession = session.Must(session.NewSession())
} else {
creds := credentials.NewStaticCredentials(cfg.AwsAccount.AccessKeyID, cfg.AwsAccount.SecretAccessKey, "")
awsSession = session.New(&aws.Config{Region: aws.String(cfg.AwsAccount.Region), Credentials: creds})
}
awsSession = awstrace.WrapSession(awsSession)
// =========================================================================
// Start Redis
// Ensure the eviction policy on the redis cluster is set correctly.
// AWS ElastiCache redis clusters default to volatile-lru.
// volatile-lru: evict keys by trying to remove the least recently used (LRU) keys first, but only among keys that have an expire set, in order to make space for the new data added.
// allkeys-lru: evict keys by trying to remove the least recently used (LRU) keys first, in order to make space for the new data added.
// It is recommended to set the eviction policy to allkeys-lru.
log.Println("main : Started : Initialize Redis")
redisClient := redistrace.NewClient(&redis.Options{
Addr: cfg.Redis.Host,
DB: cfg.Redis.DB,
DialTimeout: cfg.Redis.DialTimeout,
})
defer redisClient.Close()
evictPolicyConfigKey := "maxmemory-policy"
// If the maxmemory policy is set in the config, make sure it's set on the cluster.
// By default it is not set, and the value defined in the redis config on the server is used.
if cfg.Redis.MaxmemoryPolicy != "" {
err = redisClient.ConfigSet(evictPolicyConfigKey, cfg.Redis.MaxmemoryPolicy).Err()
if err != nil {
log.Fatalf("main : redis : ConfigSet maxmemory-policy : %v", err)
}
} else {
evictPolicy, err := redisClient.ConfigGet(evictPolicyConfigKey).Result()
if err != nil {
log.Fatalf("main : redis : ConfigGet maxmemory-policy : %v", err)
}
if evictPolicy[1] != "allkeys-lru" {
log.Printf("main : redis : ConfigGet maxmemory-policy : recommended to be set to allkeys-lru to avoid OOM")
}
}
// =========================================================================
// Deploy
switch cfg.CMD {
case "sync-static":
// sync static files to S3
if cfg.App.StaticS3.S3Enabled || cfg.App.StaticS3.CloudFrontEnabled {
err = deploy.SyncS3StaticFiles(awsSession, cfg.App.StaticS3.S3Bucket, cfg.App.StaticS3.S3KeyPrefix, cfg.App.StaticDir)
if err != nil {
log.Fatalf("main : deploy : %v", err)
}
}
return
}
// =========================================================================
// URL Formatter
// s3UrlFormatter is a helper function used to convert an s3 key to
// a publicly available URL.
var staticS3UrlFormatter func(string) string
if cfg.App.StaticS3.S3Enabled || cfg.App.StaticS3.CloudFrontEnabled || cfg.App.StaticS3.ImgResizeEnabled {
s3UrlFormatter, err := deploy.S3UrlFormatter(awsSession, cfg.App.StaticS3.S3Bucket, cfg.App.StaticS3.S3KeyPrefix, cfg.App.StaticS3.CloudFrontEnabled)
if err != nil {
log.Fatalf("main : S3UrlFormatter failed : %v", err)
}
staticS3UrlFormatter = func(p string) string {
// When the path starts with a forward slash it's referencing a local file;
// make sure the static file prefix is included
if strings.HasPrefix(p, "/") {
p = filepath.Join(cfg.App.StaticS3.S3KeyPrefix, p)
}
return s3UrlFormatter(p)
}
}
// staticUrlFormatter is a helper function used by the template functions defined below.
// If the app has an S3 bucket defined for the static directory, all references in the app
// templates should be updated to use a fully qualified URL for either the public file on S3
// or from the CloudFront distribution.
var staticUrlFormatter func(string) string
if cfg.App.StaticS3.S3Enabled || cfg.App.StaticS3.CloudFrontEnabled {
staticUrlFormatter = staticS3UrlFormatter
} else {
baseUrl, err := url.Parse(cfg.App.BaseUrl)
if err != nil {
log.Fatalf("main : url Parse(%s) : %v", cfg.App.BaseUrl, err)
}
staticUrlFormatter = func(p string) string {
baseUrl.Path = p
return baseUrl.String()
}
}
// =========================================================================
// Template Renderer
// Implements interface web.Renderer to support alternative renderer
var (
staticS3BaseUrl string
staticS3CloudFrontOriginPrefix string
)
if cfg.App.StaticS3.S3Bucket != "" {
// TODO: lookup s3 url/cloud front distribution based on s3 bucket
}
// Append query string value to break browser cache used for services
// that render responses for a browser with the following:
// 1. when env=dev, the current timestamp will be used to ensure every
@ -178,8 +312,8 @@ func main() {
},
"AssetUrl": func(p string) string {
var u string
if staticS3BaseUrl != "" {
u = template_renderer.S3Url(staticS3BaseUrl, staticS3CloudFrontOriginPrefix, p)
if staticUrlFormatter != nil {
u = staticUrlFormatter(p)
} else {
if !strings.HasPrefix(p, "/") {
p = "/" + p
@ -187,14 +321,14 @@ func main() {
u = p
}
u = browserCacheBusterFunc( u)
u = browserCacheBusterFunc(u)
return u
},
"SiteAssetUrl": func(p string) string {
var u string
if staticS3BaseUrl != "" {
u = template_renderer.S3Url(staticS3BaseUrl, staticS3CloudFrontOriginPrefix, filepath.Join(cfg.App.Name, p))
if staticUrlFormatter != nil {
u = staticUrlFormatter(filepath.Join(cfg.App.Name, p))
} else {
if !strings.HasPrefix(p, "/") {
p = "/" + p
@ -202,14 +336,14 @@ func main() {
u = p
}
u = browserCacheBusterFunc( u)
u = browserCacheBusterFunc(u)
return u
},
"SiteS3Url": func(p string) string {
var u string
if staticS3BaseUrl != "" {
u = template_renderer.S3Url(staticS3BaseUrl, staticS3CloudFrontOriginPrefix, filepath.Join(cfg.App.Name, p))
if staticUrlFormatter != nil {
u = staticUrlFormatter(filepath.Join(cfg.App.Name, p))
} else {
u = p
}
@ -217,8 +351,8 @@ func main() {
},
"S3Url": func(p string) string {
var u string
if staticS3BaseUrl != "" {
u = template_renderer.S3Url(staticS3BaseUrl, staticS3CloudFrontOriginPrefix, p)
if staticUrlFormatter != nil {
u = staticUrlFormatter(p)
} else {
u = p
}
@ -226,14 +360,62 @@ func main() {
},
}
// Image Formatter - additional functions exposed to templates for resizing images
// to support responsive web applications.
if cfg.App.StaticS3.ImgResizeEnabled {
imgResizeS3KeyPrefix := filepath.Join(cfg.App.StaticS3.S3KeyPrefix, "images/responsive")
tmplFuncs["S3ImgSrcLarge"] = func(ctx context.Context, p string) template.HTMLAttr {
u := staticUrlFormatter(p)
res, _ := img_resize.S3ImgSrc(ctx, redisClient, staticS3UrlFormatter, awsSession, cfg.App.StaticS3.S3Bucket, imgResizeS3KeyPrefix, u, []int{320, 480, 800}, true)
return template.HTMLAttr(res)
}
tmplFuncs["S3ImgThumbSrcLarge"] = func(ctx context.Context, p string) template.HTMLAttr {
u := staticUrlFormatter(p)
res, _ := img_resize.S3ImgSrc(ctx, redisClient, staticS3UrlFormatter, awsSession, cfg.App.StaticS3.S3Bucket, imgResizeS3KeyPrefix, u, []int{320, 480, 800}, false)
return template.HTMLAttr(res)
}
tmplFuncs["S3ImgSrcMedium"] = func(ctx context.Context, p string) template.HTMLAttr {
u := staticUrlFormatter(p)
res, _ := img_resize.S3ImgSrc(ctx, redisClient, staticS3UrlFormatter, awsSession, cfg.App.StaticS3.S3Bucket, imgResizeS3KeyPrefix, u, []int{320, 640}, true)
return template.HTMLAttr(res)
}
tmplFuncs["S3ImgThumbSrcMedium"] = func(ctx context.Context, p string) template.HTMLAttr {
u := staticUrlFormatter(p)
res, _ := img_resize.S3ImgSrc(ctx, redisClient, staticS3UrlFormatter, awsSession, cfg.App.StaticS3.S3Bucket, imgResizeS3KeyPrefix, u, []int{320, 640}, false)
return template.HTMLAttr(res)
}
tmplFuncs["S3ImgSrcSmall"] = func(ctx context.Context, p string) template.HTMLAttr {
u := staticUrlFormatter(p)
res, _ := img_resize.S3ImgSrc(ctx, redisClient, staticS3UrlFormatter, awsSession, cfg.App.StaticS3.S3Bucket, imgResizeS3KeyPrefix, u, []int{320}, true)
return template.HTMLAttr(res)
}
tmplFuncs["S3ImgThumbSrcSmall"] = func(ctx context.Context, p string) template.HTMLAttr {
u := staticUrlFormatter(p)
res, _ := img_resize.S3ImgSrc(ctx, redisClient, staticS3UrlFormatter, awsSession, cfg.App.StaticS3.S3Bucket, imgResizeS3KeyPrefix, u, []int{320}, false)
return template.HTMLAttr(res)
}
tmplFuncs["S3ImgSrc"] = func(ctx context.Context, p string, sizes []int) template.HTMLAttr {
u := staticUrlFormatter(p)
res, _ := img_resize.S3ImgSrc(ctx, redisClient, staticS3UrlFormatter, awsSession, cfg.App.StaticS3.S3Bucket, imgResizeS3KeyPrefix, u, sizes, true)
return template.HTMLAttr(res)
}
tmplFuncs["S3ImgUrl"] = func(ctx context.Context, p string, size int) string {
u := staticUrlFormatter(p)
res, _ := img_resize.S3ImgUrl(ctx, redisClient, staticS3UrlFormatter, awsSession, cfg.App.StaticS3.S3Bucket, imgResizeS3KeyPrefix, u, size)
return res
}
}
//
t := template_renderer.NewTemplate(tmplFuncs)
// global variables exposed for rendering of responses with templates
gvd := map[string]interface{}{
"_App": map[string]interface{}{
"ENV": cfg.Env,
"BuildInfo": cfg.BuildInfo,
"ENV": cfg.Env,
"BuildInfo": cfg.BuildInfo,
"BuildVersion": build,
},
}
@ -243,11 +425,11 @@ func main() {
data := map[string]interface{}{}
return renderer.Render(ctx, w, r,
"base.tmpl", // base layout file to be used for rendering of errors
"base.tmpl", // base layout file to be used for rendering of errors
"error.tmpl", // generic format for errors, could select based on status code
web.MIMETextHTMLCharsetUTF8,
http.StatusOK,
data,
web.MIMETextHTMLCharsetUTF8,
http.StatusOK,
data,
)
}
@ -259,7 +441,7 @@ func main() {
enableHotReload := cfg.Env == "dev"
// Template Renderer used to generate HTML response for web experience.
renderer, err := template_renderer.NewTemplateRenderer(cfg.HTTP.TemplateDir, enableHotReload, gvd, t, eh)
renderer, err := template_renderer.NewTemplateRenderer(cfg.App.TemplateDir, enableHotReload, gvd, t, eh)
if err != nil {
log.Fatalf("main : Marshalling Config to JSON : %v", err)
}
@ -295,10 +477,10 @@ func main() {
//
// /debug/vars - Added to the default mux by the expvars package.
// /debug/pprof - Added to the default mux by the net/http/pprof package.
if cfg.HTTP.DebugHost != "" {
if cfg.App.DebugHost != "" {
go func() {
log.Printf("main : Debug Listening %s", cfg.HTTP.DebugHost)
log.Printf("main : Debug Listener closed : %v", http.ListenAndServe(cfg.HTTP.DebugHost, http.DefaultServeMux))
log.Printf("main : Debug Listening %s", cfg.App.DebugHost)
log.Printf("main : Debug Listener closed : %v", http.ListenAndServe(cfg.App.DebugHost, http.DefaultServeMux))
}()
}
@ -312,7 +494,7 @@ func main() {
api := http.Server{
Addr: cfg.HTTP.Host,
Handler: handlers.APP(shutdown, log, cfg.HTTP.StaticDir, cfg.HTTP.TemplateDir, nil, nil, renderer),
Handler: handlers.APP(shutdown, log, cfg.App.StaticDir, cfg.App.TemplateDir, nil, nil, renderer),
ReadTimeout: cfg.HTTP.ReadTimeout,
WriteTimeout: cfg.HTTP.WriteTimeout,
MaxHeaderBytes: 1 << 20,
@ -340,13 +522,13 @@ func main() {
log.Printf("main : %v : Start shutdown..", sig)
// Create context for Shutdown call.
ctx, cancel := context.WithTimeout(context.Background(), cfg.HTTP.ShutdownTimeout)
ctx, cancel := context.WithTimeout(context.Background(), cfg.App.ShutdownTimeout)
defer cancel()
// Asking listener to shutdown and load shed.
err := api.Shutdown(ctx)
if err != nil {
log.Printf("main : Graceful shutdown did not complete in %v : %v", cfg.HTTP.ShutdownTimeout, err)
log.Printf("main : Graceful shutdown did not complete in %v : %v", cfg.App.ShutdownTimeout, err)
err = api.Close()
}
@ -382,38 +564,3 @@ func browserCacheBuster(cacheBusterValueFunc func() string) func(uri string) str
return f
}
/*
"S3ImgSrcLarge": func(p string) template.HTMLAttr {
res, _ := blower_display.S3ImgSrc(cfg, site, p, []int{320, 480, 800}, true)
return template.HTMLAttr(res)
},
"S3ImgThumbSrcLarge": func(p string) template.HTMLAttr {
res, _ := blower_display.S3ImgSrc(cfg, site, p, []int{320, 480, 800}, false)
return template.HTMLAttr(res)
},
"S3ImgSrcMedium": func(p string) template.HTMLAttr {
res, _ := blower_display.S3ImgSrc(cfg, site, p, []int{320, 640}, true)
return template.HTMLAttr(res)
},
"S3ImgThumbSrcMedium": func(p string) template.HTMLAttr {
res, _ := blower_display.S3ImgSrc(cfg, site, p, []int{320, 640}, false)
return template.HTMLAttr(res)
},
"S3ImgSrcSmall": func(p string) template.HTMLAttr {
res, _ := blower_display.S3ImgSrc(cfg, site, p, []int{320}, true)
return template.HTMLAttr(res)
},
"S3ImgThumbSrcSmall": func(p string) template.HTMLAttr {
res, _ := blower_display.S3ImgSrc(cfg, site, p, []int{320}, false)
return template.HTMLAttr(res)
},
"S3ImgSrc": func(p string, sizes []int) template.HTMLAttr {
res, _ := blower_display.S3ImgSrc(cfg, site, p, sizes, true)
return template.HTMLAttr(res)
},
"S3ImgUrl": func(p string, size int) string {
res, _ := blower_display.S3ImgUrl(cfg, site, p, size)
return res
},
*/

Binary file not shown.


View File

@ -4,6 +4,40 @@
{{end}}
{{define "content"}}
Welcome to the web app
<p>S3ImgSrcLarge
<img {{ S3ImgSrcLarge $._ctx "/assets/images/glacier-example-pic.jpg" }}/>
</p>
<p>S3ImgThumbSrcLarge
<img {{ S3ImgThumbSrcLarge $._ctx "/assets/images/glacier-example-pic.jpg" }}/>
</p>
<p>S3ImgSrcMedium
<img {{ S3ImgSrcMedium $._ctx "/assets/images/glacier-example-pic.jpg" }}/>
</p>
<p>S3ImgThumbSrcMedium
<img {{ S3ImgThumbSrcMedium $._ctx "/assets/images/glacier-example-pic.jpg" }}/>
</p>
<p>S3ImgSrcSmall
<img {{ S3ImgSrcSmall $._ctx "/assets/images/glacier-example-pic.jpg" }}/>
</p>
<p>S3ImgThumbSrcSmall
<img {{ S3ImgThumbSrcSmall $._ctx "/assets/images/glacier-example-pic.jpg" }}/>
</p>
<p>S3ImgSrc
<img {{ S3ImgSrc $._ctx "/assets/images/glacier-example-pic.jpg" $.imgSizes }}/>
</p>
<p>S3ImgUrl
<img src="{{ S3ImgUrl $._ctx "/assets/images/glacier-example-pic.jpg" 200 }}" />
</p>
{{end}}
{{define "js"}}

View File

@ -0,0 +1,18 @@
{{ define "partials/buildinfo" }}
<p style="{{if eq ._Site.ENV "prod"}}display: none;{{end}}">
{{if ne ._Site.BuildInfo.CiCommitTag ""}}
Tag: {{ ._Site.BuildInfo.CiCommitRefName }}@{{ ._Site.BuildInfo.CiCommitSha }}<br/>
{{else}}
Branch: {{ ._Site.BuildInfo.CiCommitRefName }}@{{ ._Site.BuildInfo.CiCommitSha }}<br/>
{{end}}
{{if ne ._Site.ENV "prod"}}
Commit: {{ ._Site.BuildInfo.CiCommitTitle }}
{{if ne ._Site.BuildInfo.CiJobId ""}}
Job: <a href="{{ ._Site.BuildInfo.CiJobUrl }}" target="_blank">{{ ._Site.BuildInfo.CiJobId }}</a>
{{end}}
{{if ne ._Site.BuildInfo.CiPipelineId ""}}
Pipeline: <a href="{{ ._Site.BuildInfo.CiPipelineUrl }}" target="_blank">{{ ._Site.BuildInfo.CiPipelineId }}</a>
{{end}}
{{end}}
</p>
{{end}}

View File

@ -1,24 +1,31 @@
module geeks-accelerator/oss/saas-starter-kit/example-project
require (
github.com/aws/aws-sdk-go v1.19.33
github.com/dgrijalva/jwt-go v3.2.0+incompatible
github.com/dimfeld/httptreemux v5.0.1+incompatible
github.com/go-playground/locales v0.12.1
github.com/go-playground/universal-translator v0.16.0
github.com/go-redis/redis v6.15.2+incompatible
github.com/google/go-cmp v0.2.0
github.com/hashicorp/golang-lru v0.5.1 // indirect
github.com/hashicorp/golang-lru v0.5.1
github.com/kelseyhightower/envconfig v1.3.0
github.com/kr/pretty v0.1.0 // indirect
github.com/leodido/go-urn v1.1.0 // indirect
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646
github.com/opentracing/opentracing-go v1.1.0 // indirect
github.com/openzipkin/zipkin-go v0.1.1
github.com/pborman/uuid v0.0.0-20180122190007-c65b2f87fee3
github.com/philhofer/fwd v1.0.0 // indirect
github.com/philippgille/gokv v0.5.0 // indirect
github.com/pkg/errors v0.8.0
github.com/sethgrid/pester v0.0.0-20190127155807-68a33a018ad0
github.com/stretchr/testify v1.3.0 // indirect
github.com/tinylib/msgp v1.1.0 // indirect
go.opencensus.io v0.14.0
golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b
golang.org/x/net v0.0.0-20180724234803-3673e40ba225 // indirect
golang.org/x/sys v0.0.0-20190516110030-61b9204099cb // indirect
golang.org/x/text v0.3.0 // indirect
gopkg.in/DataDog/dd-trace-go.v1 v1.13.1
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect

View File

@ -1,3 +1,7 @@
github.com/aws/aws-sdk-go v1.19.32 h1:/usjSR6qsKfOKzk4tDNvZq7LqmP5+J0Cq/Uwsr2XVG8=
github.com/aws/aws-sdk-go v1.19.32/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.19.33 h1:qz9ZQtxCUuwBKdc5QiY6hKuISYGeRQyLVA2RryDEDaQ=
github.com/aws/aws-sdk-go v1.19.33/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
@ -8,10 +12,14 @@ github.com/go-playground/locales v0.12.1 h1:2FITxuFt/xuCNP1Acdhv62OzaCiviiE4kotf
github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
github.com/go-playground/universal-translator v0.16.0 h1:X++omBR/4cE2MNg91AoC3rmGrCjJ8eAeUP/K/EKx4DM=
github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
github.com/go-redis/redis v6.15.2+incompatible h1:9SpNVG76gr6InJGxoZ6IuuxaCOQwDAhzyXg+Bs+0Sb4=
github.com/go-redis/redis v6.15.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/kelseyhightower/envconfig v1.3.0 h1:IvRS4f2VcIQy6j4ORGIf9145T/AsUB+oY8LyvN8BXNM=
github.com/kelseyhightower/envconfig v1.3.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
@ -21,16 +29,24 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/leodido/go-urn v1.1.0 h1:Sm1gr51B1kKyfD2BlRcLSiEkffoG96g6TPv6eRoEiB8=
github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ=
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/openzipkin/zipkin-go v0.1.1 h1:A/ADD6HaPnAKj3yS7HjGHRK77qi41Hi0DirOOIQAeIw=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/pborman/uuid v0.0.0-20180122190007-c65b2f87fee3 h1:9J0mOv1rXIBlRjQCiAGyx9C3dZZh5uIa3HU0oTV8v1E=
github.com/pborman/uuid v0.0.0-20180122190007-c65b2f87fee3/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/philippgille/gokv v0.5.0 h1:6bgvKt+RR1BDxhD/oLXDTA9a7ws8xbgV3767ytBNrso=
github.com/philippgille/gokv v0.5.0/go.mod h1:3qSKa2SgG4qXwLfF4htVEWRoRNLi86+fNdn+jQH5Clw=
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sethgrid/pester v0.0.0-20190127155807-68a33a018ad0 h1:X9XMOYjxEfAYSy3xK1DzO5dMkkWhs9E9UCcS1IERx2k=
github.com/sethgrid/pester v0.0.0-20190127155807-68a33a018ad0/go.mod h1:Ad7IjTpvzZO8Fl0vh9AzQ+j/jYZfyp2diGwI8m5q+ns=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@ -42,6 +58,8 @@ golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b h1:2b9XGzhjiYsYPnKXoEfL7k
golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225 h1:kNX+jCowfMYzvlSvJu5pQWEmyWFrBXJ3PBy10xKMXK8=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/sys v0.0.0-20190516110030-61b9204099cb h1:k07iPOt0d6nEnwXF+kHB+iEg+WSuKe/SOQuFM2QoD+E=
golang.org/x/sys v0.0.0-20190516110030-61b9204099cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/DataDog/dd-trace-go.v1 v1.13.1 h1:oTzOClfuudNhW9Skkp2jxjqYO92uDKXqKLbiuPA13Rk=

View File

@ -3,6 +3,7 @@ package mid
import (
"context"
"net/http"
"runtime/debug"
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/web"
"github.com/pkg/errors"
@ -25,7 +26,7 @@ func Panics() web.Middleware {
// after the fact. Using the errors package will generate a stack trace.
defer func() {
if r := recover(); r != nil {
err = errors.Errorf("panic: %v", r)
err = errors.Errorf("panic: %+v %s", r, string(debug.Stack()))
}
}()

View File

@ -1,10 +1,19 @@
package auth
import (
"bytes"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"fmt"
"time"
jwt "github.com/dgrijalva/jwt-go"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"github.com/dgrijalva/jwt-go"
"github.com/pkg/errors"
)
@ -19,15 +28,15 @@ import (
// endpoint. See https://auth0.com/docs/jwks for more details.
type KeyFunc func(keyID string) (*rsa.PublicKey, error)
// NewSingleKeyFunc is a simple implementation of KeyFunc that only ever
// supports one key. This is easy for development but in production should be
// replaced with a caching layer that calls a JWKS endpoint.
func NewSingleKeyFunc(id string, key *rsa.PublicKey) KeyFunc {
// NewKeyFunc is an implementation of KeyFunc that
// supports multiple keys stored in a map keyed by kid.
func NewKeyFunc(keys map[string]*rsa.PrivateKey) KeyFunc {
return func(kid string) (*rsa.PublicKey, error) {
if id != kid {
key, ok := keys[kid]
if !ok {
return nil, fmt.Errorf("unrecognized kid %q", kid)
}
return key, nil
return key.Public().(*rsa.PublicKey), nil
}
}
@ -41,21 +50,193 @@ type Authenticator struct {
parser *jwt.Parser
}
// NewAuthenticator creates an *Authenticator for use. It will error if:
// - The private key is nil.
// - The public key func is nil.
// - The key ID is blank.
// NewAuthenticator creates an *Authenticator for use.
// Key expiration is optional and is used to filter out old keys.
// It will error if:
// - The aws session is nil.
// - The aws secret id is blank.
// - The specified algorithm is unsupported.
func NewAuthenticator(key *rsa.PrivateKey, keyID, algorithm string, publicKeyFunc KeyFunc) (*Authenticator, error) {
if key == nil {
return nil, errors.New("private key cannot be nil")
func NewAuthenticator(awsSession *session.Session, awsSecretID string, now time.Time, keyExpiration time.Duration) (*Authenticator, error) {
if awsSession == nil {
return nil, errors.New("aws session cannot be nil")
}
if publicKeyFunc == nil {
return nil, errors.New("public key function cannot be nil")
if awsSecretID == "" {
return nil, errors.New("aws secret id cannot be empty")
}
if keyID == "" {
return nil, errors.New("keyID cannot be blank")
if now.IsZero() {
now = time.Now().UTC()
}
// Time threshold to stop loading keys, any key with a created date
// before this value will not be loaded.
var disabledCreatedDate time.Time
// Time threshold to create a new key. If a current key exists and the
// created date of the key is before this value, a new key will be created.
var activeCreatedDate time.Time
// If an expiration duration is included, convert to past time from now.
if keyExpiration.Seconds() != 0 {
// Ensure the expiration is a time in the past for comparison below.
if keyExpiration.Seconds() > 0 {
keyExpiration = keyExpiration * -1
}
// Stop loading keys when the created date exceeds two times the key expiration
disabledCreatedDate = now.UTC().Add(keyExpiration * 2)
// Time used to determine when a new key should be created.
activeCreatedDate = now.UTC().Add(keyExpiration)
}
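// Example (assuming keyExpiration = 1h and now = 12:00 UTC): key versions created
// before 10:00 are skipped entirely, and if the newest remaining key was created
// before 11:00 a new key is generated further below.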
// Init new AWS Secret Manager using provided AWS session.
secretManager := secretsmanager.New(awsSession)
// A list of version ids for the stored secret. All keys will be stored under
// the same name in AWS Secrets Manager. We still want to load old keys for a
// short period of time to ensure any requests in flight have the opportunity
// to be completed.
var versionIds []string
// Execute a call to AWS Secrets Manager to return a list of version ids for the
// provided secret ID.
listParams := &secretsmanager.ListSecretVersionIdsInput{
SecretId: aws.String(awsSecretID),
}
err := secretManager.ListSecretVersionIdsPages(listParams,
func(page *secretsmanager.ListSecretVersionIdsOutput, lastPage bool) bool {
for _, v := range page.Versions {
// When disabledCreatedDate is set, compare the created date
// of each key version to the disabled cut-off time.
if !disabledCreatedDate.IsZero() && v.CreatedDate != nil && !v.CreatedDate.IsZero() {
// Skip any versions created before the disabled cut-off time.
if v.CreatedDate.UTC().Unix() < disabledCreatedDate.UTC().Unix() {
continue
}
}
if v.VersionId != nil {
versionIds = append(versionIds, *v.VersionId)
}
}
return !lastPage
},
)
// Flag whether the secret exists and update needs to be used
// instead of create.
var awsSecretIDNotFound bool
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case secretsmanager.ErrCodeResourceNotFoundException:
awsSecretIDNotFound = true
}
}
if !awsSecretIDNotFound {
return nil, errors.Wrapf(err, "aws list secret version ids for secret ID %s failed", awsSecretID)
}
}
// Map of keys stored by version id. version id is kid.
keyContents := make(map[string][]byte)
// The current key id if there is an active one.
var curKeyId string
// If the list of version ids is not empty, load the keys from secret manager.
if len(versionIds) > 0 {
// The max created date to determine the most recent key.
var lastCreatedDate time.Time
for _, id := range versionIds {
res, err := secretManager.GetSecretValue(&secretsmanager.GetSecretValueInput{
SecretId: aws.String(awsSecretID),
VersionId: aws.String(id),
})
if err != nil {
return nil, errors.Wrapf(err, "aws secret id %s, version id %s value failed", awsSecretID, id)
}
if len(res.SecretBinary) == 0 {
continue
}
keyContents[*res.VersionId] = res.SecretBinary
if lastCreatedDate.IsZero() || res.CreatedDate.UTC().Unix() > lastCreatedDate.UTC().Unix() {
curKeyId = *res.VersionId
lastCreatedDate = res.CreatedDate.UTC()
}
}
// If the newest key is older than the active threshold, clear the current key id
// so a new key is generated below.
if !activeCreatedDate.IsZero() && lastCreatedDate.UTC().Unix() < activeCreatedDate.UTC().Unix() {
curKeyId = ""
}
}
// If there are no keys stored in Secrets Manager, create a new one; or,
// if the current key needs to be rotated, generate a new key and update the secret.
// @TODO: When a new key is generated and there are multiple instances of the service running,
// it's possible, based on the key expiration set, that requests fail because keys are only
// refreshed on instance launch. Keys could be stored in a kv store and that value updated
// when new keys are generated.
if len(keyContents) == 0 || curKeyId == "" {
privateKey, err := keygen()
if err != nil {
return nil, errors.Wrap(err, "failed to generate new private key")
}
if awsSecretIDNotFound {
res, err := secretManager.CreateSecret(&secretsmanager.CreateSecretInput{
Name: aws.String(awsSecretID),
SecretBinary: privateKey,
})
if err != nil {
return nil, errors.Wrap(err, "failed to create new secret with private key")
}
curKeyId = *res.VersionId
} else {
res, err := secretManager.UpdateSecret(&secretsmanager.UpdateSecretInput{
SecretId: aws.String(awsSecretID),
SecretBinary: privateKey,
})
if err != nil {
return nil, errors.Wrap(err, "failed to create new secret with private key")
}
curKeyId = *res.VersionId
}
keyContents[curKeyId] = privateKey
}
// Map of keys by kid (version id).
keys := make(map[string]*rsa.PrivateKey)
// The current active key to be used.
var curPrivateKey *rsa.PrivateKey
// Loop through all the key bytes and load the private key.
for kid, keyContent := range keyContents {
key, err := jwt.ParseRSAPrivateKeyFromPEM(keyContent)
if err != nil {
return nil, errors.Wrap(err, "parsing auth private key")
}
keys[kid] = key
if kid == curKeyId {
curPrivateKey = key
}
}
// Lookup function to be used by the middleware to validate the kid and
// return the associated public key.
publicKeyLookup := NewKeyFunc(keys)
// Algorithm to be used for signing with the private key.
algorithm := "RS256"
if jwt.GetSigningMethod(algorithm) == nil {
return nil, errors.Errorf("unknown algorithm %v", algorithm)
}
@ -68,10 +249,10 @@ func NewAuthenticator(key *rsa.PrivateKey, keyID, algorithm string, publicKeyFun
}
a := Authenticator{
privateKey: key,
keyID: keyID,
privateKey: curPrivateKey,
keyID: curKeyId,
algorithm: algorithm,
kf: publicKeyFunc,
kf: publicKeyLookup,
parser: &parser,
}
@ -125,3 +306,23 @@ func (a *Authenticator) ParseClaims(tknStr string) (Claims, error) {
return claims, nil
}
// keygen creates a PEM-encoded RSA private key for signing auth tokens.
func keygen() ([]byte, error) {
key, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
return []byte{}, errors.Wrap(err, "generating keys")
}
block := pem.Block{
Type: "RSA PRIVATE KEY",
Bytes: x509.MarshalPKCS1PrivateKey(key),
}
buf := new(bytes.Buffer)
if err := pem.Encode(buf, &block); err != nil {
return []byte{}, errors.Wrap(err, "encoding to private file")
}
return buf.Bytes(), nil
}
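Below is a minimal usage sketch of the new Secrets Manager backed authenticator. The secret ID and the one-hour key expiration are illustrative values, not package defaults.
```go
package main

import (
	"log"
	"time"

	"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/auth"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Assumes AWS credentials and region are available in the environment.
	awsSession := session.Must(session.NewSession())

	// Illustrative secret ID and key expiration; keys older than twice the
	// expiration are ignored and the newest key is rotated once it expires.
	a, err := auth.NewAuthenticator(awsSession, "auth-secret-key", time.Now().UTC(), time.Hour)
	if err != nil {
		log.Fatalf("new authenticator: %v", err)
	}

	claims := auth.Claims{Roles: []string{auth.RoleAdmin}}

	tknStr, err := a.GenerateToken(claims)
	if err != nil {
		log.Fatalf("generate token: %v", err)
	}

	parsed, err := a.ParseClaims(tknStr)
	if err != nil {
		log.Fatalf("parse claims: %v", err)
	}
	log.Printf("token verified, roles: %v", parsed.Roles)
}
```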

View File

@ -1,29 +1,56 @@
package auth_test
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"os"
"testing"
"time"
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/auth"
jwt "github.com/dgrijalva/jwt-go"
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/tests"
"github.com/pborman/uuid"
)
var test *tests.Test
// TestMain is the entry point for testing.
func TestMain(m *testing.M) {
os.Exit(testMain(m))
}
func testMain(m *testing.M) int {
test = tests.New()
defer test.TearDown()
return m.Run()
}
func TestAuthenticator(t *testing.T) {
// Parse the private key used to generate the token.
prvKey, err := jwt.ParseRSAPrivateKeyFromPEM([]byte(privateRSAKey))
if err != nil {
t.Fatal(err)
}
awsSecretID := "jwt-key" + uuid.NewRandom().String()
// Parse the public key used to validate the token.
pubKey, err := jwt.ParseRSAPublicKeyFromPEM([]byte(publicRSAKey))
if err != nil {
t.Fatal(err)
}
defer func() {
// cleanup the secret after test is complete
sm := secretsmanager.New(test.AwsSession)
_, err := sm.DeleteSecret(&secretsmanager.DeleteSecretInput{
SecretId: aws.String(awsSecretID),
})
if err != nil {
t.Fatal(err)
}
}()
a, err := auth.NewAuthenticator(prvKey, privateRSAKeyID, "RS256", auth.NewSingleKeyFunc(privateRSAKeyID, pubKey))
if err != nil {
t.Fatal(err)
var authTests = []struct {
name string
awsSecretID string
now time.Time
keyExpiration time.Duration
error error
}{
{"NoKeyExpiration", awsSecretID, time.Now(), time.Duration(0), nil},
{"KeyExpirationOk", awsSecretID, time.Now(), time.Duration(time.Second * 3600), nil},
{"KeyExpirationDisabled", awsSecretID, time.Now().Add(time.Second * 3600 * 3), time.Duration(time.Second * 3600), nil},
}
// Generate the token.
@ -31,67 +58,44 @@ func TestAuthenticator(t *testing.T) {
Roles: []string{auth.RoleAdmin},
}
tknStr, err := a.GenerateToken(signedClaims)
if err != nil {
t.Fatal(err)
}
t.Log("Given the need to validate initiating a new Authenticator by key expiration.")
{
for i, tt := range authTests {
t.Logf("\tTest: %d\tWhen running test: %s", i, tt.name)
{
a, err := auth.NewAuthenticator(test.AwsSession, tt.awsSecretID, tt.now, tt.keyExpiration)
if err != tt.error {
t.Log("\t\tGot :", err)
t.Log("\t\tWant:", tt.error)
t.Fatalf("\t%s\tNewAuthenticator failed.", tests.Failed)
}
parsedClaims, err := a.ParseClaims(tknStr)
if err != nil {
t.Fatal(err)
}
tknStr, err := a.GenerateToken(signedClaims)
if err != nil {
t.Log("\t\tGot :", err)
t.Fatalf("\t%s\tGenerateToken failed.", tests.Failed)
}
// Assert expected claims.
if exp, got := len(signedClaims.Roles), len(parsedClaims.Roles); exp != got {
t.Fatalf("expected %v roles, got %v", exp, got)
}
if exp, got := signedClaims.Roles[0], parsedClaims.Roles[0]; exp != got {
t.Fatalf("expected roles[0] == %v, got %v", exp, got)
parsedClaims, err := a.ParseClaims(tknStr)
if err != nil {
t.Log("\t\tGot :", err)
t.Fatalf("\t%s\tParseClaims failed.", tests.Failed)
}
// Assert expected claims.
if exp, got := len(signedClaims.Roles), len(parsedClaims.Roles); exp != got {
t.Log("\t\tGot :", got)
t.Log("\t\tWant:", exp)
t.Fatalf("\t%s\tShould got the same number of roles.", tests.Failed)
}
if exp, got := signedClaims.Roles[0], parsedClaims.Roles[0]; exp != got {
t.Log("\t\tGot :", got)
t.Log("\t\tWant:", exp)
t.Fatalf("\t%s\tShould got the same role name.", tests.Failed)
}
t.Logf("\t%s\tNewAuthenticator ok.", tests.Success)
}
}
}
}
// The key id we would have generated for the private below key
const privateRSAKeyID = "54bb2165-71e1-41a6-af3e-7da4a0e1e2c1"
// Output of:
// openssl genpkey -algorithm RSA -out private.pem -pkeyopt rsa_keygen_bits:2048
const privateRSAKey = `-----BEGIN PRIVATE KEY-----
MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQDdiBDU4jqRYuHl
yBmo5dWB1j9aeDrXzUTJbRKlgo+DWDQzIzJQvackvRu8/f7B5cseoqmeJcmBu6pc
4DmQ+puGNHxzCyYVFSMwRtHBZvfWS3P+UqIXCKRAX/NZbLkUEeqPnn5WXjA+YXKk
sfniE0xDH8W22o0OXHOzRhDWORjNTulpMpLv8tKnnLKh2Y/kCL/4vo0SZ+RWh8F9
4+JTZx/47RHWb6fkxkikyTO3zO3efIkrKjfRx2CwFwO2rQ/3T04GQB/Lgr5lfJQU
iofvvVYuj2xBJao+3t9Ir0OeSbw1T5Rz03VLtN8SZhvaxWaBfwkUuUNL1glJO+Yd
LkMxGS0zAgMBAAECggEBAKM6m7RQUPlJE8u8qfOCDdSSKbIefrT9wZ5tKN0dG2Oa
/TNkzrEhXOO8F5Ek0a7LA+Q51KL7ksNtpLS0XpZNoYS8bapS36ePIJN0yx8nIJwc
koYlGtu/+U6ZpHQSoTiBjwRtswcudXuxT8i8frOupnWbFpKJ7H9Vbcb9bHB8N6Mm
D63wSBR08ZMrZXheKHQCQcxSQ2ZQZ+X3LBIOdXZH1aaptU2KpMEU5oyxXPShTVMg
0f748yU2njXCF0ZABEanXgp13egr/MPqHwnS/h0PH45bNy3IgFtMEHEouQFsAzoS
qNe8/9WnrpY87UdSZMnzF/IAXV0bmollDnqfM8/EqxkCgYEA96ThXYGzAK5RKNqp
RqVdRVA0UTT48sJvrxLMuHpyUzg6cl8FZE5rrNxFbouxvyN192Ctv1q8yfv4/HfM
KpmtEjt3fYtITHVXII6O3qNaRoIEPwKT4eK/ar+JO59vI0YvweXvDH5TkS9aiFr+
pPGf3a7EbE24BKhgiI8eT6K0VuUCgYEA5QGg11ZVoUut4ERAPouwuSdWwNe0HYqJ
A1m5vTvF5ghUHAb023lrr7Psq9DPJQQe7GzPfXafsat9hGenyqiyxo1gwClIyoEH
fOg753kdHcy60VVzumsPXece3OOSnd0rRMgfsSsclgYO7z0g9YZPAjt2w9NVw6uN
UDqX3eO2WjcCgYEA015eoNHv99fRG96udsbz+hI/5UQibAl7C+Iu7BJO/CrU8AOc
dYXdr5f+hyEioDLjIDbbdaU71+aCGPMjRwUNzK8HCRfVqLTKndYvqWWhyuZ0O1e2
4ykHGlTLDCHD2Uaxwny/8VjteNEDI7kO+bfmLG9b5djcBNW2Nzh4tZ348OUCgYEA
vIrTppbhF1QciqkGj7govrgBt/GfzDaTyZtkzcTZkSNIRG8Bx3S3UUh8UZUwBpTW
9OY9ClnQ7tF3HLzOq46q6cfaYTtcP8Vtqcv2DgRsEW3OXazSBChC1ZgEk+4Vdz1x
c0akuRP6jBXe099rNFno0LiudlmXoeqrBOPIxxnEt48CgYEAxNZBc/GKiHXz/ZRi
IZtRT5rRRof7TEiDxSKOXHSG7HhIRDCrpwn4Dfi+GWNHIwsIlom8FzZTSHAN6pqP
E8Imrlt3vuxnUE1UMkhDXrlhrxslRXU9enynVghAcSrg6ijs8KuN/9RB/I7H03cT
77mx9eHMcYcRUciY5C8AOaArmMA=
-----END PRIVATE KEY-----`
// Output of:
// openssl rsa -pubout -in private.pem -out public.pem
const publicRSAKey = `-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3YgQ1OI6kWLh5cgZqOXV
gdY/Wng6181EyW0SpYKPg1g0MyMyUL2nJL0bvP3+weXLHqKpniXJgbuqXOA5kPqb
hjR8cwsmFRUjMEbRwWb31ktz/lKiFwikQF/zWWy5FBHqj55+Vl4wPmFypLH54hNM
Qx/FttqNDlxzs0YQ1jkYzU7paTKS7/LSp5yyodmP5Ai/+L6NEmfkVofBfePiU2cf
+O0R1m+n5MZIpMkzt8zt3nyJKyo30cdgsBcDtq0P909OBkAfy4K+ZXyUFIqH771W
Lo9sQSWqPt7fSK9Dnkm8NU+Uc9N1S7TfEmYb2sVmgX8JFLlDS9YJSTvmHS5DMRkt
MwIDAQAB
-----END PUBLIC KEY-----`

View File

@ -1,8 +0,0 @@
package deploy
/*
func () {
func (c *CloudFront) ListDistributions(input *ListDistributionsInput)
} */

View File

@ -0,0 +1,163 @@
package deploy
import (
"fmt"
"net/url"
"path/filepath"
"strings"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudfront"
"github.com/pkg/errors"
)
func CloudFrontDistribution(awsSession *session.Session, s3Bucket string) (*cloudfront.DistributionSummary, error) {
// Init new CloudFront using provided AWS session.
cloudFront := cloudfront.New(awsSession)
// Loop through all the cloudfront distributions and find the one that matches the
// S3 bucket name. AWS doesn't currently support multiple distributions per bucket,
// so this should always be a one-to-one match.
var distribution *cloudfront.DistributionSummary
err := cloudFront.ListDistributionsPages(&cloudfront.ListDistributionsInput{},
func(page *cloudfront.ListDistributionsOutput, lastPage bool) bool {
if page.DistributionList != nil {
for _, v := range page.DistributionList.Items {
if v.DomainName == nil || v.Origins == nil || v.Origins.Items == nil {
continue
}
for _, o := range v.Origins.Items {
if o.DomainName == nil || !strings.HasPrefix(*o.DomainName, s3Bucket+".") {
continue
}
distribution = v
break
}
if distribution != nil {
break
}
}
}
if distribution != nil {
return false
}
return !lastPage
},
)
if err != nil {
return nil, err
}
if distribution == nil {
return nil, errors.Errorf("aws cloud front deployment does not exist for s3 bucket %s.", s3Bucket)
}
return distribution, nil
}
// S3UrlFormatter returns a function that formats a relative path as a publicly
// accessible URL for either the S3 bucket or, when enabled, the CloudFront distribution.
// It will error if:
// - The aws session is nil.
// - The aws s3 bucket is blank.
func S3UrlFormatter(awsSession *session.Session, s3Bucket, s3KeyPrefix string, enableCloudFront bool) (func(string) string, error) {
if awsSession == nil {
return nil, errors.New("aws session cannot be nil")
}
if s3Bucket == "" {
return nil, errors.New("aws s3 bucket cannot be empty")
}
var (
baseS3Url string
baseS3Origin string
)
if enableCloudFront {
dist, err := CloudFrontDistribution(awsSession, s3Bucket)
if err != nil {
return nil, err
}
// Format the domain as an HTTPS url, "dzuyel7n94hma.cloudfront.net"
baseS3Url = fmt.Sprintf("https://%s/", *dist.DomainName)
// The origin path configured for the CloudFront distribution needs to be stripped
// from the path provided; the resulting URL should not include it, e.g. "/public".
baseS3Origin = *dist.Origins.Items[0].OriginPath
} else {
// The static files are uploaded under a specific prefix, so we need to ensure
// the path reference includes this prefix
s3Path := filepath.Join(s3Bucket, s3KeyPrefix)
if *awsSession.Config.Region == "us-east-1" {
// US East (N.Virginia) region endpoint, http://s3.amazonaws.com/bucket or
// http://s3-external-1.amazonaws.com/bucket/
baseS3Url = fmt.Sprintf("https://s3.amazonaws.com/%s/", s3Path)
} else {
// Region-specific endpoint, http://s3-aws-region.amazonaws.com/bucket
baseS3Url = fmt.Sprintf("https://s3-%s.amazonaws.com/%s/", *awsSession.Config.Region, s3Path)
}
baseS3Origin = s3KeyPrefix
}
f := func(p string) string {
return S3Url(baseS3Url, baseS3Origin, p)
}
return f, nil
}
// S3Url formats a path to include either the S3 URL or a CloudFront
// URL instead of serving the file from local file system.
func S3Url(baseS3Url, baseS3Origin, p string) string {
// If it's already a URL, then don't format it
if strings.HasPrefix(p, "http") {
return p
}
// Drop the beginning forward slash
p = strings.TrimLeft(p, "/")
// In the case of CloudFront, the base URL may not match S3, so the origin
// needs to be removed from the path provided.
// e.g. the s3 bucket + path of
// gitw-corp-web.s3.amazonaws.com/public
// maps to dzuyel7n94hma.cloudfront.net
// where the path prefix of '/public' needs to be dropped.
org := strings.Trim(baseS3Origin, "/")
if org != "" {
p = strings.Replace(p, org+"/", "", 1)
}
// Parse out the querystring from the path
var pathQueryStr string
if strings.Contains(p, "?") {
pts := strings.Split(p, "?")
p = pts[0]
if len(pts) > 1 {
pathQueryStr = pts[1]
}
}
u, err := url.Parse(baseS3Url)
if err != nil {
return "?"
}
ldir := filepath.Base(u.Path)
if strings.HasPrefix(p, ldir) {
p = strings.Replace(p, ldir+"/", "", 1)
}
u.Path = filepath.Join(u.Path, p)
u.RawQuery = pathQueryStr
return u.String()
}
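A short usage sketch of the formatter with CloudFront disabled; the bucket name and file path are placeholders.
```go
package main

import (
	"log"

	"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/deploy"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Assumes AWS credentials and AWS_REGION are configured in the environment.
	awsSession := session.Must(session.NewSession())

	// Placeholder bucket and prefix; with CloudFront disabled, plain S3 URLs are returned.
	formatURL, err := deploy.S3UrlFormatter(awsSession, "example-bucket", "public/web_app/static", false)
	if err != nil {
		log.Fatalf("s3 url formatter: %v", err)
	}

	// For a session in us-east-1 this prints:
	// https://s3.amazonaws.com/example-bucket/public/web_app/static/assets/css/main.css
	log.Println(formatURL("public/web_app/static/assets/css/main.css"))
}
```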

View File

@ -0,0 +1,20 @@
package deploy
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
)
// SyncS3StaticFiles copies the local files from the static directory to s3
// with public-read enabled.
func SyncS3StaticFiles(awsSession *session.Session, staticS3Bucket, staticS3Prefix, staticDir string) error {
uploader := s3manager.NewUploader(awsSession)
di := NewDirectoryIterator(staticS3Bucket, staticS3Prefix, staticDir, "public-read")
if err := uploader.UploadWithIterator(aws.BackgroundContext(), di); err != nil {
return err
}
return nil
}
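As a rough sketch of how this could be invoked from a deploy step; the bucket name, key prefix, and local directory below are placeholders, not values from the commit.

```
// deployStatic is a hypothetical deploy step; the bucket, prefix, and
// directory values are placeholders.
func deployStatic(awsSession *session.Session) error {
	// Copies ./cmd/web-app/static to s3://example-static-bucket/public
	// with a public-read ACL on every object.
	return SyncS3StaticFiles(awsSession, "example-static-bucket", "public", "./cmd/web-app/static")
}
```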

View File

@ -0,0 +1,103 @@
package deploy
import (
"bytes"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/pkg/errors"
)
// DirectoryIterator represents an iterator of a specified directory
type DirectoryIterator struct {
filePaths []string
bucket string
keyPrefix string
acl string
next struct {
path string
f *os.File
}
err error
}
// NewDirectoryIterator builds a new DirectoryIterator
func NewDirectoryIterator(bucket, keyPrefix, dir, acl string) s3manager.BatchUploadIterator {
// The key prefix could end with the base directory name;
// if that's the case, drop the directory name from the key prefix.
if keyPrefix != "" {
dirName := filepath.Base(dir)
keyPrefix = strings.TrimRight(keyPrefix, "/")
keyPrefix = strings.TrimSuffix(keyPrefix, dirName)
}
var paths []string
filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
// Skip entries that could not be accessed and collect only regular files.
if err != nil {
return nil
}
if !info.IsDir() {
paths = append(paths, path)
}
return nil
})
return &DirectoryIterator{
filePaths: paths,
bucket: bucket,
keyPrefix: keyPrefix,
acl: acl,
}
}
// Next returns whether next file exists or not
func (di *DirectoryIterator) Next() bool {
if len(di.filePaths) == 0 {
di.next.f = nil
return false
}
f, err := os.Open(di.filePaths[0])
di.err = err
di.next.f = f
di.next.path = di.filePaths[0]
di.filePaths = di.filePaths[1:]
return di.Err() == nil
}
// Err returns error of DirectoryIterator
func (di *DirectoryIterator) Err() error {
return errors.WithStack(di.err)
}
// UploadObject uploads a file
func (di *DirectoryIterator) UploadObject() s3manager.BatchUploadObject {
f := di.next.f
var acl *string
if di.acl != "" {
acl = aws.String(di.acl)
}
// Read the file content into a buffer so the content type can be detected
// and the bytes uploaded; record any read error on the iterator.
buffer, err := ioutil.ReadAll(f)
if err != nil {
di.err = err
}
return s3manager.BatchUploadObject{
Object: &s3manager.UploadInput{
Bucket: aws.String(di.bucket),
Key: aws.String(filepath.Join(di.keyPrefix, di.next.path)),
Body: bytes.NewReader(buffer),
ContentType: aws.String(http.DetectContentType(buffer)),
ACL: acl,
},
After: func() error {
return f.Close()
},
}
}
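For reference, a simplified sketch of how a batch uploader consumes the Next/Err/UploadObject contract above; in practice SyncS3StaticFiles simply hands the iterator to uploader.UploadWithIterator, which performs this loop (and error batching) itself.

```
// uploadAll is an illustrative manual loop over a BatchUploadIterator.
func uploadAll(awsSession *session.Session, it s3manager.BatchUploadIterator) error {
	uploader := s3manager.NewUploader(awsSession)
	for it.Next() {
		obj := it.UploadObject()
		// Upload the next object, then run its After hook (closing the file).
		if _, err := uploader.Upload(obj.Object); err != nil {
			return err
		}
		if obj.After != nil {
			if err := obj.After(); err != nil {
				return err
			}
		}
	}
	// Err reports any failure encountered while advancing the iterator.
	return it.Err()
}
```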

View File

@ -0,0 +1,392 @@
package img_resize
import (
"bytes"
"context"
"crypto/md5"
"fmt"
"image"
"image/gif"
"image/jpeg"
"image/png"
"io/ioutil"
"net/http"
"net/url"
"path/filepath"
"sort"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/nfnt/resize"
"github.com/pkg/errors"
"github.com/sethgrid/pester"
redistrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis"
)
// S3ImgUrl returns the URL of a single resized image for the given path and size, parsed from the generated srcset.
func S3ImgUrl(ctx context.Context, redisClient *redistrace.Client, s3UrlFormatter func(string) string, awsSession *session.Session, s3Bucket, S3KeyPrefix, p string, size int) (string, error) {
src, err := S3ImgSrc(ctx, redisClient, s3UrlFormatter, awsSession, s3Bucket, S3KeyPrefix, p, []int{size}, true)
if err != nil {
return "", err
}
var imgUrl string
if strings.Contains(src, "srcset=\"") {
imgUrl = strings.Split(src, "srcset=\"")[1]
imgUrl = strings.Trim(strings.Split(imgUrl, ",")[0], "\"")
} else if strings.Contains(src, "src=\"") {
imgUrl = strings.Split(src, "src=\"")[1]
imgUrl = strings.Trim(strings.Split(imgUrl, ",")[0], "\"")
} else {
imgUrl = src
}
if strings.Contains(imgUrl, " ") {
imgUrl = strings.Split(imgUrl, " ")[0]
}
return imgUrl, nil
}
// S3ImgSrc returns a srcset attribute for a given image URL and the defined sizes.
// The local image path must already be formatted as a fully qualified image URL;
// on stage and prod the app will not have access to the local image
// files when App.StaticS3 is enabled.
func S3ImgSrc(ctx context.Context, redisClient *redistrace.Client, s3UrlFormatter func(string) string, awsSession *session.Session, s3Bucket, s3KeyPrefix, imgUrlStr string, sizes []int, includeOrig bool) (string, error) {
// Default return value on error.
defaultSrc := fmt.Sprintf(`src="%s"`, imgUrlStr)
// Only fully qualified image URLs are supported. On dev the app host should
// still be included since this package has no concept of the static directory.
if !strings.HasPrefix(imgUrlStr, "http") {
return defaultSrc, nil
}
// Extract the image path from the URL.
imgUrl, err := url.Parse(imgUrlStr)
if err != nil {
return defaultSrc, errors.WithStack(err)
}
// Determine the file extension for the image path.
pts := strings.Split(imgUrl.Path, ".")
filExt := strings.ToLower(pts[len(pts)-1])
if filExt == "jpg" {
filExt = ".jpg"
} else if filExt == "jpeg" {
filExt = ".jpeg"
} else if filExt == "gif" {
filExt = ".gif"
} else if filExt == "png" {
filExt = ".png"
} else {
return defaultSrc, nil
}
// Cache Key used by Redis for storing the resulting image src to avoid having to
// regenerate on each page load.
data := []byte(fmt.Sprintf("S3ImgSrc:%s:%v:%v", imgUrlStr, sizes, includeOrig))
ck := fmt.Sprintf("%x", md5.Sum(data))
// Check redis for the cache key.
var imgSrc string
cv, err := redisClient.WithContext(ctx).Get(ck).Result()
if err != nil {
// TODO: log the error as a warning
} else if len(cv) > 0 {
imgSrc = string(cv)
}
if imgSrc == "" {
// Make the http request to retrieve the image.
res, err := pester.Get(imgUrl.String())
if err != nil {
return imgSrc, errors.WithStack(err)
}
defer res.Body.Close()
// Validate the http status is OK and request did not fail.
if res.StatusCode != http.StatusOK {
err = errors.Errorf("Request failed with statusCode %v for %s", res.StatusCode, imgUrlStr)
return defaultSrc, errors.WithStack(err)
}
// Read all the image bytes.
dat, err := ioutil.ReadAll(res.Body)
if err != nil {
return defaultSrc, errors.WithStack(err)
}
//if hv, ok := res.Request.Response.Header["Last-Modified"]; ok && len(hv) > 0 {
// // Expires: Sun, 03 May 2015 23:02:37 GMT
// http.ParseTime(hv[0])
//}
// s3Path is the base s3 key used to store all the associated resized images,
// keyed by an md5 of the image host + path.
s3Path := filepath.Join(s3KeyPrefix, fmt.Sprintf("%x", md5.Sum([]byte(imgUrl.Host+imgUrl.Path))))
// baseImgName is the base image filename
// Extract the image filename from the url
baseImgName := filepath.Base(imgUrl.Path)
// If the image has a query string, append md5 and append to s3Path
if len(imgUrl.Query()) > 0 {
qh := fmt.Sprintf("%x", md5.Sum([]byte(imgUrl.Query().Encode())))
s3Path = s3Path + "q" + qh
// Update the base image name to include the query string hash
pts := strings.Split(baseImgName, ".")
if len(pts) >= 2 {
pts[len(pts)-2] = pts[len(pts)-2] + "-" + qh
baseImgName = strings.Join(pts, ".")
} else {
baseImgName = baseImgName + "-" + qh
}
}
// checkSum is used to determine if the contents of the src file changed.
var checkSum string
// Try to pull a value from the response headers to be used as a checksum
if hv, ok := res.Header["ETag"]; ok && len(hv) > 0 {
// ETag: "5485fac7-ae74"
checkSum = strings.Trim(hv[0], "\"")
} else if hv, ok := res.Header["Last-Modified"]; ok && len(hv) > 0 {
// Last-Modified: Mon, 08 Dec 2014 19:23:51 GMT
checkSum = fmt.Sprintf("%x", md5.Sum([]byte(hv[0])))
} else {
checkSum = fmt.Sprintf("%x", md5.Sum(dat))
}
// Append the checkSum to the s3Path
s3Path = filepath.Join(s3Path, checkSum)
// Init a new S3 service client using the provided AWS session.
s3srv := s3.New(awsSession)
// List all the current images that exist on s3 for the s3 path.
// New files will have none until they are generated below and uploaded.
listRes, err := s3srv.ListObjects(&s3.ListObjectsInput{
Bucket: aws.String(s3Bucket),
Prefix: aws.String(s3Path),
})
if err != nil {
return defaultSrc, errors.WithStack(err)
}
// Loop through all the S3 objects and store them in a map keyed by
// filename with each object's current LastModified time.
curFiles := make(map[string]time.Time)
if listRes != nil && listRes.Contents != nil {
for _, obj := range listRes.Contents {
fname := filepath.Base(*obj.Key)
curFiles[fname] = obj.LastModified.UTC()
}
}
pts := strings.Split(baseImgName, ".")
var uidx int
if len(pts) >= 2 {
uidx = len(pts) - 2
}
var maxSize int
expFiles := make(map[int]string)
for _, s := range sizes {
// Copy the filename parts so appending the width suffix for one size
// doesn't mutate the shared slice used by the other sizes.
spts := make([]string, len(pts))
copy(spts, pts)
spts[uidx] = fmt.Sprintf("%s-%dw", spts[uidx], s)
nname := strings.Join(spts, ".")
expFiles[s] = nname
if s > maxSize {
maxSize = s
}
}
renderFiles := make(map[int]string)
for s, fname := range expFiles {
if _, ok := curFiles[fname]; !ok {
// Image does not exist, render
renderFiles[s] = fname
}
}
if len(renderFiles) > 0 {
uploader := s3manager.NewUploaderWithClient(s3srv, func(d *s3manager.Uploader) {
//d.PartSize = s.UploadPartSize
//d.Concurrency = s.UploadConcurrency
})
for s, fname := range renderFiles {
// Render a new image with the specified width; a height
// of 0 preserves the current aspect ratio.
var (
contentType string
uploadBytes []byte
)
if filExt == ".gif" {
contentType = "image/gif"
uploadBytes, err = ResizeGif(dat, uint(s), 0)
} else if filExt == ".png" {
contentType = "image/png"
uploadBytes, err = ResizePng(dat, uint(s), 0)
} else {
contentType = "image/jpeg"
uploadBytes, err = ResizeJpg(dat, uint(s), 0)
}
if err != nil {
return defaultSrc, errors.WithStack(err)
}
// The s3 key for the newly resized image file.
renderedS3Key := filepath.Join(s3Path, fname)
// Upload the s3 key with the resized image bytes, setting the content
// type and cache headers on the object itself.
p := &s3manager.UploadInput{
Bucket: aws.String(s3Bucket),
Key: aws.String(renderedS3Key),
Body: bytes.NewReader(uploadBytes),
ContentType: aws.String(contentType),
CacheControl: aws.String("max-age=604800"),
}
_, err = uploader.Upload(p)
if err != nil {
return defaultSrc, errors.WithStack(err)
}
// Grant public read access to the uploaded image file.
_, err = s3srv.PutObjectAcl(&s3.PutObjectAclInput{
Bucket: aws.String(s3Bucket),
Key: aws.String(renderedS3Key),
ACL: aws.String("public-read"),
})
if err != nil {
return defaultSrc, errors.WithStack(err)
}
}
}
// Determine the current width of the image; the height isn't needed since
// the current aspect ratio will be maintained.
lw, _, err := getImageDimension(dat)
if err != nil {
return defaultSrc, err
}
if includeOrig {
if lw > maxSize && (!strings.HasPrefix(imgUrlStr, "http") || strings.HasPrefix(imgUrlStr, "https:")) {
maxSize = lw
sizes = append(sizes, lw)
}
} else {
maxSize = sizes[len(sizes)-1]
}
sort.Ints(sizes)
var srcUrl string
srcSets := []string{}
srcSizes := []string{}
for _, s := range sizes {
var nu string
if lw == s {
nu = imgUrlStr
} else {
fname := expFiles[s]
nk := filepath.Join(s3Path, fname)
nu = s3UrlFormatter(nk)
}
srcSets = append(srcSets, fmt.Sprintf("%s %dw", nu, s))
if s == maxSize {
srcSizes = append(srcSizes, fmt.Sprintf("%dpx", s))
srcUrl = nu
} else {
srcSizes = append(srcSizes, fmt.Sprintf("(max-width: %dpx) %dpx", s, s))
}
}
imgSrc = fmt.Sprintf(`srcset="%s" sizes="%s" src="%s"`, strings.Join(srcSets, ","), strings.Join(srcSizes, ","), srcUrl)
}
err = redisClient.WithContext(ctx).Set(ck, imgSrc, 0).Err()
if err != nil {
return imgSrc, errors.WithStack(err)
}
return imgSrc, nil
}
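For orientation, a hedged sketch of calling S3ImgSrc and the rough shape of the attribute string it returns; the bucket, prefix, URL, and size values are examples only.

```
// exampleHeroSrc is an illustrative helper; all values passed below are
// placeholders, not values from this commit.
func exampleHeroSrc(ctx context.Context, redisClient *redistrace.Client, s3UrlFormatter func(string) string, awsSession *session.Session) string {
	src, err := S3ImgSrc(ctx, redisClient, s3UrlFormatter, awsSession,
		"example-static-bucket", "public/img-resize",
		"https://example.com/images/hero.jpg", []int{320, 640, 1024}, true)
	if err != nil {
		// On failure the plain src attribute is a reasonable fallback.
		src = `src="https://example.com/images/hero.jpg"`
	}
	// src looks roughly like:
	//   srcset="https://.../hero-320w.jpg 320w,https://.../hero-640w.jpg 640w,..."
	//   sizes="(max-width: 320px) 320px,(max-width: 640px) 640px,1024px"
	//   src="https://.../hero-1024w.jpg"
	return src
}
```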
// ResizeJpg resizes a JPG image to the specified width and height using
// nearest-neighbor resampling; a width or height of 0 preserves the aspect ratio.
func ResizeJpg(dat []byte, width, height uint) ([]byte, error) {
// decode jpeg into image.Image
img, err := jpeg.Decode(bytes.NewReader(dat))
if err != nil {
return []byte{}, errors.WithStack(err)
}
// resize to the requested width using nearest-neighbor resampling
// and preserve the aspect ratio when height is 0
m := resize.Resize(width, height, img, resize.NearestNeighbor)
// encode the resized image back to bytes
var out = new(bytes.Buffer)
if err := jpeg.Encode(out, m, nil); err != nil {
return []byte{}, errors.WithStack(err)
}
return out.Bytes(), nil
}
// ResizeGif resizes a GIF image to the specified width and height using
// nearest-neighbor resampling; a width or height of 0 preserves the aspect ratio.
func ResizeGif(dat []byte, width, height uint) ([]byte, error) {
// decode gif into image.Image
img, err := gif.Decode(bytes.NewReader(dat))
if err != nil {
return []byte{}, errors.WithStack(err)
}
// resize to the requested width using nearest-neighbor resampling
// and preserve the aspect ratio when height is 0
m := resize.Resize(width, height, img, resize.NearestNeighbor)
// encode the resized image back to bytes
var out = new(bytes.Buffer)
if err := gif.Encode(out, m, nil); err != nil {
return []byte{}, errors.WithStack(err)
}
return out.Bytes(), nil
}
// ResizePng resizes a PNG image to the specified width and height using
// nearest-neighbor resampling; a width or height of 0 preserves the aspect ratio.
func ResizePng(dat []byte, width, height uint) ([]byte, error) {
// decode png into image.Image
img, err := png.Decode(bytes.NewReader(dat))
if err != nil {
return []byte{}, errors.WithStack(err)
}
// resize to the requested width using nearest-neighbor resampling
// and preserve the aspect ratio when height is 0
m := resize.Resize(width, height, img, resize.NearestNeighbor)
// encode the resized image back to bytes
var out = new(bytes.Buffer)
if err := png.Encode(out, m); err != nil {
return []byte{}, errors.WithStack(err)
}
return out.Bytes(), nil
}
// getImageDimension returns the width and height of the image encoded in dat.
func getImageDimension(dat []byte) (int, int, error) {
cfg, _, err := image.DecodeConfig(bytes.NewReader(dat))
if err != nil {
return 0, 0, errors.WithStack(err)
}
return cfg.Width, cfg.Height, nil
}
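A hedged sketch of exposing these helpers to HTML templates for responsive images; the function names, template variable, fixed size list, and the html/template import are assumptions, not part of this commit.

```
// imgResizeFuncs returns illustrative template helpers backed by S3ImgSrc and
// S3ImgUrl; every name and the size list below are assumptions.
func imgResizeFuncs(redisClient *redistrace.Client, s3UrlFormatter func(string) string, awsSession *session.Session, s3Bucket, s3KeyPrefix string) template.FuncMap {
	return template.FuncMap{
		// {{ S3ImgSrcLarge $._Ctx "/images/hero.jpg" }} renders srcset/sizes/src attributes.
		"S3ImgSrcLarge": func(ctx context.Context, p string) template.HTMLAttr {
			res, err := S3ImgSrc(ctx, redisClient, s3UrlFormatter, awsSession, s3Bucket, s3KeyPrefix, p, []int{320, 480, 800}, true)
			if err != nil {
				res = fmt.Sprintf(`src="%s"`, p)
			}
			return template.HTMLAttr(res)
		},
		// {{ S3ImgUrl $._Ctx "/images/hero.jpg" 320 }} renders a single resized image URL.
		"S3ImgUrl": func(ctx context.Context, p string, size int) string {
			u, err := S3ImgUrl(ctx, redisClient, s3UrlFormatter, awsSession, s3Bucket, s3KeyPrefix, p, size)
			if err != nil {
				u = p
			}
			return u
		},
	}
}
```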

View File

@ -3,6 +3,7 @@ package tests
import (
"context"
"fmt"
"github.com/aws/aws-sdk-go/aws/session"
"log"
"os"
"runtime/debug"
@ -23,9 +24,10 @@ const (
// Test owns state for running/shutting down tests.
type Test struct {
Log *log.Logger
MasterDB *db.DB
container *docker.Container
Log *log.Logger
MasterDB *db.DB
container *docker.Container
AwsSession *session.Session
}
// New is the entry point for tests.
@ -36,6 +38,10 @@ func New() *Test {
log := log.New(os.Stdout, "TEST : ", log.LstdFlags|log.Lmicroseconds|log.Lshortfile)
// ============================================================
// Init AWS Session
awsSession := session.Must(session.NewSession())
// ============================================================
// Startup Mongo container
@ -59,7 +65,7 @@ func New() *Test {
log.Fatalf("startup : Register DB : %v", err)
}
return &Test{log, masterDB, container}
return &Test{log, masterDB, container, awsSession}
}
// TearDown is used for shutting down tests. Calling this should be

View File

@ -18,13 +18,13 @@ const (
// MIME types
const (
MIMEApplicationJSON = "application/json"
MIMEApplicationJSONCharsetUTF8 = MIMEApplicationJSON + "; " + charsetUTF8
MIMETextHTML = "text/html"
MIMETextHTMLCharsetUTF8 = MIMETextHTML + "; " + charsetUTF8
MIMETextPlain = "text/plain"
MIMETextPlainCharsetUTF8 = MIMETextPlain + "; " + charsetUTF8
MIMEOctetStream = "application/octet-stream"
MIMEApplicationJSON = "application/json"
MIMEApplicationJSONCharsetUTF8 = MIMEApplicationJSON + "; " + charsetUTF8
MIMETextHTML = "text/html"
MIMETextHTMLCharsetUTF8 = MIMETextHTML + "; " + charsetUTF8
MIMETextPlain = "text/plain"
MIMETextPlainCharsetUTF8 = MIMETextPlain + "; " + charsetUTF8
MIMEOctetStream = "application/octet-stream"
)
// RespondJsonError sends an error formatted as JSON response back to the client.
@ -158,11 +158,14 @@ func Static(rootDir, prefix string) Handler {
// the ability to format/display the error before responding to the client.
func StaticHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, params map[string]string, rootDir, prefix string) error {
// Parse the URL from the http request.
urlPath := path.Clean("/"+r.URL.Path) // "/"+ for security
urlPath := path.Clean("/" + r.URL.Path) // "/"+ for security
urlPath = strings.TrimLeft(urlPath, "/")
// Remove the static directory name from the url
urlPath = strings.TrimLeft(urlPath, filepath.Base(rootDir))
rootDirName := filepath.Base(rootDir)
if strings.HasPrefix(urlPath, rootDirName) {
urlPath = strings.Replace(urlPath, rootDirName, "", 1)
}
// Also remove the URL prefix used to serve the static file since
// this does not need to match any existing directory structure.
@ -172,7 +175,7 @@ func StaticHandler(ctx context.Context, w http.ResponseWriter, r *http.Request,
// Resolve the root directory to an absolute path
sd, err := filepath.Abs(rootDir)
if err != nil {
if err != nil {
return err
}
@ -186,7 +189,7 @@ func StaticHandler(ctx context.Context, w http.ResponseWriter, r *http.Request,
}
// Serve the file from the local file system.
http.ServeFile(w, r , filePath)
http.ServeFile(w, r, filePath)
return nil
}

View File

@ -23,7 +23,7 @@ var (
)
type Template struct {
Funcs template.FuncMap
Funcs template.FuncMap
mainTemplate *template.Template
}
@ -117,14 +117,14 @@ func NewTemplate(templateFuncs template.FuncMap) *Template {
type TemplateRenderer struct {
templateDir string
// has to be map so can know the name and map the name to the location / file path
layoutFiles map[string]string
contentFiles map[string]string
partialFiles map[string]string
layoutFiles map[string]string
contentFiles map[string]string
partialFiles map[string]string
enableHotReload bool
templates map[string]*template.Template
globalViewData map[string]interface{}
mainTemplate *template.Template
errorHandler func(ctx context.Context, w http.ResponseWriter, req *http.Request, renderer web.Renderer, statusCode int, er error) error
templates map[string]*template.Template
globalViewData map[string]interface{}
mainTemplate *template.Template
errorHandler func(ctx context.Context, w http.ResponseWriter, req *http.Request, renderer web.Renderer, statusCode int, er error) error
}
// NewTemplateRenderer implements the interface web.Renderer allowing for execution of
@ -134,14 +134,14 @@ type TemplateRenderer struct {
// 3. partials: templates used by multiple layout or content templates
func NewTemplateRenderer(templateDir string, enableHotReload bool, globalViewData map[string]interface{}, tmpl *Template, errorHandler func(ctx context.Context, w http.ResponseWriter, req *http.Request, renderer web.Renderer, statusCode int, er error) error) (*TemplateRenderer, error) {
r := &TemplateRenderer{
templateDir: templateDir,
layoutFiles: make( map[string]string),
contentFiles: make( map[string]string),
partialFiles: make( map[string]string),
templateDir: templateDir,
layoutFiles: make(map[string]string),
contentFiles: make(map[string]string),
partialFiles: make(map[string]string),
enableHotReload: enableHotReload,
templates: make(map[string]*template.Template),
globalViewData:globalViewData,
errorHandler: errorHandler,
templates: make(map[string]*template.Template),
globalViewData: globalViewData,
errorHandler: errorHandler,
}
// Recursively loop through all folders/files in the template directory and group them by their
@ -172,7 +172,7 @@ func NewTemplateRenderer(templateDir string, enableHotReload bool, globalViewDat
// Main template used to render execute all templates against.
r.mainTemplate = template.New("main")
r.mainTemplate, _ = r.mainTemplate.Parse( `{{define "main" }} {{ template "base" . }} {{ end }}`)
r.mainTemplate, _ = r.mainTemplate.Parse(`{{define "main" }} {{ template "base" . }} {{ end }}`)
r.mainTemplate.Funcs(tmpl.Funcs)
// Ensure all layout files render successfully with no errors.
@ -214,7 +214,8 @@ func (r *TemplateRenderer) Render(ctx context.Context, w http.ResponseWriter, re
// then parse the template files.
t, ok := r.templates[templateContentName]
if !ok || r.enableHotReload {
t, err := r.mainTemplate.Clone()
var err error
t, err = r.mainTemplate.Clone()
if err != nil {
return err
}
@ -267,7 +268,7 @@ func (r *TemplateRenderer) Render(ctx context.Context, w http.ResponseWriter, re
// Add Request URL to render data
reqData := map[string]interface{}{
"Url": "",
"Uri": "",
"Uri": "",
}
if req != nil {
reqData["Url"] = req.URL.String()
@ -299,7 +300,7 @@ func (r *TemplateRenderer) Render(ctx context.Context, w http.ResponseWriter, re
func (r *TemplateRenderer) Error(ctx context.Context, w http.ResponseWriter, req *http.Request, statusCode int, er error) error {
// If error handler was defined to support formatted response for web, used it.
if r.errorHandler != nil {
return r.errorHandler(ctx, w, req, r, statusCode, er)
return r.errorHandler(ctx, w, req, r, statusCode, er)
}
// Default response text response of error.
@ -318,42 +319,3 @@ func (tr *TemplateRenderer) Static(rootDir, prefix string) web.Handler {
}
return h
}
// S3Url formats a path to include either the S3 URL or a CloudFront
// URL instead of serving the file from local file system.
func S3Url(baseS3Url, baseS3Origin, p string) string {
if strings.HasPrefix(p, "http") {
return p
}
org := strings.TrimRight(baseS3Origin, "/")
if org != "" {
p = strings.Replace(p, org+"/", "", 1)
}
pts := strings.Split(p, "?")
p = pts[0]
var rq string
if len(pts) > 1 {
rq = pts[1]
}
p = strings.TrimLeft(p, "/")
baseUrl := baseS3Url
u, err := url.Parse(baseUrl)
if err != nil {
return "?"
}
ldir := filepath.Base(u.Path)
if strings.HasPrefix(p, ldir) {
p = strings.Replace(p, ldir+"/", "", 1)
}
u.Path = filepath.Join(u.Path, p)
u.RawQuery = rq
return u.String()
}

go.mod Normal file
View File

@ -0,0 +1,3 @@
module gitlab.com/geeks-accelerator/oss/saas-starter-kit
go 1.12