
update tracing, fixed docker-compose and removed vendor dir

commit c19f46e07f (parent c77dd8f5f3)
Author: Lee Brown
Date: 2019-05-23 19:40:29 -05:00

264 changed files with 391 additions and 45102 deletions

View File

@ -0,0 +1,43 @@
FROM golang:alpine3.9 AS build_base
LABEL maintainer="lee@geeksinthewoods.com"
RUN apk --update --no-cache add \
git
# go to base project
WORKDIR $GOPATH/src/gitlab.com/geeks-accelerator/oss/saas-starter-kit/example-project
# enable go modules
ENV GO111MODULE="on"
COPY go.mod .
COPY go.sum .
RUN go mod download
FROM build_base AS builder
# copy shared packages
COPY internal ./internal
# copy cmd specific package
COPY cmd/web-api ./cmd/web-api
COPY cmd/web-api/templates /templates
#COPY cmd/web-api/static /static
WORKDIR ./cmd/web-api
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix nocgo -o /gosrv .
FROM alpine:3.9
RUN apk --update --no-cache add \
tzdata ca-certificates curl openssl
COPY --from=builder /gosrv /
#COPY --from=builder /static /static
COPY --from=builder /templates /templates
ARG gogc="50"
ENV GOGC $gogc
ENTRYPOINT ["/gosrv"]

View File

@ -15,7 +15,7 @@ import (
func API(shutdown chan os.Signal, log *log.Logger, masterDB *sqlx.DB, authenticator *auth.Authenticator) http.Handler {
// Construct the web.App which holds all routes as well as common Middleware.
app := web.NewApp(shutdown, log, mid.Logger(log), mid.Errors(log), mid.Metrics(), mid.Panics())
app := web.NewApp(shutdown, log, mid.Trace(), mid.Logger(log), mid.Errors(log), mid.Metrics(), mid.Panics())
// Register health check endpoint. This route is not authenticated.
check := Check{

View File

@ -4,6 +4,8 @@ import (
"context"
"encoding/json"
"expvar"
"fmt"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"log"
"net/http"
_ "net/http/pprof"
@ -17,14 +19,12 @@ import (
"geeks-accelerator/oss/saas-starter-kit/example-project/cmd/web-api/handlers"
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/auth"
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/flag"
itrace "geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/trace"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/go-redis/redis"
"github.com/kelseyhightower/envconfig"
"github.com/lib/pq"
"go.opencensus.io/trace"
awstrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws"
sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql"
redistrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis"
@ -46,46 +46,45 @@ func main() {
var cfg struct {
Env string `default:"dev" envconfig:"ENV"`
HTTP struct {
Host string `default:"0.0.0.0:3000" envconfig:"HTTP_HOST"`
ReadTimeout time.Duration `default:"10s" envconfig:"HTTP_READ_TIMEOUT"`
WriteTimeout time.Duration `default:"10s" envconfig:"HTTP_WRITE_TIMEOUT"`
Host string `default:"0.0.0.0:3000" envconfig:"HOST"`
ReadTimeout time.Duration `default:"10s" envconfig:"READ_TIMEOUT"`
WriteTimeout time.Duration `default:"10s" envconfig:"WRITE_TIMEOUT"`
}
HTTPS struct {
Host string `default:"" envconfig:"HTTPS_HOST"`
ReadTimeout time.Duration `default:"5s" envconfig:"HTTPS_READ_TIMEOUT"`
WriteTimeout time.Duration `default:"5s" envconfig:"HTTPS_WRITE_TIMEOUT"`
Host string `default:"" envconfig:"HOST"`
ReadTimeout time.Duration `default:"5s" envconfig:"READ_TIMEOUT"`
WriteTimeout time.Duration `default:"5s" envconfig:"WRITE_TIMEOUT"`
}
App struct {
Name string `default:"web-api" envconfig:"APP_NAME"`
BaseUrl string `default:"" envconfig:"APP_BASE_URL"`
TemplateDir string `default:"./templates" envconfig:"APP_TEMPLATE_DIR"`
DebugHost string `default:"0.0.0.0:4000" envconfig:"APP_DEBUG_HOST"`
ShutdownTimeout time.Duration `default:"5s" envconfig:"APP_SHUTDOWN_TIMEOUT"`
Name string `default:"web-api" envconfig:"NAME"`
BaseUrl string `default:"" envconfig:"BASE_URL"`
TemplateDir string `default:"./templates" envconfig:"TEMPLATE_DIR"`
DebugHost string `default:"0.0.0.0:4000" envconfig:"DEBUG_HOST"`
ShutdownTimeout time.Duration `default:"5s" envconfig:"SHUTDOWN_TIMEOUT"`
}
Redis struct {
DialTimeout time.Duration `default:"5s" envconfig:"REDIS_DIAL_TIMEOUT"`
Host string `default:":6379" envconfig:"REDIS_HOST"`
DB int `default:"1" envconfig:"REDIS_DB"`
MaxmemoryPolicy string `envconfig:"REDIS_MAXMEMORY_POLICY"`
Host string `default:":6379" envconfig:"HOST"`
DB int `default:"1" envconfig:"DB"`
DialTimeout time.Duration `default:"5s" envconfig:"DIAL_TIMEOUT"`
MaxmemoryPolicy string `envconfig:"MAXMEMORY_POLICY"`
}
DB struct {
Host string `default:"127.0.0.1:5433" envconfig:"DB_HOST"`
User string `default:"postgres" envconfig:"DB_USER"`
Pass string `default:"postgres" envconfig:"DB_PASS" json:"-"` // don't print
Database string `default:"shared" envconfig:"DB_DATABASE"`
Driver string `default:"postgres" envconfig:"DB_DRIVER"`
Timezone string `default:"utc" envconfig:"DB_TIMEZONE"`
DisableTLS bool `default:"false" envconfig:"DB_DISABLE_TLS"`
Host string `default:"127.0.0.1:5433" envconfig:"HOST"`
User string `default:"postgres" envconfig:"USER"`
Pass string `default:"postgres" envconfig:"PASS" json:"-"` // don't print
Database string `default:"shared" envconfig:"DATABASE"`
Driver string `default:"postgres" envconfig:"DRIVER"`
Timezone string `default:"utc" envconfig:"TIMEZONE"`
DisableTLS bool `default:"false" envconfig:"DISABLE_TLS"`
}
Trace struct {
Host string `default:"http://tracer:3002/v1/publish" envconfig:"TRACE_HOST"`
BatchSize int `default:"1000" envconfig:"TRACE_BATCH_SIZE"`
SendInterval time.Duration `default:"15s" envconfig:"TRACE_SEND_INTERVAL"`
SendTimeout time.Duration `default:"500ms" envconfig:"TRACE_SEND_TIMEOUT"`
Host string `default:"127.0.0.1" envconfig:"DD_TRACE_AGENT_HOSTNAME"`
Port int `default:"8126" envconfig:"DD_TRACE_AGENT_PORT"`
AnalyticsRate float64 `default:"0.10" envconfig:"ANALYTICS_RATE"`
}
AwsAccount struct {
AccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID"`
SecretAccessKey string `envconfig:"AWS_SECRET_ACCESS_KEY" json:"-"` // don't print
Aws struct {
AccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID" required:"true"` // WEB_API_AWS_AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY_ID
SecretAccessKey string `envconfig:"AWS_SECRET_ACCESS_KEY" required:"true" json:"-"` // don't print
Region string `default:"us-east-1" envconfig:"AWS_REGION"`
// Get an AWS session from an implicit source if no explicit
@ -94,8 +93,8 @@ func main() {
UseRole bool `envconfig:"AWS_USE_ROLE"`
}
Auth struct {
AwsSecretID string `default:"auth-secret-key" envconfig:"AUTH_AWS_SECRET_ID"`
KeyExpiration time.Duration `default:"3600s" envconfig:"AUTH_KEY_EXPIRATION"`
AwsSecretID string `default:"auth-secret-key" envconfig:"AWS_SECRET_ID"`
KeyExpiration time.Duration `default:"3600s" envconfig:"KEY_EXPIRATION"`
}
BuildInfo struct {
CiCommitRefName string `envconfig:"CI_COMMIT_REF_NAME"`
@ -168,14 +167,14 @@ func main() {
// =========================================================================
// Init AWS Session
var awsSession *session.Session
if cfg.AwsAccount.UseRole {
if cfg.Aws.UseRole {
// Get an AWS session from an implicit source if no explicit
// configuration is provided. This is useful for taking advantage of
// EC2/ECS instance roles.
awsSession = session.Must(session.NewSession())
} else {
creds := credentials.NewStaticCredentials(cfg.AwsAccount.AccessKeyID, cfg.AwsAccount.SecretAccessKey, "")
awsSession = session.New(&aws.Config{Region: aws.String(cfg.AwsAccount.Region), Credentials: creds})
creds := credentials.NewStaticCredentials(cfg.Aws.AccessKeyID, cfg.Aws.SecretAccessKey, "")
awsSession = session.New(&aws.Config{Region: aws.String(cfg.Aws.Region), Credentials: creds})
}
awsSession = awstrace.WrapSession(awsSession)
@ -219,7 +218,7 @@ func main() {
var dbUrl url.URL
{
// Query parameters.
var q url.Values
var q url.Values = make(map[string][]string)
// Handle SSL Mode
if cfg.DB.DisableTLS {
@ -239,6 +238,7 @@ func main() {
RawQuery: q.Encode(),
}
}
log.Println("main : Started : Initialize Database")
// Register informs the sqlxtrace package of the driver that we will be using in our program.
// It uses a default service name, in the below case "postgres.db". To use a custom service
@ -259,28 +259,11 @@ func main() {
// =========================================================================
// Start Tracing Support
logger := func(format string, v ...interface{}) {
log.Printf(format, v...)
}
log.Printf("main : Tracing Started : %s", cfg.Trace.Host)
exporter, err := itrace.NewExporter(logger, cfg.Trace.Host, cfg.Trace.BatchSize, cfg.Trace.SendInterval, cfg.Trace.SendTimeout)
if err != nil {
log.Fatalf("main : RegiTracingster : ERROR : %v", err)
}
defer func() {
log.Printf("main : Tracing Stopping : %s", cfg.Trace.Host)
batch, err := exporter.Close()
if err != nil {
log.Printf("main : Tracing Stopped : ERROR : Batch[%d] : %v", batch, err)
} else {
log.Printf("main : Tracing Stopped : Flushed Batch[%d]", batch)
}
}()
trace.RegisterExporter(exporter)
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
th := fmt.Sprintf("%s:%d", cfg.Trace.Host, cfg.Trace.Port)
log.Printf("main : Tracing Started : %s", th)
sr := tracer.NewRateSampler(cfg.Trace.AnalyticsRate)
tracer.Start(tracer.WithAgentAddr(th), tracer.WithSampler(sr))
defer tracer.Stop()
// =========================================================================
// Start Debug Service. Not concerned with shutting this down when the
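The envconfig tags in this file drop their section prefixes (HTTP_HOST becomes HOST, DB_HOST becomes HOST, and so on). Below is a minimal sketch of the naming this enables, assuming the service processes its config with a "WEB_API" prefix (the `WEB_API_AWS_AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY_ID` comment above and the WEB_API_HTTP_HOST variable in the docker-compose changes later in this commit suggest as much): kelseyhightower/envconfig joins the prefix, the nested struct name, and the tag, so `HOST` resolves as WEB_API_HTTP_HOST, while the old `HTTP_HOST` tag required WEB_API_HTTP_HTTP_HOST or the bare fallback HTTP_HOST.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/kelseyhightower/envconfig"
)

type config struct {
	HTTP struct {
		Host string `default:"0.0.0.0:3000" envconfig:"HOST"`
	}
}

func main() {
	// Illustrative value only; docker-compose sets the same variable for the container.
	os.Setenv("WEB_API_HTTP_HOST", "0.0.0.0:3001")

	var cfg config
	// Assumes the service calls Process with the "WEB_API" prefix.
	if err := envconfig.Process("WEB_API", &cfg); err != nil {
		log.Fatalf("main : Parsing Config : %v", err)
	}

	// Prefix + nested struct name + tag gives WEB_API_HTTP_HOST; the old
	// `envconfig:"HTTP_HOST"` tag would have required WEB_API_HTTP_HTTP_HOST.
	fmt.Println(cfg.HTTP.Host) // 0.0.0.0:3001
}
```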

View File

@ -0,0 +1,43 @@
FROM golang:alpine3.9 AS build_base
LABEL maintainer="lee@geeksinthewoods.com"
RUN apk --update --no-cache add \
git
# go to base project
WORKDIR $GOPATH/src/gitlab.com/geeks-accelerator/oss/saas-starter-kit/example-project
# enable go modules
ENV GO111MODULE="on"
COPY go.mod .
COPY go.sum .
RUN go mod download
FROM build_base AS builder
# copy shared packages
COPY internal ./internal
# copy cmd specific package
COPY cmd/web-app ./cmd/web-app
COPY cmd/web-app/templates /templates
COPY cmd/web-app/static /static
WORKDIR ./cmd/web-app
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix nocgo -o /gosrv .
FROM alpine:3.9
RUN apk --update --no-cache add \
tzdata ca-certificates curl openssl
COPY --from=builder /gosrv /
COPY --from=builder /static /static
COPY --from=builder /templates /templates
ARG gogc="50"
ENV GOGC $gogc
ENTRYPOINT ["/gosrv"]

View File

@ -17,7 +17,7 @@ const baseLayoutTmpl = "base.tmpl"
func APP(shutdown chan os.Signal, log *log.Logger, staticDir, templateDir string, masterDB *sqlx.DB, authenticator *auth.Authenticator, renderer web.Renderer) http.Handler {
// Construct the web.App which holds all routes as well as common Middleware.
app := web.NewApp(shutdown, log, mid.Logger(log), mid.Errors(log), mid.Metrics(), mid.Panics())
app := web.NewApp(shutdown, log, mid.Trace(), mid.Logger(log), mid.Errors(log), mid.Metrics(), mid.Panics())
// Register health check endpoint. This route is not authenticated.
check := Check{

View File

@ -5,6 +5,7 @@ import (
"encoding/json"
"expvar"
"fmt"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"html/template"
"log"
"net/http"
@ -22,7 +23,6 @@ import (
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/deploy"
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/flag"
img_resize "geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/img-resize"
itrace "geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/trace"
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/web"
template_renderer "geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/web/template-renderer"
"github.com/aws/aws-sdk-go/aws"
@ -31,7 +31,6 @@ import (
"github.com/go-redis/redis"
"github.com/kelseyhightower/envconfig"
"github.com/lib/pq"
"go.opencensus.io/trace"
awstrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws"
sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql"
redistrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis"
@ -53,54 +52,53 @@ func main() {
var cfg struct {
Env string `default:"dev" envconfig:"ENV"`
HTTP struct {
Host string `default:"0.0.0.0:3000" envconfig:"HTTP_HOST"`
ReadTimeout time.Duration `default:"10s" envconfig:"HTTP_READ_TIMEOUT"`
WriteTimeout time.Duration `default:"10s" envconfig:"HTTP_WRITE_TIMEOUT"`
Host string `default:"0.0.0.0:3000" envconfig:"HOST"`
ReadTimeout time.Duration `default:"10s" envconfig:"READ_TIMEOUT"`
WriteTimeout time.Duration `default:"10s" envconfig:"WRITE_TIMEOUT"`
}
HTTPS struct {
Host string `default:"" envconfig:"HTTPS_HOST"`
ReadTimeout time.Duration `default:"5s" envconfig:"HTTPS_READ_TIMEOUT"`
WriteTimeout time.Duration `default:"5s" envconfig:"HTTPS_WRITE_TIMEOUT"`
Host string `default:"" envconfig:"HOST"`
ReadTimeout time.Duration `default:"5s" envconfig:"READ_TIMEOUT"`
WriteTimeout time.Duration `default:"5s" envconfig:"WRITE_TIMEOUT"`
}
App struct {
Name string `default:"web-app" envconfig:"APP_NAME"`
BaseUrl string `default:"" envconfig:"APP_BASE_URL"`
TemplateDir string `default:"./templates" envconfig:"APP_TEMPLATE_DIR"`
StaticDir string `default:"./static" envconfig:"APP_STATIC_DIR"`
Name string `default:"web-app" envconfig:"NAME"`
BaseUrl string `default:"" envconfig:"BASE_URL"`
TemplateDir string `default:"./templates" envconfig:"TEMPLATE_DIR"`
StaticDir string `default:"./static" envconfig:"STATIC_DIR"`
StaticS3 struct {
S3Enabled bool `envconfig:"APP_STATIC_S3_ENABLED"`
S3Bucket string `envconfig:"APP_STATIC_S3_BUCKET"`
S3KeyPrefix string `default:"public/web_app/static" envconfig:"APP_STATIC_S3_KEY_PREFIX"`
CloudFrontEnabled bool `envconfig:"APP_STATIC_S3_CLOUDFRONT_ENABLED"`
ImgResizeEnabled bool `envconfig:"APP_STATIC_S3_IMG_RESIZE_ENABLED"`
S3Enabled bool `envconfig:"ENABLED"`
S3Bucket string `envconfig:"S3_BUCKET"`
S3KeyPrefix string `default:"public/web_app/static" envconfig:"KEY_PREFIX"`
CloudFrontEnabled bool `envconfig:"CLOUDFRONT_ENABLED"`
ImgResizeEnabled bool `envconfig:"IMG_RESIZE_ENABLED"`
}
DebugHost string `default:"0.0.0.0:4000" envconfig:"APP_DEBUG_HOST"`
ShutdownTimeout time.Duration `default:"5s" envconfig:"APP_SHUTDOWN_TIMEOUT"`
DebugHost string `default:"0.0.0.0:4000" envconfig:"DEBUG_HOST"`
ShutdownTimeout time.Duration `default:"5s" envconfig:"SHUTDOWN_TIMEOUT"`
}
Redis struct {
DialTimeout time.Duration `default:"5s" envconfig:"REDIS_DIAL_TIMEOUT"`
Host string `default:":6379" envconfig:"REDIS_HOST"`
DB int `default:"1" envconfig:"REDIS_DB"`
MaxmemoryPolicy string `envconfig:"REDIS_MAXMEMORY_POLICY"`
Host string `default:":6379" envconfig:"HOST"`
DB int `default:"1" envconfig:"DB"`
DialTimeout time.Duration `default:"5s" envconfig:"DIAL_TIMEOUT"`
MaxmemoryPolicy string `envconfig:"MAXMEMORY_POLICY"`
}
DB struct {
Host string `default:"127.0.0.1:5433" envconfig:"DB_HOST"`
User string `default:"postgres" envconfig:"DB_USER"`
Pass string `default:"postgres" envconfig:"DB_PASS" json:"-"` // don't print
Database string `default:"shared" envconfig:"DB_DATABASE"`
Driver string `default:"postgres" envconfig:"DB_DRIVER"`
Timezone string `default:"utc" envconfig:"DB_TIMEZONE"`
DisableTLS bool `default:"false" envconfig:"DB_DISABLE_TLS"`
Host string `default:"127.0.0.1:5433" envconfig:"HOST"`
User string `default:"postgres" envconfig:"USER"`
Pass string `default:"postgres" envconfig:"PASS" json:"-"` // don't print
Database string `default:"shared" envconfig:"DATABASE"`
Driver string `default:"postgres" envconfig:"DRIVER"`
Timezone string `default:"utc" envconfig:"TIMEZONE"`
DisableTLS bool `default:"false" envconfig:"DISABLE_TLS"`
}
Trace struct {
Host string `default:"http://tracer:3002/v1/publish" envconfig:"TRACE_HOST"`
BatchSize int `default:"1000" envconfig:"TRACE_BATCH_SIZE"`
SendInterval time.Duration `default:"15s" envconfig:"TRACE_SEND_INTERVAL"`
SendTimeout time.Duration `default:"500ms" envconfig:"TRACE_SEND_TIMEOUT"`
Host string `default:"127.0.0.1" envconfig:"DD_TRACE_AGENT_HOSTNAME"`
Port int `default:"8126" envconfig:"DD_TRACE_AGENT_PORT"`
AnalyticsRate float64 `default:"0.10" envconfig:"ANALYTICS_RATE"`
}
AwsAccount struct {
AccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID"`
SecretAccessKey string `envconfig:"AWS_SECRET_ACCESS_KEY" json:"-"` // don't print
Aws struct {
AccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID" required:"true"` // WEB_API_AWS_AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY_ID
SecretAccessKey string `envconfig:"AWS_SECRET_ACCESS_KEY" required:"true" json:"-"` // don't print
Region string `default:"us-east-1" envconfig:"AWS_REGION"`
// Get an AWS session from an implicit source if no explicit
@ -109,8 +107,8 @@ func main() {
UseRole bool `envconfig:"AWS_USE_ROLE"`
}
Auth struct {
AwsSecretID string `default:"auth-secret-key" envconfig:"AUTH_AWS_SECRET_ID"`
KeyExpiration time.Duration `default:"3600s" envconfig:"AUTH_KEY_EXPIRATION"`
AwsSecretID string `default:"auth-secret-key" envconfig:"AWS_SECRET_ID"`
KeyExpiration time.Duration `default:"3600s" envconfig:"KEY_EXPIRATION"`
}
BuildInfo struct {
CiCommitRefName string `envconfig:"CI_COMMIT_REF_NAME"`
@ -184,14 +182,14 @@ func main() {
// =========================================================================
// Init AWS Session
var awsSession *session.Session
if cfg.AwsAccount.UseRole {
if cfg.Aws.UseRole {
// Get an AWS session from an implicit source if no explicit
// configuration is provided. This is useful for taking advantage of
// EC2/ECS instance roles.
awsSession = session.Must(session.NewSession())
} else {
creds := credentials.NewStaticCredentials(cfg.AwsAccount.AccessKeyID, cfg.AwsAccount.SecretAccessKey, "")
awsSession = session.New(&aws.Config{Region: aws.String(cfg.AwsAccount.Region), Credentials: creds})
creds := credentials.NewStaticCredentials(cfg.Aws.AccessKeyID, cfg.Aws.SecretAccessKey, "")
awsSession = session.New(&aws.Config{Region: aws.String(cfg.Aws.Region), Credentials: creds})
}
awsSession = awstrace.WrapSession(awsSession)
@ -235,7 +233,7 @@ func main() {
var dbUrl url.URL
{
// Query parameters.
var q url.Values
var q url.Values = make(map[string][]string)
// Handle SSL Mode
if cfg.DB.DisableTLS {
@ -255,6 +253,7 @@ func main() {
RawQuery: q.Encode(),
}
}
log.Println("main : Started : Initialize Database")
// Register informs the sqlxtrace package of the driver that we will be using in our program.
// It uses a default service name, in the below case "postgres.db". To use a custom service
@ -414,50 +413,46 @@ func main() {
// Image Formatter - additional functions exposed to templates for resizing images
// to support response web applications.
if cfg.App.StaticS3.ImgResizeEnabled {
imgResizeS3KeyPrefix := filepath.Join(cfg.App.StaticS3.S3KeyPrefix, "images/responsive")
tmplFuncs["S3ImgSrcLarge"] = func(ctx context.Context, p string) template.HTMLAttr {
imgSrcAttr := func(ctx context.Context, p string, sizes []int, includeOrig bool) template.HTMLAttr {
u := staticUrlFormatter(p)
res, _ := img_resize.S3ImgSrc(ctx, redisClient, staticS3UrlFormatter, awsSession, cfg.App.StaticS3.S3Bucket, imgResizeS3KeyPrefix, u, []int{320, 480, 800}, true)
return template.HTMLAttr(res)
var srcAttr string
if cfg.App.StaticS3.ImgResizeEnabled {
srcAttr, _ = img_resize.S3ImgSrc(ctx, redisClient, staticS3UrlFormatter, awsSession, cfg.App.StaticS3.S3Bucket, imgResizeS3KeyPrefix, u, sizes, includeOrig)
} else {
srcAttr = fmt.Sprintf("src=\"%s\"", u)
}
return template.HTMLAttr(srcAttr)
}
tmplFuncs["S3ImgSrcLarge"] = func(ctx context.Context, p string) template.HTMLAttr {
return imgSrcAttr(ctx, p, []int{320, 480, 800}, true)
}
tmplFuncs["S3ImgThumbSrcLarge"] = func(ctx context.Context, p string) template.HTMLAttr {
u := staticUrlFormatter(p)
res, _ := img_resize.S3ImgSrc(ctx, redisClient, staticS3UrlFormatter, awsSession, cfg.App.StaticS3.S3Bucket, imgResizeS3KeyPrefix, u, []int{320, 480, 800}, false)
return template.HTMLAttr(res)
return imgSrcAttr(ctx, p, []int{320, 480, 800}, false)
}
tmplFuncs["S3ImgSrcMedium"] = func(ctx context.Context, p string) template.HTMLAttr {
u := staticUrlFormatter(p)
res, _ := img_resize.S3ImgSrc(ctx, redisClient, staticS3UrlFormatter, awsSession, cfg.App.StaticS3.S3Bucket, imgResizeS3KeyPrefix, u, []int{320, 640}, true)
return template.HTMLAttr(res)
return imgSrcAttr(ctx, p, []int{320, 640}, true)
}
tmplFuncs["S3ImgThumbSrcMedium"] = func(ctx context.Context, p string) template.HTMLAttr {
u := staticUrlFormatter(p)
res, _ := img_resize.S3ImgSrc(ctx, redisClient, staticS3UrlFormatter, awsSession, cfg.App.StaticS3.S3Bucket, imgResizeS3KeyPrefix, u, []int{320, 640}, false)
return template.HTMLAttr(res)
return imgSrcAttr(ctx, p, []int{320, 640}, false)
}
tmplFuncs["S3ImgSrcSmall"] = func(ctx context.Context, p string) template.HTMLAttr {
u := staticUrlFormatter(p)
res, _ := img_resize.S3ImgSrc(ctx, redisClient, staticS3UrlFormatter, awsSession, cfg.App.StaticS3.S3Bucket, imgResizeS3KeyPrefix, u, []int{320}, true)
return template.HTMLAttr(res)
return imgSrcAttr(ctx, p, []int{320}, true)
}
tmplFuncs["S3ImgThumbSrcSmall"] = func(ctx context.Context, p string) template.HTMLAttr {
u := staticUrlFormatter(p)
res, _ := img_resize.S3ImgSrc(ctx, redisClient, staticS3UrlFormatter, awsSession, cfg.App.StaticS3.S3Bucket, imgResizeS3KeyPrefix, u, []int{320}, false)
return template.HTMLAttr(res)
return imgSrcAttr(ctx, p, []int{320}, false)
}
tmplFuncs["S3ImgSrc"] = func(ctx context.Context, p string, sizes []int) template.HTMLAttr {
u := staticUrlFormatter(p)
res, _ := img_resize.S3ImgSrc(ctx, redisClient, staticS3UrlFormatter, awsSession, cfg.App.StaticS3.S3Bucket, imgResizeS3KeyPrefix, u, sizes, true)
return template.HTMLAttr(res)
return imgSrcAttr(ctx, p, sizes, true)
}
tmplFuncs["S3ImgUrl"] = func(ctx context.Context, p string, size int) string {
u := staticUrlFormatter(p)
res, _ := img_resize.S3ImgUrl(ctx, redisClient, staticS3UrlFormatter, awsSession, cfg.App.StaticS3.S3Bucket, imgResizeS3KeyPrefix, u, size)
return res
imgUrl := staticUrlFormatter(p)
if cfg.App.StaticS3.ImgResizeEnabled {
imgUrl, _ = img_resize.S3ImgUrl(ctx, redisClient, staticS3UrlFormatter, awsSession, cfg.App.StaticS3.S3Bucket, imgResizeS3KeyPrefix, imgUrl, size)
}
return imgUrl
}
//
@ -500,28 +495,11 @@ func main() {
// =========================================================================
// Start Tracing Support
logger := func(format string, v ...interface{}) {
log.Printf(format, v...)
}
log.Printf("main : Tracing Started : %s", cfg.Trace.Host)
exporter, err := itrace.NewExporter(logger, cfg.Trace.Host, cfg.Trace.BatchSize, cfg.Trace.SendInterval, cfg.Trace.SendTimeout)
if err != nil {
log.Fatalf("main : RegiTracingster : ERROR : %v", err)
}
defer func() {
log.Printf("main : Tracing Stopping : %s", cfg.Trace.Host)
batch, err := exporter.Close()
if err != nil {
log.Printf("main : Tracing Stopped : ERROR : Batch[%d] : %v", batch, err)
} else {
log.Printf("main : Tracing Stopped : Flushed Batch[%d]", batch)
}
}()
trace.RegisterExporter(exporter)
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
th := fmt.Sprintf("%s:%d", cfg.Trace.Host, cfg.Trace.Port)
log.Printf("main : Tracing Started : %s", th)
sr := tracer.NewRateSampler(cfg.Trace.AnalyticsRate)
tracer.Start(tracer.WithAgentAddr(th), tracer.WithSampler(sr))
defer tracer.Stop()
// =========================================================================
// Start Debug Service. Not concerned with shutting this down when the
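Both main.go diffs keep the comment that "Register informs the sqlxtrace package of the driver", but the call itself falls outside the hunks. The sketch below shows an assumed shape using the dd-trace-go sql contrib package imported above and the sqlx contrib package (that import is not visible in this diff, so treat it as an assumption); queries then report under the default "postgres.db" service name mentioned in the comment.

```go
package example

import (
	"log"

	"github.com/jmoiron/sqlx"
	"github.com/lib/pq"
	sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql"
	sqlxtrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/jmoiron/sqlx"
)

// openTracedDB is a sketch of the registration the comment refers to: the
// lib/pq driver is registered with the sql contrib package once, then the
// sqlx contrib Open returns a *sqlx.DB whose queries are reported as spans.
func openTracedDB(driver, dsn string) *sqlx.DB {
	sqltrace.Register(driver, &pq.Driver{})

	db, err := sqlxtrace.Open(driver, dsn)
	if err != nil {
		log.Fatalf("main : Register DB : %s : %v", driver, err)
	}
	return db
}
```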

View File

@ -4,14 +4,20 @@
# docker-compose down
version: '3'
networks:
main:
services:
postgres:
container_name: postgres
image: postgres:11-alpine
expose:
- "5433"
ports:
- "5433:5432"
networks:
main:
aliases:
- postgres
environment:
- POSTGRES_USER=postgres
- POSTGRES_PASS=postgres
@ -23,21 +29,27 @@ services:
- "6379"
ports:
- "6379:6379"
networks:
main:
aliases:
- redis
entrypoint: redis-server --appendonly yes
datadog:
container_name: datadog
build:
context: .
dockerfile: docker/datadog-agent/Dockerfile
image: example-project/datadog:latest
build:
context: docker/datadog-agent
dockerfile: Dockerfile
ports:
- 8125:8125
- 8126:8126
- 8125:8125 # metrics
- 8126:8126 # tracing
networks:
main:
aliases:
- datadog
env_file:
- .env_docker_compose
environment:
- DD_API_KEY=${DD_API_KEY}
- DD_LOGS_ENABLED=true
- DD_APM_ENABLED=true
- DD_RECEIVER_PORT=8126
@ -46,17 +58,20 @@ services:
- DD_TAGS=source:docker env:dev
- DD_DOGSTATSD_ORIGIN_DETECTION=true
- DD_DOGSTATSD_NON_LOCAL_TRAFFIC=true
- ECS_FARGATE=false
#- ECS_FARGATE=false
- DD_EXPVAR=service_name=web-app env=dev url=http://web-app:4000/debug/vars|service_name=web-api env=dev url=http://web-api:4001/debug/vars
web-app:
container_name: web-app
image: example-project/web-app:latest
build:
context: .
dockerfile: cmd/web-app/Dockerfile
image: example-project/web-api:latest
ports:
- 3000:3000 # WEB APP
- 4000:4000 # DEBUG API
networks:
main:
aliases:
- web-app
links:
- postgres
- redis
@ -66,15 +81,12 @@ services:
environment:
- WEB_APP_HTTP_HOST=0.0.0.0:3000
- WEB_APP_APP_BASE_URL=http://127.0.0.1:3000
- WEB_APP_REDIS_HOST=redis
- WEB_APP_DB_HOST=postgres
- WEB_API_APP_DEBUG_HOST=0.0.0.0:4000
- WEB_APP_REDIS_HOST=redis:6379
- WEB_APP_DB_HOST=postgres:5433
- WEB_APP_DB_USER=postgres
- WEB_APP_DB_PASS=postgres
- WEB_APP_DB_DATABASE=shared
- WEB_APP_AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
- WEB_APP_AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
- WEB_APP_AWS_REGION=${AWS_REGION}
- WEB_APP_AWS_USE_ROLE=${AWS_USE_ROLE}
- DD_TRACE_AGENT_HOSTNAME=datadog
- DD_TRACE_AGENT_PORT=8126
- DD_SERVICE_NAME=web-app
@ -82,14 +94,17 @@ services:
# - GODEBUG=gctrace=1
web-api:
container_name: web-api
image: example-project/web-api:latest
build:
context: .
dockerfile: cmd/web-api/Dockerfile
image: example-project/web-api:latest
ports:
- 3001:3001 # WEB API
- 4001:4001 # DEBUG API
networks:
main:
aliases:
- web-api
links:
- postgres
- redis
@ -99,22 +114,14 @@ services:
environment:
- WEB_API_HTTP_HOST=0.0.0.0:3001
- WEB_API_APP_BASE_URL=http://127.0.0.1:3001
- WEB_API_REDIS_HOST=redis
- WEB_API_DB_HOST=postgres
- WEB_API_APP_DEBUG_HOST=0.0.0.0:4001
- WEB_API_REDIS_HOST=redis:6379
- WEB_API_DB_HOST=postgres:5433
- WEB_API_DB_USER=postgres
- WEB_API_DB_PASS=postgres
- WEB_API_DB_DATABASE=shared
- WEB_API_AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
- WEB_API_AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
- WEB_API_AWS_REGION=${AWS_REGION}
- WEB_API_AWS_USE_ROLE=${AWS_USE_ROLE}
- DD_TRACE_AGENT_HOSTNAME=datadog
- DD_TRACE_AGENT_PORT=8126
- DD_SERVICE_NAME=web-app
- DD_ENV=dev
# - GODEBUG=gctrace=1
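The DD_EXPVAR entry above points the agent at http://web-app:4000/debug/vars and http://web-api:4001/debug/vars. Here is a minimal sketch of the Go side that makes those URLs exist; it assumes each service serves http.DefaultServeMux on its configured debug host (0.0.0.0:4000 and 0.0.0.0:4001 in this compose file), which the expvar and pprof imports in both main.go files imply.

```go
package main

import (
	"expvar" // importing expvar registers /debug/vars on http.DefaultServeMux
	"log"
	"net/http"
	_ "net/http/pprof" // registers the /debug/pprof handlers
)

func main() {
	// Publish something beyond the defaults (memstats and cmdline are always present).
	build := expvar.NewString("build")
	build.Set("dev")

	// 0.0.0.0:4000 matches the web-app debug host in the compose file; web-api uses 4001.
	debugHost := "0.0.0.0:4000"
	log.Printf("main : Debug Listening %s", debugHost)
	log.Printf("main : Debug Listener closed : %v", http.ListenAndServe(debugHost, http.DefaultServeMux))
}
```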

View File

@ -2,8 +2,8 @@ FROM datadog/agent:latest
LABEL maintainer="lee@geeksinthewoods.com"
#COPY target/go_expvar.conf.yaml /etc/datadog-agent/conf.d/go_expvar.d/conf.yaml
COPY target/custom-init.sh /custom-init.sh
#COPY go_expvar.conf.yaml /etc/datadog-agent/conf.d/go_expvar.d/conf.yaml
COPY custom-init.sh /custom-init.sh
ARG service
ENV SERVICE_NAME $service

View File

@ -2,15 +2,59 @@
configFile="/etc/datadog-agent/conf.d/go_expvar.d/conf.yaml"
echo -e "init_config:\n\ninstances:\n - expvar_url: http://localhost:80/debug/vars" > $configFile
echo -e "init_config:\n\ninstances:\n" > $configFile
if [[ "${DD_EXPVAR}" != "" ]]; then
while IFS='|' read -ra HOSTS; do
for h in "${HOSTS[@]}"; do
if [[ "${h}" == "" ]]; then
continue
fi
url=""
for p in $h; do
k=`echo $p | awk -F '=' '{print $1}'`
v=`echo $p | awk -F '=' '{print $2}'`
if [[ "${k}" == "url" ]]; then
url=$v
break
fi
done
if [[ "${url}" == "" ]]; then
echo "No url param found in '${h}'"
continue
fi
echo -e " - expvar_url: ${url}" >> $configFile
if [[ "${DD_TAGS}" != "" ]]; then
echo " tags:" >> $configFile
for t in ${DD_TAGS}; do
echo " - \"${t}\"" >> $configFile
echo " - ${t}" >> $configFile
done
fi
for p in $h; do
k=`echo $p | awk -F '=' '{print $1}'`
v=`echo $p | awk -F '=' '{print $2}'`
if [[ "${k}" == "url" ]]; then
continue
fi
echo " - ${k}:${v}" >> $configFile
done
done
done <<< "$DD_EXPVAR"
else :
echo -e " - expvar_url: http://localhost:80/debug/vars" >> $configFile
if [[ "${DD_TAGS}" != "" ]]; then
echo " tags:" >> $configFile
for t in ${DD_TAGS}; do
echo " - ${t}" >> $configFile
done
fi
fi
cat $configFile
/init
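The rewritten init script only tells the agent's go_expvar check where to scrape; whatever the services publish through the standard expvar package shows up at those /debug/vars URLs. A short sketch with illustrative counter names follows (the project's own counters, maintained by the Metrics middleware further down, are not shown in this diff).

```go
package example

import "expvar"

// Illustrative counters of the kind the go_expvar check above would pick up
// from /debug/vars; the names are placeholders, not the project's.
var (
	requests = expvar.NewInt("requests")
	errCount = expvar.NewInt("errors")
)

// countRequest records one handled request and, when failed is true, one error.
func countRequest(failed bool) {
	requests.Add(1)
	if failed {
		errCount.Add(1)
	}
}
```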

View File

@ -11,7 +11,7 @@ require (
github.com/go-playground/universal-translator v0.16.0
github.com/go-redis/redis v6.15.2+incompatible
github.com/google/go-cmp v0.2.0
github.com/hashicorp/golang-lru v0.5.1
github.com/hashicorp/golang-lru v0.5.1 // indirect
github.com/jmoiron/sqlx v1.2.0
github.com/kelseyhightower/envconfig v1.3.0
github.com/kr/pretty v0.1.0 // indirect
@ -19,7 +19,7 @@ require (
github.com/lib/pq v1.1.1
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646
github.com/opentracing/opentracing-go v1.1.0 // indirect
github.com/openzipkin/zipkin-go v0.1.1
github.com/openzipkin/zipkin-go v0.1.1 // indirect
github.com/pborman/uuid v0.0.0-20180122190007-c65b2f87fee3
github.com/philhofer/fwd v1.0.0 // indirect
github.com/philippgille/gokv v0.5.0 // indirect

View File

@ -5,10 +5,10 @@ import (
"net/http"
"strings"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/auth"
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/web"
"github.com/pkg/errors"
"go.opencensus.io/trace"
)
// ErrForbidden is returned when an authenticated user does not have a
@ -26,8 +26,8 @@ func Authenticate(authenticator *auth.Authenticator) web.Middleware {
// Wrap this handler around the next one provided.
h := func(ctx context.Context, w http.ResponseWriter, r *http.Request, params map[string]string) error {
ctx, span := trace.StartSpan(ctx, "internal.mid.Authenticate")
defer span.End()
span, ctx := tracer.StartSpanFromContext(ctx, "internal.mid.Authenticate")
defer span.Finish()
authHdr := r.Header.Get("Authorization")
if authHdr == "" {
@ -65,8 +65,8 @@ func HasRole(roles ...string) web.Middleware {
f := func(after web.Handler) web.Handler {
h := func(ctx context.Context, w http.ResponseWriter, r *http.Request, params map[string]string) error {
ctx, span := trace.StartSpan(ctx, "internal.mid.HasRole")
defer span.End()
span, ctx := tracer.StartSpanFromContext(ctx, "internal.mid.HasRole")
defer span.Finish()
claims, ok := ctx.Value(auth.Key).(auth.Claims)
if !ok {

View File

@ -5,8 +5,8 @@ import (
"log"
"net/http"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/web"
"go.opencensus.io/trace"
)
// Errors handles errors coming out of the call chain. It detects normal
@ -19,20 +19,13 @@ func Errors(log *log.Logger) web.Middleware {
// Create the handler that will be attached in the middleware chain.
h := func(ctx context.Context, w http.ResponseWriter, r *http.Request, params map[string]string) error {
ctx, span := trace.StartSpan(ctx, "internal.mid.Errors")
defer span.End()
// If the context is missing this value, request the service
// to be shutdown gracefully.
v, ok := ctx.Value(web.KeyValues).(*web.Values)
if !ok {
return web.NewShutdownError("web value missing from context")
}
span, ctx := tracer.StartSpanFromContext(ctx, "internal.mid.Errors")
defer span.Finish()
if err := before(ctx, w, r, params); err != nil {
// Log the error.
log.Printf("%s : ERROR : %+v", v.TraceID, err)
log.Printf("%d : ERROR : %+v", span.Context().TraceID(), err)
// Respond to the error.
if err := web.RespondError(ctx, w, err); err != nil {

View File

@ -6,8 +6,8 @@ import (
"net/http"
"time"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/web"
"go.opencensus.io/trace"
)
// Logger writes some information about the request to the logs in the
@ -19,8 +19,8 @@ func Logger(log *log.Logger) web.Middleware {
// Create the handler that will be attached in the middleware chain.
h := func(ctx context.Context, w http.ResponseWriter, r *http.Request, params map[string]string) error {
ctx, span := trace.StartSpan(ctx, "internal.mid.Logger")
defer span.End()
span, ctx := tracer.StartSpanFromContext(ctx, "internal.mid.Logger")
defer span.Finish()
// If the context is missing this value, request the service
// to be shutdown gracefully.
@ -31,8 +31,8 @@ func Logger(log *log.Logger) web.Middleware {
err := before(ctx, w, r, params)
log.Printf("%s : (%d) : %s %s -> %s (%s)\n",
v.TraceID,
log.Printf("%d : (%d) : %s %s -> %s (%s)\n",
span.Context().TraceID(),
v.StatusCode,
r.Method, r.URL.Path,
r.RemoteAddr, time.Since(v.Now),

View File

@ -6,8 +6,8 @@ import (
"net/http"
"runtime"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/web"
"go.opencensus.io/trace"
)
// m contains the global program counters for the application.
@ -29,8 +29,8 @@ func Metrics() web.Middleware {
// Wrap this handler around the next one provided.
h := func(ctx context.Context, w http.ResponseWriter, r *http.Request, params map[string]string) error {
ctx, span := trace.StartSpan(ctx, "internal.mid.Metrics")
defer span.End()
span, ctx := tracer.StartSpanFromContext(ctx, "internal.mid.Metrics")
defer span.Finish()
err := before(ctx, w, r, params)

View File

@ -5,9 +5,9 @@ import (
"net/http"
"runtime/debug"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/web"
"github.com/pkg/errors"
"go.opencensus.io/trace"
)
// Panics recovers from panics and converts the panic to an error so it is
@ -19,8 +19,8 @@ func Panics() web.Middleware {
// Wrap this handler around the next one provided.
h := func(ctx context.Context, w http.ResponseWriter, r *http.Request, params map[string]string) (err error) {
ctx, span := trace.StartSpan(ctx, "internal.mid.Panics")
defer span.End()
span, ctx := tracer.StartSpanFromContext(ctx, "internal.mid.Panics")
defer span.Finish()
// Defer a function to recover from a panic and set the err return variable
// after the fact. Using the errors package will generate a stack trace.

View File

@ -0,0 +1,64 @@
package mid
import (
"context"
"fmt"
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/web"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"net/http"
)
// Trace adds the base tracing info for requests
func Trace() web.Middleware {
// This is the actual middleware function to be executed.
f := func(before web.Handler) web.Handler {
// Wrap this handler around the next one provided.
h := func(ctx context.Context, w http.ResponseWriter, r *http.Request, params map[string]string) error {
// Span options with request info
opts := []ddtrace.StartSpanOption{
tracer.SpanType(ext.SpanTypeWeb),
tracer.ResourceName(r.URL.Path),
tracer.Tag(ext.HTTPMethod, r.Method),
tracer.Tag(ext.HTTPURL, r.RequestURI),
}
// Continue server side request tracing from previous request.
if spanctx, err := tracer.Extract(tracer.HTTPHeadersCarrier(r.Header)); err == nil {
opts = append(opts, tracer.ChildOf(spanctx))
}
// Start the span for tracking
span, ctx := tracer.StartSpanFromContext(ctx, "http.request", opts...)
defer span.Finish()
// If the context is missing this value, request the service
// to be shutdown gracefully.
v, ok := ctx.Value(web.KeyValues).(*web.Values)
if !ok {
return web.NewShutdownError("web value missing from context")
}
// Execute the request handler
err := before(ctx, w, r, params)
// Set the span status code for the trace
span.SetTag(ext.HTTPCode, v.StatusCode)
// If there was an error, append it to the span
if err != nil {
span.SetTag(ext.Error, fmt.Sprintf("%+v", err))
}
// Return the error so it can be handled further up the chain.
return err
}
return h
}
return f
}
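The tracer.Extract call above continues a trace started by a caller. For completeness, here is a sketch of that caller side (not part of the commit; the target URL and route are hypothetical): tracer.Inject writes the active span context into the outgoing headers so this middleware can pick it up on the receiving service.

```go
package main

import (
	"context"
	"log"
	"net/http"

	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
)

// callWebAPI propagates the active span to a downstream service.
// The target URL and path are illustrative only.
func callWebAPI(ctx context.Context, url string) (*http.Response, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	if span, ok := tracer.SpanFromContext(ctx); ok {
		if err := tracer.Inject(span.Context(), tracer.HTTPHeadersCarrier(req.Header)); err != nil {
			log.Printf("trace : Inject : ERROR : %v", err)
		}
	}
	return http.DefaultClient.Do(req.WithContext(ctx))
}

func main() {
	// datadog:8126 matches the agent address used in docker-compose.
	tracer.Start(tracer.WithAgentAddr("datadog:8126"))
	defer tracer.Stop()

	span, ctx := tracer.StartSpanFromContext(context.Background(), "example.client")
	defer span.Finish()

	if _, err := callWebAPI(ctx, "http://web-api:3001/example"); err != nil {
		log.Printf("example : callWebAPI : ERROR : %v", err)
	}
}
```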

View File

@ -1,194 +0,0 @@
package trace
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net"
"net/http"
"sync"
"time"
"go.opencensus.io/trace"
)
// Error variables for factory validation.
var (
ErrLoggerNotProvided = errors.New("logger not provided")
ErrHostNotProvided = errors.New("host not provided")
)
// Log provides support for logging inside this package.
// Unfortunately, the opentrace API calls into the ExportSpan
// function directly with no means to pass user defined arguments.
type Log func(format string, v ...interface{})
// Exporter provides support to batch spans and send them
// to the sidecar for processing.
type Exporter struct {
log Log // Handler function for logging.
host string // IP:port of the sidecare consuming the trace data.
batchSize int // Size of the batch of spans before sending.
sendInterval time.Duration // Time to send a batch if batch size is not met.
sendTimeout time.Duration // Time to wait for the sidecar to respond on send.
client http.Client // Provides APIs for performing the http send.
batch []*trace.SpanData // Maintains the batch of span data to be sent.
mu sync.Mutex // Provide synchronization to access the batch safely.
timer *time.Timer // Signals when the sendInterval is met.
}
// NewExporter creates an exporter for use.
func NewExporter(log Log, host string, batchSize int, sendInterval, sendTimeout time.Duration) (*Exporter, error) {
if log == nil {
return nil, ErrLoggerNotProvided
}
if host == "" {
return nil, ErrHostNotProvided
}
tr := http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 2,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
e := Exporter{
log: log,
host: host,
batchSize: batchSize,
sendInterval: sendInterval,
sendTimeout: sendTimeout,
client: http.Client{
Transport: &tr,
},
batch: make([]*trace.SpanData, 0, batchSize),
timer: time.NewTimer(sendInterval),
}
return &e, nil
}
// Close sends the remaining spans that have not been sent yet.
func (e *Exporter) Close() (int, error) {
var sendBatch []*trace.SpanData
e.mu.Lock()
{
sendBatch = e.batch
}
e.mu.Unlock()
err := e.send(sendBatch)
if err != nil {
return len(sendBatch), err
}
return len(sendBatch), nil
}
// ExportSpan is called by opentracing when spans are created. It implements
// the Exporter interface.
func (e *Exporter) ExportSpan(span *trace.SpanData) {
sendBatch := e.saveBatch(span)
if sendBatch != nil {
go func() {
e.log("trace : Exporter : ExportSpan : Sending Batch[%d]", len(sendBatch))
if err := e.send(sendBatch); err != nil {
e.log("trace : Exporter : ExportSpan : ERROR : %v", err)
}
}()
}
}
// Saves the span data to the batch. If the batch should be sent,
// returns a batch to send.
func (e *Exporter) saveBatch(span *trace.SpanData) []*trace.SpanData {
var sendBatch []*trace.SpanData
e.mu.Lock()
{
// We want to append this new span to the collection.
e.batch = append(e.batch, span)
// Do we need to send the current batch?
switch {
case len(e.batch) == e.batchSize:
// We hit the batch size. Now save the current
// batch for sending and start a new batch.
sendBatch = e.batch
e.batch = make([]*trace.SpanData, 0, e.batchSize)
e.timer.Reset(e.sendInterval)
default:
// We did not hit the batch size but maybe send what
// we have based on time.
select {
case <-e.timer.C:
// The time has expired so save the current
// batch for sending and start a new batch.
sendBatch = e.batch
e.batch = make([]*trace.SpanData, 0, e.batchSize)
e.timer.Reset(e.sendInterval)
// It's not time yet, just move on.
default:
}
}
}
e.mu.Unlock()
return sendBatch
}
// send uses HTTP to send the data to the tracing sidecare for processing.
func (e *Exporter) send(sendBatch []*trace.SpanData) error {
data, err := json.Marshal(sendBatch)
if err != nil {
return err
}
req, err := http.NewRequest("POST", e.host, bytes.NewBuffer(data))
if err != nil {
return err
}
ctx, cancel := context.WithTimeout(req.Context(), e.sendTimeout)
defer cancel()
req = req.WithContext(ctx)
ch := make(chan error)
go func() {
resp, err := e.client.Do(req)
if err != nil {
ch <- err
return
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusNoContent {
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
ch <- fmt.Errorf("error on call : status[%s]", resp.Status)
return
}
ch <- fmt.Errorf("error on call : status[%s] : %s", resp.Status, string(data))
return
}
ch <- nil
}()
return <-ch
}

View File

@ -1,278 +0,0 @@
package trace
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"net/http/httptest"
"reflect"
"testing"
"time"
"go.opencensus.io/trace"
)
// Success and failure markers.
const (
success = "\u2713"
failed = "\u2717"
)
// inputSpans represents spans of data for the tests.
var inputSpans = []*trace.SpanData{
{Name: "span1"},
{Name: "span2"},
{Name: "span3"},
}
// inputSpansJSON represents a JSON representation of the span data.
var inputSpansJSON = `[{"TraceID":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"SpanID":[0,0,0,0,0,0,0,0],"TraceOptions":0,"ParentSpanID":[0,0,0,0,0,0,0,0],"SpanKind":0,"Name":"span1","StartTime":"0001-01-01T00:00:00Z","EndTime":"0001-01-01T00:00:00Z","Attributes":null,"Annotations":null,"MessageEvents":null,"Code":0,"Message":"","Links":null,"HasRemoteParent":false},{"TraceID":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"SpanID":[0,0,0,0,0,0,0,0],"TraceOptions":0,"ParentSpanID":[0,0,0,0,0,0,0,0],"SpanKind":0,"Name":"span2","StartTime":"0001-01-01T00:00:00Z","EndTime":"0001-01-01T00:00:00Z","Attributes":null,"Annotations":null,"MessageEvents":null,"Code":0,"Message":"","Links":null,"HasRemoteParent":false},{"TraceID":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"SpanID":[0,0,0,0,0,0,0,0],"TraceOptions":0,"ParentSpanID":[0,0,0,0,0,0,0,0],"SpanKind":0,"Name":"span3","StartTime":"0001-01-01T00:00:00Z","EndTime":"0001-01-01T00:00:00Z","Attributes":null,"Annotations":null,"MessageEvents":null,"Code":0,"Message":"","Links":null,"HasRemoteParent":false}]`
// =============================================================================
// logger is required to create an Exporter.
var logger = func(format string, v ...interface{}) {
log.Printf(format, v)
}
// MakeExporter abstracts the error handling aspects of creating an Exporter.
func makeExporter(host string, batchSize int, sendInterval, sendTimeout time.Duration) *Exporter {
exporter, err := NewExporter(logger, host, batchSize, sendInterval, sendTimeout)
if err != nil {
log.Fatalln("Unable to create exporter, ", err)
}
return exporter
}
// =============================================================================
var saveTests = []struct {
name string
e *Exporter
input []*trace.SpanData
output []*trace.SpanData
lastSaveDelay time.Duration // The delay before the last save. For testing intervals.
isInputMatchBatch bool // If the input should match the internal exporter collection after the last save.
isSendBatch bool // If the last save should return nil or batch data.
}{
{"NoSend", makeExporter("test", 10, time.Minute, time.Second), inputSpans, nil, time.Nanosecond, true, false},
{"SendOnBatchSize", makeExporter("test", 3, time.Minute, time.Second), inputSpans, inputSpans, time.Nanosecond, false, true},
{"SendOnTime", makeExporter("test", 4, time.Millisecond, time.Second), inputSpans, inputSpans, 2 * time.Millisecond, false, true},
}
// TestSave validates the save batch functionality is working.
func TestSave(t *testing.T) {
t.Log("Given the need to validate saving span data to a batch.")
{
for i, tt := range saveTests {
t.Logf("\tTest: %d\tWhen running test: %s", i, tt.name)
{
// Save the input of span data.
l := len(tt.input) - 1
var batch []*trace.SpanData
for i, span := range tt.input {
// If this is the last save, take the configured delay.
// We might be testing invertal based batching.
if l == i {
time.Sleep(tt.lastSaveDelay)
}
batch = tt.e.saveBatch(span)
}
// Compare the internal collection with what we saved.
if tt.isInputMatchBatch {
if len(tt.e.batch) != len(tt.input) {
t.Log("\t\tGot :", len(tt.e.batch))
t.Log("\t\tWant:", len(tt.input))
t.Errorf("\t%s\tShould have the same number of spans as input.", failed)
} else {
t.Logf("\t%s\tShould have the same number of spans as input.", success)
}
} else {
if len(tt.e.batch) != 0 {
t.Log("\t\tGot :", len(tt.e.batch))
t.Log("\t\tWant:", 0)
t.Errorf("\t%s\tShould have zero spans.", failed)
} else {
t.Logf("\t%s\tShould have zero spans.", success)
}
}
// Validate the return provided or didn't provide a batch to send.
if !tt.isSendBatch && batch != nil {
t.Errorf("\t%s\tShould not have a batch to send.", failed)
} else if !tt.isSendBatch {
t.Logf("\t%s\tShould not have a batch to send.", success)
}
if tt.isSendBatch && batch == nil {
t.Errorf("\t%s\tShould have a batch to send.", failed)
} else if tt.isSendBatch {
t.Logf("\t%s\tShould have a batch to send.", success)
}
// Compare the batch to send.
if !reflect.DeepEqual(tt.output, batch) {
t.Log("\t\tGot :", batch)
t.Log("\t\tWant:", tt.output)
t.Errorf("\t%s\tShould have an expected match of the batch to send.", failed)
} else {
t.Logf("\t%s\tShould have an expected match of the batch to send.", success)
}
}
}
}
}
// =============================================================================
var sendTests = []struct {
name string
e *Exporter
input []*trace.SpanData
pass bool
}{
{"success", makeExporter("test", 3, time.Minute, time.Hour), inputSpans, true},
{"failure", makeExporter("test", 3, time.Minute, time.Hour), inputSpans[:2], false},
{"timeout", makeExporter("test", 3, time.Minute, time.Nanosecond), inputSpans, false},
}
// mockServer returns a pointer to a server to handle the mock get call.
func mockServer() *httptest.Server {
f := func(w http.ResponseWriter, r *http.Request) {
d, _ := ioutil.ReadAll(r.Body)
data := string(d)
if data != inputSpansJSON {
w.WriteHeader(http.StatusBadRequest)
fmt.Fprint(w, data)
return
}
w.WriteHeader(http.StatusNoContent)
}
return httptest.NewServer(http.HandlerFunc(f))
}
// TestSend validates spans can be sent to the sidecar.
func TestSend(t *testing.T) {
s := mockServer()
defer s.Close()
t.Log("Given the need to validate sending span data to the sidecar.")
{
for i, tt := range sendTests {
t.Logf("\tTest: %d\tWhen running test: %s", i, tt.name)
{
// Set the URL for the call.
tt.e.host = s.URL
// Send the span data.
err := tt.e.send(tt.input)
if tt.pass {
if err != nil {
t.Errorf("\t%s\tShould be able to send the batch successfully: %v", failed, err)
} else {
t.Logf("\t%s\tShould be able to send the batch successfully.", success)
}
} else {
if err == nil {
t.Errorf("\t%s\tShould not be able to send the batch successfully : %v", failed, err)
} else {
t.Logf("\t%s\tShould not be able to send the batch successfully.", success)
}
}
}
}
}
}
// TestClose validates the flushing of the final batched spans.
func TestClose(t *testing.T) {
s := mockServer()
defer s.Close()
t.Log("Given the need to validate flushing the remaining batched spans.")
{
t.Logf("\tTest: %d\tWhen running test: %s", 0, "FlushWithData")
{
e, err := NewExporter(logger, "test", 10, time.Minute, time.Hour)
if err != nil {
t.Fatalf("\t%s\tShould be able to create an Exporter : %v", failed, err)
}
t.Logf("\t%s\tShould be able to create an Exporter.", success)
// Set the URL for the call.
e.host = s.URL
// Save the input of span data.
for _, span := range inputSpans {
e.saveBatch(span)
}
// Close the Exporter and we should get those spans sent.
sent, err := e.Close()
if err != nil {
t.Fatalf("\t%s\tShould be able to flush the Exporter : %v", failed, err)
}
t.Logf("\t%s\tShould be able to flush the Exporter.", success)
if sent != len(inputSpans) {
t.Log("\t\tGot :", sent)
t.Log("\t\tWant:", len(inputSpans))
t.Fatalf("\t%s\tShould have flushed the expected number of spans.", failed)
}
t.Logf("\t%s\tShould have flushed the expected number of spans.", success)
}
t.Logf("\tTest: %d\tWhen running test: %s", 0, "FlushWithError")
{
e, err := NewExporter(logger, "test", 10, time.Minute, time.Hour)
if err != nil {
t.Fatalf("\t%s\tShould be able to create an Exporter : %v", failed, err)
}
t.Logf("\t%s\tShould be able to create an Exporter.", success)
// Set the URL for the call.
e.host = s.URL
// Save the input of span data.
for _, span := range inputSpans[:2] {
e.saveBatch(span)
}
// Close the Exporter and we should get those spans sent.
if _, err := e.Close(); err == nil {
t.Fatalf("\t%s\tShould not be able to flush the Exporter.", failed)
}
t.Logf("\t%s\tShould not be able to flush the Exporter.", success)
}
}
}
// =============================================================================
// TestExporterFailure validates misuse cases are covered.
func TestExporterFailure(t *testing.T) {
t.Log("Given the need to validate Exporter initializes properly.")
{
t.Logf("\tTest: %d\tWhen not passing a proper logger.", 0)
{
_, err := NewExporter(nil, "test", 10, time.Minute, time.Hour)
if err == nil {
t.Errorf("\t%s\tShould not be able to create an Exporter.", failed)
} else {
t.Logf("\t%s\tShould not be able to create an Exporter.", success)
}
}
t.Logf("\tTest: %d\tWhen not passing a proper host.", 1)
{
_, err := NewExporter(logger, "", 10, time.Minute, time.Hour)
if err == nil {
t.Errorf("\t%s\tShould not be able to create an Exporter.", failed)
} else {
t.Logf("\t%s\tShould not be able to create an Exporter.", success)
}
}
}
}

View File

@ -9,9 +9,6 @@ import (
"time"
"github.com/dimfeld/httptreemux"
"go.opencensus.io/plugin/ochttp"
"go.opencensus.io/plugin/ochttp/propagation/tracecontext"
"go.opencensus.io/trace"
)
// ctxKey represents the type of value for the context key.
@ -22,7 +19,6 @@ const KeyValues ctxKey = 1
// Values represent state for each request.
type Values struct {
TraceID string
Now time.Time
StatusCode int
}
@ -36,7 +32,6 @@ type Handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, p
// data/logic on this App struct
type App struct {
*httptreemux.TreeMux
och *ochttp.Handler
shutdown chan os.Signal
log *log.Logger
mw []Middleware
@ -51,17 +46,6 @@ func NewApp(shutdown chan os.Signal, log *log.Logger, mw ...Middleware) *App {
mw: mw,
}
// Create an OpenCensus HTTP Handler which wraps the router. This will start
// the initial span and annotate it with information about the request/response.
//
// This is configured to use the W3C TraceContext standard to set the remote
// parent if an client request includes the appropriate headers.
// https://w3c.github.io/trace-context/
app.och = &ochttp.Handler{
Handler: app.TreeMux,
Propagation: &tracecontext.HTTPFormat{},
}
return &app
}
@ -84,16 +68,12 @@ func (a *App) Handle(verb, path string, handler Handler, mw ...Middleware) {
// The function to execute for each request.
h := func(w http.ResponseWriter, r *http.Request, params map[string]string) {
ctx, span := trace.StartSpan(r.Context(), "internal.platform.web")
defer span.End()
// Set the context with the required values to
// process the request.
v := Values{
TraceID: span.SpanContext().TraceID.String(),
Now: time.Now(),
}
ctx = context.WithValue(ctx, KeyValues, &v)
ctx := context.WithValue(r.Context(), KeyValues, &v)
// Call the wrapped handler functions.
if err := handler(ctx, w, r, params); err != nil {
@ -106,10 +86,3 @@ func (a *App) Handle(verb, path string, handler Handler, mw ...Middleware) {
// Add this handler for the specified verb and route.
a.TreeMux.Handle(verb, path, h)
}
// ServeHTTP implements the http.Handler interface. It overrides the ServeHTTP
// of the embedded TreeMux by using the ochttp.Handler instead. That Handler
// wraps the TreeMux handler so the routes are served.
func (a *App) ServeHTTP(w http.ResponseWriter, r *http.Request) {
a.och.ServeHTTP(w, r)
}
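With the ochttp wrapper and its ServeHTTP override removed, the root span now comes from the new mid.Trace() middleware, which the route registrations earlier in this commit pass first to web.NewApp. Below is a sketch of why that position matters, assuming the framework wraps middleware in the usual first-is-outermost order (the wrapping helper itself is not shown in this diff).

```go
package example

import (
	"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/web"
)

// wrapMiddleware is an assumed sketch of the framework's wrapping order: the
// slice is applied from last to first, so the first middleware passed to
// web.NewApp (mid.Trace) ends up outermost. Its span is therefore already in
// the context when mid.Logger, mid.Errors and mid.Metrics call
// tracer.StartSpanFromContext.
func wrapMiddleware(mw []web.Middleware, handler web.Handler) web.Handler {
	for i := len(mw) - 1; i >= 0; i-- {
		if mw[i] != nil {
			handler = mw[i](handler)
		}
	}
	return handler
}
```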

View File

@ -1,4 +0,0 @@
.DS_Store
bin

View File

@ -1,13 +0,0 @@
language: go
script:
- go vet ./...
- go test -v ./...
go:
- 1.3
- 1.4
- 1.5
- 1.6
- 1.7
- tip

View File

@ -1,8 +0,0 @@
Copyright (c) 2012 Dave Grijalva
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -1,97 +0,0 @@
## Migration Guide from v2 -> v3
Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code.
### `Token.Claims` is now an interface type
The most requested feature from the 2.0 verison of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`.
`MapClaims` is an alias for `map[string]interface{}` with built in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property.
The old example for parsing a token looked like this..
```go
if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
}
```
is now directly mapped to...
```go
if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
claims := token.Claims.(jwt.MapClaims)
fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
}
```
`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type.
```go
type MyCustomClaims struct {
User string
*StandardClaims
}
if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil {
claims := token.Claims.(*MyCustomClaims)
fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt)
}
```
### `ParseFromRequest` has been moved
To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatues have also been augmented to receive a new argument: `Extractor`.
`Extractors` do the work of picking the token string out of a request. The interface is simple and composable.
This simple parsing example:
```go
if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil {
fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
}
```
is directly mapped to:
```go
if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil {
claims := token.Claims.(jwt.MapClaims)
fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
}
```
There are several concrete `Extractor` types provided for your convenience:
* `HeaderExtractor` will search a list of headers until one contains content.
* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content.
* `MultiExtractor` will try a list of `Extractors` in order until one returns content.
* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token.
* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument
* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header
### RSA signing methods no longer accept `[]byte` keys
Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse.
To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types.
```go
func keyLookupFunc(token *jwt.Token) (interface{}, error) {
// Don't forget to validate the alg is what you expect:
if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
}
// Look up key
key, err := lookupPublicKey(token.Header["kid"])
if err != nil {
return nil, err
}
// Unpack key from PEM encoded PKCS8
return jwt.ParseRSAPublicKeyFromPEM(key)
}
```


@ -1,100 +0,0 @@
# jwt-go
[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go)
[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go)
A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly and will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency management tool to pin to version 3.
**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic package. The recommendation is to upgrade to at least Go 1.8.3. See issue #216 for more detail.
**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.
## What the heck is a JWT?
JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.
The first part is called the header. It contains the information necessary for verifying the last part, the signature. For example, it records which signing method was used and which key should verify it.
The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.
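To make that structure concrete, here is a small standalone sketch (not part of this library's API) that splits a token and decodes its header segment using the base64url rules described above; the token value is just an illustrative sample:
```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// An example, unverified token: header.claims.signature
	tokenString := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyIjoiYm9iIn0.signature"

	parts := strings.Split(tokenString, ".")

	// The segments are base64url encoded with the padding stripped,
	// so restore the padding before decoding.
	seg := parts[0]
	if l := len(seg) % 4; l > 0 {
		seg += strings.Repeat("=", 4-l)
	}

	header, err := base64.URLEncoding.DecodeString(seg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(header)) // {"alg":"HS256","typ":"JWT"}
}
```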
## What's in the box?
This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
## Examples
See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:
* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac)
* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac)
* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)
## Extensions
This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go
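For illustration only, a skeletal custom method might look like the sketch below; `SigningMethodDummy` and the `DUMMY` alg name are made up for this example, and a real implementation would of course produce and check a real signature:
```go
// SigningMethodDummy is a placeholder implementation of the SigningMethod interface.
type SigningMethodDummy struct{}

func (m *SigningMethodDummy) Alg() string { return "DUMMY" }

func (m *SigningMethodDummy) Sign(signingString string, key interface{}) (string, error) {
	// Replace with real signing logic.
	return "", jwt.ErrInvalidKey
}

func (m *SigningMethodDummy) Verify(signingString, signature string, key interface{}) error {
	// Replace with real verification logic.
	return jwt.ErrInvalidKey
}

func init() {
	jwt.RegisterSigningMethod("DUMMY", func() jwt.SigningMethod {
		return &SigningMethodDummy{}
	})
}
```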
## Compliance
This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:
* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
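In practice that means an unsecured token will only parse when your key function deliberately returns that constant, roughly like this sketch:
```go
// Only do this if you really intend to accept unsigned (alg=none) tokens.
token, err := jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
	return jwt.UnsafeAllowNoneSignatureType, nil
})
```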
## Project Status & Versioning
This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to import the alternative package path `gopkg.in/dgrijalva/jwt-go.v3`, which does the right thing with respect to semantic versioning.
**BREAKING CHANGES:**
* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
## Usage Tips
### Signing vs Encryption
A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data:
* The author of the token was in the possession of the signing secret
* The data has not been modified since it was signed
It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
### Choosing a Signing Method
There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.
Symmetric signing methods, such as HMAC-SHA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly faster computationally, though this rarely matters. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
### Signing Methods and Key Types
Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones:
* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expects `[]byte` values for signing and validation
* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expects `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expects `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
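A short sketch that follows those rules, signing one token with an HMAC secret and another with an RSA private key (`privatePEM` is assumed to hold PEM bytes loaded elsewhere):
```go
// HS256 takes a []byte secret.
hmacToken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"user": "bob"})
hmacString, err := hmacToken.SignedString([]byte("my-shared-secret"))
if err != nil {
	panic(err)
}

// RS256 takes an *rsa.PrivateKey, unpacked from PEM with the provided helper.
rsaKey, err := jwt.ParseRSAPrivateKeyFromPEM(privatePEM)
if err != nil {
	panic(err)
}
rsaToken := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{"user": "bob"})
rsaString, err := rsaToken.SignedString(rsaKey)
if err != nil {
	panic(err)
}

fmt.Println(hmacString, rsaString)
```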
### JWT and OAuth
It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
## More
Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.


@ -1,118 +0,0 @@
## `jwt-go` Version History
#### 3.2.0
* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. The initial set includes `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
* Deprecated `ParseFromRequestWithClaims` to simplify API in the future.
#### 3.1.0
* Improvements to `jwt` command line tool
* Added `SkipClaimsValidation` option to `Parser`
* Documentation updates
#### 3.0.0
* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
* Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
* `ParseFromRequest` has been moved to `request` subpackage and usage has changed
* The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims.
* Other Additions and Changes
* Added `Claims` interface type to allow users to decode the claims into a custom type
* Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into.
* Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
* Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims`
* Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
* Added several new, more specific, validation errors to error type bitmask
* Moved examples from README to executable example files
* Signing method registry is now thread safe
* Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser)
#### 2.7.0
This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.
* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
* Error text for expired tokens includes how long it's been expired
* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
* Documentation updates
#### 2.6.0
* Exposed inner error within ValidationError
* Fixed validation errors when using UseJSONNumber flag
* Added several unit tests
#### 2.5.0
* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
* Updated/fixed some documentation
* Added more helpful error message when trying to parse tokens that begin with `BEARER `
#### 2.4.0
* Added new type, Parser, to allow for configuration of various parsing parameters
* You can now specify a list of valid signing methods. Anything outside this set will be rejected.
* You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
* Fixed some bugs with ECDSA parsing
#### 2.3.0
* Added support for ECDSA signing methods
* Added support for RSA PSS signing methods (requires go v1.4)
#### 2.2.0
* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic.
#### 2.1.0
Backwards compatible API change that was missed in 2.0.0.
* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
#### 2.0.0
There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
* **Compatibility Breaking Changes**
* `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
* `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
* `KeyFunc` now returns `interface{}` instead of `[]byte`
* `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
* `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
* Added public package global `SigningMethodHS256`
* Added public package global `SigningMethodHS384`
* Added public package global `SigningMethodHS512`
* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
* Added public package global `SigningMethodRS256`
* Added public package global `SigningMethodRS384`
* Added public package global `SigningMethodRS512`
* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
* Refactored the RSA implementation to be easier to read
* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
#### 1.0.2
* Fixed bug in parsing public keys from certificates
* Added more tests around the parsing of keys for RS256
* Code refactoring in RS256 implementation. No functional changes
#### 1.0.1
* Fixed panic if RS256 signing method was passed an invalid key
#### 1.0.0
* First versioned release
* API stabilized
* Supports creating, signing, parsing, and validating JWT tokens
* Supports RS256 and HS256 signing methods


@ -1,134 +0,0 @@
package jwt
import (
"crypto/subtle"
"fmt"
"time"
)
// For a type to be a Claims object, it must just have a Valid method that determines
// if the token is invalid for any supported reason
type Claims interface {
Valid() error
}
// Structured version of Claims Section, as referenced at
// https://tools.ietf.org/html/rfc7519#section-4.1
// See examples for how to use this with your own claim types
type StandardClaims struct {
Audience string `json:"aud,omitempty"`
ExpiresAt int64 `json:"exp,omitempty"`
Id string `json:"jti,omitempty"`
IssuedAt int64 `json:"iat,omitempty"`
Issuer string `json:"iss,omitempty"`
NotBefore int64 `json:"nbf,omitempty"`
Subject string `json:"sub,omitempty"`
}
// Validates time based claims "exp, iat, nbf".
// There is no accounting for clock skew.
// As well, if any of the above claims are not in the token, it will still
// be considered a valid claim.
func (c StandardClaims) Valid() error {
vErr := new(ValidationError)
now := TimeFunc().Unix()
// The claims below are optional, by default, so if they are set to the
// default value in Go, let's not fail the verification for them.
if c.VerifyExpiresAt(now, false) == false {
delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
vErr.Inner = fmt.Errorf("token is expired by %v", delta)
vErr.Errors |= ValidationErrorExpired
}
if c.VerifyIssuedAt(now, false) == false {
vErr.Inner = fmt.Errorf("Token used before issued")
vErr.Errors |= ValidationErrorIssuedAt
}
if c.VerifyNotBefore(now, false) == false {
vErr.Inner = fmt.Errorf("token is not valid yet")
vErr.Errors |= ValidationErrorNotValidYet
}
if vErr.valid() {
return nil
}
return vErr
}
// Compares the aud claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
return verifyAud(c.Audience, cmp, req)
}
// Compares the exp claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool {
return verifyExp(c.ExpiresAt, cmp, req)
}
// Compares the iat claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool {
return verifyIat(c.IssuedAt, cmp, req)
}
// Compares the iss claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool {
return verifyIss(c.Issuer, cmp, req)
}
// Compares the nbf claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
return verifyNbf(c.NotBefore, cmp, req)
}
// ----- helpers
func verifyAud(aud string, cmp string, required bool) bool {
if aud == "" {
return !required
}
if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 {
return true
} else {
return false
}
}
func verifyExp(exp int64, now int64, required bool) bool {
if exp == 0 {
return !required
}
return now <= exp
}
func verifyIat(iat int64, now int64, required bool) bool {
if iat == 0 {
return !required
}
return now >= iat
}
func verifyIss(iss string, cmp string, required bool) bool {
if iss == "" {
return !required
}
if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 {
return true
} else {
return false
}
}
func verifyNbf(nbf int64, now int64, required bool) bool {
if nbf == 0 {
return !required
}
return now >= nbf
}


@ -1,4 +0,0 @@
// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
//
// See README.md for more info.
package jwt


@ -1,148 +0,0 @@
package jwt
import (
"crypto"
"crypto/ecdsa"
"crypto/rand"
"errors"
"math/big"
)
var (
// Sadly this is missing from crypto/ecdsa compared to crypto/rsa
ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
)
// Implements the ECDSA family of signing methods.
// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
type SigningMethodECDSA struct {
Name string
Hash crypto.Hash
KeySize int
CurveBits int
}
// Specific instances for EC256 and company
var (
SigningMethodES256 *SigningMethodECDSA
SigningMethodES384 *SigningMethodECDSA
SigningMethodES512 *SigningMethodECDSA
)
func init() {
// ES256
SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
return SigningMethodES256
})
// ES384
SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
return SigningMethodES384
})
// ES512
SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
return SigningMethodES512
})
}
func (m *SigningMethodECDSA) Alg() string {
return m.Name
}
// Implements the Verify method from SigningMethod
// For this verify method, key must be an ecdsa.PublicKey struct
func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
var err error
// Decode the signature
var sig []byte
if sig, err = DecodeSegment(signature); err != nil {
return err
}
// Get the key
var ecdsaKey *ecdsa.PublicKey
switch k := key.(type) {
case *ecdsa.PublicKey:
ecdsaKey = k
default:
return ErrInvalidKeyType
}
if len(sig) != 2*m.KeySize {
return ErrECDSAVerification
}
r := big.NewInt(0).SetBytes(sig[:m.KeySize])
s := big.NewInt(0).SetBytes(sig[m.KeySize:])
// Create hasher
if !m.Hash.Available() {
return ErrHashUnavailable
}
hasher := m.Hash.New()
hasher.Write([]byte(signingString))
// Verify the signature
if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true {
return nil
} else {
return ErrECDSAVerification
}
}
// Implements the Sign method from SigningMethod
// For this signing method, key must be an ecdsa.PrivateKey struct
func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
// Get the key
var ecdsaKey *ecdsa.PrivateKey
switch k := key.(type) {
case *ecdsa.PrivateKey:
ecdsaKey = k
default:
return "", ErrInvalidKeyType
}
// Create the hasher
if !m.Hash.Available() {
return "", ErrHashUnavailable
}
hasher := m.Hash.New()
hasher.Write([]byte(signingString))
// Sign the string and return r, s
if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
curveBits := ecdsaKey.Curve.Params().BitSize
if m.CurveBits != curveBits {
return "", ErrInvalidKey
}
keyBytes := curveBits / 8
if curveBits%8 > 0 {
keyBytes += 1
}
// We serialize the outputs (r and s) into big-endian byte arrays and pad
// them with zeros on the left to make sure the sizes work out. Both arrays
// must be keyBytes long, and the output must be 2*keyBytes long.
rBytes := r.Bytes()
rBytesPadded := make([]byte, keyBytes)
copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
sBytes := s.Bytes()
sBytesPadded := make([]byte, keyBytes)
copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
out := append(rBytesPadded, sBytesPadded...)
return EncodeSegment(out), nil
} else {
return "", err
}
}


@ -1,67 +0,0 @@
package jwt
import (
"crypto/ecdsa"
"crypto/x509"
"encoding/pem"
"errors"
)
var (
ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key")
ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key")
)
// Parse PEM encoded Elliptic Curve Private Key Structure
func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
var err error
// Parse PEM block
var block *pem.Block
if block, _ = pem.Decode(key); block == nil {
return nil, ErrKeyMustBePEMEncoded
}
// Parse the key
var parsedKey interface{}
if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
return nil, err
}
var pkey *ecdsa.PrivateKey
var ok bool
if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
return nil, ErrNotECPrivateKey
}
return pkey, nil
}
// Parse PEM encoded PKCS1 or PKCS8 public key
func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
var err error
// Parse PEM block
var block *pem.Block
if block, _ = pem.Decode(key); block == nil {
return nil, ErrKeyMustBePEMEncoded
}
// Parse the key
var parsedKey interface{}
if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
parsedKey = cert.PublicKey
} else {
return nil, err
}
}
var pkey *ecdsa.PublicKey
var ok bool
if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
return nil, ErrNotECPublicKey
}
return pkey, nil
}


@ -1,59 +0,0 @@
package jwt
import (
"errors"
)
// Error constants
var (
ErrInvalidKey = errors.New("key is invalid")
ErrInvalidKeyType = errors.New("key is of invalid type")
ErrHashUnavailable = errors.New("the requested hash function is unavailable")
)
// The errors that might occur when parsing and validating a token
const (
ValidationErrorMalformed uint32 = 1 << iota // Token is malformed
ValidationErrorUnverifiable // Token could not be verified because of signing problems
ValidationErrorSignatureInvalid // Signature validation failed
// Standard Claim validation errors
ValidationErrorAudience // AUD validation failed
ValidationErrorExpired // EXP validation failed
ValidationErrorIssuedAt // IAT validation failed
ValidationErrorIssuer // ISS validation failed
ValidationErrorNotValidYet // NBF validation failed
ValidationErrorId // JTI validation failed
ValidationErrorClaimsInvalid // Generic claims validation error
)
// Helper for constructing a ValidationError with a string error message
func NewValidationError(errorText string, errorFlags uint32) *ValidationError {
return &ValidationError{
text: errorText,
Errors: errorFlags,
}
}
// The error from Parse if token is not valid
type ValidationError struct {
Inner error // stores the error returned by external dependencies, i.e.: KeyFunc
Errors uint32 // bitfield. see ValidationError... constants
text string // errors that do not have a valid error just have text
}
// Validation error is an error type
func (e ValidationError) Error() string {
if e.Inner != nil {
return e.Inner.Error()
} else if e.text != "" {
return e.text
} else {
return "token is invalid"
}
}
// No errors
func (e *ValidationError) valid() bool {
return e.Errors == 0
}


@ -1,95 +0,0 @@
package jwt
import (
"crypto"
"crypto/hmac"
"errors"
)
// Implements the HMAC-SHA family of signing methods.
// Expects key type of []byte for both signing and validation
type SigningMethodHMAC struct {
Name string
Hash crypto.Hash
}
// Specific instances for HS256 and company
var (
SigningMethodHS256 *SigningMethodHMAC
SigningMethodHS384 *SigningMethodHMAC
SigningMethodHS512 *SigningMethodHMAC
ErrSignatureInvalid = errors.New("signature is invalid")
)
func init() {
// HS256
SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
return SigningMethodHS256
})
// HS384
SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
return SigningMethodHS384
})
// HS512
SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
return SigningMethodHS512
})
}
func (m *SigningMethodHMAC) Alg() string {
return m.Name
}
// Verify the signature of HSXXX tokens. Returns nil if the signature is valid.
func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
// Verify the key is the right type
keyBytes, ok := key.([]byte)
if !ok {
return ErrInvalidKeyType
}
// Decode signature, for comparison
sig, err := DecodeSegment(signature)
if err != nil {
return err
}
// Can we use the specified hashing method?
if !m.Hash.Available() {
return ErrHashUnavailable
}
// This signing method is symmetric, so we validate the signature
// by reproducing the signature from the signing string and key, then
// comparing that against the provided signature.
hasher := hmac.New(m.Hash.New, keyBytes)
hasher.Write([]byte(signingString))
if !hmac.Equal(sig, hasher.Sum(nil)) {
return ErrSignatureInvalid
}
// No validation errors. Signature is good.
return nil
}
// Implements the Sign method from SigningMethod for this signing method.
// Key must be []byte
func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
if keyBytes, ok := key.([]byte); ok {
if !m.Hash.Available() {
return "", ErrHashUnavailable
}
hasher := hmac.New(m.Hash.New, keyBytes)
hasher.Write([]byte(signingString))
return EncodeSegment(hasher.Sum(nil)), nil
}
return "", ErrInvalidKeyType
}


@ -1,94 +0,0 @@
package jwt
import (
"encoding/json"
"errors"
// "fmt"
)
// Claims type that uses the map[string]interface{} for JSON decoding
// This is the default claims type if you don't supply one
type MapClaims map[string]interface{}
// Compares the aud claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
aud, _ := m["aud"].(string)
return verifyAud(aud, cmp, req)
}
// Compares the exp claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
switch exp := m["exp"].(type) {
case float64:
return verifyExp(int64(exp), cmp, req)
case json.Number:
v, _ := exp.Int64()
return verifyExp(v, cmp, req)
}
return req == false
}
// Compares the iat claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
switch iat := m["iat"].(type) {
case float64:
return verifyIat(int64(iat), cmp, req)
case json.Number:
v, _ := iat.Int64()
return verifyIat(v, cmp, req)
}
return req == false
}
// Compares the iss claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
iss, _ := m["iss"].(string)
return verifyIss(iss, cmp, req)
}
// Compares the nbf claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
switch nbf := m["nbf"].(type) {
case float64:
return verifyNbf(int64(nbf), cmp, req)
case json.Number:
v, _ := nbf.Int64()
return verifyNbf(v, cmp, req)
}
return req == false
}
// Validates time based claims "exp, iat, nbf".
// There is no accounting for clock skew.
// As well, if any of the above claims are not in the token, it will still
// be considered a valid claim.
func (m MapClaims) Valid() error {
vErr := new(ValidationError)
now := TimeFunc().Unix()
if m.VerifyExpiresAt(now, false) == false {
vErr.Inner = errors.New("Token is expired")
vErr.Errors |= ValidationErrorExpired
}
if m.VerifyIssuedAt(now, false) == false {
vErr.Inner = errors.New("Token used before issued")
vErr.Errors |= ValidationErrorIssuedAt
}
if m.VerifyNotBefore(now, false) == false {
vErr.Inner = errors.New("Token is not valid yet")
vErr.Errors |= ValidationErrorNotValidYet
}
if vErr.valid() {
return nil
}
return vErr
}


@ -1,52 +0,0 @@
package jwt
// Implements the none signing method. This is required by the spec
// but you probably should never use it.
var SigningMethodNone *signingMethodNone
const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed"
var NoneSignatureTypeDisallowedError error
type signingMethodNone struct{}
type unsafeNoneMagicConstant string
func init() {
SigningMethodNone = &signingMethodNone{}
NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid)
RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod {
return SigningMethodNone
})
}
func (m *signingMethodNone) Alg() string {
return "none"
}
// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key
func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) {
// Key must be UnsafeAllowNoneSignatureType to prevent accidentally
// accepting 'none' signing method
if _, ok := key.(unsafeNoneMagicConstant); !ok {
return NoneSignatureTypeDisallowedError
}
// If signing method is none, signature must be an empty string
if signature != "" {
return NewValidationError(
"'none' signing method with non-empty signature",
ValidationErrorSignatureInvalid,
)
}
// Accept 'none' signing method.
return nil
}
// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key
func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) {
if _, ok := key.(unsafeNoneMagicConstant); ok {
return "", nil
}
return "", NoneSignatureTypeDisallowedError
}


@ -1,148 +0,0 @@
package jwt
import (
"bytes"
"encoding/json"
"fmt"
"strings"
)
type Parser struct {
ValidMethods []string // If populated, only these methods will be considered valid
UseJSONNumber bool // Use JSON Number format in JSON decoder
SkipClaimsValidation bool // Skip claims validation during token parsing
}
// Parse, validate, and return a token.
// keyFunc will receive the parsed token and should return the key for validating.
// If everything is kosher, err will be nil
func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
}
func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
token, parts, err := p.ParseUnverified(tokenString, claims)
if err != nil {
return token, err
}
// Verify signing method is in the required set
if p.ValidMethods != nil {
var signingMethodValid = false
var alg = token.Method.Alg()
for _, m := range p.ValidMethods {
if m == alg {
signingMethodValid = true
break
}
}
if !signingMethodValid {
// signing method is not in the listed set
return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid)
}
}
// Lookup key
var key interface{}
if keyFunc == nil {
// keyFunc was not provided. short circuiting validation
return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable)
}
if key, err = keyFunc(token); err != nil {
// keyFunc returned an error
if ve, ok := err.(*ValidationError); ok {
return token, ve
}
return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
}
vErr := &ValidationError{}
// Validate Claims
if !p.SkipClaimsValidation {
if err := token.Claims.Valid(); err != nil {
// If the Claims Valid returned an error, check if it is a validation error,
// If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
if e, ok := err.(*ValidationError); !ok {
vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
} else {
vErr = e
}
}
}
// Perform validation
token.Signature = parts[2]
if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
vErr.Inner = err
vErr.Errors |= ValidationErrorSignatureInvalid
}
if vErr.valid() {
token.Valid = true
return token, nil
}
return token, vErr
}
// WARNING: Don't use this method unless you know what you're doing
//
// This method parses the token but doesn't validate the signature. It's only
// ever useful in cases where you know the signature is valid (because it has
// been checked previously in the stack) and you want to extract values from
// it.
func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
parts = strings.Split(tokenString, ".")
if len(parts) != 3 {
return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
}
token = &Token{Raw: tokenString}
// parse Header
var headerBytes []byte
if headerBytes, err = DecodeSegment(parts[0]); err != nil {
if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
}
return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
}
if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
}
// parse Claims
var claimBytes []byte
token.Claims = claims
if claimBytes, err = DecodeSegment(parts[1]); err != nil {
return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
}
dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
if p.UseJSONNumber {
dec.UseNumber()
}
// JSON Decode. Special case for map type to avoid weird pointer behavior
if c, ok := token.Claims.(MapClaims); ok {
err = dec.Decode(&c)
} else {
err = dec.Decode(&claims)
}
// Handle decode error
if err != nil {
return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
}
// Lookup signature method
if method, ok := token.Header["alg"].(string); ok {
if token.Method = GetSigningMethod(method); token.Method == nil {
return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
}
} else {
return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
}
return token, parts, nil
}


@ -1,101 +0,0 @@
package jwt
import (
"crypto"
"crypto/rand"
"crypto/rsa"
)
// Implements the RSA family of signing methods.
// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
type SigningMethodRSA struct {
Name string
Hash crypto.Hash
}
// Specific instances for RS256 and company
var (
SigningMethodRS256 *SigningMethodRSA
SigningMethodRS384 *SigningMethodRSA
SigningMethodRS512 *SigningMethodRSA
)
func init() {
// RS256
SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
return SigningMethodRS256
})
// RS384
SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
return SigningMethodRS384
})
// RS512
SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
return SigningMethodRS512
})
}
func (m *SigningMethodRSA) Alg() string {
return m.Name
}
// Implements the Verify method from SigningMethod
// For this signing method, key must be an *rsa.PublicKey structure.
func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
var err error
// Decode the signature
var sig []byte
if sig, err = DecodeSegment(signature); err != nil {
return err
}
var rsaKey *rsa.PublicKey
var ok bool
if rsaKey, ok = key.(*rsa.PublicKey); !ok {
return ErrInvalidKeyType
}
// Create hasher
if !m.Hash.Available() {
return ErrHashUnavailable
}
hasher := m.Hash.New()
hasher.Write([]byte(signingString))
// Verify the signature
return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
}
// Implements the Sign method from SigningMethod
// For this signing method, key must be an *rsa.PrivateKey structure.
func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
var rsaKey *rsa.PrivateKey
var ok bool
// Validate type of key
if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
return "", ErrInvalidKey
}
// Create the hasher
if !m.Hash.Available() {
return "", ErrHashUnavailable
}
hasher := m.Hash.New()
hasher.Write([]byte(signingString))
// Sign the string and return the encoded bytes
if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
return EncodeSegment(sigBytes), nil
} else {
return "", err
}
}


@ -1,126 +0,0 @@
// +build go1.4
package jwt
import (
"crypto"
"crypto/rand"
"crypto/rsa"
)
// Implements the RSAPSS family of signing methods.
type SigningMethodRSAPSS struct {
*SigningMethodRSA
Options *rsa.PSSOptions
}
// Specific instances for RS/PS and company
var (
SigningMethodPS256 *SigningMethodRSAPSS
SigningMethodPS384 *SigningMethodRSAPSS
SigningMethodPS512 *SigningMethodRSAPSS
)
func init() {
// PS256
SigningMethodPS256 = &SigningMethodRSAPSS{
&SigningMethodRSA{
Name: "PS256",
Hash: crypto.SHA256,
},
&rsa.PSSOptions{
SaltLength: rsa.PSSSaltLengthAuto,
Hash: crypto.SHA256,
},
}
RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
return SigningMethodPS256
})
// PS384
SigningMethodPS384 = &SigningMethodRSAPSS{
&SigningMethodRSA{
Name: "PS384",
Hash: crypto.SHA384,
},
&rsa.PSSOptions{
SaltLength: rsa.PSSSaltLengthAuto,
Hash: crypto.SHA384,
},
}
RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
return SigningMethodPS384
})
// PS512
SigningMethodPS512 = &SigningMethodRSAPSS{
&SigningMethodRSA{
Name: "PS512",
Hash: crypto.SHA512,
},
&rsa.PSSOptions{
SaltLength: rsa.PSSSaltLengthAuto,
Hash: crypto.SHA512,
},
}
RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
return SigningMethodPS512
})
}
// Implements the Verify method from SigningMethod
// For this verify method, key must be an rsa.PublicKey struct
func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error {
var err error
// Decode the signature
var sig []byte
if sig, err = DecodeSegment(signature); err != nil {
return err
}
var rsaKey *rsa.PublicKey
switch k := key.(type) {
case *rsa.PublicKey:
rsaKey = k
default:
return ErrInvalidKey
}
// Create hasher
if !m.Hash.Available() {
return ErrHashUnavailable
}
hasher := m.Hash.New()
hasher.Write([]byte(signingString))
return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options)
}
// Implements the Sign method from SigningMethod
// For this signing method, key must be an rsa.PrivateKey struct
func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) {
var rsaKey *rsa.PrivateKey
switch k := key.(type) {
case *rsa.PrivateKey:
rsaKey = k
default:
return "", ErrInvalidKeyType
}
// Create the hasher
if !m.Hash.Available() {
return "", ErrHashUnavailable
}
hasher := m.Hash.New()
hasher.Write([]byte(signingString))
// Sign the string and return the encoded bytes
if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
return EncodeSegment(sigBytes), nil
} else {
return "", err
}
}


@ -1,101 +0,0 @@
package jwt
import (
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"errors"
)
var (
ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key")
ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key")
ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key")
)
// Parse PEM encoded PKCS1 or PKCS8 private key
func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
var err error
// Parse PEM block
var block *pem.Block
if block, _ = pem.Decode(key); block == nil {
return nil, ErrKeyMustBePEMEncoded
}
var parsedKey interface{}
if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
return nil, err
}
}
var pkey *rsa.PrivateKey
var ok bool
if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
return nil, ErrNotRSAPrivateKey
}
return pkey, nil
}
// Parse PEM encoded PKCS1 or PKCS8 private key protected with password
func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
var err error
// Parse PEM block
var block *pem.Block
if block, _ = pem.Decode(key); block == nil {
return nil, ErrKeyMustBePEMEncoded
}
var parsedKey interface{}
var blockDecrypted []byte
if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
return nil, err
}
if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
return nil, err
}
}
var pkey *rsa.PrivateKey
var ok bool
if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
return nil, ErrNotRSAPrivateKey
}
return pkey, nil
}
// Parse PEM encoded PKCS1 or PKCS8 public key
func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
var err error
// Parse PEM block
var block *pem.Block
if block, _ = pem.Decode(key); block == nil {
return nil, ErrKeyMustBePEMEncoded
}
// Parse the key
var parsedKey interface{}
if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
parsedKey = cert.PublicKey
} else {
return nil, err
}
}
var pkey *rsa.PublicKey
var ok bool
if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
return nil, ErrNotRSAPublicKey
}
return pkey, nil
}


@ -1,35 +0,0 @@
package jwt
import (
"sync"
)
var signingMethods = map[string]func() SigningMethod{}
var signingMethodLock = new(sync.RWMutex)
// Implement SigningMethod to add new methods for signing or verifying tokens.
type SigningMethod interface {
Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid
Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error
Alg() string // returns the alg identifier for this method (example: 'HS256')
}
// Register the "alg" name and a factory function for signing method.
// This is typically done during init() in the method's implementation
func RegisterSigningMethod(alg string, f func() SigningMethod) {
signingMethodLock.Lock()
defer signingMethodLock.Unlock()
signingMethods[alg] = f
}
// Get a signing method from an "alg" string
func GetSigningMethod(alg string) (method SigningMethod) {
signingMethodLock.RLock()
defer signingMethodLock.RUnlock()
if methodF, ok := signingMethods[alg]; ok {
method = methodF()
}
return
}


@ -1,108 +0,0 @@
package jwt
import (
"encoding/base64"
"encoding/json"
"strings"
"time"
)
// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time).
// You can override it to use another time value. This is useful for testing or if your
// server uses a different time zone than your tokens.
var TimeFunc = time.Now
// Parse methods use this callback function to supply
// the key for verification. The function receives the parsed,
// but unverified Token. This allows you to use properties in the
// Header of the token (such as `kid`) to identify which key to use.
type Keyfunc func(*Token) (interface{}, error)
// A JWT Token. Different fields will be used depending on whether you're
// creating or parsing/verifying a token.
type Token struct {
Raw string // The raw token. Populated when you Parse a token
Method SigningMethod // The signing method used or to be used
Header map[string]interface{} // The first segment of the token
Claims Claims // The second segment of the token
Signature string // The third segment of the token. Populated when you Parse a token
Valid bool // Is the token valid? Populated when you Parse/Verify a token
}
// Create a new Token. Takes a signing method
func New(method SigningMethod) *Token {
return NewWithClaims(method, MapClaims{})
}
func NewWithClaims(method SigningMethod, claims Claims) *Token {
return &Token{
Header: map[string]interface{}{
"typ": "JWT",
"alg": method.Alg(),
},
Claims: claims,
Method: method,
}
}
// Get the complete, signed token
func (t *Token) SignedString(key interface{}) (string, error) {
var sig, sstr string
var err error
if sstr, err = t.SigningString(); err != nil {
return "", err
}
if sig, err = t.Method.Sign(sstr, key); err != nil {
return "", err
}
return strings.Join([]string{sstr, sig}, "."), nil
}
// Generate the signing string. This is the
// most expensive part of the whole deal. Unless you
// need this for something special, just go straight for
// the SignedString.
func (t *Token) SigningString() (string, error) {
var err error
parts := make([]string, 2)
for i, _ := range parts {
var jsonValue []byte
if i == 0 {
if jsonValue, err = json.Marshal(t.Header); err != nil {
return "", err
}
} else {
if jsonValue, err = json.Marshal(t.Claims); err != nil {
return "", err
}
}
parts[i] = EncodeSegment(jsonValue)
}
return strings.Join(parts, "."), nil
}
// Parse, validate, and return a token.
// keyFunc will receive the parsed token and should return the key for validating.
// If everything is kosher, err will be nil
func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
return new(Parser).Parse(tokenString, keyFunc)
}
func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
return new(Parser).ParseWithClaims(tokenString, claims, keyFunc)
}
// Encode JWT specific base64url encoding with padding stripped
func EncodeSegment(seg []byte) string {
return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=")
}
// Decode JWT specific base64url encoding with padding stripped
func DecodeSegment(seg string) ([]byte, error) {
if l := len(seg) % 4; l > 0 {
seg += strings.Repeat("=", 4-l)
}
return base64.URLEncoding.DecodeString(seg)
}


@ -1,23 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test


@ -1,14 +0,0 @@
language: go
gobuild_args: "-v -race"
go:
- 1.5
- 1.6
- 1.7
- 1.8
- 1.9
- tip
matrix:
allow_failures:
- go: tip


@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014,2015 Daniel Imfeld
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@ -1,240 +0,0 @@
httptreemux [![Build Status](https://travis-ci.org/dimfeld/httptreemux.png?branch=master)](https://travis-ci.org/dimfeld/httptreemux) [![GoDoc](https://godoc.org/github.com/dimfeld/httptreemux?status.svg)](https://godoc.org/github.com/dimfeld/httptreemux)
===========
High-speed, flexible, tree-based HTTP router for Go.
This is inspired by [Julien Schmidt's httprouter](https://www.github.com/julienschmidt/httprouter), in that it uses a patricia tree, but the implementation is rather different. Specifically, the routing rules are relaxed so that a single path segment may be a wildcard in one route and a static token in another. This gives a nice combination of high performance with a lot of convenience in designing the routing patterns. In [benchmarks](https://github.com/julienschmidt/go-http-routing-benchmark), httptreemux is close to, but slightly slower than, httprouter.
Release notes may be found using the [Github releases tab](https://github.com/dimfeld/httptreemux/releases). Version numbers are compatible with the [Semantic Versioning 2.0.0](http://semver.org/) convention, and a new release is made after every change to the code.
## Why?
There are a lot of good routers out there. But looking at the ones that were really lightweight, I couldn't quite get something that fit with the route patterns I wanted. The code itself is simple enough, so I spent an evening writing this.
## Handler
The handler is a simple function with the prototype `func(w http.ResponseWriter, r *http.Request, params map[string]string)`. The params argument contains the parameters parsed from wildcards and catch-alls in the URL, as described below. This type is aliased as httptreemux.HandlerFunc.
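For example, a minimal route registration using that prototype (the `/hello/:name` route is just an illustration) might look like:
```go
router := httptreemux.New()
router.GET("/hello/:name", func(w http.ResponseWriter, r *http.Request, params map[string]string) {
	// params holds the values captured by wildcards such as :name.
	fmt.Fprintf(w, "hello, %s", params["name"])
})
http.ListenAndServe(":8080", router)
```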
### Using http.HandlerFunc
Due to the inclusion of the [context](https://godoc.org/context) package as of Go 1.7, `httptreemux` now supports handlers of type [http.HandlerFunc](https://godoc.org/net/http#HandlerFunc). There are two ways to enable this support.
#### Adapting an Existing Router
The `UsingContext` method will wrap the router or group in a new group at the same path, but adapted for use with `context` and `http.HandlerFunc`.
```go
router := httptreemux.New()
group := router.NewGroup("/api")
group.GET("/v1/:id", func(w http.ResponseWriter, r *http.Request, params map[string]string) {
id := params["id"]
fmt.Fprintf(w, "GET /api/v1/%s", id)
})
// UsingContext returns a version of the router or group with context support.
ctxGroup := group.UsingContext() // sibling to 'group' node in tree
ctxGroup.GET("/v2/:id", func(w http.ResponseWriter, r *http.Request) {
params := httptreemux.ContextParams(r.Context())
id := params["id"]
fmt.Fprintf(w, "GET /api/v2/%s", id)
})
http.ListenAndServe(":8080", router)
```
#### New Router with Context Support
The `NewContextMux` function returns a router preconfigured for use with `context` and `http.HandlerFunc`.
```go
router := httptreemux.NewContextMux()
router.GET("/:page", func(w http.ResponseWriter, r *http.Request) {
params := httptreemux.ContextParams(r.Context())
fmt.Fprintf(w, "GET /%s", params["page"])
})
group := router.NewGroup("/api")
group.GET("/v1/:id", func(w http.ResponseWriter, r *http.Request) {
params := httptreemux.ContextParams(r.Context())
id := params["id"]
fmt.Fprintf(w, "GET /api/v1/%s", id)
})
http.ListenAndServe(":8080", router)
```
## Routing Rules
The syntax here is also modeled after httprouter. Each variable in a path may match on one segment only, except for an optional catch-all variable at the end of the URL.
Some examples of valid URL patterns are:
* `/post/all`
* `/post/:postid`
* `/post/:postid/page/:page`
* `/post/:postid/:page`
* `/images/*path`
* `/favicon.ico`
* `/:year/:month/`
* `/:year/:month/:post`
* `/:page`
Note that all of the above URL patterns may exist concurrently in the router.
Path elements starting with `:` indicate a wildcard in the path. A wildcard will only match on a single path segment. That is, the pattern `/post/:postid` will match on `/post/1` or `/post/1/`, but not `/post/1/2`.
A path element starting with `*` is a catch-all, whose value will be a string containing all text in the URL matched by the wildcards. For example, with a pattern of `/images/*path` and a requested URL `images/abc/def`, path would contain `abc/def`.
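For instance, reusing the `router` from the earlier examples, a sketch of a catch-all route that echoes the captured remainder of the path:
```go
router.GET("/images/*path", func(w http.ResponseWriter, r *http.Request, params map[string]string) {
	// For a request to /images/abc/def, params["path"] is "abc/def".
	fmt.Fprintf(w, "you asked for %s", params["path"])
})
```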
#### Using : and * in routing patterns
The characters `:` and `*` can be used at the beginning of a path segment by escaping them with a backslash. A double backslash at the beginning of a segment is interpreted as a single backslash. These escapes are only checked at the very beginning of a path segment; they are not necessary or processed elsewhere in a token.
```go
router.GET("/foo/\\*starToken", handler) // matches /foo/*starToken
router.GET("/foo/star*inTheMiddle", handler) // matches /foo/star*inTheMiddle
router.GET("/foo/starBackslash\\*", handler) // matches /foo/starBackslash\*
router.GET("/foo/\\\\*backslashWithStar") // matches /foo/\*backslashWithStar
```
### Routing Groups
Routing groups let you create a new set of routes with a given path prefix, which makes it easier to create clusters of paths like:
* `/api/v1/foo`
* `/api/v1/bar`
To use this you do:
```go
router = httptreemux.New()
api := router.NewGroup("/api/v1")
api.GET("/foo", fooHandler) // becomes /api/v1/foo
api.GET("/bar", barHandler) // becomes /api/v1/bar
```
### Routing Priority
The priority rules in the router are simple.
1. Static path segments take the highest priority. If a segment and its subtree are able to match the URL, that match is returned.
2. Wildcards take second priority. For a particular wildcard to match, that wildcard and its subtree must match the URL.
3. Finally, a catch-all rule will match when the earlier path segments have matched, and none of the static or wildcard conditions have matched. Catch-all rules must be at the end of a pattern.
So with the following patterns adapted from [simpleblog](https://www.github.com/dimfeld/simpleblog), we'll see certain matches:
```go
router = httptreemux.New()
router.GET("/:page", pageHandler)
router.GET("/:year/:month/:post", postHandler)
router.GET("/:year/:month", archiveHandler)
router.GET("/images/*path", staticHandler)
router.GET("/favicon.ico", staticHandler)
```
#### Example scenarios
- `/abc` will match `/:page`
- `/2014/05` will match `/:year/:month`
- `/2014/05/really-great-blog-post` will match `/:year/:month/:post`
- `/images/CoolImage.gif` will match `/images/*path`
- `/images/2014/05/MayImage.jpg` will also match `/images/*path`, with all the text after `/images` stored in the variable path.
- `/favicon.ico` will match `/favicon.ico`
### Special Method Behavior
If TreeMux.HeadCanUseGet is set to true, the router will call the GET handler for a pattern when a HEAD request is processed, if no HEAD handler has been added for that pattern. This behavior is enabled by default.
Go's http.ServeContent and related functions already handle the HEAD method correctly by sending only the header, so in most cases your handlers will not need any special cases for it.
By default TreeMux.OptionsHandler is nil and has no effect on your routing. If you set it, it will be called on OPTIONS requests to any path already registered by another method. A path-specific handler registered with `router.OPTIONS` overrides the global OptionsHandler for that path.
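For example, a global preflight responder plus a path-specific override; this is a sketch, and `listWidgets` is a hypothetical handler:
```go
router := httptreemux.New()

// Called for OPTIONS requests to any path registered with another method.
router.OptionsHandler = func(w http.ResponseWriter, r *http.Request, params map[string]string) {
	w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS")
	w.WriteHeader(http.StatusNoContent)
}

// A path-specific OPTIONS handler overrides the global one for /widgets only.
router.OPTIONS("/widgets", func(w http.ResponseWriter, r *http.Request, params map[string]string) {
	w.WriteHeader(http.StatusNoContent)
})

// HEAD /widgets is also served by this GET handler, since HeadCanUseGet defaults to true.
router.GET("/widgets", listWidgets)
```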
### Trailing Slashes
The router has special handling for paths with trailing slashes. If a pattern is added to the router with a trailing slash, any matches on that pattern without a trailing slash will be redirected to the version with the slash. If a pattern does not have a trailing slash, matches on that pattern with a trailing slash will be redirected to the version without.
The trailing slash flag is only stored once for a pattern. That is, if a pattern is added for a method with a trailing slash, all other methods for that pattern will also be considered to have a trailing slash, regardless of whether or not it is specified for those methods too.
However this behavior can be turned off by setting TreeMux.RedirectTrailingSlash to false. By default it is set to true.
One exception to this rule is catch-all patterns. By default, trailing slash redirection is disabled on catch-all patterns, since the structure of the entire URL and the desired patterns can not be predicted. If trailing slash removal is desired on catch-all patterns, set TreeMux.RemoveCatchAllTrailingSlash to true.
```go
router = httptreemux.New()
router.GET("/about", pageHandler)
router.GET("/posts/", postIndexHandler)
router.POST("/posts", postFormHandler)
// GET /about will match normally.
// GET /about/ will redirect to /about.
// GET /posts will redirect to /posts/.
// GET /posts/ will match normally.
// POST /posts will redirect to /posts/, because the GET method used a trailing slash.
```
### Custom Redirects
RedirectBehavior sets the behavior when the router redirects the request to the canonical version of the requested URL using RedirectTrailingSlash or RedirectCleanPath. The default behavior is to return a 301 status, redirecting the browser to the version of the URL that matches the given pattern.
These are the values accepted for RedirectBehavior. You may also add these values to the RedirectMethodBehavior map to define custom per-method redirect behavior.
* Redirect301 - HTTP 301 Moved Permanently; this is the default.
* Redirect307 - HTTP/1.1 Temporary Redirect
* Redirect308 - RFC7538 Permanent Redirect
* UseHandler - Don't redirect to the canonical path. Just call the handler instead.
#### Rationale/Usage
On a POST request, most browsers that receive a 301 will submit a GET request to the redirected URL, meaning that any data will likely be lost. If you want to avoid this behavior, you may use Redirect307, which causes most browsers to resubmit the request using the original method and request body.
Since 307 is supposed to be a temporary redirect, the new 308 status code has been proposed, which is treated the same, except it indicates correctly that the redirection is permanent. The big caveat here is that the RFC is relatively recent, and older or non-compliant browsers will not handle it. Therefore its use is not recommended unless you really know what you're doing.
Finally, the UseHandler value will simply call the handler function for the pattern, without redirecting to the canonical version of the URL.
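A sketch of configuring these values; the field and constant names come from this package, while the per-method choices below are only illustrative:
```go
router := httptreemux.New()

// Keep the default 301 for most methods...
router.RedirectBehavior = httptreemux.Redirect301

// ...but preserve the method and body when redirecting POSTs,
// and skip redirects entirely for DELETE.
router.RedirectMethodBehavior["POST"] = httptreemux.Redirect307
router.RedirectMethodBehavior["DELETE"] = httptreemux.UseHandler
```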
### RequestURI vs. URL.Path
#### Escaped Slashes
Go automatically processes escaped characters in a URL, converting + to a space and %XX to the corresponding character. This can present issues when the URL contains a %2f, which is unescaped to '/'. This isn't an issue for most applications, but it will prevent the router from correctly matching paths and wildcards.
For example, the pattern `/post/:post` would not match on `/post/abc%2fdef`, which is unescaped to `/post/abc/def`. The desired behavior is that it matches, and the `post` wildcard is set to `abc/def`.
Therefore, this router defaults to using the raw URL, stored in the Request.RequestURI variable. Matching wildcards and catch-alls are then unescaped, to give the desired behavior.
TL;DR: If a requested URL contains a %2f, this router will still do the right thing. Some Go HTTP routers may not due to [Go issue 3659](https://code.google.com/p/go/issues/detail?id=3659).
#### Escaped Characters
As mentioned above, characters in the URL are not unescaped when using RequestURI to determine the matched route. If this is a problem for you and you are unable to switch to URL.Path for the above reasons, you may set `router.EscapeAddedRoutes` to `true`. This option will run each added route through the `URL.EscapedPath` function, and add an additional route if the escaped version differs.
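A short sketch of a route whose escaped form differs from the literal one; `cafeHandler` is a hypothetical handler:
```go
router := httptreemux.New()

// Register both "/café/:id" and its escaped form "/caf%C3%A9/:id",
// so requests arrive at the handler either way.
router.EscapeAddedRoutes = true
router.GET("/café/:id", cafeHandler)
```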
#### http Package Utility Functions
Although using RequestURI avoids the issue described above, certain utility functions such as `http.StripPrefix` modify URL.Path, and expect that the underlying router is using that field to make its decision. If you are using some of these functions, set the router's `PathSource` member to `URLPath`. This will give up the proper handling of escaped slashes described above, while allowing the router to work properly with these utility functions.
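For instance, when mounting the router behind `http.StripPrefix` (which rewrites `URL.Path` only), a sketch with a hypothetical `assetHandler` might look like this, at the cost of the escaped-slash handling described above:
```go
router := httptreemux.New()

// StripPrefix modifies r.URL.Path, not r.RequestURI, so route on URL.Path.
router.PathSource = httptreemux.URLPath

router.GET("/assets/*filepath", assetHandler)

// Requests to /static/assets/app.js are routed as /assets/app.js.
http.ListenAndServe(":8080", http.StripPrefix("/static", router))
```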
## Concurrency
The router contains an `RWMutex` that arbitrates access to the tree. This allows routes to be safely added from multiple goroutines at once.
No concurrency controls are needed when only reading from the tree, so the default behavior is to not use the `RWMutex` when serving a request. This avoids a theoretical slowdown under high-usage scenarios from competing atomic integer operations inside the `RWMutex`. If your application adds routes to the router after it has begun serving requests, you should avoid potential race conditions by setting `router.SafeAddRoutesWhileRunning` to `true` to use the `RWMutex` when serving requests.
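A sketch of the opt-in, assuming a hypothetical `lateHandler` registered after the server has started:
```go
router := httptreemux.New()

// Routes will be added after the server starts, so guard tree access.
router.SafeAddRoutesWhileRunning = true

go http.ListenAndServe(":8080", router)

// Safe to register later, e.g. when a plugin is loaded at runtime.
router.GET("/late/:id", lateHandler)
```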
## Error Handlers
### NotFoundHandler
TreeMux.NotFoundHandler can be set to provide custom 404-error handling. The default implementation is Go's `http.NotFound` function.
### MethodNotAllowedHandler
If a pattern matches, but the pattern does not have an associated handler for the requested method, the router calls the MethodNotAllowedHandler. The default version of this handler just writes the status code `http.StatusMethodNotAllowed` and sets the response's `Allow` header appropriately.
### Panic Handling
TreeMux.PanicHandler can be set to provide custom panic handling. The `SimplePanicHandler` just writes the status code `http.StatusInternalServerError`. The function `ShowErrorsPanicHandler`, adapted from [gocraft/web](https://github.com/gocraft/web), will print panic errors to the browser in an easily-readable format.
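A combined sketch of the three hooks, using only handlers and helpers defined by this package or net/http:
```go
router := httptreemux.New()

// Custom 404 response.
router.NotFoundHandler = func(w http.ResponseWriter, r *http.Request) {
	http.Error(w, "nothing here", http.StatusNotFound)
}

// Custom 405 response; methods maps each registered method to its handler.
router.MethodNotAllowedHandler = func(w http.ResponseWriter, r *http.Request, methods map[string]httptreemux.HandlerFunc) {
	for m := range methods {
		w.Header().Add("Allow", m)
	}
	http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
}

// Render panics in the browser while developing.
router.PanicHandler = httptreemux.ShowErrorsPanicHandler
```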
## Unexpected Differences from Other Routers
This router is intentionally light on features in the name of simplicity and
performance. When coming from another router that does heavier processing behind
the scenes, you may encounter some unexpected behavior. This list is by no means
exhaustive, but covers some nonobvious cases that users have encountered.
### gorilla/pat query string modifications
When matching on parameters in a route, the `gorilla/pat` router will modify
`Request.URL.RawQuery` to make it appear like the parameters were in the
query string. `httptreemux` does not do this. See [Issue #26](https://github.com/dimfeld/httptreemux/issues/26) for more details and a
code snippet that can perform this transformation for you, should you want it.
## Middleware
This package provides no middleware. But there are a lot of great options out there and it's pretty easy to write your own.
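Since `*TreeMux` implements `http.Handler`, any standard net/http middleware composes with it. A minimal, self-contained logging wrapper as an illustration:
```go
package main

import (
	"log"
	"net/http"

	"github.com/dimfeld/httptreemux"
)

// logRequests wraps any http.Handler and logs each request before serving it.
func logRequests(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		log.Printf("%s %s", r.Method, r.URL.Path)
		next.ServeHTTP(w, r)
	})
}

func main() {
	router := httptreemux.New()
	router.GET("/ping", func(w http.ResponseWriter, r *http.Request, params map[string]string) {
		w.Write([]byte("pong"))
	})
	http.ListenAndServe(":8080", logRequests(router))
}
```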
# Acknowledgements
* Inspiration from Julien Schmidt's [httprouter](https://github.com/julienschmidt/httprouter)
* Show Errors panic handler from [gocraft/web](https://github.com/gocraft/web)

View File

@ -1,123 +0,0 @@
// +build go1.7
package httptreemux
import (
"context"
"net/http"
)
// ContextGroup is a wrapper around Group, with the purpose of mimicking its API, but with the use of http.HandlerFunc-based handlers.
// Instead of passing a parameter map via the handler (i.e. httptreemux.HandlerFunc), the path parameters are accessed via the request
// object's context.
type ContextGroup struct {
group *Group
}
// UsingContext wraps the receiver to return a new instance of a ContextGroup.
// The returned ContextGroup is a sibling to its wrapped Group, within the parent TreeMux.
// The choice of using a *Group as the receiver, as opposed to a function parameter, allows chaining
// of method calls between a TreeMux, Group, and ContextGroup. For example:
//
// tree := httptreemux.New()
// group := tree.NewGroup("/api")
//
// group.GET("/v1", func(w http.ResponseWriter, r *http.Request, params map[string]string) {
// w.Write([]byte(`GET /api/v1`))
// })
//
// group.UsingContext().GET("/v2", func(w http.ResponseWriter, r *http.Request) {
// w.Write([]byte(`GET /api/v2`))
// })
//
// http.ListenAndServe(":8080", tree)
//
func (g *Group) UsingContext() *ContextGroup {
return &ContextGroup{g}
}
// NewContextGroup adds a child context group to its path.
func (cg *ContextGroup) NewContextGroup(path string) *ContextGroup {
return &ContextGroup{cg.group.NewGroup(path)}
}
func (cg *ContextGroup) NewGroup(path string) *ContextGroup {
return cg.NewContextGroup(path)
}
// Handle allows handling HTTP requests via an http.HandlerFunc, as opposed to an httptreemux.HandlerFunc.
// Any parameters from the request URL are stored in a map[string]string in the request's context.
func (cg *ContextGroup) Handle(method, path string, handler http.HandlerFunc) {
cg.group.Handle(method, path, func(w http.ResponseWriter, r *http.Request, params map[string]string) {
if params != nil {
r = r.WithContext(AddParamsToContext(r.Context(), params))
}
handler(w, r)
})
}
// Handler allows handling HTTP requests via an http.Handler interface, as opposed to an httptreemux.HandlerFunc.
// Any parameters from the request URL are stored in a map[string]string in the request's context.
func (cg *ContextGroup) Handler(method, path string, handler http.Handler) {
cg.group.Handle(method, path, func(w http.ResponseWriter, r *http.Request, params map[string]string) {
if params != nil {
r = r.WithContext(AddParamsToContext(r.Context(), params))
}
handler.ServeHTTP(w, r)
})
}
// GET is convenience method for handling GET requests on a context group.
func (cg *ContextGroup) GET(path string, handler http.HandlerFunc) {
cg.Handle("GET", path, handler)
}
// POST is convenience method for handling POST requests on a context group.
func (cg *ContextGroup) POST(path string, handler http.HandlerFunc) {
cg.Handle("POST", path, handler)
}
// PUT is convenience method for handling PUT requests on a context group.
func (cg *ContextGroup) PUT(path string, handler http.HandlerFunc) {
cg.Handle("PUT", path, handler)
}
// DELETE is convenience method for handling DELETE requests on a context group.
func (cg *ContextGroup) DELETE(path string, handler http.HandlerFunc) {
cg.Handle("DELETE", path, handler)
}
// PATCH is convenience method for handling PATCH requests on a context group.
func (cg *ContextGroup) PATCH(path string, handler http.HandlerFunc) {
cg.Handle("PATCH", path, handler)
}
// HEAD is convenience method for handling HEAD requests on a context group.
func (cg *ContextGroup) HEAD(path string, handler http.HandlerFunc) {
cg.Handle("HEAD", path, handler)
}
// OPTIONS is convenience method for handling OPTIONS requests on a context group.
func (cg *ContextGroup) OPTIONS(path string, handler http.HandlerFunc) {
cg.Handle("OPTIONS", path, handler)
}
// ContextParams returns the params map associated with the given context if one exists. Otherwise, an empty map is returned.
func ContextParams(ctx context.Context) map[string]string {
if p, ok := ctx.Value(paramsContextKey).(map[string]string); ok {
return p
}
return map[string]string{}
}
// AddParamsToContext inserts a parameters map into a context using
// the package's internal context key. Clients of this package should
// really only use this for unit tests.
func AddParamsToContext(ctx context.Context, params map[string]string) context.Context {
return context.WithValue(ctx, paramsContextKey, params)
}
type contextKey int
// paramsContextKey is used to retrieve a path's params map from a request's context.
const paramsContextKey contextKey = 0

View File

@ -1,195 +0,0 @@
package httptreemux
import (
"fmt"
"net/url"
"strings"
)
type Group struct {
path string
mux *TreeMux
}
// Add a sub-group to this group
func (g *Group) NewGroup(path string) *Group {
if len(path) < 1 {
panic("Group path must not be empty")
}
checkPath(path)
path = g.path + path
//Don't want trailing slash as all sub-paths start with slash
if path[len(path)-1] == '/' {
path = path[:len(path)-1]
}
return &Group{path, g.mux}
}
// Path elements starting with : indicate a wildcard in the path. A wildcard will only match on a
// single path segment. That is, the pattern `/post/:postid` will match on `/post/1` or `/post/1/`,
// but not `/post/1/2`.
//
// A path element starting with * is a catch-all, whose value will be a string containing all text
// in the URL matched by the wildcards. For example, with a pattern of `/images/*path` and a
// requested URL `images/abc/def`, path would contain `abc/def`.
//
// # Routing Rule Priority
//
// The priority rules in the router are simple.
//
// 1. Static path segments take the highest priority. If a segment and its subtree are able to match the URL, that match is returned.
//
// 2. Wildcards take second priority. For a particular wildcard to match, that wildcard and its subtree must match the URL.
//
// 3. Finally, a catch-all rule will match when the earlier path segments have matched, and none of the static or wildcard conditions have matched. Catch-all rules must be at the end of a pattern.
//
// So with the following patterns, we'll see certain matches:
// router = httptreemux.New()
// router.GET("/:page", pageHandler)
// router.GET("/:year/:month/:post", postHandler)
// router.GET("/:year/:month", archiveHandler)
// router.GET("/images/*path", staticHandler)
// router.GET("/favicon.ico", staticHandler)
//
// /abc will match /:page
// /2014/05 will match /:year/:month
// /2014/05/really-great-blog-post will match /:year/:month/:post
// /images/CoolImage.gif will match /images/*path
// /images/2014/05/MayImage.jpg will also match /images/*path, with all the text after /images stored in the variable path.
// /favicon.ico will match /favicon.ico
//
// # Trailing Slashes
//
// The router has special handling for paths with trailing slashes. If a pattern is added to the
// router with a trailing slash, any matches on that pattern without a trailing slash will be
// redirected to the version with the slash. If a pattern does not have a trailing slash, matches on
// that pattern with a trailing slash will be redirected to the version without.
//
// The trailing slash flag is only stored once for a pattern. That is, if a pattern is added for a
// method with a trailing slash, all other methods for that pattern will also be considered to have a
// trailing slash, regardless of whether or not it is specified for those methods too.
//
// This behavior can be turned off by setting TreeMux.RedirectTrailingSlash to false. By
// default it is set to true. The specifics of the redirect depend on RedirectBehavior.
//
// One exception to this rule is catch-all patterns. By default, trailing slash redirection is
// disabled on catch-all patterns, since the structure of the entire URL and the desired patterns
// can not be predicted. If trailing slash removal is desired on catch-all patterns, set
// TreeMux.RemoveCatchAllTrailingSlash to true.
//
// router = httptreemux.New()
// router.GET("/about", pageHandler)
// router.GET("/posts/", postIndexHandler)
// router.POST("/posts", postFormHandler)
//
// GET /about will match normally.
// GET /about/ will redirect to /about.
// GET /posts will redirect to /posts/.
// GET /posts/ will match normally.
// POST /posts will redirect to /posts/, because the GET method used a trailing slash.
func (g *Group) Handle(method string, path string, handler HandlerFunc) {
g.mux.mutex.Lock()
defer g.mux.mutex.Unlock()
addSlash := false
addOne := func(thePath string) {
node := g.mux.root.addPath(thePath[1:], nil, false)
if addSlash {
node.addSlash = true
}
node.setHandler(method, handler, false)
if g.mux.HeadCanUseGet && method == "GET" && node.leafHandler["HEAD"] == nil {
node.setHandler("HEAD", handler, true)
}
}
checkPath(path)
path = g.path + path
if len(path) == 0 {
panic("Cannot map an empty path")
}
if len(path) > 1 && path[len(path)-1] == '/' && g.mux.RedirectTrailingSlash {
addSlash = true
path = path[:len(path)-1]
}
if g.mux.EscapeAddedRoutes {
u, err := url.ParseRequestURI(path)
if err != nil {
panic("URL parsing error " + err.Error() + " on url " + path)
}
escapedPath := unescapeSpecial(u.String())
if escapedPath != path {
addOne(escapedPath)
}
}
addOne(path)
}
// Syntactic sugar for Handle("GET", path, handler)
func (g *Group) GET(path string, handler HandlerFunc) {
g.Handle("GET", path, handler)
}
// Syntactic sugar for Handle("POST", path, handler)
func (g *Group) POST(path string, handler HandlerFunc) {
g.Handle("POST", path, handler)
}
// Syntactic sugar for Handle("PUT", path, handler)
func (g *Group) PUT(path string, handler HandlerFunc) {
g.Handle("PUT", path, handler)
}
// Syntactic sugar for Handle("DELETE", path, handler)
func (g *Group) DELETE(path string, handler HandlerFunc) {
g.Handle("DELETE", path, handler)
}
// Syntactic sugar for Handle("PATCH", path, handler)
func (g *Group) PATCH(path string, handler HandlerFunc) {
g.Handle("PATCH", path, handler)
}
// Syntactic sugar for Handle("HEAD", path, handler)
func (g *Group) HEAD(path string, handler HandlerFunc) {
g.Handle("HEAD", path, handler)
}
// Syntactic sugar for Handle("OPTIONS", path, handler)
func (g *Group) OPTIONS(path string, handler HandlerFunc) {
g.Handle("OPTIONS", path, handler)
}
func checkPath(path string) {
// All non-empty paths must start with a slash
if len(path) > 0 && path[0] != '/' {
panic(fmt.Sprintf("Path %s must start with slash", path))
}
}
func unescapeSpecial(s string) string {
// Look for sequences of \*, *, and \: that were escaped, and undo some of that escaping.
// Unescape /* since it references a wildcard token.
s = strings.Replace(s, "/%2A", "/*", -1)
// Unescape /\: since it references a literal colon
s = strings.Replace(s, "/%5C:", "/\\:", -1)
// Replace escaped /\\: with /\:
s = strings.Replace(s, "/%5C%5C:", "/%5C:", -1)
// Replace escaped /\* with /*
s = strings.Replace(s, "/%5C%2A", "/%2A", -1)
// Replace escaped /\\* with /\*
s = strings.Replace(s, "/%5C%5C%2A", "/%5C%2A", -1)
return s
}

View File

@ -1,211 +0,0 @@
package httptreemux
import (
"bufio"
"encoding/json"
"html/template"
"net/http"
"os"
"runtime"
"strings"
)
// SimplePanicHandler just returns error 500.
func SimplePanicHandler(w http.ResponseWriter, r *http.Request, err interface{}) {
w.WriteHeader(http.StatusInternalServerError)
}
// ShowErrorsPanicHandler prints a nice representation of an error to the browser.
// This was taken from github.com/gocraft/web, which adapted it from the Traffic project.
func ShowErrorsPanicHandler(w http.ResponseWriter, r *http.Request, err interface{}) {
const size = 4096
stack := make([]byte, size)
stack = stack[:runtime.Stack(stack, false)]
renderPrettyError(w, r, err, stack)
}
func makeErrorData(r *http.Request, err interface{}, stack []byte, filePath string, line int) map[string]interface{} {
data := map[string]interface{}{
"Stack": string(stack),
"Params": r.URL.Query(),
"Method": r.Method,
"FilePath": filePath,
"Line": line,
"Lines": readErrorFileLines(filePath, line),
}
if e, ok := err.(error); ok {
data["Error"] = e.Error()
} else {
data["Error"] = err
}
return data
}
func renderPrettyError(rw http.ResponseWriter, req *http.Request, err interface{}, stack []byte) {
_, filePath, line, _ := runtime.Caller(5)
data := makeErrorData(req, err, stack, filePath, line)
rw.Header().Set("Content-Type", "text/html")
rw.WriteHeader(http.StatusInternalServerError)
tpl := template.Must(template.New("ErrorPage").Parse(panicPageTpl))
tpl.Execute(rw, data)
}
func ShowErrorsJsonPanicHandler(w http.ResponseWriter, r *http.Request, err interface{}) {
const size = 4096
stack := make([]byte, size)
stack = stack[:runtime.Stack(stack, false)]
_, filePath, line, _ := runtime.Caller(4)
data := makeErrorData(r, err, stack, filePath, line)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusInternalServerError)
json.NewEncoder(w).Encode(data)
}
func readErrorFileLines(filePath string, errorLine int) map[int]string {
lines := make(map[int]string)
file, err := os.Open(filePath)
if err != nil {
return lines
}
defer file.Close()
reader := bufio.NewReader(file)
currentLine := 0
for {
line, err := reader.ReadString('\n')
if err != nil || currentLine > errorLine+5 {
break
}
currentLine++
if currentLine >= errorLine-5 {
lines[currentLine] = strings.Replace(line, "\n", "", -1)
}
}
return lines
}
const panicPageTpl string = `
<html>
<head>
<title>Panic</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<style>
html, body{ padding: 0; margin: 0; }
header { background: #C52F24; color: white; border-bottom: 2px solid #9C0606; }
h1 { padding: 10px 0; margin: 0; }
.container { margin: 0 20px; }
.error { font-size: 18px; background: #FFCCCC; color: #9C0606; padding: 10px 0; }
.file-info .file-name { font-weight: bold; }
.stack { height: 300px; overflow-y: scroll; border: 1px solid #e5e5e5; padding: 10px; }
table.source {
width: 100%;
border-collapse: collapse;
border: 1px solid #e5e5e5;
}
table.source td {
padding: 0;
}
table.source .numbers {
font-size: 14px;
vertical-align: top;
width: 1%;
color: rgba(0,0,0,0.3);
text-align: right;
}
table.source .numbers .number {
display: block;
padding: 0 5px;
border-right: 1px solid #e5e5e5;
}
table.source .numbers .number.line-{{ .Line }} {
border-right: 1px solid #ffcccc;
}
table.source .numbers pre {
white-space: pre-wrap;
}
table.source .code {
font-size: 14px;
vertical-align: top;
}
table.source .code .line {
padding-left: 10px;
display: block;
}
table.source .numbers .number,
table.source .code .line {
padding-top: 1px;
padding-bottom: 1px;
}
table.source .code .line:hover {
background-color: #f6f6f6;
}
table.source .line-{{ .Line }},
table.source line-{{ .Line }},
table.source .code .line.line-{{ .Line }}:hover {
background: #ffcccc;
}
</style>
</head>
<body>
<header>
<div class="container">
<h1>Error</h1>
</div>
</header>
<div class="error">
<p class="container">{{ .Error }}</p>
</div>
<div class="container">
<p class="file-info">
In <span class="file-name">{{ .FilePath }}:{{ .Line }}</span></p>
</p>
<table class="source">
<tr>
<td class="numbers">
<pre>{{ range $lineNumber, $line := .Lines }}<span class="number line-{{ $lineNumber }}">{{ $lineNumber }}</span>{{ end }}</pre>
</td>
<td class="code">
<pre>{{ range $lineNumber, $line := .Lines }}<span class="line line-{{ $lineNumber }}">{{ $line }}<br /></span>{{ end }}</pre>
</td>
</tr>
</table>
<h2>Stack</h2>
<pre class="stack">{{ .Stack }}</pre>
<h2>Request</h2>
<p><strong>Method:</strong> {{ .Method }}</p>
<h3>Parameters:</h3>
<ul>
{{ range $key, $value := .Params }}
<li><strong>{{ $key }}:</strong> {{ $value }}</li>
{{ end }}
</ul>
</div>
</body>
</html>
`

View File

@ -1,127 +0,0 @@
// Copyright 2013 Julien Schmidt. All rights reserved.
// Based on the path package, Copyright 2009 The Go Authors.
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
package httptreemux
// Clean is the URL version of path.Clean, it returns a canonical URL path
// for p, eliminating . and .. elements.
//
// The following rules are applied iteratively until no further processing can
// be done:
// 1. Replace multiple slashes with a single slash.
// 2. Eliminate each . path name element (the current directory).
// 3. Eliminate each inner .. path name element (the parent directory)
// along with the non-.. element that precedes it.
// 4. Eliminate .. elements that begin a rooted path:
// that is, replace "/.." by "/" at the beginning of a path.
//
// If the result of this process is an empty string, "/" is returned
func Clean(p string) string {
if p == "" {
return "/"
}
n := len(p)
var buf []byte
// Invariants:
// reading from path; r is index of next byte to process.
// writing to buf; w is index of next byte to write.
// path must start with '/'
r := 1
w := 1
if p[0] != '/' {
r = 0
buf = make([]byte, n+1)
buf[0] = '/'
}
trailing := n > 2 && p[n-1] == '/'
// A bit more clunky without a 'lazybuf' like the path package, but the loop
// gets completely inlined (bufApp). So in contrast to the path package this
// loop has no expensive function calls (except 1x make)
for r < n {
switch {
case p[r] == '/':
// empty path element, trailing slash is added after the end
r++
case p[r] == '.' && r+1 == n:
trailing = true
r++
case p[r] == '.' && p[r+1] == '/':
// . element
r++
case p[r] == '.' && p[r+1] == '.' && (r+2 == n || p[r+2] == '/'):
// .. element: remove to last /
r += 2
if w > 1 {
// can backtrack
w--
if buf == nil {
for w > 1 && p[w] != '/' {
w--
}
} else {
for w > 1 && buf[w] != '/' {
w--
}
}
}
default:
// real path element.
// add slash if needed
if w > 1 {
bufApp(&buf, p, w, '/')
w++
}
// copy element
for r < n && p[r] != '/' {
bufApp(&buf, p, w, p[r])
w++
r++
}
}
}
// re-append trailing slash
if trailing && w > 1 {
bufApp(&buf, p, w, '/')
w++
}
// Turn empty string into "/"
if w == 0 {
return "/"
}
if buf == nil {
return p[:w]
}
return string(buf[:w])
}
// internal helper to lazily create a buffer if necessary
func bufApp(buf *[]byte, s string, w int, c byte) {
if *buf == nil {
if s[w] == c {
return
}
*buf = make([]byte, len(s))
copy(*buf, s[:w])
}
(*buf)[w] = c
}

View File

@ -1,300 +0,0 @@
// This is inspired by Julien Schmidt's httprouter, in that it uses a patricia tree, but the
// implementation is rather different. Specifically, the routing rules are relaxed so that a
// single path segment may be a wildcard in one route and a static token in another. This gives a
// nice combination of high performance with a lot of convenience in designing the routing patterns.
package httptreemux
import (
"fmt"
"net/http"
"net/url"
)
// The params argument contains the parameters parsed from wildcards and catch-alls in the URL.
type HandlerFunc func(http.ResponseWriter, *http.Request, map[string]string)
type PanicHandler func(http.ResponseWriter, *http.Request, interface{})
// RedirectBehavior sets the behavior when the router redirects the request to the
// canonical version of the requested URL using RedirectTrailingSlash or RedirectClean.
// The default behavior is to return a 301 status, redirecting the browser to the version
// of the URL that matches the given pattern.
//
// On a POST request, most browsers that receive a 301 will submit a GET request to
// the redirected URL, meaning that any data will likely be lost. If you want to handle
// and avoid this behavior, you may use Redirect307, which causes most browsers to
// resubmit the request using the original method and request body.
//
// Since 307 is supposed to be a temporary redirect, the new 308 status code has been
// proposed, which is treated the same, except it indicates correctly that the redirection
// is permanent. The big caveat here is that the RFC is relatively recent, and older
// browsers will not know what to do with it. Therefore its use is not recommended
// unless you really know what you're doing.
//
// Finally, the UseHandler value will simply call the handler function for the pattern.
type RedirectBehavior int
type PathSource int
const (
Redirect301 RedirectBehavior = iota // Return 301 Moved Permanently
Redirect307 // Return 307 HTTP/1.1 Temporary Redirect
Redirect308 // Return a 308 RFC7538 Permanent Redirect
UseHandler // Just call the handler function
RequestURI PathSource = iota // Use r.RequestURI
URLPath // Use r.URL.Path
)
// LookupResult contains information about a route lookup, which is returned from Lookup and
// can be passed to ServeLookupResult if the request should be served.
type LookupResult struct {
// StatusCode informs the caller about the result of the lookup.
// This will generally be `http.StatusNotFound` or `http.StatusMethodNotAllowed` for an
// error case. On a normal success, the statusCode will be `http.StatusOK`. A redirect status code
// will be used when the lookup results in a redirect to the canonical path.
StatusCode int
handler HandlerFunc
params map[string]string
leafHandler map[string]HandlerFunc // Only has a value when StatusCode is MethodNotAllowed.
}
// Dump returns a text representation of the routing tree.
func (t *TreeMux) Dump() string {
return t.root.dumpTree("", "")
}
func (t *TreeMux) serveHTTPPanic(w http.ResponseWriter, r *http.Request) {
if err := recover(); err != nil {
t.PanicHandler(w, r, err)
}
}
func (t *TreeMux) redirectStatusCode(method string) (int, bool) {
var behavior RedirectBehavior
var ok bool
if behavior, ok = t.RedirectMethodBehavior[method]; !ok {
behavior = t.RedirectBehavior
}
switch behavior {
case Redirect301:
return http.StatusMovedPermanently, true
case Redirect307:
return http.StatusTemporaryRedirect, true
case Redirect308:
// Go doesn't have a constant for this yet. Yet another sign
// that you probably shouldn't use it.
return 308, true
case UseHandler:
return 0, false
default:
return http.StatusMovedPermanently, true
}
}
func redirectHandler(newPath string, statusCode int) HandlerFunc {
return func(w http.ResponseWriter, r *http.Request, params map[string]string) {
redirect(w, r, newPath, statusCode)
}
}
func redirect(w http.ResponseWriter, r *http.Request, newPath string, statusCode int) {
newURL := url.URL{
Path: newPath,
RawQuery: r.URL.RawQuery,
Fragment: r.URL.Fragment,
}
http.Redirect(w, r, newURL.String(), statusCode)
}
func (t *TreeMux) lookup(w http.ResponseWriter, r *http.Request) (result LookupResult, found bool) {
result.StatusCode = http.StatusNotFound
path := r.RequestURI
unescapedPath := r.URL.Path
pathLen := len(path)
if pathLen > 0 && t.PathSource == RequestURI {
rawQueryLen := len(r.URL.RawQuery)
if rawQueryLen != 0 || path[pathLen-1] == '?' {
// Remove any query string and the ?.
path = path[:pathLen-rawQueryLen-1]
pathLen = len(path)
}
} else {
// In testing with http.NewRequest,
// RequestURI is not set so just grab URL.Path instead.
path = r.URL.Path
pathLen = len(path)
}
trailingSlash := path[pathLen-1] == '/' && pathLen > 1
if trailingSlash && t.RedirectTrailingSlash {
path = path[:pathLen-1]
unescapedPath = unescapedPath[:len(unescapedPath)-1]
}
n, handler, params := t.root.search(r.Method, path[1:])
if n == nil {
if t.RedirectCleanPath {
// Path was not found. Try cleaning it up and search again.
// TODO Test this
cleanPath := Clean(unescapedPath)
n, handler, params = t.root.search(r.Method, cleanPath[1:])
if n == nil {
// Still nothing found.
return
}
if statusCode, ok := t.redirectStatusCode(r.Method); ok {
// Redirect to the actual path
return LookupResult{statusCode, redirectHandler(cleanPath, statusCode), nil, nil}, true
}
} else {
// Not found.
return
}
}
if handler == nil {
if r.Method == "OPTIONS" && t.OptionsHandler != nil {
handler = t.OptionsHandler
}
if handler == nil {
result.leafHandler = n.leafHandler
result.StatusCode = http.StatusMethodNotAllowed
return
}
}
if !n.isCatchAll || t.RemoveCatchAllTrailingSlash {
if trailingSlash != n.addSlash && t.RedirectTrailingSlash {
if statusCode, ok := t.redirectStatusCode(r.Method); ok {
var h HandlerFunc
if n.addSlash {
// Need to add a slash.
h = redirectHandler(unescapedPath+"/", statusCode)
} else if path != "/" {
// We need to remove the slash. This was already done at the
// beginning of the function.
h = redirectHandler(unescapedPath, statusCode)
}
if h != nil {
return LookupResult{statusCode, h, nil, nil}, true
}
}
}
}
var paramMap map[string]string
if len(params) != 0 {
if len(params) != len(n.leafWildcardNames) {
// Need better behavior here. Should this be a panic?
panic(fmt.Sprintf("httptreemux parameter list length mismatch: %v, %v",
params, n.leafWildcardNames))
}
paramMap = make(map[string]string)
numParams := len(params)
for index := 0; index < numParams; index++ {
paramMap[n.leafWildcardNames[numParams-index-1]] = params[index]
}
}
return LookupResult{http.StatusOK, handler, paramMap, nil}, true
}
// Lookup performs a lookup without actually serving the request or mutating the request or response.
// The return values are a LookupResult and a boolean. The boolean will be true when a handler
// was found or the lookup resulted in a redirect which will point to a real handler. It is false
// for requests which would result in a `StatusNotFound` or `StatusMethodNotAllowed`.
//
// Regardless of the returned boolean's value, the LookupResult may be passed to ServeLookupResult
// to be served appropriately.
func (t *TreeMux) Lookup(w http.ResponseWriter, r *http.Request) (LookupResult, bool) {
if t.SafeAddRoutesWhileRunning {
// In concurrency safe mode, we acquire a read lock on the mutex for any access.
// This is optional to avoid potential performance loss in high-usage scenarios.
t.mutex.RLock()
}
result, found := t.lookup(w, r)
if t.SafeAddRoutesWhileRunning {
t.mutex.RUnlock()
}
return result, found
}
// ServeLookupResult serves a request, given a lookup result from the Lookup function.
func (t *TreeMux) ServeLookupResult(w http.ResponseWriter, r *http.Request, lr LookupResult) {
if lr.handler == nil {
if lr.StatusCode == http.StatusMethodNotAllowed && lr.leafHandler != nil {
if t.SafeAddRoutesWhileRunning {
t.mutex.RLock()
}
t.MethodNotAllowedHandler(w, r, lr.leafHandler)
if t.SafeAddRoutesWhileRunning {
t.mutex.RUnlock()
}
} else {
t.NotFoundHandler(w, r)
}
} else {
r = t.setDefaultRequestContext(r)
lr.handler(w, r, lr.params)
}
}
func (t *TreeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if t.PanicHandler != nil {
defer t.serveHTTPPanic(w, r)
}
if t.SafeAddRoutesWhileRunning {
// In concurrency safe mode, we acquire a read lock on the mutex for any access.
// This is optional to avoid potential performance loss in high-usage scenarios.
t.mutex.RLock()
}
result, _ := t.lookup(w, r)
if t.SafeAddRoutesWhileRunning {
t.mutex.RUnlock()
}
t.ServeLookupResult(w, r, result)
}
// MethodNotAllowedHandler is the default handler for TreeMux.MethodNotAllowedHandler,
// which is called for patterns that match, but do not have a handler installed for the
// requested method. It simply writes the status code http.StatusMethodNotAllowed and fills
// in the `Allow` header value appropriately.
func MethodNotAllowedHandler(w http.ResponseWriter, r *http.Request,
methods map[string]HandlerFunc) {
for m := range methods {
w.Header().Add("Allow", m)
}
w.WriteHeader(http.StatusMethodNotAllowed)
}
func New() *TreeMux {
tm := &TreeMux{
root: &node{path: "/"},
NotFoundHandler: http.NotFound,
MethodNotAllowedHandler: MethodNotAllowedHandler,
HeadCanUseGet: true,
RedirectTrailingSlash: true,
RedirectCleanPath: true,
RedirectBehavior: Redirect301,
RedirectMethodBehavior: make(map[string]RedirectBehavior),
PathSource: RequestURI,
EscapeAddedRoutes: false,
}
tm.Group.mux = tm
return tm
}

View File

@ -1,340 +0,0 @@
package httptreemux
import (
"fmt"
"strings"
)
type node struct {
path string
priority int
// The list of static children to check.
staticIndices []byte
staticChild []*node
// If none of the above match, check the wildcard children
wildcardChild *node
// If none of the above match, then we use the catch-all, if applicable.
catchAllChild *node
// Data for the node is below.
addSlash bool
isCatchAll bool
// If true, the head handler was set implicitly, so let it also be set explicitly.
implicitHead bool
// If this node is the end of the URL, then call the handler, if applicable.
leafHandler map[string]HandlerFunc
// The names of the parameters to apply.
leafWildcardNames []string
}
func (n *node) sortStaticChild(i int) {
for i > 0 && n.staticChild[i].priority > n.staticChild[i-1].priority {
n.staticChild[i], n.staticChild[i-1] = n.staticChild[i-1], n.staticChild[i]
n.staticIndices[i], n.staticIndices[i-1] = n.staticIndices[i-1], n.staticIndices[i]
i -= 1
}
}
func (n *node) setHandler(verb string, handler HandlerFunc, implicitHead bool) {
if n.leafHandler == nil {
n.leafHandler = make(map[string]HandlerFunc)
}
_, ok := n.leafHandler[verb]
if ok && (verb != "HEAD" || !n.implicitHead) {
panic(fmt.Sprintf("%s already handles %s", n.path, verb))
}
n.leafHandler[verb] = handler
if verb == "HEAD" {
n.implicitHead = implicitHead
}
}
func (n *node) addPath(path string, wildcards []string, inStaticToken bool) *node {
leaf := len(path) == 0
if leaf {
if wildcards != nil {
// Make sure the current wildcards are the same as the old ones.
// If not then we have an ambiguous path.
if n.leafWildcardNames != nil {
if len(n.leafWildcardNames) != len(wildcards) {
// This should never happen.
panic("Reached leaf node with differing wildcard array length. Please report this as a bug.")
}
for i := 0; i < len(wildcards); i++ {
if n.leafWildcardNames[i] != wildcards[i] {
panic(fmt.Sprintf("Wildcards %v are ambiguous with wildcards %v",
n.leafWildcardNames, wildcards))
}
}
} else {
// No wildcards yet, so just add the existing set.
n.leafWildcardNames = wildcards
}
}
return n
}
c := path[0]
nextSlash := strings.Index(path, "/")
var thisToken string
var tokenEnd int
if c == '/' {
// Done processing the previous token, so reset inStaticToken to false.
thisToken = "/"
tokenEnd = 1
} else if nextSlash == -1 {
thisToken = path
tokenEnd = len(path)
} else {
thisToken = path[0:nextSlash]
tokenEnd = nextSlash
}
remainingPath := path[tokenEnd:]
if c == '*' && !inStaticToken {
// Token starts with a *, so it's a catch-all
thisToken = thisToken[1:]
if n.catchAllChild == nil {
n.catchAllChild = &node{path: thisToken, isCatchAll: true}
}
if path[1:] != n.catchAllChild.path {
panic(fmt.Sprintf("Catch-all name in %s doesn't match %s. You probably tried to define overlapping catchalls",
path, n.catchAllChild.path))
}
if nextSlash != -1 {
panic("/ after catch-all found in " + path)
}
if wildcards == nil {
wildcards = []string{thisToken}
} else {
wildcards = append(wildcards, thisToken)
}
n.catchAllChild.leafWildcardNames = wildcards
return n.catchAllChild
} else if c == ':' && !inStaticToken {
// Token starts with a :
thisToken = thisToken[1:]
if wildcards == nil {
wildcards = []string{thisToken}
} else {
wildcards = append(wildcards, thisToken)
}
if n.wildcardChild == nil {
n.wildcardChild = &node{path: "wildcard"}
}
return n.wildcardChild.addPath(remainingPath, wildcards, false)
} else {
// if strings.ContainsAny(thisToken, ":*") {
// panic("* or : in middle of path component " + path)
// }
unescaped := false
if len(thisToken) >= 2 && !inStaticToken {
if thisToken[0] == '\\' && (thisToken[1] == '*' || thisToken[1] == ':' || thisToken[1] == '\\') {
// The token starts with a character escaped by a backslash. Drop the backslash.
c = thisToken[1]
thisToken = thisToken[1:]
unescaped = true
}
}
// Set inStaticToken to ensure that the rest of this token is not mistaken
// for a wildcard if a prefix split occurs at a '*' or ':'.
inStaticToken = (c != '/')
// Do we have an existing node that starts with the same letter?
for i, index := range n.staticIndices {
if c == index {
// Yes. Split it based on the common prefix of the existing
// node and the new one.
child, prefixSplit := n.splitCommonPrefix(i, thisToken)
child.priority++
n.sortStaticChild(i)
if unescaped {
// Account for the removed backslash.
prefixSplit++
}
return child.addPath(path[prefixSplit:], wildcards, inStaticToken)
}
}
// No existing node starting with this letter, so create it.
child := &node{path: thisToken}
if n.staticIndices == nil {
n.staticIndices = []byte{c}
n.staticChild = []*node{child}
} else {
n.staticIndices = append(n.staticIndices, c)
n.staticChild = append(n.staticChild, child)
}
return child.addPath(remainingPath, wildcards, inStaticToken)
}
}
func (n *node) splitCommonPrefix(existingNodeIndex int, path string) (*node, int) {
childNode := n.staticChild[existingNodeIndex]
if strings.HasPrefix(path, childNode.path) {
// No split needs to be done. Rather, the new path shares the entire
// prefix with the existing node, so the new node is just a child of
// the existing one. Or the new path is the same as the existing path,
// which means that we just move on to the next token. Either way,
// this return accomplishes that
return childNode, len(childNode.path)
}
var i int
// Find the length of the common prefix of the child node and the new path.
for i = range childNode.path {
if i == len(path) {
break
}
if path[i] != childNode.path[i] {
break
}
}
commonPrefix := path[0:i]
childNode.path = childNode.path[i:]
// Create a new intermediary node in the place of the existing node, with
// the existing node as a child.
newNode := &node{
path: commonPrefix,
priority: childNode.priority,
// Index is the first letter of the non-common part of the path.
staticIndices: []byte{childNode.path[0]},
staticChild: []*node{childNode},
}
n.staticChild[existingNodeIndex] = newNode
return newNode, i
}
func (n *node) search(method, path string) (found *node, handler HandlerFunc, params []string) {
// if test != nil {
// test.Logf("Searching for %s in %s", path, n.dumpTree("", ""))
// }
pathLen := len(path)
if pathLen == 0 {
if len(n.leafHandler) == 0 {
return nil, nil, nil
} else {
return n, n.leafHandler[method], nil
}
}
// First see if this matches a static token.
firstChar := path[0]
for i, staticIndex := range n.staticIndices {
if staticIndex == firstChar {
child := n.staticChild[i]
childPathLen := len(child.path)
if pathLen >= childPathLen && child.path == path[:childPathLen] {
nextPath := path[childPathLen:]
found, handler, params = child.search(method, nextPath)
}
break
}
}
// If we found a node and it had a valid handler, then return here. Otherwise
// let's remember that we found this one, but look for a better match.
if handler != nil {
return
}
if n.wildcardChild != nil {
// Didn't find a static token, so check for a wildcard.
nextSlash := strings.IndexByte(path, '/')
if nextSlash < 0 {
nextSlash = pathLen
}
thisToken := path[0:nextSlash]
nextToken := path[nextSlash:]
if len(thisToken) > 0 { // Don't match on empty tokens.
wcNode, wcHandler, wcParams := n.wildcardChild.search(method, nextToken)
if wcHandler != nil || (found == nil && wcNode != nil) {
unescaped, err := unescape(thisToken)
if err != nil {
unescaped = thisToken
}
if wcParams == nil {
wcParams = []string{unescaped}
} else {
wcParams = append(wcParams, unescaped)
}
if wcHandler != nil {
return wcNode, wcHandler, wcParams
}
// Didn't actually find a handler here, so remember that we
// found a node but also see if we can fall through to the
// catchall.
found = wcNode
handler = wcHandler
params = wcParams
}
}
}
catchAllChild := n.catchAllChild
if catchAllChild != nil {
// Hit the catchall, so just assign the whole remaining path if it
// has a matching handler.
handler = catchAllChild.leafHandler[method]
// Found a handler, or we found a catchall node without a handler.
// Either way, return it since there's nothing left to check after this.
if handler != nil || found == nil {
unescaped, err := unescape(path)
if err != nil {
unescaped = path
}
return catchAllChild, handler, []string{unescaped}
}
}
return found, handler, params
}
func (n *node) dumpTree(prefix, nodeType string) string {
line := fmt.Sprintf("%s %02d %s%s [%d] %v wildcards %v\n", prefix, n.priority, nodeType, n.path,
len(n.staticChild), n.leafHandler, n.leafWildcardNames)
prefix += " "
for _, node := range n.staticChild {
line += node.dumpTree(prefix, "")
}
if n.wildcardChild != nil {
line += n.wildcardChild.dumpTree(prefix, ":")
}
if n.catchAllChild != nil {
line += n.catchAllChild.dumpTree(prefix, "*")
}
return line
}

View File

@ -1,86 +0,0 @@
// +build !go1.7
package httptreemux
import (
"net/http"
"sync"
)
type TreeMux struct {
root *node
mutex sync.RWMutex
Group
// The default PanicHandler just returns a 500 code.
PanicHandler PanicHandler
// The default NotFoundHandler is http.NotFound.
NotFoundHandler func(w http.ResponseWriter, r *http.Request)
// Any OPTIONS request that matches a path without its own OPTIONS handler will use this handler,
// if set, instead of calling MethodNotAllowedHandler.
OptionsHandler HandlerFunc
// MethodNotAllowedHandler is called when a pattern matches, but that
// pattern does not have a handler for the requested method. The default
// handler just writes the status code http.StatusMethodNotAllowed and adds
// the required Allowed header.
// The methods parameter contains the map of each method to the corresponding
// handler function.
MethodNotAllowedHandler func(w http.ResponseWriter, r *http.Request,
methods map[string]HandlerFunc)
// HeadCanUseGet allows the router to use the GET handler to respond to
// HEAD requests if no explicit HEAD handler has been added for the
// matching pattern. This is true by default.
HeadCanUseGet bool
// RedirectCleanPath allows the router to try clean the current request path,
// if no handler is registered for it, using CleanPath from github.com/dimfeld/httppath.
// This is true by default.
RedirectCleanPath bool
// RedirectTrailingSlash enables automatic redirection in case router doesn't find a matching route
// for the current request path but a handler for the path with or without the trailing
// slash exists. This is true by default.
RedirectTrailingSlash bool
// RemoveCatchAllTrailingSlash removes the trailing slash when a catch-all pattern
// is matched, if set to true. By default, catch-all paths are never redirected.
RemoveCatchAllTrailingSlash bool
// RedirectBehavior sets the default redirect behavior when RedirectTrailingSlash or
// RedirectCleanPath are true. The default value is Redirect301.
RedirectBehavior RedirectBehavior
// RedirectMethodBehavior overrides the default behavior for a particular HTTP method.
// The key is the method name, and the value is the behavior to use for that method.
RedirectMethodBehavior map[string]RedirectBehavior
// PathSource determines from where the router gets its path to search.
// By default it pulls the data from the RequestURI member, but this can
// be overridden to use URL.Path instead.
//
// There is a small tradeoff here. Using RequestURI allows the router to handle
// encoded slashes (i.e. %2f) in the URL properly, while URL.Path provides
// better compatibility with some utility functions in the http
// library that modify the Request before passing it to the router.
PathSource PathSource
// EscapeAddedRoutes controls URI escaping behavior when adding a route to the tree.
// If set to true, the router will add both the route as originally passed, and
// a version passed through URL.EscapedPath. This behavior is disabled by default.
EscapeAddedRoutes bool
// SafeAddRoutesWhileRunning tells the router to protect all accesses to the tree with an RWMutex. This is only needed
// if you are going to add routes after the router has already begun serving requests. There is a potential
// performance penalty at high load.
SafeAddRoutesWhileRunning bool
}
func (t *TreeMux) setDefaultRequestContext(r *http.Request) *http.Request {
// Nothing to do on Go 1.6 and before
return r
}

View File

@ -1,149 +0,0 @@
// +build go1.7
package httptreemux
import (
"context"
"net/http"
"sync"
)
type TreeMux struct {
root *node
mutex sync.RWMutex
Group
// The default PanicHandler just returns a 500 code.
PanicHandler PanicHandler
// The default NotFoundHandler is http.NotFound.
NotFoundHandler func(w http.ResponseWriter, r *http.Request)
// Any OPTIONS request that matches a path without its own OPTIONS handler will use this handler,
// if set, instead of calling MethodNotAllowedHandler.
OptionsHandler HandlerFunc
// MethodNotAllowedHandler is called when a pattern matches, but that
// pattern does not have a handler for the requested method. The default
// handler just writes the status code http.StatusMethodNotAllowed and adds
// the required Allowed header.
// The methods parameter contains the map of each method to the corresponding
// handler function.
MethodNotAllowedHandler func(w http.ResponseWriter, r *http.Request,
methods map[string]HandlerFunc)
// HeadCanUseGet allows the router to use the GET handler to respond to
// HEAD requests if no explicit HEAD handler has been added for the
// matching pattern. This is true by default.
HeadCanUseGet bool
// RedirectCleanPath allows the router to try clean the current request path,
// if no handler is registered for it, using CleanPath from github.com/dimfeld/httppath.
// This is true by default.
RedirectCleanPath bool
// RedirectTrailingSlash enables automatic redirection in case router doesn't find a matching route
// for the current request path but a handler for the path with or without the trailing
// slash exists. This is true by default.
RedirectTrailingSlash bool
// RemoveCatchAllTrailingSlash removes the trailing slash when a catch-all pattern
// is matched, if set to true. By default, catch-all paths are never redirected.
RemoveCatchAllTrailingSlash bool
// RedirectBehavior sets the default redirect behavior when RedirectTrailingSlash or
// RedirectCleanPath are true. The default value is Redirect301.
RedirectBehavior RedirectBehavior
// RedirectMethodBehavior overrides the default behavior for a particular HTTP method.
// The key is the method name, and the value is the behavior to use for that method.
RedirectMethodBehavior map[string]RedirectBehavior
// PathSource determines from where the router gets its path to search.
// By default it pulls the data from the RequestURI member, but this can
// be overridden to use URL.Path instead.
//
// There is a small tradeoff here. Using RequestURI allows the router to handle
// encoded slashes (i.e. %2f) in the URL properly, while URL.Path provides
// better compatibility with some utility functions in the http
// library that modify the Request before passing it to the router.
PathSource PathSource
// EscapeAddedRoutes controls URI escaping behavior when adding a route to the tree.
// If set to true, the router will add both the route as originally passed, and
// a version passed through URL.EscapedPath. This behavior is disabled by default.
EscapeAddedRoutes bool
// If present, override the default context with this one.
DefaultContext context.Context
// SafeAddRoutesWhileRunning tells the router to protect all accesses to the tree with an RWMutex. This is only needed
// if you are going to add routes after the router has already begun serving requests. There is a potential
// performance penalty at high load.
SafeAddRoutesWhileRunning bool
}
func (t *TreeMux) setDefaultRequestContext(r *http.Request) *http.Request {
if t.DefaultContext != nil {
r = r.WithContext(t.DefaultContext)
}
return r
}
type ContextMux struct {
*TreeMux
*ContextGroup
}
// NewContextMux returns a TreeMux preconfigured to work with standard http
// Handler functions and context objects.
func NewContextMux() *ContextMux {
mux := New()
cg := mux.UsingContext()
return &ContextMux{
TreeMux: mux,
ContextGroup: cg,
}
}
func (cm *ContextMux) NewGroup(path string) *ContextGroup {
return cm.ContextGroup.NewGroup(path)
}
// GET is convenience method for handling GET requests on a context group.
func (cm *ContextMux) GET(path string, handler http.HandlerFunc) {
cm.ContextGroup.Handle("GET", path, handler)
}
// POST is convenience method for handling POST requests on a context group.
func (cm *ContextMux) POST(path string, handler http.HandlerFunc) {
cm.ContextGroup.Handle("POST", path, handler)
}
// PUT is convenience method for handling PUT requests on a context group.
func (cm *ContextMux) PUT(path string, handler http.HandlerFunc) {
cm.ContextGroup.Handle("PUT", path, handler)
}
// DELETE is convenience method for handling DELETE requests on a context group.
func (cm *ContextMux) DELETE(path string, handler http.HandlerFunc) {
cm.ContextGroup.Handle("DELETE", path, handler)
}
// PATCH is convenience method for handling PATCH requests on a context group.
func (cm *ContextMux) PATCH(path string, handler http.HandlerFunc) {
cm.ContextGroup.Handle("PATCH", path, handler)
}
// HEAD is convenience method for handling HEAD requests on a context group.
func (cm *ContextMux) HEAD(path string, handler http.HandlerFunc) {
cm.ContextGroup.Handle("HEAD", path, handler)
}
// OPTIONS is convenience method for handling OPTIONS requests on a context group.
func (cm *ContextMux) OPTIONS(path string, handler http.HandlerFunc) {
cm.ContextGroup.Handle("OPTIONS", path, handler)
}

View File

@ -1,9 +0,0 @@
// +build !go1.8
package httptreemux
import "net/url"
func unescape(path string) (string, error) {
return url.QueryUnescape(path)
}

View File

@ -1,9 +0,0 @@
// +build go1.8
package httptreemux
import "net/url"
func unescape(path string) (string, error) {
return url.PathUnescape(path)
}

View File

@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

View File

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2016 Go Playground
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -1,172 +0,0 @@
## locales
<img align="right" src="https://raw.githubusercontent.com/go-playground/locales/master/logo.png">![Project status](https://img.shields.io/badge/version-0.12.1-green.svg)
[![Build Status](https://semaphoreci.com/api/v1/joeybloggs/locales/branches/master/badge.svg)](https://semaphoreci.com/joeybloggs/locales)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/locales)](https://goreportcard.com/report/github.com/go-playground/locales)
[![GoDoc](https://godoc.org/github.com/go-playground/locales?status.svg)](https://godoc.org/github.com/go-playground/locales)
![License](https://img.shields.io/dub/l/vibe-d.svg)
[![Gitter](https://badges.gitter.im/go-playground/locales.svg)](https://gitter.im/go-playground/locales?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
Locales is a set of locales generated from the [Unicode CLDR Project](http://cldr.unicode.org/) which can be used independently or within
an i18n package; these were built for use with, but not exclusive to, [Universal Translator](https://github.com/go-playground/universal-translator).
Features
--------
- [x] Rules generated from the latest [CLDR](http://cldr.unicode.org/index/downloads) data, v31.0.1
- [x] Contains Cardinal, Ordinal and Range Plural Rules
- [x] Contains Month, Weekday and Timezone translations built in
- [x] Contains Date & Time formatting functions
- [x] Contains Number, Currency, Accounting and Percent formatting functions
- [x] Supports the "Gregorian" calendar only ( my time isn't unlimited, had to draw the line somewhere )
Full Tests
--------------------
I could sure use your help adding tests for every locale; it is a huge undertaking and I just don't have the free time to do it all at the moment.
Any help would be **greatly appreciated!** Please see this [issue](https://github.com/go-playground/locales/issues/1) for details.
Installation
-----------
Use go get
```shell
go get github.com/go-playground/locales
```
NOTES
--------
You'll notice most return types are []byte; this is because most of the time the results will be concatenated with a larger body
of text, which can avoid some allocations if you are already appending to a byte array. Otherwise, just cast the result to a string.
Usage
-------
```go
package main
import (
"fmt"
"time"
"github.com/go-playground/locales/currency"
"github.com/go-playground/locales/en_CA"
)
func main() {
loc, _ := time.LoadLocation("America/Toronto")
datetime := time.Date(2016, 02, 03, 9, 0, 1, 0, loc)
l := en_CA.New()
// Dates
fmt.Println(l.FmtDateFull(datetime))
fmt.Println(l.FmtDateLong(datetime))
fmt.Println(l.FmtDateMedium(datetime))
fmt.Println(l.FmtDateShort(datetime))
// Times
fmt.Println(l.FmtTimeFull(datetime))
fmt.Println(l.FmtTimeLong(datetime))
fmt.Println(l.FmtTimeMedium(datetime))
fmt.Println(l.FmtTimeShort(datetime))
// Months Wide
fmt.Println(l.MonthWide(time.January))
fmt.Println(l.MonthWide(time.February))
fmt.Println(l.MonthWide(time.March))
// ...
// Months Abbreviated
fmt.Println(l.MonthAbbreviated(time.January))
fmt.Println(l.MonthAbbreviated(time.February))
fmt.Println(l.MonthAbbreviated(time.March))
// ...
// Months Narrow
fmt.Println(l.MonthNarrow(time.January))
fmt.Println(l.MonthNarrow(time.February))
fmt.Println(l.MonthNarrow(time.March))
// ...
// Weekdays Wide
fmt.Println(l.WeekdayWide(time.Sunday))
fmt.Println(l.WeekdayWide(time.Monday))
fmt.Println(l.WeekdayWide(time.Tuesday))
// ...
// Weekdays Abbreviated
fmt.Println(l.WeekdayAbbreviated(time.Sunday))
fmt.Println(l.WeekdayAbbreviated(time.Monday))
fmt.Println(l.WeekdayAbbreviated(time.Tuesday))
// ...
// Weekdays Short
fmt.Println(l.WeekdayShort(time.Sunday))
fmt.Println(l.WeekdayShort(time.Monday))
fmt.Println(l.WeekdayShort(time.Tuesday))
// ...
// Weekdays Narrow
fmt.Println(l.WeekdayNarrow(time.Sunday))
fmt.Println(l.WeekdayNarrow(time.Monday))
fmt.Println(l.WeekdayNarrow(time.Tuesday))
// ...
var f64 float64
f64 = -10356.4523
// Number
fmt.Println(l.FmtNumber(f64, 2))
// Currency
fmt.Println(l.FmtCurrency(f64, 2, currency.CAD))
fmt.Println(l.FmtCurrency(f64, 2, currency.USD))
// Accounting
fmt.Println(l.FmtAccounting(f64, 2, currency.CAD))
fmt.Println(l.FmtAccounting(f64, 2, currency.USD))
f64 = 78.12
// Percent
fmt.Println(l.FmtPercent(f64, 0))
// Plural Rules for locale, so you know what rules you must cover
fmt.Println(l.PluralsCardinal())
fmt.Println(l.PluralsOrdinal())
// Cardinal Plural Rules
fmt.Println(l.CardinalPluralRule(1, 0))
fmt.Println(l.CardinalPluralRule(1.0, 0))
fmt.Println(l.CardinalPluralRule(1.0, 1))
fmt.Println(l.CardinalPluralRule(3, 0))
// Ordinal Plural Rules
fmt.Println(l.OrdinalPluralRule(21, 0)) // 21st
fmt.Println(l.OrdinalPluralRule(22, 0)) // 22nd
fmt.Println(l.OrdinalPluralRule(33, 0)) // 33rd
fmt.Println(l.OrdinalPluralRule(34, 0)) // 34th
// Range Plural Rules
fmt.Println(l.RangePluralRule(1, 0, 1, 0)) // 1-1
fmt.Println(l.RangePluralRule(1, 0, 2, 0)) // 1-2
fmt.Println(l.RangePluralRule(5, 0, 8, 0)) // 5-8
}
```
NOTES:
-------
These rules were generated from the [Unicode CLDR Project](http://cldr.unicode.org/). If you encounter any issues,
I strongly encourage contributing to the CLDR project to get the locale information corrected; the next time
these locales are regenerated, the fix will come along with it.
I do, however, realize that time constraints are often important, so there are two options:
1. Create your own locale: copy, paste and modify, and ensure it complies with the `Translator` interface.
2. Add an exception directly in the locale generation code; once regenerated, the fix will be in place.
Please do not make fixes inside the locale files; they WILL get overwritten when the locales are regenerated.
License
------
Distributed under MIT License, please see license file in code for more details.

View File

@ -1,308 +0,0 @@
package currency
// Type is the currency type associated with the locales currency enum
type Type int
// locale currencies
const (
ADP Type = iota
AED
AFA
AFN
ALK
ALL
AMD
ANG
AOA
AOK
AON
AOR
ARA
ARL
ARM
ARP
ARS
ATS
AUD
AWG
AZM
AZN
BAD
BAM
BAN
BBD
BDT
BEC
BEF
BEL
BGL
BGM
BGN
BGO
BHD
BIF
BMD
BND
BOB
BOL
BOP
BOV
BRB
BRC
BRE
BRL
BRN
BRR
BRZ
BSD
BTN
BUK
BWP
BYB
BYN
BYR
BZD
CAD
CDF
CHE
CHF
CHW
CLE
CLF
CLP
CNH
CNX
CNY
COP
COU
CRC
CSD
CSK
CUC
CUP
CVE
CYP
CZK
DDM
DEM
DJF
DKK
DOP
DZD
ECS
ECV
EEK
EGP
ERN
ESA
ESB
ESP
ETB
EUR
FIM
FJD
FKP
FRF
GBP
GEK
GEL
GHC
GHS
GIP
GMD
GNF
GNS
GQE
GRD
GTQ
GWE
GWP
GYD
HKD
HNL
HRD
HRK
HTG
HUF
IDR
IEP
ILP
ILR
ILS
INR
IQD
IRR
ISJ
ISK
ITL
JMD
JOD
JPY
KES
KGS
KHR
KMF
KPW
KRH
KRO
KRW
KWD
KYD
KZT
LAK
LBP
LKR
LRD
LSL
LTL
LTT
LUC
LUF
LUL
LVL
LVR
LYD
MAD
MAF
MCF
MDC
MDL
MGA
MGF
MKD
MKN
MLF
MMK
MNT
MOP
MRO
MTL
MTP
MUR
MVP
MVR
MWK
MXN
MXP
MXV
MYR
MZE
MZM
MZN
NAD
NGN
NIC
NIO
NLG
NOK
NPR
NZD
OMR
PAB
PEI
PEN
PES
PGK
PHP
PKR
PLN
PLZ
PTE
PYG
QAR
RHD
ROL
RON
RSD
RUB
RUR
RWF
SAR
SBD
SCR
SDD
SDG
SDP
SEK
SGD
SHP
SIT
SKK
SLL
SOS
SRD
SRG
SSP
STD
STN
SUR
SVC
SYP
SZL
THB
TJR
TJS
TMM
TMT
TND
TOP
TPE
TRL
TRY
TTD
TWD
TZS
UAH
UAK
UGS
UGX
USD
USN
USS
UYI
UYP
UYU
UZS
VEB
VEF
VND
VNN
VUV
WST
XAF
XAG
XAU
XBA
XBB
XBC
XBD
XCD
XDR
XEU
XFO
XFU
XOF
XPD
XPF
XPT
XRE
XSU
XTS
XUA
XXX
YDD
YER
YUD
YUM
YUN
YUR
ZAL
ZAR
ZMK
ZMW
ZRN
ZRZ
ZWD
ZWL
ZWR
)

View File

@ -1,650 +0,0 @@
package en
import (
"math"
"strconv"
"time"
"github.com/go-playground/locales"
"github.com/go-playground/locales/currency"
)
type en struct {
locale string
pluralsCardinal []locales.PluralRule
pluralsOrdinal []locales.PluralRule
pluralsRange []locales.PluralRule
decimal string
group string
minus string
percent string
perMille string
timeSeparator string
inifinity string
currencies []string // idx = enum of currency code
currencyNegativePrefix string
currencyNegativeSuffix string
monthsAbbreviated []string
monthsNarrow []string
monthsWide []string
daysAbbreviated []string
daysNarrow []string
daysShort []string
daysWide []string
periodsAbbreviated []string
periodsNarrow []string
periodsShort []string
periodsWide []string
erasAbbreviated []string
erasNarrow []string
erasWide []string
timezones map[string]string
}
// New returns a new instance of translator for the 'en' locale
func New() locales.Translator {
return &en{
locale: "en",
pluralsCardinal: []locales.PluralRule{2, 6},
pluralsOrdinal: []locales.PluralRule{2, 3, 4, 6},
pluralsRange: []locales.PluralRule{6},
decimal: ".",
group: ",",
minus: "-",
percent: "%",
perMille: "‰",
timeSeparator: ":",
inifinity: "∞",
currencies: []string{"ADP", "AED", "AFA", "AFN", "ALK", "ALL", "AMD", "ANG", "AOA", "AOK", "AON", "AOR", "ARA", "ARL", "ARM", "ARP", "ARS", "ATS", "AUD", "AWG", "AZM", "AZN", "BAD", "BAM", "BAN", "BBD", "BDT", "BEC", "BEF", "BEL", "BGL", "BGM", "BGN", "BGO", "BHD", "BIF", "BMD", "BND", "BOB", "BOL", "BOP", "BOV", "BRB", "BRC", "BRE", "BRL", "BRN", "BRR", "BRZ", "BSD", "BTN", "BUK", "BWP", "BYB", "BYN", "BYR", "BZD", "CAD", "CDF", "CHE", "CHF", "CHW", "CLE", "CLF", "CLP", "CNH", "CNX", "CNY", "COP", "COU", "CRC", "CSD", "CSK", "CUC", "CUP", "CVE", "CYP", "CZK", "DDM", "DEM", "DJF", "DKK", "DOP", "DZD", "ECS", "ECV", "EEK", "EGP", "ERN", "ESA", "ESB", "ESP", "ETB", "EUR", "FIM", "FJD", "FKP", "FRF", "GBP", "GEK", "GEL", "GHC", "GHS", "GIP", "GMD", "GNF", "GNS", "GQE", "GRD", "GTQ", "GWE", "GWP", "GYD", "HKD", "HNL", "HRD", "HRK", "HTG", "HUF", "IDR", "IEP", "ILP", "ILR", "ILS", "INR", "IQD", "IRR", "ISJ", "ISK", "ITL", "JMD", "JOD", "¥", "KES", "KGS", "KHR", "KMF", "KPW", "KRH", "KRO", "KRW", "KWD", "KYD", "KZT", "LAK", "LBP", "LKR", "LRD", "LSL", "LTL", "LTT", "LUC", "LUF", "LUL", "LVL", "LVR", "LYD", "MAD", "MAF", "MCF", "MDC", "MDL", "MGA", "MGF", "MKD", "MKN", "MLF", "MMK", "MNT", "MOP", "MRO", "MTL", "MTP", "MUR", "MVP", "MVR", "MWK", "MXN", "MXP", "MXV", "MYR", "MZE", "MZM", "MZN", "NAD", "NGN", "NIC", "NIO", "NLG", "NOK", "NPR", "NZD", "OMR", "PAB", "PEI", "PEN", "PES", "PGK", "PHP", "PKR", "PLN", "PLZ", "PTE", "PYG", "QAR", "RHD", "ROL", "RON", "RSD", "RUB", "RUR", "RWF", "SAR", "SBD", "SCR", "SDD", "SDG", "SDP", "SEK", "SGD", "SHP", "SIT", "SKK", "SLL", "SOS", "SRD", "SRG", "SSP", "STD", "STN", "SUR", "SVC", "SYP", "SZL", "THB", "TJR", "TJS", "TMM", "TMT", "TND", "TOP", "TPE", "TRL", "TRY", "TTD", "TWD", "TZS", "UAH", "UAK", "UGS", "UGX", "$", "USN", "USS", "UYI", "UYP", "UYU", "UZS", "VEB", "VEF", "VND", "VNN", "VUV", "WST", "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XEU", "XFO", "XFU", "XOF", "XPD", "XPF", "XPT", "XRE", "XSU", "XTS", "XUA", "XXX", "YDD", "YER", "YUD", "YUM", "YUN", "YUR", "ZAL", "ZAR", "ZMK", "ZMW", "ZRN", "ZRZ", "ZWD", "ZWL", "ZWR"},
currencyNegativePrefix: "(",
currencyNegativeSuffix: ")",
monthsAbbreviated: []string{"", "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"},
monthsNarrow: []string{"", "J", "F", "M", "A", "M", "J", "J", "A", "S", "O", "N", "D"},
monthsWide: []string{"", "January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"},
daysAbbreviated: []string{"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"},
daysNarrow: []string{"S", "M", "T", "W", "T", "F", "S"},
daysShort: []string{"Su", "Mo", "Tu", "We", "Th", "Fr", "Sa"},
daysWide: []string{"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"},
periodsAbbreviated: []string{"am", "pm"},
periodsNarrow: []string{"a", "p"},
periodsWide: []string{"am", "pm"},
erasAbbreviated: []string{"BC", "AD"},
erasNarrow: []string{"B", "A"},
erasWide: []string{"Before Christ", "Anno Domini"},
timezones: map[string]string{"IST": "India Standard Time", "WARST": "Western Argentina Summer Time", "HNT": "Newfoundland Standard Time", "VET": "Venezuela Time", "HAST": "Hawaii-Aleutian Standard Time", "CDT": "Central Daylight Time", "HEEG": "East Greenland Summer Time", "HKT": "Hong Kong Standard Time", "SGT": "Singapore Standard Time", "EDT": "Eastern Daylight Time", "HEPM": "St. Pierre & Miquelon Daylight Time", "CST": "Central Standard Time", "HNPMX": "Mexican Pacific Standard Time", "WAT": "West Africa Standard Time", "JDT": "Japan Daylight Time", "ACST": "Australian Central Standard Time", "TMST": "Turkmenistan Summer Time", "LHDT": "Lord Howe Daylight Time", "CAT": "Central Africa Time", "UYT": "Uruguay Standard Time", "HEPMX": "Mexican Pacific Daylight Time", "WEZ": "Western European Standard Time", "BOT": "Bolivia Time", "GFT": "French Guiana Time", "HNNOMX": "Northwest Mexico Standard Time", "OEZ": "Eastern European Standard Time", "AEST": "Australian Eastern Standard Time", "MDT": "Mountain Daylight Time", "SAST": "South Africa Standard Time", "BT": "Bhutan Time", "SRT": "Suriname Time", "TMT": "Turkmenistan Standard Time", "CHADT": "Chatham Daylight Time", "PST": "Pacific Standard Time", "ADT": "Atlantic Daylight Time", "HENOMX": "Northwest Mexico Daylight Time", "EAT": "East Africa Time", "CLT": "Chile Standard Time", "∅∅∅": "Brasilia Summer Time", "WESZ": "Western European Summer Time", "HAT": "Newfoundland Daylight Time", "WIB": "Western Indonesia Time", "NZST": "New Zealand Standard Time", "HNEG": "East Greenland Standard Time", "HNPM": "St. Pierre & Miquelon Standard Time", "WITA": "Central Indonesia Time", "GMT": "Greenwich Mean Time", "UYST": "Uruguay Summer Time", "HNCU": "Cuba Standard Time", "GYT": "Guyana Time", "MYT": "Malaysia Time", "COT": "Colombia Standard Time", "AST": "Atlantic Standard Time", "ACDT": "Australian Central Daylight Time", "MEZ": "Central European Standard Time", "AKDT": "Alaska Daylight Time", "EST": "Eastern Standard Time", "HNOG": "West Greenland Standard Time", "ECT": "Ecuador Time", "ART": "Argentina Standard Time", "HEOG": "West Greenland Summer Time", "MESZ": "Central European Summer Time", "LHST": "Lord Howe Standard Time", "OESZ": "Eastern European Summer Time", "AWST": "Australian Western Standard Time", "AEDT": "Australian Eastern Daylight Time", "ACWDT": "Australian Central Western Daylight Time", "NZDT": "New Zealand Daylight Time", "JST": "Japan Standard Time", "WART": "Western Argentina Standard Time", "CHAST": "Chatham Standard Time", "HECU": "Cuba Daylight Time", "WAST": "West Africa Summer Time", "ACWST": "Australian Central Western Standard Time", "HKST": "Hong Kong Summer Time", "ARST": "Argentina Summer Time", "MST": "Mountain Standard Time", "AKST": "Alaska Standard Time", "CLST": "Chile Summer Time", "WIT": "Eastern Indonesia Time", "HADT": "Hawaii-Aleutian Daylight Time", "ChST": "Chamorro Standard Time", "PDT": "Pacific Daylight Time", "AWDT": "Australian Western Daylight Time", "COST": "Colombia Summer Time"},
}
}
// Locale returns the current translators string locale
func (en *en) Locale() string {
return en.locale
}
// PluralsCardinal returns the list of cardinal plural rules associated with 'en'
func (en *en) PluralsCardinal() []locales.PluralRule {
return en.pluralsCardinal
}
// PluralsOrdinal returns the list of ordinal plural rules associated with 'en'
func (en *en) PluralsOrdinal() []locales.PluralRule {
return en.pluralsOrdinal
}
// PluralsRange returns the list of range plural rules associated with 'en'
func (en *en) PluralsRange() []locales.PluralRule {
return en.pluralsRange
}
// CardinalPluralRule returns the cardinal PluralRule given 'num' and digits/precision of 'v' for 'en'
func (en *en) CardinalPluralRule(num float64, v uint64) locales.PluralRule {
n := math.Abs(num)
i := int64(n)
if i == 1 && v == 0 {
return locales.PluralRuleOne
}
return locales.PluralRuleOther
}
// OrdinalPluralRule returns the ordinal PluralRule given 'num' and digits/precision of 'v' for 'en'
func (en *en) OrdinalPluralRule(num float64, v uint64) locales.PluralRule {
n := math.Abs(num)
nMod100 := math.Mod(n, 100)
nMod10 := math.Mod(n, 10)
if nMod10 == 1 && nMod100 != 11 {
return locales.PluralRuleOne
} else if nMod10 == 2 && nMod100 != 12 {
return locales.PluralRuleTwo
} else if nMod10 == 3 && nMod100 != 13 {
return locales.PluralRuleFew
}
return locales.PluralRuleOther
}
// RangePluralRule returns the ordinal PluralRule given 'num1', 'num2' and digits/precision of 'v1' and 'v2' for 'en'
func (en *en) RangePluralRule(num1 float64, v1 uint64, num2 float64, v2 uint64) locales.PluralRule {
return locales.PluralRuleOther
}
// MonthAbbreviated returns the locales abbreviated month given the 'month' provided
func (en *en) MonthAbbreviated(month time.Month) string {
return en.monthsAbbreviated[month]
}
// MonthsAbbreviated returns the locales abbreviated months
func (en *en) MonthsAbbreviated() []string {
return en.monthsAbbreviated[1:]
}
// MonthNarrow returns the locales narrow month given the 'month' provided
func (en *en) MonthNarrow(month time.Month) string {
return en.monthsNarrow[month]
}
// MonthsNarrow returns the locales narrow months
func (en *en) MonthsNarrow() []string {
return en.monthsNarrow[1:]
}
// MonthWide returns the locales wide month given the 'month' provided
func (en *en) MonthWide(month time.Month) string {
return en.monthsWide[month]
}
// MonthsWide returns the locales wide months
func (en *en) MonthsWide() []string {
return en.monthsWide[1:]
}
// WeekdayAbbreviated returns the locales abbreviated weekday given the 'weekday' provided
func (en *en) WeekdayAbbreviated(weekday time.Weekday) string {
return en.daysAbbreviated[weekday]
}
// WeekdaysAbbreviated returns the locales abbreviated weekdays
func (en *en) WeekdaysAbbreviated() []string {
return en.daysAbbreviated
}
// WeekdayNarrow returns the locales narrow weekday given the 'weekday' provided
func (en *en) WeekdayNarrow(weekday time.Weekday) string {
return en.daysNarrow[weekday]
}
// WeekdaysNarrow returns the locales narrow weekdays
func (en *en) WeekdaysNarrow() []string {
return en.daysNarrow
}
// WeekdayShort returns the locales short weekday given the 'weekday' provided
func (en *en) WeekdayShort(weekday time.Weekday) string {
return en.daysShort[weekday]
}
// WeekdaysShort returns the locales short weekdays
func (en *en) WeekdaysShort() []string {
return en.daysShort
}
// WeekdayWide returns the locales wide weekday given the 'weekday' provided
func (en *en) WeekdayWide(weekday time.Weekday) string {
return en.daysWide[weekday]
}
// WeekdaysWide returns the locales wide weekdays
func (en *en) WeekdaysWide() []string {
return en.daysWide
}
// Decimal returns the decimal point of number
func (en *en) Decimal() string {
return en.decimal
}
// Group returns the group of number
func (en *en) Group() string {
return en.group
}
// Group returns the minus sign of number
func (en *en) Minus() string {
return en.minus
}
// FmtNumber returns 'num' with digits/precision of 'v' for 'en' and handles both Whole and Real numbers based on 'v'
func (en *en) FmtNumber(num float64, v uint64) string {
s := strconv.FormatFloat(math.Abs(num), 'f', int(v), 64)
l := len(s) + 2 + 1*len(s[:len(s)-int(v)-1])/3
count := 0
inWhole := v == 0
b := make([]byte, 0, l)
for i := len(s) - 1; i >= 0; i-- {
if s[i] == '.' {
b = append(b, en.decimal[0])
inWhole = true
continue
}
if inWhole {
if count == 3 {
b = append(b, en.group[0])
count = 1
} else {
count++
}
}
b = append(b, s[i])
}
if num < 0 {
b = append(b, en.minus[0])
}
// reverse
for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
b[i], b[j] = b[j], b[i]
}
return string(b)
}
// FmtPercent returns 'num' with digits/precision of 'v' for 'en' and handles both Whole and Real numbers based on 'v'
// NOTE: 'num' passed into FmtPercent is assumed to be in percent already
func (en *en) FmtPercent(num float64, v uint64) string {
s := strconv.FormatFloat(math.Abs(num), 'f', int(v), 64)
l := len(s) + 3
b := make([]byte, 0, l)
for i := len(s) - 1; i >= 0; i-- {
if s[i] == '.' {
b = append(b, en.decimal[0])
continue
}
b = append(b, s[i])
}
if num < 0 {
b = append(b, en.minus[0])
}
// reverse
for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
b[i], b[j] = b[j], b[i]
}
b = append(b, en.percent...)
return string(b)
}
// FmtCurrency returns the currency representation of 'num' with digits/precision of 'v' for 'en'
func (en *en) FmtCurrency(num float64, v uint64, currency currency.Type) string {
s := strconv.FormatFloat(math.Abs(num), 'f', int(v), 64)
symbol := en.currencies[currency]
l := len(s) + len(symbol) + 2 + 1*len(s[:len(s)-int(v)-1])/3
count := 0
inWhole := v == 0
b := make([]byte, 0, l)
for i := len(s) - 1; i >= 0; i-- {
if s[i] == '.' {
b = append(b, en.decimal[0])
inWhole = true
continue
}
if inWhole {
if count == 3 {
b = append(b, en.group[0])
count = 1
} else {
count++
}
}
b = append(b, s[i])
}
for j := len(symbol) - 1; j >= 0; j-- {
b = append(b, symbol[j])
}
if num < 0 {
b = append(b, en.minus[0])
}
// reverse
for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
b[i], b[j] = b[j], b[i]
}
if int(v) < 2 {
if v == 0 {
b = append(b, en.decimal...)
}
for i := 0; i < 2-int(v); i++ {
b = append(b, '0')
}
}
return string(b)
}
// FmtAccounting returns the currency representation of 'num' with digits/precision of 'v' for 'en'
// in accounting notation.
func (en *en) FmtAccounting(num float64, v uint64, currency currency.Type) string {
s := strconv.FormatFloat(math.Abs(num), 'f', int(v), 64)
symbol := en.currencies[currency]
l := len(s) + len(symbol) + 4 + 1*len(s[:len(s)-int(v)-1])/3
count := 0
inWhole := v == 0
b := make([]byte, 0, l)
for i := len(s) - 1; i >= 0; i-- {
if s[i] == '.' {
b = append(b, en.decimal[0])
inWhole = true
continue
}
if inWhole {
if count == 3 {
b = append(b, en.group[0])
count = 1
} else {
count++
}
}
b = append(b, s[i])
}
if num < 0 {
for j := len(symbol) - 1; j >= 0; j-- {
b = append(b, symbol[j])
}
b = append(b, en.currencyNegativePrefix[0])
} else {
for j := len(symbol) - 1; j >= 0; j-- {
b = append(b, symbol[j])
}
}
// reverse
for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
b[i], b[j] = b[j], b[i]
}
if int(v) < 2 {
if v == 0 {
b = append(b, en.decimal...)
}
for i := 0; i < 2-int(v); i++ {
b = append(b, '0')
}
}
if num < 0 {
b = append(b, en.currencyNegativeSuffix...)
}
return string(b)
}
// FmtDateShort returns the short date representation of 't' for 'en'
func (en *en) FmtDateShort(t time.Time) string {
b := make([]byte, 0, 32)
b = strconv.AppendInt(b, int64(t.Month()), 10)
b = append(b, []byte{0x2f}...)
b = strconv.AppendInt(b, int64(t.Day()), 10)
b = append(b, []byte{0x2f}...)
if t.Year() > 9 {
b = append(b, strconv.Itoa(t.Year())[2:]...)
} else {
b = append(b, strconv.Itoa(t.Year())[1:]...)
}
return string(b)
}
// FmtDateMedium returns the medium date representation of 't' for 'en'
func (en *en) FmtDateMedium(t time.Time) string {
b := make([]byte, 0, 32)
b = append(b, en.monthsAbbreviated[t.Month()]...)
b = append(b, []byte{0x20}...)
b = strconv.AppendInt(b, int64(t.Day()), 10)
b = append(b, []byte{0x2c, 0x20}...)
if t.Year() > 0 {
b = strconv.AppendInt(b, int64(t.Year()), 10)
} else {
b = strconv.AppendInt(b, int64(-t.Year()), 10)
}
return string(b)
}
// FmtDateLong returns the long date representation of 't' for 'en'
func (en *en) FmtDateLong(t time.Time) string {
b := make([]byte, 0, 32)
b = append(b, en.monthsWide[t.Month()]...)
b = append(b, []byte{0x20}...)
b = strconv.AppendInt(b, int64(t.Day()), 10)
b = append(b, []byte{0x2c, 0x20}...)
if t.Year() > 0 {
b = strconv.AppendInt(b, int64(t.Year()), 10)
} else {
b = strconv.AppendInt(b, int64(-t.Year()), 10)
}
return string(b)
}
// FmtDateFull returns the full date representation of 't' for 'en'
func (en *en) FmtDateFull(t time.Time) string {
b := make([]byte, 0, 32)
b = append(b, en.daysWide[t.Weekday()]...)
b = append(b, []byte{0x2c, 0x20}...)
b = append(b, en.monthsWide[t.Month()]...)
b = append(b, []byte{0x20}...)
b = strconv.AppendInt(b, int64(t.Day()), 10)
b = append(b, []byte{0x2c, 0x20}...)
if t.Year() > 0 {
b = strconv.AppendInt(b, int64(t.Year()), 10)
} else {
b = strconv.AppendInt(b, int64(-t.Year()), 10)
}
return string(b)
}
// FmtTimeShort returns the short time representation of 't' for 'en'
func (en *en) FmtTimeShort(t time.Time) string {
b := make([]byte, 0, 32)
h := t.Hour()
if h > 12 {
h -= 12
}
b = strconv.AppendInt(b, int64(h), 10)
b = append(b, en.timeSeparator...)
if t.Minute() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Minute()), 10)
b = append(b, []byte{0x20}...)
if t.Hour() < 12 {
b = append(b, en.periodsAbbreviated[0]...)
} else {
b = append(b, en.periodsAbbreviated[1]...)
}
return string(b)
}
// FmtTimeMedium returns the medium time representation of 't' for 'en'
func (en *en) FmtTimeMedium(t time.Time) string {
b := make([]byte, 0, 32)
h := t.Hour()
if h > 12 {
h -= 12
}
b = strconv.AppendInt(b, int64(h), 10)
b = append(b, en.timeSeparator...)
if t.Minute() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Minute()), 10)
b = append(b, en.timeSeparator...)
if t.Second() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Second()), 10)
b = append(b, []byte{0x20}...)
if t.Hour() < 12 {
b = append(b, en.periodsAbbreviated[0]...)
} else {
b = append(b, en.periodsAbbreviated[1]...)
}
return string(b)
}
// FmtTimeLong returns the long time representation of 't' for 'en'
func (en *en) FmtTimeLong(t time.Time) string {
b := make([]byte, 0, 32)
h := t.Hour()
if h > 12 {
h -= 12
}
b = strconv.AppendInt(b, int64(h), 10)
b = append(b, en.timeSeparator...)
if t.Minute() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Minute()), 10)
b = append(b, en.timeSeparator...)
if t.Second() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Second()), 10)
b = append(b, []byte{0x20}...)
if t.Hour() < 12 {
b = append(b, en.periodsAbbreviated[0]...)
} else {
b = append(b, en.periodsAbbreviated[1]...)
}
b = append(b, []byte{0x20}...)
tz, _ := t.Zone()
b = append(b, tz...)
return string(b)
}
// FmtTimeFull returns the full time representation of 't' for 'en'
func (en *en) FmtTimeFull(t time.Time) string {
b := make([]byte, 0, 32)
h := t.Hour()
if h > 12 {
h -= 12
}
b = strconv.AppendInt(b, int64(h), 10)
b = append(b, en.timeSeparator...)
if t.Minute() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Minute()), 10)
b = append(b, en.timeSeparator...)
if t.Second() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Second()), 10)
b = append(b, []byte{0x20}...)
if t.Hour() < 12 {
b = append(b, en.periodsAbbreviated[0]...)
} else {
b = append(b, en.periodsAbbreviated[1]...)
}
b = append(b, []byte{0x20}...)
tz, _ := t.Zone()
if btz, ok := en.timezones[tz]; ok {
b = append(b, btz...)
} else {
b = append(b, tz...)
}
return string(b)
}

Binary file not shown.

Before: 36 KiB

View File

@ -1,293 +0,0 @@
package locales
import (
"strconv"
"time"
"github.com/go-playground/locales/currency"
)
// // ErrBadNumberValue is returned when the number passed for
// // plural rule determination cannot be parsed
// type ErrBadNumberValue struct {
// NumberValue string
// InnerError error
// }
// // Error returns ErrBadNumberValue error string
// func (e *ErrBadNumberValue) Error() string {
// return fmt.Sprintf("Invalid Number Value '%s' %s", e.NumberValue, e.InnerError)
// }
// var _ error = new(ErrBadNumberValue)
// PluralRule denotes the type of plural rules
type PluralRule int
// PluralRule's
const (
PluralRuleUnknown PluralRule = iota
PluralRuleZero // zero
PluralRuleOne // one - singular
PluralRuleTwo // two - dual
PluralRuleFew // few - paucal
PluralRuleMany // many - also used for fractions if they have a separate class
PluralRuleOther // other - required—general plural form—also used if the language only has a single form
)
const (
pluralsString = "UnknownZeroOneTwoFewManyOther"
)
// Translator encapsulates an instance of a locale
// NOTE: some values are returned as a []byte just in case the caller
// wishes to add more and can help avoid allocations; otherwise just cast as string
type Translator interface {
// The following Functions are for overriding, debugging or developing
// with a Translator Locale
// Locale returns the string value of the translator
Locale() string
// returns an array of cardinal plural rules associated
// with this translator
PluralsCardinal() []PluralRule
// returns an array of ordinal plural rules associated
// with this translator
PluralsOrdinal() []PluralRule
// returns an array of range plural rules associated
// with this translator
PluralsRange() []PluralRule
// returns the cardinal PluralRule given 'num' and digits/precision of 'v' for locale
CardinalPluralRule(num float64, v uint64) PluralRule
// returns the ordinal PluralRule given 'num' and digits/precision of 'v' for locale
OrdinalPluralRule(num float64, v uint64) PluralRule
// returns the ordinal PluralRule given 'num1', 'num2' and digits/precision of 'v1' and 'v2' for locale
RangePluralRule(num1 float64, v1 uint64, num2 float64, v2 uint64) PluralRule
// returns the locales abbreviated month given the 'month' provided
MonthAbbreviated(month time.Month) string
// returns the locales abbreviated months
MonthsAbbreviated() []string
// returns the locales narrow month given the 'month' provided
MonthNarrow(month time.Month) string
// returns the locales narrow months
MonthsNarrow() []string
// returns the locales wide month given the 'month' provided
MonthWide(month time.Month) string
// returns the locales wide months
MonthsWide() []string
// returns the locales abbreviated weekday given the 'weekday' provided
WeekdayAbbreviated(weekday time.Weekday) string
// returns the locales abbreviated weekdays
WeekdaysAbbreviated() []string
// returns the locales narrow weekday given the 'weekday' provided
WeekdayNarrow(weekday time.Weekday) string
// WeekdaysNarrow returns the locales narrow weekdays
WeekdaysNarrow() []string
// returns the locales short weekday given the 'weekday' provided
WeekdayShort(weekday time.Weekday) string
// returns the locales short weekdays
WeekdaysShort() []string
// returns the locales wide weekday given the 'weekday' provided
WeekdayWide(weekday time.Weekday) string
// returns the locales wide weekdays
WeekdaysWide() []string
// The following functions are common formatting functions for the Translator's Locale
// returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v'
FmtNumber(num float64, v uint64) string
// returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v'
// NOTE: 'num' passed into FmtPercent is assumed to be in percent already
FmtPercent(num float64, v uint64) string
// returns the currency representation of 'num' with digits/precision of 'v' for locale
FmtCurrency(num float64, v uint64, currency currency.Type) string
// returns the currency representation of 'num' with digits/precision of 'v' for locale
// in accounting notation.
FmtAccounting(num float64, v uint64, currency currency.Type) string
// returns the short date representation of 't' for locale
FmtDateShort(t time.Time) string
// returns the medium date representation of 't' for locale
FmtDateMedium(t time.Time) string
// returns the long date representation of 't' for locale
FmtDateLong(t time.Time) string
// returns the full date representation of 't' for locale
FmtDateFull(t time.Time) string
// returns the short time representation of 't' for locale
FmtTimeShort(t time.Time) string
// returns the medium time representation of 't' for locale
FmtTimeMedium(t time.Time) string
// returns the long time representation of 't' for locale
FmtTimeLong(t time.Time) string
// returns the full time representation of 't' for locale
FmtTimeFull(t time.Time) string
}
// String returns the string value of PluralRule
func (p PluralRule) String() string {
switch p {
case PluralRuleZero:
return pluralsString[7:11]
case PluralRuleOne:
return pluralsString[11:14]
case PluralRuleTwo:
return pluralsString[14:17]
case PluralRuleFew:
return pluralsString[17:20]
case PluralRuleMany:
return pluralsString[20:24]
case PluralRuleOther:
return pluralsString[24:]
default:
return pluralsString[:7]
}
}
//
// Precision Notes:
//
// must specify a precision >= 0, and here is why https://play.golang.org/p/LyL90U0Vyh
//
// v := float64(3.141)
// i := float64(int64(v))
//
// fmt.Println(v - i)
//
// or
//
// s := strconv.FormatFloat(v-i, 'f', -1, 64)
// fmt.Println(s)
//
// these will not print what you'd expect: 0.14100000000000001
// and so this library requires a precision to be specified, or
// inaccurate plural rules could be applied.
//
//
//
// n - absolute value of the source number (integer and decimals).
// i - integer digits of n.
// v - number of visible fraction digits in n, with trailing zeros.
// w - number of visible fraction digits in n, without trailing zeros.
// f - visible fractional digits in n, with trailing zeros.
// t - visible fractional digits in n, without trailing zeros.
//
//
// Func(num float64, v uint64) // v = digits/precision and prevents -1 as a special case as this can lead to very unexpected behaviour, see precision note's above.
//
// n := math.Abs(num)
// i := int64(n)
// v := v
//
//
// w := strconv.FormatFloat(num-float64(i), 'f', int(v), 64) // then parse backwards on string until no more zero's....
// f := strconv.FormatFloat(n, 'f', int(v), 64) // then turn everything after decimal into an int64
// t := strconv.FormatFloat(n, 'f', int(v), 64) // then parse backwards on string until no more zero's....
//
//
//
// General Inclusion Rules
// - v will always be available inherently
// - all require n
// - w requires i
//
// W returns the number of visible fraction digits in N, without trailing zeros.
func W(n float64, v uint64) (w int64) {
s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)
// will either be '0' or '0.xxxx', so if len is 1 then w will be zero
// otherwise need to parse
if len(s) != 1 {
s = s[2:]
end := len(s) + 1
for i := end; i >= 0; i-- {
if s[i] != '0' {
end = i + 1
break
}
}
w = int64(len(s[:end]))
}
return
}
// F returns the visible fractional digits in N, with trailing zeros.
func F(n float64, v uint64) (f int64) {
s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)
// will either be '0' or '0.xxxx', so if len is 1 then f will be zero
// otherwise need to parse
if len(s) != 1 {
// ignoring error, because it can't fail as we generated
// the string internally from a real number
f, _ = strconv.ParseInt(s[2:], 10, 64)
}
return
}
// T returns the visible fractional digits in N, without trailing zeros.
func T(n float64, v uint64) (t int64) {
s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)
// will either be '0' or '0.xxxx', so if len is 1 then t will be zero
// otherwise need to parse
if len(s) != 1 {
s = s[2:]
end := len(s) + 1
for i := end; i >= 0; i-- {
if s[i] != '0' {
end = i + 1
break
}
}
// ignoring error, because it can't fail as we generated
// the string internally from a real number
t, _ = strconv.ParseInt(s[:end], 10, 64)
}
return
}

View File

@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

View File

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2016 Go Playground
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -1,90 +0,0 @@
## universal-translator
<img align="right" src="https://raw.githubusercontent.com/go-playground/universal-translator/master/logo.png">
![Project status](https://img.shields.io/badge/version-0.16.0-green.svg)
[![Build Status](https://semaphoreci.com/api/v1/joeybloggs/universal-translator/branches/master/badge.svg)](https://semaphoreci.com/joeybloggs/universal-translator)
[![Coverage Status](https://coveralls.io/repos/github/go-playground/universal-translator/badge.svg)](https://coveralls.io/github/go-playground/universal-translator)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/universal-translator)](https://goreportcard.com/report/github.com/go-playground/universal-translator)
[![GoDoc](https://godoc.org/github.com/go-playground/universal-translator?status.svg)](https://godoc.org/github.com/go-playground/universal-translator)
![License](https://img.shields.io/dub/l/vibe-d.svg)
[![Gitter](https://badges.gitter.im/go-playground/universal-translator.svg)](https://gitter.im/go-playground/universal-translator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
Universal Translator is an i18n Translator for Go/Golang using CLDR data + pluralization rules
Why another i18n library?
--------------------------
Because none of the plural rules out there seem to be correct, including the previous implementation of this package,
I took it upon myself to create [locales](https://github.com/go-playground/locales) for everyone to use; this package
is a thin wrapper around [locales](https://github.com/go-playground/locales) that stores and translates text for
use in your applications.
Features
--------
- [x] Rules generated from the [CLDR](http://cldr.unicode.org/index/downloads) data, v30.0.3
- [x] Contains Cardinal, Ordinal and Range Plural Rules
- [x] Contains Month, Weekday and Timezone translations built in
- [x] Contains Date & Time formatting functions
- [x] Contains Number, Currency, Accounting and Percent formatting functions
- [x] Supports the "Gregorian" calendar only ( my time isn't unlimited, had to draw the line somewhere )
- [x] Support loading translations from files
- [x] Exporting translations to file(s), mainly for getting them professionally translated
- [ ] Code Generation for translation files -> Go code.. i.e. after it has been professionally translated
- [ ] Tests for all languages, I need help with this, please see [here](https://github.com/go-playground/locales/issues/1)
Installation
-----------
Use go get
```shell
go get github.com/go-playground/universal-translator
```
Usage & Documentation
-------
Please see https://godoc.org/github.com/go-playground/universal-translator for usage docs; a minimal usage sketch also follows the examples below.
##### Examples:
- [Basic](https://github.com/go-playground/universal-translator/tree/master/examples/basic)
- [Full - no files](https://github.com/go-playground/universal-translator/tree/master/examples/full-no-files)
- [Full - with files](https://github.com/go-playground/universal-translator/tree/master/examples/full-with-files)
File formatting
--------------
All translation types (plain substitution, Cardinal, Ordinal and Range) can be contained within the same file(s);
they are only separated for easy viewing.
##### Examples:
- [Formats](https://github.com/go-playground/universal-translator/tree/master/examples/file-formats)
##### Basic Makeup
NOTE: not all fields are needed for all translation types; see the [examples](https://github.com/go-playground/universal-translator/tree/master/examples/file-formats). A minimal import sketch also follows the field table below.
```json
{
"locale": "en",
"key": "days-left",
"trans": "You have {0} day left.",
"type": "Cardinal",
"rule": "One",
"override": false
}
```
|Field|Description|
|---|---|
|locale|The locale for which the translation is for.|
|key|The translation key that will be used to store and look up each translation; normally it is a string or integer.|
|trans|The actual translation text.|
|type|The type of translation: Cardinal, Ordinal, Range, or "" for a plain substitution (not required to be defined if plain is used).|
|rule|The plural rule the translation is for, e.g. One, Two, Few, Many or Other (not required to be defined if plain is used).|
|override|If you wish to override an existing translation that has already been registered, set this to 'true'. 99% of the time there is no need to define it.|
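As a rough illustration of how such a file is consumed (a sketch, not code from this repository), the snippet below feeds a JSON array in the format above through `ImportByReader` with `FormatJSON`, both of which appear in the import/export code later in this diff; `ut.New` and `GetTranslator` are again assumed from the package's top-level API.
```go
package main

import (
	"fmt"
	"strings"

	"github.com/go-playground/locales/en"
	ut "github.com/go-playground/universal-translator"
)

// translations mirrors the "Basic Makeup" entry above, wrapped in a JSON array.
const translations = `[
  {"locale": "en", "key": "days-left", "trans": "You have {0} day left.",  "type": "Cardinal", "rule": "One"},
  {"locale": "en", "key": "days-left", "trans": "You have {0} days left.", "type": "Cardinal", "rule": "Other"}
]`

func main() {
	uni := ut.New(en.New(), en.New()) // assumed top-level constructor

	// ImportByReader parses the JSON and registers each entry with the
	// matching locale; Import(FormatJSON, "some/dir") works the same way
	// against files or directories on disk.
	if err := uni.ImportByReader(ut.FormatJSON, strings.NewReader(translations)); err != nil {
		panic(err)
	}

	trans, _ := uni.GetTranslator("en")
	s, _ := trans.C("days-left", 1, 0, trans.FmtNumber(1, 0))
	fmt.Println(s) // You have 1 day left.
}
```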
Help With Tests
---------------
To anyone interested in helping or contributing: I could sure use some help creating tests for each language.
Please see issue [here](https://github.com/go-playground/locales/issues/1) for details.
License
------
Distributed under MIT License, please see license file in code for more details.

View File

@ -1,148 +0,0 @@
package ut
import (
"errors"
"fmt"
"github.com/go-playground/locales"
)
var (
// ErrUnknowTranslation indicates the translation could not be found
ErrUnknowTranslation = errors.New("Unknown Translation")
)
var _ error = new(ErrConflictingTranslation)
var _ error = new(ErrRangeTranslation)
var _ error = new(ErrOrdinalTranslation)
var _ error = new(ErrCardinalTranslation)
var _ error = new(ErrMissingPluralTranslation)
var _ error = new(ErrExistingTranslator)
// ErrExistingTranslator is the error representing a conflicting translator
type ErrExistingTranslator struct {
locale string
}
// Error returns ErrExistingTranslator's internal error text
func (e *ErrExistingTranslator) Error() string {
return fmt.Sprintf("error: conflicting translator for locale '%s'", e.locale)
}
// ErrConflictingTranslation is the error representing a conflicting translation
type ErrConflictingTranslation struct {
locale string
key interface{}
rule locales.PluralRule
text string
}
// Error returns ErrConflictingTranslation's internal error text
func (e *ErrConflictingTranslation) Error() string {
if _, ok := e.key.(string); !ok {
return fmt.Sprintf("error: conflicting key '%#v' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale)
}
return fmt.Sprintf("error: conflicting key '%s' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale)
}
// ErrRangeTranslation is the error representing a range translation error
type ErrRangeTranslation struct {
text string
}
// Error returns ErrRangeTranslation's internal error text
func (e *ErrRangeTranslation) Error() string {
return e.text
}
// ErrOrdinalTranslation is the error representing an ordinal translation error
type ErrOrdinalTranslation struct {
text string
}
// Error returns ErrOrdinalTranslation's internal error text
func (e *ErrOrdinalTranslation) Error() string {
return e.text
}
// ErrCardinalTranslation is the error representing a cardinal translation error
type ErrCardinalTranslation struct {
text string
}
// Error returns ErrCardinalTranslation's internal error text
func (e *ErrCardinalTranslation) Error() string {
return e.text
}
// ErrMissingPluralTranslation is the error signifying a missing translation given
// the locales plural rules.
type ErrMissingPluralTranslation struct {
locale string
key interface{}
rule locales.PluralRule
translationType string
}
// Error returns ErrMissingPluralTranslation's internal error text
func (e *ErrMissingPluralTranslation) Error() string {
if _, ok := e.key.(string); !ok {
return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%#v' and locale '%s'", e.translationType, e.rule, e.key, e.locale)
}
return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%s' and locale '%s'", e.translationType, e.rule, e.key, e.locale)
}
// ErrMissingBracket is the error representing a missing bracket in a translation
// eg. This is a {0 <-- missing ending '}'
type ErrMissingBracket struct {
locale string
key interface{}
text string
}
// Error returns ErrMissingBracket error message
func (e *ErrMissingBracket) Error() string {
return fmt.Sprintf("error: missing bracket '{}', in translation. locale: '%s' key: '%v' text: '%s'", e.locale, e.key, e.text)
}
// ErrBadParamSyntax is the error representing a bad parameter definition in a translation
// eg. This is a {must-be-int}
type ErrBadParamSyntax struct {
locale string
param string
key interface{}
text string
}
// Error returns ErrBadParamSyntax error message
func (e *ErrBadParamSyntax) Error() string {
return fmt.Sprintf("error: bad parameter syntax, missing parameter '%s' in translation. locale: '%s' key: '%v' text: '%s'", e.param, e.locale, e.key, e.text)
}
// import/export errors
// ErrMissingLocale is the error representing an expected locale that could
// not be found aka locale not registered with the UniversalTranslator Instance
type ErrMissingLocale struct {
locale string
}
// Error returns ErrMissingLocale's internal error text
func (e *ErrMissingLocale) Error() string {
return fmt.Sprintf("error: locale '%s' not registered.", e.locale)
}
// ErrBadPluralDefinition is the error representing an incorrect plural definition
// usually found within translations defined within files during the import process.
type ErrBadPluralDefinition struct {
tl translation
}
// Error returns ErrBadPluralDefinition's internal error text
func (e *ErrBadPluralDefinition) Error() string {
return fmt.Sprintf("error: bad plural definition '%#v'", e.tl)
}

View File

@ -1,274 +0,0 @@
package ut
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"io"
"github.com/go-playground/locales"
)
type translation struct {
Locale string `json:"locale"`
Key interface{} `json:"key"` // either string or integer
Translation string `json:"trans"`
PluralType string `json:"type,omitempty"`
PluralRule string `json:"rule,omitempty"`
OverrideExisting bool `json:"override,omitempty"`
}
const (
cardinalType = "Cardinal"
ordinalType = "Ordinal"
rangeType = "Range"
)
// ImportExportFormat is the format of the file import or export
type ImportExportFormat uint8
// supported Export Formats
const (
FormatJSON ImportExportFormat = iota
)
// Export writes the translations out to a file on disk.
//
// NOTE: this currently only works with string or int translations keys.
func (t *UniversalTranslator) Export(format ImportExportFormat, dirname string) error {
_, err := os.Stat(dirname)
fmt.Println(dirname, err, os.IsNotExist(err))
if err != nil {
if !os.IsNotExist(err) {
return err
}
if err = os.MkdirAll(dirname, 0744); err != nil {
return err
}
}
// build up translations
var trans []translation
var b []byte
var ext string
for _, locale := range t.translators {
for k, v := range locale.(*translator).translations {
trans = append(trans, translation{
Locale: locale.Locale(),
Key: k,
Translation: v.text,
})
}
for k, pluralTrans := range locale.(*translator).cardinalTanslations {
for i, plural := range pluralTrans {
// leave enough for all plural rules
// but not all are set for all languages.
if plural == nil {
continue
}
trans = append(trans, translation{
Locale: locale.Locale(),
Key: k.(string),
Translation: plural.text,
PluralType: cardinalType,
PluralRule: locales.PluralRule(i).String(),
})
}
}
for k, pluralTrans := range locale.(*translator).ordinalTanslations {
for i, plural := range pluralTrans {
// leave enough for all plural rules
// but not all are set for all languages.
if plural == nil {
continue
}
trans = append(trans, translation{
Locale: locale.Locale(),
Key: k.(string),
Translation: plural.text,
PluralType: ordinalType,
PluralRule: locales.PluralRule(i).String(),
})
}
}
for k, pluralTrans := range locale.(*translator).rangeTanslations {
for i, plural := range pluralTrans {
// leave enough for all plural rules
// but not all are set for all languages.
if plural == nil {
continue
}
trans = append(trans, translation{
Locale: locale.Locale(),
Key: k.(string),
Translation: plural.text,
PluralType: rangeType,
PluralRule: locales.PluralRule(i).String(),
})
}
}
switch format {
case FormatJSON:
b, err = json.MarshalIndent(trans, "", " ")
ext = ".json"
}
if err != nil {
return err
}
err = ioutil.WriteFile(filepath.Join(dirname, fmt.Sprintf("%s%s", locale.Locale(), ext)), b, 0644)
if err != nil {
return err
}
trans = trans[0:0]
}
return nil
}
// Import reads the translations out of a file or directory on disk.
//
// NOTE: this currently only works with string or int translations keys.
func (t *UniversalTranslator) Import(format ImportExportFormat, dirnameOrFilename string) error {
fi, err := os.Stat(dirnameOrFilename)
if err != nil {
return err
}
processFn := func(filename string) error {
f, err := os.Open(filename)
if err != nil {
return err
}
defer f.Close()
return t.ImportByReader(format, f)
}
if !fi.IsDir() {
return processFn(dirnameOrFilename)
}
// recursively go through directory
walker := func(path string, info os.FileInfo, err error) error {
if info.IsDir() {
return nil
}
switch format {
case FormatJSON:
// skip non JSON files
if filepath.Ext(info.Name()) != ".json" {
return nil
}
}
return processFn(path)
}
return filepath.Walk(dirnameOrFilename, walker)
}
// ImportByReader imports the translations found within the contents read from the supplied reader.
//
// NOTE: generally used when assets have been embedded into the binary and are already in memory.
func (t *UniversalTranslator) ImportByReader(format ImportExportFormat, reader io.Reader) error {
b, err := ioutil.ReadAll(reader)
if err != nil {
return err
}
var trans []translation
switch format {
case FormatJSON:
err = json.Unmarshal(b, &trans)
}
if err != nil {
return err
}
for _, tl := range trans {
locale, found := t.FindTranslator(tl.Locale)
if !found {
return &ErrMissingLocale{locale: tl.Locale}
}
pr := stringToPR(tl.PluralRule)
if pr == locales.PluralRuleUnknown {
err = locale.Add(tl.Key, tl.Translation, tl.OverrideExisting)
if err != nil {
return err
}
continue
}
switch tl.PluralType {
case cardinalType:
err = locale.AddCardinal(tl.Key, tl.Translation, pr, tl.OverrideExisting)
case ordinalType:
err = locale.AddOrdinal(tl.Key, tl.Translation, pr, tl.OverrideExisting)
case rangeType:
err = locale.AddRange(tl.Key, tl.Translation, pr, tl.OverrideExisting)
default:
return &ErrBadPluralDefinition{tl: tl}
}
if err != nil {
return err
}
}
return nil
}
func stringToPR(s string) locales.PluralRule {
switch s {
case "One":
return locales.PluralRuleOne
case "Two":
return locales.PluralRuleTwo
case "Few":
return locales.PluralRuleFew
case "Many":
return locales.PluralRuleMany
case "Other":
return locales.PluralRuleOther
default:
return locales.PluralRuleUnknown
}
}

Binary file not shown.

Before: 16 KiB

View File

@ -1,420 +0,0 @@
package ut
import (
"fmt"
"strconv"
"strings"
"github.com/go-playground/locales"
)
const (
paramZero = "{0}"
paramOne = "{1}"
unknownTranslation = ""
)
// Translator is universal translators
// translator instance which is a thin wrapper
// around locales.Translator instance providing
// some extra functionality
type Translator interface {
locales.Translator
// adds a normal translation for a particular language/locale
// {#} is the only replacement type accepted and are ad infinitum
// eg. one: '{0} day left' other: '{0} days left'
Add(key interface{}, text string, override bool) error
// adds a cardinal plural translation for a particular language/locale
// {0} is the only replacement type accepted and only one variable is accepted as
// multiple cannot be used for a plural rule determination, unless it is a range;
// see AddRange below.
// eg. in locale 'en' one: '{0} day left' other: '{0} days left'
AddCardinal(key interface{}, text string, rule locales.PluralRule, override bool) error
// adds an ordinal plural translation for a particular language/locale
// {0} is the only replacement type accepted and only one variable is accepted as
// multiple cannot be used for a plural rule determination, unless it is a range;
// see AddRange below.
// eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring'
// - 1st, 2nd, 3rd...
AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error
// adds a range plural translation for a particular language/locale
// {0} and {1} are the only replacement types accepted and only these are accepted.
// eg. in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left'
AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error
// creates the translation for the locale given the 'key' and params passed in
T(key interface{}, params ...string) (string, error)
// creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments
// and param passed in
C(key interface{}, num float64, digits uint64, param string) (string, error)
// creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments
// and param passed in
O(key interface{}, num float64, digits uint64, param string) (string, error)
// creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and
// 'digit2' arguments and 'param1' and 'param2' passed in
R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error)
// VerifyTranslations checks to ensures that no plural rules have been
// missed within the translations.
VerifyTranslations() error
}
var _ Translator = new(translator)
var _ locales.Translator = new(translator)
type translator struct {
locales.Translator
translations map[interface{}]*transText
cardinalTanslations map[interface{}][]*transText // array index is mapped to locales.PluralRule index + the locales.PluralRuleUnknown
ordinalTanslations map[interface{}][]*transText
rangeTanslations map[interface{}][]*transText
}
type transText struct {
text string
indexes []int
}
func newTranslator(trans locales.Translator) Translator {
return &translator{
Translator: trans,
translations: make(map[interface{}]*transText), // translation text broken up by byte index
cardinalTanslations: make(map[interface{}][]*transText),
ordinalTanslations: make(map[interface{}][]*transText),
rangeTanslations: make(map[interface{}][]*transText),
}
}
// Add adds a normal translation for a particular language/locale
// {#} is the only replacement type accepted and are ad infinitum
// eg. one: '{0} day left' other: '{0} days left'
func (t *translator) Add(key interface{}, text string, override bool) error {
if _, ok := t.translations[key]; ok && !override {
return &ErrConflictingTranslation{locale: t.Locale(), key: key, text: text}
}
lb := strings.Count(text, "{")
rb := strings.Count(text, "}")
if lb != rb {
return &ErrMissingBracket{locale: t.Locale(), key: key, text: text}
}
trans := &transText{
text: text,
}
var idx int
for i := 0; i < lb; i++ {
s := "{" + strconv.Itoa(i) + "}"
idx = strings.Index(text, s)
if idx == -1 {
return &ErrBadParamSyntax{locale: t.Locale(), param: s, key: key, text: text}
}
trans.indexes = append(trans.indexes, idx)
trans.indexes = append(trans.indexes, idx+len(s))
}
t.translations[key] = trans
return nil
}
// AddCardinal adds a cardinal plural translation for a particular language/locale
// {0} is the only replacement type accepted and only one variable is accepted as
// multiple cannot be used for a plural rule determination, unless it is a range;
// see AddRange below.
// eg. in locale 'en' one: '{0} day left' other: '{0} days left'
func (t *translator) AddCardinal(key interface{}, text string, rule locales.PluralRule, override bool) error {
var verified bool
// verify plural rule exists for locale
for _, pr := range t.PluralsCardinal() {
if pr == rule {
verified = true
break
}
}
if !verified {
return &ErrCardinalTranslation{text: fmt.Sprintf("error: cardinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)}
}
tarr, ok := t.cardinalTanslations[key]
if ok {
// verify not adding a conflicting record
if len(tarr) > 0 && tarr[rule] != nil && !override {
return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text}
}
} else {
tarr = make([]*transText, 7, 7)
t.cardinalTanslations[key] = tarr
}
trans := &transText{
text: text,
indexes: make([]int, 2, 2),
}
tarr[rule] = trans
idx := strings.Index(text, paramZero)
if idx == -1 {
tarr[rule] = nil
return &ErrCardinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddCardinal'. locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)}
}
trans.indexes[0] = idx
trans.indexes[1] = idx + len(paramZero)
return nil
}
// AddOrdinal adds an ordinal plural translation for a particular language/locale
// {0} is the only replacement type accepted and only one variable is accepted as
// multiple cannot be used for a plural rule determination, unless it is a range;
// see AddRange below.
// eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring' - 1st, 2nd, 3rd...
func (t *translator) AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error {
var verified bool
// verify plural rule exists for locale
for _, pr := range t.PluralsOrdinal() {
if pr == rule {
verified = true
break
}
}
if !verified {
return &ErrOrdinalTranslation{text: fmt.Sprintf("error: ordinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)}
}
tarr, ok := t.ordinalTanslations[key]
if ok {
// verify not adding a conflicting record
if len(tarr) > 0 && tarr[rule] != nil && !override {
return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text}
}
} else {
tarr = make([]*transText, 7, 7)
t.ordinalTanslations[key] = tarr
}
trans := &transText{
text: text,
indexes: make([]int, 2, 2),
}
tarr[rule] = trans
idx := strings.Index(text, paramZero)
if idx == -1 {
tarr[rule] = nil
return &ErrOrdinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddOrdinal'. locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)}
}
trans.indexes[0] = idx
trans.indexes[1] = idx + len(paramZero)
return nil
}
// AddRange adds a range plural translation for a particular language/locale
// {0} and {1} are the only replacement types accepted and only these are accepted.
// eg. in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left'
func (t *translator) AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error {
var verified bool
// verify plural rule exists for locale
for _, pr := range t.PluralsRange() {
if pr == rule {
verified = true
break
}
}
if !verified {
return &ErrRangeTranslation{text: fmt.Sprintf("error: range plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)}
}
tarr, ok := t.rangeTanslations[key]
if ok {
// verify not adding a conflicting record
if len(tarr) > 0 && tarr[rule] != nil && !override {
return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text}
}
} else {
tarr = make([]*transText, 7, 7)
t.rangeTanslations[key] = tarr
}
trans := &transText{
text: text,
indexes: make([]int, 4, 4),
}
tarr[rule] = trans
idx := strings.Index(text, paramZero)
if idx == -1 {
tarr[rule] = nil
return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, are you sure you're adding a Range Translation? locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)}
}
trans.indexes[0] = idx
trans.indexes[1] = idx + len(paramZero)
idx = strings.Index(text, paramOne)
if idx == -1 {
tarr[rule] = nil
return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, a Range Translation requires two parameters. locale: '%s' key: '%v' text: '%s'", paramOne, t.Locale(), key, text)}
}
trans.indexes[2] = idx
trans.indexes[3] = idx + len(paramOne)
return nil
}
// T creates the translation for the locale given the 'key' and params passed in
func (t *translator) T(key interface{}, params ...string) (string, error) {
trans, ok := t.translations[key]
if !ok {
return unknownTranslation, ErrUnknowTranslation
}
b := make([]byte, 0, 64)
var start, end, count int
for i := 0; i < len(trans.indexes); i++ {
end = trans.indexes[i]
b = append(b, trans.text[start:end]...)
b = append(b, params[count]...)
i++
start = trans.indexes[i]
count++
}
b = append(b, trans.text[start:]...)
return string(b), nil
}
// C creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in
func (t *translator) C(key interface{}, num float64, digits uint64, param string) (string, error) {
tarr, ok := t.cardinalTanslations[key]
if !ok {
return unknownTranslation, ErrUnknowTranslation
}
rule := t.CardinalPluralRule(num, digits)
trans := tarr[rule]
b := make([]byte, 0, 64)
b = append(b, trans.text[:trans.indexes[0]]...)
b = append(b, param...)
b = append(b, trans.text[trans.indexes[1]:]...)
return string(b), nil
}
// O creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in
func (t *translator) O(key interface{}, num float64, digits uint64, param string) (string, error) {
tarr, ok := t.ordinalTanslations[key]
if !ok {
return unknownTranslation, ErrUnknowTranslation
}
rule := t.OrdinalPluralRule(num, digits)
trans := tarr[rule]
b := make([]byte, 0, 64)
b = append(b, trans.text[:trans.indexes[0]]...)
b = append(b, param...)
b = append(b, trans.text[trans.indexes[1]:]...)
return string(b), nil
}
// R creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and 'digit2' arguments
// and 'param1' and 'param2' passed in
func (t *translator) R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error) {
tarr, ok := t.rangeTanslations[key]
if !ok {
return unknownTranslation, ErrUnknowTranslation
}
rule := t.RangePluralRule(num1, digits1, num2, digits2)
trans := tarr[rule]
b := make([]byte, 0, 64)
b = append(b, trans.text[:trans.indexes[0]]...)
b = append(b, param1...)
b = append(b, trans.text[trans.indexes[1]:trans.indexes[2]]...)
b = append(b, param2...)
b = append(b, trans.text[trans.indexes[3]:]...)
return string(b), nil
}
// VerifyTranslations checks to ensures that no plural rules have been
// missed within the translations.
func (t *translator) VerifyTranslations() error {
for k, v := range t.cardinalTanslations {
for _, rule := range t.PluralsCardinal() {
if v[rule] == nil {
return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "plural", rule: rule, key: k}
}
}
}
for k, v := range t.ordinalTanslations {
for _, rule := range t.PluralsOrdinal() {
if v[rule] == nil {
return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "ordinal", rule: rule, key: k}
}
}
}
for k, v := range t.rangeTanslations {
for _, rule := range t.PluralsRange() {
if v[rule] == nil {
return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "range", rule: rule, key: k}
}
}
}
return nil
}
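
// The cardinal, ordinal and range helpers above are normally reached through a
// UniversalTranslator (next file). Below is a minimal usage sketch, assuming the
// companion github.com/go-playground/locales and locales/en packages; the keys and
// texts ("days-left", "days-range") are made up for illustration, and the assumption
// that 'en' uses only the 'other' range rule is noted in the comments.
package main

import (
	"fmt"

	"github.com/go-playground/locales"
	"github.com/go-playground/locales/en"
	ut "github.com/go-playground/universal-translator"
)

func main() {
	uni := ut.New(en.New(), en.New())
	trans, _ := uni.GetTranslator("en")

	// Cardinal plurals: register one text per plural rule the locale supports.
	trans.AddCardinal("days-left", "{0} day left", locales.PluralRuleOne, false)
	trans.AddCardinal("days-left", "{0} days left", locales.PluralRuleOther, false)

	msg, _ := trans.C("days-left", 1, 0, "1")
	fmt.Println(msg) // 1 day left

	// Ordinal plurals work the same way via AddOrdinal/O ("1st", "2nd", ...).

	// Range plurals: {0} and {1} are both required; 'en' is assumed to define
	// only the 'other' range rule here.
	trans.AddRange("days-range", "{0}-{1} days left", locales.PluralRuleOther, false)

	msg, _ = trans.R("days-range", 1, 0, 5, 0, "1", "5")
	fmt.Println(msg) // 1-5 days left
}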

View File

@ -1,113 +0,0 @@
package ut
import (
"strings"
"github.com/go-playground/locales"
)
// UniversalTranslator holds all locale & translation data
type UniversalTranslator struct {
translators map[string]Translator
fallback Translator
}
// New returns a new UniversalTranslator instance set with
// the fallback locale and locales it should support
func New(fallback locales.Translator, supportedLocales ...locales.Translator) *UniversalTranslator {
t := &UniversalTranslator{
translators: make(map[string]Translator),
}
for _, v := range supportedLocales {
trans := newTranslator(v)
t.translators[strings.ToLower(trans.Locale())] = trans
if fallback.Locale() == v.Locale() {
t.fallback = trans
}
}
if t.fallback == nil && fallback != nil {
t.fallback = newTranslator(fallback)
}
return t
}
// FindTranslator tries to find a Translator based on an array of locales
// and returns the first one it can find, otherwise returns the
// fallback translator.
func (t *UniversalTranslator) FindTranslator(locales ...string) (trans Translator, found bool) {
for _, locale := range locales {
if trans, found = t.translators[strings.ToLower(locale)]; found {
return
}
}
return t.fallback, false
}
// GetTranslator returns the specified translator for the given locale,
// or fallback if not found
func (t *UniversalTranslator) GetTranslator(locale string) (trans Translator, found bool) {
if trans, found = t.translators[strings.ToLower(locale)]; found {
return
}
return t.fallback, false
}
// GetFallback returns the fallback locale
func (t *UniversalTranslator) GetFallback() Translator {
return t.fallback
}
// AddTranslator adds the supplied translator, if it already exists the override param
// will be checked and if false an error will be returned, otherwise the translator will be
// overridden; if the fallback matches the supplied translator it will be overridden as well
// NOTE: this is normally only used when a translator is embedded within a library
func (t *UniversalTranslator) AddTranslator(translator locales.Translator, override bool) error {
lc := strings.ToLower(translator.Locale())
_, ok := t.translators[lc]
if ok && !override {
return &ErrExistingTranslator{locale: translator.Locale()}
}
trans := newTranslator(translator)
if t.fallback.Locale() == translator.Locale() {
// because it's optional to have a fallback, I don't impose that limitation
// don't know why you wouldn't but...
if !override {
return &ErrExistingTranslator{locale: translator.Locale()}
}
t.fallback = trans
}
t.translators[lc] = trans
return nil
}
// VerifyTranslations runs through all locales and identifies any issues
// eg. missing plural rules for a locale
func (t *UniversalTranslator) VerifyTranslations() (err error) {
for _, trans := range t.translators {
err = trans.VerifyTranslations()
if err != nil {
return
}
}
return
}
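
// A minimal sketch of the UniversalTranslator itself, assuming the real locales/en
// and locales/fr packages; locale strings are matched case-insensitively and unknown
// locales fall back as documented above.
package main

import (
	"fmt"

	"github.com/go-playground/locales/en"
	"github.com/go-playground/locales/fr"
	ut "github.com/go-playground/universal-translator"
)

func main() {
	// "en" is both the fallback and a supported locale; "fr" is also supported.
	uni := ut.New(en.New(), en.New(), fr.New())

	// FindTranslator walks the given locales in order (e.g. parsed from an
	// Accept-Language header) and returns the first match.
	trans, found := uni.FindTranslator("de", "fr")
	fmt.Println(trans.Locale(), found) // fr true

	// Unknown locales return the fallback translator and found == false.
	trans, found = uni.FindTranslator("xx")
	fmt.Println(trans.Locale(), found) // en false

	// VerifyTranslations reports any plural rule left unregistered for a key.
	if err := uni.VerifyTranslations(); err != nil {
		fmt.Println(err)
	}
}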

View File

@ -1,27 +0,0 @@
Copyright (c) 2017 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,89 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// Package cmpopts provides common options for the cmp package.
package cmpopts
import (
"math"
"reflect"
"github.com/google/go-cmp/cmp"
)
func equateAlways(_, _ interface{}) bool { return true }
// EquateEmpty returns a Comparer option that determines all maps and slices
// with a length of zero to be equal, regardless of whether they are nil.
//
// EquateEmpty can be used in conjunction with SortSlices and SortMaps.
func EquateEmpty() cmp.Option {
return cmp.FilterValues(isEmpty, cmp.Comparer(equateAlways))
}
func isEmpty(x, y interface{}) bool {
vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
return (x != nil && y != nil && vx.Type() == vy.Type()) &&
(vx.Kind() == reflect.Slice || vx.Kind() == reflect.Map) &&
(vx.Len() == 0 && vy.Len() == 0)
}
// EquateApprox returns a Comparer option that determines float32 or float64
// values to be equal if they are within a relative fraction or absolute margin.
// This option is not used when either x or y is NaN or infinite.
//
// The fraction determines that the difference of two values must be within the
// smaller fraction of the two values, while the margin determines that the two
// values must be within some absolute margin.
// To express only a fraction or only a margin, use 0 for the other parameter.
// The fraction and margin must be non-negative.
//
// The mathematical expression used is equivalent to:
// |x-y| ≤ max(fraction*min(|x|, |y|), margin)
//
// EquateApprox can be used in conjunction with EquateNaNs.
func EquateApprox(fraction, margin float64) cmp.Option {
if margin < 0 || fraction < 0 || math.IsNaN(margin) || math.IsNaN(fraction) {
panic("margin or fraction must be a non-negative number")
}
a := approximator{fraction, margin}
return cmp.Options{
cmp.FilterValues(areRealF64s, cmp.Comparer(a.compareF64)),
cmp.FilterValues(areRealF32s, cmp.Comparer(a.compareF32)),
}
}
type approximator struct{ frac, marg float64 }
func areRealF64s(x, y float64) bool {
return !math.IsNaN(x) && !math.IsNaN(y) && !math.IsInf(x, 0) && !math.IsInf(y, 0)
}
func areRealF32s(x, y float32) bool {
return areRealF64s(float64(x), float64(y))
}
func (a approximator) compareF64(x, y float64) bool {
relMarg := a.frac * math.Min(math.Abs(x), math.Abs(y))
return math.Abs(x-y) <= math.Max(a.marg, relMarg)
}
func (a approximator) compareF32(x, y float32) bool {
return a.compareF64(float64(x), float64(y))
}
// EquateNaNs returns a Comparer option that determines float32 and float64
// NaN values to be equal.
//
// EquateNaNs can be used in conjunction with EquateApprox.
func EquateNaNs() cmp.Option {
return cmp.Options{
cmp.FilterValues(areNaNsF64s, cmp.Comparer(equateAlways)),
cmp.FilterValues(areNaNsF32s, cmp.Comparer(equateAlways)),
}
}
func areNaNsF64s(x, y float64) bool {
return math.IsNaN(x) && math.IsNaN(y)
}
func areNaNsF32s(x, y float32) bool {
return areNaNsF64s(float64(x), float64(y))
}
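
// A minimal sketch of the options above in use with cmp.Equal; the numeric values
// simply work through the |x-y| <= max(fraction*min(|x|, |y|), margin) rule
// documented for EquateApprox.
package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	// |1.01-1.02| = 0.01 <= max(0.01*min(1.01, 1.02), 0) = 0.0101, so equal.
	fmt.Println(cmp.Equal(1.01, 1.02, cmpopts.EquateApprox(0.01, 0))) // true

	// With only an absolute margin, the same pair needs margin >= 0.01.
	fmt.Println(cmp.Equal(1.01, 1.02, cmpopts.EquateApprox(0, 0.001))) // false

	// NaN != NaN under ==, but EquateNaNs treats NaNs as equal.
	fmt.Println(cmp.Equal(math.NaN(), math.NaN(), cmpopts.EquateNaNs())) // true

	// EquateEmpty makes a nil slice and an empty slice compare equal.
	fmt.Println(cmp.Equal([]int(nil), []int{}, cmpopts.EquateEmpty())) // true
}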

View File

@ -1,145 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
package cmpopts
import (
"fmt"
"reflect"
"unicode"
"unicode/utf8"
"github.com/google/go-cmp/cmp"
)
// IgnoreFields returns an Option that ignores exported fields of the
// given names on a single struct type.
// The struct type is specified by passing in a value of that type.
//
// The name may be a dot-delimited string (e.g., "Foo.Bar") to ignore a
// specific sub-field that is embedded or nested within the parent struct.
//
// This does not handle unexported fields; use IgnoreUnexported instead.
func IgnoreFields(typ interface{}, names ...string) cmp.Option {
sf := newStructFilter(typ, names...)
return cmp.FilterPath(sf.filter, cmp.Ignore())
}
// IgnoreTypes returns an Option that ignores all values assignable to
// certain types, which are specified by passing in a value of each type.
func IgnoreTypes(typs ...interface{}) cmp.Option {
tf := newTypeFilter(typs...)
return cmp.FilterPath(tf.filter, cmp.Ignore())
}
type typeFilter []reflect.Type
func newTypeFilter(typs ...interface{}) (tf typeFilter) {
for _, typ := range typs {
t := reflect.TypeOf(typ)
if t == nil {
// This occurs if someone tries to pass in sync.Locker(nil)
panic("cannot determine type; consider using IgnoreInterfaces")
}
tf = append(tf, t)
}
return tf
}
func (tf typeFilter) filter(p cmp.Path) bool {
if len(p) < 1 {
return false
}
t := p.Last().Type()
for _, ti := range tf {
if t.AssignableTo(ti) {
return true
}
}
return false
}
// IgnoreInterfaces returns an Option that ignores all values or references of
// values assignable to certain interface types. These interfaces are specified
// by passing in an anonymous struct with the interface types embedded in it.
// For example, to ignore sync.Locker, pass in struct{sync.Locker}{}.
func IgnoreInterfaces(ifaces interface{}) cmp.Option {
tf := newIfaceFilter(ifaces)
return cmp.FilterPath(tf.filter, cmp.Ignore())
}
type ifaceFilter []reflect.Type
func newIfaceFilter(ifaces interface{}) (tf ifaceFilter) {
t := reflect.TypeOf(ifaces)
if ifaces == nil || t.Name() != "" || t.Kind() != reflect.Struct {
panic("input must be an anonymous struct")
}
for i := 0; i < t.NumField(); i++ {
fi := t.Field(i)
switch {
case !fi.Anonymous:
panic("struct cannot have named fields")
case fi.Type.Kind() != reflect.Interface:
panic("embedded field must be an interface type")
case fi.Type.NumMethod() == 0:
// This matches everything; why would you ever want this?
panic("cannot ignore empty interface")
default:
tf = append(tf, fi.Type)
}
}
return tf
}
func (tf ifaceFilter) filter(p cmp.Path) bool {
if len(p) < 1 {
return false
}
t := p.Last().Type()
for _, ti := range tf {
if t.AssignableTo(ti) {
return true
}
if t.Kind() != reflect.Ptr && reflect.PtrTo(t).AssignableTo(ti) {
return true
}
}
return false
}
// IgnoreUnexported returns an Option that only ignores the immediate unexported
// fields of a struct, including anonymous fields of unexported types.
// In particular, unexported fields within the struct's exported fields
// of struct types, including anonymous fields, will not be ignored unless the
// type of the field itself is also passed to IgnoreUnexported.
func IgnoreUnexported(typs ...interface{}) cmp.Option {
ux := newUnexportedFilter(typs...)
return cmp.FilterPath(ux.filter, cmp.Ignore())
}
type unexportedFilter struct{ m map[reflect.Type]bool }
func newUnexportedFilter(typs ...interface{}) unexportedFilter {
ux := unexportedFilter{m: make(map[reflect.Type]bool)}
for _, typ := range typs {
t := reflect.TypeOf(typ)
if t == nil || t.Kind() != reflect.Struct {
panic(fmt.Sprintf("invalid struct type: %T", typ))
}
ux.m[t] = true
}
return ux
}
func (xf unexportedFilter) filter(p cmp.Path) bool {
sf, ok := p.Index(-1).(cmp.StructField)
if !ok {
return false
}
return xf.m[p.Index(-2).Type()] && !isExported(sf.Name())
}
// isExported reports whether the identifier is exported.
func isExported(id string) bool {
r, _ := utf8.DecodeRuneInString(id)
return unicode.IsUpper(r)
}
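
// A minimal sketch of IgnoreFields and IgnoreUnexported; the User type and its
// fields are made up for illustration.
package main

import (
	"fmt"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

type User struct {
	Name      string
	UpdatedAt time.Time
	cache     map[string]string // unexported: cmp panics on it unless ignored
}

func main() {
	x := User{Name: "lee", UpdatedAt: time.Now()}
	y := User{Name: "lee", UpdatedAt: time.Now().Add(time.Second)}

	eq := cmp.Equal(x, y,
		cmpopts.IgnoreFields(User{}, "UpdatedAt"), // skip the volatile timestamp
		cmpopts.IgnoreUnexported(User{}),          // skip the unexported cache field
	)
	fmt.Println(eq) // true: only Name is compared
}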

View File

@ -1,146 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
package cmpopts
import (
"fmt"
"reflect"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/internal/function"
)
// SortSlices returns a Transformer option that sorts all []V.
// The less function must be of the form "func(T, T) bool" which is used to
// sort any slice with element type V that is assignable to T.
//
// The less function must be:
// • Deterministic: less(x, y) == less(x, y)
// • Irreflexive: !less(x, x)
// • Transitive: if !less(x, y) and !less(y, z), then !less(x, z)
//
// The less function does not have to be "total". That is, if !less(x, y) and
// !less(y, x) for two elements x and y, their relative order is maintained.
//
// SortSlices can be used in conjunction with EquateEmpty.
func SortSlices(less interface{}) cmp.Option {
vf := reflect.ValueOf(less)
if !function.IsType(vf.Type(), function.Less) || vf.IsNil() {
panic(fmt.Sprintf("invalid less function: %T", less))
}
ss := sliceSorter{vf.Type().In(0), vf}
return cmp.FilterValues(ss.filter, cmp.Transformer("Sort", ss.sort))
}
type sliceSorter struct {
in reflect.Type // T
fnc reflect.Value // func(T, T) bool
}
func (ss sliceSorter) filter(x, y interface{}) bool {
vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
if !(x != nil && y != nil && vx.Type() == vy.Type()) ||
!(vx.Kind() == reflect.Slice && vx.Type().Elem().AssignableTo(ss.in)) ||
(vx.Len() <= 1 && vy.Len() <= 1) {
return false
}
// Check whether the slices are already sorted to avoid an infinite
// recursion cycle applying the same transform to itself.
ok1 := sliceIsSorted(x, func(i, j int) bool { return ss.less(vx, i, j) })
ok2 := sliceIsSorted(y, func(i, j int) bool { return ss.less(vy, i, j) })
return !ok1 || !ok2
}
func (ss sliceSorter) sort(x interface{}) interface{} {
src := reflect.ValueOf(x)
dst := reflect.MakeSlice(src.Type(), src.Len(), src.Len())
for i := 0; i < src.Len(); i++ {
dst.Index(i).Set(src.Index(i))
}
sortSliceStable(dst.Interface(), func(i, j int) bool { return ss.less(dst, i, j) })
ss.checkSort(dst)
return dst.Interface()
}
func (ss sliceSorter) checkSort(v reflect.Value) {
start := -1 // Start of a sequence of equal elements.
for i := 1; i < v.Len(); i++ {
if ss.less(v, i-1, i) {
// Check that first and last elements in v[start:i] are equal.
if start >= 0 && (ss.less(v, start, i-1) || ss.less(v, i-1, start)) {
panic(fmt.Sprintf("incomparable values detected: want equal elements: %v", v.Slice(start, i)))
}
start = -1
} else if start == -1 {
start = i
}
}
}
func (ss sliceSorter) less(v reflect.Value, i, j int) bool {
vx, vy := v.Index(i), v.Index(j)
return ss.fnc.Call([]reflect.Value{vx, vy})[0].Bool()
}
// SortMaps returns a Transformer option that flattens map[K]V types to be a
// sorted []struct{K, V}. The less function must be of the form
// "func(T, T) bool" which is used to sort any map with key K that is
// assignable to T.
//
// Flattening the map into a slice has the property that cmp.Equal is able to
// use Comparers on K or the K.Equal method if it exists.
//
// The less function must be:
// • Deterministic: less(x, y) == less(x, y)
// • Irreflexive: !less(x, x)
// • Transitive: if !less(x, y) and !less(y, z), then !less(x, z)
// • Total: if x != y, then either less(x, y) or less(y, x)
//
// SortMaps can be used in conjunction with EquateEmpty.
func SortMaps(less interface{}) cmp.Option {
vf := reflect.ValueOf(less)
if !function.IsType(vf.Type(), function.Less) || vf.IsNil() {
panic(fmt.Sprintf("invalid less function: %T", less))
}
ms := mapSorter{vf.Type().In(0), vf}
return cmp.FilterValues(ms.filter, cmp.Transformer("Sort", ms.sort))
}
type mapSorter struct {
in reflect.Type // T
fnc reflect.Value // func(T, T) bool
}
func (ms mapSorter) filter(x, y interface{}) bool {
vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
return (x != nil && y != nil && vx.Type() == vy.Type()) &&
(vx.Kind() == reflect.Map && vx.Type().Key().AssignableTo(ms.in)) &&
(vx.Len() != 0 || vy.Len() != 0)
}
func (ms mapSorter) sort(x interface{}) interface{} {
src := reflect.ValueOf(x)
outType := mapEntryType(src.Type())
dst := reflect.MakeSlice(reflect.SliceOf(outType), src.Len(), src.Len())
for i, k := range src.MapKeys() {
v := reflect.New(outType).Elem()
v.Field(0).Set(k)
v.Field(1).Set(src.MapIndex(k))
dst.Index(i).Set(v)
}
sortSlice(dst.Interface(), func(i, j int) bool { return ms.less(dst, i, j) })
ms.checkSort(dst)
return dst.Interface()
}
func (ms mapSorter) checkSort(v reflect.Value) {
for i := 1; i < v.Len(); i++ {
if !ms.less(v, i-1, i) {
panic(fmt.Sprintf("partial order detected: want %v < %v", v.Index(i-1), v.Index(i)))
}
}
}
func (ms mapSorter) less(v reflect.Value, i, j int) bool {
vx, vy := v.Index(i).Field(0), v.Index(j).Field(0)
if !hasReflectStructOf {
vx, vy = vx.Elem(), vy.Elem()
}
return ms.fnc.Call([]reflect.Value{vx, vy})[0].Bool()
}
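
// A minimal sketch of SortSlices and SortMaps; the less functions and test data
// are made up for illustration.
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	// Treat []int as an unordered set by sorting both sides before comparing.
	lessInt := func(a, b int) bool { return a < b }
	fmt.Println(cmp.Equal([]int{3, 1, 2}, []int{1, 2, 3}, cmpopts.SortSlices(lessInt))) // true

	// SortMaps flattens map[string]int into a sorted []struct{K, V}, which keeps
	// the resulting diff deterministic.
	lessStr := func(a, b string) bool { return a < b }
	x := map[string]int{"a": 1, "b": 2}
	y := map[string]int{"a": 1, "b": 3}
	fmt.Println(cmp.Diff(x, y, cmpopts.SortMaps(lessStr)) != "") // true: values differ at "b"
}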

View File

@ -1,46 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// +build !go1.8
package cmpopts
import (
"reflect"
"sort"
)
const hasReflectStructOf = false
func mapEntryType(reflect.Type) reflect.Type {
return reflect.TypeOf(struct{ K, V interface{} }{})
}
func sliceIsSorted(slice interface{}, less func(i, j int) bool) bool {
return sort.IsSorted(reflectSliceSorter{reflect.ValueOf(slice), less})
}
func sortSlice(slice interface{}, less func(i, j int) bool) {
sort.Sort(reflectSliceSorter{reflect.ValueOf(slice), less})
}
func sortSliceStable(slice interface{}, less func(i, j int) bool) {
sort.Stable(reflectSliceSorter{reflect.ValueOf(slice), less})
}
type reflectSliceSorter struct {
slice reflect.Value
less func(i, j int) bool
}
func (ss reflectSliceSorter) Len() int {
return ss.slice.Len()
}
func (ss reflectSliceSorter) Less(i, j int) bool {
return ss.less(i, j)
}
func (ss reflectSliceSorter) Swap(i, j int) {
vi := ss.slice.Index(i).Interface()
vj := ss.slice.Index(j).Interface()
ss.slice.Index(i).Set(reflect.ValueOf(vj))
ss.slice.Index(j).Set(reflect.ValueOf(vi))
}

View File

@ -1,31 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// +build go1.8
package cmpopts
import (
"reflect"
"sort"
)
const hasReflectStructOf = true
func mapEntryType(t reflect.Type) reflect.Type {
return reflect.StructOf([]reflect.StructField{
{Name: "K", Type: t.Key()},
{Name: "V", Type: t.Elem()},
})
}
func sliceIsSorted(slice interface{}, less func(i, j int) bool) bool {
return sort.SliceIsSorted(slice, less)
}
func sortSlice(slice interface{}, less func(i, j int) bool) {
sort.Slice(slice, less)
}
func sortSliceStable(slice interface{}, less func(i, j int) bool) {
sort.SliceStable(slice, less)
}

View File

@ -1,182 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
package cmpopts
import (
"fmt"
"reflect"
"strings"
"github.com/google/go-cmp/cmp"
)
// filterField returns a new Option where opt is only evaluated on paths that
// include a specific exported field on a single struct type.
// The struct type is specified by passing in a value of that type.
//
// The name may be a dot-delimited string (e.g., "Foo.Bar") to select a
// specific sub-field that is embedded or nested within the parent struct.
func filterField(typ interface{}, name string, opt cmp.Option) cmp.Option {
// TODO: This is currently unexported over concerns of how helper filters
// can be composed together easily.
// TODO: Add tests for FilterField.
sf := newStructFilter(typ, name)
return cmp.FilterPath(sf.filter, opt)
}
type structFilter struct {
t reflect.Type // The root struct type to match on
ft fieldTree // Tree of fields to match on
}
func newStructFilter(typ interface{}, names ...string) structFilter {
// TODO: Perhaps allow * as a special identifier to allow ignoring any
// number of path steps until the next field match?
// This could be useful when a concrete struct gets transformed into
// an anonymous struct where it is not possible to specify that by type,
// but the transformer happens to provide guarantees about the names of
// the transformed fields.
t := reflect.TypeOf(typ)
if t == nil || t.Kind() != reflect.Struct {
panic(fmt.Sprintf("%T must be a struct", typ))
}
var ft fieldTree
for _, name := range names {
cname, err := canonicalName(t, name)
if err != nil {
panic(fmt.Sprintf("%s: %v", strings.Join(cname, "."), err))
}
ft.insert(cname)
}
return structFilter{t, ft}
}
func (sf structFilter) filter(p cmp.Path) bool {
for i, ps := range p {
if ps.Type().AssignableTo(sf.t) && sf.ft.matchPrefix(p[i+1:]) {
return true
}
}
return false
}
// fieldTree represents a set of dot-separated identifiers.
//
// For example, inserting the following selectors:
// Foo
// Foo.Bar.Baz
// Foo.Buzz
// Nuka.Cola.Quantum
//
// Results in a tree of the form:
// {sub: {
// "Foo": {ok: true, sub: {
// "Bar": {sub: {
// "Baz": {ok: true},
// }},
// "Buzz": {ok: true},
// }},
// "Nuka": {sub: {
// "Cola": {sub: {
// "Quantum": {ok: true},
// }},
// }},
// }}
type fieldTree struct {
ok bool // Whether this is a specified node
sub map[string]fieldTree // The sub-tree of fields under this node
}
// insert inserts a sequence of field accesses into the tree.
func (ft *fieldTree) insert(cname []string) {
if ft.sub == nil {
ft.sub = make(map[string]fieldTree)
}
if len(cname) == 0 {
ft.ok = true
return
}
sub := ft.sub[cname[0]]
sub.insert(cname[1:])
ft.sub[cname[0]] = sub
}
// matchPrefix reports whether any selector in the fieldTree matches
// the start of path p.
func (ft fieldTree) matchPrefix(p cmp.Path) bool {
for _, ps := range p {
switch ps := ps.(type) {
case cmp.StructField:
ft = ft.sub[ps.Name()]
if ft.ok {
return true
}
if len(ft.sub) == 0 {
return false
}
case cmp.Indirect:
default:
return false
}
}
return false
}
// canonicalName returns a list of identifiers where any struct field access
// through an embedded field is expanded to include the names of the embedded
// types themselves.
//
// For example, suppose field "Foo" is not directly in the parent struct,
// but actually from an embedded struct of type "Bar". Then, the canonical name
// of "Foo" is actually "Bar.Foo".
//
// Suppose field "Foo" is not directly in the parent struct, but actually
// a field in two different embedded structs of types "Bar" and "Baz".
// Then the selector "Foo" causes a panic since it is ambiguous which one it
// refers to. The user must specify either "Bar.Foo" or "Baz.Foo".
func canonicalName(t reflect.Type, sel string) ([]string, error) {
var name string
sel = strings.TrimPrefix(sel, ".")
if sel == "" {
return nil, fmt.Errorf("name must not be empty")
}
if i := strings.IndexByte(sel, '.'); i < 0 {
name, sel = sel, ""
} else {
name, sel = sel[:i], sel[i:]
}
// Type must be a struct or pointer to struct.
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
if t.Kind() != reflect.Struct {
return nil, fmt.Errorf("%v must be a struct", t)
}
// Find the canonical name for this current field name.
// If the field exists in an embedded struct, then it will be expanded.
if !isExported(name) {
// Disallow unexported fields:
// * To discourage people from actually touching unexported fields
// * FieldByName is buggy (https://golang.org/issue/4876)
return []string{name}, fmt.Errorf("name must be exported")
}
sf, ok := t.FieldByName(name)
if !ok {
return []string{name}, fmt.Errorf("does not exist")
}
var ss []string
for i := range sf.Index {
ss = append(ss, t.FieldByIndex(sf.Index[:i+1]).Name)
}
if sel == "" {
return ss, nil
}
ssPost, err := canonicalName(sf.Type, sel)
return append(ss, ssPost...), err
}
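
// A minimal sketch of the canonical-name expansion described above, seen through
// the exported IgnoreFields option; the Meta and Doc types are made up for
// illustration.
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

type Meta struct{ Revision int }

type Doc struct {
	Meta // embedded: the promoted "Revision" canonicalizes to "Meta.Revision"
	Body string
}

func main() {
	x := Doc{Meta: Meta{Revision: 1}, Body: "hello"}
	y := Doc{Meta: Meta{Revision: 2}, Body: "hello"}

	// Either selector should work; both expand to the same canonical field path.
	fmt.Println(cmp.Equal(x, y, cmpopts.IgnoreFields(Doc{}, "Revision")))      // true
	fmt.Println(cmp.Equal(x, y, cmpopts.IgnoreFields(Doc{}, "Meta.Revision"))) // true
}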

View File

@ -1,553 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// Package cmp determines equality of values.
//
// This package is intended to be a more powerful and safer alternative to
// reflect.DeepEqual for comparing whether two values are semantically equal.
//
// The primary features of cmp are:
//
// • When the default behavior of equality does not suit the needs of the test,
// custom equality functions can override the equality operation.
// For example, an equality function may report floats as equal so long as they
// are within some tolerance of each other.
//
// • Types that have an Equal method may use that method to determine equality.
// This allows package authors to determine the equality operation for the types
// that they define.
//
// • If no custom equality functions are used and no Equal method is defined,
// equality is determined by recursively comparing the primitive kinds on both
// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported
// fields are not compared by default; they result in panics unless suppressed
// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly compared
// using the AllowUnexported option.
package cmp
import (
"fmt"
"reflect"
"github.com/google/go-cmp/cmp/internal/diff"
"github.com/google/go-cmp/cmp/internal/function"
"github.com/google/go-cmp/cmp/internal/value"
)
// BUG(dsnet): Maps with keys containing NaN values cannot be properly compared due to
// the reflection package's inability to retrieve such entries. Equal will panic
// anytime it comes across a NaN key, but this behavior may change.
//
// See https://golang.org/issue/11104 for more details.
var nothing = reflect.Value{}
// Equal reports whether x and y are equal by recursively applying the
// following rules in the given order to x and y and all of their sub-values:
//
// • If two values are not of the same type, then they are never equal
// and the overall result is false.
//
// • Let S be the set of all Ignore, Transformer, and Comparer options that
// remain after applying all path filters, value filters, and type filters.
// If at least one Ignore exists in S, then the comparison is ignored.
// If the number of Transformer and Comparer options in S is greater than one,
// then Equal panics because it is ambiguous which option to use.
// If S contains a single Transformer, then use that to transform the current
// values and recursively call Equal on the output values.
// If S contains a single Comparer, then use that to compare the current values.
// Otherwise, evaluation proceeds to the next rule.
//
// • If the values have an Equal method of the form "(T) Equal(T) bool" or
// "(T) Equal(I) bool" where T is assignable to I, then use the result of
// x.Equal(y) even if x or y is nil.
// Otherwise, no such method exists and evaluation proceeds to the next rule.
//
// • Lastly, try to compare x and y based on their basic kinds.
// Simple kinds like booleans, integers, floats, complex numbers, strings, and
// channels are compared using the equivalent of the == operator in Go.
// Functions are only equal if they are both nil, otherwise they are unequal.
// Pointers are equal if the underlying values they point to are also equal.
// Interfaces are equal if their underlying concrete values are also equal.
//
// Structs are equal if all of their fields are equal. If a struct contains
// unexported fields, Equal panics unless the AllowUnexported option is used or
// an Ignore option (e.g., cmpopts.IgnoreUnexported) ignores that field.
//
// Arrays, slices, and maps are equal if they are both nil or both non-nil
// with the same length and the elements at each index or key are equal.
// Note that a non-nil empty slice and a nil slice are not equal.
// To equate empty slices and maps, consider using cmpopts.EquateEmpty.
// Map keys are equal according to the == operator.
// To use custom comparisons for map keys, consider using cmpopts.SortMaps.
func Equal(x, y interface{}, opts ...Option) bool {
s := newState(opts)
s.compareAny(reflect.ValueOf(x), reflect.ValueOf(y))
return s.result.Equal()
}
// Diff returns a human-readable report of the differences between two values.
// It returns an empty string if and only if Equal returns true for the same
// input values and options. The output string will use the "-" symbol to
// indicate elements removed from x, and the "+" symbol to indicate elements
// added to y.
//
// Do not depend on this output being stable.
func Diff(x, y interface{}, opts ...Option) string {
r := new(defaultReporter)
opts = Options{Options(opts), r}
eq := Equal(x, y, opts...)
d := r.String()
if (d == "") != eq {
panic("inconsistent difference and equality results")
}
return d
}
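// (See the standalone Equal/Diff sketch at the end of this file.)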
type state struct {
// These fields represent the "comparison state".
// Calling statelessCompare must not result in observable changes to these.
result diff.Result // The current result of comparison
curPath Path // The current path in the value tree
reporter reporter // Optional reporter used for difference formatting
// dynChecker triggers pseudo-random checks for option correctness.
// It is safe for statelessCompare to mutate this value.
dynChecker dynChecker
// These fields, once set by processOption, will not change.
exporters map[reflect.Type]bool // Set of structs with unexported field visibility
opts Options // List of all fundamental and filter options
}
func newState(opts []Option) *state {
s := new(state)
for _, opt := range opts {
s.processOption(opt)
}
return s
}
func (s *state) processOption(opt Option) {
switch opt := opt.(type) {
case nil:
case Options:
for _, o := range opt {
s.processOption(o)
}
case coreOption:
type filtered interface {
isFiltered() bool
}
if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() {
panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt))
}
s.opts = append(s.opts, opt)
case visibleStructs:
if s.exporters == nil {
s.exporters = make(map[reflect.Type]bool)
}
for t := range opt {
s.exporters[t] = true
}
case reporter:
if s.reporter != nil {
panic("difference reporter already registered")
}
s.reporter = opt
default:
panic(fmt.Sprintf("unknown option %T", opt))
}
}
// statelessCompare compares two values and returns the result.
// This function is stateless in that it does not alter the current result,
// or output to any registered reporters.
func (s *state) statelessCompare(vx, vy reflect.Value) diff.Result {
// We do not save and restore the curPath because all of the compareX
// methods should properly push and pop from the path.
// It is an implementation bug if the contents of curPath differs from
// when calling this function to when returning from it.
oldResult, oldReporter := s.result, s.reporter
s.result = diff.Result{} // Reset result
s.reporter = nil // Remove reporter to avoid spurious printouts
s.compareAny(vx, vy)
res := s.result
s.result, s.reporter = oldResult, oldReporter
return res
}
func (s *state) compareAny(vx, vy reflect.Value) {
// TODO: Support cyclic data structures.
// Rule 0: Differing types are never equal.
if !vx.IsValid() || !vy.IsValid() {
s.report(vx.IsValid() == vy.IsValid(), vx, vy)
return
}
if vx.Type() != vy.Type() {
s.report(false, vx, vy) // Possible for path to be empty
return
}
t := vx.Type()
if len(s.curPath) == 0 {
s.curPath.push(&pathStep{typ: t})
defer s.curPath.pop()
}
vx, vy = s.tryExporting(vx, vy)
// Rule 1: Check whether an option applies on this node in the value tree.
if s.tryOptions(vx, vy, t) {
return
}
// Rule 2: Check whether the type has a valid Equal method.
if s.tryMethod(vx, vy, t) {
return
}
// Rule 3: Recursively descend into each value's underlying kind.
switch t.Kind() {
case reflect.Bool:
s.report(vx.Bool() == vy.Bool(), vx, vy)
return
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
s.report(vx.Int() == vy.Int(), vx, vy)
return
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
s.report(vx.Uint() == vy.Uint(), vx, vy)
return
case reflect.Float32, reflect.Float64:
s.report(vx.Float() == vy.Float(), vx, vy)
return
case reflect.Complex64, reflect.Complex128:
s.report(vx.Complex() == vy.Complex(), vx, vy)
return
case reflect.String:
s.report(vx.String() == vy.String(), vx, vy)
return
case reflect.Chan, reflect.UnsafePointer:
s.report(vx.Pointer() == vy.Pointer(), vx, vy)
return
case reflect.Func:
s.report(vx.IsNil() && vy.IsNil(), vx, vy)
return
case reflect.Ptr:
if vx.IsNil() || vy.IsNil() {
s.report(vx.IsNil() && vy.IsNil(), vx, vy)
return
}
s.curPath.push(&indirect{pathStep{t.Elem()}})
defer s.curPath.pop()
s.compareAny(vx.Elem(), vy.Elem())
return
case reflect.Interface:
if vx.IsNil() || vy.IsNil() {
s.report(vx.IsNil() && vy.IsNil(), vx, vy)
return
}
if vx.Elem().Type() != vy.Elem().Type() {
s.report(false, vx.Elem(), vy.Elem())
return
}
s.curPath.push(&typeAssertion{pathStep{vx.Elem().Type()}})
defer s.curPath.pop()
s.compareAny(vx.Elem(), vy.Elem())
return
case reflect.Slice:
if vx.IsNil() || vy.IsNil() {
s.report(vx.IsNil() && vy.IsNil(), vx, vy)
return
}
fallthrough
case reflect.Array:
s.compareArray(vx, vy, t)
return
case reflect.Map:
s.compareMap(vx, vy, t)
return
case reflect.Struct:
s.compareStruct(vx, vy, t)
return
default:
panic(fmt.Sprintf("%v kind not handled", t.Kind()))
}
}
func (s *state) tryExporting(vx, vy reflect.Value) (reflect.Value, reflect.Value) {
if sf, ok := s.curPath[len(s.curPath)-1].(*structField); ok && sf.unexported {
if sf.force {
// Use unsafe pointer arithmetic to get read-write access to an
// unexported field in the struct.
vx = unsafeRetrieveField(sf.pvx, sf.field)
vy = unsafeRetrieveField(sf.pvy, sf.field)
} else {
// We are not allowed to export the value, so invalidate them
// so that tryOptions can panic later if not explicitly ignored.
vx = nothing
vy = nothing
}
}
return vx, vy
}
func (s *state) tryOptions(vx, vy reflect.Value, t reflect.Type) bool {
// If there were no FilterValues, we will not detect invalid inputs,
// so manually check for them and append invalid if necessary.
// We still evaluate the options since an ignore can override invalid.
opts := s.opts
if !vx.IsValid() || !vy.IsValid() {
opts = Options{opts, invalid{}}
}
// Evaluate all filters and apply the remaining options.
if opt := opts.filter(s, vx, vy, t); opt != nil {
opt.apply(s, vx, vy)
return true
}
return false
}
func (s *state) tryMethod(vx, vy reflect.Value, t reflect.Type) bool {
// Check if this type even has an Equal method.
m, ok := t.MethodByName("Equal")
if !ok || !function.IsType(m.Type, function.EqualAssignable) {
return false
}
eq := s.callTTBFunc(m.Func, vx, vy)
s.report(eq, vx, vy)
return true
}
func (s *state) callTRFunc(f, v reflect.Value) reflect.Value {
v = sanitizeValue(v, f.Type().In(0))
if !s.dynChecker.Next() {
return f.Call([]reflect.Value{v})[0]
}
// Run the function twice and ensure that we get the same results back.
// We run in goroutines so that the race detector (if enabled) can detect
// unsafe mutations to the input.
c := make(chan reflect.Value)
go detectRaces(c, f, v)
want := f.Call([]reflect.Value{v})[0]
if got := <-c; !s.statelessCompare(got, want).Equal() {
// To avoid false-positives with non-reflexive equality operations,
// we sanity check whether a value is equal to itself.
if !s.statelessCompare(want, want).Equal() {
return want
}
fn := getFuncName(f.Pointer())
panic(fmt.Sprintf("non-deterministic function detected: %s", fn))
}
return want
}
func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
x = sanitizeValue(x, f.Type().In(0))
y = sanitizeValue(y, f.Type().In(1))
if !s.dynChecker.Next() {
return f.Call([]reflect.Value{x, y})[0].Bool()
}
// Swapping the input arguments is sufficient to check that
// f is symmetric and deterministic.
// We run in goroutines so that the race detector (if enabled) can detect
// unsafe mutations to the input.
c := make(chan reflect.Value)
go detectRaces(c, f, y, x)
want := f.Call([]reflect.Value{x, y})[0].Bool()
if got := <-c; !got.IsValid() || got.Bool() != want {
fn := getFuncName(f.Pointer())
panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", fn))
}
return want
}
func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
var ret reflect.Value
defer func() {
recover() // Ignore panics, let the other call to f panic instead
c <- ret
}()
ret = f.Call(vs)[0]
}
// sanitizeValue converts nil interfaces of type T to those of type R,
// assuming that T is assignable to R.
// Otherwise, it returns the input value as is.
func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value {
// TODO(dsnet): Remove this hacky workaround.
// See https://golang.org/issue/22143
if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t {
return reflect.New(t).Elem()
}
return v
}
func (s *state) compareArray(vx, vy reflect.Value, t reflect.Type) {
step := &sliceIndex{pathStep{t.Elem()}, 0, 0}
s.curPath.push(step)
// Compute an edit-script for slices vx and vy.
es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result {
step.xkey, step.ykey = ix, iy
return s.statelessCompare(vx.Index(ix), vy.Index(iy))
})
// Report the entire slice as is if the arrays are of primitive kind,
// and the arrays are different enough.
isPrimitive := false
switch t.Elem().Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
isPrimitive = true
}
if isPrimitive && es.Dist() > (vx.Len()+vy.Len())/4 {
s.curPath.pop() // Pop first since we are reporting the whole slice
s.report(false, vx, vy)
return
}
// Replay the edit-script.
var ix, iy int
for _, e := range es {
switch e {
case diff.UniqueX:
step.xkey, step.ykey = ix, -1
s.report(false, vx.Index(ix), nothing)
ix++
case diff.UniqueY:
step.xkey, step.ykey = -1, iy
s.report(false, nothing, vy.Index(iy))
iy++
default:
step.xkey, step.ykey = ix, iy
if e == diff.Identity {
s.report(true, vx.Index(ix), vy.Index(iy))
} else {
s.compareAny(vx.Index(ix), vy.Index(iy))
}
ix++
iy++
}
}
s.curPath.pop()
return
}
func (s *state) compareMap(vx, vy reflect.Value, t reflect.Type) {
if vx.IsNil() || vy.IsNil() {
s.report(vx.IsNil() && vy.IsNil(), vx, vy)
return
}
// We combine and sort the two map keys so that we can perform the
// comparisons in a deterministic order.
step := &mapIndex{pathStep: pathStep{t.Elem()}}
s.curPath.push(step)
defer s.curPath.pop()
for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
step.key = k
vvx := vx.MapIndex(k)
vvy := vy.MapIndex(k)
switch {
case vvx.IsValid() && vvy.IsValid():
s.compareAny(vvx, vvy)
case vvx.IsValid() && !vvy.IsValid():
s.report(false, vvx, nothing)
case !vvx.IsValid() && vvy.IsValid():
s.report(false, nothing, vvy)
default:
// It is possible for both vvx and vvy to be invalid if the
// key contained a NaN value in it. There is no way in
// reflection to be able to retrieve these values.
// See https://golang.org/issue/11104
panic(fmt.Sprintf("%#v has map key with NaNs", s.curPath))
}
}
}
func (s *state) compareStruct(vx, vy reflect.Value, t reflect.Type) {
var vax, vay reflect.Value // Addressable versions of vx and vy
step := &structField{}
s.curPath.push(step)
defer s.curPath.pop()
for i := 0; i < t.NumField(); i++ {
vvx := vx.Field(i)
vvy := vy.Field(i)
step.typ = t.Field(i).Type
step.name = t.Field(i).Name
step.idx = i
step.unexported = !isExported(step.name)
if step.unexported {
// Defer checking of unexported fields until later to give an
// Ignore a chance to ignore the field.
if !vax.IsValid() || !vay.IsValid() {
// For unsafeRetrieveField to work, the parent struct must
// be addressable. Create a new copy of the values if
// necessary to make them addressable.
vax = makeAddressable(vx)
vay = makeAddressable(vy)
}
step.force = s.exporters[t]
step.pvx = vax
step.pvy = vay
step.field = t.Field(i)
}
s.compareAny(vvx, vvy)
}
}
// report records the result of a single comparison.
// It also calls Report if any reporter is registered.
func (s *state) report(eq bool, vx, vy reflect.Value) {
if eq {
s.result.NSame++
} else {
s.result.NDiff++
}
if s.reporter != nil {
s.reporter.Report(vx, vy, eq, s.curPath)
}
}
// dynChecker tracks the state needed to periodically perform checks that
// user provided functions are symmetric and deterministic.
// The zero value is safe for immediate use.
type dynChecker struct{ curr, next int }
// Next increments the state and reports whether a check should be performed.
//
// Checks occur every Nth function call, where N is a triangular number:
// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
// See https://en.wikipedia.org/wiki/Triangular_number
//
// This sequence ensures that the cost of checks drops significantly as
// the number of functions calls grows larger.
func (dc *dynChecker) Next() bool {
ok := dc.curr == dc.next
if ok {
dc.curr = 0
dc.next++
}
dc.curr++
return ok
}
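
// dynChecker is unexported, so its cadence is easiest to see with a small
// standalone copy of the same two-counter logic; the checker type below is a
// reimplementation for illustration only, not part of this package.
package main

import "fmt"

// checker mirrors dynChecker: a check fires whenever curr catches up to next,
// and next grows by one each time, so the gap between checks grows by one.
type checker struct{ curr, next int }

func (c *checker) Next() bool {
	ok := c.curr == c.next
	if ok {
		c.curr = 0
		c.next++
	}
	c.curr++
	return ok
}

func main() {
	var c checker
	for call := 1; call <= 20; call++ {
		if c.Next() {
			fmt.Println("check on call", call) // calls 1, 2, 4, 7, 11, 16
		}
	}
}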
// makeAddressable returns a value that is always addressable.
// It returns the input verbatim if it is already addressable,
// otherwise it creates a new value and returns an addressable copy.
func makeAddressable(v reflect.Value) reflect.Value {
if v.CanAddr() {
return v
}
vc := reflect.New(v.Type()).Elem()
vc.Set(v)
return vc
}
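
// A minimal sketch exercising the Equal and Diff entry points documented above.
// The Money type is made up to show the Equal-method rule (its equality ignores
// currency-code case), and the exact Diff output is deliberately not asserted
// since it is not guaranteed to be stable.
package main

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
)

// Money has an Equal method, so cmp.Equal uses it instead of comparing the
// struct field-by-field.
type Money struct {
	Cents    int
	Currency string
}

func (m Money) Equal(o Money) bool {
	return m.Cents == o.Cents && strings.EqualFold(m.Currency, o.Currency)
}

func main() {
	fmt.Println(cmp.Equal(Money{100, "usd"}, Money{100, "USD"})) // true via Money.Equal

	// Diff returns "" exactly when Equal reports true; otherwise "-" marks values
	// only in x and "+" marks values only in y.
	fmt.Println(cmp.Diff(Money{100, "USD"}, Money{250, "USD"}))
}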

View File

@ -1,17 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// +build !debug
package diff
var debug debugger
type debugger struct{}
func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc {
return f
}
func (debugger) Update() {}
func (debugger) Finish() {}

View File

@ -1,122 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// +build debug
package diff
import (
"fmt"
"strings"
"sync"
"time"
)
// The algorithm can be seen running in real-time by enabling debugging:
// go test -tags=debug -v
//
// Example output:
// === RUN TestDifference/#34
// ┌───────────────────────────────┐
// │ \ · · · · · · · · · · · · · · │
// │ · # · · · · · · · · · · · · · │
// │ · \ · · · · · · · · · · · · · │
// │ · · \ · · · · · · · · · · · · │
// │ · · · X # · · · · · · · · · · │
// │ · · · # \ · · · · · · · · · · │
// │ · · · · · # # · · · · · · · · │
// │ · · · · · # \ · · · · · · · · │
// │ · · · · · · · \ · · · · · · · │
// │ · · · · · · · · \ · · · · · · │
// │ · · · · · · · · · \ · · · · · │
// │ · · · · · · · · · · \ · · # · │
// │ · · · · · · · · · · · \ # # · │
// │ · · · · · · · · · · · # # # · │
// │ · · · · · · · · · · # # # # · │
// │ · · · · · · · · · # # # # # · │
// │ · · · · · · · · · · · · · · \ │
// └───────────────────────────────┘
// [.Y..M.XY......YXYXY.|]
//
// The grid represents the edit-graph where the horizontal axis represents
// list X and the vertical axis represents list Y. The start of the two lists
// is the top-left, while the ends are the bottom-right. The '·' represents
// an unexplored node in the graph. The '\' indicates that the two symbols
// from list X and Y are equal. The 'X' indicates that two symbols are similar
// (but not exactly equal) to each other. The '#' indicates that the two symbols
// are different (and not similar). The algorithm traverses this graph trying to
// make the paths starting in the top-left and the bottom-right connect.
//
// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents
// the currently established path from the forward and reverse searches,
// separated by a '|' character.
const (
updateDelay = 100 * time.Millisecond
finishDelay = 500 * time.Millisecond
ansiTerminal = true // ANSI escape codes used to move terminal cursor
)
var debug debugger
type debugger struct {
sync.Mutex
p1, p2 EditScript
fwdPath, revPath *EditScript
grid []byte
lines int
}
func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc {
dbg.Lock()
dbg.fwdPath, dbg.revPath = p1, p2
top := "┌─" + strings.Repeat("──", nx) + "┐\n"
row := "│ " + strings.Repeat("· ", nx) + "│\n"
btm := "└─" + strings.Repeat("──", nx) + "┘\n"
dbg.grid = []byte(top + strings.Repeat(row, ny) + btm)
dbg.lines = strings.Count(dbg.String(), "\n")
fmt.Print(dbg)
// Wrap the EqualFunc so that we can intercept each result.
return func(ix, iy int) (r Result) {
cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")]
for i := range cell {
cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot
}
switch r = f(ix, iy); {
case r.Equal():
cell[0] = '\\'
case r.Similar():
cell[0] = 'X'
default:
cell[0] = '#'
}
return
}
}
func (dbg *debugger) Update() {
dbg.print(updateDelay)
}
func (dbg *debugger) Finish() {
dbg.print(finishDelay)
dbg.Unlock()
}
func (dbg *debugger) String() string {
dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0]
for i := len(*dbg.revPath) - 1; i >= 0; i-- {
dbg.p2 = append(dbg.p2, (*dbg.revPath)[i])
}
return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2)
}
func (dbg *debugger) print(d time.Duration) {
if ansiTerminal {
fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor
}
fmt.Print(dbg)
time.Sleep(d)
}

View File

@ -1,363 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// Package diff implements an algorithm for producing edit-scripts.
// The edit-script is a sequence of operations needed to transform one list
// of symbols into another (or vice-versa). The edits allowed are insertions,
// deletions, and modifications. The summation of all edits is called the
// Levenshtein distance as this problem is well-known in computer science.
//
// This package prioritizes performance over accuracy. That is, the run time
// is more important than obtaining a minimal Levenshtein distance.
package diff
// EditType represents a single operation within an edit-script.
type EditType uint8
const (
// Identity indicates that a symbol pair is identical in both list X and Y.
Identity EditType = iota
// UniqueX indicates that a symbol only exists in X and not Y.
UniqueX
// UniqueY indicates that a symbol only exists in Y and not X.
UniqueY
// Modified indicates that a symbol pair is a modification of each other.
Modified
)
// EditScript represents the series of differences between two lists.
type EditScript []EditType
// String returns a human-readable string representing the edit-script where
// Identity, UniqueX, UniqueY, and Modified are represented by the
// '.', 'X', 'Y', and 'M' characters, respectively.
func (es EditScript) String() string {
b := make([]byte, len(es))
for i, e := range es {
switch e {
case Identity:
b[i] = '.'
case UniqueX:
b[i] = 'X'
case UniqueY:
b[i] = 'Y'
case Modified:
b[i] = 'M'
default:
panic("invalid edit-type")
}
}
return string(b)
}
// stats returns a histogram of the number of each type of edit operation.
func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) {
for _, e := range es {
switch e {
case Identity:
s.NI++
case UniqueX:
s.NX++
case UniqueY:
s.NY++
case Modified:
s.NM++
default:
panic("invalid edit-type")
}
}
return
}
// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if
// lists X and Y are equal.
func (es EditScript) Dist() int { return len(es) - es.stats().NI }
// LenX is the length of the X list.
func (es EditScript) LenX() int { return len(es) - es.stats().NY }
// LenY is the length of the Y list.
func (es EditScript) LenY() int { return len(es) - es.stats().NX }
// EqualFunc reports whether the symbols at indexes ix and iy are equal.
// When called by Difference, the index is guaranteed to be within nx and ny.
type EqualFunc func(ix int, iy int) Result
// Result is the result of comparison.
// NSame is the number of sub-elements that are equal.
// NDiff is the number of sub-elements that are not equal.
type Result struct{ NSame, NDiff int }
// Equal indicates whether the symbols are equal. Two symbols are equal
// if and only if NDiff == 0. If Equal, then they are also Similar.
func (r Result) Equal() bool { return r.NDiff == 0 }
// Similar indicates whether two symbols are similar and may be represented
// by using the Modified type. As a special case, we consider binary comparisons
// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar.
//
// The exact ratio of NSame to NDiff to determine similarity may change.
func (r Result) Similar() bool {
// Use NSame+1 to offset NSame so that binary comparisons are similar.
return r.NSame+1 >= r.NDiff
}
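// Worked examples of the Similar threshold above (illustrative values, not part
// of the original source):
//   Result{NSame: 1, NDiff: 2}.Similar() == true  (1+1 >= 2)
//   Result{NSame: 0, NDiff: 2}.Similar() == false (0+1 <  2)
//   Result{NSame: 0, NDiff: 1}.Similar() == true  (the binary-comparison case)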
// Difference reports whether two lists of lengths nx and ny are equal
// given the definition of equality provided as f.
//
// This function returns an edit-script, which is a sequence of operations
// needed to convert one list into the other. The following invariants for
// the edit-script are maintained:
// • eq == (es.Dist()==0)
// • nx == es.LenX()
// • ny == es.LenY()
//
// This algorithm is not guaranteed to be an optimal solution (i.e., one that
// produces an edit-script with a minimal Levenshtein distance). This algorithm
// favors performance over optimality. The exact output is not guaranteed to
// be stable and may change over time.
func Difference(nx, ny int, f EqualFunc) (es EditScript) {
// This algorithm is based on traversing what is known as an "edit-graph".
// See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
// by Eugene W. Myers. Since D can be as large as N itself, this is
// effectively O(N^2). Unlike the algorithm from that paper, we are not
// interested in the optimal path, but at least some "decent" path.
//
// For example, let X and Y be lists of symbols:
// X = [A B C A B B A]
// Y = [C B A B A C]
//
// The edit-graph can be drawn as the following:
// A B C A B B A
// ┌─────────────┐
// C │_|_|\|_|_|_|_│ 0
// B │_|\|_|_|\|\|_│ 1
// A │\|_|_|\|_|_|\│ 2
// B │_|\|_|_|\|\|_│ 3
// A │\|_|_|\|_|_|\│ 4
// C │ | |\| | | | │ 5
// └─────────────┘ 6
// 0 1 2 3 4 5 6 7
//
// List X is written along the horizontal axis, while list Y is written
// along the vertical axis. At any point on this grid, if the symbol in
// list X matches the corresponding symbol in list Y, then a '\' is drawn.
// The goal of any minimal edit-script algorithm is to find a path from the
// top-left corner to the bottom-right corner, while traveling through the
// fewest horizontal or vertical edges.
// A horizontal edge is equivalent to inserting a symbol from list X.
// A vertical edge is equivalent to inserting a symbol from list Y.
// A diagonal edge is equivalent to a matching symbol between both X and Y.
// Invariants:
// • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
// • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
//
// In general:
// • fwdFrontier.X < revFrontier.X
// • fwdFrontier.Y < revFrontier.Y
// Unless, it is time for the algorithm to terminate.
fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
fwdFrontier := fwdPath.point // Forward search frontier
revFrontier := revPath.point // Reverse search frontier
// Search budget bounds the cost of searching for better paths.
// The longest sequence of non-matching symbols that can be tolerated is
// approximately the square-root of the search budget.
searchBudget := 4 * (nx + ny) // O(n)
// The algorithm below is a greedy, meet-in-the-middle algorithm for
// computing sub-optimal edit-scripts between two lists.
//
// The algorithm is approximately as follows:
// • Searching for differences switches back-and-forth between
// a search that starts at the beginning (the top-left corner), and
// a search that starts at the end (the bottom-right corner). The goal of
// the search is connect with the search from the opposite corner.
// • As we search, we build a path in a greedy manner, where the first
// match seen is added to the path (this is sub-optimal, but provides a
// decent result in practice). When matches are found, we try the next pair
// of symbols in the lists and follow all matches as far as possible.
// • When searching for matches, we search along a diagonal going through
// the "frontier" point. If no matches are found, we advance the
// frontier towards the opposite corner.
// • This algorithm terminates when either the X coordinates or the
// Y coordinates of the forward and reverse frontier points ever intersect.
//
// This algorithm is correct even if searching only in the forward direction
// or in the reverse direction. We do both because it is commonly observed
// that two lists differ because elements were added to the front
// or end of the other list.
//
// Running the tests with the "debug" build tag prints a visualization of
// the algorithm running in real-time. This is educational for understanding
// how the algorithm works. See debug_enable.go.
f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
for {
// Forward search from the beginning.
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
break
}
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
// Search in a diagonal pattern for a match.
z := zigzag(i)
p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
switch {
case p.X >= revPath.X || p.Y < fwdPath.Y:
stop1 = true // Hit top-right corner
case p.Y >= revPath.Y || p.X < fwdPath.X:
stop2 = true // Hit bottom-left corner
case f(p.X, p.Y).Equal():
// Match found, so connect the path to this point.
fwdPath.connect(p, f)
fwdPath.append(Identity)
// Follow sequence of matches as far as possible.
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
if !f(fwdPath.X, fwdPath.Y).Equal() {
break
}
fwdPath.append(Identity)
}
fwdFrontier = fwdPath.point
stop1, stop2 = true, true
default:
searchBudget-- // Match not found
}
debug.Update()
}
// Advance the frontier towards reverse point.
if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
fwdFrontier.X++
} else {
fwdFrontier.Y++
}
// Reverse search from the end.
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
break
}
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
// Search in a diagonal pattern for a match.
z := zigzag(i)
p := point{revFrontier.X - z, revFrontier.Y + z}
switch {
case fwdPath.X >= p.X || revPath.Y < p.Y:
stop1 = true // Hit bottom-left corner
case fwdPath.Y >= p.Y || revPath.X < p.X:
stop2 = true // Hit top-right corner
case f(p.X-1, p.Y-1).Equal():
// Match found, so connect the path to this point.
revPath.connect(p, f)
revPath.append(Identity)
// Follow sequence of matches as far as possible.
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
if !f(revPath.X-1, revPath.Y-1).Equal() {
break
}
revPath.append(Identity)
}
revFrontier = revPath.point
stop1, stop2 = true, true
default:
searchBudget-- // Match not found
}
debug.Update()
}
// Advance the frontier towards forward point.
if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
revFrontier.X--
} else {
revFrontier.Y--
}
}
// Join the forward and reverse paths and then append the reverse path.
fwdPath.connect(revPath.point, f)
for i := len(revPath.es) - 1; i >= 0; i-- {
t := revPath.es[i]
revPath.es = revPath.es[:i]
fwdPath.append(t)
}
debug.Finish()
return fwdPath.es
}
type path struct {
dir int // +1 if forward, -1 if reverse
point // Leading point of the EditScript path
es EditScript
}
// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types
// to the edit-script to connect p.point to dst.
func (p *path) connect(dst point, f EqualFunc) {
if p.dir > 0 {
// Connect in forward direction.
for dst.X > p.X && dst.Y > p.Y {
switch r := f(p.X, p.Y); {
case r.Equal():
p.append(Identity)
case r.Similar():
p.append(Modified)
case dst.X-p.X >= dst.Y-p.Y:
p.append(UniqueX)
default:
p.append(UniqueY)
}
}
for dst.X > p.X {
p.append(UniqueX)
}
for dst.Y > p.Y {
p.append(UniqueY)
}
} else {
// Connect in reverse direction.
for p.X > dst.X && p.Y > dst.Y {
switch r := f(p.X-1, p.Y-1); {
case r.Equal():
p.append(Identity)
case r.Similar():
p.append(Modified)
case p.Y-dst.Y >= p.X-dst.X:
p.append(UniqueY)
default:
p.append(UniqueX)
}
}
for p.X > dst.X {
p.append(UniqueX)
}
for p.Y > dst.Y {
p.append(UniqueY)
}
}
}
func (p *path) append(t EditType) {
p.es = append(p.es, t)
switch t {
case Identity, Modified:
p.add(p.dir, p.dir)
case UniqueX:
p.add(p.dir, 0)
case UniqueY:
p.add(0, p.dir)
}
debug.Update()
}
type point struct{ X, Y int }
func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }
// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
func zigzag(x int) int {
if x&1 != 0 {
x = ^x
}
return x >> 1
}
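For intuition, here is a small standalone sketch (not part of the package; the main function is purely illustrative) showing the sequence the zigzag mapping produces as the search fans out from a frontier point:

```Go
package main

import "fmt"

// zigzag maps 0, 1, 2, 3, 4, ... to 0, -1, +1, -2, +2, ... exactly as above.
func zigzag(x int) int {
	if x&1 != 0 {
		x = ^x
	}
	return x >> 1
}

func main() {
	for i := 0; i < 7; i++ {
		fmt.Print(zigzag(i), " ") // Output: 0 -1 1 -2 2 -3 3
	}
}
```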

View File

@ -1,49 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// Package function identifies function types.
package function
import "reflect"
type funcType int
const (
_ funcType = iota
ttbFunc // func(T, T) bool
tibFunc // func(T, I) bool
trFunc // func(T) R
Equal = ttbFunc // func(T, T) bool
EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool
Transformer = trFunc // func(T) R
ValueFilter = ttbFunc // func(T, T) bool
Less = ttbFunc // func(T, T) bool
)
var boolType = reflect.TypeOf(true)
// IsType reports whether the reflect.Type is of the specified function type.
func IsType(t reflect.Type, ft funcType) bool {
if t == nil || t.Kind() != reflect.Func || t.IsVariadic() {
return false
}
ni, no := t.NumIn(), t.NumOut()
switch ft {
case ttbFunc: // func(T, T) bool
if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
return true
}
case tibFunc: // func(T, I) bool
if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType {
return true
}
case trFunc: // func(T) R
if ni == 1 && no == 1 {
return true
}
}
return false
}
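To illustrate the ttbFunc shape check, here is a hedged, self-contained sketch that re-implements the same reflect test (the function package above is internal to go-cmp and cannot be imported directly; isTTB is a made-up name):

```Go
package main

import (
	"fmt"
	"reflect"
)

// isTTB mirrors the ttbFunc check above: func(T, T) bool with matching inputs.
func isTTB(t reflect.Type) bool {
	return t != nil && t.Kind() == reflect.Func && !t.IsVariadic() &&
		t.NumIn() == 2 && t.NumOut() == 1 &&
		t.In(0) == t.In(1) && t.Out(0) == reflect.TypeOf(true)
}

func main() {
	eq := func(x, y string) bool { return x == y }
	fmt.Println(isTTB(reflect.TypeOf(eq))) // true
	fmt.Println(isTTB(reflect.TypeOf(42))) // false
}
```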

View File

@ -1,277 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// Package value provides functionality for reflect.Value types.
package value
import (
"fmt"
"reflect"
"strconv"
"strings"
"unicode"
)
var stringerIface = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
// Format formats the value v as a string.
//
// This is similar to fmt.Sprintf("%+v", v) except this:
// * Prints the type unless it can be elided
// * Avoids printing struct fields that are zero
// * Prints a nil-slice as being nil, not empty
// * Prints map entries in deterministic order
func Format(v reflect.Value, conf FormatConfig) string {
conf.printType = true
conf.followPointers = true
conf.realPointers = true
return formatAny(v, conf, nil)
}
type FormatConfig struct {
UseStringer bool // Should the String method be used if available?
printType bool // Should we print the type before the value?
PrintPrimitiveType bool // Should we print the type of primitives?
followPointers bool // Should we recursively follow pointers?
realPointers bool // Should we print the real address of pointers?
}
func formatAny(v reflect.Value, conf FormatConfig, visited map[uintptr]bool) string {
// TODO: Should this be a multi-line printout in certain situations?
if !v.IsValid() {
return "<non-existent>"
}
if conf.UseStringer && v.Type().Implements(stringerIface) && v.CanInterface() {
if (v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface) && v.IsNil() {
return "<nil>"
}
const stringerPrefix = "s" // Indicates that the String method was used
s := v.Interface().(fmt.Stringer).String()
return stringerPrefix + formatString(s)
}
switch v.Kind() {
case reflect.Bool:
return formatPrimitive(v.Type(), v.Bool(), conf)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return formatPrimitive(v.Type(), v.Int(), conf)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
if v.Type().PkgPath() == "" || v.Kind() == reflect.Uintptr {
// Unnamed uints are usually bytes or words, so use hexadecimal.
return formatPrimitive(v.Type(), formatHex(v.Uint()), conf)
}
return formatPrimitive(v.Type(), v.Uint(), conf)
case reflect.Float32, reflect.Float64:
return formatPrimitive(v.Type(), v.Float(), conf)
case reflect.Complex64, reflect.Complex128:
return formatPrimitive(v.Type(), v.Complex(), conf)
case reflect.String:
return formatPrimitive(v.Type(), formatString(v.String()), conf)
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
return formatPointer(v, conf)
case reflect.Ptr:
if v.IsNil() {
if conf.printType {
return fmt.Sprintf("(%v)(nil)", v.Type())
}
return "<nil>"
}
if visited[v.Pointer()] || !conf.followPointers {
return formatPointer(v, conf)
}
visited = insertPointer(visited, v.Pointer())
return "&" + formatAny(v.Elem(), conf, visited)
case reflect.Interface:
if v.IsNil() {
if conf.printType {
return fmt.Sprintf("%v(nil)", v.Type())
}
return "<nil>"
}
return formatAny(v.Elem(), conf, visited)
case reflect.Slice:
if v.IsNil() {
if conf.printType {
return fmt.Sprintf("%v(nil)", v.Type())
}
return "<nil>"
}
if visited[v.Pointer()] {
return formatPointer(v, conf)
}
visited = insertPointer(visited, v.Pointer())
fallthrough
case reflect.Array:
var ss []string
subConf := conf
subConf.printType = v.Type().Elem().Kind() == reflect.Interface
for i := 0; i < v.Len(); i++ {
s := formatAny(v.Index(i), subConf, visited)
ss = append(ss, s)
}
s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
if conf.printType {
return v.Type().String() + s
}
return s
case reflect.Map:
if v.IsNil() {
if conf.printType {
return fmt.Sprintf("%v(nil)", v.Type())
}
return "<nil>"
}
if visited[v.Pointer()] {
return formatPointer(v, conf)
}
visited = insertPointer(visited, v.Pointer())
var ss []string
keyConf, valConf := conf, conf
keyConf.printType = v.Type().Key().Kind() == reflect.Interface
keyConf.followPointers = false
valConf.printType = v.Type().Elem().Kind() == reflect.Interface
for _, k := range SortKeys(v.MapKeys()) {
sk := formatAny(k, keyConf, visited)
sv := formatAny(v.MapIndex(k), valConf, visited)
ss = append(ss, fmt.Sprintf("%s: %s", sk, sv))
}
s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
if conf.printType {
return v.Type().String() + s
}
return s
case reflect.Struct:
var ss []string
subConf := conf
subConf.printType = true
for i := 0; i < v.NumField(); i++ {
vv := v.Field(i)
if isZero(vv) {
continue // Elide zero value fields
}
name := v.Type().Field(i).Name
subConf.UseStringer = conf.UseStringer
s := formatAny(vv, subConf, visited)
ss = append(ss, fmt.Sprintf("%s: %s", name, s))
}
s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
if conf.printType {
return v.Type().String() + s
}
return s
default:
panic(fmt.Sprintf("%v kind not handled", v.Kind()))
}
}
func formatString(s string) string {
// Use quoted string if it is the same length as a raw string literal.
// Otherwise, attempt to use the raw string form.
qs := strconv.Quote(s)
if len(qs) == 1+len(s)+1 {
return qs
}
// Disallow newlines to ensure output is a single line.
// Only allow printable runes for readability purposes.
rawInvalid := func(r rune) bool {
return r == '`' || r == '\n' || !unicode.IsPrint(r)
}
if strings.IndexFunc(s, rawInvalid) < 0 {
return "`" + s + "`"
}
return qs
}
func formatPrimitive(t reflect.Type, v interface{}, conf FormatConfig) string {
if conf.printType && (conf.PrintPrimitiveType || t.PkgPath() != "") {
return fmt.Sprintf("%v(%v)", t, v)
}
return fmt.Sprintf("%v", v)
}
func formatPointer(v reflect.Value, conf FormatConfig) string {
p := v.Pointer()
if !conf.realPointers {
p = 0 // For deterministic printing purposes
}
s := formatHex(uint64(p))
if conf.printType {
return fmt.Sprintf("(%v)(%s)", v.Type(), s)
}
return s
}
func formatHex(u uint64) string {
var f string
switch {
case u <= 0xff:
f = "0x%02x"
case u <= 0xffff:
f = "0x%04x"
case u <= 0xffffff:
f = "0x%06x"
case u <= 0xffffffff:
f = "0x%08x"
case u <= 0xffffffffff:
f = "0x%010x"
case u <= 0xffffffffffff:
f = "0x%012x"
case u <= 0xffffffffffffff:
f = "0x%014x"
case u <= 0xffffffffffffffff:
f = "0x%016x"
}
return fmt.Sprintf(f, u)
}
// insertPointer insert p into m, allocating m if necessary.
func insertPointer(m map[uintptr]bool, p uintptr) map[uintptr]bool {
if m == nil {
m = make(map[uintptr]bool)
}
m[p] = true
return m
}
// isZero reports whether v is the zero value.
// This does not rely on Interface and so can be used on unexported fields.
func isZero(v reflect.Value) bool {
switch v.Kind() {
case reflect.Bool:
return v.Bool() == false
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Complex64, reflect.Complex128:
return v.Complex() == 0
case reflect.String:
return v.String() == ""
case reflect.UnsafePointer:
return v.Pointer() == 0
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
return v.IsNil()
case reflect.Array:
for i := 0; i < v.Len(); i++ {
if !isZero(v.Index(i)) {
return false
}
}
return true
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
if !isZero(v.Field(i)) {
return false
}
}
return true
}
return false
}
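The zero-value elision performed by Format can be sketched in isolation. The following toy program (names such as isZeroish are made up; the value package itself is internal and not importable) reproduces the idea for a few kinds:

```Go
package main

import (
	"fmt"
	"reflect"
)

// isZeroish mirrors the spirit of isZero above for a few common kinds.
func isZeroish(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.String:
		return v.String() == ""
	case reflect.Int:
		return v.Int() == 0
	case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface:
		return v.IsNil()
	}
	return false
}

func main() {
	type T struct {
		Name  string
		Count int
		Tags  []string
	}
	v := reflect.ValueOf(T{Name: "x"})
	for i := 0; i < v.NumField(); i++ {
		if isZeroish(v.Field(i)) {
			continue // elide zero-value fields, as Format does
		}
		fmt.Printf("%s: %v\n", v.Type().Field(i).Name, v.Field(i)) // prints "Name: x"
	}
}
```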

View File

@ -1,111 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
package value
import (
"fmt"
"math"
"reflect"
"sort"
)
// SortKeys sorts a list of map keys, deduplicating keys if necessary.
// The type of each value must be comparable.
func SortKeys(vs []reflect.Value) []reflect.Value {
if len(vs) == 0 {
return vs
}
// Sort the map keys.
sort.Sort(valueSorter(vs))
// Deduplicate keys (fails for NaNs).
vs2 := vs[:1]
for _, v := range vs[1:] {
if isLess(vs2[len(vs2)-1], v) {
vs2 = append(vs2, v)
}
}
return vs2
}
// TODO: Use sort.Slice once Google AppEngine is on Go1.8 or above.
type valueSorter []reflect.Value
func (vs valueSorter) Len() int { return len(vs) }
func (vs valueSorter) Less(i, j int) bool { return isLess(vs[i], vs[j]) }
func (vs valueSorter) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] }
// isLess is a generic function for sorting arbitrary map keys.
// The inputs must be of the same type and must be comparable.
func isLess(x, y reflect.Value) bool {
switch x.Type().Kind() {
case reflect.Bool:
return !x.Bool() && y.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return x.Int() < y.Int()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return x.Uint() < y.Uint()
case reflect.Float32, reflect.Float64:
fx, fy := x.Float(), y.Float()
return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy)
case reflect.Complex64, reflect.Complex128:
cx, cy := x.Complex(), y.Complex()
rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy)
if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) {
return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy)
}
return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry)
case reflect.Ptr, reflect.UnsafePointer, reflect.Chan:
return x.Pointer() < y.Pointer()
case reflect.String:
return x.String() < y.String()
case reflect.Array:
for i := 0; i < x.Len(); i++ {
if isLess(x.Index(i), y.Index(i)) {
return true
}
if isLess(y.Index(i), x.Index(i)) {
return false
}
}
return false
case reflect.Struct:
for i := 0; i < x.NumField(); i++ {
if isLess(x.Field(i), y.Field(i)) {
return true
}
if isLess(y.Field(i), x.Field(i)) {
return false
}
}
return false
case reflect.Interface:
vx, vy := x.Elem(), y.Elem()
if !vx.IsValid() || !vy.IsValid() {
return !vx.IsValid() && vy.IsValid()
}
tx, ty := vx.Type(), vy.Type()
if tx == ty {
return isLess(x.Elem(), y.Elem())
}
if tx.Kind() != ty.Kind() {
return vx.Kind() < vy.Kind()
}
if tx.String() != ty.String() {
return tx.String() < ty.String()
}
if tx.PkgPath() != ty.PkgPath() {
return tx.PkgPath() < ty.PkgPath()
}
// This can happen in rare situations, so we fallback to just comparing
// the unique pointer for a reflect.Type. This guarantees deterministic
// ordering within a program, but it is obviously not stable.
return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer()
default:
// Must be Func, Map, or Slice; which are not comparable.
panic(fmt.Sprintf("%T is not comparable", x.Type()))
}
}

View File

@ -1,453 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
package cmp
import (
"fmt"
"reflect"
"runtime"
"strings"
"github.com/google/go-cmp/cmp/internal/function"
)
// Option configures for specific behavior of Equal and Diff. In particular,
// the fundamental Option functions (Ignore, Transformer, and Comparer),
// configure how equality is determined.
//
// The fundamental options may be composed with filters (FilterPath and
// FilterValues) to control the scope over which they are applied.
//
// The cmp/cmpopts package provides helper functions for creating options that
// may be used with Equal and Diff.
type Option interface {
// filter applies all filters and returns the option that remains.
// Each option may only read s.curPath and call s.callTTBFunc.
//
// An Options is returned only if multiple comparers or transformers
// can apply simultaneously and will only contain values of those types
// or sub-Options containing values of those types.
filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption
}
// applicableOption represents the following types:
// Fundamental: ignore | invalid | *comparer | *transformer
// Grouping: Options
type applicableOption interface {
Option
// apply executes the option, which may mutate s or panic.
apply(s *state, vx, vy reflect.Value)
}
// coreOption represents the following types:
// Fundamental: ignore | invalid | *comparer | *transformer
// Filters: *pathFilter | *valuesFilter
type coreOption interface {
Option
isCore()
}
type core struct{}
func (core) isCore() {}
// Options is a list of Option values that also satisfies the Option interface.
// Helper comparison packages may return an Options value when packing multiple
// Option values into a single Option. When this package processes an Options,
// it will be implicitly expanded into a flat list.
//
// Applying a filter on an Options is equivalent to applying that same filter
// on all individual options held within.
type Options []Option
func (opts Options) filter(s *state, vx, vy reflect.Value, t reflect.Type) (out applicableOption) {
for _, opt := range opts {
switch opt := opt.filter(s, vx, vy, t); opt.(type) {
case ignore:
return ignore{} // Only ignore can short-circuit evaluation
case invalid:
out = invalid{} // Takes precedence over comparer or transformer
case *comparer, *transformer, Options:
switch out.(type) {
case nil:
out = opt
case invalid:
// Keep invalid
case *comparer, *transformer, Options:
out = Options{out, opt} // Conflicting comparers or transformers
}
}
}
return out
}
func (opts Options) apply(s *state, _, _ reflect.Value) {
const warning = "ambiguous set of applicable options"
const help = "consider using filters to ensure at most one Comparer or Transformer may apply"
var ss []string
for _, opt := range flattenOptions(nil, opts) {
ss = append(ss, fmt.Sprint(opt))
}
set := strings.Join(ss, "\n\t")
panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help))
}
func (opts Options) String() string {
var ss []string
for _, opt := range opts {
ss = append(ss, fmt.Sprint(opt))
}
return fmt.Sprintf("Options{%s}", strings.Join(ss, ", "))
}
// FilterPath returns a new Option where opt is only evaluated if filter f
// returns true for the current Path in the value tree.
//
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
// a previously filtered Option.
func FilterPath(f func(Path) bool, opt Option) Option {
if f == nil {
panic("invalid path filter function")
}
if opt := normalizeOption(opt); opt != nil {
return &pathFilter{fnc: f, opt: opt}
}
return nil
}
type pathFilter struct {
core
fnc func(Path) bool
opt Option
}
func (f pathFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption {
if f.fnc(s.curPath) {
return f.opt.filter(s, vx, vy, t)
}
return nil
}
func (f pathFilter) String() string {
fn := getFuncName(reflect.ValueOf(f.fnc).Pointer())
return fmt.Sprintf("FilterPath(%s, %v)", fn, f.opt)
}
// FilterValues returns a new Option where opt is only evaluated if filter f,
// which is a function of the form "func(T, T) bool", returns true for the
// current pair of values being compared. If the type of the values is not
// assignable to T, then this filter implicitly returns false.
//
// The filter function must be
// symmetric (i.e., agnostic to the order of the inputs) and
// deterministic (i.e., produces the same result when given the same inputs).
// If T is an interface, it is possible that f is called with two values with
// different concrete types that both implement T.
//
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
// a previously filtered Option.
func FilterValues(f interface{}, opt Option) Option {
v := reflect.ValueOf(f)
if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() {
panic(fmt.Sprintf("invalid values filter function: %T", f))
}
if opt := normalizeOption(opt); opt != nil {
vf := &valuesFilter{fnc: v, opt: opt}
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
vf.typ = ti
}
return vf
}
return nil
}
type valuesFilter struct {
core
typ reflect.Type // T
fnc reflect.Value // func(T, T) bool
opt Option
}
func (f valuesFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption {
if !vx.IsValid() || !vy.IsValid() {
return invalid{}
}
if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) {
return f.opt.filter(s, vx, vy, t)
}
return nil
}
func (f valuesFilter) String() string {
fn := getFuncName(f.fnc.Pointer())
return fmt.Sprintf("FilterValues(%s, %v)", fn, f.opt)
}
// Ignore is an Option that causes all comparisons to be ignored.
// This value is intended to be combined with FilterPath or FilterValues.
// It is an error to pass an unfiltered Ignore option to Equal.
func Ignore() Option { return ignore{} }
type ignore struct{ core }
func (ignore) isFiltered() bool { return false }
func (ignore) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return ignore{} }
func (ignore) apply(_ *state, _, _ reflect.Value) { return }
func (ignore) String() string { return "Ignore()" }
// invalid is a sentinel Option type to indicate that some options could not
// be evaluated due to unexported fields.
type invalid struct{ core }
func (invalid) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return invalid{} }
func (invalid) apply(s *state, _, _ reflect.Value) {
const help = "consider using AllowUnexported or cmpopts.IgnoreUnexported"
panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help))
}
// Transformer returns an Option that applies a transformation function that
// converts values of a certain type into that of another.
//
// The transformer f must be a function "func(T) R" that converts values of
// type T to those of type R and is implicitly filtered to input values
// assignable to T. The transformer must not mutate T in any way.
//
// To help prevent some cases of infinite recursive cycles applying the
// same transform to the output of itself (e.g., in the case where the
// input and output types are the same), an implicit filter is added such that
// a transformer is applicable only if that exact transformer is not already
// in the tail of the Path since the last non-Transform step.
//
// The name is a user provided label that is used as the Transform.Name in the
// transformation PathStep. If empty, an arbitrary name is used.
func Transformer(name string, f interface{}) Option {
v := reflect.ValueOf(f)
if !function.IsType(v.Type(), function.Transformer) || v.IsNil() {
panic(fmt.Sprintf("invalid transformer function: %T", f))
}
if name == "" {
name = "λ" // Lambda-symbol as place-holder for anonymous transformer
}
if !isValid(name) {
panic(fmt.Sprintf("invalid name: %q", name))
}
tr := &transformer{name: name, fnc: reflect.ValueOf(f)}
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
tr.typ = ti
}
return tr
}
type transformer struct {
core
name string
typ reflect.Type // T
fnc reflect.Value // func(T) R
}
func (tr *transformer) isFiltered() bool { return tr.typ != nil }
func (tr *transformer) filter(s *state, _, _ reflect.Value, t reflect.Type) applicableOption {
for i := len(s.curPath) - 1; i >= 0; i-- {
if t, ok := s.curPath[i].(*transform); !ok {
break // Hit most recent non-Transform step
} else if tr == t.trans {
return nil // Cannot directly use same Transform
}
}
if tr.typ == nil || t.AssignableTo(tr.typ) {
return tr
}
return nil
}
func (tr *transformer) apply(s *state, vx, vy reflect.Value) {
// Update path before calling the Transformer so that dynamic checks
// will use the updated path.
s.curPath.push(&transform{pathStep{tr.fnc.Type().Out(0)}, tr})
defer s.curPath.pop()
vx = s.callTRFunc(tr.fnc, vx)
vy = s.callTRFunc(tr.fnc, vy)
s.compareAny(vx, vy)
}
func (tr transformer) String() string {
return fmt.Sprintf("Transformer(%s, %s)", tr.name, getFuncName(tr.fnc.Pointer()))
}
// Comparer returns an Option that determines whether two values are equal
// to each other.
//
// The comparer f must be a function "func(T, T) bool" and is implicitly
// filtered to input values assignable to T. If T is an interface, it is
// possible that f is called with two values of different concrete types that
// both implement T.
//
// The equality function must be:
// • Symmetric: equal(x, y) == equal(y, x)
// • Deterministic: equal(x, y) == equal(x, y)
// • Pure: equal(x, y) does not modify x or y
func Comparer(f interface{}) Option {
v := reflect.ValueOf(f)
if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
panic(fmt.Sprintf("invalid comparer function: %T", f))
}
cm := &comparer{fnc: v}
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
cm.typ = ti
}
return cm
}
type comparer struct {
core
typ reflect.Type // T
fnc reflect.Value // func(T, T) bool
}
func (cm *comparer) isFiltered() bool { return cm.typ != nil }
func (cm *comparer) filter(_ *state, _, _ reflect.Value, t reflect.Type) applicableOption {
if cm.typ == nil || t.AssignableTo(cm.typ) {
return cm
}
return nil
}
func (cm *comparer) apply(s *state, vx, vy reflect.Value) {
eq := s.callTTBFunc(cm.fnc, vx, vy)
s.report(eq, vx, vy)
}
func (cm comparer) String() string {
return fmt.Sprintf("Comparer(%s)", getFuncName(cm.fnc.Pointer()))
}
// AllowUnexported returns an Option that forcibly allows operations on
// unexported fields in certain structs, which are specified by passing in a
// value of each struct type.
//
// Users of this option must understand that comparing on unexported fields
// from external packages is not safe since changes in the internal
// implementation of some external package may cause the result of Equal
// to unexpectedly change. However, it may be valid to use this option on types
// defined in an internal package where the semantic meaning of an unexported
// field is in the control of the user.
//
// For some cases, a custom Comparer should be used instead that defines
// equality as a function of the public API of a type rather than the underlying
// unexported implementation.
//
// For example, the reflect.Type documentation defines equality to be determined
// by the == operator on the interface (essentially performing a shallow pointer
// comparison) and most attempts to compare *regexp.Regexp types are interested
// in only checking that the regular expression strings are equal.
// Both of these are accomplished using Comparers:
//
// Comparer(func(x, y reflect.Type) bool { return x == y })
// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
//
// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore
// all unexported fields on specified struct types.
func AllowUnexported(types ...interface{}) Option {
if !supportAllowUnexported {
panic("AllowUnexported is not supported on purego builds, Google App Engine Standard, or GopherJS")
}
m := make(map[reflect.Type]bool)
for _, typ := range types {
t := reflect.TypeOf(typ)
if t.Kind() != reflect.Struct {
panic(fmt.Sprintf("invalid struct type: %T", typ))
}
m[t] = true
}
return visibleStructs(m)
}
type visibleStructs map[reflect.Type]bool
func (visibleStructs) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption {
panic("not implemented")
}
// reporter is an Option that configures how differences are reported.
type reporter interface {
// TODO: Not exported yet.
//
// Perhaps add PushStep and PopStep and change Report to only accept
// a PathStep instead of the full-path? Adding a PushStep and PopStep makes
// it clear that we are traversing the value tree in a depth-first-search
// manner, which has an effect on how values are printed.
Option
// Report is called for every comparison made and will be provided with
// the two values being compared, the equality result, and the
// current path in the value tree. It is possible for x or y to be an
// invalid reflect.Value if one of the values is non-existent;
// which is possible with maps and slices.
Report(x, y reflect.Value, eq bool, p Path)
}
// normalizeOption normalizes the input options such that all Options groups
// are flattened and groups with a single element are reduced to that element.
// Only coreOptions and Options containing coreOptions are allowed.
func normalizeOption(src Option) Option {
switch opts := flattenOptions(nil, Options{src}); len(opts) {
case 0:
return nil
case 1:
return opts[0]
default:
return opts
}
}
// flattenOptions copies all options in src to dst as a flat list.
// Only coreOptions and Options containing coreOptions are allowed.
func flattenOptions(dst, src Options) Options {
for _, opt := range src {
switch opt := opt.(type) {
case nil:
continue
case Options:
dst = flattenOptions(dst, opt)
case coreOption:
dst = append(dst, opt)
default:
panic(fmt.Sprintf("invalid option type: %T", opt))
}
}
return dst
}
// getFuncName returns a short function name from the pointer.
// The string parsing logic works up until Go1.9.
func getFuncName(p uintptr) string {
fnc := runtime.FuncForPC(p)
if fnc == nil {
return "<unknown>"
}
name := fnc.Name() // E.g., "long/path/name/mypkg.(mytype).(long/path/name/mypkg.myfunc)-fm"
if strings.HasSuffix(name, ")-fm") || strings.HasSuffix(name, ")·fm") {
// Strip the package name from method name.
name = strings.TrimSuffix(name, ")-fm")
name = strings.TrimSuffix(name, ")·fm")
if i := strings.LastIndexByte(name, '('); i >= 0 {
methodName := name[i+1:] // E.g., "long/path/name/mypkg.myfunc"
if j := strings.LastIndexByte(methodName, '.'); j >= 0 {
methodName = methodName[j+1:] // E.g., "myfunc"
}
name = name[:i] + methodName // E.g., "long/path/name/mypkg.(mytype)." + "myfunc"
}
}
if i := strings.LastIndexByte(name, '/'); i >= 0 {
// Strip the package name.
name = name[i+1:] // E.g., "mypkg.(mytype).myfunc"
}
return name
}
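As a usage sketch of the public options described above, the following program applies the Comparer example from the documentation, treating two *regexp.Regexp values as equal when their source strings match:

```Go
package main

import (
	"fmt"
	"regexp"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Compare regular expressions by their source strings, as suggested
	// in the Comparer documentation above.
	byPattern := cmp.Comparer(func(x, y *regexp.Regexp) bool {
		return x.String() == y.String()
	})

	a := regexp.MustCompile(`^a+$`)
	b := regexp.MustCompile(`^a+$`)
	fmt.Println(cmp.Equal(a, b, byPattern)) // true
}
```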

View File

@ -1,309 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
package cmp
import (
"fmt"
"reflect"
"strings"
"unicode"
"unicode/utf8"
)
type (
// Path is a list of PathSteps describing the sequence of operations to get
// from some root type to the current position in the value tree.
// The first Path element is always an operation-less PathStep that exists
// simply to identify the initial type.
//
// When traversing structs with embedded structs, the embedded struct will
// always be accessed as a field before traversing the fields of the
// embedded struct themselves. That is, an exported field from the
// embedded struct will never be accessed directly from the parent struct.
Path []PathStep
// PathStep is a union-type for specific operations to traverse
// a value's tree structure. Users of this package never need to implement
// these types as values of this type will be returned by this package.
PathStep interface {
String() string
Type() reflect.Type // Resulting type after performing the path step
isPathStep()
}
// SliceIndex is an index operation on a slice or array at some index Key.
SliceIndex interface {
PathStep
Key() int // May return -1 if in a split state
// SplitKeys returns the indexes for indexing into slices in the
// x and y values, respectively. These indexes may differ due to the
// insertion or removal of an element in one of the slices, causing
// all of the indexes to be shifted. If an index is -1, then that
// indicates that the element does not exist in the associated slice.
//
// Key is guaranteed to return -1 if and only if the indexes returned
// by SplitKeys are not the same. SplitKeys will never return -1 for
// both indexes.
SplitKeys() (x int, y int)
isSliceIndex()
}
// MapIndex is an index operation on a map at some index Key.
MapIndex interface {
PathStep
Key() reflect.Value
isMapIndex()
}
// TypeAssertion represents a type assertion on an interface.
TypeAssertion interface {
PathStep
isTypeAssertion()
}
// StructField represents a struct field access on a field called Name.
StructField interface {
PathStep
Name() string
Index() int
isStructField()
}
// Indirect represents pointer indirection on the parent type.
Indirect interface {
PathStep
isIndirect()
}
// Transform is a transformation from the parent type to the current type.
Transform interface {
PathStep
Name() string
Func() reflect.Value
// Option returns the originally constructed Transformer option.
// The == operator can be used to detect the exact option used.
Option() Option
isTransform()
}
)
func (pa *Path) push(s PathStep) {
*pa = append(*pa, s)
}
func (pa *Path) pop() {
*pa = (*pa)[:len(*pa)-1]
}
// Last returns the last PathStep in the Path.
// If the path is empty, this returns a non-nil PathStep that reports a nil Type.
func (pa Path) Last() PathStep {
return pa.Index(-1)
}
// Index returns the ith step in the Path and supports negative indexing.
// A negative index starts counting from the tail of the Path such that -1
// refers to the last step, -2 refers to the second-to-last step, and so on.
// If index is invalid, this returns a non-nil PathStep that reports a nil Type.
func (pa Path) Index(i int) PathStep {
if i < 0 {
i = len(pa) + i
}
if i < 0 || i >= len(pa) {
return pathStep{}
}
return pa[i]
}
// String returns the simplified path to a node.
// The simplified path only contains struct field accesses.
//
// For example:
// MyMap.MySlices.MyField
func (pa Path) String() string {
var ss []string
for _, s := range pa {
if _, ok := s.(*structField); ok {
ss = append(ss, s.String())
}
}
return strings.TrimPrefix(strings.Join(ss, ""), ".")
}
// GoString returns the path to a specific node using Go syntax.
//
// For example:
// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
func (pa Path) GoString() string {
var ssPre, ssPost []string
var numIndirect int
for i, s := range pa {
var nextStep PathStep
if i+1 < len(pa) {
nextStep = pa[i+1]
}
switch s := s.(type) {
case *indirect:
numIndirect++
pPre, pPost := "(", ")"
switch nextStep.(type) {
case *indirect:
continue // Next step is indirection, so let them batch up
case *structField:
numIndirect-- // Automatic indirection on struct fields
case nil:
pPre, pPost = "", "" // Last step; no need for parenthesis
}
if numIndirect > 0 {
ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect))
ssPost = append(ssPost, pPost)
}
numIndirect = 0
continue
case *transform:
ssPre = append(ssPre, s.trans.name+"(")
ssPost = append(ssPost, ")")
continue
case *typeAssertion:
// As a special-case, elide type assertions on anonymous types
// since they are typically generated dynamically and can be very
// verbose. For example, some transforms return interface{} because
// of Go's lack of generics, but typically take in and return the
// exact same concrete type.
if s.Type().PkgPath() == "" {
continue
}
}
ssPost = append(ssPost, s.String())
}
for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 {
ssPre[i], ssPre[j] = ssPre[j], ssPre[i]
}
return strings.Join(ssPre, "") + strings.Join(ssPost, "")
}
type (
pathStep struct {
typ reflect.Type
}
sliceIndex struct {
pathStep
xkey, ykey int
}
mapIndex struct {
pathStep
key reflect.Value
}
typeAssertion struct {
pathStep
}
structField struct {
pathStep
name string
idx int
// These fields are used for forcibly accessing an unexported field.
// pvx, pvy, and field are only valid if unexported is true.
unexported bool
force bool // Forcibly allow visibility
pvx, pvy reflect.Value // Parent values
field reflect.StructField // Field information
}
indirect struct {
pathStep
}
transform struct {
pathStep
trans *transformer
}
)
func (ps pathStep) Type() reflect.Type { return ps.typ }
func (ps pathStep) String() string {
if ps.typ == nil {
return "<nil>"
}
s := ps.typ.String()
if s == "" || strings.ContainsAny(s, "{}\n") {
return "root" // Type too simple or complex to print
}
return fmt.Sprintf("{%s}", s)
}
func (si sliceIndex) String() string {
switch {
case si.xkey == si.ykey:
return fmt.Sprintf("[%d]", si.xkey)
case si.ykey == -1:
// [5->?] means "I don't know where X[5] went"
return fmt.Sprintf("[%d->?]", si.xkey)
case si.xkey == -1:
// [?->3] means "I don't know where Y[3] came from"
return fmt.Sprintf("[?->%d]", si.ykey)
default:
// [5->3] means "X[5] moved to Y[3]"
return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey)
}
}
func (mi mapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) }
func (ta typeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) }
func (sf structField) String() string { return fmt.Sprintf(".%s", sf.name) }
func (in indirect) String() string { return "*" }
func (tf transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) }
func (si sliceIndex) Key() int {
if si.xkey != si.ykey {
return -1
}
return si.xkey
}
func (si sliceIndex) SplitKeys() (x, y int) { return si.xkey, si.ykey }
func (mi mapIndex) Key() reflect.Value { return mi.key }
func (sf structField) Name() string { return sf.name }
func (sf structField) Index() int { return sf.idx }
func (tf transform) Name() string { return tf.trans.name }
func (tf transform) Func() reflect.Value { return tf.trans.fnc }
func (tf transform) Option() Option { return tf.trans }
func (pathStep) isPathStep() {}
func (sliceIndex) isSliceIndex() {}
func (mapIndex) isMapIndex() {}
func (typeAssertion) isTypeAssertion() {}
func (structField) isStructField() {}
func (indirect) isIndirect() {}
func (transform) isTransform() {}
var (
_ SliceIndex = sliceIndex{}
_ MapIndex = mapIndex{}
_ TypeAssertion = typeAssertion{}
_ StructField = structField{}
_ Indirect = indirect{}
_ Transform = transform{}
_ PathStep = sliceIndex{}
_ PathStep = mapIndex{}
_ PathStep = typeAssertion{}
_ PathStep = structField{}
_ PathStep = indirect{}
_ PathStep = transform{}
)
// isExported reports whether the identifier is exported.
func isExported(id string) bool {
r, _ := utf8.DecodeRuneInString(id)
return unicode.IsUpper(r)
}
// isValid reports whether the identifier is valid.
// Empty and underscore-only strings are not valid.
func isValid(id string) bool {
ok := id != "" && id != "_"
for j, c := range id {
ok = ok && (j > 0 || !unicode.IsDigit(c))
ok = ok && (c == '_' || unicode.IsLetter(c) || unicode.IsDigit(c))
}
return ok
}
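Path strings are what FilterPath matches against. A minimal sketch, assuming a hypothetical Record type, that ignores one field by its simplified path:

```Go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// Record is a hypothetical type used only for this illustration.
type Record struct {
	Name    string
	Version int64
}

func main() {
	// Ignore the Version field by matching its simplified Path string,
	// which contains only struct field accesses (see Path.String above).
	ignoreVersion := cmp.FilterPath(func(p cmp.Path) bool {
		return p.String() == "Version"
	}, cmp.Ignore())

	x := Record{Name: "a", Version: 1}
	y := Record{Name: "a", Version: 2}
	fmt.Println(cmp.Equal(x, y, ignoreVersion)) // true
}
```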

View File

@ -1,53 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
package cmp
import (
"fmt"
"reflect"
"strings"
"github.com/google/go-cmp/cmp/internal/value"
)
type defaultReporter struct {
Option
diffs []string // List of differences, possibly truncated
ndiffs int // Total number of differences
nbytes int // Number of bytes in diffs
nlines int // Number of lines in diffs
}
var _ reporter = (*defaultReporter)(nil)
func (r *defaultReporter) Report(x, y reflect.Value, eq bool, p Path) {
if eq {
return // Ignore equal results
}
const maxBytes = 4096
const maxLines = 256
r.ndiffs++
if r.nbytes < maxBytes && r.nlines < maxLines {
sx := value.Format(x, value.FormatConfig{UseStringer: true})
sy := value.Format(y, value.FormatConfig{UseStringer: true})
if sx == sy {
// Unhelpful output, so use more exact formatting.
sx = value.Format(x, value.FormatConfig{PrintPrimitiveType: true})
sy = value.Format(y, value.FormatConfig{PrintPrimitiveType: true})
}
s := fmt.Sprintf("%#v:\n\t-: %s\n\t+: %s\n", p, sx, sy)
r.diffs = append(r.diffs, s)
r.nbytes += len(s)
r.nlines += strings.Count(s, "\n")
}
}
func (r *defaultReporter) String() string {
s := strings.Join(r.diffs, "")
if r.ndiffs == len(r.diffs) {
return s
}
return fmt.Sprintf("%s... %d more differences ...", s, r.ndiffs-len(r.diffs))
}

View File

@ -1,15 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// +build purego appengine js
package cmp
import "reflect"
const supportAllowUnexported = false
func unsafeRetrieveField(reflect.Value, reflect.StructField) reflect.Value {
panic("unsafeRetrieveField is not implemented")
}

View File

@ -1,23 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// +build !purego,!appengine,!js
package cmp
import (
"reflect"
"unsafe"
)
const supportAllowUnexported = true
// unsafeRetrieveField uses unsafe to forcibly retrieve any field from a struct
// such that the value has read-write permissions.
//
// The parent struct, v, must be addressable, while f must be a StructField
// describing the field to retrieve.
func unsafeRetrieveField(v reflect.Value, f reflect.StructField) reflect.Value {
return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem()
}

View File

@ -1,7 +0,0 @@
language: go
go:
- 1.4
- 1.5
- 1.6
- tip

View File

@ -1,19 +0,0 @@
Copyright (c) 2013 Kelsey Hightower
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -1,2 +0,0 @@
Kelsey Hightower kelsey.hightower@gmail.com github.com/kelseyhightower
Travis Parker travis.parker@gmail.com github.com/teepark

View File

@ -1,188 +0,0 @@
# envconfig
[![Build Status](https://travis-ci.org/kelseyhightower/envconfig.png)](https://travis-ci.org/kelseyhightower/envconfig)
```Go
import "github.com/kelseyhightower/envconfig"
```
## Documentation
See [godoc](http://godoc.org/github.com/kelseyhightower/envconfig)
## Usage
Set some environment variables:
```Bash
export MYAPP_DEBUG=false
export MYAPP_PORT=8080
export MYAPP_USER=Kelsey
export MYAPP_RATE="0.5"
export MYAPP_TIMEOUT="3m"
export MYAPP_USERS="rob,ken,robert"
export MYAPP_COLORCODES="red:1,green:2,blue:3"
```
Write some code:
```Go
package main
import (
"fmt"
"log"
"time"
"github.com/kelseyhightower/envconfig"
)
type Specification struct {
Debug bool
Port int
User string
Users []string
Rate float32
Timeout time.Duration
ColorCodes map[string]int
}
func main() {
var s Specification
err := envconfig.Process("myapp", &s)
if err != nil {
log.Fatal(err.Error())
}
format := "Debug: %v\nPort: %d\nUser: %s\nRate: %f\nTimeout: %s\n"
_, err = fmt.Printf(format, s.Debug, s.Port, s.User, s.Rate)
if err != nil {
log.Fatal(err.Error())
}
fmt.Println("Users:")
for _, u := range s.Users {
fmt.Printf(" %s\n", u)
}
fmt.Println("Color codes:")
for k, v := range s.ColorCodes {
fmt.Printf(" %s: %d\n", k, v)
}
}
```
Results:
```Bash
Debug: false
Port: 8080
User: Kelsey
Rate: 0.500000
Timeout: 3m0s
Users:
rob
ken
robert
Color codes:
red: 1
green: 2
blue: 3
```
## Struct Tag Support
Envconfig supports the use of struct tags to specify alternate, default, and required
environment variables.
For example, consider the following struct:
```Go
type Specification struct {
ManualOverride1 string `envconfig:"manual_override_1"`
DefaultVar string `default:"foobar"`
RequiredVar string `required:"true"`
IgnoredVar string `ignored:"true"`
AutoSplitVar string `split_words:"true"`
}
```
Envconfig has automatic support for CamelCased struct elements when the
`split_words:"true"` tag is supplied. Without this tag, `AutoSplitVar` above
would look for an environment variable called `MYAPP_AUTOSPLITVAR`. With the
setting applied it will look for `MYAPP_AUTO_SPLIT_VAR`. Note that numbers
will get globbed into the previous word. If the setting does not do the
right thing, you may use a manual override.
Envconfig will process the value for `ManualOverride1` by populating it with the
value for `MYAPP_MANUAL_OVERRIDE_1`. Without this struct tag, it would have
instead looked up `MYAPP_MANUALOVERRIDE1`. With the `split_words:"true"` tag
it would have looked up `MYAPP_MANUAL_OVERRIDE1`.
```Bash
export MYAPP_MANUAL_OVERRIDE_1="this will be the value"
# export MYAPP_MANUALOVERRIDE1="and this will not"
```
If envconfig can't find an environment variable value for `MYAPP_DEFAULTVAR`,
it will populate it with "foobar" as a default value.
If envconfig can't find an environment variable value for `MYAPP_REQUIREDVAR`,
it will return an error when asked to process the struct.
If envconfig can't find an environment variable in the form `PREFIX_MYVAR`, and there
is a struct tag defined, it will try to populate your variable with an environment
variable that directly matches the envconfig tag in your struct definition:
```shell
export SERVICE_HOST=127.0.0.1
export MYAPP_DEBUG=true
```
```Go
type Specification struct {
ServiceHost string `envconfig:"SERVICE_HOST"`
Debug bool
}
```
Envconfig won't process a field with the "ignored" tag set to "true", even if a corresponding
environment variable is set.
## Supported Struct Field Types
envconfig supports these struct field types:
* string
* int8, int16, int32, int64
* bool
* float32, float64
* slices of any supported type
* maps (keys and values of any supported type)
* [encoding.TextUnmarshaler](https://golang.org/pkg/encoding/#TextUnmarshaler)
Embedded structs using these fields are also supported.
## Custom Decoders
Any field whose type (or pointer-to-type) implements `envconfig.Decoder` can
control its own deserialization:
```Bash
export DNS_SERVER=8.8.8.8
```
```Go
type IPDecoder net.IP
func (ipd *IPDecoder) Decode(value string) error {
*ipd = IPDecoder(net.ParseIP(value))
return nil
}
type DNSConfig struct {
Address IPDecoder `envconfig:"DNS_SERVER"`
}
```
Also, envconfig will use a `Set(string) error` method, like the one from the
[flag.Value](https://godoc.org/flag#Value) interface, if implemented.

View File

@ -1,8 +0,0 @@
// Copyright (c) 2013 Kelsey Hightower. All rights reserved.
// Use of this source code is governed by the MIT License that can be found in
// the LICENSE file.
// Package envconfig implements decoding of environment variables based on a user
// defined specification. A typical use is using environment variables for
// configuration settings.
package envconfig

View File

@ -1,7 +0,0 @@
// +build appengine
package envconfig
import "os"
var lookupEnv = os.LookupEnv

View File

@ -1,7 +0,0 @@
// +build !appengine
package envconfig
import "syscall"
var lookupEnv = syscall.Getenv

View File

@ -1,319 +0,0 @@
// Copyright (c) 2013 Kelsey Hightower. All rights reserved.
// Use of this source code is governed by the MIT License that can be found in
// the LICENSE file.
package envconfig
import (
"encoding"
"errors"
"fmt"
"reflect"
"regexp"
"strconv"
"strings"
"time"
)
// ErrInvalidSpecification indicates that a specification is of the wrong type.
var ErrInvalidSpecification = errors.New("specification must be a struct pointer")
// A ParseError occurs when an environment variable cannot be converted to
// the type required by a struct field during assignment.
type ParseError struct {
KeyName string
FieldName string
TypeName string
Value string
Err error
}
// Decoder has the same semantics as Setter, but takes higher precedence.
// It is provided for historical compatibility.
type Decoder interface {
Decode(value string) error
}
// Setter is implemented by types that can self-deserialize values.
// Any type that implements flag.Value also implements Setter.
type Setter interface {
Set(value string) error
}
func (e *ParseError) Error() string {
return fmt.Sprintf("envconfig.Process: assigning %[1]s to %[2]s: converting '%[3]s' to type %[4]s. details: %[5]s", e.KeyName, e.FieldName, e.Value, e.TypeName, e.Err)
}
// varInfo maintains information about the configuration variable
type varInfo struct {
Name string
Alt string
Key string
Field reflect.Value
Tags reflect.StructTag
}
// GatherInfo gathers information about the specified struct
func gatherInfo(prefix string, spec interface{}) ([]varInfo, error) {
expr := regexp.MustCompile("([^A-Z]+|[A-Z][^A-Z]+|[A-Z]+)")
s := reflect.ValueOf(spec)
if s.Kind() != reflect.Ptr {
return nil, ErrInvalidSpecification
}
s = s.Elem()
if s.Kind() != reflect.Struct {
return nil, ErrInvalidSpecification
}
typeOfSpec := s.Type()
// Over-allocate an info array; we will extend it if needed later
infos := make([]varInfo, 0, s.NumField())
for i := 0; i < s.NumField(); i++ {
f := s.Field(i)
ftype := typeOfSpec.Field(i)
if !f.CanSet() || ftype.Tag.Get("ignored") == "true" {
continue
}
for f.Kind() == reflect.Ptr {
if f.IsNil() {
if f.Type().Elem().Kind() != reflect.Struct {
// nil pointer to a non-struct: leave it alone
break
}
// nil pointer to struct: create a zero instance
f.Set(reflect.New(f.Type().Elem()))
}
f = f.Elem()
}
// Capture information about the config variable
info := varInfo{
Name: ftype.Name,
Field: f,
Tags: ftype.Tag,
Alt: strings.ToUpper(ftype.Tag.Get("envconfig")),
}
// Default to the field name as the env var name (will be upcased)
info.Key = info.Name
// Best effort to un-pick camel casing as separate words
if ftype.Tag.Get("split_words") == "true" {
words := expr.FindAllStringSubmatch(ftype.Name, -1)
if len(words) > 0 {
var name []string
for _, words := range words {
name = append(name, words[0])
}
info.Key = strings.Join(name, "_")
}
}
if info.Alt != "" {
info.Key = info.Alt
}
if prefix != "" {
info.Key = fmt.Sprintf("%s_%s", prefix, info.Key)
}
info.Key = strings.ToUpper(info.Key)
infos = append(infos, info)
if f.Kind() == reflect.Struct {
// honor Decode if present
if decoderFrom(f) == nil && setterFrom(f) == nil && textUnmarshaler(f) == nil {
innerPrefix := prefix
if !ftype.Anonymous {
innerPrefix = info.Key
}
embeddedPtr := f.Addr().Interface()
embeddedInfos, err := gatherInfo(innerPrefix, embeddedPtr)
if err != nil {
return nil, err
}
infos = append(infos[:len(infos)-1], embeddedInfos...)
continue
}
}
}
return infos, nil
}
// Process populates the specified struct based on environment variables
func Process(prefix string, spec interface{}) error {
infos, err := gatherInfo(prefix, spec)
for _, info := range infos {
// `os.Getenv` cannot differentiate between an explicitly set empty value
// and an unset value. `os.LookupEnv` is preferred to `syscall.Getenv`,
// but it is only available in go1.5 or newer. We're using Go build tags
// here to use os.LookupEnv for >=go1.5
value, ok := lookupEnv(info.Key)
if !ok && info.Alt != "" {
value, ok = lookupEnv(info.Alt)
}
def := info.Tags.Get("default")
if def != "" && !ok {
value = def
}
req := info.Tags.Get("required")
if !ok && def == "" {
if req == "true" {
return fmt.Errorf("required key %s missing value", info.Key)
}
continue
}
err := processField(value, info.Field)
if err != nil {
return &ParseError{
KeyName: info.Key,
FieldName: info.Name,
TypeName: info.Field.Type().String(),
Value: value,
Err: err,
}
}
}
return err
}
// MustProcess is the same as Process but panics if an error occurs
func MustProcess(prefix string, spec interface{}) {
if err := Process(prefix, spec); err != nil {
panic(err)
}
}
func processField(value string, field reflect.Value) error {
typ := field.Type()
decoder := decoderFrom(field)
if decoder != nil {
return decoder.Decode(value)
}
// look for Set method if Decode not defined
setter := setterFrom(field)
if setter != nil {
return setter.Set(value)
}
if t := textUnmarshaler(field); t != nil {
return t.UnmarshalText([]byte(value))
}
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
if field.IsNil() {
field.Set(reflect.New(typ))
}
field = field.Elem()
}
switch typ.Kind() {
case reflect.String:
field.SetString(value)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
var (
val int64
err error
)
if field.Kind() == reflect.Int64 && typ.PkgPath() == "time" && typ.Name() == "Duration" {
var d time.Duration
d, err = time.ParseDuration(value)
val = int64(d)
} else {
val, err = strconv.ParseInt(value, 0, typ.Bits())
}
if err != nil {
return err
}
field.SetInt(val)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
val, err := strconv.ParseUint(value, 0, typ.Bits())
if err != nil {
return err
}
field.SetUint(val)
case reflect.Bool:
val, err := strconv.ParseBool(value)
if err != nil {
return err
}
field.SetBool(val)
case reflect.Float32, reflect.Float64:
val, err := strconv.ParseFloat(value, typ.Bits())
if err != nil {
return err
}
field.SetFloat(val)
case reflect.Slice:
vals := strings.Split(value, ",")
sl := reflect.MakeSlice(typ, len(vals), len(vals))
for i, val := range vals {
err := processField(val, sl.Index(i))
if err != nil {
return err
}
}
field.Set(sl)
case reflect.Map:
pairs := strings.Split(value, ",")
mp := reflect.MakeMap(typ)
for _, pair := range pairs {
kvpair := strings.Split(pair, ":")
if len(kvpair) != 2 {
return fmt.Errorf("invalid map item: %q", pair)
}
k := reflect.New(typ.Key()).Elem()
err := processField(kvpair[0], k)
if err != nil {
return err
}
v := reflect.New(typ.Elem()).Elem()
err = processField(kvpair[1], v)
if err != nil {
return err
}
mp.SetMapIndex(k, v)
}
field.Set(mp)
}
return nil
}
func interfaceFrom(field reflect.Value, fn func(interface{}, *bool)) {
// it may be impossible for a struct field to fail this check
if !field.CanInterface() {
return
}
var ok bool
fn(field.Interface(), &ok)
if !ok && field.CanAddr() {
fn(field.Addr().Interface(), &ok)
}
}
func decoderFrom(field reflect.Value) (d Decoder) {
interfaceFrom(field, func(v interface{}, ok *bool) { d, *ok = v.(Decoder) })
return d
}
func setterFrom(field reflect.Value) (s Setter) {
interfaceFrom(field, func(v interface{}, ok *bool) { s, *ok = v.(Setter) })
return s
}
func textUnmarshaler(field reflect.Value) (t encoding.TextUnmarshaler) {
interfaceFrom(field, func(v interface{}, ok *bool) { t, *ok = v.(encoding.TextUnmarshaler) })
return t
}
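As a hedged illustration of the Setter hook handled in processField, the following sketch defines a type that decodes itself (the CSVUpper type and the MYAPP prefix are made up for this example):

```Go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/kelseyhightower/envconfig"
)

// CSVUpper implements the Setter interface: envconfig hands it the raw
// environment string and lets the type decode itself.
type CSVUpper []string

func (c *CSVUpper) Set(value string) error {
	for _, v := range strings.Split(value, ",") {
		*c = append(*c, strings.ToUpper(v))
	}
	return nil
}

type Config struct {
	Tags CSVUpper // read from MYAPP_TAGS, e.g. "a,b,c" -> [A B C]
}

func main() {
	var c Config
	if err := envconfig.Process("myapp", &c); err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.Tags)
}
```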

View File

@ -1,158 +0,0 @@
// Copyright (c) 2016 Kelsey Hightower and others. All rights reserved.
// Use of this source code is governed by the MIT License that can be found in
// the LICENSE file.
package envconfig
import (
"encoding"
"fmt"
"io"
"os"
"reflect"
"strconv"
"strings"
"text/tabwriter"
"text/template"
)
const (
// DefaultListFormat constant to use to display usage in a list format
DefaultListFormat = `This application is configured via the environment. The following environment
variables can be used:
{{range .}}
{{usage_key .}}
[description] {{usage_description .}}
[type] {{usage_type .}}
[default] {{usage_default .}}
[required] {{usage_required .}}{{end}}
`
// DefaultTableFormat constant to use to display usage in a tabular format
DefaultTableFormat = `This application is configured via the environment. The following environment
variables can be used:
KEY TYPE DEFAULT REQUIRED DESCRIPTION
{{range .}}{{usage_key .}} {{usage_type .}} {{usage_default .}} {{usage_required .}} {{usage_description .}}
{{end}}`
)
var (
decoderType = reflect.TypeOf((*Decoder)(nil)).Elem()
setterType = reflect.TypeOf((*Setter)(nil)).Elem()
unmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
)
func implementsInterface(t reflect.Type) bool {
return t.Implements(decoderType) ||
reflect.PtrTo(t).Implements(decoderType) ||
t.Implements(setterType) ||
reflect.PtrTo(t).Implements(setterType) ||
t.Implements(unmarshalerType) ||
reflect.PtrTo(t).Implements(unmarshalerType)
}
// toTypeDescription converts Go types into a human readable description
func toTypeDescription(t reflect.Type) string {
switch t.Kind() {
case reflect.Array, reflect.Slice:
return fmt.Sprintf("Comma-separated list of %s", toTypeDescription(t.Elem()))
case reflect.Map:
return fmt.Sprintf(
"Comma-separated list of %s:%s pairs",
toTypeDescription(t.Key()),
toTypeDescription(t.Elem()),
)
case reflect.Ptr:
return toTypeDescription(t.Elem())
case reflect.Struct:
if implementsInterface(t) && t.Name() != "" {
return t.Name()
}
return ""
case reflect.String:
name := t.Name()
if name != "" && name != "string" {
return name
}
return "String"
case reflect.Bool:
name := t.Name()
if name != "" && name != "bool" {
return name
}
return "True or False"
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
name := t.Name()
if name != "" && !strings.HasPrefix(name, "int") {
return name
}
return "Integer"
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
name := t.Name()
if name != "" && !strings.HasPrefix(name, "uint") {
return name
}
return "Unsigned Integer"
case reflect.Float32, reflect.Float64:
name := t.Name()
if name != "" && !strings.HasPrefix(name, "float") {
return name
}
return "Float"
}
return fmt.Sprintf("%+v", t)
}
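To make the switch above concrete, a small illustrative sketch (written as if inside this package, since toTypeDescription is unexported) with the expected descriptions shown as comments:
```
package envconfig

import (
	"fmt"
	"reflect"
	"time"
)

// exampleTypeDescriptions is illustrative only and not part of the library.
func exampleTypeDescriptions() {
	fmt.Println(toTypeDescription(reflect.TypeOf([]string(nil))))       // Comma-separated list of String
	fmt.Println(toTypeDescription(reflect.TypeOf(map[string]int(nil)))) // Comma-separated list of String:Integer pairs
	fmt.Println(toTypeDescription(reflect.TypeOf(time.Duration(0))))    // Duration
	fmt.Println(toTypeDescription(reflect.TypeOf(float64(0))))          // Float
}
```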
// Usage writes usage information to stdout using the default header and table format
func Usage(prefix string, spec interface{}) error {
// The default is to output the usage information as a table
// Create tabwriter instance to support table output
tabs := tabwriter.NewWriter(os.Stdout, 1, 0, 4, ' ', 0)
err := Usagef(prefix, spec, tabs, DefaultTableFormat)
tabs.Flush()
return err
}
// Usagef writes usage information to the specified io.Writer using the specified template format
func Usagef(prefix string, spec interface{}, out io.Writer, format string) error {
// Specify the default usage template functions
functions := template.FuncMap{
"usage_key": func(v varInfo) string { return v.Key },
"usage_description": func(v varInfo) string { return v.Tags.Get("desc") },
"usage_type": func(v varInfo) string { return toTypeDescription(v.Field.Type()) },
"usage_default": func(v varInfo) string { return v.Tags.Get("default") },
"usage_required": func(v varInfo) (string, error) {
req := v.Tags.Get("required")
if req != "" {
reqB, err := strconv.ParseBool(req)
if err != nil {
return "", err
}
if reqB {
req = "true"
}
}
return req, nil
},
}
tmpl, err := template.New("envconfig").Funcs(functions).Parse(format)
if err != nil {
return err
}
return Usaget(prefix, spec, out, tmpl)
}
// Usaget writes usage information to the specified io.Writer using the specified template
func Usaget(prefix string, spec interface{}, out io.Writer, tmpl *template.Template) error {
// gather first
infos, err := gatherInfo(prefix, spec)
if err != nil {
return err
}
return tmpl.Execute(out, infos)
}
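Tying the three entry points above together, a minimal, hypothetical sketch of printing usage text for a tagged spec struct (the struct fields and tag values are made up for illustration):
```
package main

import (
	"log"
	"os"

	"github.com/kelseyhightower/envconfig"
)

type spec struct {
	Host  string `default:"0.0.0.0:3000" desc:"HTTP listen address"`
	Debug bool   `desc:"enable verbose logging"`
}

func main() {
	var s spec

	// Default table output (DefaultTableFormat) written to stdout.
	if err := envconfig.Usage("APP", &s); err != nil {
		log.Fatalln(err)
	}

	// The same information rendered with the list template instead.
	if err := envconfig.Usagef("APP", &s, os.Stdout, envconfig.DefaultListFormat); err != nil {
		log.Fatalln(err)
	}
}
```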

View File

@ -1,9 +0,0 @@
*.exe
*.dll
*.so
*.dylib
*.test
*.out
*.txt

View File

@ -1,15 +0,0 @@
language: go
go:
- 1.9.x
- 1.10.x
- tip
before_install:
- go get -t -v ./...
script:
- go test -race -coverprofile=coverage.txt -covermode=atomic
after_success:
- bash <(curl -s https://codecov.io/bash)

View File

@ -1,55 +0,0 @@
[![Build](https://img.shields.io/travis/leodido/go-urn/master.svg?style=for-the-badge)](https://travis-ci.org/leodido/go-urn) [![Coverage](https://img.shields.io/codecov/c/github/leodido/go-urn.svg?style=for-the-badge)](https://codecov.io/gh/leodido/go-urn) [![Documentation](https://img.shields.io/badge/godoc-reference-blue.svg?style=for-the-badge)](https://godoc.org/github.com/leodido/go-urn)
**A parser for URNs**.
> As seen in [RFC 2141](https://tools.ietf.org/html/rfc2141#ref-1).
[API documentation](https://godoc.org/github.com/leodido/go-urn).
## Installation
```
go get github.com/leodido/go-urn
```
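## Usage
A short sketch, assuming the v1.x API where Parse takes a []byte and returns (*URN, bool) with exported ID and SS fields and a Normalize method; see the API documentation linked above for the authoritative surface.
```
package main

import (
	"fmt"

	urn "github.com/leodido/go-urn"
)

func main() {
	// "URN:foo:a123,456" is one of the valid inputs from the benchmarks below.
	u, ok := urn.Parse([]byte("URN:foo:a123,456"))
	if !ok {
		fmt.Println("not a valid RFC 2141 URN")
		return
	}
	fmt.Println(u.ID)                   // namespace identifier
	fmt.Println(u.SS)                   // namespace specific string
	fmt.Println(u.Normalize().String()) // normalized form
}
```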
## Performance
This implementation is really fast, usually parsing in under ½ microsecond on my machine<sup>[1](#mymachine)</sup>.
While parsing, it also provides:
1. fine-grained, informative error reporting
2. specific-string normalization
```
ok/00/urn:a:b______________________________________/-4 20000000 265 ns/op 182 B/op 6 allocs/op
ok/01/URN:foo:a123,456_____________________________/-4 30000000 296 ns/op 200 B/op 6 allocs/op
ok/02/urn:foo:a123%2c456___________________________/-4 20000000 331 ns/op 208 B/op 6 allocs/op
ok/03/urn:ietf:params:scim:schemas:core:2.0:User___/-4 20000000 430 ns/op 280 B/op 6 allocs/op
ok/04/urn:ietf:params:scim:schemas:extension:enterp/-4 20000000 411 ns/op 312 B/op 6 allocs/op
ok/05/urn:ietf:params:scim:schemas:extension:enterp/-4 20000000 472 ns/op 344 B/op 6 allocs/op
ok/06/urn:burnout:nss______________________________/-4 30000000 257 ns/op 192 B/op 6 allocs/op
ok/07/urn:abcdefghilmnopqrstuvzabcdefghilm:x_______/-4 20000000 375 ns/op 213 B/op 6 allocs/op
ok/08/urn:urnurnurn:urn____________________________/-4 30000000 265 ns/op 197 B/op 6 allocs/op
ok/09/urn:ciao:@!=%2c(xyz)+a,b.*@g=$_'_____________/-4 20000000 307 ns/op 248 B/op 6 allocs/op
ok/10/URN:x:abc%1dz%2f%3az_________________________/-4 30000000 259 ns/op 212 B/op 6 allocs/op
no/11/URN:-xxx:x___________________________________/-4 20000000 445 ns/op 320 B/op 6 allocs/op
no/12/urn::colon:nss_______________________________/-4 20000000 461 ns/op 320 B/op 6 allocs/op
no/13/urn:abcdefghilmnopqrstuvzabcdefghilmn:specifi/-4 10000000 660 ns/op 320 B/op 6 allocs/op
no/14/URN:a!?:x____________________________________/-4 20000000 507 ns/op 320 B/op 6 allocs/op
no/15/urn:urn:NSS__________________________________/-4 20000000 429 ns/op 288 B/op 6 allocs/op
no/16/urn:white_space:NSS__________________________/-4 20000000 482 ns/op 320 B/op 6 allocs/op
no/17/urn:concat:no_spaces_________________________/-4 20000000 539 ns/op 328 B/op 7 allocs/op
no/18/urn:a:/______________________________________/-4 20000000 470 ns/op 320 B/op 7 allocs/op
no/19/urn:UrN:NSS__________________________________/-4 20000000 399 ns/op 288 B/op 6 allocs/op
```
---
* <a name="mymachine">[1]</a>: Intel Core i7-7600U CPU @ 2.80GHz
---
[![Analytics](https://ga-beacon.appspot.com/UA-49657176-1/go-urn?flat)](https://github.com/igrigorik/ga-beacon)

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff