mirror of https://github.com/imgproxy/imgproxy.git synced 2024-11-24 08:12:38 +02:00

fasthttp; Optimized memory allocation

This commit is contained in:
DarthSim 2018-10-05 21:17:36 +06:00
parent 6e8933198f
commit 34a61d287f
95 changed files with 23591 additions and 179 deletions

Gopkg.lock generated

@ -56,6 +56,26 @@
pruneopts = "UT"
revision = "0b12d6b5"
[[projects]]
digest = "1:cdbed6a7b574b13bbabcd8c36c5be56d6444789bab21fcd37d39655eab0b2141"
name = "github.com/klauspost/compress"
packages = [
"flate",
"gzip",
"zlib",
]
pruneopts = "UT"
revision = "b939724e787a27c0005cabe3f78e7ed7987ac74f"
version = "v1.4.0"
[[projects]]
digest = "1:4ea0668d490ca32a38366453a486e2e8c60fbdaf1f2607c96b4a093d8a5c8de7"
name = "github.com/klauspost/cpuid"
packages = ["."]
pruneopts = "UT"
revision = "ae7887de9fa5d2db4eaa8174a7eff2c1ac00f2da"
version = "v1.1"
[[projects]]
branch = "master"
digest = "1:97e35d2efabea52337e58f96e8b798fd461bf42d2519beb50f485029c6ad6aa5"
@ -64,6 +84,26 @@
pruneopts = "UT"
revision = "3de1538a83bcb1749aa54482113e5cd06e4fb938"
[[projects]]
digest = "1:c468422f334a6b46a19448ad59aaffdfc0a36b08fdcc1c749a0b29b6453d7e59"
name = "github.com/valyala/bytebufferpool"
packages = ["."]
pruneopts = "UT"
revision = "e746df99fe4a3986f4d4f79e13c1e0117ce9c2f7"
version = "v1.0.0"
[[projects]]
branch = "master"
digest = "1:7c1085da40cd79011bff52dde0c8e489d3947bd296bc8361aa71285b44b79737"
name = "github.com/valyala/fasthttp"
packages = [
".",
"fasthttputil",
"stackless",
]
pruneopts = "UT"
revision = "8dfc881b9ec613747a4b7bb8ebe7504b3a18a409"
[[projects]]
digest = "1:72b36febaabad58e1864de2b43de05689ce27a2c9a582a61a25e71a31ba23d0b"
name = "golang.org/x/image"
@ -91,6 +131,7 @@
"github.com/aws/aws-sdk-go/aws/session",
"github.com/aws/aws-sdk-go/service/s3",
"github.com/matoous/go-nanoid",
"github.com/valyala/fasthttp",
"golang.org/x/image/webp",
"golang.org/x/net/netutil",
]


@ -33,6 +33,10 @@
branch = "master"
name = "github.com/matoous/go-nanoid"
[[constraint]]
branch = "master"
name = "github.com/valyala/fasthttp"
[prune]
go-tests = true
unused-packages = true


@ -35,7 +35,7 @@ imgproxy only includes the must-have features for image processing, fine-tuning
imgproxy uses probably the most efficient image processing library there is, called `libvips`. It is screaming fast and has a very low memory footprint; with it, we can handle the processing for a massive amount of images on the fly.
imgproxy also uses native Go's `net/http` routing for the best HTTP networking support.
imgproxy also uses [fasthttp](https://github.com/valyala/fasthttp) for the best HTTP networking speed and lower memory usage.
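For readers unfamiliar with the library, here is a minimal, self-contained sketch of fasthttp's handler model that the rest of this commit builds on. The handler and server names below are illustrative, not imgproxy's actual code; the real handler is in server.go further down.

	package main

	import (
		"log"

		"github.com/valyala/fasthttp"
	)

	// handler is a hypothetical request handler. fasthttp passes a single
	// *RequestCtx that is pooled and reused across requests, which is where
	// much of the memory saving over net/http comes from.
	func handler(rctx *fasthttp.RequestCtx) {
		rctx.SetContentType("text/plain")
		rctx.SetBodyString("hello from fasthttp")
	}

	func main() {
		s := &fasthttp.Server{
			Name:    "example",
			Handler: handler,
		}
		log.Fatal(s.ListenAndServe(":8080"))
	}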
#### Security


@ -3,6 +3,7 @@ package main
import (
"bufio"
"bytes"
"context"
"crypto/tls"
"errors"
"fmt"
@ -10,6 +11,7 @@ import (
"io"
"io/ioutil"
"net/http"
"sync"
"time"
_ "image/gif"
@ -19,17 +21,27 @@ import (
_ "golang.org/x/image/webp"
)
var downloadClient *http.Client
var (
downloadClient *http.Client
imageTypeCtxKey = ctxKey("imageType")
imageDataCtxKey = ctxKey("imageData")
)
var downloadBufPool = sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
}
type netReader struct {
reader *bufio.Reader
buf *bytes.Buffer
}
func newNetReader(r io.Reader) *netReader {
func newNetReader(r io.Reader, buf *bytes.Buffer) *netReader {
return &netReader{
reader: bufio.NewReader(r),
buf: bytes.NewBuffer([]byte{}),
buf: buf,
}
}
@ -45,15 +57,11 @@ func (r *netReader) Peek(n int) ([]byte, error) {
return r.reader.Peek(n)
}
func (r *netReader) ReadAll() ([]byte, error) {
func (r *netReader) ReadAll() error {
if _, err := r.buf.ReadFrom(r.reader); err != nil {
return []byte{}, err
return err
}
return r.buf.Bytes(), nil
}
func (r *netReader) GrowBuf(s int) {
r.buf.Grow(s)
return nil
}
func initDownloading() {
@ -99,36 +107,49 @@ func checkTypeAndDimensions(r io.Reader) (imageType, error) {
return imgtype, nil
}
func readAndCheckImage(res *http.Response) ([]byte, imageType, error) {
nr := newNetReader(res.Body)
func readAndCheckImage(ctx context.Context, res *http.Response) (context.Context, context.CancelFunc, error) {
buf := downloadBufPool.Get().(*bytes.Buffer)
cancel := func() {
buf.Reset()
downloadBufPool.Put(buf)
}
nr := newNetReader(res.Body, buf)
imgtype, err := checkTypeAndDimensions(nr)
if err != nil {
return nil, imageTypeUnknown, err
return ctx, cancel, err
}
if res.ContentLength > 0 {
nr.GrowBuf(int(res.ContentLength))
if err = nr.ReadAll(); err == nil {
ctx = context.WithValue(ctx, imageTypeCtxKey, imgtype)
ctx = context.WithValue(ctx, imageDataCtxKey, nr.buf)
}
b, err := nr.ReadAll()
return b, imgtype, err
return ctx, cancel, err
}
func downloadImage(url string) ([]byte, imageType, error) {
fullURL := fmt.Sprintf("%s%s", conf.BaseURL, url)
func downloadImage(ctx context.Context) (context.Context, context.CancelFunc, error) {
url := fmt.Sprintf("%s%s", conf.BaseURL, getImageURL(ctx))
res, err := downloadClient.Get(fullURL)
res, err := downloadClient.Get(url)
if err != nil {
return nil, imageTypeUnknown, err
return ctx, func() {}, err
}
defer res.Body.Close()
if res.StatusCode != 200 {
body, _ := ioutil.ReadAll(res.Body)
return nil, imageTypeUnknown, fmt.Errorf("Can't download image; Status: %d; %s", res.StatusCode, string(body))
return ctx, func() {}, fmt.Errorf("Can't download image; Status: %d; %s", res.StatusCode, string(body))
}
return readAndCheckImage(res)
return readAndCheckImage(ctx, res)
}
func getImageType(ctx context.Context) imageType {
return ctx.Value(imageTypeCtxKey).(imageType)
}
func getImageData(ctx context.Context) *bytes.Buffer {
return ctx.Value(imageDataCtxKey).(*bytes.Buffer)
}
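The download code above threads per-request state (image type, image data) through context values keyed by the private ctxKey string type declared in main.go; the unexported key type prevents collisions with keys set by other packages. A minimal sketch of that pattern, with a hypothetical key and value:

	package main

	import (
		"context"
		"fmt"
	)

	// An unexported key type keeps context keys private to the package.
	type ctxKey string

	const exampleCtxKey = ctxKey("example")

	func main() {
		ctx := context.WithValue(context.Background(), exampleCtxKey, "jpeg")

		// The bare type assertion mirrors getImageType/getImageData above:
		// it panics if the value was never stored, which the request flow
		// is expected to guarantee cannot happen.
		fmt.Println(ctx.Value(exampleCtxKey).(string))
	}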

gzip.go Normal file

@ -0,0 +1,24 @@
package main
import (
"compress/gzip"
"io"
"os"
"sync"
)
var nullwriter, _ = os.Open("/dev/null")
var gzipPool = sync.Pool{
New: func() interface{} {
gz, _ := gzip.NewWriterLevel(nullwriter, conf.GZipCompression)
return gz
},
}
func gzipData(data []byte, w io.Writer) {
gz := gzipPool.Get().(*gzip.Writer)
// Return the writer to the pool so later requests can reuse it.
defer gzipPool.Put(gz)
gz.Reset(w)
gz.Write(data)
gz.Close()
}
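A note on the pattern: each pooled *gzip.Writer is constructed once against a throwaway target and re-pointed at the real destination with Reset, so no writer is allocated per response. A self-contained sketch of the same idea, using ioutil.Discard instead of an open /dev/null handle and a fixed compression level in place of conf.GZipCompression:

	package main

	import (
		"bytes"
		"compress/gzip"
		"fmt"
		"io"
		"io/ioutil"
		"sync"
	)

	// Pool of pre-built gzip writers. ioutil.Discard is a safe initial
	// target; Reset re-points the writer before every real use.
	var pool = sync.Pool{
		New: func() interface{} {
			gz, _ := gzip.NewWriterLevel(ioutil.Discard, gzip.BestSpeed)
			return gz
		},
	}

	func compress(data []byte, w io.Writer) error {
		gz := pool.Get().(*gzip.Writer)
		defer pool.Put(gz) // recycle the writer

		gz.Reset(w)
		if _, err := gz.Write(data); err != nil {
			return err
		}
		return gz.Close()
	}

	func main() {
		var buf bytes.Buffer
		if err := compress([]byte("hello, pooled gzip"), &buf); err != nil {
			panic(err)
		}
		fmt.Println("compressed size:", buf.Len())
	}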

main.go

@ -1,20 +1,21 @@
package main
import (
"log"
"os"
"os/signal"
"runtime/debug"
"time"
"net/http"
_ "net/http/pprof"
)
const version = "2.0.0"
type ctxKey string
func main() {
// Force garbage collection
go func() {
for range time.Tick(10 * time.Second) {
debug.FreeOSMemory()
}
log.Println(http.ListenAndServe("localhost:6060", nil))
}()
s := startServer()

mutex.go Normal file

@ -0,0 +1,15 @@
package main
type mutex chan struct{}
func newMutex(size int) mutex {
return make(mutex, size)
}
func (m mutex) Lock() {
m <- struct{}{}
}
func (m mutex) Unlock() {
<-m
}
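The buffered channel doubles as a counting semaphore: Lock blocks once size goroutines hold it, which is how server.go below caps concurrency at conf.Concurrency. A standalone usage sketch (the type is repeated only to keep the example self-contained):

	package main

	import (
		"fmt"
		"sync"
	)

	type mutex chan struct{}

	func newMutex(size int) mutex { return make(mutex, size) }
	func (m mutex) Lock()         { m <- struct{}{} }
	func (m mutex) Unlock()       { <-m }

	func main() {
		m := newMutex(2) // at most 2 goroutines inside the critical section

		var wg sync.WaitGroup
		for i := 0; i < 5; i++ {
			wg.Add(1)
			go func(i int) {
				defer wg.Done()
				m.Lock()
				defer m.Unlock()
				fmt.Println("processing request", i)
			}(i)
		}
		wg.Wait()
	}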


@ -8,6 +8,7 @@ package main
import "C"
import (
"context"
"errors"
"log"
"math"
@ -121,17 +122,17 @@ func extractMeta(img *C.VipsImage) (int, int, int, bool) {
return width, height, angle, flip
}
func needToScale(width, height int, po processingOptions) bool {
func needToScale(width, height int, po *processingOptions) bool {
return ((po.Width != 0 && po.Width != width) || (po.Height != 0 && po.Height != height)) &&
(po.Resize == resizeFill || po.Resize == resizeFit)
}
func needToCrop(width, height int, po processingOptions) bool {
func needToCrop(width, height int, po *processingOptions) bool {
return (po.Width != width || po.Height != height) &&
(po.Resize == resizeFill || po.Resize == resizeCrop)
}
func calcScale(width, height int, po processingOptions) float64 {
func calcScale(width, height int, po *processingOptions) float64 {
fsw, fsh, fow, foh := float64(width), float64(height), float64(po.Width), float64(po.Height)
wr := fow / fsw
@ -172,7 +173,7 @@ func calcShink(scale float64, imgtype imageType) int {
return 1
}
func calcCrop(width, height int, po processingOptions) (left, top int) {
func calcCrop(width, height int, po *processingOptions) (left, top int) {
left = (width - po.Width + 1) / 2
top = (height - po.Height + 1) / 2
@ -203,9 +204,12 @@ func calcCrop(width, height int, po processingOptions) (left, top int) {
return
}
func processImage(data []byte, imgtype imageType, po processingOptions, t *timer) ([]byte, error) {
func processImage(ctx context.Context) ([]byte, error) {
defer C.vips_cleanup()
defer runtime.KeepAlive(data)
data := getImageData(ctx).Bytes()
po := getprocessingOptions(ctx)
imgtype := getImageType(ctx)
if po.Gravity.Type == gravitySmart && !vipsSupportSmartcrop {
return nil, errors.New("Smart crop is not supported by used version of libvips")
@ -217,7 +221,7 @@ func processImage(data []byte, imgtype imageType, po processingOptions, t *timer
}
defer C.clear_image(&img)
t.Check()
checkTimeout(ctx)
imgWidth, imgHeight, angle, flip := extractMeta(img)
@ -282,7 +286,7 @@ func processImage(data []byte, imgtype imageType, po processingOptions, t *timer
return nil, err
}
t.Check()
checkTimeout(ctx)
if angle != C.VIPS_ANGLE_D0 || flip {
if err = vipsImageCopyMemory(&img); err != nil {
@ -302,7 +306,7 @@ func processImage(data []byte, imgtype imageType, po processingOptions, t *timer
}
}
t.Check()
checkTimeout(ctx)
if po.Width == 0 {
po.Width = imgWidth
@ -327,7 +331,7 @@ func processImage(data []byte, imgtype imageType, po processingOptions, t *timer
}
}
t.Check()
checkTimeout(ctx)
}
if hasAlpha && po.Flatten {
@ -348,7 +352,7 @@ func processImage(data []byte, imgtype imageType, po processingOptions, t *timer
}
}
t.Check()
checkTimeout(ctx)
return vipsSaveImage(img, po.Format)
}


@ -7,13 +7,16 @@ package main
import "C"
import (
"context"
"encoding/base64"
"errors"
"fmt"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
"github.com/valyala/fasthttp"
)
type urlOptions map[string][]string
@ -100,6 +103,18 @@ type processingOptions struct {
UsedPresets []string
}
const (
imageURLCtxKey = ctxKey("imageUrl")
processingOptionsCtxKey = ctxKey("processingOptions")
)
var (
errInvalidURLEncoding = errors.New("Invalid url encoding")
errInvalidPath = errors.New("Invalid path")
errInvalidImageURL = errors.New("Invalid image url")
errResultingImageFormatIsNotSupported = errors.New("Resulting image format is not supported")
)
func (it imageType) String() string {
for k, v := range imageTypes {
if v == it {
@ -165,7 +180,7 @@ func decodeURL(parts []string) (string, string, error) {
urlParts := strings.Split(strings.Join(parts, ""), ".")
if len(urlParts) > 2 {
return "", "", errors.New("Invalid url encoding")
return "", "", errInvalidURLEncoding
}
if len(urlParts) == 2 {
@ -174,7 +189,7 @@ func decodeURL(parts []string) (string, string, error) {
url, err := base64.RawURLEncoding.DecodeString(urlParts[0])
if err != nil {
return "", "", errors.New("Invalid url encoding")
return "", "", errInvalidURLEncoding
}
return string(url), extension, nil
@ -413,7 +428,7 @@ func applyFormatOption(po *processingOptions, args []string) error {
}
if !vipsTypeSupportSave[po.Format] {
return errors.New("Resulting image type not supported")
return errResultingImageFormatIsNotSupported
}
return nil
@ -524,7 +539,7 @@ func defaultProcessingOptions(acceptHeader string) (processingOptions, error) {
Format: imageTypeJPEG,
Blur: 0,
Sharpen: 0,
UsedPresets: make([]string, 0),
UsedPresets: make([]string, 0, len(conf.Presets)),
}
if (conf.EnableWebpDetection || conf.EnforceWebp) && strings.Contains(acceptHeader, "image/webp") {
@ -557,7 +572,7 @@ func parsePathAdvanced(parts []string, acceptHeader string) (string, processingO
if len(extension) > 0 {
if err := applyFormatOption(&po, []string{extension}); err != nil {
return "", po, errors.New("Resulting image type not supported")
return "", po, err
}
}
@ -568,7 +583,7 @@ func parsePathSimple(parts []string, acceptHeader string) (string, processingOpt
var err error
if len(parts) < 6 {
return "", processingOptions{}, errors.New("Invalid path")
return "", processingOptions{}, errInvalidPath
}
po, err := defaultProcessingOptions(acceptHeader)
@ -601,35 +616,56 @@ func parsePathSimple(parts []string, acceptHeader string) (string, processingOpt
if len(extension) > 0 {
if err := applyFormatOption(&po, []string{extension}); err != nil {
return "", po, errors.New("Resulting image type not supported")
return "", po, err
}
}
return string(url), po, nil
}
func parsePath(r *http.Request) (string, processingOptions, error) {
path := r.URL.Path
func parsePath(ctx context.Context, rctx *fasthttp.RequestCtx) (context.Context, error) {
path := string(rctx.RequestURI())
parts := strings.Split(strings.TrimPrefix(path, "/"), "/")
var acceptHeader string
if h, ok := r.Header["Accept"]; ok {
acceptHeader = h[0]
if h := rctx.Request.Header.Peek("Accept"); len(h) > 0 {
acceptHeader = string(h)
}
if len(parts) < 3 {
return "", processingOptions{}, errors.New("Invalid path")
return ctx, errInvalidPath
}
if !conf.AllowInsecure {
if err := validatePath(parts[0], strings.TrimPrefix(path, fmt.Sprintf("/%s", parts[0]))); err != nil {
return "", processingOptions{}, err
return ctx, err
}
}
var imageURL string
var po processingOptions
var err error
if _, ok := resizeTypes[parts[1]]; ok {
return parsePathSimple(parts[1:], acceptHeader)
imageURL, po, err = parsePathSimple(parts[1:], acceptHeader)
} else {
imageURL, po, err = parsePathAdvanced(parts[1:], acceptHeader)
}
return parsePathAdvanced(parts[1:], acceptHeader)
if err != nil {
return ctx, err
}
if _, err = url.ParseRequestURI(imageURL); err != nil {
return ctx, errInvalidImageURL
}
ctx = context.WithValue(ctx, imageURLCtxKey, imageURL)
ctx = context.WithValue(ctx, processingOptionsCtxKey, &po)
return ctx, nil
}
func getImageURL(ctx context.Context) string {
return ctx.Value(imageURLCtxKey).(string)
}
func getprocessingOptions(ctx context.Context) *processingOptions {
return ctx.Value(processingOptionsCtxKey).(*processingOptions)
}

server.go

@ -2,63 +2,61 @@ package main
import (
"bytes"
"compress/gzip"
"context"
"crypto/subtle"
"fmt"
"log"
"net"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"
nanoid "github.com/matoous/go-nanoid"
"golang.org/x/net/netutil"
"github.com/valyala/fasthttp"
)
var mimes = map[imageType]string{
imageTypeJPEG: "image/jpeg",
imageTypePNG: "image/png",
imageTypeWEBP: "image/webp",
}
type httpHandler struct {
sem chan struct{}
}
func newHTTPHandler() *httpHandler {
return &httpHandler{make(chan struct{}, conf.Concurrency)}
}
func startServer() *http.Server {
l, err := net.Listen("tcp", conf.Bind)
if err != nil {
log.Fatal(err)
var (
mimes = map[imageType]string{
imageTypeJPEG: "image/jpeg",
imageTypePNG: "image/png",
imageTypeWEBP: "image/webp",
}
s := &http.Server{
Handler: newHTTPHandler(),
ReadTimeout: time.Duration(conf.ReadTimeout) * time.Second,
MaxHeaderBytes: 1 << 20,
responseBufPool = sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
}
authHeaderMust []byte
healthRequestURI = []byte("/health")
serverMutex mutex
)
func startServer() *fasthttp.Server {
serverMutex = newMutex(conf.Concurrency)
s := &fasthttp.Server{
Name: "imgproxy",
Handler: serveHTTP,
Concurrency: conf.MaxClients,
ReadTimeout: time.Duration(conf.ReadTimeout) * time.Second,
}
go func() {
log.Printf("Starting server at %s\n", conf.Bind)
log.Fatal(s.Serve(netutil.LimitListener(l, conf.MaxClients)))
if err := s.ListenAndServe(conf.Bind); err != nil {
log.Fatalln(err)
}
}()
return s
}
func shutdownServer(s *http.Server) {
func shutdownServer(s *fasthttp.Server) {
log.Println("Shutting down the server...")
ctx, close := context.WithTimeout(context.Background(), 5*time.Second)
defer close()
s.Shutdown(ctx)
s.Shutdown()
}
func logResponse(status int, msg string) {
@ -75,143 +73,137 @@ func logResponse(status int, msg string) {
log.Printf("|\033[7;%dm %d \033[0m| %s\n", color, status, msg)
}
func writeCORS(rw http.ResponseWriter) {
func writeCORS(rctx *fasthttp.RequestCtx) {
if len(conf.AllowOrigin) > 0 {
rw.Header().Set("Access-Control-Allow-Origin", conf.AllowOrigin)
rw.Header().Set("Access-Control-Allow-Methods", "GET, OPTIONs")
rctx.Request.Header.Set("Access-Control-Allow-Origin", conf.AllowOrigin)
rctx.Request.Header.Set("Access-Control-Allow-Methods", "GET, OPTIONs")
}
}
func respondWithImage(reqID string, r *http.Request, rw http.ResponseWriter, data []byte, imgURL string, po processingOptions, duration time.Duration) {
gzipped := strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") && conf.GZipCompression > 0
func respondWithImage(ctx context.Context, reqID string, rctx *fasthttp.RequestCtx, data []byte) {
rctx.SetStatusCode(200)
rw.Header().Set("Expires", time.Now().Add(time.Second*time.Duration(conf.TTL)).Format(http.TimeFormat))
rw.Header().Set("Cache-Control", fmt.Sprintf("max-age=%d, public", conf.TTL))
rw.Header().Set("Content-Type", mimes[po.Format])
po := getprocessingOptions(ctx)
dataToRespond := data
rctx.SetContentType(mimes[po.Format])
rctx.Response.Header.Set("Cache-Control", fmt.Sprintf("max-age=%d, public", conf.TTL))
rctx.Response.Header.Set("Expires", time.Now().Add(time.Second*time.Duration(conf.TTL)).Format(http.TimeFormat))
if gzipped {
var buf bytes.Buffer
gz, _ := gzip.NewWriterLevel(&buf, conf.GZipCompression)
gz.Write(data)
gz.Close()
dataToRespond = buf.Bytes()
rw.Header().Set("Content-Encoding", "gzip")
if conf.GZipCompression > 0 && rctx.Request.Header.HasAcceptEncodingBytes([]byte("gzip")) {
rctx.Response.Header.Set("Content-Encoding", "gzip")
gzipData(data, rctx)
} else {
rctx.SetBody(data)
}
rw.Header().Set("Content-Length", strconv.Itoa(len(dataToRespond)))
rw.WriteHeader(200)
rw.Write(dataToRespond)
logResponse(200, fmt.Sprintf("[%s] Processed in %s: %s; %+v", reqID, duration, imgURL, po))
logResponse(200, fmt.Sprintf("[%s] Processed in %s: %s; %+v", reqID, getTimerSince(ctx), getImageURL(ctx), po))
}
func respondWithError(reqID string, rw http.ResponseWriter, err imgproxyError) {
func respondWithError(reqID string, rctx *fasthttp.RequestCtx, err imgproxyError) {
logResponse(err.StatusCode, fmt.Sprintf("[%s] %s", reqID, err.Message))
rw.WriteHeader(err.StatusCode)
rw.Write([]byte(err.PublicMessage))
rctx.SetStatusCode(err.StatusCode)
rctx.SetBodyString(err.PublicMessage)
}
func respondWithOptions(reqID string, rw http.ResponseWriter) {
func respondWithOptions(reqID string, rctx *fasthttp.RequestCtx) {
logResponse(200, fmt.Sprintf("[%s] Respond with options", reqID))
rw.WriteHeader(200)
rctx.SetStatusCode(200)
}
func checkSecret(s string) bool {
func prepareAuthHeaderMust() []byte {
if len(authHeaderMust) == 0 {
buf := bytes.NewBufferString("Bearer ")
buf.WriteString(conf.Secret)
authHeaderMust = buf.Bytes()
}
return authHeaderMust
}
func checkSecret(rctx *fasthttp.RequestCtx) bool {
if len(conf.Secret) == 0 {
return true
}
return strings.HasPrefix(s, "Bearer ") && subtle.ConstantTimeCompare([]byte(strings.TrimPrefix(s, "Bearer ")), []byte(conf.Secret)) == 1
return subtle.ConstantTimeCompare(
rctx.Request.Header.Peek("Authorization"),
prepareAuthHeaderMust(),
) == 1
}
func (h *httpHandler) lock() {
h.sem <- struct{}{}
}
func (h *httpHandler) unlock() {
<-h.sem
}
func (h *httpHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
func serveHTTP(rctx *fasthttp.RequestCtx) {
reqID, _ := nanoid.Nanoid()
defer func() {
if r := recover(); r != nil {
if err, ok := r.(imgproxyError); ok {
respondWithError(reqID, rw, err)
respondWithError(reqID, rctx, err)
} else {
respondWithError(reqID, rw, newUnexpectedError(r.(error), 4))
respondWithError(reqID, rctx, newUnexpectedError(r.(error), 4))
}
}
}()
log.Printf("[%s] %s: %s\n", reqID, r.Method, r.URL.RequestURI())
log.Printf("[%s] %s: %s\n", reqID, rctx.Method(), rctx.RequestURI())
writeCORS(rw)
writeCORS(rctx)
if r.Method == http.MethodOptions {
respondWithOptions(reqID, rw)
if rctx.Request.Header.IsOptions() {
respondWithOptions(reqID, rctx)
return
}
if r.Method != http.MethodGet {
if !rctx.IsGet() {
panic(invalidMethodErr)
}
if !checkSecret(r.Header.Get("Authorization")) {
if !checkSecret(rctx) {
panic(invalidSecretErr)
}
h.lock()
defer h.unlock()
serverMutex.Lock()
defer serverMutex.Unlock()
if r.URL.Path == "/health" {
rw.WriteHeader(200)
rw.Write([]byte("imgproxy is running"))
if bytes.Equal(rctx.RequestURI(), healthRequestURI) {
rctx.SetStatusCode(200)
rctx.SetBodyString("imgproxy is running")
return
}
t := startTimer(time.Duration(conf.WriteTimeout)*time.Second, "Processing")
ctx, timeoutCancel := startTimer(time.Duration(conf.WriteTimeout) * time.Second)
defer timeoutCancel()
imgURL, procOpt, err := parsePath(r)
ctx, err := parsePath(ctx, rctx)
if err != nil {
panic(newError(404, err.Error(), "Invalid image url"))
}
if _, err = url.ParseRequestURI(imgURL); err != nil {
panic(newError(404, err.Error(), "Invalid image url"))
}
b, imgtype, err := downloadImage(imgURL)
ctx, downloadcancel, err := downloadImage(ctx)
defer downloadcancel()
if err != nil {
panic(newError(404, err.Error(), "Image is unreachable"))
}
t.Check()
checkTimeout(ctx)
if conf.ETagEnabled {
eTag := calcETag(b, &procOpt)
rw.Header().Set("ETag", eTag)
if eTag == r.Header.Get("If-None-Match") {
panic(notModifiedErr)
}
}
// if conf.ETagEnabled {
// eTag := calcETag(b, &procOpt)
// rw.Header().Set("ETag", eTag)
// if eTag == r.Header.Get("If-None-Match") {
// panic(notModifiedErr)
// }
// }
t.Check()
checkTimeout(ctx)
b, err = processImage(b, imgtype, procOpt, t)
imageData, err := processImage(ctx)
if err != nil {
panic(newError(500, err.Error(), "Error occurred while processing image"))
}
t.Check()
checkTimeout(ctx)
respondWithImage(reqID, r, rw, b, imgURL, procOpt, t.Since())
respondWithImage(ctx, reqID, rctx, imageData)
}


@ -1,32 +1,34 @@
package main
import (
"context"
"fmt"
"time"
)
var timerSinceCtxKey = ctxKey("timerSince")
type timer struct {
StartTime time.Time
Timer <-chan time.Time
}
func startTimer(dt time.Duration, info string) *timer {
return &timer{time.Now(), time.After(dt)}
func startTimer(d time.Duration) (context.Context, context.CancelFunc) {
return context.WithTimeout(
context.WithValue(context.Background(), timerSinceCtxKey, time.Now()),
d,
)
}
func (t *timer) Check() {
func getTimerSince(ctx context.Context) time.Duration {
return time.Since(ctx.Value(timerSinceCtxKey).(time.Time))
}
func checkTimeout(ctx context.Context) {
select {
case <-t.Timer:
panic(t.TimeoutErr())
case <-ctx.Done():
panic(newError(503, fmt.Sprintf("Timeout after %v", getTimerSince(ctx)), "Timeout"))
default:
// Go ahead
}
}
func (t *timer) TimeoutErr() imgproxyError {
return newError(503, fmt.Sprintf("Timeout after %v", t.Since()), "Timeout")
}
func (t *timer) Since() time.Duration {
return time.Since(t.StartTime)
}
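The custom timer struct is replaced by a context deadline: the start time travels as a context value, getTimerSince recovers the elapsed time, and checkTimeout panics once the deadline passes (serveHTTP recovers the panic and turns it into a 503). A self-contained sketch of the flow, with a plain panic message standing in for imgproxy's newError:

	package main

	import (
		"context"
		"fmt"
		"time"
	)

	type ctxKey string

	var timerSinceCtxKey = ctxKey("timerSince")

	func startTimer(d time.Duration) (context.Context, context.CancelFunc) {
		return context.WithTimeout(
			context.WithValue(context.Background(), timerSinceCtxKey, time.Now()),
			d,
		)
	}

	func checkTimeout(ctx context.Context) {
		select {
		case <-ctx.Done():
			// imgproxy panics with a 503 imgproxyError here; this sketch
			// panics with a plain string instead.
			elapsed := time.Since(ctx.Value(timerSinceCtxKey).(time.Time))
			panic(fmt.Sprintf("Timeout after %v", elapsed))
		default:
			// deadline not reached; continue processing
		}
	}

	func main() {
		ctx, cancel := startTimer(50 * time.Millisecond)
		defer cancel()

		defer func() {
			if r := recover(); r != nil {
				fmt.Println("recovered:", r) // mirrors serveHTTP's recover
			}
		}()

		time.Sleep(100 * time.Millisecond) // simulate slow processing
		checkTimeout(ctx)                  // fires: the deadline has passed
	}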

vendor/github.com/klauspost/compress/LICENSE generated vendored Normal file

@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/klauspost/compress/flate/copy.go generated vendored Normal file

@ -0,0 +1,32 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
// forwardCopy is like the built-in copy function except that it always goes
// forward from the start, even if the dst and src overlap.
// It is equivalent to:
// for i := 0; i < n; i++ {
// mem[dst+i] = mem[src+i]
// }
func forwardCopy(mem []byte, dst, src, n int) {
if dst <= src {
copy(mem[dst:dst+n], mem[src:src+n])
return
}
for {
if dst >= src+n {
copy(mem[dst:dst+n], mem[src:src+n])
return
}
// There is some forward overlap. The destination
// will be filled with a repeated pattern of mem[src:src+k].
// We copy one instance of the pattern here, then repeat.
// Each time around this loop k will double.
k := dst - src
copy(mem[dst:dst+k], mem[src:src+k])
n -= k
dst += k
}
}


@ -0,0 +1,41 @@
//+build !noasm
//+build !appengine
// Copyright 2015, Klaus Post, see LICENSE for details.
package flate
import (
"github.com/klauspost/cpuid"
)
// crc32sse returns a hash for the first 4 bytes of the slice
// len(a) must be >= 4.
//go:noescape
func crc32sse(a []byte) uint32
// crc32sseAll calculates hashes for each 4-byte set in a.
// dst must be at least len(a) - 4 in size.
// The size is not checked by the assembly.
//go:noescape
func crc32sseAll(a []byte, dst []uint32)
// matchLenSSE4 returns the number of matching bytes in a and b
// up to length 'max'. Both slices must be at least 'max'
// bytes in size.
//
// TODO: drop the "SSE4" name, since it doesn't use any SSE instructions.
//
//go:noescape
func matchLenSSE4(a, b []byte, max int) int
// histogram accumulates a histogram of b in h.
// h must be at least 256 entries in length,
// and must be cleared before calling this function.
//go:noescape
func histogram(b []byte, h []int32)
// Detect SSE 4.2 feature.
func init() {
useSSE42 = cpuid.CPU.SSE42()
}


@ -0,0 +1,213 @@
//+build !noasm
//+build !appengine
// Copyright 2015, Klaus Post, see LICENSE for details.
// func crc32sse(a []byte) uint32
TEXT ·crc32sse(SB), 4, $0
MOVQ a+0(FP), R10
XORQ BX, BX
// CRC32 dword (R10), EBX
BYTE $0xF2; BYTE $0x41; BYTE $0x0f
BYTE $0x38; BYTE $0xf1; BYTE $0x1a
MOVL BX, ret+24(FP)
RET
// func crc32sseAll(a []byte, dst []uint32)
TEXT ·crc32sseAll(SB), 4, $0
MOVQ a+0(FP), R8 // R8: src
MOVQ a_len+8(FP), R10 // input length
MOVQ dst+24(FP), R9 // R9: dst
SUBQ $4, R10
JS end
JZ one_crc
MOVQ R10, R13
SHRQ $2, R10 // len/4
ANDQ $3, R13 // len&3
XORQ BX, BX
ADDQ $1, R13
TESTQ R10, R10
JZ rem_loop
crc_loop:
MOVQ (R8), R11
XORQ BX, BX
XORQ DX, DX
XORQ DI, DI
MOVQ R11, R12
SHRQ $8, R11
MOVQ R12, AX
MOVQ R11, CX
SHRQ $16, R12
SHRQ $16, R11
MOVQ R12, SI
// CRC32 EAX, EBX
BYTE $0xF2; BYTE $0x0f
BYTE $0x38; BYTE $0xf1; BYTE $0xd8
// CRC32 ECX, EDX
BYTE $0xF2; BYTE $0x0f
BYTE $0x38; BYTE $0xf1; BYTE $0xd1
// CRC32 ESI, EDI
BYTE $0xF2; BYTE $0x0f
BYTE $0x38; BYTE $0xf1; BYTE $0xfe
MOVL BX, (R9)
MOVL DX, 4(R9)
MOVL DI, 8(R9)
XORQ BX, BX
MOVL R11, AX
// CRC32 EAX, EBX
BYTE $0xF2; BYTE $0x0f
BYTE $0x38; BYTE $0xf1; BYTE $0xd8
MOVL BX, 12(R9)
ADDQ $16, R9
ADDQ $4, R8
XORQ BX, BX
SUBQ $1, R10
JNZ crc_loop
rem_loop:
MOVL (R8), AX
// CRC32 EAX, EBX
BYTE $0xF2; BYTE $0x0f
BYTE $0x38; BYTE $0xf1; BYTE $0xd8
MOVL BX, (R9)
ADDQ $4, R9
ADDQ $1, R8
XORQ BX, BX
SUBQ $1, R13
JNZ rem_loop
end:
RET
one_crc:
MOVQ $1, R13
XORQ BX, BX
JMP rem_loop
// func matchLenSSE4(a, b []byte, max int) int
TEXT ·matchLenSSE4(SB), 4, $0
MOVQ a_base+0(FP), SI
MOVQ b_base+24(FP), DI
MOVQ DI, DX
MOVQ max+48(FP), CX
cmp8:
// As long as we are 8 or more bytes before the end of max, we can load and
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
CMPQ CX, $8
JLT cmp1
MOVQ (SI), AX
MOVQ (DI), BX
CMPQ AX, BX
JNE bsf
ADDQ $8, SI
ADDQ $8, DI
SUBQ $8, CX
JMP cmp8
bsf:
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
// the index of the first byte that differs. The BSF instruction finds the
// least significant 1 bit, the amd64 architecture is little-endian, and
// the shift by 3 converts a bit index to a byte index.
XORQ AX, BX
BSFQ BX, BX
SHRQ $3, BX
ADDQ BX, DI
// Subtract off &b[0] to convert from &b[ret] to ret, and return.
SUBQ DX, DI
MOVQ DI, ret+56(FP)
RET
cmp1:
// In the slices' tail, compare 1 byte at a time.
CMPQ CX, $0
JEQ matchLenEnd
MOVB (SI), AX
MOVB (DI), BX
CMPB AX, BX
JNE matchLenEnd
ADDQ $1, SI
ADDQ $1, DI
SUBQ $1, CX
JMP cmp1
matchLenEnd:
// Subtract off &b[0] to convert from &b[ret] to ret, and return.
SUBQ DX, DI
MOVQ DI, ret+56(FP)
RET
// func histogram(b []byte, h []int32)
TEXT ·histogram(SB), 4, $0
MOVQ b+0(FP), SI // SI: &b
MOVQ b_len+8(FP), R9 // R9: len(b)
MOVQ h+24(FP), DI // DI: Histogram
MOVQ R9, R8
SHRQ $3, R8
JZ hist1
XORQ R11, R11
loop_hist8:
MOVQ (SI), R10
MOVB R10, R11
INCL (DI)(R11*4)
SHRQ $8, R10
MOVB R10, R11
INCL (DI)(R11*4)
SHRQ $8, R10
MOVB R10, R11
INCL (DI)(R11*4)
SHRQ $8, R10
MOVB R10, R11
INCL (DI)(R11*4)
SHRQ $8, R10
MOVB R10, R11
INCL (DI)(R11*4)
SHRQ $8, R10
MOVB R10, R11
INCL (DI)(R11*4)
SHRQ $8, R10
MOVB R10, R11
INCL (DI)(R11*4)
SHRQ $8, R10
INCL (DI)(R10*4)
ADDQ $8, SI
DECQ R8
JNZ loop_hist8
hist1:
ANDQ $7, R9
JZ end_hist
XORQ R10, R10
loop_hist1:
MOVB (SI), R10
INCL (DI)(R10*4)
INCQ SI
DECQ R9
JNZ loop_hist1
end_hist:
RET


@ -0,0 +1,35 @@
//+build !amd64 noasm appengine
// Copyright 2015, Klaus Post, see LICENSE for details.
package flate
func init() {
useSSE42 = false
}
// crc32sse should never be called.
func crc32sse(a []byte) uint32 {
panic("no assembler")
}
// crc32sseAll should never be called.
func crc32sseAll(a []byte, dst []uint32) {
panic("no assembler")
}
// matchLenSSE4 should never be called.
func matchLenSSE4(a, b []byte, max int) int {
panic("no assembler")
return 0
}
// histogram accumulates a histogram of b in h.
//
// len(h) must be >= 256, and h's elements must be all zeroes.
func histogram(b []byte, h []int32) {
h = h[:256]
for _, t := range b {
h[t]++
}
}

vendor/github.com/klauspost/compress/flate/deflate.go generated vendored Normal file

File diff suppressed because it is too large


@ -0,0 +1,184 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
// LZ77 decompresses data through sequences of two forms of commands:
//
// * Literal insertions: Runs of one or more symbols are inserted into the data
// stream as is. This is accomplished through the writeByte method for a
// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
// Any valid stream must start with a literal insertion if no preset dictionary
// is used.
//
// * Backward copies: Runs of one or more symbols are copied from previously
// emitted data. Backward copies come as the tuple (dist, length) where dist
// determines how far back in the stream to copy from and length determines how
// many bytes to copy. Note that it is valid for the length to be greater than
// the distance. Since LZ77 uses forward copies, that situation is used to
// perform a form of run-length encoding on repeated runs of symbols.
// The writeCopy and tryWriteCopy are used to implement this command.
//
// For performance reasons, this implementation performs little to no sanity
// checks about the arguments. As such, the invariants documented for each
// method call must be respected.
type dictDecoder struct {
hist []byte // Sliding window history
// Invariant: 0 <= rdPos <= wrPos <= len(hist)
wrPos int // Current output position in buffer
rdPos int // Have emitted hist[:rdPos] already
full bool // Has a full window length been written yet?
}
// init initializes dictDecoder to have a sliding window dictionary of the given
// size. If a preset dict is provided, it will initialize the dictionary with
// the contents of dict.
func (dd *dictDecoder) init(size int, dict []byte) {
*dd = dictDecoder{hist: dd.hist}
if cap(dd.hist) < size {
dd.hist = make([]byte, size)
}
dd.hist = dd.hist[:size]
if len(dict) > len(dd.hist) {
dict = dict[len(dict)-len(dd.hist):]
}
dd.wrPos = copy(dd.hist, dict)
if dd.wrPos == len(dd.hist) {
dd.wrPos = 0
dd.full = true
}
dd.rdPos = dd.wrPos
}
// histSize reports the total amount of historical data in the dictionary.
func (dd *dictDecoder) histSize() int {
if dd.full {
return len(dd.hist)
}
return dd.wrPos
}
// availRead reports the number of bytes that can be flushed by readFlush.
func (dd *dictDecoder) availRead() int {
return dd.wrPos - dd.rdPos
}
// availWrite reports the available amount of output buffer space.
func (dd *dictDecoder) availWrite() int {
return len(dd.hist) - dd.wrPos
}
// writeSlice returns a slice of the available buffer to write data to.
//
// This invariant will be kept: len(s) <= availWrite()
func (dd *dictDecoder) writeSlice() []byte {
return dd.hist[dd.wrPos:]
}
// writeMark advances the writer pointer by cnt.
//
// This invariant must be kept: 0 <= cnt <= availWrite()
func (dd *dictDecoder) writeMark(cnt int) {
dd.wrPos += cnt
}
// writeByte writes a single byte to the dictionary.
//
// This invariant must be kept: 0 < availWrite()
func (dd *dictDecoder) writeByte(c byte) {
dd.hist[dd.wrPos] = c
dd.wrPos++
}
// writeCopy copies a string at a given (dist, length) to the output.
// This returns the number of bytes copied and may be less than the requested
// length if the available space in the output buffer is too small.
//
// This invariant must be kept: 0 < dist <= histSize()
func (dd *dictDecoder) writeCopy(dist, length int) int {
dstBase := dd.wrPos
dstPos := dstBase
srcPos := dstPos - dist
endPos := dstPos + length
if endPos > len(dd.hist) {
endPos = len(dd.hist)
}
// Copy non-overlapping section after destination position.
//
// This section is non-overlapping in that the copy length for this section
// is always less than or equal to the backwards distance. This can occur
// if a distance refers to data that wraps-around in the buffer.
// Thus, a backwards copy is performed here; that is, the exact bytes in
// the source prior to the copy is placed in the destination.
if srcPos < 0 {
srcPos += len(dd.hist)
dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
srcPos = 0
}
// Copy possibly overlapping section before destination position.
//
// This section can overlap if the copy length for this section is larger
// than the backwards distance. This is allowed by LZ77 so that repeated
// strings can be succinctly represented using (dist, length) pairs.
// Thus, a forwards copy is performed here; that is, the bytes copied is
// possibly dependent on the resulting bytes in the destination as the copy
// progresses along. This is functionally equivalent to the following:
//
// for i := 0; i < endPos-dstPos; i++ {
// dd.hist[dstPos+i] = dd.hist[srcPos+i]
// }
// dstPos = endPos
//
for dstPos < endPos {
dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
}
dd.wrPos = dstPos
return dstPos - dstBase
}
// tryWriteCopy tries to copy a string at a given (distance, length) to the
// output. This specialized version is optimized for short distances.
//
// This method is designed to be inlined for performance reasons.
//
// This invariant must be kept: 0 < dist <= histSize()
func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
dstPos := dd.wrPos
endPos := dstPos + length
if dstPos < dist || endPos > len(dd.hist) {
return 0
}
dstBase := dstPos
srcPos := dstPos - dist
// Copy possibly overlapping section before destination position.
loop:
dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
if dstPos < endPos {
goto loop // Avoid for-loop so that this function can be inlined
}
dd.wrPos = dstPos
return dstPos - dstBase
}
// readFlush returns a slice of the historical buffer that is ready to be
// emitted to the user. The data returned by readFlush must be fully consumed
// before calling any other dictDecoder methods.
func (dd *dictDecoder) readFlush() []byte {
toRead := dd.hist[dd.rdPos:dd.wrPos]
dd.rdPos = dd.wrPos
if dd.wrPos == len(dd.hist) {
dd.wrPos, dd.rdPos = 0, 0
dd.full = true
}
return toRead
}

vendor/github.com/klauspost/compress/flate/gen.go generated vendored Normal file

@ -0,0 +1,265 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// This program generates fixedhuff.go
// Invoke as
//
// go run gen.go -output fixedhuff.go
package main
import (
"bytes"
"flag"
"fmt"
"go/format"
"io/ioutil"
"log"
)
var filename = flag.String("output", "fixedhuff.go", "output file name")
const maxCodeLen = 16
// Note: the definition of the huffmanDecoder struct is copied from
// inflate.go, as it is private to the implementation.
// chunk & 15 is number of bits
// chunk >> 4 is value, including table link
const (
huffmanChunkBits = 9
huffmanNumChunks = 1 << huffmanChunkBits
huffmanCountMask = 15
huffmanValueShift = 4
)
type huffmanDecoder struct {
min int // the minimum code length
chunks [huffmanNumChunks]uint32 // chunks as described above
links [][]uint32 // overflow links
linkMask uint32 // mask the width of the link table
}
// Initialize Huffman decoding tables from array of code lengths.
// Following this function, h is guaranteed to be initialized into a complete
// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
// degenerate case where the tree has only a single symbol with length 1. Empty
// trees are permitted.
func (h *huffmanDecoder) init(bits []int) bool {
// Sanity enables additional runtime tests during Huffman
// table construction. It's intended to be used during
// development to supplement the currently ad-hoc unit tests.
const sanity = false
if h.min != 0 {
*h = huffmanDecoder{}
}
// Count number of codes of each length,
// compute min and max length.
var count [maxCodeLen]int
var min, max int
for _, n := range bits {
if n == 0 {
continue
}
if min == 0 || n < min {
min = n
}
if n > max {
max = n
}
count[n]++
}
// Empty tree. The decompressor.huffSym function will fail later if the tree
// is used. Technically, an empty tree is only valid for the HDIST tree and
// not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
// is guaranteed to fail since it will attempt to use the tree to decode the
// codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
// guaranteed to fail later since the compressed data section must be
// composed of at least one symbol (the end-of-block marker).
if max == 0 {
return true
}
code := 0
var nextcode [maxCodeLen]int
for i := min; i <= max; i++ {
code <<= 1
nextcode[i] = code
code += count[i]
}
// Check that the coding is complete (i.e., that we've
// assigned all 2-to-the-max possible bit sequences).
// Exception: To be compatible with zlib, we also need to
// accept degenerate single-code codings. See also
// TestDegenerateHuffmanCoding.
if code != 1<<uint(max) && !(code == 1 && max == 1) {
return false
}
h.min = min
if max > huffmanChunkBits {
numLinks := 1 << (uint(max) - huffmanChunkBits)
h.linkMask = uint32(numLinks - 1)
// create link tables
link := nextcode[huffmanChunkBits+1] >> 1
h.links = make([][]uint32, huffmanNumChunks-link)
for j := uint(link); j < huffmanNumChunks; j++ {
reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8
reverse >>= uint(16 - huffmanChunkBits)
off := j - uint(link)
if sanity && h.chunks[reverse] != 0 {
panic("impossible: overwriting existing chunk")
}
h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
h.links[off] = make([]uint32, numLinks)
}
}
for i, n := range bits {
if n == 0 {
continue
}
code := nextcode[n]
nextcode[n]++
chunk := uint32(i<<huffmanValueShift | n)
reverse := int(reverseByte[code>>8]) | int(reverseByte[code&0xff])<<8
reverse >>= uint(16 - n)
if n <= huffmanChunkBits {
for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
// We should never need to overwrite
// an existing chunk. Also, 0 is
// never a valid chunk, because the
// lower 4 "count" bits should be
// between 1 and 15.
if sanity && h.chunks[off] != 0 {
panic("impossible: overwriting existing chunk")
}
h.chunks[off] = chunk
}
} else {
j := reverse & (huffmanNumChunks - 1)
if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
// Longer codes should have been
// associated with a link table above.
panic("impossible: not an indirect chunk")
}
value := h.chunks[j] >> huffmanValueShift
linktab := h.links[value]
reverse >>= huffmanChunkBits
for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
if sanity && linktab[off] != 0 {
panic("impossible: overwriting existing chunk")
}
linktab[off] = chunk
}
}
}
if sanity {
// Above we've sanity checked that we never overwrote
// an existing entry. Here we additionally check that
// we filled the tables completely.
for i, chunk := range h.chunks {
if chunk == 0 {
// As an exception, in the degenerate
// single-code case, we allow odd
// chunks to be missing.
if code == 1 && i%2 == 1 {
continue
}
panic("impossible: missing chunk")
}
}
for _, linktab := range h.links {
for _, chunk := range linktab {
if chunk == 0 {
panic("impossible: missing chunk")
}
}
}
}
return true
}
func main() {
flag.Parse()
var h huffmanDecoder
var bits [288]int
initReverseByte()
for i := 0; i < 144; i++ {
bits[i] = 8
}
for i := 144; i < 256; i++ {
bits[i] = 9
}
for i := 256; i < 280; i++ {
bits[i] = 7
}
for i := 280; i < 288; i++ {
bits[i] = 8
}
h.init(bits[:])
if h.links != nil {
log.Fatal("Unexpected links table in fixed Huffman decoder")
}
var buf bytes.Buffer
fmt.Fprintf(&buf, `// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.`+"\n\n")
fmt.Fprintln(&buf, "package flate")
fmt.Fprintln(&buf)
fmt.Fprintln(&buf, "// autogenerated by go run gen.go -output fixedhuff.go, DO NOT EDIT")
fmt.Fprintln(&buf)
fmt.Fprintln(&buf, "var fixedHuffmanDecoder = huffmanDecoder{")
fmt.Fprintf(&buf, "\t%d,\n", h.min)
fmt.Fprintln(&buf, "\t[huffmanNumChunks]uint32{")
for i := 0; i < huffmanNumChunks; i++ {
if i&7 == 0 {
fmt.Fprintf(&buf, "\t\t")
} else {
fmt.Fprintf(&buf, " ")
}
fmt.Fprintf(&buf, "0x%04x,", h.chunks[i])
if i&7 == 7 {
fmt.Fprintln(&buf)
}
}
fmt.Fprintln(&buf, "\t},")
fmt.Fprintln(&buf, "\tnil, 0,")
fmt.Fprintln(&buf, "}")
data, err := format.Source(buf.Bytes())
if err != nil {
log.Fatal(err)
}
err = ioutil.WriteFile(*filename, data, 0644)
if err != nil {
log.Fatal(err)
}
}
var reverseByte [256]byte
func initReverseByte() {
for x := 0; x < 256; x++ {
var result byte
for i := uint(0); i < 8; i++ {
result |= byte(((x >> i) & 1) << (7 - i))
}
reverseByte[x] = result
}
}


@ -0,0 +1,701 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
import (
"io"
)
const (
// The largest offset code.
offsetCodeCount = 30
// The special code used to mark the end of a block.
endBlockMarker = 256
// The first length code.
lengthCodesStart = 257
// The number of codegen codes.
codegenCodeCount = 19
badCode = 255
// bufferFlushSize indicates the buffer size
// after which bytes are flushed to the writer.
// Should preferably be a multiple of 6, since
// we accumulate 6 bytes between writes to the buffer.
bufferFlushSize = 240
// bufferSize is the actual output byte buffer size.
// It must have additional headroom for a flush
// which can contain up to 8 bytes.
bufferSize = bufferFlushSize + 8
)
// The number of extra bits needed by length code X - LENGTH_CODES_START.
var lengthExtraBits = []int8{
/* 257 */ 0, 0, 0,
/* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
/* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
/* 280 */ 4, 5, 5, 5, 5, 0,
}
// The length indicated by length code X - LENGTH_CODES_START.
var lengthBase = []uint32{
0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
64, 80, 96, 112, 128, 160, 192, 224, 255,
}
// offset code word extra bits.
var offsetExtraBits = []int8{
0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
/* extended window */
14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20,
}
var offsetBase = []uint32{
/* normal deflate */
0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
/* extended window */
0x008000, 0x00c000, 0x010000, 0x018000, 0x020000,
0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000,
0x100000, 0x180000, 0x200000, 0x300000,
}
// The odd order in which the codegen code sizes are written.
var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
type huffmanBitWriter struct {
// writer is the underlying writer.
// Do not use it directly; use the write method, which ensures
// that Write errors are sticky.
writer io.Writer
// Data waiting to be written is bytes[0:nbytes]
// and then the low nbits of bits.
bits uint64
nbits uint
bytes [bufferSize]byte
codegenFreq [codegenCodeCount]int32
nbytes int
literalFreq []int32
offsetFreq []int32
codegen []uint8
literalEncoding *huffmanEncoder
offsetEncoding *huffmanEncoder
codegenEncoding *huffmanEncoder
err error
}
func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
return &huffmanBitWriter{
writer: w,
literalFreq: make([]int32, maxNumLit),
offsetFreq: make([]int32, offsetCodeCount),
codegen: make([]uint8, maxNumLit+offsetCodeCount+1),
literalEncoding: newHuffmanEncoder(maxNumLit),
codegenEncoding: newHuffmanEncoder(codegenCodeCount),
offsetEncoding: newHuffmanEncoder(offsetCodeCount),
}
}
func (w *huffmanBitWriter) reset(writer io.Writer) {
w.writer = writer
w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
w.bytes = [bufferSize]byte{}
}
func (w *huffmanBitWriter) flush() {
if w.err != nil {
w.nbits = 0
return
}
n := w.nbytes
for w.nbits != 0 {
w.bytes[n] = byte(w.bits)
w.bits >>= 8
if w.nbits > 8 { // Avoid underflow
w.nbits -= 8
} else {
w.nbits = 0
}
n++
}
w.bits = 0
w.write(w.bytes[:n])
w.nbytes = 0
}
func (w *huffmanBitWriter) write(b []byte) {
if w.err != nil {
return
}
_, w.err = w.writer.Write(b)
}
func (w *huffmanBitWriter) writeBits(b int32, nb uint) {
if w.err != nil {
return
}
w.bits |= uint64(b) << w.nbits
w.nbits += nb
if w.nbits >= 48 {
bits := w.bits
w.bits >>= 48
w.nbits -= 48
n := w.nbytes
bytes := w.bytes[n : n+6]
bytes[0] = byte(bits)
bytes[1] = byte(bits >> 8)
bytes[2] = byte(bits >> 16)
bytes[3] = byte(bits >> 24)
bytes[4] = byte(bits >> 32)
bytes[5] = byte(bits >> 40)
n += 6
if n >= bufferFlushSize {
w.write(w.bytes[:n])
n = 0
}
w.nbytes = n
}
}
func (w *huffmanBitWriter) writeBytes(bytes []byte) {
if w.err != nil {
return
}
n := w.nbytes
if w.nbits&7 != 0 {
w.err = InternalError("writeBytes with unfinished bits")
return
}
for w.nbits != 0 {
w.bytes[n] = byte(w.bits)
w.bits >>= 8
w.nbits -= 8
n++
}
if n != 0 {
w.write(w.bytes[:n])
}
w.nbytes = 0
w.write(bytes)
}
// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
// the literal and offset lengths arrays (which are concatenated into a single
// array). This method generates that run-length encoding.
//
// The result is written into the codegen array, and the frequencies
// of each code is written into the codegenFreq array.
// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
// information. Code badCode is an end marker
//
// numLiterals The number of literals in literalEncoding
// numOffsets The number of offsets in offsetEncoding
// litenc, offenc The literal and offset encoder to use
func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
for i := range w.codegenFreq {
w.codegenFreq[i] = 0
}
// Note that we are using codegen both as a temporary variable for holding
// a copy of the frequencies, and as the place where we put the result.
// This is fine because the output is always shorter than the input used
// so far.
codegen := w.codegen // cache
// Copy the concatenated code sizes to codegen. Put a marker at the end.
cgnl := codegen[:numLiterals]
for i := range cgnl {
cgnl[i] = uint8(litEnc.codes[i].len)
}
cgnl = codegen[numLiterals : numLiterals+numOffsets]
for i := range cgnl {
cgnl[i] = uint8(offEnc.codes[i].len)
}
codegen[numLiterals+numOffsets] = badCode
size := codegen[0]
count := 1
outIndex := 0
for inIndex := 1; size != badCode; inIndex++ {
// INVARIANT: We have seen "count" copies of size that have not yet
// had output generated for them.
nextSize := codegen[inIndex]
if nextSize == size {
count++
continue
}
// We need to generate codegen indicating "count" of size.
if size != 0 {
codegen[outIndex] = size
outIndex++
w.codegenFreq[size]++
count--
for count >= 3 {
n := 6
if n > count {
n = count
}
codegen[outIndex] = 16
outIndex++
codegen[outIndex] = uint8(n - 3)
outIndex++
w.codegenFreq[16]++
count -= n
}
} else {
for count >= 11 {
n := 138
if n > count {
n = count
}
codegen[outIndex] = 18
outIndex++
codegen[outIndex] = uint8(n - 11)
outIndex++
w.codegenFreq[18]++
count -= n
}
if count >= 3 {
// count >= 3 && count <= 10
codegen[outIndex] = 17
outIndex++
codegen[outIndex] = uint8(count - 3)
outIndex++
w.codegenFreq[17]++
count = 0
}
}
count--
for ; count >= 0; count-- {
codegen[outIndex] = size
outIndex++
w.codegenFreq[size]++
}
// Set up invariant for next time through the loop.
size = nextSize
count = 1
}
// Marker indicating the end of the codegen.
codegen[outIndex] = badCode
}
// dynamicSize returns the size of dynamically encoded data in bits.
func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
numCodegens = len(w.codegenFreq)
for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
numCodegens--
}
header := 3 + 5 + 5 + 4 + (3 * numCodegens) +
w.codegenEncoding.bitLength(w.codegenFreq[:]) +
int(w.codegenFreq[16])*2 +
int(w.codegenFreq[17])*3 +
int(w.codegenFreq[18])*7
size = header +
litEnc.bitLength(w.literalFreq) +
offEnc.bitLength(w.offsetFreq) +
extraBits
return size, numCodegens
}
// fixedSize returns the size of dynamically encoded data in bits.
func (w *huffmanBitWriter) fixedSize(extraBits int) int {
return 3 +
fixedLiteralEncoding.bitLength(w.literalFreq) +
fixedOffsetEncoding.bitLength(w.offsetFreq) +
extraBits
}
// storedSize calculates the stored size, including header.
// The function returns the size in bits and whether the block
// fits inside a single block.
func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
if in == nil {
return 0, false
}
if len(in) <= maxStoreBlockSize {
return (len(in) + 5) * 8, true
}
return 0, false
}
func (w *huffmanBitWriter) writeCode(c hcode) {
if w.err != nil {
return
}
w.bits |= uint64(c.code) << w.nbits
w.nbits += uint(c.len)
if w.nbits >= 48 {
bits := w.bits
w.bits >>= 48
w.nbits -= 48
n := w.nbytes
bytes := w.bytes[n : n+6]
bytes[0] = byte(bits)
bytes[1] = byte(bits >> 8)
bytes[2] = byte(bits >> 16)
bytes[3] = byte(bits >> 24)
bytes[4] = byte(bits >> 32)
bytes[5] = byte(bits >> 40)
n += 6
if n >= bufferFlushSize {
w.write(w.bytes[:n])
n = 0
}
w.nbytes = n
}
}
// Write the header of a dynamic Huffman block to the output stream.
//
// numLiterals The number of literals specified in codegen
// numOffsets The number of offsets specified in codegen
// numCodegens The number of codegens used in codegen
func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
if w.err != nil {
return
}
var firstBits int32 = 4
if isEof {
firstBits = 5
}
w.writeBits(firstBits, 3)
w.writeBits(int32(numLiterals-257), 5)
w.writeBits(int32(numOffsets-1), 5)
w.writeBits(int32(numCodegens-4), 4)
for i := 0; i < numCodegens; i++ {
value := uint(w.codegenEncoding.codes[codegenOrder[i]].len)
w.writeBits(int32(value), 3)
}
i := 0
for {
var codeWord int = int(w.codegen[i])
i++
if codeWord == badCode {
break
}
w.writeCode(w.codegenEncoding.codes[uint32(codeWord)])
switch codeWord {
case 16:
w.writeBits(int32(w.codegen[i]), 2)
i++
break
case 17:
w.writeBits(int32(w.codegen[i]), 3)
i++
break
case 18:
w.writeBits(int32(w.codegen[i]), 7)
i++
break
}
}
}
func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
if w.err != nil {
return
}
var flag int32
if isEof {
flag = 1
}
w.writeBits(flag, 3)
w.flush()
w.writeBits(int32(length), 16)
w.writeBits(int32(^uint16(length)), 16)
}
func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
if w.err != nil {
return
}
// Indicate that we are a fixed Huffman block
var value int32 = 2
if isEof {
value = 3
}
w.writeBits(value, 3)
}
// writeBlock will write a block of tokens with the smallest encoding.
// The original input can be supplied, and if the huffman encoded data
// is larger than the original bytes, the data will be written as a
// stored block.
// If the input is nil, the tokens will always be Huffman encoded.
func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) {
if w.err != nil {
return
}
tokens = append(tokens, endBlockMarker)
numLiterals, numOffsets := w.indexTokens(tokens)
var extraBits int
storedSize, storable := w.storedSize(input)
if storable {
// We only bother calculating the costs of the extra bits required by
// the length of offset fields (which will be the same for both fixed
// and dynamic encoding), if we need to compare those two encodings
// against stored encoding.
for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ {
// First eight length codes have extra size = 0.
extraBits += int(w.literalFreq[lengthCode]) * int(lengthExtraBits[lengthCode-lengthCodesStart])
}
for offsetCode := 4; offsetCode < numOffsets; offsetCode++ {
// First four offset codes have extra size = 0.
extraBits += int(w.offsetFreq[offsetCode]) * int(offsetExtraBits[offsetCode])
}
}
// Figure out smallest code.
// Fixed Huffman baseline.
var literalEncoding = fixedLiteralEncoding
var offsetEncoding = fixedOffsetEncoding
var size = w.fixedSize(extraBits)
// Dynamic Huffman?
var numCodegens int
// Generate codegen and codegenFrequencies, which indicates how to encode
// the literalEncoding and the offsetEncoding.
w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
w.codegenEncoding.generate(w.codegenFreq[:], 7)
dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
if dynamicSize < size {
size = dynamicSize
literalEncoding = w.literalEncoding
offsetEncoding = w.offsetEncoding
}
// Stored bytes?
if storable && storedSize < size {
w.writeStoredHeader(len(input), eof)
w.writeBytes(input)
return
}
// Huffman.
if literalEncoding == fixedLiteralEncoding {
w.writeFixedHeader(eof)
} else {
w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
}
// Write the tokens.
w.writeTokens(tokens, literalEncoding.codes, offsetEncoding.codes)
}
// writeBlockDynamic encodes a block using a dynamic Huffman table.
// This should be used if the symbols used have a disproportionate
// histogram distribution.
// If input is supplied and the compression savings are below 1/16th of the
// input size the block is stored.
func (w *huffmanBitWriter) writeBlockDynamic(tokens []token, eof bool, input []byte) {
if w.err != nil {
return
}
tokens = append(tokens, endBlockMarker)
numLiterals, numOffsets := w.indexTokens(tokens)
// Generate codegen and codegenFrequencies, which indicates how to encode
// the literalEncoding and the offsetEncoding.
w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
w.codegenEncoding.generate(w.codegenFreq[:], 7)
size, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, 0)
// Store bytes, if we don't get a reasonable improvement.
if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
w.writeStoredHeader(len(input), eof)
w.writeBytes(input)
return
}
// Write Huffman table.
w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
// Write the tokens.
w.writeTokens(tokens, w.literalEncoding.codes, w.offsetEncoding.codes)
}
// indexTokens indexes a slice of tokens, and updates
// literalFreq and offsetFreq, and generates literalEncoding
// and offsetEncoding.
// The number of literal and offset tokens is returned.
func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets int) {
for i := range w.literalFreq {
w.literalFreq[i] = 0
}
for i := range w.offsetFreq {
w.offsetFreq[i] = 0
}
for _, t := range tokens {
if t < matchType {
w.literalFreq[t.literal()]++
continue
}
length := t.length()
offset := t.offset()
w.literalFreq[lengthCodesStart+lengthCode(length)]++
w.offsetFreq[offsetCode(offset)]++
}
// get the number of literals
numLiterals = len(w.literalFreq)
for w.literalFreq[numLiterals-1] == 0 {
numLiterals--
}
// get the number of offsets
numOffsets = len(w.offsetFreq)
for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
numOffsets--
}
if numOffsets == 0 {
// We haven't found a single match. If we want to go with the dynamic encoding,
// we should count at least one offset to be sure that the offset huffman tree can be encoded.
w.offsetFreq[0] = 1
numOffsets = 1
}
w.literalEncoding.generate(w.literalFreq, 15)
w.offsetEncoding.generate(w.offsetFreq, 15)
return
}
// writeTokens writes a slice of tokens to the output.
// The codes for literal and offset encoding must be supplied.
func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
if w.err != nil {
return
}
for _, t := range tokens {
if t < matchType {
w.writeCode(leCodes[t.literal()])
continue
}
// Write the length
length := t.length()
lengthCode := lengthCode(length)
w.writeCode(leCodes[lengthCode+lengthCodesStart])
extraLengthBits := uint(lengthExtraBits[lengthCode])
if extraLengthBits > 0 {
extraLength := int32(length - lengthBase[lengthCode])
w.writeBits(extraLength, extraLengthBits)
}
// Write the offset
offset := t.offset()
offsetCode := offsetCode(offset)
w.writeCode(oeCodes[offsetCode])
extraOffsetBits := uint(offsetExtraBits[offsetCode])
if extraOffsetBits > 0 {
extraOffset := int32(offset - offsetBase[offsetCode])
w.writeBits(extraOffset, extraOffsetBits)
}
}
}
// huffOffset is a static offset encoder used for Huffman-only encoding.
// It can be reused since we will not be encoding offset values.
var huffOffset *huffmanEncoder
func init() {
w := newHuffmanBitWriter(nil)
w.offsetFreq[0] = 1
huffOffset = newHuffmanEncoder(offsetCodeCount)
huffOffset.generate(w.offsetFreq, 15)
}
// writeBlockHuff encodes a block of bytes as either
// Huffman encoded literals or uncompressed bytes if the
// result gains very little from compression.
func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) {
if w.err != nil {
return
}
// Clear histogram
for i := range w.literalFreq {
w.literalFreq[i] = 0
}
// Add everything as literals
histogram(input, w.literalFreq)
w.literalFreq[endBlockMarker] = 1
const numLiterals = endBlockMarker + 1
const numOffsets = 1
w.literalEncoding.generate(w.literalFreq, 15)
// Figure out smallest code.
// Always use dynamic Huffman or Store
var numCodegens int
// Generate codegen and codegenFrequencies, which indicates how to encode
// the literalEncoding and the offsetEncoding.
w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
w.codegenEncoding.generate(w.codegenFreq[:], 7)
size, numCodegens := w.dynamicSize(w.literalEncoding, huffOffset, 0)
// Store bytes, if we don't get a reasonable improvement.
if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
w.writeStoredHeader(len(input), eof)
w.writeBytes(input)
return
}
// Huffman.
w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
encoding := w.literalEncoding.codes[:257]
n := w.nbytes
for _, t := range input {
// Bitwriting inlined, ~30% speedup
c := encoding[t]
w.bits |= uint64(c.code) << w.nbits
w.nbits += uint(c.len)
if w.nbits < 48 {
continue
}
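// If we get here, w.nbits is between 48 and 62 (a code adds at most 15
// bits), so six whole bytes can be flushed and at most 14 bits remain.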
// Store 6 bytes
bits := w.bits
w.bits >>= 48
w.nbits -= 48
bytes := w.bytes[n : n+6]
bytes[0] = byte(bits)
bytes[1] = byte(bits >> 8)
bytes[2] = byte(bits >> 16)
bytes[3] = byte(bits >> 24)
bytes[4] = byte(bits >> 32)
bytes[5] = byte(bits >> 40)
n += 6
if n < bufferFlushSize {
continue
}
w.write(w.bytes[:n])
if w.err != nil {
return // Return early in the event of write failures
}
n = 0
}
w.nbytes = n
w.writeCode(encoding[endBlockMarker])
}

344
vendor/github.com/klauspost/compress/flate/huffman_code.go generated vendored Normal file

@ -0,0 +1,344 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
import (
"math"
"sort"
)
// hcode is a huffman code with a bit code and bit length.
type hcode struct {
code, len uint16
}
type huffmanEncoder struct {
codes []hcode
freqcache []literalNode
bitCount [17]int32
lns byLiteral // stored to avoid repeated allocation in generate
lfs byFreq // stored to avoid repeated allocation in generate
}
type literalNode struct {
literal uint16
freq int32
}
// A levelInfo describes the state of the constructed tree for a given depth.
type levelInfo struct {
// Our level, for better printing
level int32
// The frequency of the last node at this level
lastFreq int32
// The frequency of the next character to add to this level
nextCharFreq int32
// The frequency of the next pair (from level below) to add to this level.
// Only valid if the "needed" value of the next lower level is 0.
nextPairFreq int32
// The number of chains remaining to generate for this level before moving
// up to the next level
needed int32
}
// set sets the code and length of an hcode.
func (h *hcode) set(code uint16, length uint16) {
h.len = length
h.code = code
}
func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} }
func newHuffmanEncoder(size int) *huffmanEncoder {
return &huffmanEncoder{codes: make([]hcode, size)}
}
// Generates a HuffmanCode corresponding to the fixed literal table
func generateFixedLiteralEncoding() *huffmanEncoder {
h := newHuffmanEncoder(maxNumLit)
codes := h.codes
var ch uint16
for ch = 0; ch < maxNumLit; ch++ {
var bits uint16
var size uint16
switch {
case ch < 144:
// size 8, 00110000 .. 10111111
bits = ch + 48
size = 8
break
case ch < 256:
// size 9, 110010000 .. 111111111
bits = ch + 400 - 144
size = 9
break
case ch < 280:
// size 7, 0000000 .. 0010111
bits = ch - 256
size = 7
break
default:
// size 8, 11000000 .. 11000111
bits = ch + 192 - 280
size = 8
}
codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size}
}
return h
}
func generateFixedOffsetEncoding() *huffmanEncoder {
h := newHuffmanEncoder(30)
codes := h.codes
for ch := range codes {
codes[ch] = hcode{code: reverseBits(uint16(ch), 5), len: 5}
}
return h
}
var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding()
var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding()
func (h *huffmanEncoder) bitLength(freq []int32) int {
var total int
for i, f := range freq {
if f != 0 {
total += int(f) * int(h.codes[i].len)
}
}
return total
}
const maxBitsLimit = 16
// Return the number of literals assigned to each bit size in the Huffman encoding
//
// This method is only called when list.length >= 3
// The cases of 0, 1, and 2 literals are handled by special case code.
//
// list An array of the literals with non-zero frequencies
// and their associated frequencies. The array is in order of increasing
// frequency, and has as its last element a special element with frequency
// MaxInt32
// maxBits The maximum number of bits that should be used to encode any literal.
// Must be less than 16.
// return An integer array in which array[i] indicates the number of literals
// that should be encoded in i bits.
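//
// For example (illustrative): four literals with frequencies 1, 1, 2 and 5
// and maxBits = 3 get the usual Huffman depths 3, 3, 2 and 1, so the
// returned slice is [0, 1, 1, 2] (one literal in 1 bit, one in 2 bits,
// two in 3 bits).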
func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
if maxBits >= maxBitsLimit {
panic("flate: maxBits too large")
}
n := int32(len(list))
list = list[0 : n+1]
list[n] = maxNode()
// The tree can't have greater depth than n - 1, no matter what. This
// saves a little bit of work in some small cases
if maxBits > n-1 {
maxBits = n - 1
}
// Create information about each of the levels.
// A bogus "Level 0" whose sole purpose is so that
// level1.prev.needed==0. This makes level1.nextPairFreq
// be a legitimate value that never gets chosen.
var levels [maxBitsLimit]levelInfo
// leafCounts[i] counts the number of literals at the left
// of ancestors of the rightmost node at level i.
// leafCounts[i][j] is the number of literals at the left
// of the level j ancestor.
var leafCounts [maxBitsLimit][maxBitsLimit]int32
for level := int32(1); level <= maxBits; level++ {
// For every level, the first two items are the first two characters.
// We initialize the levels as if we had already figured this out.
levels[level] = levelInfo{
level: level,
lastFreq: list[1].freq,
nextCharFreq: list[2].freq,
nextPairFreq: list[0].freq + list[1].freq,
}
leafCounts[level][level] = 2
if level == 1 {
levels[level].nextPairFreq = math.MaxInt32
}
}
// We need a total of 2*n - 2 items at top level and have already generated 2.
levels[maxBits].needed = 2*n - 4
level := maxBits
for {
l := &levels[level]
if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
// We've run out of both leafs and pairs.
// End all calculations for this level.
// To make sure we never come back to this level or any lower level,
// set nextPairFreq impossibly large.
l.needed = 0
levels[level+1].nextPairFreq = math.MaxInt32
level++
continue
}
prevFreq := l.lastFreq
if l.nextCharFreq < l.nextPairFreq {
// The next item on this row is a leaf node.
n := leafCounts[level][level] + 1
l.lastFreq = l.nextCharFreq
// Lower leafCounts are the same as the previous node's.
leafCounts[level][level] = n
l.nextCharFreq = list[n].freq
} else {
// The next item on this row is a pair from the previous row.
// nextPairFreq isn't valid until we generate two
// more values in the level below
l.lastFreq = l.nextPairFreq
// Take leaf counts from the lower level, except counts[level] remains the same.
copy(leafCounts[level][:level], leafCounts[level-1][:level])
levels[l.level-1].needed = 2
}
if l.needed--; l.needed == 0 {
// We've done everything we need to do for this level.
// Continue calculating one level up. Fill in nextPairFreq
// of that level with the sum of the two nodes we've just calculated on
// this level.
if l.level == maxBits {
// All done!
break
}
levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
level++
} else {
// If we stole from below, move down temporarily to replenish it.
for levels[level-1].needed > 0 {
level--
}
}
}
// Something is wrong if, at the end, the top level is null or hasn't used
// all of the leaves.
if leafCounts[maxBits][maxBits] != n {
panic("leafCounts[maxBits][maxBits] != n")
}
bitCount := h.bitCount[:maxBits+1]
bits := 1
counts := &leafCounts[maxBits]
for level := maxBits; level > 0; level-- {
// chain.leafCount gives the number of literals requiring at least "bits"
// bits to encode.
bitCount[bits] = counts[level] - counts[level-1]
bits++
}
return bitCount
}
// Look at the leaves and assign them a bit count and an encoding as specified
// in RFC 1951 3.2.2
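//
// For example, the RFC 1951 section 3.2.2 alphabet ABCDEFGH with bit
// lengths (3, 3, 3, 3, 3, 2, 4, 4) receives the canonical codes
// F=00, A=010, B=011, C=100, D=101, E=110, G=1110, H=1111 (written
// MSB-first here; the stored hcodes are bit-reversed for LSB-first output).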
func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
code := uint16(0)
for n, bits := range bitCount {
code <<= 1
if n == 0 || bits == 0 {
continue
}
// The literals list[len(list)-bits] .. list[len(list)-1]
// are encoded using "bits" bits, and get the values
// code, code + 1, .... The code values are
// assigned in literal order (not frequency order).
chunk := list[len(list)-int(bits):]
h.lns.sort(chunk)
for _, node := range chunk {
h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)}
code++
}
list = list[0 : len(list)-int(bits)]
}
}
// Update this Huffman Code object to be the minimum code for the specified frequency count.
//
// freq An array of frequencies, in which frequency[i] gives the frequency of literal i.
// maxBits The maximum number of bits to use for any literal.
func (h *huffmanEncoder) generate(freq []int32, maxBits int32) {
if h.freqcache == nil {
// Allocate a reusable buffer with the longest possible frequency table.
// Possible lengths are codegenCodeCount, offsetCodeCount and maxNumLit.
// The largest of these is maxNumLit, so we allocate for that case.
h.freqcache = make([]literalNode, maxNumLit+1)
}
list := h.freqcache[:len(freq)+1]
// Number of non-zero literals
count := 0
// Set list to be the set of all non-zero literals and their frequencies
for i, f := range freq {
if f != 0 {
list[count] = literalNode{uint16(i), f}
count++
} else {
list[count] = literalNode{}
h.codes[i].len = 0
}
}
list[len(freq)] = literalNode{}
list = list[:count]
if count <= 2 {
// Handle the small cases here, because they are awkward for the general case code. With
// two or fewer literals, everything has bit length 1.
for i, node := range list {
// "list" is in order of increasing literal value.
h.codes[node.literal].set(uint16(i), 1)
}
return
}
h.lfs.sort(list)
// Get the number of literals for each bit count
bitCount := h.bitCounts(list, maxBits)
// And do the assignment
h.assignEncodingAndSize(bitCount, list)
}
type byLiteral []literalNode
func (s *byLiteral) sort(a []literalNode) {
*s = byLiteral(a)
sort.Sort(s)
}
func (s byLiteral) Len() int { return len(s) }
func (s byLiteral) Less(i, j int) bool {
return s[i].literal < s[j].literal
}
func (s byLiteral) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
type byFreq []literalNode
func (s *byFreq) sort(a []literalNode) {
*s = byFreq(a)
sort.Sort(s)
}
func (s byFreq) Len() int { return len(s) }
func (s byFreq) Less(i, j int) bool {
if s[i].freq == s[j].freq {
return s[i].literal < s[j].literal
}
return s[i].freq < s[j].freq
}
func (s byFreq) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

880
vendor/github.com/klauspost/compress/flate/inflate.go generated vendored Normal file

@ -0,0 +1,880 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package flate implements the DEFLATE compressed data format, described in
// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file
// formats.
package flate
import (
"bufio"
"io"
"math/bits"
"strconv"
"sync"
)
const (
maxCodeLen = 16 // max length of Huffman code
maxCodeLenMask = 15 // mask for max length of Huffman code
// The next three numbers come from the RFC section 3.2.7, with the
// additional proviso in section 3.2.5 which implies that distance codes
// 30 and 31 should never occur in compressed data.
maxNumLit = 286
maxNumDist = 30
numCodes = 19 // number of codes in Huffman meta-code
)
// Initialize the fixedHuffmanDecoder only once upon first use.
var fixedOnce sync.Once
var fixedHuffmanDecoder huffmanDecoder
// A CorruptInputError reports the presence of corrupt input at a given offset.
type CorruptInputError int64
func (e CorruptInputError) Error() string {
return "flate: corrupt input before offset " + strconv.FormatInt(int64(e), 10)
}
// An InternalError reports an error in the flate code itself.
type InternalError string
func (e InternalError) Error() string { return "flate: internal error: " + string(e) }
// A ReadError reports an error encountered while reading input.
//
// Deprecated: No longer returned.
type ReadError struct {
Offset int64 // byte offset where error occurred
Err error // error returned by underlying Read
}
func (e *ReadError) Error() string {
return "flate: read error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error()
}
// A WriteError reports an error encountered while writing output.
//
// Deprecated: No longer returned.
type WriteError struct {
Offset int64 // byte offset where error occurred
Err error // error returned by underlying Write
}
func (e *WriteError) Error() string {
return "flate: write error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error()
}
// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
// to switch to a new underlying Reader. This permits reusing a ReadCloser
// instead of allocating a new one.
type Resetter interface {
// Reset discards any buffered data and resets the Resetter as if it was
// newly initialized with the given reader.
Reset(r io.Reader, dict []byte) error
}
// The data structure for decoding Huffman tables is based on that of
// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits).
// For codes smaller than the table width, there are multiple entries
// (each combination of trailing bits has the same value). For codes
// larger than the table width, the table contains a link to an overflow
// table. The width of each entry in the link table is the maximum code
// size minus the chunk width.
//
// Note that you can do a lookup in the table even without all bits
// filled. Since the extra bits are zero, and the DEFLATE Huffman codes
// have the property that shorter codes come before longer ones, the
// bit length estimate in the result is a lower bound on the actual
// number of bits.
//
// See the following:
// http://www.gzip.org/algorithm.txt
// chunk & 15 is number of bits
// chunk >> 4 is value, including table link
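//
// For example (an illustrative entry, not from a real table): a chunk value
// of 0x57 means chunk&15 == 7 (a 7-bit code) and chunk>>4 == 5 (symbol or
// link-table index 5).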
const (
huffmanChunkBits = 9
huffmanNumChunks = 1 << huffmanChunkBits
huffmanCountMask = 15
huffmanValueShift = 4
)
type huffmanDecoder struct {
min int // the minimum code length
chunks *[huffmanNumChunks]uint32 // chunks as described above
links [][]uint32 // overflow links
linkMask uint32 // mask the width of the link table
}
// Initialize Huffman decoding tables from array of code lengths.
// Following this function, h is guaranteed to be initialized into a complete
// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
// degenerate case where the tree has only a single symbol with length 1. Empty
// trees are permitted.
func (h *huffmanDecoder) init(lengths []int) bool {
// Sanity enables additional runtime tests during Huffman
// table construction. It's intended to be used during
// development to supplement the currently ad-hoc unit tests.
const sanity = false
if h.chunks == nil {
h.chunks = &[huffmanNumChunks]uint32{}
}
if h.min != 0 {
*h = huffmanDecoder{chunks: h.chunks, links: h.links}
}
// Count number of codes of each length,
// compute min and max length.
var count [maxCodeLen]int
var min, max int
for _, n := range lengths {
if n == 0 {
continue
}
if min == 0 || n < min {
min = n
}
if n > max {
max = n
}
count[n&maxCodeLenMask]++
}
// Empty tree. The decompressor.huffSym function will fail later if the tree
// is used. Technically, an empty tree is only valid for the HDIST tree and
// not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
// is guaranteed to fail since it will attempt to use the tree to decode the
// codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
// guaranteed to fail later since the compressed data section must be
// composed of at least one symbol (the end-of-block marker).
if max == 0 {
return true
}
code := 0
var nextcode [maxCodeLen]int
for i := min; i <= max; i++ {
code <<= 1
nextcode[i&maxCodeLenMask] = code
code += count[i&maxCodeLenMask]
}
// Check that the coding is complete (i.e., that we've
// assigned all 2-to-the-max possible bit sequences).
// Exception: To be compatible with zlib, we also need to
// accept degenerate single-code codings. See also
// TestDegenerateHuffmanCoding.
if code != 1<<uint(max) && !(code == 1 && max == 1) {
return false
}
h.min = min
chunks := h.chunks[:]
for i := range chunks {
chunks[i] = 0
}
if max > huffmanChunkBits {
numLinks := 1 << (uint(max) - huffmanChunkBits)
h.linkMask = uint32(numLinks - 1)
// create link tables
link := nextcode[huffmanChunkBits+1] >> 1
if cap(h.links) < huffmanNumChunks-link {
h.links = make([][]uint32, huffmanNumChunks-link)
} else {
h.links = h.links[:huffmanNumChunks-link]
}
for j := uint(link); j < huffmanNumChunks; j++ {
reverse := int(bits.Reverse16(uint16(j)))
reverse >>= uint(16 - huffmanChunkBits)
off := j - uint(link)
if sanity && h.chunks[reverse] != 0 {
panic("impossible: overwriting existing chunk")
}
h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
if cap(h.links[off]) < numLinks {
h.links[off] = make([]uint32, numLinks)
} else {
links := h.links[off][:0]
h.links[off] = links[:numLinks]
}
}
} else {
h.links = h.links[:0]
}
for i, n := range lengths {
if n == 0 {
continue
}
code := nextcode[n]
nextcode[n]++
chunk := uint32(i<<huffmanValueShift | n)
reverse := int(bits.Reverse16(uint16(code)))
reverse >>= uint(16 - n)
if n <= huffmanChunkBits {
for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
// We should never need to overwrite
// an existing chunk. Also, 0 is
// never a valid chunk, because the
// lower 4 "count" bits should be
// between 1 and 15.
if sanity && h.chunks[off] != 0 {
panic("impossible: overwriting existing chunk")
}
h.chunks[off] = chunk
}
} else {
j := reverse & (huffmanNumChunks - 1)
if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
// Longer codes should have been
// associated with a link table above.
panic("impossible: not an indirect chunk")
}
value := h.chunks[j] >> huffmanValueShift
linktab := h.links[value]
reverse >>= huffmanChunkBits
for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
if sanity && linktab[off] != 0 {
panic("impossible: overwriting existing chunk")
}
linktab[off] = chunk
}
}
}
if sanity {
// Above we've sanity checked that we never overwrote
// an existing entry. Here we additionally check that
// we filled the tables completely.
for i, chunk := range h.chunks {
if chunk == 0 {
// As an exception, in the degenerate
// single-code case, we allow odd
// chunks to be missing.
if code == 1 && i%2 == 1 {
continue
}
panic("impossible: missing chunk")
}
}
for _, linktab := range h.links {
for _, chunk := range linktab {
if chunk == 0 {
panic("impossible: missing chunk")
}
}
}
}
return true
}
// The actual read interface needed by NewReader.
// If the passed-in io.Reader does not also have ReadByte,
// the NewReader will introduce its own buffering.
type Reader interface {
io.Reader
io.ByteReader
}
// Decompress state.
type decompressor struct {
// Input source.
r Reader
roffset int64
// Input bits, in top of b.
b uint32
nb uint
// Huffman decoders for literal/length, distance.
h1, h2 huffmanDecoder
// Length arrays used to define Huffman codes.
bits *[maxNumLit + maxNumDist]int
codebits *[numCodes]int
// Output history, buffer.
dict dictDecoder
// Temporary buffer (avoids repeated allocation).
buf [4]byte
// Next step in the decompression,
// and decompression state.
step func(*decompressor)
stepState int
final bool
err error
toRead []byte
hl, hd *huffmanDecoder
copyLen int
copyDist int
}
func (f *decompressor) nextBlock() {
for f.nb < 1+2 {
if f.err = f.moreBits(); f.err != nil {
return
}
}
f.final = f.b&1 == 1
f.b >>= 1
typ := f.b & 3
f.b >>= 2
f.nb -= 1 + 2
switch typ {
case 0:
f.dataBlock()
case 1:
// compressed, fixed Huffman tables
f.hl = &fixedHuffmanDecoder
f.hd = nil
f.huffmanBlock()
case 2:
// compressed, dynamic Huffman tables
if f.err = f.readHuffman(); f.err != nil {
break
}
f.hl = &f.h1
f.hd = &f.h2
f.huffmanBlock()
default:
// 3 is reserved.
f.err = CorruptInputError(f.roffset)
}
}
func (f *decompressor) Read(b []byte) (int, error) {
for {
if len(f.toRead) > 0 {
n := copy(b, f.toRead)
f.toRead = f.toRead[n:]
if len(f.toRead) == 0 {
return n, f.err
}
return n, nil
}
if f.err != nil {
return 0, f.err
}
f.step(f)
if f.err != nil && len(f.toRead) == 0 {
f.toRead = f.dict.readFlush() // Flush what's left in case of error
}
}
}
// Support the io.WriteTo interface for io.Copy and friends.
func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
total := int64(0)
flushed := false
for {
if len(f.toRead) > 0 {
n, err := w.Write(f.toRead)
total += int64(n)
if err != nil {
f.err = err
return total, err
}
if n != len(f.toRead) {
return total, io.ErrShortWrite
}
f.toRead = f.toRead[:0]
}
if f.err != nil && flushed {
if f.err == io.EOF {
return total, nil
}
return total, f.err
}
if f.err == nil {
f.step(f)
}
if len(f.toRead) == 0 && f.err != nil && !flushed {
f.toRead = f.dict.readFlush() // Flush what's left in case of error
flushed = true
}
}
}
func (f *decompressor) Close() error {
if f.err == io.EOF {
return nil
}
return f.err
}
// RFC 1951 section 3.2.7.
// Compression with dynamic Huffman codes
var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
func (f *decompressor) readHuffman() error {
// HLIT[5], HDIST[5], HCLEN[4].
for f.nb < 5+5+4 {
if err := f.moreBits(); err != nil {
return err
}
}
nlit := int(f.b&0x1F) + 257
if nlit > maxNumLit {
return CorruptInputError(f.roffset)
}
f.b >>= 5
ndist := int(f.b&0x1F) + 1
if ndist > maxNumDist {
return CorruptInputError(f.roffset)
}
f.b >>= 5
nclen := int(f.b&0xF) + 4
// numCodes is 19, so nclen is always valid.
f.b >>= 4
f.nb -= 5 + 5 + 4
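// For example (illustrative): HLIT bits of all zeros encode nlit = 257 and
// HDIST bits of all zeros encode ndist = 1, the RFC 1951 minimums.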
// (HCLEN+4)*3 bits: code lengths in the magic codeOrder order.
for i := 0; i < nclen; i++ {
for f.nb < 3 {
if err := f.moreBits(); err != nil {
return err
}
}
f.codebits[codeOrder[i]] = int(f.b & 0x7)
f.b >>= 3
f.nb -= 3
}
for i := nclen; i < len(codeOrder); i++ {
f.codebits[codeOrder[i]] = 0
}
if !f.h1.init(f.codebits[0:]) {
return CorruptInputError(f.roffset)
}
// HLIT + 257 code lengths, HDIST + 1 code lengths,
// using the code length Huffman code.
for i, n := 0, nlit+ndist; i < n; {
x, err := f.huffSym(&f.h1)
if err != nil {
return err
}
if x < 16 {
// Actual length.
f.bits[i] = x
i++
continue
}
// Repeat previous length or zero.
var rep int
var nb uint
var b int
switch x {
default:
return InternalError("unexpected length code")
case 16:
rep = 3
nb = 2
if i == 0 {
return CorruptInputError(f.roffset)
}
b = f.bits[i-1]
case 17:
rep = 3
nb = 3
b = 0
case 18:
rep = 11
nb = 7
b = 0
}
for f.nb < nb {
if err := f.moreBits(); err != nil {
return err
}
}
rep += int(f.b & uint32(1<<nb-1))
f.b >>= nb
f.nb -= nb
if i+rep > n {
return CorruptInputError(f.roffset)
}
for j := 0; j < rep; j++ {
f.bits[i] = b
i++
}
}
if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
return CorruptInputError(f.roffset)
}
// As an optimization, we can initialize the min bits to read at a time
// for the HLIT tree to the length of the EOB marker since we know that
// every block must terminate with one. This preserves the property that
// we never read any extra bytes after the end of the DEFLATE stream.
if f.h1.min < f.bits[endBlockMarker] {
f.h1.min = f.bits[endBlockMarker]
}
return nil
}
// Decode a single Huffman block from f.
// hl and hd are the Huffman states for the lit/length values
// and the distance values, respectively. If hd == nil, the fixed
// distance encoding associated with fixed Huffman blocks is used.
func (f *decompressor) huffmanBlock() {
const (
stateInit = iota // Zero value must be stateInit
stateDict
)
switch f.stepState {
case stateInit:
goto readLiteral
case stateDict:
goto copyHistory
}
readLiteral:
// Read literal and/or (length, distance) according to RFC section 3.2.3.
{
v, err := f.huffSym(f.hl)
if err != nil {
f.err = err
return
}
var n uint // number of bits extra
var length int
switch {
case v < 256:
f.dict.writeByte(byte(v))
if f.dict.availWrite() == 0 {
f.toRead = f.dict.readFlush()
f.step = (*decompressor).huffmanBlock
f.stepState = stateInit
return
}
goto readLiteral
case v == 256:
f.finishBlock()
return
// otherwise, reference to older data
case v < 265:
length = v - (257 - 3)
n = 0
case v < 269:
length = v*2 - (265*2 - 11)
n = 1
case v < 273:
length = v*4 - (269*4 - 19)
n = 2
case v < 277:
length = v*8 - (273*8 - 35)
n = 3
case v < 281:
length = v*16 - (277*16 - 67)
n = 4
case v < 285:
length = v*32 - (281*32 - 131)
n = 5
case v < maxNumLit:
length = 258
n = 0
default:
f.err = CorruptInputError(f.roffset)
return
}
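// For example (illustrative): v == 266 hits the "v < 269" case above,
// giving length = 266*2 - (265*2 - 11) = 13 with n = 1 extra bit, i.e.
// lengths 13..14, matching RFC 1951's entry for length code 266.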
if n > 0 {
for f.nb < n {
if err = f.moreBits(); err != nil {
f.err = err
return
}
}
length += int(f.b & uint32(1<<n-1))
f.b >>= n
f.nb -= n
}
var dist int
if f.hd == nil {
for f.nb < 5 {
if err = f.moreBits(); err != nil {
f.err = err
return
}
}
dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
f.b >>= 5
f.nb -= 5
} else {
if dist, err = f.huffSym(f.hd); err != nil {
f.err = err
return
}
}
switch {
case dist < 4:
dist++
case dist < maxNumDist:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
extra := (dist & 1) << nb
for f.nb < nb {
if err = f.moreBits(); err != nil {
f.err = err
return
}
}
extra |= int(f.b & uint32(1<<nb-1))
f.b >>= nb
f.nb -= nb
dist = 1<<(nb+1) + 1 + extra
default:
f.err = CorruptInputError(f.roffset)
return
}
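// For example (illustrative): dist code 4 yields nb = (4-2)>>1 = 1 and a
// base of 1<<2 + 1 = 5, so with its one extra bit it covers distances
// 5..6, matching RFC 1951's distance table.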
// No check on length; encoding can be prescient.
if dist > f.dict.histSize() {
f.err = CorruptInputError(f.roffset)
return
}
f.copyLen, f.copyDist = length, dist
goto copyHistory
}
copyHistory:
// Perform a backwards copy according to RFC section 3.2.3.
{
cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 {
cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
}
f.copyLen -= cnt
if f.dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = f.dict.readFlush()
f.step = (*decompressor).huffmanBlock // We need to continue this work
f.stepState = stateDict
return
}
goto readLiteral
}
}
// Copy a single uncompressed data block from input to output.
func (f *decompressor) dataBlock() {
// Uncompressed.
// Discard current half-byte.
f.nb = 0
f.b = 0
// Length then ones-complement of length.
nr, err := io.ReadFull(f.r, f.buf[0:4])
f.roffset += int64(nr)
if err != nil {
f.err = noEOF(err)
return
}
n := int(f.buf[0]) | int(f.buf[1])<<8
nn := int(f.buf[2]) | int(f.buf[3])<<8
if uint16(nn) != uint16(^n) {
f.err = CorruptInputError(f.roffset)
return
}
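// For example (illustrative): a stored block with n == 0x1234 must carry
// nn == 0xEDCB, the ones-complement of n.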
if n == 0 {
f.toRead = f.dict.readFlush()
f.finishBlock()
return
}
f.copyLen = n
f.copyData()
}
// copyData copies f.copyLen bytes from the underlying reader into the
// dictionary window (f.dict). It pauses for reads when the window is full.
func (f *decompressor) copyData() {
buf := f.dict.writeSlice()
if len(buf) > f.copyLen {
buf = buf[:f.copyLen]
}
cnt, err := io.ReadFull(f.r, buf)
f.roffset += int64(cnt)
f.copyLen -= cnt
f.dict.writeMark(cnt)
if err != nil {
f.err = noEOF(err)
return
}
if f.dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = f.dict.readFlush()
f.step = (*decompressor).copyData
return
}
f.finishBlock()
}
func (f *decompressor) finishBlock() {
if f.final {
if f.dict.availRead() > 0 {
f.toRead = f.dict.readFlush()
}
f.err = io.EOF
}
f.step = (*decompressor).nextBlock
}
// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF.
func noEOF(e error) error {
if e == io.EOF {
return io.ErrUnexpectedEOF
}
return e
}
func (f *decompressor) moreBits() error {
c, err := f.r.ReadByte()
if err != nil {
return noEOF(err)
}
f.roffset++
f.b |= uint32(c) << f.nb
f.nb += 8
return nil
}
// Read the next Huffman-encoded symbol from f according to h.
func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with a single element, huffSym must error on these two edge cases. In both
// cases, the chunks slice will be 0 for the invalid sequence, leading it to
// satisfy the n == 0 check below.
n := uint(h.min)
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
nb, b := f.nb, f.b
for {
for nb < n {
c, err := f.r.ReadByte()
if err != nil {
f.b = b
f.nb = nb
return 0, noEOF(err)
}
f.roffset++
b |= uint32(c) << (nb & 31)
nb += 8
}
chunk := h.chunks[b&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
n = uint(chunk & huffmanCountMask)
}
if n <= nb {
if n == 0 {
f.b = b
f.nb = nb
f.err = CorruptInputError(f.roffset)
return 0, f.err
}
f.b = b >> (n & 31)
f.nb = nb - n
return int(chunk >> huffmanValueShift), nil
}
}
}
func makeReader(r io.Reader) Reader {
if rr, ok := r.(Reader); ok {
return rr
}
return bufio.NewReader(r)
}
func fixedHuffmanDecoderInit() {
fixedOnce.Do(func() {
// These come from the RFC section 3.2.6.
var bits [288]int
for i := 0; i < 144; i++ {
bits[i] = 8
}
for i := 144; i < 256; i++ {
bits[i] = 9
}
for i := 256; i < 280; i++ {
bits[i] = 7
}
for i := 280; i < 288; i++ {
bits[i] = 8
}
fixedHuffmanDecoder.init(bits[:])
})
}
func (f *decompressor) Reset(r io.Reader, dict []byte) error {
*f = decompressor{
r: makeReader(r),
bits: f.bits,
codebits: f.codebits,
h1: f.h1,
h2: f.h2,
dict: f.dict,
step: (*decompressor).nextBlock,
}
f.dict.init(maxMatchOffset, dict)
return nil
}
// NewReader returns a new ReadCloser that can be used
// to read the uncompressed version of r.
// If r does not also implement io.ByteReader,
// the decompressor may read more data than necessary from r.
// It is the caller's responsibility to call Close on the ReadCloser
// when finished reading.
//
// The ReadCloser returned by NewReader also implements Resetter.
func NewReader(r io.Reader) io.ReadCloser {
fixedHuffmanDecoderInit()
var f decompressor
f.r = makeReader(r)
f.bits = new([maxNumLit + maxNumDist]int)
f.codebits = new([numCodes]int)
f.step = (*decompressor).nextBlock
f.dict.init(maxMatchOffset, nil)
return &f
}
// NewReaderDict is like NewReader but initializes the reader
// with a preset dictionary. The returned Reader behaves as if
// the uncompressed data stream started with the given dictionary,
// which has already been read. NewReaderDict is typically used
// to read data compressed by NewWriterDict.
//
// The ReadCloser returned by NewReader also implements Resetter.
func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
fixedHuffmanDecoderInit()
var f decompressor
f.r = makeReader(r)
f.bits = new([maxNumLit + maxNumDist]int)
f.codebits = new([numCodes]int)
f.step = (*decompressor).nextBlock
f.dict.init(maxMatchOffset, dict)
return &f
}

48
vendor/github.com/klauspost/compress/flate/reverse_bits.go generated vendored Normal file

@ -0,0 +1,48 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
var reverseByte = [256]byte{
0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
}
func reverseUint16(v uint16) uint16 {
return uint16(reverseByte[v>>8]) | uint16(reverseByte[v&0xFF])<<8
}
func reverseBits(number uint16, bitLength byte) uint16 {
return reverseUint16(number << uint8(16-bitLength))
}
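// For example, reverseBits(0x3, 4) == 0xC: the 4-bit pattern 0011 is
// mirrored into 1100.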

900
vendor/github.com/klauspost/compress/flate/snappy.go generated vendored Normal file

@ -0,0 +1,900 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Modified for deflate by Klaus Post (c) 2015.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
// emitLiteral writes a literal chunk to dst.
func emitLiteral(dst *tokens, lit []byte) {
ol := int(dst.n)
for i, v := range lit {
dst.tokens[(i+ol)&maxStoreBlockSize] = token(v)
}
dst.n += uint16(len(lit))
}
// emitCopy writes a copy chunk to dst.
func emitCopy(dst *tokens, offset, length int) {
dst.tokens[dst.n] = matchToken(uint32(length-3), uint32(offset-minOffsetSize))
dst.n++
}
type snappyEnc interface {
Encode(dst *tokens, src []byte)
Reset()
}
func newSnappy(level int) snappyEnc {
switch level {
case 1:
return &snappyL1{}
case 2:
return &snappyL2{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}
case 3:
return &snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}
case 4:
return &snappyL4{snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}}
default:
panic("invalid level specified")
}
}
const (
tableBits = 14 // Bits used in the table
tableSize = 1 << tableBits // Size of the table
tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks.
tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
baseMatchOffset = 1 // The smallest match offset
baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5
maxMatchOffset = 1 << 15 // The largest match offset
)
func load32(b []byte, i int) uint32 {
b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
func load64(b []byte, i int) uint64 {
b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}
func hash(u uint32) uint32 {
return (u * 0x1e35a7bd) >> tableShift
}
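// Since tableShift == 32 - tableBits, the product's top tableBits bits are
// kept and hash(u) is always < tableSize; the "& tableMask" at call sites
// is technically redundant but helps eliminate bounds checks (see tableMask).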
// snappyL1 encapsulates level 1 compression
type snappyL1 struct{}
func (e *snappyL1) Reset() {}
func (e *snappyL1) Encode(dst *tokens, src []byte) {
const (
inputMargin = 16 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
if len(src) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
dst.n = uint16(len(src))
return
}
// Initialize the hash table.
//
// The table element type is uint16, as s < sLimit and sLimit < len(src)
// and len(src) <= maxStoreBlockSize and maxStoreBlockSize == 65535.
var table [tableSize]uint16
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := len(src) - inputMargin
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := 0
// The encoded form must start with a literal, as there are no previous
// bytes to copy, so we start looking for hash matches at s == 1.
s := 1
nextHash := hash(load32(src, s))
for {
// Copied from the C++ snappy implementation:
//
// Heuristic match skipping: If 32 bytes are scanned with no matches
// found, start looking only at every other byte. If 32 more bytes are
// scanned (or skipped), look at every third byte, etc.. When a match
// is found, immediately go back to looking at every byte. This is a
// small loss (~5% performance, ~0.1% density) for compressible data
// due to more bookkeeping, but for non-compressible data (such as
// JPEG) it's a huge win since the compressor quickly "realizes" the
// data is incompressible and doesn't bother looking for matches
// everywhere.
//
// The "skip" variable keeps track of how many bytes there are since
// the last match; dividing it by 32 (ie. right-shifting by five) gives
// the number of bytes to move ahead for each iteration.
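// For example (illustrative): skip starts at 32, so the step (skip>>5) is
// 1 for the first 32 missed lookups, 2 for the next 32, and so on; scanning
// accelerates through incompressible regions and resets after each match.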
skip := 32
nextS := s
candidate := 0
for {
s = nextS
bytesBetweenHashLookups := skip >> 5
nextS = s + bytesBetweenHashLookups
skip += bytesBetweenHashLookups
if nextS > sLimit {
goto emitRemainder
}
candidate = int(table[nextHash&tableMask])
table[nextHash&tableMask] = uint16(s)
nextHash = hash(load32(src, nextS))
if s-candidate <= maxMatchOffset && load32(src, s) == load32(src, candidate) {
break
}
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
emitLiteral(dst, src[nextEmit:s])
// Call emitCopy, and then see if another emitCopy could be our next
// move. Repeat until we find no match for the input immediately after
// what was consumed by the last emitCopy call.
//
// If we exit this loop normally then we need to call emitLiteral next,
// though we don't yet know how big the literal will be. We handle that
// by proceeding to the next iteration of the main loop. We also can
// exit this loop via goto if we get close to exhausting the input.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
base := s
// Extend the 4-byte match as long as possible.
//
// This is an inlined version of Snappy's:
// s = extendMatch(src, candidate+4, s+4)
s += 4
s1 := base + maxMatchLength
if s1 > len(src) {
s1 = len(src)
}
a := src[s:s1]
b := src[candidate+4:]
b = b[:len(a)]
l := len(a)
for i := range a {
if a[i] != b[i] {
l = i
break
}
}
s += l
// matchToken is flate's equivalent of Snappy's emitCopy.
dst.tokens[dst.n] = matchToken(uint32(s-base-baseMatchLength), uint32(base-candidate-baseMatchOffset))
dst.n++
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-1 and at s. If
// another emitCopy is not our next move, also calculate nextHash
// at s+1. At least on GOARCH=amd64, these three hash calculations
// are faster as one load64 call (with some shifts) instead of
// three load32 calls.
x := load64(src, s-1)
prevHash := hash(uint32(x >> 0))
table[prevHash&tableMask] = uint16(s - 1)
currHash := hash(uint32(x >> 8))
candidate = int(table[currHash&tableMask])
table[currHash&tableMask] = uint16(s)
if s-candidate > maxMatchOffset || uint32(x>>8) != load32(src, candidate) {
nextHash = hash(uint32(x >> 16))
s++
break
}
}
}
emitRemainder:
if nextEmit < len(src) {
emitLiteral(dst, src[nextEmit:])
}
}
type tableEntry struct {
val uint32
offset int32
}
func load3232(b []byte, i int32) uint32 {
b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
func load6432(b []byte, i int32) uint64 {
b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}
// snappyGen holds the state shared by the level 2-4 encoders: the previous
// byte block and the running offset cur used to index across blocks.
// This is the generic implementation.
type snappyGen struct {
prev []byte
cur int32
}
// snappyL2 is the level 2 encoder. It adds a single-candidate match table
// on top of snappyGen, allowing matches that reach into the previous block.
type snappyL2 struct {
snappyGen
table [tableSize]tableEntry
}
// Encode uses a similar algorithm to level 1, but is capable
// of matching across blocks giving better compression at a small slowdown.
func (e *snappyL2) Encode(dst *tokens, src []byte) {
const (
inputMargin = 8 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
// Protect against e.cur wraparound.
if e.cur > 1<<30 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
e.cur = maxStoreBlockSize
}
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
if len(src) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
dst.n = uint16(len(src))
e.cur += maxStoreBlockSize
e.prev = e.prev[:0]
return
}
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := int32(0)
s := int32(0)
cv := load3232(src, s)
nextHash := hash(cv)
for {
// Copied from the C++ snappy implementation:
//
// Heuristic match skipping: If 32 bytes are scanned with no matches
// found, start looking only at every other byte. If 32 more bytes are
// scanned (or skipped), look at every third byte, etc.. When a match
// is found, immediately go back to looking at every byte. This is a
// small loss (~5% performance, ~0.1% density) for compressible data
// due to more bookkeeping, but for non-compressible data (such as
// JPEG) it's a huge win since the compressor quickly "realizes" the
// data is incompressible and doesn't bother looking for matches
// everywhere.
//
// The "skip" variable keeps track of how many bytes there are since
// the last match; dividing it by 32 (ie. right-shifting by five) gives
// the number of bytes to move ahead for each iteration.
skip := int32(32)
nextS := s
var candidate tableEntry
for {
s = nextS
bytesBetweenHashLookups := skip >> 5
nextS = s + bytesBetweenHashLookups
skip += bytesBetweenHashLookups
if nextS > sLimit {
goto emitRemainder
}
candidate = e.table[nextHash&tableMask]
now := load3232(src, nextS)
e.table[nextHash&tableMask] = tableEntry{offset: s + e.cur, val: cv}
nextHash = hash(now)
offset := s - (candidate.offset - e.cur)
if offset > maxMatchOffset || cv != candidate.val {
// Out of range or not matched.
cv = now
continue
}
break
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
emitLiteral(dst, src[nextEmit:s])
// Call emitCopy, and then see if another emitCopy could be our next
// move. Repeat until we find no match for the input immediately after
// what was consumed by the last emitCopy call.
//
// If we exit this loop normally then we need to call emitLiteral next,
// though we don't yet know how big the literal will be. We handle that
// by proceeding to the next iteration of the main loop. We also can
// exit this loop via goto if we get close to exhausting the input.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
// Extend the 4-byte match as long as possible.
//
s += 4
t := candidate.offset - e.cur + 4
l := e.matchlen(s, t, src)
// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
dst.n++
s += l
nextEmit = s
if s >= sLimit {
t += l
// Index first pair after match end.
if int(t+4) < len(src) && t > 0 {
cv := load3232(src, t)
e.table[hash(cv)&tableMask] = tableEntry{offset: t + e.cur, val: cv}
}
goto emitRemainder
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-1 and at s. If
// another emitCopy is not our next move, also calculate nextHash
// at s+1. At least on GOARCH=amd64, these three hash calculations
// are faster as one load64 call (with some shifts) instead of
// three load32 calls.
x := load6432(src, s-1)
prevHash := hash(uint32(x))
e.table[prevHash&tableMask] = tableEntry{offset: e.cur + s - 1, val: uint32(x)}
x >>= 8
currHash := hash(uint32(x))
candidate = e.table[currHash&tableMask]
e.table[currHash&tableMask] = tableEntry{offset: e.cur + s, val: uint32(x)}
offset := s - (candidate.offset - e.cur)
if offset > maxMatchOffset || uint32(x) != candidate.val {
cv = uint32(x >> 8)
nextHash = hash(cv)
s++
break
}
}
}
emitRemainder:
if int(nextEmit) < len(src) {
emitLiteral(dst, src[nextEmit:])
}
e.cur += int32(len(src))
e.prev = e.prev[:len(src)]
copy(e.prev, src)
}
type tableEntryPrev struct {
Cur tableEntry
Prev tableEntry
}
// snappyL3 is the level 3 encoder; its table keeps two candidates
// (current and previous) per hash.
type snappyL3 struct {
snappyGen
table [tableSize]tableEntryPrev
}
// Encode uses a similar algorithm to level 2, will check up to two candidates.
func (e *snappyL3) Encode(dst *tokens, src []byte) {
const (
inputMargin = 8 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
// Protect against e.cur wraparound.
if e.cur > 1<<30 {
for i := range e.table[:] {
e.table[i] = tableEntryPrev{}
}
e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]}
}
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
if len(src) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
dst.n = uint16(len(src))
e.cur += maxStoreBlockSize
e.prev = e.prev[:0]
return
}
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := int32(0)
s := int32(0)
cv := load3232(src, s)
nextHash := hash(cv)
for {
// Copied from the C++ snappy implementation:
//
// Heuristic match skipping: If 32 bytes are scanned with no matches
// found, start looking only at every other byte. If 32 more bytes are
// scanned (or skipped), look at every third byte, etc.. When a match
// is found, immediately go back to looking at every byte. This is a
// small loss (~5% performance, ~0.1% density) for compressible data
// due to more bookkeeping, but for non-compressible data (such as
// JPEG) it's a huge win since the compressor quickly "realizes" the
// data is incompressible and doesn't bother looking for matches
// everywhere.
//
// The "skip" variable keeps track of how many bytes there are since
// the last match; dividing it by 32 (ie. right-shifting by five) gives
// the number of bytes to move ahead for each iteration.
skip := int32(32)
nextS := s
var candidate tableEntry
for {
s = nextS
bytesBetweenHashLookups := skip >> 5
nextS = s + bytesBetweenHashLookups
skip += bytesBetweenHashLookups
if nextS > sLimit {
goto emitRemainder
}
candidates := e.table[nextHash&tableMask]
now := load3232(src, nextS)
e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}}
nextHash = hash(now)
// Check both candidates
candidate = candidates.Cur
if cv == candidate.val {
offset := s - (candidate.offset - e.cur)
if offset <= maxMatchOffset {
break
}
} else {
// We only check if value mismatches.
// Offset will always be invalid in other cases.
candidate = candidates.Prev
if cv == candidate.val {
offset := s - (candidate.offset - e.cur)
if offset <= maxMatchOffset {
break
}
}
}
cv = now
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
emitLiteral(dst, src[nextEmit:s])
// Call emitCopy, and then see if another emitCopy could be our next
// move. Repeat until we find no match for the input immediately after
// what was consumed by the last emitCopy call.
//
// If we exit this loop normally then we need to call emitLiteral next,
// though we don't yet know how big the literal will be. We handle that
// by proceeding to the next iteration of the main loop. We also can
// exit this loop via goto if we get close to exhausting the input.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
// Extend the 4-byte match as long as possible.
//
s += 4
t := candidate.offset - e.cur + 4
l := e.matchlen(s, t, src)
// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
dst.n++
s += l
nextEmit = s
if s >= sLimit {
t += l
// Index first pair after match end.
if int(t+4) < len(src) && t > 0 {
cv := load3232(src, t)
nextHash = hash(cv)
e.table[nextHash&tableMask] = tableEntryPrev{
Prev: e.table[nextHash&tableMask].Cur,
Cur: tableEntry{offset: e.cur + t, val: cv},
}
}
goto emitRemainder
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-3 to s. If
// another emitCopy is not our next move, also calculate nextHash
// at s+1. At least on GOARCH=amd64, these three hash calculations
// are faster as one load64 call (with some shifts) instead of
// three load32 calls.
x := load6432(src, s-3)
prevHash := hash(uint32(x))
e.table[prevHash&tableMask] = tableEntryPrev{
Prev: e.table[prevHash&tableMask].Cur,
Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)},
}
x >>= 8
prevHash = hash(uint32(x))
e.table[prevHash&tableMask] = tableEntryPrev{
Prev: e.table[prevHash&tableMask].Cur,
Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)},
}
x >>= 8
prevHash = hash(uint32(x))
e.table[prevHash&tableMask] = tableEntryPrev{
Prev: e.table[prevHash&tableMask].Cur,
Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)},
}
x >>= 8
currHash := hash(uint32(x))
candidates := e.table[currHash&tableMask]
cv = uint32(x)
e.table[currHash&tableMask] = tableEntryPrev{
Prev: candidates.Cur,
Cur: tableEntry{offset: s + e.cur, val: cv},
}
// Check both candidates
candidate = candidates.Cur
if cv == candidate.val {
offset := s - (candidate.offset - e.cur)
if offset <= maxMatchOffset {
continue
}
} else {
// We only check if value mismatches.
// Offset will always be invalid in other cases.
candidate = candidates.Prev
if cv == candidate.val {
offset := s - (candidate.offset - e.cur)
if offset <= maxMatchOffset {
continue
}
}
}
cv = uint32(x >> 8)
nextHash = hash(cv)
s++
break
}
}
emitRemainder:
if int(nextEmit) < len(src) {
emitLiteral(dst, src[nextEmit:])
}
e.cur += int32(len(src))
e.prev = e.prev[:len(src)]
copy(e.prev, src)
}
// snappyL4 is the level 4 encoder; it extends level 3 by trying an
// alternative candidate when the first match is shorter than matchLenGood.
type snappyL4 struct {
snappyL3
}
// Encode uses a similar algorithm to level 3,
// but will check up to two candidates if the first isn't long enough.
func (e *snappyL4) Encode(dst *tokens, src []byte) {
const (
inputMargin = 8 - 3
minNonLiteralBlockSize = 1 + 1 + inputMargin
matchLenGood = 12
)
// Protect against e.cur wraparound.
if e.cur > 1<<30 {
for i := range e.table[:] {
e.table[i] = tableEntryPrev{}
}
e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]}
}
// This check isn't in the Snappy implementation, but there, the caller
// instead of the callee handles this case.
if len(src) < minNonLiteralBlockSize {
// We do not fill the token table.
// This will be picked up by caller.
dst.n = uint16(len(src))
e.cur += maxStoreBlockSize
e.prev = e.prev[:0]
return
}
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := int32(0)
s := int32(0)
cv := load3232(src, s)
nextHash := hash(cv)
for {
// Copied from the C++ snappy implementation:
//
// Heuristic match skipping: If 32 bytes are scanned with no matches
// found, start looking only at every other byte. If 32 more bytes are
// scanned (or skipped), look at every third byte, etc.. When a match
// is found, immediately go back to looking at every byte. This is a
// small loss (~5% performance, ~0.1% density) for compressible data
// due to more bookkeeping, but for non-compressible data (such as
// JPEG) it's a huge win since the compressor quickly "realizes" the
// data is incompressible and doesn't bother looking for matches
// everywhere.
//
// The "skip" variable keeps track of how many bytes there are since
// the last match; dividing it by 32 (ie. right-shifting by five) gives
// the number of bytes to move ahead for each iteration.
skip := int32(32)
nextS := s
var candidate tableEntry
var candidateAlt tableEntry
for {
s = nextS
bytesBetweenHashLookups := skip >> 5
nextS = s + bytesBetweenHashLookups
skip += bytesBetweenHashLookups
if nextS > sLimit {
goto emitRemainder
}
candidates := e.table[nextHash&tableMask]
now := load3232(src, nextS)
e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}}
nextHash = hash(now)
// Check both candidates
candidate = candidates.Cur
if cv == candidate.val {
offset := s - (candidate.offset - e.cur)
if offset < maxMatchOffset {
offset = s - (candidates.Prev.offset - e.cur)
if cv == candidates.Prev.val && offset < maxMatchOffset {
candidateAlt = candidates.Prev
}
break
}
} else {
// We only check if value mismatches.
// Offset will always be invalid in other cases.
candidate = candidates.Prev
if cv == candidate.val {
offset := s - (candidate.offset - e.cur)
if offset < maxMatchOffset {
break
}
}
}
cv = now
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
emitLiteral(dst, src[nextEmit:s])
// Call emitCopy, and then see if another emitCopy could be our next
// move. Repeat until we find no match for the input immediately after
// what was consumed by the last emitCopy call.
//
// If we exit this loop normally then we need to call emitLiteral next,
// though we don't yet know how big the literal will be. We handle that
// by proceeding to the next iteration of the main loop. We also can
// exit this loop via goto if we get close to exhausting the input.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
// Extend the 4-byte match as long as possible.
//
s += 4
t := candidate.offset - e.cur + 4
l := e.matchlen(s, t, src)
// Try alternative candidate if match length < matchLenGood.
if l < matchLenGood-4 && candidateAlt.offset != 0 {
t2 := candidateAlt.offset - e.cur + 4
l2 := e.matchlen(s, t2, src)
if l2 > l {
l = l2
t = t2
}
}
// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
dst.n++
s += l
nextEmit = s
if s >= sLimit {
t += l
// Index first pair after match end.
if int(t+4) < len(src) && t > 0 {
cv := load3232(src, t)
nextHash = hash(cv)
e.table[nextHash&tableMask] = tableEntryPrev{
Prev: e.table[nextHash&tableMask].Cur,
Cur: tableEntry{offset: e.cur + t, val: cv},
}
}
goto emitRemainder
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-3 to s. If
// another emitCopy is not our next move, also calculate nextHash
// at s+1. At least on GOARCH=amd64, these three hash calculations
// are faster as one load64 call (with some shifts) instead of
// three load32 calls.
x := load6432(src, s-3)
prevHash := hash(uint32(x))
e.table[prevHash&tableMask] = tableEntryPrev{
Prev: e.table[prevHash&tableMask].Cur,
Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)},
}
x >>= 8
prevHash = hash(uint32(x))
e.table[prevHash&tableMask] = tableEntryPrev{
Prev: e.table[prevHash&tableMask].Cur,
Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)},
}
x >>= 8
prevHash = hash(uint32(x))
e.table[prevHash&tableMask] = tableEntryPrev{
Prev: e.table[prevHash&tableMask].Cur,
Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)},
}
x >>= 8
currHash := hash(uint32(x))
candidates := e.table[currHash&tableMask]
cv = uint32(x)
e.table[currHash&tableMask] = tableEntryPrev{
Prev: candidates.Cur,
Cur: tableEntry{offset: s + e.cur, val: cv},
}
// Check both candidates
candidate = candidates.Cur
candidateAlt = tableEntry{}
if cv == candidate.val {
offset := s - (candidate.offset - e.cur)
if offset <= maxMatchOffset {
offset = s - (candidates.Prev.offset - e.cur)
if cv == candidates.Prev.val && offset <= maxMatchOffset {
candidateAlt = candidates.Prev
}
continue
}
} else {
// We only check if value mismatches.
// Offset will always be invalid in other cases.
candidate = candidates.Prev
if cv == candidate.val {
offset := s - (candidate.offset - e.cur)
if offset <= maxMatchOffset {
continue
}
}
}
cv = uint32(x >> 8)
nextHash = hash(cv)
s++
break
}
}
emitRemainder:
if int(nextEmit) < len(src) {
emitLiteral(dst, src[nextEmit:])
}
e.cur += int32(len(src))
e.prev = e.prev[:len(src)]
copy(e.prev, src)
}
func (e *snappyGen) matchlen(s, t int32, src []byte) int32 {
s1 := int(s) + maxMatchLength - 4
if s1 > len(src) {
s1 = len(src)
}
// If we are inside the current block
if t >= 0 {
b := src[t:]
a := src[s:s1]
b = b[:len(a)]
// Extend the match to be as long as possible.
for i := range a {
if a[i] != b[i] {
return int32(i)
}
}
return int32(len(a))
}
// We found a match in the previous block.
tp := int32(len(e.prev)) + t
if tp < 0 {
return 0
}
// Extend the match to be as long as possible.
a := src[s:s1]
b := e.prev[tp:]
if len(b) > len(a) {
b = b[:len(a)]
}
a = a[:len(b)]
for i := range b {
if a[i] != b[i] {
return int32(i)
}
}
// If we reached our limit, we matched everything we are
// allowed to in the previous block and we return.
n := int32(len(b))
if int(s+n) == s1 {
return n
}
// Continue looking for more matches in the current block.
a = src[s+n : s1]
b = src[:len(a)]
for i := range a {
if a[i] != b[i] {
return int32(i) + n
}
}
return int32(len(a)) + n
}
// Reset the encoding table.
func (e *snappyGen) Reset() {
e.prev = e.prev[:0]
e.cur += maxMatchOffset
}
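
These Snappy-derived encoders back the fastest compression levels of this vendored flate package (exactly which level maps to which encoder is an internal detail of this copy; treat the mapping as an assumption). Since the tokenizers are unexported, a minimal sketch exercises them through the public API at `flate.BestSpeed` and round-trips the result:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/klauspost/compress/flate"
)

func main() {
	// BestSpeed routes through the Snappy-derived tokenizers above.
	var buf bytes.Buffer
	w, err := flate.NewWriter(&buf, flate.BestSpeed)
	if err != nil {
		log.Fatal(err)
	}
	payload := bytes.Repeat([]byte("imgproxy "), 128)
	if _, err := w.Write(payload); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("raw %d bytes -> deflated %d bytes\n", len(payload), buf.Len())

	// Round-trip through the matching reader to verify.
	r := flate.NewReader(&buf)
	out, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	r.Close()
	fmt.Println("round-trip ok:", bytes.Equal(out, payload))
}
```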

115
vendor/github.com/klauspost/compress/flate/token.go generated vendored Normal file

@ -0,0 +1,115 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate
import "fmt"
const (
// 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused
// 8 bits: xlength = length - MIN_MATCH_LENGTH
// 22 bits xoffset = offset - MIN_OFFSET_SIZE, or literal
lengthShift = 22
offsetMask = 1<<lengthShift - 1
typeMask = 3 << 30
literalType = 0 << 30
matchType = 1 << 30
)
// The length code for length X (MIN_MATCH_LENGTH <= X <= MAX_MATCH_LENGTH)
// is lengthCodes[length - MIN_MATCH_LENGTH]
var lengthCodes = [...]uint32{
0, 1, 2, 3, 4, 5, 6, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 12, 12,
13, 13, 13, 13, 14, 14, 14, 14, 15, 15,
15, 15, 16, 16, 16, 16, 16, 16, 16, 16,
17, 17, 17, 17, 17, 17, 17, 17, 18, 18,
18, 18, 18, 18, 18, 18, 19, 19, 19, 19,
19, 19, 19, 19, 20, 20, 20, 20, 20, 20,
20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
21, 21, 21, 21, 21, 21, 22, 22, 22, 22,
22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 28,
}
var offsetCodes = [...]uint32{
0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
}
type token uint32
type tokens struct {
tokens [maxStoreBlockSize + 1]token
n uint16 // Must be able to contain maxStoreBlockSize
}
// Convert a literal into a literal token.
func literalToken(literal uint32) token { return token(literalType + literal) }
// Convert a < xlength, xoffset > pair into a match token.
func matchToken(xlength uint32, xoffset uint32) token {
return token(matchType + xlength<<lengthShift + xoffset)
}
func matchTokend(xlength uint32, xoffset uint32) token {
if xlength > maxMatchLength || xoffset > maxMatchOffset {
panic(fmt.Sprintf("Invalid match: len: %d, offset: %d\n", xlength, xoffset))
return token(matchType)
}
return token(matchType + xlength<<lengthShift + xoffset)
}
// Returns the type of a token
func (t token) typ() uint32 { return uint32(t) & typeMask }
// Returns the literal of a literal token
func (t token) literal() uint32 { return uint32(t - literalType) }
// Returns the extra offset of a match token
func (t token) offset() uint32 { return uint32(t) & offsetMask }
func (t token) length() uint32 { return uint32((t - matchType) >> lengthShift) }
func lengthCode(len uint32) uint32 { return lengthCodes[len] }
// Returns the offset code corresponding to a specific offset
func offsetCode(off uint32) uint32 {
if off < uint32(len(offsetCodes)) {
return offsetCodes[off]
} else if off>>7 < uint32(len(offsetCodes)) {
return offsetCodes[off>>7] + 14
} else {
return offsetCodes[off>>14] + 28
}
}
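
The bit layout above (2-bit type, 8-bit xlength, 22-bit xoffset) is easy to sanity-check by hand. Here is a standalone sketch that re-declares the constants locally, since the real ones are unexported; the base values 3 and 1 follow flate's baseMatchLength/baseMatchOffset convention:

```go
package main

import "fmt"

// Mirrors the layout in token.go: top 2 bits are the type, the next
// 8 bits hold xlength (length - 3), the low 22 bits hold xoffset
// (offset - 1).
const (
	lengthShift = 22
	offsetMask  = 1<<lengthShift - 1
	matchType   = 1 << 30
)

func matchToken(xlength, xoffset uint32) uint32 {
	return matchType + xlength<<lengthShift + xoffset
}

func main() {
	// A match of length 7 at offset 100: xlength = 7-3, xoffset = 100-1.
	t := matchToken(4, 99)
	fmt.Printf("type bits: %#x\n", t&(3<<30)) // matchType
	fmt.Println("xlength:", (t-matchType)>>lengthShift) // 4
	fmt.Println("xoffset:", t&offsetMask)               // 99
}
```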

344
vendor/github.com/klauspost/compress/gzip/gunzip.go generated vendored Normal file
View File

@ -0,0 +1,344 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gzip implements reading and writing of gzip format compressed files,
// as specified in RFC 1952.
package gzip
import (
"bufio"
"encoding/binary"
"errors"
"hash/crc32"
"io"
"time"
"github.com/klauspost/compress/flate"
)
const (
gzipID1 = 0x1f
gzipID2 = 0x8b
gzipDeflate = 8
flagText = 1 << 0
flagHdrCrc = 1 << 1
flagExtra = 1 << 2
flagName = 1 << 3
flagComment = 1 << 4
)
var (
// ErrChecksum is returned when reading GZIP data that has an invalid checksum.
ErrChecksum = errors.New("gzip: invalid checksum")
// ErrHeader is returned when reading GZIP data that has an invalid header.
ErrHeader = errors.New("gzip: invalid header")
)
var le = binary.LittleEndian
// noEOF converts io.EOF to io.ErrUnexpectedEOF.
func noEOF(err error) error {
if err == io.EOF {
return io.ErrUnexpectedEOF
}
return err
}
// The gzip file stores a header giving metadata about the compressed file.
// That header is exposed as the fields of the Writer and Reader structs.
//
// Strings must be UTF-8 encoded and may only contain Unicode code points
// U+0001 through U+00FF, due to limitations of the GZIP file format.
type Header struct {
Comment string // comment
Extra []byte // "extra data"
ModTime time.Time // modification time
Name string // file name
OS byte // operating system type
}
// A Reader is an io.Reader that can be read to retrieve
// uncompressed data from a gzip-format compressed file.
//
// In general, a gzip file can be a concatenation of gzip files,
// each with its own header. Reads from the Reader
// return the concatenation of the uncompressed data of each.
// Only the first header is recorded in the Reader fields.
//
// Gzip files store a length and checksum of the uncompressed data.
// The Reader will return a ErrChecksum when Read
// reaches the end of the uncompressed data if it does not
// have the expected length or checksum. Clients should treat data
// returned by Read as tentative until they receive the io.EOF
// marking the end of the data.
type Reader struct {
Header // valid after NewReader or Reader.Reset
r flate.Reader
decompressor io.ReadCloser
digest uint32 // CRC-32, IEEE polynomial (section 8)
size uint32 // Uncompressed size (section 2.3.1)
buf [512]byte
err error
multistream bool
}
// NewReader creates a new Reader reading the given reader.
// If r does not also implement io.ByteReader,
// the decompressor may read more data than necessary from r.
//
// It is the caller's responsibility to call Close on the Reader when done.
//
// The Reader.Header fields will be valid in the Reader returned.
func NewReader(r io.Reader) (*Reader, error) {
z := new(Reader)
if err := z.Reset(r); err != nil {
return nil, err
}
return z, nil
}
// Reset discards the Reader z's state and makes it equivalent to the
// result of its original state from NewReader, but reading from r instead.
// This permits reusing a Reader rather than allocating a new one.
func (z *Reader) Reset(r io.Reader) error {
*z = Reader{
decompressor: z.decompressor,
multistream: true,
}
if rr, ok := r.(flate.Reader); ok {
z.r = rr
} else {
z.r = bufio.NewReader(r)
}
z.Header, z.err = z.readHeader()
return z.err
}
// Multistream controls whether the reader supports multistream files.
//
// If enabled (the default), the Reader expects the input to be a sequence
// of individually gzipped data streams, each with its own header and
// trailer, ending at EOF. The effect is that the concatenation of a sequence
// of gzipped files is treated as equivalent to the gzip of the concatenation
// of the sequence. This is standard behavior for gzip readers.
//
// Calling Multistream(false) disables this behavior; disabling the behavior
// can be useful when reading file formats that distinguish individual gzip
// data streams or mix gzip data streams with other data streams.
// In this mode, when the Reader reaches the end of the data stream,
// Read returns io.EOF. If the underlying reader implements io.ByteReader,
// it will be left positioned just after the gzip stream.
// To start the next stream, call z.Reset(r) followed by z.Multistream(false).
// If there is no next stream, z.Reset(r) will return io.EOF.
func (z *Reader) Multistream(ok bool) {
z.multistream = ok
}
// readString reads a NUL-terminated string from z.r.
// It treats the bytes read as being encoded as ISO 8859-1 (Latin-1) and
// will output a string encoded using UTF-8.
// This method always updates z.digest with the data read.
func (z *Reader) readString() (string, error) {
var err error
needConv := false
for i := 0; ; i++ {
if i >= len(z.buf) {
return "", ErrHeader
}
z.buf[i], err = z.r.ReadByte()
if err != nil {
return "", err
}
if z.buf[i] > 0x7f {
needConv = true
}
if z.buf[i] == 0 {
// Digest covers the NUL terminator.
z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:i+1])
// Strings are ISO 8859-1, Latin-1 (RFC 1952, section 2.3.1).
if needConv {
s := make([]rune, 0, i)
for _, v := range z.buf[:i] {
s = append(s, rune(v))
}
return string(s), nil
}
return string(z.buf[:i]), nil
}
}
}
// readHeader reads the GZIP header according to section 2.3.1.
// This method does not set z.err.
func (z *Reader) readHeader() (hdr Header, err error) {
if _, err = io.ReadFull(z.r, z.buf[:10]); err != nil {
// RFC 1952, section 2.2, says the following:
// A gzip file consists of a series of "members" (compressed data sets).
//
// Other than this, the specification does not clarify whether a
// "series" is defined as "one or more" or "zero or more". To err on the
// side of caution, Go interprets this to mean "zero or more".
// Thus, it is okay to return io.EOF here.
return hdr, err
}
if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate {
return hdr, ErrHeader
}
flg := z.buf[3]
hdr.ModTime = time.Unix(int64(le.Uint32(z.buf[4:8])), 0)
// z.buf[8] is XFL and is currently ignored.
hdr.OS = z.buf[9]
z.digest = crc32.ChecksumIEEE(z.buf[:10])
if flg&flagExtra != 0 {
if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
return hdr, noEOF(err)
}
z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:2])
data := make([]byte, le.Uint16(z.buf[:2]))
if _, err = io.ReadFull(z.r, data); err != nil {
return hdr, noEOF(err)
}
z.digest = crc32.Update(z.digest, crc32.IEEETable, data)
hdr.Extra = data
}
var s string
if flg&flagName != 0 {
if s, err = z.readString(); err != nil {
return hdr, err
}
hdr.Name = s
}
if flg&flagComment != 0 {
if s, err = z.readString(); err != nil {
return hdr, err
}
hdr.Comment = s
}
if flg&flagHdrCrc != 0 {
if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
return hdr, noEOF(err)
}
digest := le.Uint16(z.buf[:2])
if digest != uint16(z.digest) {
return hdr, ErrHeader
}
}
z.digest = 0
if z.decompressor == nil {
z.decompressor = flate.NewReader(z.r)
} else {
z.decompressor.(flate.Resetter).Reset(z.r, nil)
}
return hdr, nil
}
// Read implements io.Reader, reading uncompressed bytes from its underlying Reader.
func (z *Reader) Read(p []byte) (n int, err error) {
if z.err != nil {
return 0, z.err
}
n, z.err = z.decompressor.Read(p)
z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
z.size += uint32(n)
if z.err != io.EOF {
// In the normal case we return here.
return n, z.err
}
// Finished file; check checksum and size.
if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
z.err = noEOF(err)
return n, z.err
}
digest := le.Uint32(z.buf[:4])
size := le.Uint32(z.buf[4:8])
if digest != z.digest || size != z.size {
z.err = ErrChecksum
return n, z.err
}
z.digest, z.size = 0, 0
// File is ok; check if there is another.
if !z.multistream {
return n, io.EOF
}
z.err = nil // Remove io.EOF
if _, z.err = z.readHeader(); z.err != nil {
return n, z.err
}
// Read from next file, if necessary.
if n > 0 {
return n, nil
}
return z.Read(p)
}
// Support the io.WriteTo interface for io.Copy and friends.
func (z *Reader) WriteTo(w io.Writer) (int64, error) {
total := int64(0)
crcWriter := crc32.NewIEEE()
for {
if z.err != nil {
if z.err == io.EOF {
return total, nil
}
return total, z.err
}
// We write both to output and digest.
mw := io.MultiWriter(w, crcWriter)
n, err := z.decompressor.(io.WriterTo).WriteTo(mw)
total += n
z.size += uint32(n)
if err != nil {
z.err = err
return total, z.err
}
// Finished file; check checksum + size.
if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
z.err = err
return total, err
}
z.digest = crcWriter.Sum32()
digest := le.Uint32(z.buf[:4])
size := le.Uint32(z.buf[4:8])
if digest != z.digest || size != z.size {
z.err = ErrChecksum
return total, z.err
}
z.digest, z.size = 0, 0
// File is ok; check if there is another.
if !z.multistream {
return total, nil
}
crcWriter.Reset()
z.err = nil // Remove io.EOF
if _, z.err = z.readHeader(); z.err != nil {
if z.err == io.EOF {
return total, nil
}
return total, z.err
}
}
}
// Close closes the Reader. It does not close the underlying io.Reader.
// In order for the GZIP checksum to be verified, the reader must be
// fully consumed until the io.EOF.
func (z *Reader) Close() error { return z.decompressor.Close() }
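
A usage sketch for the reader above, including the multistream behavior documented on Multistream: two concatenated gzip members decode as one stream by default.

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/klauspost/compress/gzip"
)

func main() {
	// Two concatenated gzip members; the Reader transparently
	// decodes both while multistream mode (the default) is on.
	var src bytes.Buffer
	for _, s := range []string{"hello, ", "world\n"} {
		w := gzip.NewWriter(&src)
		w.Write([]byte(s))
		w.Close()
	}

	r, err := gzip.NewReader(&src)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	out, err := ioutil.ReadAll(r) // io.EOF only after the last member
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out) // hello, world
}
```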

251
vendor/github.com/klauspost/compress/gzip/gzip.go generated vendored Normal file

@ -0,0 +1,251 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gzip
import (
"errors"
"fmt"
"hash/crc32"
"io"
"github.com/klauspost/compress/flate"
)
// These constants are copied from the flate package, so that code that imports
// "compress/gzip" does not also have to import "compress/flate".
const (
NoCompression = flate.NoCompression
BestSpeed = flate.BestSpeed
BestCompression = flate.BestCompression
DefaultCompression = flate.DefaultCompression
ConstantCompression = flate.ConstantCompression
HuffmanOnly = flate.HuffmanOnly
)
// A Writer is an io.WriteCloser.
// Writes to a Writer are compressed and written to w.
type Writer struct {
Header // written at first call to Write, Flush, or Close
w io.Writer
level int
wroteHeader bool
compressor *flate.Writer
digest uint32 // CRC-32, IEEE polynomial (section 8)
size uint32 // Uncompressed size (section 2.3.1)
closed bool
buf [10]byte
err error
}
// NewWriter returns a new Writer.
// Writes to the returned writer are compressed and written to w.
//
// It is the caller's responsibility to call Close on the WriteCloser when done.
// Writes may be buffered and not flushed until Close.
//
// Callers that wish to set the fields in Writer.Header must do so before
// the first call to Write, Flush, or Close.
func NewWriter(w io.Writer) *Writer {
z, _ := NewWriterLevel(w, DefaultCompression)
return z
}
// NewWriterLevel is like NewWriter but specifies the compression level instead
// of assuming DefaultCompression.
//
// The compression level can be DefaultCompression, NoCompression, or any
// integer value between BestSpeed and BestCompression inclusive. The error
// returned will be nil if the level is valid.
func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
if level < HuffmanOnly || level > BestCompression {
return nil, fmt.Errorf("gzip: invalid compression level: %d", level)
}
z := new(Writer)
z.init(w, level)
return z, nil
}
func (z *Writer) init(w io.Writer, level int) {
compressor := z.compressor
if compressor != nil {
compressor.Reset(w)
}
*z = Writer{
Header: Header{
OS: 255, // unknown
},
w: w,
level: level,
compressor: compressor,
}
}
// Reset discards the Writer z's state and makes it equivalent to the
// result of its original state from NewWriter or NewWriterLevel, but
// writing to w instead. This permits reusing a Writer rather than
// allocating a new one.
func (z *Writer) Reset(w io.Writer) {
z.init(w, z.level)
}
// writeBytes writes a length-prefixed byte slice to z.w.
func (z *Writer) writeBytes(b []byte) error {
if len(b) > 0xffff {
return errors.New("gzip.Write: Extra data is too large")
}
le.PutUint16(z.buf[:2], uint16(len(b)))
_, err := z.w.Write(z.buf[:2])
if err != nil {
return err
}
_, err = z.w.Write(b)
return err
}
// writeString writes a UTF-8 string s in GZIP's format to z.w.
// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1).
func (z *Writer) writeString(s string) (err error) {
// GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII.
needconv := false
for _, v := range s {
if v == 0 || v > 0xff {
return errors.New("gzip.Write: non-Latin-1 header string")
}
if v > 0x7f {
needconv = true
}
}
if needconv {
b := make([]byte, 0, len(s))
for _, v := range s {
b = append(b, byte(v))
}
_, err = z.w.Write(b)
} else {
_, err = io.WriteString(z.w, s)
}
if err != nil {
return err
}
// GZIP strings are NUL-terminated.
z.buf[0] = 0
_, err = z.w.Write(z.buf[:1])
return err
}
// Write writes a compressed form of p to the underlying io.Writer. The
// compressed bytes are not necessarily flushed until the Writer is closed.
func (z *Writer) Write(p []byte) (int, error) {
if z.err != nil {
return 0, z.err
}
var n int
// Write the GZIP header lazily.
if !z.wroteHeader {
z.wroteHeader = true
z.buf[0] = gzipID1
z.buf[1] = gzipID2
z.buf[2] = gzipDeflate
z.buf[3] = 0
if z.Extra != nil {
z.buf[3] |= 0x04
}
if z.Name != "" {
z.buf[3] |= 0x08
}
if z.Comment != "" {
z.buf[3] |= 0x10
}
le.PutUint32(z.buf[4:8], uint32(z.ModTime.Unix()))
if z.level == BestCompression {
z.buf[8] = 2
} else if z.level == BestSpeed {
z.buf[8] = 4
} else {
z.buf[8] = 0
}
z.buf[9] = z.OS
n, z.err = z.w.Write(z.buf[:10])
if z.err != nil {
return n, z.err
}
if z.Extra != nil {
z.err = z.writeBytes(z.Extra)
if z.err != nil {
return n, z.err
}
}
if z.Name != "" {
z.err = z.writeString(z.Name)
if z.err != nil {
return n, z.err
}
}
if z.Comment != "" {
z.err = z.writeString(z.Comment)
if z.err != nil {
return n, z.err
}
}
if z.compressor == nil {
z.compressor, _ = flate.NewWriter(z.w, z.level)
}
}
z.size += uint32(len(p))
z.digest = crc32.Update(z.digest, crc32.IEEETable, p)
n, z.err = z.compressor.Write(p)
return n, z.err
}
// Flush flushes any pending compressed data to the underlying writer.
//
// It is useful mainly in compressed network protocols, to ensure that
// a remote reader has enough data to reconstruct a packet. Flush does
// not return until the data has been written. If the underlying
// writer returns an error, Flush returns that error.
//
// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
func (z *Writer) Flush() error {
if z.err != nil {
return z.err
}
if z.closed {
return nil
}
if !z.wroteHeader {
z.Write(nil)
if z.err != nil {
return z.err
}
}
z.err = z.compressor.Flush()
return z.err
}
// Close closes the Writer, flushing any unwritten data to the underlying
// io.Writer, but does not close the underlying io.Writer.
func (z *Writer) Close() error {
if z.err != nil {
return z.err
}
if z.closed {
return nil
}
z.closed = true
if !z.wroteHeader {
z.Write(nil)
if z.err != nil {
return z.err
}
}
z.err = z.compressor.Close()
if z.err != nil {
return z.err
}
le.PutUint32(z.buf[:4], z.digest)
le.PutUint32(z.buf[4:8], z.size)
_, z.err = z.w.Write(z.buf[:8])
return z.err
}
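
A writer-side sketch: header metadata has to be assigned before the first Write, Flush, or Close, because Write emits the header lazily. The file name, comment, and timestamp below are arbitrary examples:

```go
package main

import (
	"bytes"
	"fmt"
	"log"
	"time"

	"github.com/klauspost/compress/gzip"
)

func main() {
	var buf bytes.Buffer
	w, err := gzip.NewWriterLevel(&buf, gzip.BestSpeed)
	if err != nil {
		log.Fatal(err)
	}
	// Header fields must be set before the first Write/Flush/Close.
	w.Name = "payload.txt"
	w.Comment = "written via klauspost/compress"
	w.ModTime = time.Unix(1538750256, 0)

	if _, err := w.Write([]byte("some payload\n")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("gzip member size:", buf.Len())
}
```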

15
vendor/github.com/klauspost/compress/snappy/AUTHORS generated vendored Normal file

@ -0,0 +1,15 @@
# This is the official list of Snappy-Go authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
# Names should be added to this file as
# Name or Organization <email address>
# The email address is not required for organizations.
# Please keep the list sorted.
Damian Gryski <dgryski@gmail.com>
Google Inc.
Jan Mercl <0xjnml@gmail.com>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Sebastien Binet <seb.binet@gmail.com>

37
vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS generated vendored Normal file

@ -0,0 +1,37 @@
# This is the official list of people who can contribute
# (and typically have contributed) code to the Snappy-Go repository.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# The submission process automatically checks to make sure
# that people submitting code are listed in this file (by email address).
#
# Names should be added to this file only after verifying that
# the individual or the individual's organization has agreed to
# the appropriate Contributor License Agreement, found here:
#
# http://code.google.com/legal/individual-cla-v1.0.html
# http://code.google.com/legal/corporate-cla-v1.0.html
#
# The agreement for individuals can be filled out on the web.
#
# When adding J Random Contributor's name to this file,
# either J's name or J's organization's name should be
# added to the AUTHORS file, depending on whether the
# individual or corporate CLA was used.
# Names should be added to this file like so:
# Name <email address>
# Please keep the list sorted.
Damian Gryski <dgryski@gmail.com>
Jan Mercl <0xjnml@gmail.com>
Kai Backman <kaib@golang.org>
Marc-Antoine Ruel <maruel@chromium.org>
Nigel Tao <nigeltao@golang.org>
Rob Pike <r@golang.org>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Russ Cox <rsc@golang.org>
Sebastien Binet <seb.binet@gmail.com>

27
vendor/github.com/klauspost/compress/snappy/LICENSE generated vendored Normal file

@ -0,0 +1,27 @@
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

183
vendor/github.com/klauspost/compress/zlib/reader.go generated vendored Normal file

@ -0,0 +1,183 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package zlib implements reading and writing of zlib format compressed data,
as specified in RFC 1950.
The implementation provides filters that uncompress during reading
and compress during writing. For example, to write compressed data
to a buffer:
var b bytes.Buffer
w := zlib.NewWriter(&b)
w.Write([]byte("hello, world\n"))
w.Close()
and to read that data back:
r, err := zlib.NewReader(&b)
io.Copy(os.Stdout, r)
r.Close()
*/
package zlib
import (
"bufio"
"errors"
"hash"
"hash/adler32"
"io"
"github.com/klauspost/compress/flate"
)
const zlibDeflate = 8
var (
// ErrChecksum is returned when reading ZLIB data that has an invalid checksum.
ErrChecksum = errors.New("zlib: invalid checksum")
// ErrDictionary is returned when reading ZLIB data that has an invalid dictionary.
ErrDictionary = errors.New("zlib: invalid dictionary")
// ErrHeader is returned when reading ZLIB data that has an invalid header.
ErrHeader = errors.New("zlib: invalid header")
)
type reader struct {
r flate.Reader
decompressor io.ReadCloser
digest hash.Hash32
err error
scratch [4]byte
}
// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
// to switch to a new underlying Reader. This permits reusing a ReadCloser
// instead of allocating a new one.
type Resetter interface {
// Reset discards any buffered data and resets the Resetter as if it was
// newly initialized with the given reader.
Reset(r io.Reader, dict []byte) error
}
// NewReader creates a new ReadCloser.
// Reads from the returned ReadCloser read and decompress data from r.
// If r does not implement io.ByteReader, the decompressor may read more
// data than necessary from r.
// It is the caller's responsibility to call Close on the ReadCloser when done.
//
// The ReadCloser returned by NewReader also implements Resetter.
func NewReader(r io.Reader) (io.ReadCloser, error) {
return NewReaderDict(r, nil)
}
// NewReaderDict is like NewReader but uses a preset dictionary.
// NewReaderDict ignores the dictionary if the compressed data does not refer to it.
// If the compressed data refers to a different dictionary, NewReaderDict returns ErrDictionary.
//
// The ReadCloser returned by NewReaderDict also implements Resetter.
func NewReaderDict(r io.Reader, dict []byte) (io.ReadCloser, error) {
z := new(reader)
err := z.Reset(r, dict)
if err != nil {
return nil, err
}
return z, nil
}
func (z *reader) Read(p []byte) (int, error) {
if z.err != nil {
return 0, z.err
}
var n int
n, z.err = z.decompressor.Read(p)
z.digest.Write(p[0:n])
if z.err != io.EOF {
// In the normal case we return here.
return n, z.err
}
// Finished file; check checksum.
if _, err := io.ReadFull(z.r, z.scratch[0:4]); err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
z.err = err
return n, z.err
}
// ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952).
checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3])
if checksum != z.digest.Sum32() {
z.err = ErrChecksum
return n, z.err
}
return n, io.EOF
}
// Calling Close does not close the wrapped io.Reader originally passed to NewReader.
// In order for the ZLIB checksum to be verified, the reader must be
// fully consumed until the io.EOF.
func (z *reader) Close() error {
if z.err != nil && z.err != io.EOF {
return z.err
}
z.err = z.decompressor.Close()
return z.err
}
func (z *reader) Reset(r io.Reader, dict []byte) error {
*z = reader{decompressor: z.decompressor, digest: z.digest}
if fr, ok := r.(flate.Reader); ok {
z.r = fr
} else {
z.r = bufio.NewReader(r)
}
// Read the header (RFC 1950 section 2.2.).
_, z.err = io.ReadFull(z.r, z.scratch[0:2])
if z.err != nil {
if z.err == io.EOF {
z.err = io.ErrUnexpectedEOF
}
return z.err
}
h := uint(z.scratch[0])<<8 | uint(z.scratch[1])
if (z.scratch[0]&0x0f != zlibDeflate) || (h%31 != 0) {
z.err = ErrHeader
return z.err
}
haveDict := z.scratch[1]&0x20 != 0
if haveDict {
_, z.err = io.ReadFull(z.r, z.scratch[0:4])
if z.err != nil {
if z.err == io.EOF {
z.err = io.ErrUnexpectedEOF
}
return z.err
}
checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3])
if checksum != adler32.Checksum(dict) {
z.err = ErrDictionary
return z.err
}
}
if z.decompressor == nil {
if haveDict {
z.decompressor = flate.NewReaderDict(z.r, dict)
} else {
z.decompressor = flate.NewReader(z.r)
}
} else {
z.decompressor.(flate.Resetter).Reset(z.r, dict)
}
if z.digest != nil {
z.digest.Reset()
} else {
z.digest = adler32.New()
}
return nil
}
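
The FDICT path checked in Reset above pairs with the writer's dictionary support. A sketch using a preset dictionary on both ends; the dictionary bytes are arbitrary:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/klauspost/compress/zlib"
)

func main() {
	dict := []byte("a preset dictionary of common phrases")

	var buf bytes.Buffer
	w, err := zlib.NewWriterLevelDict(&buf, zlib.BestCompression, dict)
	if err != nil {
		log.Fatal(err)
	}
	w.Write([]byte("a preset dictionary makes short messages smaller"))
	w.Close()

	// The reader must be handed the same dictionary; Reset (above)
	// verifies its Adler-32 checksum against the value stored after
	// the FDICT header bit.
	r, err := zlib.NewReaderDict(&buf, dict)
	if err != nil {
		log.Fatal(err)
	}
	out, _ := ioutil.ReadAll(r)
	r.Close()
	fmt.Printf("%s\n", out)
}
```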

201
vendor/github.com/klauspost/compress/zlib/writer.go generated vendored Normal file

@ -0,0 +1,201 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package zlib
import (
"fmt"
"hash"
"hash/adler32"
"io"
"github.com/klauspost/compress/flate"
)
// These constants are copied from the flate package, so that code that imports
// "compress/zlib" does not also have to import "compress/flate".
const (
NoCompression = flate.NoCompression
BestSpeed = flate.BestSpeed
BestCompression = flate.BestCompression
DefaultCompression = flate.DefaultCompression
ConstantCompression = flate.ConstantCompression
HuffmanOnly = flate.HuffmanOnly
)
// A Writer takes data written to it and writes the compressed
// form of that data to an underlying writer (see NewWriter).
type Writer struct {
w io.Writer
level int
dict []byte
compressor *flate.Writer
digest hash.Hash32
err error
scratch [4]byte
wroteHeader bool
}
// NewWriter creates a new Writer.
// Writes to the returned Writer are compressed and written to w.
//
// It is the caller's responsibility to call Close on the WriteCloser when done.
// Writes may be buffered and not flushed until Close.
func NewWriter(w io.Writer) *Writer {
z, _ := NewWriterLevelDict(w, DefaultCompression, nil)
return z
}
// NewWriterLevel is like NewWriter but specifies the compression level instead
// of assuming DefaultCompression.
//
// The compression level can be DefaultCompression, NoCompression, HuffmanOnly
// or any integer value between BestSpeed and BestCompression inclusive.
// The error returned will be nil if the level is valid.
func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
return NewWriterLevelDict(w, level, nil)
}
// NewWriterLevelDict is like NewWriterLevel but specifies a dictionary to
// compress with.
//
// The dictionary may be nil. If not, its contents should not be modified until
// the Writer is closed.
func NewWriterLevelDict(w io.Writer, level int, dict []byte) (*Writer, error) {
if level < HuffmanOnly || level > BestCompression {
return nil, fmt.Errorf("zlib: invalid compression level: %d", level)
}
return &Writer{
w: w,
level: level,
dict: dict,
}, nil
}
// Reset clears the state of the Writer z such that it is equivalent to its
// initial state from NewWriterLevel or NewWriterLevelDict, but instead writing
// to w.
func (z *Writer) Reset(w io.Writer) {
z.w = w
// z.level and z.dict left unchanged.
if z.compressor != nil {
z.compressor.Reset(w)
}
if z.digest != nil {
z.digest.Reset()
}
z.err = nil
z.scratch = [4]byte{}
z.wroteHeader = false
}
// writeHeader writes the ZLIB header.
func (z *Writer) writeHeader() (err error) {
z.wroteHeader = true
// ZLIB has a two-byte header (as documented in RFC 1950).
// The first four bits are the CINFO (compression info), which is 7 for the default deflate window size.
// The next four bits are the CM (compression method), which is 8 for deflate.
z.scratch[0] = 0x78
// The next two bits are the FLEVEL (compression level). The four values are:
// 0=fastest, 1=fast, 2=default, 3=best.
// The next bit, FDICT, is set if a dictionary is given.
// The final five FCHECK bits form a mod-31 checksum.
switch z.level {
case -2, 0, 1:
z.scratch[1] = 0 << 6
case 2, 3, 4, 5:
z.scratch[1] = 1 << 6
case 6, -1:
z.scratch[1] = 2 << 6
case 7, 8, 9:
z.scratch[1] = 3 << 6
default:
panic("unreachable")
}
if z.dict != nil {
z.scratch[1] |= 1 << 5
}
z.scratch[1] += uint8(31 - (uint16(z.scratch[0])<<8+uint16(z.scratch[1]))%31)
if _, err = z.w.Write(z.scratch[0:2]); err != nil {
return err
}
if z.dict != nil {
// The next four bytes are the Adler-32 checksum of the dictionary.
checksum := adler32.Checksum(z.dict)
z.scratch[0] = uint8(checksum >> 24)
z.scratch[1] = uint8(checksum >> 16)
z.scratch[2] = uint8(checksum >> 8)
z.scratch[3] = uint8(checksum >> 0)
if _, err = z.w.Write(z.scratch[0:4]); err != nil {
return err
}
}
if z.compressor == nil {
// Initialize deflater unless the Writer is being reused
// after a Reset call.
z.compressor, err = flate.NewWriterDict(z.w, z.level, z.dict)
if err != nil {
return err
}
z.digest = adler32.New()
}
return nil
}
// Write writes a compressed form of p to the underlying io.Writer. The
// compressed bytes are not necessarily flushed until the Writer is closed or
// explicitly flushed.
func (z *Writer) Write(p []byte) (n int, err error) {
if !z.wroteHeader {
z.err = z.writeHeader()
}
if z.err != nil {
return 0, z.err
}
if len(p) == 0 {
return 0, nil
}
n, err = z.compressor.Write(p)
if err != nil {
z.err = err
return
}
z.digest.Write(p)
return
}
// Flush flushes the Writer to its underlying io.Writer.
func (z *Writer) Flush() error {
if !z.wroteHeader {
z.err = z.writeHeader()
}
if z.err != nil {
return z.err
}
z.err = z.compressor.Flush()
return z.err
}
// Close closes the Writer, flushing any unwritten data to the underlying
// io.Writer, but does not close the underlying io.Writer.
func (z *Writer) Close() error {
if !z.wroteHeader {
z.err = z.writeHeader()
}
if z.err != nil {
return z.err
}
z.err = z.compressor.Close()
if z.err != nil {
return z.err
}
checksum := z.digest.Sum32()
// ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952).
z.scratch[0] = uint8(checksum >> 24)
z.scratch[1] = uint8(checksum >> 16)
z.scratch[2] = uint8(checksum >> 8)
z.scratch[3] = uint8(checksum >> 0)
_, z.err = z.w.Write(z.scratch[0:4])
return z.err
}
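
writeHeader's FLEVEL/FCHECK computation is worth seeing in isolation. The following sketch rebuilds the two header bytes for DefaultCompression with no dictionary, following RFC 1950 rather than the package's internals, and lands on the familiar 0x78 0x9c:

```go
package main

import "fmt"

// Reproduces the two-byte zlib header that writeHeader emits for
// DefaultCompression with no preset dictionary.
func main() {
	cmf := byte(0x78)   // CINFO=7 (32 KiB window), CM=8 (deflate)
	flg := byte(2 << 6) // FLEVEL=2 (default), FDICT=0

	// FCHECK: pad so the 16-bit big-endian value is a multiple of 31.
	flg += byte(31 - (uint16(cmf)<<8+uint16(flg))%31)

	fmt.Printf("header: %#02x %#02x\n", cmf, flg)            // 0x78 0x9c
	fmt.Println("mod 31:", (uint16(cmf)<<8+uint16(flg))%31) // 0
}
```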

24
vendor/github.com/klauspost/cpuid/.gitignore generated vendored Normal file

@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

24
vendor/github.com/klauspost/cpuid/.travis.yml generated vendored Normal file

@ -0,0 +1,24 @@
language: go
sudo: false
os:
- linux
- osx
go:
- 1.5.x
- 1.6.x
- 1.7.x
- 1.8.x
- master
script:
- go vet ./...
- go test -v ./...
- go test -race ./...
- diff <(gofmt -d .) <(printf "")
matrix:
allow_failures:
- go: 'master'
fast_finish: true

22
vendor/github.com/klauspost/cpuid/LICENSE generated vendored Normal file

@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2015 Klaus Post
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

145
vendor/github.com/klauspost/cpuid/README.md generated vendored Normal file

@ -0,0 +1,145 @@
# cpuid
Package cpuid provides information about the CPU running the current program.
CPU features are detected on startup, and kept for fast access through the life of the application.
Currently x86 / x64 (AMD64) is supported, and no external C (cgo) code is used, which should make the library very easy to use.
You can access the CPU information by accessing the shared CPU variable of the cpuid library.
Package home: https://github.com/klauspost/cpuid
[![GoDoc][1]][2] [![Build Status][3]][4]
[1]: https://godoc.org/github.com/klauspost/cpuid?status.svg
[2]: https://godoc.org/github.com/klauspost/cpuid
[3]: https://travis-ci.org/klauspost/cpuid.svg
[4]: https://travis-ci.org/klauspost/cpuid
# features
## CPU Instructions
* **CMOV** (i686 CMOV)
* **NX** (NX (No-Execute) bit)
* **AMD3DNOW** (AMD 3DNOW)
* **AMD3DNOWEXT** (AMD 3DNowExt)
* **MMX** (standard MMX)
* **MMXEXT** (SSE integer functions or AMD MMX ext)
* **SSE** (SSE functions)
* **SSE2** (P4 SSE functions)
* **SSE3** (Prescott SSE3 functions)
* **SSSE3** (Conroe SSSE3 functions)
* **SSE4** (Penryn SSE4.1 functions)
* **SSE4A** (AMD Barcelona microarchitecture SSE4a instructions)
* **SSE42** (Nehalem SSE4.2 functions)
* **AVX** (AVX functions)
* **AVX2** (AVX2 functions)
* **FMA3** (Intel FMA 3)
* **FMA4** (Bulldozer FMA4 functions)
* **XOP** (Bulldozer XOP functions)
* **F16C** (Half-precision floating-point conversion)
* **BMI1** (Bit Manipulation Instruction Set 1)
* **BMI2** (Bit Manipulation Instruction Set 2)
* **TBM** (AMD Trailing Bit Manipulation)
* **LZCNT** (LZCNT instruction)
* **POPCNT** (POPCNT instruction)
* **AESNI** (Advanced Encryption Standard New Instructions)
* **CLMUL** (Carry-less Multiplication)
* **HTT** (Hyperthreading (enabled))
* **HLE** (Hardware Lock Elision)
* **RTM** (Restricted Transactional Memory)
* **RDRAND** (RDRAND instruction is available)
* **RDSEED** (RDSEED instruction is available)
* **ADX** (Intel ADX (Multi-Precision Add-Carry Instruction Extensions))
* **SHA** (Intel SHA Extensions)
* **AVX512F** (AVX-512 Foundation)
* **AVX512DQ** (AVX-512 Doubleword and Quadword Instructions)
* **AVX512IFMA** (AVX-512 Integer Fused Multiply-Add Instructions)
* **AVX512PF** (AVX-512 Prefetch Instructions)
* **AVX512ER** (AVX-512 Exponential and Reciprocal Instructions)
* **AVX512CD** (AVX-512 Conflict Detection Instructions)
* **AVX512BW** (AVX-512 Byte and Word Instructions)
* **AVX512VL** (AVX-512 Vector Length Extensions)
* **AVX512VBMI** (AVX-512 Vector Bit Manipulation Instructions)
* **MPX** (Intel MPX (Memory Protection Extensions))
* **ERMS** (Enhanced REP MOVSB/STOSB)
* **RDTSCP** (RDTSCP Instruction)
* **CX16** (CMPXCHG16B Instruction)
* **SGX** (Software Guard Extensions, with activation details)
## Performance
* **RDTSCP()** Returns current cycle count. Can be used for benchmarking.
* **SSE2SLOW** (SSE2 is supported, but usually not faster)
* **SSE3SLOW** (SSE3 is supported, but usually not faster)
* **ATOM** (Atom processor, some SSSE3 instructions are slower)
* **Cache line** (Probable size of a cache line).
* **L1, L2, L3 Cache size** on newer Intel/AMD CPUs.
## Cpu Vendor/VM
* **Intel**
* **AMD**
* **VIA**
* **Transmeta**
* **NSC**
* **KVM** (Kernel-based Virtual Machine)
* **MSVM** (Microsoft Hyper-V or Windows Virtual PC)
* **VMware**
* **XenHVM**
# installing
```go get github.com/klauspost/cpuid```
# example
```Go
package main
import (
"fmt"
"github.com/klauspost/cpuid"
)
func main() {
// Print basic CPU information:
fmt.Println("Name:", cpuid.CPU.BrandName)
fmt.Println("PhysicalCores:", cpuid.CPU.PhysicalCores)
fmt.Println("ThreadsPerCore:", cpuid.CPU.ThreadsPerCore)
fmt.Println("LogicalCores:", cpuid.CPU.LogicalCores)
fmt.Println("Family", cpuid.CPU.Family, "Model:", cpuid.CPU.Model)
fmt.Println("Features:", cpuid.CPU.Features)
fmt.Println("Cacheline bytes:", cpuid.CPU.CacheLine)
fmt.Println("L1 Data Cache:", cpuid.CPU.Cache.L1D, "bytes")
fmt.Println("L1 Instruction Cache:", cpuid.CPU.Cache.L1D, "bytes")
fmt.Println("L2 Cache:", cpuid.CPU.Cache.L2, "bytes")
fmt.Println("L3 Cache:", cpuid.CPU.Cache.L3, "bytes")
// Test if we have a specific feature:
if cpuid.CPU.SSE() {
fmt.Println("We have Streaming SIMD Extensions")
}
}
```
Sample output:
```
>go run main.go
Name: Intel(R) Core(TM) i5-2540M CPU @ 2.60GHz
PhysicalCores: 2
ThreadsPerCore: 2
LogicalCores: 4
Family 6 Model: 42
Features: CMOV,MMX,MMXEXT,SSE,SSE2,SSE3,SSSE3,SSE4.1,SSE4.2,AVX,AESNI,CLMUL
Cacheline bytes: 64
We have Streaming SIMD Extensions
```
# private package
In the "private" folder you can find an autogenerated version of the library you can include in your own packages.
For this purpose all exports are removed, and functions and constants are lowercased.
This is not a recommended way of using the library, but provided for convenience, if it is difficult for you to use external packages.
# license
This code is published under an MIT license. See LICENSE file for more information.

1030
vendor/github.com/klauspost/cpuid/cpuid.go generated vendored Normal file

File diff suppressed because it is too large

42
vendor/github.com/klauspost/cpuid/cpuid_386.s generated vendored Normal file

@ -0,0 +1,42 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
// +build 386,!gccgo
// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuid(SB), 7, $0
XORL CX, CX
MOVL op+0(FP), AX
CPUID
MOVL AX, eax+4(FP)
MOVL BX, ebx+8(FP)
MOVL CX, ecx+12(FP)
MOVL DX, edx+16(FP)
RET
// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuidex(SB), 7, $0
MOVL op+0(FP), AX
MOVL op2+4(FP), CX
CPUID
MOVL AX, eax+8(FP)
MOVL BX, ebx+12(FP)
MOVL CX, ecx+16(FP)
MOVL DX, edx+20(FP)
RET
// func xgetbv(index uint32) (eax, edx uint32)
TEXT ·asmXgetbv(SB), 7, $0
MOVL index+0(FP), CX
BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
MOVL AX, eax+4(FP)
MOVL DX, edx+8(FP)
RET
// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
TEXT ·asmRdtscpAsm(SB), 7, $0
BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
MOVL AX, eax+0(FP)
MOVL BX, ebx+4(FP)
MOVL CX, ecx+8(FP)
MOVL DX, edx+12(FP)
RET

42
vendor/github.com/klauspost/cpuid/cpuid_amd64.s generated vendored Normal file

@ -0,0 +1,42 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
//+build amd64,!gccgo
// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuid(SB), 7, $0
XORQ CX, CX
MOVL op+0(FP), AX
CPUID
MOVL AX, eax+8(FP)
MOVL BX, ebx+12(FP)
MOVL CX, ecx+16(FP)
MOVL DX, edx+20(FP)
RET
// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuidex(SB), 7, $0
MOVL op+0(FP), AX
MOVL op2+4(FP), CX
CPUID
MOVL AX, eax+8(FP)
MOVL BX, ebx+12(FP)
MOVL CX, ecx+16(FP)
MOVL DX, edx+20(FP)
RET
// func asmXgetbv(index uint32) (eax, edx uint32)
TEXT ·asmXgetbv(SB), 7, $0
MOVL index+0(FP), CX
BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
MOVL AX, eax+8(FP)
MOVL DX, edx+12(FP)
RET
// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
TEXT ·asmRdtscpAsm(SB), 7, $0
BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
MOVL AX, eax+0(FP)
MOVL BX, ebx+4(FP)
MOVL CX, ecx+8(FP)
MOVL DX, edx+12(FP)
RET

17
vendor/github.com/klauspost/cpuid/detect_intel.go generated vendored Normal file

@ -0,0 +1,17 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
// +build 386,!gccgo amd64,!gccgo
package cpuid
func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
func asmXgetbv(index uint32) (eax, edx uint32)
func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
func initCPU() {
cpuid = asmCpuid
cpuidex = asmCpuidex
xgetbv = asmXgetbv
rdtscpAsm = asmRdtscpAsm
}

23
vendor/github.com/klauspost/cpuid/detect_ref.go generated vendored Normal file

@ -0,0 +1,23 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
// +build !amd64,!386 gccgo
package cpuid
func initCPU() {
cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) {
return 0, 0, 0, 0
}
cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) {
return 0, 0, 0, 0
}
xgetbv = func(index uint32) (eax, edx uint32) {
return 0, 0
}
rdtscpAsm = func() (eax, ebx, ecx, edx uint32) {
return 0, 0, 0, 0
}
}

4
vendor/github.com/klauspost/cpuid/generate.go generated vendored Normal file

@ -0,0 +1,4 @@
package cpuid
//go:generate go run private-gen.go
//go:generate gofmt -w ./private

476
vendor/github.com/klauspost/cpuid/private-gen.go generated vendored Normal file

@ -0,0 +1,476 @@
// +build ignore
package main
import (
"bytes"
"fmt"
"go/ast"
"go/parser"
"go/printer"
"go/token"
"io"
"io/ioutil"
"log"
"os"
"reflect"
"strings"
"unicode"
"unicode/utf8"
)
var inFiles = []string{"cpuid.go", "cpuid_test.go"}
var copyFiles = []string{"cpuid_amd64.s", "cpuid_386.s", "detect_ref.go", "detect_intel.go"}
var fileSet = token.NewFileSet()
var reWrites = []rewrite{
initRewrite("CPUInfo -> cpuInfo"),
initRewrite("Vendor -> vendor"),
initRewrite("Flags -> flags"),
initRewrite("Detect -> detect"),
initRewrite("CPU -> cpu"),
}
var excludeNames = map[string]bool{"string": true, "join": true, "trim": true,
// cpuid_test.go
"t": true, "println": true, "logf": true, "log": true, "fatalf": true, "fatal": true,
}
var excludePrefixes = []string{"test", "benchmark"}
func main() {
Package := "private"
parserMode := parser.ParseComments
exported := make(map[string]rewrite)
for _, file := range inFiles {
in, err := os.Open(file)
if err != nil {
log.Fatalf("opening input", err)
}
src, err := ioutil.ReadAll(in)
if err != nil {
log.Fatalf("reading input", err)
}
astfile, err := parser.ParseFile(fileSet, file, src, parserMode)
if err != nil {
log.Fatalf("parsing input", err)
}
for _, rw := range reWrites {
astfile = rw(astfile)
}
// Inspect the AST and print all identifiers and literals.
var startDecl token.Pos
var endDecl token.Pos
ast.Inspect(astfile, func(n ast.Node) bool {
var s string
switch x := n.(type) {
case *ast.Ident:
if x.IsExported() {
t := strings.ToLower(x.Name)
for _, pre := range excludePrefixes {
if strings.HasPrefix(t, pre) {
return true
}
}
if excludeNames[t] != true {
//if x.Pos() > startDecl && x.Pos() < endDecl {
exported[x.Name] = initRewrite(x.Name + " -> " + t)
}
}
case *ast.GenDecl:
if x.Tok == token.CONST && x.Lparen > 0 {
startDecl = x.Lparen
endDecl = x.Rparen
// fmt.Printf("Decl:%s -> %s\n", fileSet.Position(startDecl), fileSet.Position(endDecl))
}
}
if s != "" {
fmt.Printf("%s:\t%s\n", fileSet.Position(n.Pos()), s)
}
return true
})
for _, rw := range exported {
astfile = rw(astfile)
}
var buf bytes.Buffer
printer.Fprint(&buf, fileSet, astfile)
// Remove package documentation and insert information
s := buf.String()
ind := strings.Index(buf.String(), "\npackage cpuid")
s = s[ind:]
s = "// Generated, DO NOT EDIT,\n" +
"// but copy it to your own project and rename the package.\n" +
"// See more at http://github.com/klauspost/cpuid\n" +
s
outputName := Package + string(os.PathSeparator) + file
err = ioutil.WriteFile(outputName, []byte(s), 0644)
if err != nil {
log.Fatalf("writing output: %s", err)
}
log.Println("Generated", outputName)
}
for _, file := range copyFiles {
dst := ""
if strings.HasPrefix(file, "cpuid") {
dst = Package + string(os.PathSeparator) + file
} else {
dst = Package + string(os.PathSeparator) + "cpuid_" + file
}
err := copyFile(file, dst)
if err != nil {
log.Fatalf("copying file: %s", err)
}
log.Println("Copied", dst)
}
}
// copyFile copies a file from src to dst. If src and dst files exist, and are
// the same, then return success. Copy the file contents from src to dst.
func copyFile(src, dst string) (err error) {
sfi, err := os.Stat(src)
if err != nil {
return
}
if !sfi.Mode().IsRegular() {
// cannot copy non-regular files (e.g., directories,
// symlinks, devices, etc.)
return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sfi.Name(), sfi.Mode().String())
}
dfi, err := os.Stat(dst)
if err != nil {
if !os.IsNotExist(err) {
return
}
} else {
if !(dfi.Mode().IsRegular()) {
return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", dfi.Name(), dfi.Mode().String())
}
if os.SameFile(sfi, dfi) {
return
}
}
err = copyFileContents(src, dst)
return
}
// copyFileContents copies the contents of the file named src to the file named
// by dst. The file will be created if it does not already exist. If the
// destination file exists, all its contents will be replaced by the contents
// of the source file.
func copyFileContents(src, dst string) (err error) {
in, err := os.Open(src)
if err != nil {
return
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return
}
defer func() {
cerr := out.Close()
if err == nil {
err = cerr
}
}()
if _, err = io.Copy(out, in); err != nil {
return
}
err = out.Sync()
return
}
type rewrite func(*ast.File) *ast.File
// Mostly copied from gofmt
func initRewrite(rewriteRule string) rewrite {
f := strings.Split(rewriteRule, "->")
if len(f) != 2 {
fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n")
os.Exit(2)
}
pattern := parseExpr(f[0], "pattern")
replace := parseExpr(f[1], "replacement")
return func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) }
}
// parseExpr parses s as an expression.
// It might make sense to expand this to allow statement patterns,
// but there are problems with preserving formatting and also
// with what a wildcard for a statement looks like.
func parseExpr(s, what string) ast.Expr {
x, err := parser.ParseExpr(s)
if err != nil {
fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err)
os.Exit(2)
}
return x
}
// Keep this function for debugging.
/*
func dump(msg string, val reflect.Value) {
fmt.Printf("%s:\n", msg)
ast.Print(fileSet, val.Interface())
fmt.Println()
}
*/
// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file.
func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File {
cmap := ast.NewCommentMap(fileSet, p, p.Comments)
m := make(map[string]reflect.Value)
pat := reflect.ValueOf(pattern)
repl := reflect.ValueOf(replace)
var rewriteVal func(val reflect.Value) reflect.Value
rewriteVal = func(val reflect.Value) reflect.Value {
// don't bother if val is invalid to start with
if !val.IsValid() {
return reflect.Value{}
}
for k := range m {
delete(m, k)
}
val = apply(rewriteVal, val)
if match(m, pat, val) {
val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos()))
}
return val
}
r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File)
r.Comments = cmap.Filter(r).Comments() // recreate comments list
return r
}
// set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y.
func set(x, y reflect.Value) {
// don't bother if x cannot be set or y is invalid
if !x.CanSet() || !y.IsValid() {
return
}
defer func() {
if x := recover(); x != nil {
if s, ok := x.(string); ok &&
(strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) {
// x cannot be set to y - ignore this rewrite
return
}
panic(x)
}
}()
x.Set(y)
}
// Values/types for special cases.
var (
objectPtrNil = reflect.ValueOf((*ast.Object)(nil))
scopePtrNil = reflect.ValueOf((*ast.Scope)(nil))
identType = reflect.TypeOf((*ast.Ident)(nil))
objectPtrType = reflect.TypeOf((*ast.Object)(nil))
positionType = reflect.TypeOf(token.NoPos)
callExprType = reflect.TypeOf((*ast.CallExpr)(nil))
scopePtrType = reflect.TypeOf((*ast.Scope)(nil))
)
// apply replaces each AST field x in val with f(x), returning val.
// To avoid extra conversions, f operates on the reflect.Value form.
func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value {
if !val.IsValid() {
return reflect.Value{}
}
// *ast.Objects introduce cycles and are likely incorrect after
// rewrite; don't follow them but replace with nil instead
if val.Type() == objectPtrType {
return objectPtrNil
}
// similarly for scopes: they are likely incorrect after a rewrite;
// replace them with nil
if val.Type() == scopePtrType {
return scopePtrNil
}
switch v := reflect.Indirect(val); v.Kind() {
case reflect.Slice:
for i := 0; i < v.Len(); i++ {
e := v.Index(i)
set(e, f(e))
}
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
e := v.Field(i)
set(e, f(e))
}
case reflect.Interface:
e := v.Elem()
set(v, f(e))
}
return val
}
func isWildcard(s string) bool {
rune, size := utf8.DecodeRuneInString(s)
return size == len(s) && unicode.IsLower(rune)
}
// match returns true if pattern matches val,
// recording wildcard submatches in m.
// If m == nil, match checks whether pattern == val.
func match(m map[string]reflect.Value, pattern, val reflect.Value) bool {
// Wildcard matches any expression. If it appears multiple
// times in the pattern, it must match the same expression
// each time.
if m != nil && pattern.IsValid() && pattern.Type() == identType {
name := pattern.Interface().(*ast.Ident).Name
if isWildcard(name) && val.IsValid() {
// wildcards only match valid (non-nil) expressions.
if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() {
if old, ok := m[name]; ok {
return match(nil, old, val)
}
m[name] = val
return true
}
}
}
// Otherwise, pattern and val must match recursively.
if !pattern.IsValid() || !val.IsValid() {
return !pattern.IsValid() && !val.IsValid()
}
if pattern.Type() != val.Type() {
return false
}
// Special cases.
switch pattern.Type() {
case identType:
// For identifiers, only the names need to match
// (and none of the other *ast.Object information).
// This is a common case, handle it all here instead
// of recursing down any further via reflection.
p := pattern.Interface().(*ast.Ident)
v := val.Interface().(*ast.Ident)
return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name
case objectPtrType, positionType:
// object pointers and token positions always match
return true
case callExprType:
		// For calls, the Ellipsis fields (token.Pos) must
		// match since that is how f(x) and f(x...) are different.
// Check them here but fall through for the remaining fields.
p := pattern.Interface().(*ast.CallExpr)
v := val.Interface().(*ast.CallExpr)
if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() {
return false
}
}
p := reflect.Indirect(pattern)
v := reflect.Indirect(val)
if !p.IsValid() || !v.IsValid() {
return !p.IsValid() && !v.IsValid()
}
switch p.Kind() {
case reflect.Slice:
if p.Len() != v.Len() {
return false
}
for i := 0; i < p.Len(); i++ {
if !match(m, p.Index(i), v.Index(i)) {
return false
}
}
return true
case reflect.Struct:
for i := 0; i < p.NumField(); i++ {
if !match(m, p.Field(i), v.Field(i)) {
return false
}
}
return true
case reflect.Interface:
return match(m, p.Elem(), v.Elem())
}
// Handle token integers, etc.
return p.Interface() == v.Interface()
}
// subst returns a copy of pattern with values from m substituted in place
// of wildcards and pos used as the position of tokens from the pattern.
// if m == nil, subst returns a copy of pattern and doesn't change the line
// number information.
func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value {
if !pattern.IsValid() {
return reflect.Value{}
}
// Wildcard gets replaced with map value.
if m != nil && pattern.Type() == identType {
name := pattern.Interface().(*ast.Ident).Name
if isWildcard(name) {
if old, ok := m[name]; ok {
return subst(nil, old, reflect.Value{})
}
}
}
if pos.IsValid() && pattern.Type() == positionType {
// use new position only if old position was valid in the first place
if old := pattern.Interface().(token.Pos); !old.IsValid() {
return pattern
}
return pos
}
// Otherwise copy.
switch p := pattern; p.Kind() {
case reflect.Slice:
v := reflect.MakeSlice(p.Type(), p.Len(), p.Len())
for i := 0; i < p.Len(); i++ {
v.Index(i).Set(subst(m, p.Index(i), pos))
}
return v
case reflect.Struct:
v := reflect.New(p.Type()).Elem()
for i := 0; i < p.NumField(); i++ {
v.Field(i).Set(subst(m, p.Field(i), pos))
}
return v
case reflect.Ptr:
v := reflect.New(p.Type()).Elem()
if elem := p.Elem(); elem.IsValid() {
v.Set(subst(m, elem, pos).Addr())
}
return v
case reflect.Interface:
v := reflect.New(p.Type()).Elem()
if elem := p.Elem(); elem.IsValid() {
v.Set(subst(m, elem, pos))
}
return v
}
return pattern
}

15
vendor/github.com/valyala/bytebufferpool/.travis.yml generated vendored Normal file

@@ -0,0 +1,15 @@
language: go
go:
- 1.6
script:
# build test for supported platforms
- GOOS=linux go build
- GOOS=darwin go build
- GOOS=freebsd go build
- GOOS=windows go build
- GOARCH=386 go build
# run tests on a standard platform
- go test -v ./...

22
vendor/github.com/valyala/bytebufferpool/LICENSE generated vendored Normal file

@@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2016 Aliaksandr Valialkin, VertaMedia
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

21
vendor/github.com/valyala/bytebufferpool/README.md generated vendored Normal file

@@ -0,0 +1,21 @@
[![Build Status](https://travis-ci.org/valyala/bytebufferpool.svg)](https://travis-ci.org/valyala/bytebufferpool)
[![GoDoc](https://godoc.org/github.com/valyala/bytebufferpool?status.svg)](http://godoc.org/github.com/valyala/bytebufferpool)
[![Go Report](http://goreportcard.com/badge/valyala/bytebufferpool)](http://goreportcard.com/report/valyala/bytebufferpool)
# bytebufferpool
An implementation of a pool of byte buffers with anti-memory-waste protection.
The pool may waste a limited amount of memory due to fragmentation.
This amount equals the maximum total size of the byte buffers
in concurrent use.
# Benchmark results
Currently bytebufferpool is the fastest and most effective buffer pool written in Go.
You can find results [here](https://omgnull.github.io/go-benchmark/buffer/).
# bytebufferpool users
* [fasthttp](https://github.com/valyala/fasthttp)
* [quicktemplate](https://github.com/valyala/quicktemplate)
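# Usage example

A minimal sketch of the Get/Put workflow, using only the API shown in bytebuffer.go and pool.go below:

```go
package main

import (
	"fmt"

	"github.com/valyala/bytebufferpool"
)

func main() {
	// Acquire an empty buffer from the default pool.
	bb := bytebufferpool.Get()

	// ByteBuffer is append-friendly: Write helpers and direct
	// appends to bb.B both work.
	bb.WriteString("hello, ")
	bb.B = append(bb.B, "bytebufferpool"...)

	fmt.Println(bb.String()) // hello, bytebufferpool

	// Return the buffer to the pool; bb must not be used afterwards.
	bytebufferpool.Put(bb)
}
```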

111
vendor/github.com/valyala/bytebufferpool/bytebuffer.go generated vendored Normal file

@@ -0,0 +1,111 @@
package bytebufferpool
import "io"
// ByteBuffer provides a byte buffer that can be used to minimize
// memory allocations.
//
// ByteBuffer may be used with functions appending data to the given []byte
// slice. See example code for details.
//
// Use Get for obtaining an empty byte buffer.
type ByteBuffer struct {
// B is a byte buffer to use in append-like workloads.
// See example code for details.
B []byte
}
// Len returns the size of the byte buffer.
func (b *ByteBuffer) Len() int {
return len(b.B)
}
// ReadFrom implements io.ReaderFrom.
//
// The function appends all the data read from r to b.
func (b *ByteBuffer) ReadFrom(r io.Reader) (int64, error) {
p := b.B
nStart := int64(len(p))
nMax := int64(cap(p))
n := nStart
if nMax == 0 {
nMax = 64
p = make([]byte, nMax)
} else {
p = p[:nMax]
}
for {
if n == nMax {
nMax *= 2
bNew := make([]byte, nMax)
copy(bNew, p)
p = bNew
}
nn, err := r.Read(p[n:])
n += int64(nn)
if err != nil {
b.B = p[:n]
n -= nStart
if err == io.EOF {
return n, nil
}
return n, err
}
}
}
// WriteTo implements io.WriterTo.
func (b *ByteBuffer) WriteTo(w io.Writer) (int64, error) {
n, err := w.Write(b.B)
return int64(n), err
}
// Bytes returns b.B, i.e. all the bytes accumulated in the buffer.
//
// The purpose of this function is bytes.Buffer compatibility.
func (b *ByteBuffer) Bytes() []byte {
return b.B
}
// Write implements io.Writer - it appends p to ByteBuffer.B
func (b *ByteBuffer) Write(p []byte) (int, error) {
b.B = append(b.B, p...)
return len(p), nil
}
// WriteByte appends the byte c to the buffer.
//
// The purpose of this function is bytes.Buffer compatibility.
//
// The function always returns nil.
func (b *ByteBuffer) WriteByte(c byte) error {
b.B = append(b.B, c)
return nil
}
// WriteString appends s to ByteBuffer.B.
func (b *ByteBuffer) WriteString(s string) (int, error) {
b.B = append(b.B, s...)
return len(s), nil
}
// Set sets ByteBuffer.B to p.
func (b *ByteBuffer) Set(p []byte) {
b.B = append(b.B[:0], p...)
}
// SetString sets ByteBuffer.B to s.
func (b *ByteBuffer) SetString(s string) {
b.B = append(b.B[:0], s...)
}
// String returns string representation of ByteBuffer.B.
func (b *ByteBuffer) String() string {
return string(b.B)
}
// Reset makes ByteBuffer.B empty.
func (b *ByteBuffer) Reset() {
b.B = b.B[:0]
}

7
vendor/github.com/valyala/bytebufferpool/doc.go generated vendored Normal file

@@ -0,0 +1,7 @@
// Package bytebufferpool implements a pool of byte buffers
// with anti-fragmentation protection.
//
// The pool may waste a limited amount of memory due to fragmentation.
// This amount equals the maximum total size of the byte buffers
// in concurrent use.
package bytebufferpool

151
vendor/github.com/valyala/bytebufferpool/pool.go generated vendored Normal file

@@ -0,0 +1,151 @@
package bytebufferpool
import (
"sort"
"sync"
"sync/atomic"
)
const (
minBitSize = 6 // 2**6=64 is a CPU cache line size
steps = 20
minSize = 1 << minBitSize
maxSize = 1 << (minBitSize + steps - 1)
calibrateCallsThreshold = 42000
maxPercentile = 0.95
)
// Pool represents a byte buffer pool.
//
// Distinct pools may be used for distinct types of byte buffers.
// Properly determined byte buffer types with their own pools may help
// reduce memory waste.
type Pool struct {
calls [steps]uint64
calibrating uint64
defaultSize uint64
maxSize uint64
pool sync.Pool
}
var defaultPool Pool
// Get returns an empty byte buffer from the pool.
//
// The returned byte buffer may be given back to the pool via a Put call.
// This reduces the number of memory allocations required for byte buffer
// management.
func Get() *ByteBuffer { return defaultPool.Get() }
// Get returns a new byte buffer with zero length.
//
// The byte buffer may be returned to the pool via Put after use
// in order to minimize GC overhead.
func (p *Pool) Get() *ByteBuffer {
v := p.pool.Get()
if v != nil {
return v.(*ByteBuffer)
}
return &ByteBuffer{
B: make([]byte, 0, atomic.LoadUint64(&p.defaultSize)),
}
}
// Put returns the byte buffer to the pool.
//
// ByteBuffer.B mustn't be touched after returning it to the pool.
// Otherwise data races will occur.
func Put(b *ByteBuffer) { defaultPool.Put(b) }
// Put releases the byte buffer obtained via Get back to the pool.
//
// The buffer mustn't be accessed after returning to the pool.
func (p *Pool) Put(b *ByteBuffer) {
idx := index(len(b.B))
if atomic.AddUint64(&p.calls[idx], 1) > calibrateCallsThreshold {
p.calibrate()
}
maxSize := int(atomic.LoadUint64(&p.maxSize))
if maxSize == 0 || cap(b.B) <= maxSize {
b.Reset()
p.pool.Put(b)
}
}
func (p *Pool) calibrate() {
if !atomic.CompareAndSwapUint64(&p.calibrating, 0, 1) {
return
}
a := make(callSizes, 0, steps)
var callsSum uint64
for i := uint64(0); i < steps; i++ {
calls := atomic.SwapUint64(&p.calls[i], 0)
callsSum += calls
a = append(a, callSize{
calls: calls,
size: minSize << i,
})
}
sort.Sort(a)
defaultSize := a[0].size
maxSize := defaultSize
maxSum := uint64(float64(callsSum) * maxPercentile)
callsSum = 0
for i := 0; i < steps; i++ {
if callsSum > maxSum {
break
}
callsSum += a[i].calls
size := a[i].size
if size > maxSize {
maxSize = size
}
}
atomic.StoreUint64(&p.defaultSize, defaultSize)
atomic.StoreUint64(&p.maxSize, maxSize)
atomic.StoreUint64(&p.calibrating, 0)
}
type callSize struct {
calls uint64
size uint64
}
type callSizes []callSize
func (ci callSizes) Len() int {
return len(ci)
}
func (ci callSizes) Less(i, j int) bool {
return ci[i].calls > ci[j].calls
}
func (ci callSizes) Swap(i, j int) {
ci[i], ci[j] = ci[j], ci[i]
}
func index(n int) int {
n--
n >>= minBitSize
idx := 0
for n > 0 {
n >>= 1
idx++
}
if idx >= steps {
idx = steps - 1
}
return idx
}

3
vendor/github.com/valyala/fasthttp/.gitignore generated vendored Normal file

@@ -0,0 +1,3 @@
tags
*.pprof
*.fasthttp.gz

36
vendor/github.com/valyala/fasthttp/.travis.yml generated vendored Normal file

@@ -0,0 +1,36 @@
language: go
go:
- tip
- 1.11
- 1.10.x
- 1.9.x
os:
- linux
- osx
matrix:
allow_failures:
- tip
fast_finish: true
before_install:
- go get -t -v ./...
# - go get -v golang.org/x/tools/cmd/goimports
script:
# TODO(@kirilldanshin)
# - test -z "$(goimports -d $(find . -type f -name '*.go' -not -path "./vendor/*"))"
# build test for supported platforms
- GOOS=linux go build
- GOOS=darwin go build
- GOOS=freebsd go build
- GOOS=windows go build
- GOARCH=386 go build
# run tests on a standard platform
- go test -v ./...
# run tests with the race detector as well
- go test -race -v ./...

25
vendor/github.com/valyala/fasthttp/LICENSE generated vendored Normal file

@@ -0,0 +1,25 @@
The MIT License (MIT)
Copyright (c) 2015-present Aliaksandr Valialkin, VertaMedia
Copyright (c) 2018-present Kirill Danshin
Copyright (c) 2018-present Erik Dubbelboer
Copyright (c) 2018-present FastHTTP Authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

584
vendor/github.com/valyala/fasthttp/README.md generated vendored Normal file

@@ -0,0 +1,584 @@
[![Build Status](https://travis-ci.org/valyala/fasthttp.svg)](https://travis-ci.org/valyala/fasthttp)
[![GoDoc](https://godoc.org/github.com/valyala/fasthttp?status.svg)](http://godoc.org/github.com/valyala/fasthttp)
[![Go Report](https://goreportcard.com/badge/github.com/valyala/fasthttp)](https://goreportcard.com/report/github.com/valyala/fasthttp)
# fasthttp
Fast HTTP implementation for Go.
Currently fasthttp is successfully used by [VertaMedia](https://vertamedia.com/)
in production, serving up to 200K rps from more than 1.5M concurrent keep-alive
connections per physical server.
[TechEmpower Benchmark round 12 results](https://www.techempower.com/benchmarks/#section=data-r12&hw=peak&test=plaintext)
[Server Benchmarks](#http-server-performance-comparison-with-nethttp)
[Client Benchmarks](#http-client-comparison-with-nethttp)
[Install](#install)
[Documentation](https://godoc.org/github.com/valyala/fasthttp)
[Examples from docs](https://godoc.org/github.com/valyala/fasthttp#pkg-examples)
[Code examples](examples)
[Awesome fasthttp tools](https://github.com/fasthttp)
[Switching from net/http to fasthttp](#switching-from-nethttp-to-fasthttp)
[Fasthttp best practices](#fasthttp-best-practices)
[Tricks with byte buffers](#tricks-with-byte-buffers)
[Related projects](#related-projects)
[FAQ](#faq)
# HTTP server performance comparison with [net/http](https://golang.org/pkg/net/http/)
In short, the fasthttp server is up to 10 times faster than net/http.
Below are benchmark results.
*GOMAXPROCS=1*
net/http server:
```
$ GOMAXPROCS=1 go test -bench=NetHTTPServerGet -benchmem -benchtime=10s
BenchmarkNetHTTPServerGet1ReqPerConn 1000000 12052 ns/op 2297 B/op 29 allocs/op
BenchmarkNetHTTPServerGet2ReqPerConn 1000000 12278 ns/op 2327 B/op 24 allocs/op
BenchmarkNetHTTPServerGet10ReqPerConn 2000000 8903 ns/op 2112 B/op 19 allocs/op
BenchmarkNetHTTPServerGet10KReqPerConn 2000000 8451 ns/op 2058 B/op 18 allocs/op
BenchmarkNetHTTPServerGet1ReqPerConn10KClients 500000 26733 ns/op 3229 B/op 29 allocs/op
BenchmarkNetHTTPServerGet2ReqPerConn10KClients 1000000 23351 ns/op 3211 B/op 24 allocs/op
BenchmarkNetHTTPServerGet10ReqPerConn10KClients 1000000 13390 ns/op 2483 B/op 19 allocs/op
BenchmarkNetHTTPServerGet100ReqPerConn10KClients 1000000 13484 ns/op 2171 B/op 18 allocs/op
```
fasthttp server:
```
$ GOMAXPROCS=1 go test -bench=kServerGet -benchmem -benchtime=10s
BenchmarkServerGet1ReqPerConn 10000000 1559 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet2ReqPerConn 10000000 1248 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet10ReqPerConn 20000000 797 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet10KReqPerConn 20000000 716 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet1ReqPerConn10KClients 10000000 1974 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet2ReqPerConn10KClients 10000000 1352 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet10ReqPerConn10KClients 20000000 789 ns/op 2 B/op 0 allocs/op
BenchmarkServerGet100ReqPerConn10KClients 20000000 604 ns/op 0 B/op 0 allocs/op
```
*GOMAXPROCS=4*
net/http server:
```
$ GOMAXPROCS=4 go test -bench=NetHTTPServerGet -benchmem -benchtime=10s
BenchmarkNetHTTPServerGet1ReqPerConn-4 3000000 4529 ns/op 2389 B/op 29 allocs/op
BenchmarkNetHTTPServerGet2ReqPerConn-4 5000000 3896 ns/op 2418 B/op 24 allocs/op
BenchmarkNetHTTPServerGet10ReqPerConn-4 5000000 3145 ns/op 2160 B/op 19 allocs/op
BenchmarkNetHTTPServerGet10KReqPerConn-4 5000000 3054 ns/op 2065 B/op 18 allocs/op
BenchmarkNetHTTPServerGet1ReqPerConn10KClients-4 1000000 10321 ns/op 3710 B/op 30 allocs/op
BenchmarkNetHTTPServerGet2ReqPerConn10KClients-4 2000000 7556 ns/op 3296 B/op 24 allocs/op
BenchmarkNetHTTPServerGet10ReqPerConn10KClients-4 5000000 3905 ns/op 2349 B/op 19 allocs/op
BenchmarkNetHTTPServerGet100ReqPerConn10KClients-4 5000000 3435 ns/op 2130 B/op 18 allocs/op
```
fasthttp server:
```
$ GOMAXPROCS=4 go test -bench=kServerGet -benchmem -benchtime=10s
BenchmarkServerGet1ReqPerConn-4 10000000 1141 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet2ReqPerConn-4 20000000 707 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet10ReqPerConn-4 30000000 341 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet10KReqPerConn-4 50000000 310 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet1ReqPerConn10KClients-4 10000000 1119 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet2ReqPerConn10KClients-4 20000000 644 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet10ReqPerConn10KClients-4 30000000 346 ns/op 0 B/op 0 allocs/op
BenchmarkServerGet100ReqPerConn10KClients-4 50000000 282 ns/op 0 B/op 0 allocs/op
```
# HTTP client comparison with net/http
In short, the fasthttp client is up to 10 times faster than net/http.
Below are benchmark results.
*GOMAXPROCS=1*
net/http client:
```
$ GOMAXPROCS=1 go test -bench='HTTPClient(Do|GetEndToEnd)' -benchmem -benchtime=10s
BenchmarkNetHTTPClientDoFastServer 1000000 12567 ns/op 2616 B/op 35 allocs/op
BenchmarkNetHTTPClientGetEndToEnd1TCP 200000 67030 ns/op 5028 B/op 56 allocs/op
BenchmarkNetHTTPClientGetEndToEnd10TCP 300000 51098 ns/op 5031 B/op 56 allocs/op
BenchmarkNetHTTPClientGetEndToEnd100TCP 300000 45096 ns/op 5026 B/op 55 allocs/op
BenchmarkNetHTTPClientGetEndToEnd1Inmemory 500000 24779 ns/op 5035 B/op 57 allocs/op
BenchmarkNetHTTPClientGetEndToEnd10Inmemory 1000000 26425 ns/op 5035 B/op 57 allocs/op
BenchmarkNetHTTPClientGetEndToEnd100Inmemory 500000 28515 ns/op 5045 B/op 57 allocs/op
BenchmarkNetHTTPClientGetEndToEnd1000Inmemory 500000 39511 ns/op 5096 B/op 56 allocs/op
```
fasthttp client:
```
$ GOMAXPROCS=1 go test -bench='kClient(Do|GetEndToEnd)' -benchmem -benchtime=10s
BenchmarkClientDoFastServer 20000000 865 ns/op 0 B/op 0 allocs/op
BenchmarkClientGetEndToEnd1TCP 1000000 18711 ns/op 0 B/op 0 allocs/op
BenchmarkClientGetEndToEnd10TCP 1000000 14664 ns/op 0 B/op 0 allocs/op
BenchmarkClientGetEndToEnd100TCP 1000000 14043 ns/op 1 B/op 0 allocs/op
BenchmarkClientGetEndToEnd1Inmemory 5000000 3965 ns/op 0 B/op 0 allocs/op
BenchmarkClientGetEndToEnd10Inmemory 3000000 4060 ns/op 0 B/op 0 allocs/op
BenchmarkClientGetEndToEnd100Inmemory 5000000 3396 ns/op 0 B/op 0 allocs/op
BenchmarkClientGetEndToEnd1000Inmemory 5000000 3306 ns/op 2 B/op 0 allocs/op
```
*GOMAXPROCS=4*
net/http client:
```
$ GOMAXPROCS=4 go test -bench='HTTPClient(Do|GetEndToEnd)' -benchmem -benchtime=10s
BenchmarkNetHTTPClientDoFastServer-4 2000000 8774 ns/op 2619 B/op 35 allocs/op
BenchmarkNetHTTPClientGetEndToEnd1TCP-4 500000 22951 ns/op 5047 B/op 56 allocs/op
BenchmarkNetHTTPClientGetEndToEnd10TCP-4 1000000 19182 ns/op 5037 B/op 55 allocs/op
BenchmarkNetHTTPClientGetEndToEnd100TCP-4 1000000 16535 ns/op 5031 B/op 55 allocs/op
BenchmarkNetHTTPClientGetEndToEnd1Inmemory-4 1000000 14495 ns/op 5038 B/op 56 allocs/op
BenchmarkNetHTTPClientGetEndToEnd10Inmemory-4 1000000 10237 ns/op 5034 B/op 56 allocs/op
BenchmarkNetHTTPClientGetEndToEnd100Inmemory-4 1000000 10125 ns/op 5045 B/op 56 allocs/op
BenchmarkNetHTTPClientGetEndToEnd1000Inmemory-4 1000000 11132 ns/op 5136 B/op 56 allocs/op
```
fasthttp client:
```
$ GOMAXPROCS=4 go test -bench='kClient(Do|GetEndToEnd)' -benchmem -benchtime=10s
BenchmarkClientDoFastServer-4 50000000 397 ns/op 0 B/op 0 allocs/op
BenchmarkClientGetEndToEnd1TCP-4 2000000 7388 ns/op 0 B/op 0 allocs/op
BenchmarkClientGetEndToEnd10TCP-4 2000000 6689 ns/op 0 B/op 0 allocs/op
BenchmarkClientGetEndToEnd100TCP-4 3000000 4927 ns/op 1 B/op 0 allocs/op
BenchmarkClientGetEndToEnd1Inmemory-4 10000000 1604 ns/op 0 B/op 0 allocs/op
BenchmarkClientGetEndToEnd10Inmemory-4 10000000 1458 ns/op 0 B/op 0 allocs/op
BenchmarkClientGetEndToEnd100Inmemory-4 10000000 1329 ns/op 0 B/op 0 allocs/op
BenchmarkClientGetEndToEnd1000Inmemory-4 10000000 1316 ns/op 5 B/op 0 allocs/op
```
# Install
```
go get -u github.com/valyala/fasthttp
```
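As a quick sanity check, here is a minimal hello-world server sketch (RequestCtx implements io.Writer, so fmt.Fprintf works on it):

```go
package main

import (
	"fmt"
	"log"

	"github.com/valyala/fasthttp"
)

func main() {
	// A request handler is a plain function taking *fasthttp.RequestCtx.
	handler := func(ctx *fasthttp.RequestCtx) {
		fmt.Fprintf(ctx, "Hello, world! Requested path is %q", ctx.Path())
	}

	if err := fasthttp.ListenAndServe(":8080", handler); err != nil {
		log.Fatalf("error in ListenAndServe: %s", err)
	}
}
```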
# Switching from net/http to fasthttp
Unfortunately, fasthttp doesn't provide an API identical to net/http's.
See the [FAQ](#faq) for details.
There is [net/http -> fasthttp handler converter](https://godoc.org/github.com/valyala/fasthttp/fasthttpadaptor),
but it is better to write fasthttp request handlers by hand in order to use
all of the fasthttp advantages (especially high performance :) ).
Important points:
* Fasthttp works with [RequestHandler functions](https://godoc.org/github.com/valyala/fasthttp#RequestHandler)
instead of objects implementing [Handler interface](https://golang.org/pkg/net/http/#Handler).
Fortunately, it is easy to pass bound struct methods to fasthttp:
```go
type MyHandler struct {
foobar string
}
// request handler in net/http style, i.e. method bound to MyHandler struct.
func (h *MyHandler) HandleFastHTTP(ctx *fasthttp.RequestCtx) {
// notice that we may access MyHandler properties here - see h.foobar.
fmt.Fprintf(ctx, "Hello, world! Requested path is %q. Foobar is %q",
ctx.Path(), h.foobar)
}
// request handler in fasthttp style, i.e. just plain function.
func fastHTTPHandler(ctx *fasthttp.RequestCtx) {
fmt.Fprintf(ctx, "Hi there! RequestURI is %q", ctx.RequestURI())
}
// pass bound struct method to fasthttp
myHandler := &MyHandler{
foobar: "foobar",
}
fasthttp.ListenAndServe(":8080", myHandler.HandleFastHTTP)
// pass plain function to fasthttp
fasthttp.ListenAndServe(":8081", fastHTTPHandler)
```
* The [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler)
accepts only one argument - [RequestCtx](https://godoc.org/github.com/valyala/fasthttp#RequestCtx).
It contains all the functionality required for http request processing
and response writing. Below is an example of a simple request handler conversion
from net/http to fasthttp.
```go
// net/http request handler
requestHandler := func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/foo":
fooHandler(w, r)
case "/bar":
barHandler(w, r)
default:
http.Error(w, "Unsupported path", http.StatusNotFound)
}
}
```
```go
// the corresponding fasthttp request handler
requestHandler := func(ctx *fasthttp.RequestCtx) {
switch string(ctx.Path()) {
case "/foo":
fooHandler(ctx)
case "/bar":
barHandler(ctx)
default:
ctx.Error("Unsupported path", fasthttp.StatusNotFound)
}
}
```
* Fasthttp allows setting response headers and writing response body
in an arbitrary order. There is no 'headers first, then body' restriction
like in net/http. The following code is valid for fasthttp:
```go
requestHandler := func(ctx *fasthttp.RequestCtx) {
// set some headers and status code first
ctx.SetContentType("foo/bar")
ctx.SetStatusCode(fasthttp.StatusOK)
// then write the first part of body
fmt.Fprintf(ctx, "this is the first part of body\n")
// then set more headers
ctx.Response.Header.Set("Foo-Bar", "baz")
// then write more body
fmt.Fprintf(ctx, "this is the second part of body\n")
// then override already written body
ctx.SetBody([]byte("this is completely new body contents"))
// then update status code
ctx.SetStatusCode(fasthttp.StatusNotFound)
// basically, anything may be updated many times before
// returning from RequestHandler.
//
// Unlike net/http fasthttp doesn't put response to the wire until
// returning from RequestHandler.
}
```
* Fasthttp doesn't provide [ServeMux](https://golang.org/pkg/net/http/#ServeMux),
but there are more powerful third-party routers and web frameworks
with fasthttp support:
* [fasthttp-routing](https://github.com/qiangxue/fasthttp-routing)
* [fasthttprouter](https://github.com/buaazp/fasthttprouter)
* [lu](https://github.com/vincentLiuxiang/lu)
* [atreugo](https://github.com/savsgio/atreugo)
net/http code with a simple ServeMux is trivially converted to fasthttp code:
```go
// net/http code
m := &http.ServeMux{}
m.HandleFunc("/foo", fooHandlerFunc)
m.HandleFunc("/bar", barHandlerFunc)
m.Handle("/baz", bazHandler)
http.ListenAndServe(":80", m)
```
```go
// the corresponding fasthttp code
m := func(ctx *fasthttp.RequestCtx) {
switch string(ctx.Path()) {
case "/foo":
fooHandlerFunc(ctx)
case "/bar":
barHandlerFunc(ctx)
case "/baz":
bazHandler.HandlerFunc(ctx)
default:
ctx.Error("not found", fasthttp.StatusNotFound)
}
}
fasthttp.ListenAndServe(":80", m)
```
* net/http -> fasthttp conversion table:
* All the pseudocode below assumes w, r and ctx have these types:
```go
var (
w http.ResponseWriter
r *http.Request
ctx *fasthttp.RequestCtx
)
```
* r.Body -> [ctx.PostBody()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.PostBody)
* r.URL.Path -> [ctx.Path()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Path)
* r.URL -> [ctx.URI()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.URI)
* r.Method -> [ctx.Method()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Method)
* r.Header -> [ctx.Request.Header](https://godoc.org/github.com/valyala/fasthttp#RequestHeader)
* r.Header.Get() -> [ctx.Request.Header.Peek()](https://godoc.org/github.com/valyala/fasthttp#RequestHeader.Peek)
* r.Host -> [ctx.Host()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Host)
* r.Form -> [ctx.QueryArgs()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.QueryArgs) +
[ctx.PostArgs()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.PostArgs)
* r.PostForm -> [ctx.PostArgs()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.PostArgs)
* r.FormValue() -> [ctx.FormValue()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.FormValue)
* r.FormFile() -> [ctx.FormFile()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.FormFile)
* r.MultipartForm -> [ctx.MultipartForm()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.MultipartForm)
* r.RemoteAddr -> [ctx.RemoteAddr()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.RemoteAddr)
* r.RequestURI -> [ctx.RequestURI()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.RequestURI)
* r.TLS -> [ctx.IsTLS()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.IsTLS)
* r.Cookie() -> [ctx.Request.Header.Cookie()](https://godoc.org/github.com/valyala/fasthttp#RequestHeader.Cookie)
* r.Referer() -> [ctx.Referer()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Referer)
* r.UserAgent() -> [ctx.UserAgent()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.UserAgent)
* w.Header() -> [ctx.Response.Header](https://godoc.org/github.com/valyala/fasthttp#ResponseHeader)
* w.Header().Set() -> [ctx.Response.Header.Set()](https://godoc.org/github.com/valyala/fasthttp#ResponseHeader.Set)
* w.Header().Set("Content-Type") -> [ctx.SetContentType()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetContentType)
* w.Header().Set("Set-Cookie") -> [ctx.Response.Header.SetCookie()](https://godoc.org/github.com/valyala/fasthttp#ResponseHeader.SetCookie)
* w.Write() -> [ctx.Write()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Write),
[ctx.SetBody()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetBody),
[ctx.SetBodyStream()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetBodyStream),
[ctx.SetBodyStreamWriter()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetBodyStreamWriter)
* w.WriteHeader() -> [ctx.SetStatusCode()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetStatusCode)
* w.(http.Hijacker).Hijack() -> [ctx.Hijack()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Hijack)
* http.Error() -> [ctx.Error()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Error)
* http.FileServer() -> [fasthttp.FSHandler()](https://godoc.org/github.com/valyala/fasthttp#FSHandler),
[fasthttp.FS](https://godoc.org/github.com/valyala/fasthttp#FS)
* http.ServeFile() -> [fasthttp.ServeFile()](https://godoc.org/github.com/valyala/fasthttp#ServeFile)
* http.Redirect() -> [ctx.Redirect()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Redirect)
* http.NotFound() -> [ctx.NotFound()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.NotFound)
* http.StripPrefix() -> [fasthttp.PathRewriteFunc](https://godoc.org/github.com/valyala/fasthttp#PathRewriteFunc)
* *VERY IMPORTANT!* Fasthttp disallows holding references
to [RequestCtx](https://godoc.org/github.com/valyala/fasthttp#RequestCtx) or to its
members after returning from [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler).
Otherwise [data races](http://blog.golang.org/race-detector) are inevitable.
Carefully inspect all net/http request handlers converted to fasthttp for whether
they retain references to RequestCtx or to its members after returning.
RequestCtx provides the following _band aids_ for this case:
* Wrap RequestHandler into [TimeoutHandler](https://godoc.org/github.com/valyala/fasthttp#TimeoutHandler).
* Call [TimeoutError](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.TimeoutError)
before returning from RequestHandler if there are references to RequestCtx or to its members.
See [the example](https://godoc.org/github.com/valyala/fasthttp#example-RequestCtx-TimeoutError)
for more details.
Use this brilliant tool - [race detector](http://blog.golang.org/race-detector) -
for detecting and eliminating data races in your program. If you detected
data race related to fasthttp in your program, then there is high probability
you forgot calling [TimeoutError](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.TimeoutError)
before returning from [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler).
* Blind switching from net/http to fasthttp won't give you a performance boost.
While fasthttp is optimized for speed, its performance may easily be saturated
by a slow [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler).
So [profile](http://blog.golang.org/profiling-go-programs) and optimize your
code after switching to fasthttp. For instance, use [quicktemplate](https://github.com/valyala/quicktemplate)
instead of [html/template](https://golang.org/pkg/html/template/).
* See also [fasthttputil](https://godoc.org/github.com/valyala/fasthttp/fasthttputil),
[fasthttpadaptor](https://godoc.org/github.com/valyala/fasthttp/fasthttpadaptor) and
[expvarhandler](https://godoc.org/github.com/valyala/fasthttp/expvarhandler).
# Performance optimization tips for multi-core systems
* Use a [reuseport](https://godoc.org/github.com/valyala/fasthttp/reuseport) listener (see the sketch after this list).
* Run a separate server instance per CPU core with GOMAXPROCS=1.
* Pin each server instance to a separate CPU core using [taskset](http://linux.die.net/man/1/taskset).
* Ensure the interrupts of multiqueue network card are evenly distributed between CPU cores.
See [this article](https://blog.cloudflare.com/how-to-achieve-low-latency/) for details.
* Use Go 1.6 as it provides some considerable performance improvements.
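Here is a sketch of the reuseport tip above; GOMAXPROCS=1 and CPU pinning are assumed to be applied externally (via the environment and taskset) when launching one such process per core:

```go
package main

import (
	"log"

	"github.com/valyala/fasthttp"
	"github.com/valyala/fasthttp/reuseport"
)

func main() {
	// SO_REUSEPORT lets several independent server processes
	// accept connections on the same address.
	ln, err := reuseport.Listen("tcp4", "0.0.0.0:8080")
	if err != nil {
		log.Fatalf("error in reuseport.Listen: %s", err)
	}

	handler := func(ctx *fasthttp.RequestCtx) {
		ctx.SetBodyString("hello")
	}

	if err := fasthttp.Serve(ln, handler); err != nil {
		log.Fatalf("error in Serve: %s", err)
	}
}
```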
# Fasthttp best practices
* Do not allocate objects and `[]byte` buffers - just reuse them as much
as possible. Fasthttp API design encourages this.
* [sync.Pool](https://golang.org/pkg/sync/#Pool) is your best friend.
* [Profile your program](http://blog.golang.org/profiling-go-programs)
in production.
`go tool pprof --alloc_objects your-program mem.pprof` usually gives better
insights for optimization opportunities than `go tool pprof your-program cpu.pprof`.
* Write [tests and benchmarks](https://golang.org/pkg/testing/) for hot paths.
* Avoid conversion between `[]byte` and `string`, since this may result in memory
allocation+copy. Fasthttp API provides functions for both `[]byte` and `string` -
use these functions instead of converting manually between `[]byte` and `string`.
There are some exceptions - see [this wiki page](https://github.com/golang/go/wiki/CompilerOptimizations#string-and-byte)
for more details.
* Verify your tests and production code under
[race detector](https://golang.org/doc/articles/race_detector.html) on a regular basis.
* Prefer [quicktemplate](https://github.com/valyala/quicktemplate) instead of
[html/template](https://golang.org/pkg/html/template/) in your webserver.
# Tricks with `[]byte` buffers
The following tricks are used by fasthttp. Use them in your code too.
* Standard Go functions accept nil buffers
```go
var (
// both buffers are uninitialized
dst []byte
src []byte
)
dst = append(dst, src...) // is legal if dst is nil and/or src is nil
copy(dst, src) // is legal if dst is nil and/or src is nil
(string(src) == "") // is true if src is nil
(len(src) == 0) // is true if src is nil
src = src[:0] // works like a charm with nil src
// this for loop doesn't panic if src is nil
for i, ch := range src {
doSomething(i, ch)
}
```
So throw away nil checks for `[]byte` buffers from your code. For example,
```go
srcLen := 0
if src != nil {
srcLen = len(src)
}
```
becomes
```go
srcLen := len(src)
```
* String may be appended to `[]byte` buffer with `append`
```go
dst = append(dst, "foobar"...)
```
* `[]byte` buffer may be extended to its capacity.
```go
buf := make([]byte, 100)
a := buf[:10] // len(a) == 10, cap(a) == 100.
b := a[:100] // is valid, since cap(a) == 100.
```
* All fasthttp functions accept nil `[]byte` buffer
```go
statusCode, body, err := fasthttp.Get(nil, "http://google.com/")
uintBuf := fasthttp.AppendUint(nil, 1234)
```
# Related projects
* [fasthttp](https://github.com/fasthttp) - various useful
helpers for projects based on fasthttp.
* [fasthttp-routing](https://github.com/qiangxue/fasthttp-routing) - fast and
powerful routing package for fasthttp servers.
* [fasthttprouter](https://github.com/buaazp/fasthttprouter) - a high
performance fasthttp request router that scales well.
* [gramework](https://github.com/gramework/gramework) - a web framework made by one of fasthttp maintainers
* [lu](https://github.com/vincentLiuxiang/lu) - a high performance
go middleware web framework which is based on fasthttp.
* [websocket](https://github.com/fasthttp/websocket) - Gorilla-based
websocket implementation for fasthttp.
* [fasthttpsession](https://github.com/phachon/fasthttpsession) - a fast and powerful session package for fasthttp servers.
* [atreugo](https://github.com/savsgio/atreugo) - Micro-framework to make simple the use of routing and middlewares.
# FAQ
* *Why create yet another HTTP package instead of optimizing net/http?*
Because the net/http API limits many optimization opportunities.
For example:
* net/http Request object lifetime isn't limited by request handler execution
time. So the server must create a new request object for each request instead
of reusing existing objects like fasthttp does.
* net/http headers are stored in a `map[string][]string`. So the server
must parse all the headers, convert them from `[]byte` to `string` and put
them into the map before calling user-provided request handler.
This all requires unnecessary memory allocations avoided by fasthttp.
* The net/http client API requires creating a new response object for each request.
* *Why is the fasthttp API incompatible with net/http?*
Because the net/http API limits many optimization opportunities. See the answer
above for more details. Also, certain parts of the net/http API are suboptimal
to use:
* Compare [net/http connection hijacking](https://golang.org/pkg/net/http/#Hijacker)
to [fasthttp connection hijacking](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Hijack).
* Compare [net/http Request.Body reading](https://golang.org/pkg/net/http/#Request)
to [fasthttp request body reading](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.PostBody).
* *Why doesn't fasthttp support HTTP/2.0 and WebSockets?*
[HTTP/2.0 support](https://github.com/fasthttp/http2) is in progress. [WebSockets](https://github.com/fasthttp/websockets) have already been implemented.
Third parties also may use [RequestCtx.Hijack](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Hijack)
for implementing these goodies.
* *Are there known net/http advantages compared to fasthttp?*
Yes:
* net/http supports [HTTP/2.0 starting from go1.6](https://http2.golang.org/).
* net/http API is stable, while fasthttp API constantly evolves.
* net/http handles more HTTP corner cases.
* net/http should contain fewer bugs, since it is used and tested by a much
wider audience.
* net/http works on Go older than 1.5.
* *Why does the fasthttp API prefer returning `[]byte` instead of `string`?*
Because `[]byte` to `string` conversion isn't free - it requires a memory
allocation and copy. Feel free to wrap the returned `[]byte` result in
`string()` if you prefer working with strings instead of byte slices.
But be aware that this has non-zero overhead.
* *Which Go versions are supported by fasthttp?*
Go 1.5+. Older versions won't be supported, since their standard packages
[miss useful functions](https://github.com/valyala/fasthttp/issues/5).
**NOTE**: Go 1.9.7 is the oldest tested version. We recommend you update as soon as you can. As of 1.11.3 we will drop 1.9.x support.
* *Please provide real benchmark data and server information*
See [this issue](https://github.com/valyala/fasthttp/issues/4).
* *Are there plans to add request routing to fasthttp?*
There are no plans to add request routing into fasthttp.
Use third-party routers and web frameworks with fasthttp support:
* [fasthttp-routing](https://github.com/qiangxue/fasthttp-routing)
* [fasthttprouter](https://github.com/buaazp/fasthttprouter)
* [gramework](https://github.com/gramework/gramework)
* [lu](https://github.com/vincentLiuxiang/lu)
* [atreugo](https://github.com/savsgio/atreugo)
See also [this issue](https://github.com/valyala/fasthttp/issues/9) for more info.
* *I detected data race in fasthttp!*
Cool! [File a bug](https://github.com/valyala/fasthttp/issues/new). But before
doing so, check the following in your code:
* Make sure there are no references to [RequestCtx](https://godoc.org/github.com/valyala/fasthttp#RequestCtx)
or to its members after returning from [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler).
* Make sure you call [TimeoutError](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.TimeoutError)
before returning from [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler)
if there are references to [RequestCtx](https://godoc.org/github.com/valyala/fasthttp#RequestCtx)
or to its members, which may be accessed by other goroutines.
* *I didn't find an answer for my question here*
Try exploring [these questions](https://github.com/valyala/fasthttp/issues?q=label%3Aquestion).

4
vendor/github.com/valyala/fasthttp/TODO generated vendored Normal file

@@ -0,0 +1,4 @@
- SessionClient with referer and cookies support.
- ProxyHandler similar to FSHandler.
- WebSockets. See https://tools.ietf.org/html/rfc6455 .
- HTTP/2.0. See https://tools.ietf.org/html/rfc7540 .

522
vendor/github.com/valyala/fasthttp/args.go generated vendored Normal file

@@ -0,0 +1,522 @@
package fasthttp
import (
"bytes"
"errors"
"io"
"sync"
"github.com/valyala/bytebufferpool"
)
// AcquireArgs returns an empty Args object from the pool.
//
// The returned Args may be returned to the pool with ReleaseArgs
// when no longer needed. This allows reducing GC load.
func AcquireArgs() *Args {
return argsPool.Get().(*Args)
}
// ReleaseArgs returns the object acquired via AcquireArgs to the pool.
//
// Do not access the released Args object, otherwise data races may occur.
func ReleaseArgs(a *Args) {
a.Reset()
argsPool.Put(a)
}
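// Typical usage sketch (process is a placeholder for application code):
//
//	a := AcquireArgs()
//	a.Set("key", "value")
//	process(a.QueryString()) // result is valid until the next Args method call
//	ReleaseArgs(a)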
var argsPool = &sync.Pool{
New: func() interface{} {
return &Args{}
},
}
// Args represents query arguments.
//
// Copying Args instances is forbidden. Create new instances instead
// and use CopyTo().
//
// Args instance MUST NOT be used from concurrently running goroutines.
type Args struct {
noCopy noCopy
args []argsKV
buf []byte
}
type argsKV struct {
key []byte
value []byte
}
// Reset clears query args.
func (a *Args) Reset() {
a.args = a.args[:0]
}
// CopyTo copies all args to dst.
func (a *Args) CopyTo(dst *Args) {
dst.Reset()
dst.args = copyArgs(dst.args, a.args)
}
// VisitAll calls f for each existing arg.
//
// f must not retain references to key and value after returning.
// Make key and/or value copies if you need to store them after returning.
func (a *Args) VisitAll(f func(key, value []byte)) {
visitArgs(a.args, f)
}
// Len returns the number of query args.
func (a *Args) Len() int {
return len(a.args)
}
// Parse parses the given string containing query args.
func (a *Args) Parse(s string) {
a.buf = append(a.buf[:0], s...)
a.ParseBytes(a.buf)
}
// ParseBytes parses the given b containing query args.
func (a *Args) ParseBytes(b []byte) {
a.Reset()
var s argsScanner
s.b = b
var kv *argsKV
a.args, kv = allocArg(a.args)
for s.next(kv) {
if len(kv.key) > 0 || len(kv.value) > 0 {
a.args, kv = allocArg(a.args)
}
}
a.args = releaseArg(a.args)
}
// String returns string representation of query args.
func (a *Args) String() string {
return string(a.QueryString())
}
// QueryString returns query string for the args.
//
// The returned value is valid until the next call to Args methods.
func (a *Args) QueryString() []byte {
a.buf = a.AppendBytes(a.buf[:0])
return a.buf
}
// AppendBytes appends query string to dst and returns the extended dst.
func (a *Args) AppendBytes(dst []byte) []byte {
for i, n := 0, len(a.args); i < n; i++ {
kv := &a.args[i]
dst = AppendQuotedArg(dst, kv.key)
if len(kv.value) > 0 {
dst = append(dst, '=')
dst = AppendQuotedArg(dst, kv.value)
}
if i+1 < n {
dst = append(dst, '&')
}
}
return dst
}
// WriteTo writes query string to w.
//
// WriteTo implements io.WriterTo interface.
func (a *Args) WriteTo(w io.Writer) (int64, error) {
n, err := w.Write(a.QueryString())
return int64(n), err
}
// Del deletes argument with the given key from query args.
func (a *Args) Del(key string) {
a.args = delAllArgs(a.args, key)
}
// DelBytes deletes argument with the given key from query args.
func (a *Args) DelBytes(key []byte) {
a.args = delAllArgs(a.args, b2s(key))
}
// Add adds 'key=value' argument.
//
// Multiple values for the same key may be added.
func (a *Args) Add(key, value string) {
a.args = appendArg(a.args, key, value)
}
// AddBytesK adds 'key=value' argument.
//
// Multiple values for the same key may be added.
func (a *Args) AddBytesK(key []byte, value string) {
a.args = appendArg(a.args, b2s(key), value)
}
// AddBytesV adds 'key=value' argument.
//
// Multiple values for the same key may be added.
func (a *Args) AddBytesV(key string, value []byte) {
a.args = appendArg(a.args, key, b2s(value))
}
// AddBytesKV adds 'key=value' argument.
//
// Multiple values for the same key may be added.
func (a *Args) AddBytesKV(key, value []byte) {
a.args = appendArg(a.args, b2s(key), b2s(value))
}
// Set sets 'key=value' argument.
func (a *Args) Set(key, value string) {
a.args = setArg(a.args, key, value)
}
// SetBytesK sets 'key=value' argument.
func (a *Args) SetBytesK(key []byte, value string) {
a.args = setArg(a.args, b2s(key), value)
}
// SetBytesV sets 'key=value' argument.
func (a *Args) SetBytesV(key string, value []byte) {
a.args = setArg(a.args, key, b2s(value))
}
// SetBytesKV sets 'key=value' argument.
func (a *Args) SetBytesKV(key, value []byte) {
a.args = setArgBytes(a.args, key, value)
}
// Peek returns query arg value for the given key.
//
// Returned value is valid until the next Args call.
func (a *Args) Peek(key string) []byte {
return peekArgStr(a.args, key)
}
// PeekBytes returns query arg value for the given key.
//
// Returned value is valid until the next Args call.
func (a *Args) PeekBytes(key []byte) []byte {
return peekArgBytes(a.args, key)
}
// PeekMulti returns all the arg values for the given key.
func (a *Args) PeekMulti(key string) [][]byte {
var values [][]byte
a.VisitAll(func(k, v []byte) {
if string(k) == key {
values = append(values, v)
}
})
return values
}
// PeekMultiBytes returns all the arg values for the given key.
func (a *Args) PeekMultiBytes(key []byte) [][]byte {
return a.PeekMulti(b2s(key))
}
// Has returns true if the given key exists in Args.
func (a *Args) Has(key string) bool {
return hasArg(a.args, key)
}
// HasBytes returns true if the given key exists in Args.
func (a *Args) HasBytes(key []byte) bool {
return hasArg(a.args, b2s(key))
}
// ErrNoArgValue is returned when Args value with the given key is missing.
var ErrNoArgValue = errors.New("no Args value for the given key")
// GetUint returns uint value for the given key.
func (a *Args) GetUint(key string) (int, error) {
value := a.Peek(key)
if len(value) == 0 {
return -1, ErrNoArgValue
}
return ParseUint(value)
}
// SetUint sets uint value for the given key.
func (a *Args) SetUint(key string, value int) {
bb := bytebufferpool.Get()
bb.B = AppendUint(bb.B[:0], value)
a.SetBytesV(key, bb.B)
bytebufferpool.Put(bb)
}
// SetUintBytes sets uint value for the given key.
func (a *Args) SetUintBytes(key []byte, value int) {
a.SetUint(b2s(key), value)
}
// GetUintOrZero returns uint value for the given key.
//
// Zero (0) is returned on error.
func (a *Args) GetUintOrZero(key string) int {
n, err := a.GetUint(key)
if err != nil {
n = 0
}
return n
}
// GetUfloat returns ufloat value for the given key.
func (a *Args) GetUfloat(key string) (float64, error) {
value := a.Peek(key)
if len(value) == 0 {
return -1, ErrNoArgValue
}
return ParseUfloat(value)
}
// GetUfloatOrZero returns ufloat value for the given key.
//
// Zero (0) is returned on error.
func (a *Args) GetUfloatOrZero(key string) float64 {
f, err := a.GetUfloat(key)
if err != nil {
f = 0
}
return f
}
// GetBool returns boolean value for the given key.
//
// true is returned for "1", "t", "T", "true", "TRUE", "True", "y", "yes", "Y", "YES", "Yes",
// otherwise false is returned.
func (a *Args) GetBool(key string) bool {
switch b2s(a.Peek(key)) {
// Support the same true cases as strconv.ParseBool
// See: https://github.com/golang/go/blob/4e1b11e2c9bdb0ddea1141eed487be1a626ff5be/src/strconv/atob.go#L12
// and Y and Yes versions.
case "1", "t", "T", "true", "TRUE", "True", "y", "yes", "Y", "YES", "Yes":
return true
default:
return false
}
}
func visitArgs(args []argsKV, f func(k, v []byte)) {
for i, n := 0, len(args); i < n; i++ {
kv := &args[i]
f(kv.key, kv.value)
}
}
func copyArgs(dst, src []argsKV) []argsKV {
if cap(dst) < len(src) {
tmp := make([]argsKV, len(src))
copy(tmp, dst)
dst = tmp
}
n := len(src)
dst = dst[:n]
for i := 0; i < n; i++ {
dstKV := &dst[i]
srcKV := &src[i]
dstKV.key = append(dstKV.key[:0], srcKV.key...)
dstKV.value = append(dstKV.value[:0], srcKV.value...)
}
return dst
}
func delAllArgsBytes(args []argsKV, key []byte) []argsKV {
return delAllArgs(args, b2s(key))
}
func delAllArgs(args []argsKV, key string) []argsKV {
for i, n := 0, len(args); i < n; i++ {
kv := &args[i]
if key == string(kv.key) {
tmp := *kv
copy(args[i:], args[i+1:])
n--
args[n] = tmp
args = args[:n]
}
}
return args
}
func setArgBytes(h []argsKV, key, value []byte) []argsKV {
return setArg(h, b2s(key), b2s(value))
}
func setArg(h []argsKV, key, value string) []argsKV {
n := len(h)
for i := 0; i < n; i++ {
kv := &h[i]
if key == string(kv.key) {
kv.value = append(kv.value[:0], value...)
return h
}
}
return appendArg(h, key, value)
}
func appendArgBytes(h []argsKV, key, value []byte) []argsKV {
return appendArg(h, b2s(key), b2s(value))
}
func appendArg(args []argsKV, key, value string) []argsKV {
var kv *argsKV
args, kv = allocArg(args)
kv.key = append(kv.key[:0], key...)
kv.value = append(kv.value[:0], value...)
return args
}
func allocArg(h []argsKV) ([]argsKV, *argsKV) {
n := len(h)
if cap(h) > n {
h = h[:n+1]
} else {
h = append(h, argsKV{})
}
return h, &h[n]
}
func releaseArg(h []argsKV) []argsKV {
return h[:len(h)-1]
}
func hasArg(h []argsKV, key string) bool {
for i, n := 0, len(h); i < n; i++ {
kv := &h[i]
if key == string(kv.key) {
return true
}
}
return false
}
func peekArgBytes(h []argsKV, k []byte) []byte {
for i, n := 0, len(h); i < n; i++ {
kv := &h[i]
if bytes.Equal(kv.key, k) {
return kv.value
}
}
return nil
}
func peekArgStr(h []argsKV, k string) []byte {
for i, n := 0, len(h); i < n; i++ {
kv := &h[i]
if string(kv.key) == k {
return kv.value
}
}
return nil
}
type argsScanner struct {
b []byte
}
func (s *argsScanner) next(kv *argsKV) bool {
if len(s.b) == 0 {
return false
}
isKey := true
k := 0
for i, c := range s.b {
switch c {
case '=':
if isKey {
isKey = false
kv.key = decodeArgAppend(kv.key[:0], s.b[:i])
k = i + 1
}
case '&':
if isKey {
kv.key = decodeArgAppend(kv.key[:0], s.b[:i])
kv.value = kv.value[:0]
} else {
kv.value = decodeArgAppend(kv.value[:0], s.b[k:i])
}
s.b = s.b[i+1:]
return true
}
}
if isKey {
kv.key = decodeArgAppend(kv.key[:0], s.b)
kv.value = kv.value[:0]
} else {
kv.value = decodeArgAppend(kv.value[:0], s.b[k:])
}
s.b = s.b[len(s.b):]
return true
}
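// decodeArgAppend appends url-decoded src to dst: '+' becomes ' ', %xx
// sequences are hex-decoded, and malformed escapes are copied through as-is.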
func decodeArgAppend(dst, src []byte) []byte {
if bytes.IndexByte(src, '%') < 0 && bytes.IndexByte(src, '+') < 0 {
// fast path: src doesn't contain encoded chars
return append(dst, src...)
}
// slow path
for i := 0; i < len(src); i++ {
c := src[i]
if c == '%' {
if i+2 >= len(src) {
return append(dst, src[i:]...)
}
x2 := hex2intTable[src[i+2]]
x1 := hex2intTable[src[i+1]]
if x1 == 16 || x2 == 16 {
dst = append(dst, '%')
} else {
dst = append(dst, x1<<4|x2)
i += 2
}
} else if c == '+' {
dst = append(dst, ' ')
} else {
dst = append(dst, c)
}
}
return dst
}
// decodeArgAppendNoPlus is almost identical to decodeArgAppend, but it doesn't
// substitute '+' with ' '.
//
// The function is copy-pasted from decodeArgAppend due to the performance
// reasons only.
func decodeArgAppendNoPlus(dst, src []byte) []byte {
if bytes.IndexByte(src, '%') < 0 {
// fast path: src doesn't contain encoded chars
return append(dst, src...)
}
// slow path
for i := 0; i < len(src); i++ {
c := src[i]
if c == '%' {
if i+2 >= len(src) {
return append(dst, src[i:]...)
}
x2 := hex2intTable[src[i+2]]
x1 := hex2intTable[src[i+1]]
if x1 == 16 || x2 == 16 {
dst = append(dst, '%')
} else {
dst = append(dst, x1<<4|x2)
i += 2
}
} else {
dst = append(dst, c)
}
}
return dst
}

456
vendor/github.com/valyala/fasthttp/bytesconv.go generated vendored Normal file

@@ -0,0 +1,456 @@
package fasthttp
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"math"
"net"
"reflect"
"strings"
"sync"
"time"
"unsafe"
)
// AppendHTMLEscape appends html-escaped s to dst and returns the extended dst.
func AppendHTMLEscape(dst []byte, s string) []byte {
if strings.IndexByte(s, '<') < 0 &&
strings.IndexByte(s, '>') < 0 &&
strings.IndexByte(s, '"') < 0 &&
strings.IndexByte(s, '\'') < 0 {
// fast path - nothing to escape
return append(dst, s...)
}
// slow path
var prev int
var sub string
for i, n := 0, len(s); i < n; i++ {
sub = ""
switch s[i] {
case '<':
sub = "&lt;"
case '>':
sub = "&gt;"
case '"':
sub = "&quot;"
case '\'':
sub = "&#39;"
}
if len(sub) > 0 {
dst = append(dst, s[prev:i]...)
dst = append(dst, sub...)
prev = i + 1
}
}
return append(dst, s[prev:]...)
}
// AppendHTMLEscapeBytes appends html-escaped s to dst and returns
// the extended dst.
func AppendHTMLEscapeBytes(dst, s []byte) []byte {
return AppendHTMLEscape(dst, b2s(s))
}
// AppendIPv4 appends string representation of the given ip v4 to dst
// and returns the extended dst.
func AppendIPv4(dst []byte, ip net.IP) []byte {
ip = ip.To4()
if ip == nil {
return append(dst, "non-v4 ip passed to AppendIPv4"...)
}
dst = AppendUint(dst, int(ip[0]))
for i := 1; i < 4; i++ {
dst = append(dst, '.')
dst = AppendUint(dst, int(ip[i]))
}
return dst
}
var errEmptyIPStr = errors.New("empty ip address string")
// ParseIPv4 parses ip address from ipStr into dst and returns the extended dst.
func ParseIPv4(dst net.IP, ipStr []byte) (net.IP, error) {
if len(ipStr) == 0 {
return dst, errEmptyIPStr
}
if len(dst) < net.IPv4len {
dst = make([]byte, net.IPv4len)
}
copy(dst, net.IPv4zero)
dst = dst.To4()
if dst == nil {
panic("BUG: dst must not be nil")
}
b := ipStr
for i := 0; i < 3; i++ {
n := bytes.IndexByte(b, '.')
if n < 0 {
return dst, fmt.Errorf("cannot find dot in ipStr %q", ipStr)
}
v, err := ParseUint(b[:n])
if err != nil {
return dst, fmt.Errorf("cannot parse ipStr %q: %s", ipStr, err)
}
if v > 255 {
return dst, fmt.Errorf("cannot parse ipStr %q: ip part cannot exceed 255: parsed %d", ipStr, v)
}
dst[i] = byte(v)
b = b[n+1:]
}
v, err := ParseUint(b)
if err != nil {
return dst, fmt.Errorf("cannot parse ipStr %q: %s", ipStr, err)
}
if v > 255 {
return dst, fmt.Errorf("cannot parse ipStr %q: ip part cannot exceed 255: parsed %d", ipStr, v)
}
dst[3] = byte(v)
return dst, nil
}
// AppendHTTPDate appends HTTP-compliant (RFC1123) representation of date
// to dst and returns the extended dst.
func AppendHTTPDate(dst []byte, date time.Time) []byte {
dst = date.In(time.UTC).AppendFormat(dst, time.RFC1123)
copy(dst[len(dst)-3:], strGMT)
return dst
}
// ParseHTTPDate parses HTTP-compliant (RFC1123) date.
func ParseHTTPDate(date []byte) (time.Time, error) {
return time.Parse(time.RFC1123, b2s(date))
}
// AppendUint appends n to dst and returns the extended dst.
func AppendUint(dst []byte, n int) []byte {
if n < 0 {
panic("BUG: int must be positive")
}
var b [20]byte
buf := b[:]
i := len(buf)
var q int
for n >= 10 {
i--
q = n / 10
buf[i] = '0' + byte(n-q*10)
n = q
}
i--
buf[i] = '0' + byte(n)
dst = append(dst, buf[i:]...)
return dst
}
// ParseUint parses uint from buf.
func ParseUint(buf []byte) (int, error) {
v, n, err := parseUintBuf(buf)
if n != len(buf) {
return -1, errUnexpectedTrailingChar
}
return v, err
}
var (
errEmptyInt = errors.New("empty integer")
errUnexpectedFirstChar = errors.New("unexpected first char found. Expecting 0-9")
errUnexpectedTrailingChar = errors.New("unexpected trailing char found. Expecting 0-9")
errTooLongInt = errors.New("too long int")
)
func parseUintBuf(b []byte) (int, int, error) {
n := len(b)
if n == 0 {
return -1, 0, errEmptyInt
}
v := 0
for i := 0; i < n; i++ {
c := b[i]
k := c - '0'
if k > 9 {
if i == 0 {
return -1, i, errUnexpectedFirstChar
}
return v, i, nil
}
if i >= maxIntChars {
return -1, i, errTooLongInt
}
v = 10*v + int(k)
}
return v, n, nil
}
var (
errEmptyFloat = errors.New("empty float number")
errDuplicateFloatPoint = errors.New("duplicate point found in float number")
errUnexpectedFloatEnd = errors.New("unexpected end of float number")
errInvalidFloatExponent = errors.New("invalid float number exponent")
errUnexpectedFloatChar = errors.New("unexpected char found in float number")
)
// ParseUfloat parses unsigned float from buf.
func ParseUfloat(buf []byte) (float64, error) {
if len(buf) == 0 {
return -1, errEmptyFloat
}
b := buf
var v uint64
var offset = 1.0
var pointFound bool
for i, c := range b {
if c < '0' || c > '9' {
if c == '.' {
if pointFound {
return -1, errDuplicateFloatPoint
}
pointFound = true
continue
}
if c == 'e' || c == 'E' {
if i+1 >= len(b) {
return -1, errUnexpectedFloatEnd
}
b = b[i+1:]
minus := -1
switch b[0] {
case '+':
b = b[1:]
minus = 1
case '-':
b = b[1:]
default:
minus = 1
}
vv, err := ParseUint(b)
if err != nil {
return -1, errInvalidFloatExponent
}
return float64(v) * offset * math.Pow10(minus*int(vv)), nil
}
return -1, errUnexpectedFloatChar
}
v = 10*v + uint64(c-'0')
if pointFound {
offset /= 10
}
}
return float64(v) * offset, nil
}
var (
errEmptyHexNum = errors.New("empty hex number")
errTooLargeHexNum = errors.New("too large hex number")
)
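// readHexInt reads a hexadecimal integer (e.g. an HTTP chunk size) from r,
// stopping at the first non-hex byte.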
func readHexInt(r *bufio.Reader) (int, error) {
n := 0
i := 0
var k int
for {
c, err := r.ReadByte()
if err != nil {
if err == io.EOF && i > 0 {
return n, nil
}
return -1, err
}
k = int(hex2intTable[c])
if k == 16 {
if i == 0 {
return -1, errEmptyHexNum
}
r.UnreadByte()
return n, nil
}
if i >= maxHexIntChars {
return -1, errTooLargeHexNum
}
n = (n << 4) | k
i++
}
}
var hexIntBufPool sync.Pool
func writeHexInt(w *bufio.Writer, n int) error {
if n < 0 {
panic("BUG: int must be positive")
}
v := hexIntBufPool.Get()
if v == nil {
v = make([]byte, maxHexIntChars+1)
}
buf := v.([]byte)
i := len(buf) - 1
for {
buf[i] = int2hexbyte(n & 0xf)
n >>= 4
if n == 0 {
break
}
i--
}
_, err := w.Write(buf[i:])
hexIntBufPool.Put(v)
return err
}
func int2hexbyte(n int) byte {
if n < 10 {
return '0' + byte(n)
}
return 'a' + byte(n) - 10
}
func hexCharUpper(c byte) byte {
if c < 10 {
return '0' + c
}
return c - 10 + 'A'
}
var hex2intTable = func() []byte {
b := make([]byte, 256)
for i := 0; i < 256; i++ {
c := byte(16)
if i >= '0' && i <= '9' {
c = byte(i) - '0'
} else if i >= 'a' && i <= 'f' {
c = byte(i) - 'a' + 10
} else if i >= 'A' && i <= 'F' {
c = byte(i) - 'A' + 10
}
b[i] = c
}
return b
}()
const toLower = 'a' - 'A'
var toLowerTable = func() [256]byte {
var a [256]byte
for i := 0; i < 256; i++ {
c := byte(i)
if c >= 'A' && c <= 'Z' {
c += toLower
}
a[i] = c
}
return a
}()
var toUpperTable = func() [256]byte {
var a [256]byte
for i := 0; i < 256; i++ {
c := byte(i)
if c >= 'a' && c <= 'z' {
c -= toLower
}
a[i] = c
}
return a
}()
func lowercaseBytes(b []byte) {
for i := 0; i < len(b); i++ {
p := &b[i]
*p = toLowerTable[*p]
}
}
// b2s converts byte slice to a string without memory allocation.
// See https://groups.google.com/forum/#!msg/Golang-Nuts/ENgbUzYvCuU/90yGx7GUAgAJ .
//
// Note it may break if the string and/or slice header changes
// in future Go versions.
func b2s(b []byte) string {
return *(*string)(unsafe.Pointer(&b))
}
// s2b converts string to a byte slice without memory allocation.
//
// Note it may break if the string and/or slice header changes
// in future Go versions.
func s2b(s string) []byte {
sh := (*reflect.StringHeader)(unsafe.Pointer(&s))
bh := reflect.SliceHeader{
Data: sh.Data,
Len: sh.Len,
Cap: sh.Len,
}
return *(*[]byte)(unsafe.Pointer(&bh))
}
// AppendUnquotedArg appends url-decoded src to dst and returns appended dst.
//
// dst may point to src. In this case src will be overwritten.
func AppendUnquotedArg(dst, src []byte) []byte {
return decodeArgAppend(dst, src)
}
// AppendQuotedArg appends url-encoded src to dst and returns appended dst.
func AppendQuotedArg(dst, src []byte) []byte {
for _, c := range src {
// See http://www.w3.org/TR/html5/forms.html#form-submission-algorithm
if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' ||
c == '*' || c == '-' || c == '.' || c == '_' {
dst = append(dst, c)
} else {
dst = append(dst, '%', hexCharUpper(c>>4), hexCharUpper(c&15))
}
}
return dst
}
func appendQuotedPath(dst, src []byte) []byte {
for _, c := range src {
// From the spec: http://tools.ietf.org/html/rfc3986#section-3.3
// a path can contain zero or more pchar, which is defined as follows:
// pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
// pct-encoded = "%" HEXDIG HEXDIG
// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
// / "*" / "+" / "," / ";" / "="
if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' ||
c == '-' || c == '.' || c == '_' || c == '~' || c == '!' || c == '$' ||
c == '&' || c == '\'' || c == '(' || c == ')' || c == '*' || c == '+' ||
c == ',' || c == ';' || c == '=' || c == ':' || c == '@' || c == '/' {
dst = append(dst, c)
} else {
dst = append(dst, '%', hexCharUpper(c>>4), hexCharUpper(c&15))
}
}
return dst
}
// EqualBytesStr returns true if string(b) == s.
//
// This function has no performance benefits compared to string(b) == s.
// It is left here for backwards compatibility only.
//
// Deprecated: may be deleted soon.
func EqualBytesStr(b []byte, s string) bool {
return string(b) == s
}
// AppendBytesStr appends src to dst and returns the extended dst.
//
// This function has no performance benefits compared to append(dst, src...).
// It is left here for backwards compatibility only.
//
// Deprecated: may be deleted soon.
func AppendBytesStr(dst []byte, src string) []byte {
return append(dst, src...)
}
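
For context, a minimal sketch of how the exported helpers above are typically used: the Append* functions extend a caller-owned buffer instead of allocating per call, and the parsers operate directly on []byte. The sample values are illustrative.

package main

import (
	"fmt"
	"time"

	"github.com/valyala/fasthttp"
)

func main() {
	// Append-style helpers extend a caller-owned buffer,
	// avoiding a fresh allocation on every call.
	buf := make([]byte, 0, 64)
	buf = fasthttp.AppendUint(buf, 12345)
	buf = append(buf, ' ')
	buf = fasthttp.AppendHTTPDate(buf, time.Now())
	fmt.Println(string(buf))

	// The parsers work directly on []byte; no string conversion needed.
	n, _ := fasthttp.ParseUint([]byte("12345"))
	f, _ := fasthttp.ParseUfloat([]byte("3.14e2"))
	fmt.Println(n, f) // 12345 314
}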

8
vendor/github.com/valyala/fasthttp/bytesconv_32.go generated vendored Normal file

@ -0,0 +1,8 @@
// +build !amd64,!arm64,!ppc64
package fasthttp
const (
maxIntChars = 9
maxHexIntChars = 7
)

8
vendor/github.com/valyala/fasthttp/bytesconv_64.go generated vendored Normal file

@ -0,0 +1,8 @@
// +build amd64 arm64 ppc64
package fasthttp
const (
maxIntChars = 18
maxHexIntChars = 15
)

2188
vendor/github.com/valyala/fasthttp/client.go generated vendored Normal file

File diff suppressed because it is too large

13
vendor/github.com/valyala/fasthttp/coarseTime.go generated vendored Normal file

@ -0,0 +1,13 @@
package fasthttp
import (
"time"
)
// CoarseTimeNow returns the current time truncated to the nearest second.
//
// Deprecated: This is slower than calling time.Now() directly.
// This is now a shortcut for time.Now().Truncate(time.Second).
func CoarseTimeNow() time.Time {
return time.Now().Truncate(time.Second)
}

438
vendor/github.com/valyala/fasthttp/compress.go generated vendored Normal file

@ -0,0 +1,438 @@
package fasthttp
import (
"bytes"
"fmt"
"io"
"os"
"sync"
"github.com/klauspost/compress/flate"
"github.com/klauspost/compress/gzip"
"github.com/klauspost/compress/zlib"
"github.com/valyala/bytebufferpool"
"github.com/valyala/fasthttp/stackless"
)
// Supported compression levels.
const (
CompressNoCompression = flate.NoCompression
CompressBestSpeed = flate.BestSpeed
CompressBestCompression = flate.BestCompression
CompressDefaultCompression = 6 // flate.DefaultCompression
CompressHuffmanOnly = -2 // flate.HuffmanOnly
)
func acquireGzipReader(r io.Reader) (*gzip.Reader, error) {
v := gzipReaderPool.Get()
if v == nil {
return gzip.NewReader(r)
}
zr := v.(*gzip.Reader)
if err := zr.Reset(r); err != nil {
return nil, err
}
return zr, nil
}
func releaseGzipReader(zr *gzip.Reader) {
zr.Close()
gzipReaderPool.Put(zr)
}
var gzipReaderPool sync.Pool
func acquireFlateReader(r io.Reader) (io.ReadCloser, error) {
v := flateReaderPool.Get()
if v == nil {
zr, err := zlib.NewReader(r)
if err != nil {
return nil, err
}
return zr, nil
}
zr := v.(io.ReadCloser)
if err := resetFlateReader(zr, r); err != nil {
return nil, err
}
return zr, nil
}
func releaseFlateReader(zr io.ReadCloser) {
zr.Close()
flateReaderPool.Put(zr)
}
func resetFlateReader(zr io.ReadCloser, r io.Reader) error {
zrr, ok := zr.(zlib.Resetter)
if !ok {
panic("BUG: zlib.Reader doesn't implement zlib.Resetter???")
}
return zrr.Reset(r, nil)
}
var flateReaderPool sync.Pool
func acquireStacklessGzipWriter(w io.Writer, level int) stackless.Writer {
nLevel := normalizeCompressLevel(level)
p := stacklessGzipWriterPoolMap[nLevel]
v := p.Get()
if v == nil {
return stackless.NewWriter(w, func(w io.Writer) stackless.Writer {
return acquireRealGzipWriter(w, level)
})
}
sw := v.(stackless.Writer)
sw.Reset(w)
return sw
}
func releaseStacklessGzipWriter(sw stackless.Writer, level int) {
sw.Close()
nLevel := normalizeCompressLevel(level)
p := stacklessGzipWriterPoolMap[nLevel]
p.Put(sw)
}
func acquireRealGzipWriter(w io.Writer, level int) *gzip.Writer {
nLevel := normalizeCompressLevel(level)
p := realGzipWriterPoolMap[nLevel]
v := p.Get()
if v == nil {
zw, err := gzip.NewWriterLevel(w, level)
if err != nil {
panic(fmt.Sprintf("BUG: unexpected error from gzip.NewWriterLevel(%d): %s", level, err))
}
return zw
}
zw := v.(*gzip.Writer)
zw.Reset(w)
return zw
}
func releaseRealGzipWriter(zw *gzip.Writer, level int) {
zw.Close()
nLevel := normalizeCompressLevel(level)
p := realGzipWriterPoolMap[nLevel]
p.Put(zw)
}
var (
stacklessGzipWriterPoolMap = newCompressWriterPoolMap()
realGzipWriterPoolMap = newCompressWriterPoolMap()
)
// AppendGzipBytesLevel appends gzipped src to dst using the given
// compression level and returns the resulting dst.
//
// Supported compression levels are:
//
// * CompressNoCompression
// * CompressBestSpeed
// * CompressBestCompression
// * CompressDefaultCompression
// * CompressHuffmanOnly
func AppendGzipBytesLevel(dst, src []byte, level int) []byte {
w := &byteSliceWriter{dst}
WriteGzipLevel(w, src, level)
return w.b
}
// WriteGzipLevel writes gzipped p to w using the given compression level
// and returns the number of compressed bytes written to w.
//
// Supported compression levels are:
//
// * CompressNoCompression
// * CompressBestSpeed
// * CompressBestCompression
// * CompressDefaultCompression
// * CompressHuffmanOnly
func WriteGzipLevel(w io.Writer, p []byte, level int) (int, error) {
switch w.(type) {
case *byteSliceWriter,
*bytes.Buffer,
*bytebufferpool.ByteBuffer:
// These writers don't block, so we can just use stacklessWriteGzip
ctx := &compressCtx{
w: w,
p: p,
level: level,
}
stacklessWriteGzip(ctx)
return len(p), nil
default:
zw := acquireStacklessGzipWriter(w, level)
n, err := zw.Write(p)
releaseStacklessGzipWriter(zw, level)
return n, err
}
}
var stacklessWriteGzip = stackless.NewFunc(nonblockingWriteGzip)
func nonblockingWriteGzip(ctxv interface{}) {
ctx := ctxv.(*compressCtx)
zw := acquireRealGzipWriter(ctx.w, ctx.level)
_, err := zw.Write(ctx.p)
if err != nil {
panic(fmt.Sprintf("BUG: gzip.Writer.Write for len(p)=%d returned unexpected error: %s", len(ctx.p), err))
}
releaseRealGzipWriter(zw, ctx.level)
}
// WriteGzip writes gzipped p to w and returns the number of compressed
// bytes written to w.
func WriteGzip(w io.Writer, p []byte) (int, error) {
return WriteGzipLevel(w, p, CompressDefaultCompression)
}
// AppendGzipBytes appends gzipped src to dst and returns the resulting dst.
func AppendGzipBytes(dst, src []byte) []byte {
return AppendGzipBytesLevel(dst, src, CompressDefaultCompression)
}
// WriteGunzip writes ungzipped p to w and returns the number of uncompressed
// bytes written to w.
func WriteGunzip(w io.Writer, p []byte) (int, error) {
r := &byteSliceReader{p}
zr, err := acquireGzipReader(r)
if err != nil {
return 0, err
}
n, err := copyZeroAlloc(w, zr)
releaseGzipReader(zr)
nn := int(n)
if int64(nn) != n {
return 0, fmt.Errorf("too much data gunzipped: %d", n)
}
return nn, err
}
// AppendGunzipBytes appends gunzipped src to dst and returns the resulting dst.
func AppendGunzipBytes(dst, src []byte) ([]byte, error) {
w := &byteSliceWriter{dst}
_, err := WriteGunzip(w, src)
return w.b, err
}
// AppendDeflateBytesLevel appends deflated src to dst using the given
// compression level and returns the resulting dst.
//
// Supported compression levels are:
//
// * CompressNoCompression
// * CompressBestSpeed
// * CompressBestCompression
// * CompressDefaultCompression
// * CompressHuffmanOnly
func AppendDeflateBytesLevel(dst, src []byte, level int) []byte {
w := &byteSliceWriter{dst}
WriteDeflateLevel(w, src, level)
return w.b
}
// WriteDeflateLevel writes deflated p to w using the given compression level
// and returns the number of compressed bytes written to w.
//
// Supported compression levels are:
//
// * CompressNoCompression
// * CompressBestSpeed
// * CompressBestCompression
// * CompressDefaultCompression
// * CompressHuffmanOnly
func WriteDeflateLevel(w io.Writer, p []byte, level int) (int, error) {
switch w.(type) {
case *byteSliceWriter,
*bytes.Buffer,
*bytebufferpool.ByteBuffer:
// These writers don't block, so we can just use stacklessWriteDeflate
ctx := &compressCtx{
w: w,
p: p,
level: level,
}
stacklessWriteDeflate(ctx)
return len(p), nil
default:
zw := acquireStacklessDeflateWriter(w, level)
n, err := zw.Write(p)
releaseStacklessDeflateWriter(zw, level)
return n, err
}
}
var stacklessWriteDeflate = stackless.NewFunc(nonblockingWriteDeflate)
func nonblockingWriteDeflate(ctxv interface{}) {
ctx := ctxv.(*compressCtx)
zw := acquireRealDeflateWriter(ctx.w, ctx.level)
_, err := zw.Write(ctx.p)
if err != nil {
panic(fmt.Sprintf("BUG: zlib.Writer.Write for len(p)=%d returned unexpected error: %s", len(ctx.p), err))
}
releaseRealDeflateWriter(zw, ctx.level)
}
type compressCtx struct {
w io.Writer
p []byte
level int
}
// WriteDeflate writes deflated p to w and returns the number of compressed
// bytes written to w.
func WriteDeflate(w io.Writer, p []byte) (int, error) {
return WriteDeflateLevel(w, p, CompressDefaultCompression)
}
// AppendDeflateBytes appends deflated src to dst and returns the resulting dst.
func AppendDeflateBytes(dst, src []byte) []byte {
return AppendDeflateBytesLevel(dst, src, CompressDefaultCompression)
}
// WriteInflate writes inflated p to w and returns the number of uncompressed
// bytes written to w.
func WriteInflate(w io.Writer, p []byte) (int, error) {
r := &byteSliceReader{p}
zr, err := acquireFlateReader(r)
if err != nil {
return 0, err
}
n, err := copyZeroAlloc(w, zr)
releaseFlateReader(zr)
nn := int(n)
if int64(nn) != n {
return 0, fmt.Errorf("too much data inflated: %d", n)
}
return nn, err
}
// AppendInflateBytes appends inflated src to dst and returns the resulting dst.
func AppendInflateBytes(dst, src []byte) ([]byte, error) {
w := &byteSliceWriter{dst}
_, err := WriteInflate(w, src)
return w.b, err
}
type byteSliceWriter struct {
b []byte
}
func (w *byteSliceWriter) Write(p []byte) (int, error) {
w.b = append(w.b, p...)
return len(p), nil
}
type byteSliceReader struct {
b []byte
}
func (r *byteSliceReader) Read(p []byte) (int, error) {
if len(r.b) == 0 {
return 0, io.EOF
}
n := copy(p, r.b)
r.b = r.b[n:]
return n, nil
}
func acquireStacklessDeflateWriter(w io.Writer, level int) stackless.Writer {
nLevel := normalizeCompressLevel(level)
p := stacklessDeflateWriterPoolMap[nLevel]
v := p.Get()
if v == nil {
return stackless.NewWriter(w, func(w io.Writer) stackless.Writer {
return acquireRealDeflateWriter(w, level)
})
}
sw := v.(stackless.Writer)
sw.Reset(w)
return sw
}
func releaseStacklessDeflateWriter(sw stackless.Writer, level int) {
sw.Close()
nLevel := normalizeCompressLevel(level)
p := stacklessDeflateWriterPoolMap[nLevel]
p.Put(sw)
}
func acquireRealDeflateWriter(w io.Writer, level int) *zlib.Writer {
nLevel := normalizeCompressLevel(level)
p := realDeflateWriterPoolMap[nLevel]
v := p.Get()
if v == nil {
zw, err := zlib.NewWriterLevel(w, level)
if err != nil {
panic(fmt.Sprintf("BUG: unexpected error from zlib.NewWriterLevel(%d): %s", level, err))
}
return zw
}
zw := v.(*zlib.Writer)
zw.Reset(w)
return zw
}
func releaseRealDeflateWriter(zw *zlib.Writer, level int) {
zw.Close()
nLevel := normalizeCompressLevel(level)
p := realDeflateWriterPoolMap[nLevel]
p.Put(zw)
}
var (
stacklessDeflateWriterPoolMap = newCompressWriterPoolMap()
realDeflateWriterPoolMap = newCompressWriterPoolMap()
)
func newCompressWriterPoolMap() []*sync.Pool {
// Initialize pools for all the compression levels defined
// in https://golang.org/pkg/compress/flate/#pkg-constants .
// Compression levels are normalized with normalizeCompressLevel,
// so they fit [0..11].
var m []*sync.Pool
for i := 0; i < 12; i++ {
m = append(m, &sync.Pool{})
}
return m
}
func isFileCompressible(f *os.File, minCompressRatio float64) bool {
// Try compressing the first 4kb of the file
// and see if it can be compressed by more than
// the given minCompressRatio.
b := bytebufferpool.Get()
zw := acquireStacklessGzipWriter(b, CompressDefaultCompression)
lr := &io.LimitedReader{
R: f,
N: 4096,
}
_, err := copyZeroAlloc(zw, lr)
releaseStacklessGzipWriter(zw, CompressDefaultCompression)
f.Seek(0, 0)
if err != nil {
return false
}
n := 4096 - lr.N
zn := len(b.B)
bytebufferpool.Put(b)
return float64(zn) < float64(n)*minCompressRatio
}
// normalizes compression level into [0..11], so it can be used as an index
// in *PoolMap.
func normalizeCompressLevel(level int) int {
// -2 is the lowest compression level - CompressHuffmanOnly
// 9 is the highest compression level - CompressBestCompression
if level < -2 || level > 9 {
level = CompressDefaultCompression
}
return level + 2
}
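
A minimal usage sketch for the gzip helpers above, showing the append-style round trip; the sample payload is illustrative.

package main

import (
	"fmt"

	"github.com/valyala/fasthttp"
)

func main() {
	src := []byte("hello hello hello hello hello")

	// dst may be nil or a pooled slice; the helpers append to it.
	compressed := fasthttp.AppendGzipBytes(nil, src)

	// And back again.
	plain, err := fasthttp.AppendGunzipBytes(nil, compressed)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes -> %d bytes -> %q\n", len(src), len(compressed), plain)
}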

474
vendor/github.com/valyala/fasthttp/cookie.go generated vendored Normal file

@ -0,0 +1,474 @@
package fasthttp
import (
"bytes"
"errors"
"io"
"sync"
"time"
)
var zeroTime time.Time
var (
// CookieExpireDelete may be set on Cookie.Expire for expiring the given cookie.
CookieExpireDelete = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
// CookieExpireUnlimited indicates that the cookie doesn't expire.
CookieExpireUnlimited = zeroTime
)
// AcquireCookie returns an empty Cookie object from the pool.
//
// The returned object may be returned back to the pool with ReleaseCookie.
// This allows reducing GC load.
func AcquireCookie() *Cookie {
return cookiePool.Get().(*Cookie)
}
// ReleaseCookie returns the Cookie object acquired with AcquireCookie back
// to the pool.
//
// Do not access released Cookie object, otherwise data races may occur.
func ReleaseCookie(c *Cookie) {
c.Reset()
cookiePool.Put(c)
}
var cookiePool = &sync.Pool{
New: func() interface{} {
return &Cookie{}
},
}
// Cookie represents HTTP response cookie.
//
// Do not copy Cookie objects. Create new objects and use CopyTo instead.
//
// Cookie instance MUST NOT be used from concurrently running goroutines.
type Cookie struct {
noCopy noCopy
key []byte
value []byte
expire time.Time
maxAge int
domain []byte
path []byte
httpOnly bool
secure bool
bufKV argsKV
buf []byte
}
// CopyTo copies src cookie to c.
func (c *Cookie) CopyTo(src *Cookie) {
c.Reset()
c.key = append(c.key[:0], src.key...)
c.value = append(c.value[:0], src.value...)
c.expire = src.expire
c.maxAge = src.maxAge
c.domain = append(c.domain[:0], src.domain...)
c.path = append(c.path[:0], src.path...)
c.httpOnly = src.httpOnly
c.secure = src.secure
}
// HTTPOnly returns true if the cookie is http only.
func (c *Cookie) HTTPOnly() bool {
return c.httpOnly
}
// SetHTTPOnly sets cookie's httpOnly flag to the given value.
func (c *Cookie) SetHTTPOnly(httpOnly bool) {
c.httpOnly = httpOnly
}
// Secure returns true if the cookie is secure.
func (c *Cookie) Secure() bool {
return c.secure
}
// SetSecure sets cookie's secure flag to the given value.
func (c *Cookie) SetSecure(secure bool) {
c.secure = secure
}
// Path returns cookie path.
func (c *Cookie) Path() []byte {
return c.path
}
// SetPath sets cookie path.
func (c *Cookie) SetPath(path string) {
c.buf = append(c.buf[:0], path...)
c.path = normalizePath(c.path, c.buf)
}
// SetPathBytes sets cookie path.
func (c *Cookie) SetPathBytes(path []byte) {
c.buf = append(c.buf[:0], path...)
c.path = normalizePath(c.path, c.buf)
}
// Domain returns cookie domain.
//
// The returned domain is valid until the next Cookie modification method call.
func (c *Cookie) Domain() []byte {
return c.domain
}
// SetDomain sets cookie domain.
func (c *Cookie) SetDomain(domain string) {
c.domain = append(c.domain[:0], domain...)
}
// SetDomainBytes sets cookie domain.
func (c *Cookie) SetDomainBytes(domain []byte) {
c.domain = append(c.domain[:0], domain...)
}
// MaxAge returns the seconds until the cookie is meant to expire or 0
// if no max age.
func (c *Cookie) MaxAge() int {
return c.maxAge
}
// SetMaxAge sets cookie expiration time based on seconds. This takes precedence
// over any absolute expiry set on the cookie.
//
// Set max age to 0 to unset it.
func (c *Cookie) SetMaxAge(seconds int) {
c.maxAge = seconds
}
// Expire returns cookie expiration time.
//
// CookieExpireUnlimited is returned if the cookie doesn't expire.
func (c *Cookie) Expire() time.Time {
expire := c.expire
if expire.IsZero() {
expire = CookieExpireUnlimited
}
return expire
}
// SetExpire sets cookie expiration time.
//
// Set expiration time to CookieExpireDelete for expiring (deleting)
// the cookie on the client.
//
// By default cookie lifetime is limited by browser session.
func (c *Cookie) SetExpire(expire time.Time) {
c.expire = expire
}
// Value returns cookie value.
//
// The returned value is valid until the next Cookie modification method call.
func (c *Cookie) Value() []byte {
return c.value
}
// SetValue sets cookie value.
func (c *Cookie) SetValue(value string) {
c.value = append(c.value[:0], value...)
}
// SetValueBytes sets cookie value.
func (c *Cookie) SetValueBytes(value []byte) {
c.value = append(c.value[:0], value...)
}
// Key returns cookie name.
//
// The returned value is valid until the next Cookie modification method call.
func (c *Cookie) Key() []byte {
return c.key
}
// SetKey sets cookie name.
func (c *Cookie) SetKey(key string) {
c.key = append(c.key[:0], key...)
}
// SetKeyBytes sets cookie name.
func (c *Cookie) SetKeyBytes(key []byte) {
c.key = append(c.key[:0], key...)
}
// Reset clears the cookie.
func (c *Cookie) Reset() {
c.key = c.key[:0]
c.value = c.value[:0]
c.expire = zeroTime
c.maxAge = 0
c.domain = c.domain[:0]
c.path = c.path[:0]
c.httpOnly = false
c.secure = false
}
// AppendBytes appends cookie representation to dst and returns
// the extended dst.
func (c *Cookie) AppendBytes(dst []byte) []byte {
if len(c.key) > 0 {
dst = append(dst, c.key...)
dst = append(dst, '=')
}
dst = append(dst, c.value...)
if c.maxAge > 0 {
dst = append(dst, ';', ' ')
dst = append(dst, strCookieMaxAge...)
dst = append(dst, '=')
dst = AppendUint(dst, c.maxAge)
} else if !c.expire.IsZero() {
c.bufKV.value = AppendHTTPDate(c.bufKV.value[:0], c.expire)
dst = append(dst, ';', ' ')
dst = append(dst, strCookieExpires...)
dst = append(dst, '=')
dst = append(dst, c.bufKV.value...)
}
if len(c.domain) > 0 {
dst = appendCookiePart(dst, strCookieDomain, c.domain)
}
if len(c.path) > 0 {
dst = appendCookiePart(dst, strCookiePath, c.path)
}
if c.httpOnly {
dst = append(dst, ';', ' ')
dst = append(dst, strCookieHTTPOnly...)
}
if c.secure {
dst = append(dst, ';', ' ')
dst = append(dst, strCookieSecure...)
}
return dst
}
// Cookie returns cookie representation.
//
// The returned value is valid until the next call to Cookie methods.
func (c *Cookie) Cookie() []byte {
c.buf = c.AppendBytes(c.buf[:0])
return c.buf
}
// String returns cookie representation.
func (c *Cookie) String() string {
return string(c.Cookie())
}
// WriteTo writes cookie representation to w.
//
// WriteTo implements io.WriterTo interface.
func (c *Cookie) WriteTo(w io.Writer) (int64, error) {
n, err := w.Write(c.Cookie())
return int64(n), err
}
var errNoCookies = errors.New("no cookies found")
// Parse parses Set-Cookie header.
func (c *Cookie) Parse(src string) error {
c.buf = append(c.buf[:0], src...)
return c.ParseBytes(c.buf)
}
// ParseBytes parses Set-Cookie header.
func (c *Cookie) ParseBytes(src []byte) error {
c.Reset()
var s cookieScanner
s.b = src
kv := &c.bufKV
if !s.next(kv) {
return errNoCookies
}
c.key = append(c.key[:0], kv.key...)
c.value = append(c.value[:0], kv.value...)
for s.next(kv) {
if len(kv.key) != 0 {
// Case insensitive switch on first char
switch kv.key[0] | 0x20 {
case 'm':
if caseInsensitiveCompare(strCookieMaxAge, kv.key) {
maxAge, err := ParseUint(kv.value)
if err != nil {
return err
}
c.maxAge = maxAge
}
case 'e': // "expires"
if caseInsensitiveCompare(strCookieExpires, kv.key) {
v := b2s(kv.value)
// Try the same two formats as net/http
// See: https://github.com/golang/go/blob/00379be17e63a5b75b3237819392d2dc3b313a27/src/net/http/cookie.go#L133-L135
exptime, err := time.ParseInLocation(time.RFC1123, v, time.UTC)
if err != nil {
exptime, err = time.Parse("Mon, 02-Jan-2006 15:04:05 MST", v)
if err != nil {
return err
}
}
c.expire = exptime
}
case 'd': // "domain"
if caseInsensitiveCompare(strCookieDomain, kv.key) {
c.domain = append(c.domain[:0], kv.value...)
}
case 'p': // "path"
if caseInsensitiveCompare(strCookiePath, kv.key) {
c.path = append(c.path[:0], kv.value...)
}
}
} else if len(kv.value) != 0 {
// Case insensitive switch on first char
switch kv.value[0] | 0x20 {
case 'h': // "httponly"
if caseInsensitiveCompare(strCookieHTTPOnly, kv.value) {
c.httpOnly = true
}
case 's': // "secure"
if caseInsensitiveCompare(strCookieSecure, kv.value) {
c.secure = true
}
}
} // else empty or no match
}
return nil
}
func appendCookiePart(dst, key, value []byte) []byte {
dst = append(dst, ';', ' ')
dst = append(dst, key...)
dst = append(dst, '=')
return append(dst, value...)
}
func getCookieKey(dst, src []byte) []byte {
n := bytes.IndexByte(src, '=')
if n >= 0 {
src = src[:n]
}
return decodeCookieArg(dst, src, false)
}
func appendRequestCookieBytes(dst []byte, cookies []argsKV) []byte {
for i, n := 0, len(cookies); i < n; i++ {
kv := &cookies[i]
if len(kv.key) > 0 {
dst = append(dst, kv.key...)
dst = append(dst, '=')
}
dst = append(dst, kv.value...)
if i+1 < n {
dst = append(dst, ';', ' ')
}
}
return dst
}
// For Response we cannot use the above function as response cookies
// already contain the key= in the value.
func appendResponseCookieBytes(dst []byte, cookies []argsKV) []byte {
for i, n := 0, len(cookies); i < n; i++ {
kv := &cookies[i]
dst = append(dst, kv.value...)
if i+1 < n {
dst = append(dst, ';', ' ')
}
}
return dst
}
func parseRequestCookies(cookies []argsKV, src []byte) []argsKV {
var s cookieScanner
s.b = src
var kv *argsKV
cookies, kv = allocArg(cookies)
for s.next(kv) {
if len(kv.key) > 0 || len(kv.value) > 0 {
cookies, kv = allocArg(cookies)
}
}
return releaseArg(cookies)
}
type cookieScanner struct {
b []byte
}
func (s *cookieScanner) next(kv *argsKV) bool {
b := s.b
if len(b) == 0 {
return false
}
isKey := true
k := 0
for i, c := range b {
switch c {
case '=':
if isKey {
isKey = false
kv.key = decodeCookieArg(kv.key, b[:i], false)
k = i + 1
}
case ';':
if isKey {
kv.key = kv.key[:0]
}
kv.value = decodeCookieArg(kv.value, b[k:i], true)
s.b = b[i+1:]
return true
}
}
if isKey {
kv.key = kv.key[:0]
}
kv.value = decodeCookieArg(kv.value, b[k:], true)
s.b = b[len(b):]
return true
}
func decodeCookieArg(dst, src []byte, skipQuotes bool) []byte {
for len(src) > 0 && src[0] == ' ' {
src = src[1:]
}
for len(src) > 0 && src[len(src)-1] == ' ' {
src = src[:len(src)-1]
}
if skipQuotes {
if len(src) > 1 && src[0] == '"' && src[len(src)-1] == '"' {
src = src[1 : len(src)-1]
}
}
return append(dst[:0], src...)
}
// caseInsensitiveCompare does a case insensitive equality comparison of
// two []byte. Assumes only letters need to be matched.
func caseInsensitiveCompare(a, b []byte) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
if a[i]|0x20 != b[i]|0x20 {
return false
}
}
return true
}
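
A short sketch of building and parsing cookies with the API above; the cookie names and values are made up for illustration.

package main

import (
	"fmt"
	"time"

	"github.com/valyala/fasthttp"
)

func main() {
	// Build a Set-Cookie value; pooled objects reduce GC load.
	c := fasthttp.AcquireCookie()
	defer fasthttp.ReleaseCookie(c)

	c.SetKey("session")
	c.SetValue("abc123")
	c.SetPath("/")
	c.SetHTTPOnly(true)
	c.SetExpire(time.Now().Add(24 * time.Hour))
	fmt.Println(c.String())

	// Parse a raw Set-Cookie header value.
	var parsed fasthttp.Cookie
	if err := parsed.Parse("token=xyz; max-age=60; path=/api; Secure"); err != nil {
		panic(err)
	}
	fmt.Println(string(parsed.Key()), string(parsed.Value()), parsed.MaxAge())
}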

37
vendor/github.com/valyala/fasthttp/doc.go generated vendored Normal file

@ -0,0 +1,37 @@
/*
Package fasthttp provides fast HTTP server and client API.
Fasthttp provides the following features:
* Optimized for speed. Easily handles more than 100K qps and more than 1M
concurrent keep-alive connections on modern hardware.
* Optimized for low memory usage.
* Easy 'Connection: Upgrade' support via RequestCtx.Hijack.
* Server provides the following anti-DoS limits:
* The number of concurrent connections.
* The number of concurrent connections per client IP.
* The number of requests per connection.
* Request read timeout.
* Response write timeout.
* Maximum request header size.
* Maximum request body size.
* Maximum request execution time.
* Maximum keep-alive connection lifetime.
* Early filtering out non-GET requests.
* A lot of additional useful info is exposed to request handler:
* Server and client address.
* Per-request logger.
* Unique request id.
* Request start time.
* Connection start time.
* Request sequence number for the current connection.
* Client supports automatic retry on idempotent requests' failure.
* Fasthttp API is designed with the ability to extend existing client
and server implementations or to write custom client and server
implementations from scratch.
*/
package fasthttp
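
As a quick illustration of the server API described above, a minimal hello-world handler; the :8080 address is an arbitrary choice for this sketch.

package main

import (
	"fmt"
	"log"

	"github.com/valyala/fasthttp"
)

func main() {
	handler := func(ctx *fasthttp.RequestCtx) {
		// RequestCtx implements io.Writer, so fmt.Fprintf writes the body.
		ctx.SetContentType("text/plain; charset=utf8")
		fmt.Fprintf(ctx, "Hello from %s!", ctx.Path())
	}
	log.Fatal(fasthttp.ListenAndServe(":8080", handler))
}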

2
vendor/github.com/valyala/fasthttp/fasthttputil/doc.go generated vendored Normal file

@ -0,0 +1,2 @@
// Package fasthttputil provides utility functions for fasthttp.
package fasthttputil

5
vendor/github.com/valyala/fasthttp/fasthttputil/ecdsa.key generated vendored Normal file

@ -0,0 +1,5 @@
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIBpQbZ6a5jL1Yh4wdP6yZk4MKjYWArD/QOLENFw8vbELoAoGCCqGSM49
AwEHoUQDQgAEKQCZWgE2IBhb47ot8MIs1D4KSisHYlZ41IWyeutpjb0fjwwIhimh
pl1Qld1/d2j3Z3vVyfa5yD+ncV7qCFZuSg==
-----END EC PRIVATE KEY-----

10
vendor/github.com/valyala/fasthttp/fasthttputil/ecdsa.pem generated vendored Normal file

@ -0,0 +1,10 @@
-----BEGIN CERTIFICATE-----
MIIBbTCCAROgAwIBAgIQPo718S+K+G7hc1SgTEU4QDAKBggqhkjOPQQDAjASMRAw
DgYDVQQKEwdBY21lIENvMB4XDTE3MDQyMDIxMDExNFoXDTE4MDQyMDIxMDExNFow
EjEQMA4GA1UEChMHQWNtZSBDbzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCkA
mVoBNiAYW+O6LfDCLNQ+CkorB2JWeNSFsnrraY29H48MCIYpoaZdUJXdf3do92d7
1cn2ucg/p3Fe6ghWbkqjSzBJMA4GA1UdDwEB/wQEAwIFoDATBgNVHSUEDDAKBggr
BgEFBQcDATAMBgNVHRMBAf8EAjAAMBQGA1UdEQQNMAuCCWxvY2FsaG9zdDAKBggq
hkjOPQQDAgNIADBFAiEAoLAIQkvSuIcHUqyWroA6yWYw2fznlRH/uO9/hMCxUCEC
IClRYb/5O9eD/Eq/ozPnwNpsQHOeYefEhadJ/P82y0lG
-----END CERTIFICATE-----

84
vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener.go generated vendored Normal file

@ -0,0 +1,84 @@
package fasthttputil
import (
"fmt"
"net"
"sync"
)
// InmemoryListener provides in-memory dialer<->net.Listener implementation.
//
// It may be used either for fast in-process client<->server communications
// without network stack overhead or for client<->server tests.
type InmemoryListener struct {
lock sync.Mutex
closed bool
conns chan net.Conn
}
// NewInmemoryListener returns new in-memory dialer<->net.Listener.
func NewInmemoryListener() *InmemoryListener {
return &InmemoryListener{
conns: make(chan net.Conn, 1024),
}
}
// Accept implements net.Listener's Accept.
//
// It is safe calling Accept from concurrently running goroutines.
//
// Accept returns a new connection per Dial call.
func (ln *InmemoryListener) Accept() (net.Conn, error) {
c, ok := <-ln.conns
if !ok {
return nil, fmt.Errorf("InmemoryListener is already closed: use of closed network connection")
}
return c, nil
}
// Close implements net.Listener's Close.
func (ln *InmemoryListener) Close() error {
var err error
ln.lock.Lock()
if !ln.closed {
close(ln.conns)
ln.closed = true
} else {
err = fmt.Errorf("InmemoryListener is already closed")
}
ln.lock.Unlock()
return err
}
// Addr implements net.Listener's Addr.
func (ln *InmemoryListener) Addr() net.Addr {
return &net.UnixAddr{
Name: "InmemoryListener",
Net: "memory",
}
}
// Dial creates new client<->server connection, enqueues server side
// of the connection to Accept and returns client side of the connection.
//
// It is safe calling Dial from concurrently running goroutines.
func (ln *InmemoryListener) Dial() (net.Conn, error) {
pc := NewPipeConns()
cConn := pc.Conn1()
sConn := pc.Conn2()
ln.lock.Lock()
if !ln.closed {
ln.conns <- sConn
} else {
sConn.Close()
cConn.Close()
cConn = nil
}
ln.lock.Unlock()
if cConn == nil {
return nil, fmt.Errorf("InmemoryListener is already closed")
}
return cConn, nil
}
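
A sketch of wiring a fasthttp server and client through an InmemoryListener, assuming the fasthttp.Serve and HostClient APIs from this package; the "inmemory" host name is a placeholder and is never resolved.

package main

import (
	"fmt"
	"net"

	"github.com/valyala/fasthttp"
	"github.com/valyala/fasthttp/fasthttputil"
)

func main() {
	ln := fasthttputil.NewInmemoryListener()

	// Serve over the in-memory listener; no TCP port is opened.
	go fasthttp.Serve(ln, func(ctx *fasthttp.RequestCtx) {
		ctx.WriteString("pong")
	})

	// Point a client at the listener via a custom Dial function.
	c := &fasthttp.HostClient{
		Addr: "inmemory",
		Dial: func(addr string) (net.Conn, error) { return ln.Dial() },
	}

	status, body, err := c.Get(nil, "http://inmemory/ping")
	fmt.Println(status, string(body), err)
}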

283
vendor/github.com/valyala/fasthttp/fasthttputil/pipeconns.go generated vendored Normal file

@ -0,0 +1,283 @@
package fasthttputil
import (
"errors"
"io"
"net"
"sync"
"time"
)
// NewPipeConns returns new bi-directional connection pipe.
func NewPipeConns() *PipeConns {
ch1 := make(chan *byteBuffer, 4)
ch2 := make(chan *byteBuffer, 4)
pc := &PipeConns{
stopCh: make(chan struct{}),
}
pc.c1.rCh = ch1
pc.c1.wCh = ch2
pc.c2.rCh = ch2
pc.c2.wCh = ch1
pc.c1.pc = pc
pc.c2.pc = pc
return pc
}
// PipeConns provides bi-directional connection pipe,
// which use in-process memory as a transport.
//
// PipeConns must be created by calling NewPipeConns.
//
// PipeConns has the following additional features compared to connections
// returned from net.Pipe():
//
// * It is faster.
// * It buffers Write calls, so there is no need to have a concurrent goroutine
// calling Read in order to unblock each Write call.
// * It supports read and write deadlines.
//
type PipeConns struct {
c1 pipeConn
c2 pipeConn
stopCh chan struct{}
stopChLock sync.Mutex
}
// Conn1 returns the first end of bi-directional pipe.
//
// Data written to Conn1 may be read from Conn2.
// Data written to Conn2 may be read from Conn1.
func (pc *PipeConns) Conn1() net.Conn {
return &pc.c1
}
// Conn2 returns the second end of bi-directional pipe.
//
// Data written to Conn2 may be read from Conn1.
// Data written to Conn1 may be read from Conn2.
func (pc *PipeConns) Conn2() net.Conn {
return &pc.c2
}
// Close closes pipe connections.
func (pc *PipeConns) Close() error {
pc.stopChLock.Lock()
select {
case <-pc.stopCh:
default:
close(pc.stopCh)
}
pc.stopChLock.Unlock()
return nil
}
type pipeConn struct {
b *byteBuffer
bb []byte
rCh chan *byteBuffer
wCh chan *byteBuffer
pc *PipeConns
readDeadlineTimer *time.Timer
writeDeadlineTimer *time.Timer
readDeadlineCh <-chan time.Time
writeDeadlineCh <-chan time.Time
}
func (c *pipeConn) Write(p []byte) (int, error) {
b := acquireByteBuffer()
b.b = append(b.b[:0], p...)
select {
case <-c.pc.stopCh:
releaseByteBuffer(b)
return 0, errConnectionClosed
default:
}
select {
case c.wCh <- b:
default:
select {
case c.wCh <- b:
case <-c.writeDeadlineCh:
c.writeDeadlineCh = closedDeadlineCh
return 0, ErrTimeout
case <-c.pc.stopCh:
releaseByteBuffer(b)
return 0, errConnectionClosed
}
}
return len(p), nil
}
func (c *pipeConn) Read(p []byte) (int, error) {
mayBlock := true
nn := 0
for len(p) > 0 {
n, err := c.read(p, mayBlock)
nn += n
if err != nil {
if !mayBlock && err == errWouldBlock {
err = nil
}
return nn, err
}
p = p[n:]
mayBlock = false
}
return nn, nil
}
func (c *pipeConn) read(p []byte, mayBlock bool) (int, error) {
if len(c.bb) == 0 {
if err := c.readNextByteBuffer(mayBlock); err != nil {
return 0, err
}
}
n := copy(p, c.bb)
c.bb = c.bb[n:]
return n, nil
}
func (c *pipeConn) readNextByteBuffer(mayBlock bool) error {
releaseByteBuffer(c.b)
c.b = nil
select {
case c.b = <-c.rCh:
default:
if !mayBlock {
return errWouldBlock
}
select {
case c.b = <-c.rCh:
case <-c.readDeadlineCh:
c.readDeadlineCh = closedDeadlineCh
// rCh may contain data when deadline is reached.
// Read the data before returning ErrTimeout.
select {
case c.b = <-c.rCh:
default:
return ErrTimeout
}
case <-c.pc.stopCh:
// rCh may contain data when stopCh is closed.
// Read the data before returning EOF.
select {
case c.b = <-c.rCh:
default:
return io.EOF
}
}
}
c.bb = c.b.b
return nil
}
var (
errWouldBlock = errors.New("would block")
errConnectionClosed = errors.New("connection closed")
// ErrTimeout is returned from Read() or Write() on timeout.
ErrTimeout = errors.New("timeout")
)
func (c *pipeConn) Close() error {
return c.pc.Close()
}
func (c *pipeConn) LocalAddr() net.Addr {
return pipeAddr(0)
}
func (c *pipeConn) RemoteAddr() net.Addr {
return pipeAddr(0)
}
func (c *pipeConn) SetDeadline(deadline time.Time) error {
c.SetReadDeadline(deadline)
c.SetWriteDeadline(deadline)
return nil
}
func (c *pipeConn) SetReadDeadline(deadline time.Time) error {
if c.readDeadlineTimer == nil {
c.readDeadlineTimer = time.NewTimer(time.Hour)
}
c.readDeadlineCh = updateTimer(c.readDeadlineTimer, deadline)
return nil
}
func (c *pipeConn) SetWriteDeadline(deadline time.Time) error {
if c.writeDeadlineTimer == nil {
c.writeDeadlineTimer = time.NewTimer(time.Hour)
}
c.writeDeadlineCh = updateTimer(c.writeDeadlineTimer, deadline)
return nil
}
func updateTimer(t *time.Timer, deadline time.Time) <-chan time.Time {
if !t.Stop() {
select {
case <-t.C:
default:
}
}
if deadline.IsZero() {
return nil
}
d := -time.Since(deadline)
if d <= 0 {
return closedDeadlineCh
}
t.Reset(d)
return t.C
}
var closedDeadlineCh = func() <-chan time.Time {
ch := make(chan time.Time)
close(ch)
return ch
}()
type pipeAddr int
func (pipeAddr) Network() string {
return "pipe"
}
func (pipeAddr) String() string {
return "pipe"
}
type byteBuffer struct {
b []byte
}
func acquireByteBuffer() *byteBuffer {
return byteBufferPool.Get().(*byteBuffer)
}
func releaseByteBuffer(b *byteBuffer) {
if b != nil {
byteBufferPool.Put(b)
}
}
var byteBufferPool = &sync.Pool{
New: func() interface{} {
return &byteBuffer{
b: make([]byte, 1024),
}
},
}
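
A minimal sketch of a PipeConns round trip, relying on the buffered Write described above so no concurrent reader is needed for a small payload.

package main

import (
	"fmt"

	"github.com/valyala/fasthttp/fasthttputil"
)

func main() {
	pc := fasthttputil.NewPipeConns()
	c1, c2 := pc.Conn1(), pc.Conn2()

	// Writes are buffered, so this small write completes without
	// a concurrent reader on the other end.
	if _, err := c1.Write([]byte("ping")); err != nil {
		panic(err)
	}

	buf := make([]byte, 4)
	if _, err := c2.Read(buf); err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // ping

	c1.Close()
}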

28
vendor/github.com/valyala/fasthttp/fasthttputil/rsa.key generated vendored Normal file

@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQD4IQusAs8PJdnG
3mURt/AXtgC+ceqLOatJ49JJE1VPTkMAy+oE1f1XvkMrYsHqmDf6GWVzgVXryL4U
wq2/nJSm56ddhN55nI8oSN3dtywUB8/ShelEN73nlN77PeD9tl6NksPwWaKrqxq0
FlabRPZSQCfmgZbhDV8Sa8mfCkFU0G0lit6kLGceCKMvmW+9Bz7ebsYmVdmVMxmf
IJStFD44lWFTdUc65WISKEdW2ELcUefb0zOLw+0PCbXFGJH5x5ktksW8+BBk2Hkg
GeQRL/qPCccthbScO0VgNj3zJ3ZZL0ObSDAbvNDG85joeNjDNq5DT/BAZ0bOSbEF
sh+f9BAzAgMBAAECggEBAJWv2cq7Jw6MVwSRxYca38xuD6TUNBopgBvjREixURW2
sNUaLuMb9Omp7fuOaE2N5rcJ+xnjPGIxh/oeN5MQctz9gwn3zf6vY+15h97pUb4D
uGvYPRDaT8YVGS+X9NMZ4ZCmqW2lpWzKnCFoGHcy8yZLbcaxBsRdvKzwOYGoPiFb
K2QuhXZ/1UPmqK9i2DFKtj40X6vBszTNboFxOVpXrPu0FJwLVSDf2hSZ4fMM0DH3
YqwKcYf5te+hxGKgrqRA3tn0NCWii0in6QIwXMC+kMw1ebg/tZKqyDLMNptAK8J+
DVw9m5X1seUHS5ehU/g2jrQrtK5WYn7MrFK4lBzlRwECgYEA/d1TeANYECDWRRDk
B0aaRZs87Rwl/J9PsvbsKvtU/bX+OfSOUjOa9iQBqn0LmU8GqusEET/QVUfocVwV
Bggf/5qDLxz100Rj0ags/yE/kNr0Bb31kkkKHFMnCT06YasR7qKllwrAlPJvQv9x
IzBKq+T/Dx08Wep9bCRSFhzRCnsCgYEA+jdeZXTDr/Vz+D2B3nAw1frqYFfGnEVY
wqmoK3VXMDkGuxsloO2rN+SyiUo3JNiQNPDub/t7175GH5pmKtZOlftePANsUjBj
wZ1D0rI5Bxu/71ibIUYIRVmXsTEQkh/ozoh3jXCZ9+bLgYiYx7789IUZZSokFQ3D
FICUT9KJ36kCgYAGoq9Y1rWJjmIrYfqj2guUQC+CfxbbGIrrwZqAsRsSmpwvhZ3m
tiSZxG0quKQB+NfSxdvQW5ulbwC7Xc3K35F+i9pb8+TVBdeaFkw+yu6vaZmxQLrX
fQM/pEjD7A7HmMIaO7QaU5SfEAsqdCTP56Y8AftMuNXn/8IRfo2KuGwaWwKBgFpU
ILzJoVdlad9E/Rw7LjYhZfkv1uBVXIyxyKcfrkEXZSmozDXDdxsvcZCEfVHM6Ipk
K/+7LuMcqp4AFEAEq8wTOdq6daFaHLkpt/FZK6M4TlruhtpFOPkoNc3e45eM83OT
6mziKINJC1CQ6m65sQHpBtjxlKMRG8rL/D6wx9s5AoGBAMRlqNPMwglT3hvDmsAt
9Lf9pdmhERUlHhD8bj8mDaBj2Aqv7f6VRJaYZqP403pKKQexuqcn80mtjkSAPFkN
Cj7BVt/RXm5uoxDTnfi26RF9F6yNDEJ7UU9+peBr99aazF/fTgW/1GcMkQnum8uV
c257YgaWmjK9uB0Y2r2VxS0G
-----END PRIVATE KEY-----

17
vendor/github.com/valyala/fasthttp/fasthttputil/rsa.pem generated vendored Normal file

@ -0,0 +1,17 @@
-----BEGIN CERTIFICATE-----
MIICujCCAaKgAwIBAgIJAMbXnKZ/cikUMA0GCSqGSIb3DQEBCwUAMBUxEzARBgNV
BAMTCnVidW50dS5uYW4wHhcNMTUwMjA0MDgwMTM5WhcNMjUwMjAxMDgwMTM5WjAV
MRMwEQYDVQQDEwp1YnVudHUubmFuMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEA+CELrALPDyXZxt5lEbfwF7YAvnHqizmrSePSSRNVT05DAMvqBNX9V75D
K2LB6pg3+hllc4FV68i+FMKtv5yUpuenXYTeeZyPKEjd3bcsFAfP0oXpRDe955Te
+z3g/bZejZLD8Fmiq6satBZWm0T2UkAn5oGW4Q1fEmvJnwpBVNBtJYrepCxnHgij
L5lvvQc+3m7GJlXZlTMZnyCUrRQ+OJVhU3VHOuViEihHVthC3FHn29Mzi8PtDwm1
xRiR+ceZLZLFvPgQZNh5IBnkES/6jwnHLYW0nDtFYDY98yd2WS9Dm0gwG7zQxvOY
6HjYwzauQ0/wQGdGzkmxBbIfn/QQMwIDAQABow0wCzAJBgNVHRMEAjAAMA0GCSqG
SIb3DQEBCwUAA4IBAQBQjKm/4KN/iTgXbLTL3i7zaxYXFLXsnT1tF+ay4VA8aj98
L3JwRTciZ3A5iy/W4VSCt3eASwOaPWHKqDBB5RTtL73LoAqsWmO3APOGQAbixcQ2
45GXi05OKeyiYRi1Nvq7Unv9jUkRDHUYVPZVSAjCpsXzPhFkmZoTRxmx5l0ZF7Li
K91lI5h+eFq0dwZwrmlPambyh1vQUi70VHv8DNToVU29kel7YLbxGbuqETfhrcy6
X+Mha6RYITkAn5FqsZcKMsc9eYGEF4l3XV+oS7q6xfTxktYJMFTI18J0lQ2Lv/CI
whdMnYGntDQBE/iFCrJEGNsKGc38796GBOb5j+zd
-----END CERTIFICATE-----

1271
vendor/github.com/valyala/fasthttp/fs.go generated vendored Normal file

File diff suppressed because it is too large

9
vendor/github.com/valyala/fasthttp/go.mod generated vendored Normal file

@ -0,0 +1,9 @@
module github.com/valyala/fasthttp
require (
github.com/klauspost/compress v1.4.0
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e // indirect
github.com/valyala/bytebufferpool v1.0.0
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a
golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3
)

10
vendor/github.com/valyala/fasthttp/go.sum generated vendored Normal file

@ -0,0 +1,10 @@
github.com/klauspost/compress v1.4.0 h1:8nsMz3tWa9SWWPL60G1V6CUsf4lLjWLTNEtibhe8gh8=
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e h1:+lIPJOWl+jSiJOc70QXJ07+2eg2Jy2EC7Mi11BWujeM=
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a h1:0R4NLDRDZX6JcmhJgXi5E4b8Wg84ihbmUKp/GvSPEzc=
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3 h1:czFLhve3vsQetD6JOJ8NZZvGQIXlnN3/yXxbT6/awxI=
golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=

2147
vendor/github.com/valyala/fasthttp/header.go generated vendored Normal file

File diff suppressed because it is too large

1720
vendor/github.com/valyala/fasthttp/http.go generated vendored Normal file

File diff suppressed because it is too large

183
vendor/github.com/valyala/fasthttp/lbclient.go generated vendored Normal file

@ -0,0 +1,183 @@
package fasthttp
import (
"sync"
"sync/atomic"
"time"
)
// BalancingClient is the interface for clients, which may be passed
// to LBClient.Clients.
type BalancingClient interface {
DoDeadline(req *Request, resp *Response, deadline time.Time) error
PendingRequests() int
}
// LBClient balances requests among available LBClient.Clients.
//
// It has the following features:
//
// - Balances load among available clients using 'least loaded' + 'round robin'
// hybrid technique.
// - Dynamically decreases load on unhealthy clients.
//
// It is forbidden to copy LBClient instances. Create new instances instead.
//
// It is safe calling LBClient methods from concurrently running goroutines.
type LBClient struct {
noCopy noCopy
// Clients must contain non-zero clients list.
// Incoming requests are balanced among these clients.
Clients []BalancingClient
// HealthCheck is a callback called after each request.
//
// The request, response and the error returned by the client
// is passed to HealthCheck, so the callback may determine whether
// the client is healthy.
//
// Load on the current client is decreased if HealthCheck returns false.
//
// By default HealthCheck returns false if err != nil.
HealthCheck func(req *Request, resp *Response, err error) bool
// Timeout is the request timeout used when calling LBClient.Do.
//
// DefaultLBClientTimeout is used by default.
Timeout time.Duration
cs []*lbClient
// nextIdx is for spreading requests among equally loaded clients
// in a round-robin fashion.
nextIdx uint32
once sync.Once
}
// DefaultLBClientTimeout is the default request timeout used by LBClient
// when calling LBClient.Do.
//
// The timeout may be overridden via LBClient.Timeout.
const DefaultLBClientTimeout = time.Second
// DoDeadline calls DoDeadline on the least loaded client.
func (cc *LBClient) DoDeadline(req *Request, resp *Response, deadline time.Time) error {
return cc.get().DoDeadline(req, resp, deadline)
}
// DoTimeout calculates the deadline and calls DoDeadline on the least loaded client.
func (cc *LBClient) DoTimeout(req *Request, resp *Response, timeout time.Duration) error {
deadline := time.Now().Add(timeout)
return cc.get().DoDeadline(req, resp, deadline)
}
// Do calculates the deadline using LBClient.Timeout and calls DoDeadline
// on the least loaded client.
func (cc *LBClient) Do(req *Request, resp *Response) error {
timeout := cc.Timeout
if timeout <= 0 {
timeout = DefaultLBClientTimeout
}
return cc.DoTimeout(req, resp, timeout)
}
func (cc *LBClient) init() {
if len(cc.Clients) == 0 {
panic("BUG: LBClient.Clients cannot be empty")
}
for _, c := range cc.Clients {
cc.cs = append(cc.cs, &lbClient{
c: c,
healthCheck: cc.HealthCheck,
})
}
// Randomize nextIdx in order to prevent initial servers'
// hammering from a cluster of identical LBClients.
cc.nextIdx = uint32(time.Now().UnixNano())
}
func (cc *LBClient) get() *lbClient {
cc.once.Do(cc.init)
cs := cc.cs
idx := atomic.AddUint32(&cc.nextIdx, 1)
idx %= uint32(len(cs))
minC := cs[idx]
minN := minC.PendingRequests()
if minN == 0 {
return minC
}
for _, c := range cs[idx+1:] {
n := c.PendingRequests()
if n == 0 {
return c
}
if n < minN {
minC = c
minN = n
}
}
for _, c := range cs[:idx] {
n := c.PendingRequests()
if n == 0 {
return c
}
if n < minN {
minC = c
minN = n
}
}
return minC
}
type lbClient struct {
c BalancingClient
healthCheck func(req *Request, resp *Response, err error) bool
penalty uint32
}
func (c *lbClient) DoDeadline(req *Request, resp *Response, deadline time.Time) error {
err := c.c.DoDeadline(req, resp, deadline)
if !c.isHealthy(req, resp, err) && c.incPenalty() {
// Penalize the client that returned the error, so subsequent requests
// are routed to other clients.
time.AfterFunc(penaltyDuration, c.decPenalty)
}
return err
}
func (c *lbClient) PendingRequests() int {
n := c.c.PendingRequests()
m := atomic.LoadUint32(&c.penalty)
return n + int(m)
}
func (c *lbClient) isHealthy(req *Request, resp *Response, err error) bool {
if c.healthCheck == nil {
return err == nil
}
return c.healthCheck(req, resp, err)
}
func (c *lbClient) incPenalty() bool {
m := atomic.AddUint32(&c.penalty, 1)
if m > maxPenalty {
c.decPenalty()
return false
}
return true
}
func (c *lbClient) decPenalty() {
atomic.AddUint32(&c.penalty, ^uint32(0))
}
const (
maxPenalty = 300
penaltyDuration = 3 * time.Second
)
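
A usage sketch for LBClient backed by two HostClients, which satisfy BalancingClient via their DoDeadline and PendingRequests methods; the backend addresses and the health-check policy are illustrative.

package main

import (
	"fmt"
	"time"

	"github.com/valyala/fasthttp"
)

func main() {
	lbc := &fasthttp.LBClient{
		Clients: []fasthttp.BalancingClient{
			&fasthttp.HostClient{Addr: "10.0.0.1:80"}, // hypothetical backends
			&fasthttp.HostClient{Addr: "10.0.0.2:80"},
		},
		// Treat any error or 5xx response as unhealthy.
		HealthCheck: func(req *fasthttp.Request, resp *fasthttp.Response, err error) bool {
			return err == nil && resp.StatusCode() < 500
		},
		Timeout: 500 * time.Millisecond,
	}

	req := fasthttp.AcquireRequest()
	resp := fasthttp.AcquireResponse()
	defer fasthttp.ReleaseRequest(req)
	defer fasthttp.ReleaseResponse(resp)

	req.SetRequestURI("http://example.com/health")
	if err := lbc.Do(req, resp); err != nil {
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println(resp.StatusCode())
}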

9
vendor/github.com/valyala/fasthttp/nocopy.go generated vendored Normal file

@ -0,0 +1,9 @@
package fasthttp
// Embed this type into a struct, which mustn't be copied,
// so `go vet` gives a warning if this struct is copied.
//
// See https://github.com/golang/go/issues/8005#issuecomment-190753527 for details.
type noCopy struct{}
func (*noCopy) Lock() {}

100
vendor/github.com/valyala/fasthttp/peripconn.go generated vendored Normal file

@ -0,0 +1,100 @@
package fasthttp
import (
"fmt"
"net"
"sync"
)
type perIPConnCounter struct {
pool sync.Pool
lock sync.Mutex
m map[uint32]int
}
func (cc *perIPConnCounter) Register(ip uint32) int {
cc.lock.Lock()
if cc.m == nil {
cc.m = make(map[uint32]int)
}
n := cc.m[ip] + 1
cc.m[ip] = n
cc.lock.Unlock()
return n
}
func (cc *perIPConnCounter) Unregister(ip uint32) {
cc.lock.Lock()
if cc.m == nil {
cc.lock.Unlock()
panic("BUG: perIPConnCounter.Register() wasn't called")
}
n := cc.m[ip] - 1
if n < 0 {
cc.lock.Unlock()
panic(fmt.Sprintf("BUG: negative per-ip counter=%d for ip=%d", n, ip))
}
cc.m[ip] = n
cc.lock.Unlock()
}
type perIPConn struct {
net.Conn
ip uint32
perIPConnCounter *perIPConnCounter
}
func acquirePerIPConn(conn net.Conn, ip uint32, counter *perIPConnCounter) *perIPConn {
v := counter.pool.Get()
if v == nil {
v = &perIPConn{
perIPConnCounter: counter,
}
}
c := v.(*perIPConn)
c.Conn = conn
c.ip = ip
return c
}
func releasePerIPConn(c *perIPConn) {
c.Conn = nil
c.perIPConnCounter.pool.Put(c)
}
func (c *perIPConn) Close() error {
err := c.Conn.Close()
c.perIPConnCounter.Unregister(c.ip)
releasePerIPConn(c)
return err
}
func getUint32IP(c net.Conn) uint32 {
return ip2uint32(getConnIP4(c))
}
func getConnIP4(c net.Conn) net.IP {
addr := c.RemoteAddr()
ipAddr, ok := addr.(*net.TCPAddr)
if !ok {
return net.IPv4zero
}
return ipAddr.IP.To4()
}
func ip2uint32(ip net.IP) uint32 {
if len(ip) != 4 {
return 0
}
return uint32(ip[0])<<24 | uint32(ip[1])<<16 | uint32(ip[2])<<8 | uint32(ip[3])
}
func uint322ip(ip uint32) net.IP {
b := make([]byte, 4)
b[0] = byte(ip >> 24)
b[1] = byte(ip >> 16)
b[2] = byte(ip >> 8)
b[3] = byte(ip)
return b
}
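
Since ip2uint32 and uint322ip are unexported, here is a package-internal sketch of the IPv4 packing round trip they implement; the address and the helper name examplePerIPKey are illustrative, not part of the package.

package fasthttp

import (
	"fmt"
	"net"
)

// examplePerIPKey packs an IPv4 address into the uint32 key used by
// the per-IP connection counter, then unpacks it again.
func examplePerIPKey() {
	ip := net.ParseIP("192.168.0.10").To4()

	key := ip2uint32(ip) // 0xC0A8000A == 3232235530
	back := uint322ip(key)

	fmt.Println(key, back) // 3232235530 192.168.0.10
}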

21
vendor/github.com/valyala/fasthttp/reuseport/LICENSE generated vendored Normal file

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Max Riveiro
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

2361
vendor/github.com/valyala/fasthttp/server.go generated vendored Normal file

File diff suppressed because it is too large

28
vendor/github.com/valyala/fasthttp/ssl-cert-snakeoil.key generated vendored Normal file

@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQD4IQusAs8PJdnG
3mURt/AXtgC+ceqLOatJ49JJE1VPTkMAy+oE1f1XvkMrYsHqmDf6GWVzgVXryL4U
wq2/nJSm56ddhN55nI8oSN3dtywUB8/ShelEN73nlN77PeD9tl6NksPwWaKrqxq0
FlabRPZSQCfmgZbhDV8Sa8mfCkFU0G0lit6kLGceCKMvmW+9Bz7ebsYmVdmVMxmf
IJStFD44lWFTdUc65WISKEdW2ELcUefb0zOLw+0PCbXFGJH5x5ktksW8+BBk2Hkg
GeQRL/qPCccthbScO0VgNj3zJ3ZZL0ObSDAbvNDG85joeNjDNq5DT/BAZ0bOSbEF
sh+f9BAzAgMBAAECggEBAJWv2cq7Jw6MVwSRxYca38xuD6TUNBopgBvjREixURW2
sNUaLuMb9Omp7fuOaE2N5rcJ+xnjPGIxh/oeN5MQctz9gwn3zf6vY+15h97pUb4D
uGvYPRDaT8YVGS+X9NMZ4ZCmqW2lpWzKnCFoGHcy8yZLbcaxBsRdvKzwOYGoPiFb
K2QuhXZ/1UPmqK9i2DFKtj40X6vBszTNboFxOVpXrPu0FJwLVSDf2hSZ4fMM0DH3
YqwKcYf5te+hxGKgrqRA3tn0NCWii0in6QIwXMC+kMw1ebg/tZKqyDLMNptAK8J+
DVw9m5X1seUHS5ehU/g2jrQrtK5WYn7MrFK4lBzlRwECgYEA/d1TeANYECDWRRDk
B0aaRZs87Rwl/J9PsvbsKvtU/bX+OfSOUjOa9iQBqn0LmU8GqusEET/QVUfocVwV
Bggf/5qDLxz100Rj0ags/yE/kNr0Bb31kkkKHFMnCT06YasR7qKllwrAlPJvQv9x
IzBKq+T/Dx08Wep9bCRSFhzRCnsCgYEA+jdeZXTDr/Vz+D2B3nAw1frqYFfGnEVY
wqmoK3VXMDkGuxsloO2rN+SyiUo3JNiQNPDub/t7175GH5pmKtZOlftePANsUjBj
wZ1D0rI5Bxu/71ibIUYIRVmXsTEQkh/ozoh3jXCZ9+bLgYiYx7789IUZZSokFQ3D
FICUT9KJ36kCgYAGoq9Y1rWJjmIrYfqj2guUQC+CfxbbGIrrwZqAsRsSmpwvhZ3m
tiSZxG0quKQB+NfSxdvQW5ulbwC7Xc3K35F+i9pb8+TVBdeaFkw+yu6vaZmxQLrX
fQM/pEjD7A7HmMIaO7QaU5SfEAsqdCTP56Y8AftMuNXn/8IRfo2KuGwaWwKBgFpU
ILzJoVdlad9E/Rw7LjYhZfkv1uBVXIyxyKcfrkEXZSmozDXDdxsvcZCEfVHM6Ipk
K/+7LuMcqp4AFEAEq8wTOdq6daFaHLkpt/FZK6M4TlruhtpFOPkoNc3e45eM83OT
6mziKINJC1CQ6m65sQHpBtjxlKMRG8rL/D6wx9s5AoGBAMRlqNPMwglT3hvDmsAt
9Lf9pdmhERUlHhD8bj8mDaBj2Aqv7f6VRJaYZqP403pKKQexuqcn80mtjkSAPFkN
Cj7BVt/RXm5uoxDTnfi26RF9F6yNDEJ7UU9+peBr99aazF/fTgW/1GcMkQnum8uV
c257YgaWmjK9uB0Y2r2VxS0G
-----END PRIVATE KEY-----

17
vendor/github.com/valyala/fasthttp/ssl-cert-snakeoil.pem generated vendored Normal file

@ -0,0 +1,17 @@
-----BEGIN CERTIFICATE-----
MIICujCCAaKgAwIBAgIJAMbXnKZ/cikUMA0GCSqGSIb3DQEBCwUAMBUxEzARBgNV
BAMTCnVidW50dS5uYW4wHhcNMTUwMjA0MDgwMTM5WhcNMjUwMjAxMDgwMTM5WjAV
MRMwEQYDVQQDEwp1YnVudHUubmFuMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEA+CELrALPDyXZxt5lEbfwF7YAvnHqizmrSePSSRNVT05DAMvqBNX9V75D
K2LB6pg3+hllc4FV68i+FMKtv5yUpuenXYTeeZyPKEjd3bcsFAfP0oXpRDe955Te
+z3g/bZejZLD8Fmiq6satBZWm0T2UkAn5oGW4Q1fEmvJnwpBVNBtJYrepCxnHgij
L5lvvQc+3m7GJlXZlTMZnyCUrRQ+OJVhU3VHOuViEihHVthC3FHn29Mzi8PtDwm1
xRiR+ceZLZLFvPgQZNh5IBnkES/6jwnHLYW0nDtFYDY98yd2WS9Dm0gwG7zQxvOY
6HjYwzauQ0/wQGdGzkmxBbIfn/QQMwIDAQABow0wCzAJBgNVHRMEAjAAMA0GCSqG
SIb3DQEBCwUAA4IBAQBQjKm/4KN/iTgXbLTL3i7zaxYXFLXsnT1tF+ay4VA8aj98
L3JwRTciZ3A5iy/W4VSCt3eASwOaPWHKqDBB5RTtL73LoAqsWmO3APOGQAbixcQ2
45GXi05OKeyiYRi1Nvq7Unv9jUkRDHUYVPZVSAjCpsXzPhFkmZoTRxmx5l0ZF7Li
K91lI5h+eFq0dwZwrmlPambyh1vQUi70VHv8DNToVU29kel7YLbxGbuqETfhrcy6
X+Mha6RYITkAn5FqsZcKMsc9eYGEF4l3XV+oS7q6xfTxktYJMFTI18J0lQ2Lv/CI
whdMnYGntDQBE/iFCrJEGNsKGc38796GBOb5j+zd
-----END CERTIFICATE-----

3
vendor/github.com/valyala/fasthttp/stackless/doc.go generated vendored Normal file

@ -0,0 +1,3 @@
// Package stackless provides functionality that may save stack space
// for a high number of concurrently running goroutines.
package stackless

79
vendor/github.com/valyala/fasthttp/stackless/func.go generated vendored Normal file

@ -0,0 +1,79 @@
package stackless
import (
"runtime"
"sync"
)
// NewFunc returns stackless wrapper for the function f.
//
// Unlike f, the returned stackless wrapper doesn't use stack space
// on the goroutine that calls it.
// The wrapper may save a lot of stack space if the following conditions
// are met:
//
// - f doesn't contain blocking calls on network, I/O or channels;
// - f uses a lot of stack space;
// - the wrapper is called from a high number of concurrent goroutines.
//
// The stackless wrapper returns false if the call cannot be processed
// at the moment due to high load.
func NewFunc(f func(ctx interface{})) func(ctx interface{}) bool {
if f == nil {
panic("BUG: f cannot be nil")
}
funcWorkCh := make(chan *funcWork, runtime.GOMAXPROCS(-1)*2048)
onceInit := func() {
n := runtime.GOMAXPROCS(-1)
for i := 0; i < n; i++ {
go funcWorker(funcWorkCh, f)
}
}
var once sync.Once
return func(ctx interface{}) bool {
once.Do(onceInit)
fw := getFuncWork()
fw.ctx = ctx
select {
case funcWorkCh <- fw:
default:
putFuncWork(fw)
return false
}
<-fw.done
putFuncWork(fw)
return true
}
}
func funcWorker(funcWorkCh <-chan *funcWork, f func(ctx interface{})) {
for fw := range funcWorkCh {
f(fw.ctx)
fw.done <- struct{}{}
}
}
func getFuncWork() *funcWork {
v := funcWorkPool.Get()
if v == nil {
v = &funcWork{
done: make(chan struct{}, 1),
}
}
return v.(*funcWork)
}
func putFuncWork(fw *funcWork) {
fw.ctx = nil
funcWorkPool.Put(fw)
}
var funcWorkPool sync.Pool
type funcWork struct {
ctx interface{}
done chan struct{}
}
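
A minimal sketch of NewFunc in action: because the wrapped function can only communicate through its ctx argument, this example reports a sum by mutating the slice it receives. The sum computation itself is illustrative.

package main

import (
	"fmt"

	"github.com/valyala/fasthttp/stackless"
)

func main() {
	// Wrap a stack-hungry, non-blocking function. The wrapper runs it
	// on a small pool of worker goroutines instead of the caller's stack.
	sum := stackless.NewFunc(func(ctx interface{}) {
		nums := ctx.(*[]int)
		total := 0
		for _, n := range *nums {
			total += n
		}
		(*nums)[0] = total // report the result through the context
	})

	nums := []int{0, 1, 2, 3, 4}
	if !sum(&nums) {
		fmt.Println("dropped due to high load")
		return
	}
	fmt.Println(nums[0]) // 10
}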

139
vendor/github.com/valyala/fasthttp/stackless/writer.go generated vendored Normal file

@ -0,0 +1,139 @@
package stackless
import (
"errors"
"fmt"
"io"
"github.com/valyala/bytebufferpool"
)
// Writer is an interface stackless writer must conform to.
//
// The interface contains the common subset of Writers from compress/* packages.
type Writer interface {
Write(p []byte) (int, error)
Flush() error
Close() error
Reset(w io.Writer)
}
// NewWriterFunc must return a new writer that will be wrapped into
// a stackless writer.
type NewWriterFunc func(w io.Writer) Writer
// NewWriter creates a stackless writer around a writer returned
// from newWriter.
//
// The returned writer writes data to dstW.
//
// Writers that use a lot of stack space may be wrapped into a stackless writer,
// thus saving stack space for a high number of concurrently running goroutines.
func NewWriter(dstW io.Writer, newWriter NewWriterFunc) Writer {
w := &writer{
dstW: dstW,
}
w.zw = newWriter(&w.xw)
return w
}
type writer struct {
dstW io.Writer
zw Writer
xw xWriter
err error
n int
p []byte
op op
}
type op int
const (
opWrite op = iota
opFlush
opClose
opReset
)
func (w *writer) Write(p []byte) (int, error) {
w.p = p
err := w.do(opWrite)
w.p = nil
return w.n, err
}
func (w *writer) Flush() error {
return w.do(opFlush)
}
func (w *writer) Close() error {
return w.do(opClose)
}
func (w *writer) Reset(dstW io.Writer) {
w.xw.Reset()
w.do(opReset)
w.dstW = dstW
}
func (w *writer) do(op op) error {
w.op = op
if !stacklessWriterFunc(w) {
return errHighLoad
}
err := w.err
if err != nil {
return err
}
if w.xw.bb != nil && len(w.xw.bb.B) > 0 {
_, err = w.dstW.Write(w.xw.bb.B)
}
w.xw.Reset()
return err
}
var errHighLoad = errors.New("cannot compress data due to high load")
var stacklessWriterFunc = NewFunc(writerFunc)
func writerFunc(ctx interface{}) {
w := ctx.(*writer)
switch w.op {
case opWrite:
w.n, w.err = w.zw.Write(w.p)
case opFlush:
w.err = w.zw.Flush()
case opClose:
w.err = w.zw.Close()
case opReset:
w.zw.Reset(&w.xw)
w.err = nil
default:
panic(fmt.Sprintf("BUG: unexpected op: %d", w.op))
}
}
type xWriter struct {
bb *bytebufferpool.ByteBuffer
}
func (w *xWriter) Write(p []byte) (int, error) {
if w.bb == nil {
w.bb = bufferPool.Get()
}
w.bb.Write(p)
return len(p), nil
}
func (w *xWriter) Reset() {
if w.bb != nil {
bufferPool.Put(w.bb)
w.bb = nil
}
}
var bufferPool bytebufferpool.Pool
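
A usage sketch for NewWriter wrapping a gzip.Writer, whose Write/Flush/Close/Reset methods satisfy the Writer interface above; the sample payload is illustrative.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/gzip"
	"github.com/valyala/fasthttp/stackless"
)

func main() {
	var dst bytes.Buffer

	// The gzip.Writer's large stack frames run on shared worker
	// goroutines instead of on every calling goroutine.
	w := stackless.NewWriter(&dst, func(w io.Writer) stackless.Writer {
		return gzip.NewWriter(w)
	})

	if _, err := w.Write([]byte("hello stackless world")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Println(dst.Len(), "compressed bytes")
}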

176
vendor/github.com/valyala/fasthttp/status.go generated vendored Normal file

@ -0,0 +1,176 @@
package fasthttp
import (
"fmt"
"sync/atomic"
)
// HTTP status codes were stolen from net/http.
const (
StatusContinue = 100 // RFC 7231, 6.2.1
StatusSwitchingProtocols = 101 // RFC 7231, 6.2.2
StatusProcessing = 102 // RFC 2518, 10.1
StatusOK = 200 // RFC 7231, 6.3.1
StatusCreated = 201 // RFC 7231, 6.3.2
StatusAccepted = 202 // RFC 7231, 6.3.3
StatusNonAuthoritativeInfo = 203 // RFC 7231, 6.3.4
StatusNoContent = 204 // RFC 7231, 6.3.5
StatusResetContent = 205 // RFC 7231, 6.3.6
StatusPartialContent = 206 // RFC 7233, 4.1
StatusMultiStatus = 207 // RFC 4918, 11.1
StatusAlreadyReported = 208 // RFC 5842, 7.1
StatusIMUsed = 226 // RFC 3229, 10.4.1
StatusMultipleChoices = 300 // RFC 7231, 6.4.1
StatusMovedPermanently = 301 // RFC 7231, 6.4.2
StatusFound = 302 // RFC 7231, 6.4.3
StatusSeeOther = 303 // RFC 7231, 6.4.4
StatusNotModified = 304 // RFC 7232, 4.1
StatusUseProxy = 305 // RFC 7231, 6.4.5
_ = 306 // RFC 7231, 6.4.6 (Unused)
StatusTemporaryRedirect = 307 // RFC 7231, 6.4.7
StatusPermanentRedirect = 308 // RFC 7538, 3
StatusBadRequest = 400 // RFC 7231, 6.5.1
StatusUnauthorized = 401 // RFC 7235, 3.1
StatusPaymentRequired = 402 // RFC 7231, 6.5.2
StatusForbidden = 403 // RFC 7231, 6.5.3
StatusNotFound = 404 // RFC 7231, 6.5.4
StatusMethodNotAllowed = 405 // RFC 7231, 6.5.5
StatusNotAcceptable = 406 // RFC 7231, 6.5.6
StatusProxyAuthRequired = 407 // RFC 7235, 3.2
StatusRequestTimeout = 408 // RFC 7231, 6.5.7
StatusConflict = 409 // RFC 7231, 6.5.8
StatusGone = 410 // RFC 7231, 6.5.9
StatusLengthRequired = 411 // RFC 7231, 6.5.10
StatusPreconditionFailed = 412 // RFC 7232, 4.2
StatusRequestEntityTooLarge = 413 // RFC 7231, 6.5.11
StatusRequestURITooLong = 414 // RFC 7231, 6.5.12
StatusUnsupportedMediaType = 415 // RFC 7231, 6.5.13
StatusRequestedRangeNotSatisfiable = 416 // RFC 7233, 4.4
StatusExpectationFailed = 417 // RFC 7231, 6.5.14
StatusTeapot = 418 // RFC 7168, 2.3.3
StatusUnprocessableEntity = 422 // RFC 4918, 11.2
StatusLocked = 423 // RFC 4918, 11.3
StatusFailedDependency = 424 // RFC 4918, 11.4
StatusUpgradeRequired = 426 // RFC 7231, 6.5.15
StatusPreconditionRequired = 428 // RFC 6585, 3
StatusTooManyRequests = 429 // RFC 6585, 4
StatusRequestHeaderFieldsTooLarge = 431 // RFC 6585, 5
StatusUnavailableForLegalReasons = 451 // RFC 7725, 3
StatusInternalServerError = 500 // RFC 7231, 6.6.1
StatusNotImplemented = 501 // RFC 7231, 6.6.2
StatusBadGateway = 502 // RFC 7231, 6.6.3
StatusServiceUnavailable = 503 // RFC 7231, 6.6.4
StatusGatewayTimeout = 504 // RFC 7231, 6.6.5
StatusHTTPVersionNotSupported = 505 // RFC 7231, 6.6.6
StatusVariantAlsoNegotiates = 506 // RFC 2295, 8.1
StatusInsufficientStorage = 507 // RFC 4918, 11.5
StatusLoopDetected = 508 // RFC 5842, 7.2
StatusNotExtended = 510 // RFC 2774, 7
StatusNetworkAuthenticationRequired = 511 // RFC 6585, 6
)
var (
statusLines atomic.Value
statusMessages = map[int]string{
StatusContinue: "Continue",
StatusSwitchingProtocols: "Switching Protocols",
StatusProcessing: "Processing",
StatusOK: "OK",
StatusCreated: "Created",
StatusAccepted: "Accepted",
StatusNonAuthoritativeInfo: "Non-Authoritative Information",
StatusNoContent: "No Content",
StatusResetContent: "Reset Content",
StatusPartialContent: "Partial Content",
StatusMultiStatus: "Multi-Status",
StatusAlreadyReported: "Already Reported",
StatusIMUsed: "IM Used",
StatusMultipleChoices: "Multiple Choices",
StatusMovedPermanently: "Moved Permanently",
StatusFound: "Found",
StatusSeeOther: "See Other",
StatusNotModified: "Not Modified",
StatusUseProxy: "Use Proxy",
StatusTemporaryRedirect: "Temporary Redirect",
StatusPermanentRedirect: "Permanent Redirect",
StatusBadRequest: "Bad Request",
StatusUnauthorized: "Unauthorized",
StatusPaymentRequired: "Payment Required",
StatusForbidden: "Forbidden",
StatusNotFound: "Not Found",
StatusMethodNotAllowed: "Method Not Allowed",
StatusNotAcceptable: "Not Acceptable",
StatusProxyAuthRequired: "Proxy Authentication Required",
StatusRequestTimeout: "Request Timeout",
StatusConflict: "Conflict",
StatusGone: "Gone",
StatusLengthRequired: "Length Required",
StatusPreconditionFailed: "Precondition Failed",
StatusRequestEntityTooLarge: "Request Entity Too Large",
StatusRequestURITooLong: "Request URI Too Long",
StatusUnsupportedMediaType: "Unsupported Media Type",
StatusRequestedRangeNotSatisfiable: "Requested Range Not Satisfiable",
StatusExpectationFailed: "Expectation Failed",
StatusTeapot: "I'm a teapot",
StatusUnprocessableEntity: "Unprocessable Entity",
StatusLocked: "Locked",
StatusFailedDependency: "Failed Dependency",
StatusUpgradeRequired: "Upgrade Required",
StatusPreconditionRequired: "Precondition Required",
StatusTooManyRequests: "Too Many Requests",
StatusRequestHeaderFieldsTooLarge: "Request Header Fields Too Large",
StatusUnavailableForLegalReasons: "Unavailable For Legal Reasons",
StatusInternalServerError: "Internal Server Error",
StatusNotImplemented: "Not Implemented",
StatusBadGateway: "Bad Gateway",
StatusServiceUnavailable: "Service Unavailable",
StatusGatewayTimeout: "Gateway Timeout",
StatusHTTPVersionNotSupported: "HTTP Version Not Supported",
StatusVariantAlsoNegotiates: "Variant Also Negotiates",
StatusInsufficientStorage: "Insufficient Storage",
StatusLoopDetected: "Loop Detected",
StatusNotExtended: "Not Extended",
StatusNetworkAuthenticationRequired: "Network Authentication Required",
}
)
// StatusMessage returns HTTP status message for the given status code.
func StatusMessage(statusCode int) string {
s := statusMessages[statusCode]
if s == "" {
s = "Unknown Status Code"
}
return s
}
func init() {
statusLines.Store(make(map[int][]byte))
}
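// statusLine returns the cached "HTTP/1.1 <code> <message>\r\n" line for
// statusCode, building it on first use. The map stored in statusLines is
// treated as immutable: an update copies the map and atomically swaps it in,
// so concurrent readers never take a lock (copy-on-write).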
func statusLine(statusCode int) []byte {
m := statusLines.Load().(map[int][]byte)
h := m[statusCode]
if h != nil {
return h
}
statusText := StatusMessage(statusCode)
h = []byte(fmt.Sprintf("HTTP/1.1 %d %s\r\n", statusCode, statusText))
newM := make(map[int][]byte, len(m)+1)
for k, v := range m {
newM[k] = v
}
newM[statusCode] = h
statusLines.Store(newM)
return h
}

54
vendor/github.com/valyala/fasthttp/stream.go generated vendored Normal file

@ -0,0 +1,54 @@
package fasthttp
import (
"bufio"
"io"
"sync"
"github.com/valyala/fasthttp/fasthttputil"
)
// StreamWriter must write data to w.
//
// Usually StreamWriter writes data to w in a loop (aka 'data streaming').
//
// StreamWriter must return immediately if w returns an error.
//
// Since the written data is buffered, do not forget to call w.Flush
// when the data must be propagated to the reader.
type StreamWriter func(w *bufio.Writer)
// NewStreamReader returns a reader that replays all the data generated by sw.
//
// The returned reader may be passed to Response.SetBodyStream.
//
// Close must be called on the returned reader after all the required data
// has been read. Otherwise a goroutine leak may occur.
//
// See also Response.SetBodyStreamWriter.
func NewStreamReader(sw StreamWriter) io.ReadCloser {
pc := fasthttputil.NewPipeConns()
pw := pc.Conn1()
pr := pc.Conn2()
var bw *bufio.Writer
v := streamWriterBufPool.Get()
if v == nil {
bw = bufio.NewWriter(pw)
} else {
bw = v.(*bufio.Writer)
bw.Reset(pw)
}
go func() {
sw(bw)
bw.Flush()
pw.Close()
streamWriterBufPool.Put(bw)
}()
return pr
}
var streamWriterBufPool sync.Pool
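// Usage sketch (editor's illustration, not part of the vendored source):
// streaming a body of unknown size through NewStreamReader.
//
//	body := NewStreamReader(func(w *bufio.Writer) {
//		for i := 0; i < 10; i++ {
//			fmt.Fprintf(w, "chunk %d\n", i)
//			w.Flush() // propagate the data to the reader
//		}
//	})
//	// pass body to Response.SetBodyStream(body, -1) and Close it when done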

78
vendor/github.com/valyala/fasthttp/strings.go generated vendored Normal file

@ -0,0 +1,78 @@
package fasthttp
var (
defaultServerName = []byte("fasthttp")
defaultUserAgent = []byte("fasthttp")
defaultContentType = []byte("text/plain; charset=utf-8")
)
var (
strSlash = []byte("/")
strSlashSlash = []byte("//")
strSlashDotDot = []byte("/..")
strSlashDotSlash = []byte("/./")
strSlashDotDotSlash = []byte("/../")
strCRLF = []byte("\r\n")
strHTTP = []byte("http")
strHTTPS = []byte("https")
strHTTP11 = []byte("HTTP/1.1")
strColonSlashSlash = []byte("://")
strColonSpace = []byte(": ")
strGMT = []byte("GMT")
strResponseContinue = []byte("HTTP/1.1 100 Continue\r\n\r\n")
strGet = []byte("GET")
strHead = []byte("HEAD")
strPost = []byte("POST")
strPut = []byte("PUT")
strDelete = []byte("DELETE")
strConnect = []byte("CONNECT")
strOptions = []byte("OPTIONS")
strTrace = []byte("TRACE")
strPatch = []byte("PATCH")
strExpect = []byte("Expect")
strConnection = []byte("Connection")
strContentLength = []byte("Content-Length")
strContentType = []byte("Content-Type")
strDate = []byte("Date")
strHost = []byte("Host")
strReferer = []byte("Referer")
strServer = []byte("Server")
strTransferEncoding = []byte("Transfer-Encoding")
strContentEncoding = []byte("Content-Encoding")
strAcceptEncoding = []byte("Accept-Encoding")
strUserAgent = []byte("User-Agent")
strCookie = []byte("Cookie")
strSetCookie = []byte("Set-Cookie")
strLocation = []byte("Location")
strIfModifiedSince = []byte("If-Modified-Since")
strLastModified = []byte("Last-Modified")
strAcceptRanges = []byte("Accept-Ranges")
strRange = []byte("Range")
strContentRange = []byte("Content-Range")
strCookieExpires = []byte("expires")
strCookieDomain = []byte("domain")
strCookiePath = []byte("path")
strCookieHTTPOnly = []byte("HttpOnly")
strCookieSecure = []byte("secure")
strCookieMaxAge = []byte("max-age")
strClose = []byte("close")
strGzip = []byte("gzip")
strDeflate = []byte("deflate")
strKeepAlive = []byte("keep-alive")
strKeepAliveCamelCase = []byte("Keep-Alive")
strUpgrade = []byte("Upgrade")
strChunked = []byte("chunked")
strIdentity = []byte("identity")
str100Continue = []byte("100-continue")
strPostArgsContentType = []byte("application/x-www-form-urlencoded")
strMultipartFormData = []byte("multipart/form-data")
strBoundary = []byte("boundary")
strBytes = []byte("bytes")
strTextSlash = []byte("text/")
strApplicationSlash = []byte("application/")
)

369
vendor/github.com/valyala/fasthttp/tcpdialer.go generated vendored Normal file

@ -0,0 +1,369 @@
package fasthttp
import (
"errors"
"net"
"strconv"
"sync"
"sync/atomic"
"time"
)
// Dial dials the given TCP addr using tcp4.
//
// This function has the following additional features compared to net.Dial:
//
// * It reduces load on the DNS resolver by caching resolved TCP addresses
//   for DefaultDNSCacheDuration.
// * It dials all the resolved TCP addresses in round-robin manner until
//   a connection is established. This may be useful if certain addresses
//   are temporarily unreachable.
// * It returns ErrDialTimeout if a connection cannot be established within
//   DefaultDialTimeout. Use DialTimeout for customizing the dial timeout.
//
// This dialer is intended for custom code wrapping before passing
// to Client.Dial or HostClient.Dial.
//
// For instance, per-host counters and/or limits may be implemented
// by such wrappers.
//
// The addr passed to the function must contain a port. Example addr values:
//
// * foobar.baz:443
// * foo.bar:80
// * aaa.com:8080
func Dial(addr string) (net.Conn, error) {
return getDialer(DefaultDialTimeout, false)(addr)
}
// DialTimeout dials the given TCP addr using tcp4 with the given timeout.
//
// This function has the following additional features compared to net.Dial:
//
// * It reduces load on the DNS resolver by caching resolved TCP addresses
//   for DefaultDNSCacheDuration.
// * It dials all the resolved TCP addresses in round-robin manner until
//   a connection is established. This may be useful if certain addresses
//   are temporarily unreachable.
//
// This dialer is intended for custom code wrapping before passing
// to Client.Dial or HostClient.Dial.
//
// For instance, per-host counters and/or limits may be implemented
// by such wrappers.
//
// The addr passed to the function must contain a port. Example addr values:
//
// * foobar.baz:443
// * foo.bar:80
// * aaa.com:8080
func DialTimeout(addr string, timeout time.Duration) (net.Conn, error) {
return getDialer(timeout, false)(addr)
}
// DialDualStack dials the given TCP addr using both tcp4 and tcp6.
//
// This function has the following additional features compared to net.Dial:
//
// * It reduces load on the DNS resolver by caching resolved TCP addresses
//   for DefaultDNSCacheDuration.
// * It dials all the resolved TCP addresses in round-robin manner until
//   a connection is established. This may be useful if certain addresses
//   are temporarily unreachable.
// * It returns ErrDialTimeout if a connection cannot be established within
//   DefaultDialTimeout. Use DialDualStackTimeout for a custom dial
//   timeout.
//
// This dialer is intended for custom code wrapping before passing
// to Client.Dial or HostClient.Dial.
//
// For instance, per-host counters and/or limits may be implemented
// by such wrappers.
//
// The addr passed to the function must contain a port. Example addr values:
//
// * foobar.baz:443
// * foo.bar:80
// * aaa.com:8080
func DialDualStack(addr string) (net.Conn, error) {
return getDialer(DefaultDialTimeout, true)(addr)
}
// DialDualStackTimeout dials the given TCP addr using both tcp4 and tcp6
// with the given timeout.
//
// This function has the following additional features compared to net.Dial:
//
// * It reduces load on the DNS resolver by caching resolved TCP addresses
//   for DefaultDNSCacheDuration.
// * It dials all the resolved TCP addresses in round-robin manner until
//   a connection is established. This may be useful if certain addresses
//   are temporarily unreachable.
//
// This dialer is intended for custom code wrapping before passing
// to Client.Dial or HostClient.Dial.
//
// For instance, per-host counters and/or limits may be implemented
// by such wrappers.
//
// The addr passed to the function must contain a port. Example addr values:
//
// * foobar.baz:443
// * foo.bar:80
// * aaa.com:8080
func DialDualStackTimeout(addr string, timeout time.Duration) (net.Conn, error) {
return getDialer(timeout, true)(addr)
}
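// Wrapper sketch (editor's illustration, not part of the vendored source):
// the docs above suggest wrapping these dialers for per-host counters or
// limits before passing them to Client.Dial. A minimal global cap:
//
//	var dialSem = make(chan struct{}, 64) // hypothetical limit
//
//	client := &Client{
//		Dial: func(addr string) (net.Conn, error) {
//			dialSem <- struct{}{}
//			defer func() { <-dialSem }()
//			return DialTimeout(addr, 3*time.Second)
//		},
//	}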
func getDialer(timeout time.Duration, dualStack bool) DialFunc {
if timeout <= 0 {
timeout = DefaultDialTimeout
}
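	// Bucket the timeout at 100ms granularity so that dialers created for
	// nearby timeout values are shared via the maps below.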
timeoutRounded := int(timeout.Seconds()*10 + 9)
m := dialMap
if dualStack {
m = dialDualStackMap
}
dialMapLock.Lock()
d := m[timeoutRounded]
if d == nil {
dialer := dialerStd
if dualStack {
dialer = dialerDualStack
}
d = dialer.NewDial(timeout)
m[timeoutRounded] = d
}
dialMapLock.Unlock()
return d
}
var (
dialerStd = &tcpDialer{}
dialerDualStack = &tcpDialer{DualStack: true}
dialMap = make(map[int]DialFunc)
dialDualStackMap = make(map[int]DialFunc)
dialMapLock sync.Mutex
)
type tcpDialer struct {
DualStack bool
tcpAddrsLock sync.Mutex
tcpAddrsMap map[string]*tcpAddrEntry
concurrencyCh chan struct{}
once sync.Once
}
const maxDialConcurrency = 1000
func (d *tcpDialer) NewDial(timeout time.Duration) DialFunc {
d.once.Do(func() {
d.concurrencyCh = make(chan struct{}, maxDialConcurrency)
d.tcpAddrsMap = make(map[string]*tcpAddrEntry)
go d.tcpAddrsClean()
})
return func(addr string) (net.Conn, error) {
addrs, idx, err := d.getTCPAddrs(addr)
if err != nil {
return nil, err
}
network := "tcp4"
if d.DualStack {
network = "tcp"
}
var conn net.Conn
n := uint32(len(addrs))
deadline := time.Now().Add(timeout)
for n > 0 {
conn, err = tryDial(network, &addrs[idx%n], deadline, d.concurrencyCh)
if err == nil {
return conn, nil
}
if err == ErrDialTimeout {
return nil, err
}
idx++
n--
}
return nil, err
}
}
func tryDial(network string, addr *net.TCPAddr, deadline time.Time, concurrencyCh chan struct{}) (net.Conn, error) {
timeout := -time.Since(deadline)
if timeout <= 0 {
return nil, ErrDialTimeout
}
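	// Reserve a slot in the dial concurrency gate (maxDialConcurrency);
	// if the gate is full, wait for a free slot no longer than the
	// remaining deadline.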
select {
case concurrencyCh <- struct{}{}:
default:
tc := acquireTimer(timeout)
isTimeout := false
select {
case concurrencyCh <- struct{}{}:
case <-tc.C:
isTimeout = true
}
releaseTimer(tc)
if isTimeout {
return nil, ErrDialTimeout
}
}
timeout = -time.Since(deadline)
if timeout <= 0 {
<-concurrencyCh
return nil, ErrDialTimeout
}
chv := dialResultChanPool.Get()
if chv == nil {
chv = make(chan dialResult, 1)
}
ch := chv.(chan dialResult)
go func() {
var dr dialResult
dr.conn, dr.err = net.DialTCP(network, nil, addr)
ch <- dr
<-concurrencyCh
}()
var (
conn net.Conn
err error
)
tc := acquireTimer(timeout)
select {
case dr := <-ch:
conn = dr.conn
err = dr.err
dialResultChanPool.Put(ch)
case <-tc.C:
err = ErrDialTimeout
}
releaseTimer(tc)
return conn, err
}
var dialResultChanPool sync.Pool
type dialResult struct {
conn net.Conn
err error
}
// ErrDialTimeout is returned when TCP dialing times out.
var ErrDialTimeout = errors.New("dialing to the given TCP address timed out")
// DefaultDialTimeout is the timeout used by Dial and DialDualStack
// for establishing TCP connections.
const DefaultDialTimeout = 3 * time.Second
type tcpAddrEntry struct {
addrs []net.TCPAddr
addrsIdx uint32
resolveTime time.Time
pending bool
}
// DefaultDNSCacheDuration is the duration for caching resolved TCP addresses
// by Dial* functions.
const DefaultDNSCacheDuration = time.Minute
func (d *tcpDialer) tcpAddrsClean() {
expireDuration := 2 * DefaultDNSCacheDuration
for {
time.Sleep(time.Second)
t := time.Now()
d.tcpAddrsLock.Lock()
for k, e := range d.tcpAddrsMap {
if t.Sub(e.resolveTime) > expireDuration {
delete(d.tcpAddrsMap, k)
}
}
d.tcpAddrsLock.Unlock()
}
}
func (d *tcpDialer) getTCPAddrs(addr string) ([]net.TCPAddr, uint32, error) {
d.tcpAddrsLock.Lock()
e := d.tcpAddrsMap[addr]
if e != nil && !e.pending && time.Since(e.resolveTime) > DefaultDNSCacheDuration {
e.pending = true
e = nil
}
d.tcpAddrsLock.Unlock()
if e == nil {
addrs, err := resolveTCPAddrs(addr, d.DualStack)
if err != nil {
d.tcpAddrsLock.Lock()
e = d.tcpAddrsMap[addr]
if e != nil && e.pending {
e.pending = false
}
d.tcpAddrsLock.Unlock()
return nil, 0, err
}
e = &tcpAddrEntry{
addrs: addrs,
resolveTime: time.Now(),
}
d.tcpAddrsLock.Lock()
d.tcpAddrsMap[addr] = e
d.tcpAddrsLock.Unlock()
}
idx := atomic.AddUint32(&e.addrsIdx, 1)
return e.addrs, idx, nil
}
func resolveTCPAddrs(addr string, dualStack bool) ([]net.TCPAddr, error) {
host, portS, err := net.SplitHostPort(addr)
if err != nil {
return nil, err
}
port, err := strconv.Atoi(portS)
if err != nil {
return nil, err
}
ips, err := net.LookupIP(host)
if err != nil {
return nil, err
}
n := len(ips)
addrs := make([]net.TCPAddr, 0, n)
for i := 0; i < n; i++ {
ip := ips[i]
if !dualStack && ip.To4() == nil {
continue
}
addrs = append(addrs, net.TCPAddr{
IP: ip,
Port: port,
})
}
if len(addrs) == 0 {
return nil, errNoDNSEntries
}
return addrs, nil
}
var errNoDNSEntries = errors.New("couldn't find DNS entries for the given domain. Try using DialDualStack")

44
vendor/github.com/valyala/fasthttp/timer.go generated vendored Normal file

@ -0,0 +1,44 @@
package fasthttp
import (
"sync"
"time"
)
func initTimer(t *time.Timer, timeout time.Duration) *time.Timer {
if t == nil {
return time.NewTimer(timeout)
}
if t.Reset(timeout) {
panic("BUG: active timer trapped into initTimer()")
}
return t
}
func stopTimer(t *time.Timer) {
if !t.Stop() {
		// Collect the value possibly left in the channel if the timer
		// has already fired and nobody has collected its value.
select {
case <-t.C:
default:
}
}
}
func acquireTimer(timeout time.Duration) *time.Timer {
v := timerPool.Get()
if v == nil {
return time.NewTimer(timeout)
}
t := v.(*time.Timer)
initTimer(t, timeout)
return t
}
func releaseTimer(t *time.Timer) {
stopTimer(t)
timerPool.Put(t)
}
var timerPool sync.Pool
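// Typical pairing (editor's illustration, not part of the vendored source),
// as used by tryDial in tcpdialer.go:
//
//	t := acquireTimer(timeout)
//	select {
//	case <-resultCh:
//	case <-t.C:
//		// timed out
//	}
//	releaseTimer(t)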

525
vendor/github.com/valyala/fasthttp/uri.go generated vendored Normal file

@ -0,0 +1,525 @@
package fasthttp
import (
"bytes"
"io"
"sync"
)
// AcquireURI returns an empty URI instance from the pool.
//
// Release the URI with ReleaseURI after the URI is no longer needed.
// This allows reducing GC load.
func AcquireURI() *URI {
return uriPool.Get().(*URI)
}
// ReleaseURI releases the URI acquired via AcquireURI.
//
// The released URI mustn't be used after releasing it, otherwise data races
// may occur.
func ReleaseURI(u *URI) {
u.Reset()
uriPool.Put(u)
}
var uriPool = &sync.Pool{
New: func() interface{} {
return &URI{}
},
}
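// Usage sketch (editor's illustration, not part of the vendored source):
//
//	u := AcquireURI()
//	u.Parse(nil, []byte("http://aaa.com/foo/bar?baz=123#qwe"))
//	// u.Host() == "aaa.com", u.Path() == "/foo/bar"
//	ReleaseURI(u)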
// URI represents a URI.
//
// Copying URI instances is forbidden. Create a new instance and use CopyTo
// instead.
//
// A URI instance MUST NOT be used from concurrently running goroutines.
type URI struct {
noCopy noCopy
pathOriginal []byte
scheme []byte
path []byte
queryString []byte
hash []byte
host []byte
queryArgs Args
parsedQueryArgs bool
fullURI []byte
requestURI []byte
h *RequestHeader
}
// CopyTo copies uri contents to dst.
func (u *URI) CopyTo(dst *URI) {
dst.Reset()
dst.pathOriginal = append(dst.pathOriginal[:0], u.pathOriginal...)
dst.scheme = append(dst.scheme[:0], u.scheme...)
dst.path = append(dst.path[:0], u.path...)
dst.queryString = append(dst.queryString[:0], u.queryString...)
dst.hash = append(dst.hash[:0], u.hash...)
dst.host = append(dst.host[:0], u.host...)
u.queryArgs.CopyTo(&dst.queryArgs)
dst.parsedQueryArgs = u.parsedQueryArgs
// fullURI and requestURI shouldn't be copied, since they are created
// from scratch on each FullURI() and RequestURI() call.
dst.h = u.h
}
// Hash returns URI hash, i.e. qwe of http://aaa.com/foo/bar?baz=123#qwe .
//
// The returned value is valid until the next URI method call.
func (u *URI) Hash() []byte {
return u.hash
}
// SetHash sets URI hash.
func (u *URI) SetHash(hash string) {
u.hash = append(u.hash[:0], hash...)
}
// SetHashBytes sets URI hash.
func (u *URI) SetHashBytes(hash []byte) {
u.hash = append(u.hash[:0], hash...)
}
// QueryString returns URI query string,
// i.e. baz=123 of http://aaa.com/foo/bar?baz=123#qwe .
//
// The returned value is valid until the next URI method call.
func (u *URI) QueryString() []byte {
return u.queryString
}
// SetQueryString sets URI query string.
func (u *URI) SetQueryString(queryString string) {
u.queryString = append(u.queryString[:0], queryString...)
u.parsedQueryArgs = false
}
// SetQueryStringBytes sets URI query string.
func (u *URI) SetQueryStringBytes(queryString []byte) {
u.queryString = append(u.queryString[:0], queryString...)
u.parsedQueryArgs = false
}
// Path returns URI path, i.e. /foo/bar of http://aaa.com/foo/bar?baz=123#qwe .
//
// The returned path is always urldecoded and normalized,
// i.e. '//f%20obar/baz/../zzz' becomes '/f obar/zzz'.
//
// The returned value is valid until the next URI method call.
func (u *URI) Path() []byte {
path := u.path
if len(path) == 0 {
path = strSlash
}
return path
}
// SetPath sets URI path.
func (u *URI) SetPath(path string) {
u.pathOriginal = append(u.pathOriginal[:0], path...)
u.path = normalizePath(u.path, u.pathOriginal)
}
// SetPathBytes sets URI path.
func (u *URI) SetPathBytes(path []byte) {
u.pathOriginal = append(u.pathOriginal[:0], path...)
u.path = normalizePath(u.path, u.pathOriginal)
}
// PathOriginal returns the original path from requestURI passed to URI.Parse().
//
// The returned value is valid until the next URI method call.
func (u *URI) PathOriginal() []byte {
return u.pathOriginal
}
// Scheme returns URI scheme, i.e. http of http://aaa.com/foo/bar?baz=123#qwe .
//
// Returned scheme is always lowercased.
//
// The returned value is valid until the next URI method call.
func (u *URI) Scheme() []byte {
scheme := u.scheme
if len(scheme) == 0 {
scheme = strHTTP
}
return scheme
}
// SetScheme sets URI scheme, i.e. http, https, ftp, etc.
func (u *URI) SetScheme(scheme string) {
u.scheme = append(u.scheme[:0], scheme...)
lowercaseBytes(u.scheme)
}
// SetSchemeBytes sets URI scheme, i.e. http, https, ftp, etc.
func (u *URI) SetSchemeBytes(scheme []byte) {
u.scheme = append(u.scheme[:0], scheme...)
lowercaseBytes(u.scheme)
}
// Reset clears uri.
func (u *URI) Reset() {
u.pathOriginal = u.pathOriginal[:0]
u.scheme = u.scheme[:0]
u.path = u.path[:0]
u.queryString = u.queryString[:0]
u.hash = u.hash[:0]
u.host = u.host[:0]
u.queryArgs.Reset()
u.parsedQueryArgs = false
	// There is no need to reset u.fullURI, since the full uri
	// is recalculated on each call to FullURI().
	// There is no need to reset u.requestURI, since the requestURI
	// is recalculated on each call to RequestURI().
u.h = nil
}
// Host returns host part, i.e. aaa.com of http://aaa.com/foo/bar?baz=123#qwe .
//
// Host is always lowercased.
func (u *URI) Host() []byte {
if len(u.host) == 0 && u.h != nil {
u.host = append(u.host[:0], u.h.Host()...)
lowercaseBytes(u.host)
u.h = nil
}
return u.host
}
// SetHost sets host for the uri.
func (u *URI) SetHost(host string) {
u.host = append(u.host[:0], host...)
lowercaseBytes(u.host)
}
// SetHostBytes sets host for the uri.
func (u *URI) SetHostBytes(host []byte) {
u.host = append(u.host[:0], host...)
lowercaseBytes(u.host)
}
// Parse initializes URI from the given host and uri.
//
// host may be nil. In this case uri must contain a fully qualified uri,
// i.e. one with scheme and host. http is assumed if the scheme is omitted.
//
// uri may contain e.g. a RequestURI without scheme and host if host is non-empty.
func (u *URI) Parse(host, uri []byte) {
u.parse(host, uri, nil)
}
func (u *URI) parseQuick(uri []byte, h *RequestHeader, isTLS bool) {
u.parse(nil, uri, h)
if isTLS {
u.scheme = append(u.scheme[:0], strHTTPS...)
}
}
func (u *URI) parse(host, uri []byte, h *RequestHeader) {
u.Reset()
u.h = h
scheme, host, uri := splitHostURI(host, uri)
u.scheme = append(u.scheme, scheme...)
lowercaseBytes(u.scheme)
u.host = append(u.host, host...)
lowercaseBytes(u.host)
b := uri
queryIndex := bytes.IndexByte(b, '?')
fragmentIndex := bytes.IndexByte(b, '#')
// Ignore query in fragment part
if fragmentIndex >= 0 && queryIndex > fragmentIndex {
queryIndex = -1
}
if queryIndex < 0 && fragmentIndex < 0 {
u.pathOriginal = append(u.pathOriginal, b...)
u.path = normalizePath(u.path, u.pathOriginal)
return
}
if queryIndex >= 0 {
// Path is everything up to the start of the query
u.pathOriginal = append(u.pathOriginal, b[:queryIndex]...)
u.path = normalizePath(u.path, u.pathOriginal)
if fragmentIndex < 0 {
u.queryString = append(u.queryString, b[queryIndex+1:]...)
} else {
u.queryString = append(u.queryString, b[queryIndex+1:fragmentIndex]...)
u.hash = append(u.hash, b[fragmentIndex+1:]...)
}
return
}
// fragmentIndex >= 0 && queryIndex < 0
// Path is up to the start of fragment
u.pathOriginal = append(u.pathOriginal, b[:fragmentIndex]...)
u.path = normalizePath(u.path, u.pathOriginal)
u.hash = append(u.hash, b[fragmentIndex+1:]...)
}
func normalizePath(dst, src []byte) []byte {
dst = dst[:0]
dst = addLeadingSlash(dst, src)
dst = decodeArgAppendNoPlus(dst, src)
// remove duplicate slashes
b := dst
bSize := len(b)
for {
n := bytes.Index(b, strSlashSlash)
if n < 0 {
break
}
b = b[n:]
copy(b, b[1:])
b = b[:len(b)-1]
bSize--
}
dst = dst[:bSize]
// remove /./ parts
b = dst
for {
n := bytes.Index(b, strSlashDotSlash)
if n < 0 {
break
}
nn := n + len(strSlashDotSlash) - 1
copy(b[n:], b[nn:])
b = b[:len(b)-nn+n]
}
// remove /foo/../ parts
for {
n := bytes.Index(b, strSlashDotDotSlash)
if n < 0 {
break
}
nn := bytes.LastIndexByte(b[:n], '/')
if nn < 0 {
nn = 0
}
n += len(strSlashDotDotSlash) - 1
copy(b[nn:], b[n:])
b = b[:len(b)-n+nn]
}
// remove trailing /foo/..
n := bytes.LastIndex(b, strSlashDotDot)
if n >= 0 && n+len(strSlashDotDot) == len(b) {
nn := bytes.LastIndexByte(b[:n], '/')
if nn < 0 {
return strSlash
}
b = b[:nn+1]
}
return b
}
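// Worked example (editor's note): normalizePath(nil, []byte("//f%20obar/baz/../zzz"))
// returns "/f obar/zzz": the duplicate leading slashes collapse, "%20" is
// decoded to a space, and "baz/../" cancels out, matching the behavior
// documented on URI.Path above.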
// RequestURI returns RequestURI - i.e. URI without Scheme and Host.
func (u *URI) RequestURI() []byte {
dst := appendQuotedPath(u.requestURI[:0], u.Path())
if u.queryArgs.Len() > 0 {
dst = append(dst, '?')
dst = u.queryArgs.AppendBytes(dst)
} else if len(u.queryString) > 0 {
dst = append(dst, '?')
dst = append(dst, u.queryString...)
}
if len(u.hash) > 0 {
dst = append(dst, '#')
dst = append(dst, u.hash...)
}
u.requestURI = dst
return u.requestURI
}
// LastPathSegment returns the last part of uri path after '/'.
//
// Examples:
//
// * For /foo/bar/baz.html path returns baz.html.
// * For /foo/bar/ returns empty byte slice.
// * For /foobar.js returns foobar.js.
func (u *URI) LastPathSegment() []byte {
path := u.Path()
n := bytes.LastIndexByte(path, '/')
if n < 0 {
return path
}
return path[n+1:]
}
// Update updates uri.
//
// The following newURI types are accepted:
//
// * Absolute, i.e. http://foobar.com/aaa/bb?cc . In this case the original
// uri is replaced by newURI.
// * Absolute without scheme, i.e. //foobar.com/aaa/bb?cc. In this case
// the original scheme is preserved.
// * Missing host, i.e. /aaa/bb?cc . In this case only RequestURI part
// of the original uri is replaced.
// * Relative path, i.e. xx?yy=abc . In this case the original RequestURI
// is updated according to the new relative path.
func (u *URI) Update(newURI string) {
u.UpdateBytes(s2b(newURI))
}
// UpdateBytes updates uri.
//
// The following newURI types are accepted:
//
// * Absolute, i.e. http://foobar.com/aaa/bb?cc . In this case the original
// uri is replaced by newURI.
// * Absolute without scheme, i.e. //foobar.com/aaa/bb?cc. In this case
// the original scheme is preserved.
// * Missing host, i.e. /aaa/bb?cc . In this case only RequestURI part
// of the original uri is replaced.
// * Relative path, i.e. xx?yy=abc . In this case the original RequestURI
// is updated according to the new relative path.
func (u *URI) UpdateBytes(newURI []byte) {
u.requestURI = u.updateBytes(newURI, u.requestURI)
}
func (u *URI) updateBytes(newURI, buf []byte) []byte {
if len(newURI) == 0 {
return buf
}
n := bytes.Index(newURI, strSlashSlash)
if n >= 0 {
// absolute uri
var b [32]byte
schemeOriginal := b[:0]
if len(u.scheme) > 0 {
schemeOriginal = append([]byte(nil), u.scheme...)
}
u.Parse(nil, newURI)
if len(schemeOriginal) > 0 && len(u.scheme) == 0 {
u.scheme = append(u.scheme[:0], schemeOriginal...)
}
return buf
}
if newURI[0] == '/' {
// uri without host
buf = u.appendSchemeHost(buf[:0])
buf = append(buf, newURI...)
u.Parse(nil, buf)
return buf
}
// relative path
switch newURI[0] {
case '?':
// query string only update
u.SetQueryStringBytes(newURI[1:])
return append(buf[:0], u.FullURI()...)
case '#':
// update only hash
u.SetHashBytes(newURI[1:])
return append(buf[:0], u.FullURI()...)
default:
// update the last path part after the slash
path := u.Path()
n = bytes.LastIndexByte(path, '/')
if n < 0 {
panic("BUG: path must contain at least one slash")
}
buf = u.appendSchemeHost(buf[:0])
buf = appendQuotedPath(buf, path[:n+1])
buf = append(buf, newURI...)
u.Parse(nil, buf)
return buf
}
}
// FullURI returns full uri in the form {Scheme}://{Host}{RequestURI}#{Hash}.
func (u *URI) FullURI() []byte {
u.fullURI = u.AppendBytes(u.fullURI[:0])
return u.fullURI
}
// AppendBytes appends full uri to dst and returns the extended dst.
func (u *URI) AppendBytes(dst []byte) []byte {
dst = u.appendSchemeHost(dst)
return append(dst, u.RequestURI()...)
}
func (u *URI) appendSchemeHost(dst []byte) []byte {
dst = append(dst, u.Scheme()...)
dst = append(dst, strColonSlashSlash...)
return append(dst, u.Host()...)
}
// WriteTo writes full uri to w.
//
// WriteTo implements io.WriterTo interface.
func (u *URI) WriteTo(w io.Writer) (int64, error) {
n, err := w.Write(u.FullURI())
return int64(n), err
}
// String returns full uri.
func (u *URI) String() string {
return string(u.FullURI())
}
func splitHostURI(host, uri []byte) ([]byte, []byte, []byte) {
n := bytes.Index(uri, strSlashSlash)
if n < 0 {
return strHTTP, host, uri
}
scheme := uri[:n]
if bytes.IndexByte(scheme, '/') >= 0 {
return strHTTP, host, uri
}
if len(scheme) > 0 && scheme[len(scheme)-1] == ':' {
scheme = scheme[:len(scheme)-1]
}
n += len(strSlashSlash)
uri = uri[n:]
n = bytes.IndexByte(uri, '/')
if n < 0 {
		// A hack for bogus urls like foobar.com?a=b without a
		// slash after the host.
if n = bytes.IndexByte(uri, '?'); n >= 0 {
return scheme, uri[:n], uri[n:]
}
return scheme, uri, strSlash
}
return scheme, uri[:n], uri[n:]
}
// QueryArgs returns query args.
func (u *URI) QueryArgs() *Args {
u.parseQueryArgs()
return &u.queryArgs
}
func (u *URI) parseQueryArgs() {
if u.parsedQueryArgs {
return
}
u.queryArgs.ParseBytes(u.queryString)
u.parsedQueryArgs = true
}

12
vendor/github.com/valyala/fasthttp/uri_unix.go generated vendored Normal file

@ -0,0 +1,12 @@
// +build !windows
package fasthttp
func addLeadingSlash(dst, src []byte) []byte {
// add leading slash for unix paths
if len(src) == 0 || src[0] != '/' {
dst = append(dst, '/')
}
return dst
}

12
vendor/github.com/valyala/fasthttp/uri_windows.go generated vendored Normal file

@ -0,0 +1,12 @@
// +build windows
package fasthttp
func addLeadingSlash(dst, src []byte) []byte {
// zero length and "C:/" case
if len(src) == 0 || (len(src) > 2 && src[1] != ':') {
dst = append(dst, '/')
}
return dst
}

71
vendor/github.com/valyala/fasthttp/userdata.go generated vendored Normal file

@ -0,0 +1,71 @@
package fasthttp
import (
"io"
)
type userDataKV struct {
key []byte
value interface{}
}
type userData []userDataKV
func (d *userData) Set(key string, value interface{}) {
args := *d
n := len(args)
for i := 0; i < n; i++ {
kv := &args[i]
if string(kv.key) == key {
kv.value = value
return
}
}
c := cap(args)
if c > n {
args = args[:n+1]
kv := &args[n]
kv.key = append(kv.key[:0], key...)
kv.value = value
*d = args
return
}
kv := userDataKV{}
kv.key = append(kv.key[:0], key...)
kv.value = value
*d = append(args, kv)
}
func (d *userData) SetBytes(key []byte, value interface{}) {
d.Set(b2s(key), value)
}
func (d *userData) Get(key string) interface{} {
args := *d
n := len(args)
for i := 0; i < n; i++ {
kv := &args[i]
if string(kv.key) == key {
return kv.value
}
}
return nil
}
func (d *userData) GetBytes(key []byte) interface{} {
return d.Get(b2s(key))
}
func (d *userData) Reset() {
args := *d
n := len(args)
for i := 0; i < n; i++ {
v := args[i].value
if vc, ok := v.(io.Closer); ok {
vc.Close()
}
}
*d = (*d)[:0]
}
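// Usage sketch (editor's note): this store backs RequestCtx's user-value
// API in this fasthttp version, e.g.
//
//	ctx.SetUserValue("requestID", id) // stored via userData.Set
//	v := ctx.UserValue("requestID")   // looked up via userData.Get
//
// Values implementing io.Closer are closed when the store is Reset.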

237
vendor/github.com/valyala/fasthttp/workerpool.go generated vendored Normal file

@ -0,0 +1,237 @@
package fasthttp
import (
"net"
"runtime"
"strings"
"sync"
"time"
)
// workerPool serves incoming connections via a pool of workers
// in FILO order, i.e. the most recently stopped worker will serve the next
// incoming connection.
//
// Such a scheme keeps CPU caches hot (in theory).
type workerPool struct {
// Function for serving server connections.
// It must leave c unclosed.
WorkerFunc ServeHandler
MaxWorkersCount int
LogAllErrors bool
MaxIdleWorkerDuration time.Duration
Logger Logger
lock sync.Mutex
workersCount int
mustStop bool
ready []*workerChan
stopCh chan struct{}
workerChanPool sync.Pool
connState func(net.Conn, ConnState)
}
type workerChan struct {
lastUseTime time.Time
ch chan net.Conn
}
func (wp *workerPool) Start() {
if wp.stopCh != nil {
panic("BUG: workerPool already started")
}
wp.stopCh = make(chan struct{})
stopCh := wp.stopCh
go func() {
var scratch []*workerChan
for {
wp.clean(&scratch)
select {
case <-stopCh:
return
default:
time.Sleep(wp.getMaxIdleWorkerDuration())
}
}
}()
}
func (wp *workerPool) Stop() {
if wp.stopCh == nil {
panic("BUG: workerPool wasn't started")
}
close(wp.stopCh)
wp.stopCh = nil
// Stop all the workers waiting for incoming connections.
// Do not wait for busy workers - they will stop after
// serving the connection and noticing wp.mustStop = true.
wp.lock.Lock()
ready := wp.ready
for i, ch := range ready {
ch.ch <- nil
ready[i] = nil
}
wp.ready = ready[:0]
wp.mustStop = true
wp.lock.Unlock()
}
func (wp *workerPool) getMaxIdleWorkerDuration() time.Duration {
if wp.MaxIdleWorkerDuration <= 0 {
return 10 * time.Second
}
return wp.MaxIdleWorkerDuration
}
func (wp *workerPool) clean(scratch *[]*workerChan) {
maxIdleWorkerDuration := wp.getMaxIdleWorkerDuration()
// Clean least recently used workers if they didn't serve connections
// for more than maxIdleWorkerDuration.
currentTime := time.Now()
wp.lock.Lock()
ready := wp.ready
n := len(ready)
i := 0
for i < n && currentTime.Sub(ready[i].lastUseTime) > maxIdleWorkerDuration {
i++
}
*scratch = append((*scratch)[:0], ready[:i]...)
if i > 0 {
m := copy(ready, ready[i:])
for i = m; i < n; i++ {
ready[i] = nil
}
wp.ready = ready[:m]
}
wp.lock.Unlock()
// Notify obsolete workers to stop.
// This notification must be outside the wp.lock, since ch.ch
// may be blocking and may consume a lot of time if many workers
// are located on non-local CPUs.
tmp := *scratch
for i, ch := range tmp {
ch.ch <- nil
tmp[i] = nil
}
}
func (wp *workerPool) Serve(c net.Conn) bool {
ch := wp.getCh()
if ch == nil {
return false
}
ch.ch <- c
return true
}
var workerChanCap = func() int {
// Use blocking workerChan if GOMAXPROCS=1.
// This immediately switches Serve to WorkerFunc, which results
// in higher performance (under go1.5 at least).
if runtime.GOMAXPROCS(0) == 1 {
return 0
}
// Use non-blocking workerChan if GOMAXPROCS>1,
// since otherwise the Serve caller (Acceptor) may lag accepting
// new connections if WorkerFunc is CPU-bound.
return 1
}()
func (wp *workerPool) getCh() *workerChan {
var ch *workerChan
createWorker := false
wp.lock.Lock()
ready := wp.ready
n := len(ready) - 1
if n < 0 {
if wp.workersCount < wp.MaxWorkersCount {
createWorker = true
wp.workersCount++
}
} else {
ch = ready[n]
ready[n] = nil
wp.ready = ready[:n]
}
wp.lock.Unlock()
if ch == nil {
if !createWorker {
return nil
}
vch := wp.workerChanPool.Get()
if vch == nil {
vch = &workerChan{
ch: make(chan net.Conn, workerChanCap),
}
}
ch = vch.(*workerChan)
go func() {
wp.workerFunc(ch)
wp.workerChanPool.Put(vch)
}()
}
return ch
}
func (wp *workerPool) release(ch *workerChan) bool {
ch.lastUseTime = time.Now()
wp.lock.Lock()
if wp.mustStop {
wp.lock.Unlock()
return false
}
wp.ready = append(wp.ready, ch)
wp.lock.Unlock()
return true
}
func (wp *workerPool) workerFunc(ch *workerChan) {
var c net.Conn
var err error
for c = range ch.ch {
if c == nil {
break
}
if err = wp.WorkerFunc(c); err != nil && err != errHijacked {
errStr := err.Error()
if wp.LogAllErrors || !(strings.Contains(errStr, "broken pipe") ||
strings.Contains(errStr, "reset by peer") ||
strings.Contains(errStr, "request headers: small read buffer") ||
strings.Contains(errStr, "i/o timeout")) {
wp.Logger.Printf("error when serving connection %q<->%q: %s", c.LocalAddr(), c.RemoteAddr(), err)
}
}
if err == errHijacked {
wp.connState(c, StateHijacked)
} else {
c.Close()
wp.connState(c, StateClosed)
}
c = nil
if !wp.release(ch) {
break
}
}
wp.lock.Lock()
wp.workersCount--
wp.lock.Unlock()
}