mirror of https://github.com/imgproxy/imgproxy.git synced 2025-03-22 20:21:28 +02:00

Update vendored packages

DarthSim 2018-12-09 17:06:17 +06:00
parent ccc22e45eb
commit 69505cb6d2
363 changed files with 18292 additions and 9440 deletions

Gopkg.lock (generated)

@ -2,7 +2,7 @@
[[projects]]
digest = "1:f759cc0befb2617d0ef11edaa395e4b79b893f263bcdc7b97721fa7b7fd0c339"
digest = "1:c7931499c1ea384e8ddb3d48cb60ecda4136eb84ca8db0b358c5781f5863b9d5"
name = "cloud.google.com/go"
packages = [
"compute/metadata",
@ -14,8 +14,8 @@
"storage",
]
pruneopts = "NUT"
revision = "debcad1964693daf8ef4bc06292d7e828e075130"
version = "v0.31.0"
revision = "0ebda48a7f143b1cce9eb37a8c1106ac762a3430"
version = "v0.34.0"
[[projects]]
digest = "1:3ccf8ba7afe02fd470c4f07d6eea4d0e6875da3d129f95b925f2003ce5dd2024"
@ -26,7 +26,7 @@
version = "1.0.0"
[[projects]]
digest = "1:810e9eeb47b2743479b201151ac5bd1ae019ff30ba7827a2fcb7a7a50c55164e"
digest = "1:925b994ff7dbd4ccd8ceeb31af5221c72445eb40e0cfdbb70b70a89c2ba37625"
name = "github.com/aws/aws-sdk-go"
packages = [
"aws",
@ -38,6 +38,7 @@
"aws/credentials",
"aws/credentials/ec2rolecreds",
"aws/credentials/endpointcreds",
"aws/credentials/processcreds",
"aws/credentials/stscreds",
"aws/csm",
"aws/defaults",
@ -46,6 +47,8 @@
"aws/request",
"aws/session",
"aws/signer/v4",
"internal/ini",
"internal/s3err",
"internal/sdkio",
"internal/sdkrand",
"internal/sdkuri",
@ -62,8 +65,8 @@
"service/sts",
]
pruneopts = "NUT"
revision = "cfcda8304585604aabf1f7f8f7ce67b55029d0ca"
version = "v1.15.47"
revision = "c4e0e593d5f18b478f17ff0e86b56d41a70ced2d"
version = "v1.16.2"
[[projects]]
branch = "master"
@ -74,15 +77,18 @@
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
[[projects]]
digest = "1:edc8f0a313d7949ed1a75ab541d495aaa7654756a7a7be6554690d18d943565a"
digest = "1:dded9e568aec57d91db79b0a7f2f091899c1260869daccb7eaa05e8ce81ffefd"
name = "github.com/bugsnag/bugsnag-go"
packages = [
".",
"device",
"errors",
"headers",
"sessions",
]
pruneopts = "NUT"
revision = "3f5889f222e9c07aa1f62c5e8c202e402ce574cd"
version = "v1.3.2"
revision = "834d8fee64233ea79b0389ad7a5a438261788f66"
version = "v1.4.0"
[[projects]]
digest = "1:1f5a99725a5e5d17691e5cc04d9ca729673528624c72d25ec1d7ec785836cea5"
@ -100,14 +106,6 @@
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
version = "v1.1.1"
[[projects]]
digest = "1:d32823ccbd16481c42356a1ae7f2284761da19cae3131b554a0b7c086a650292"
name = "github.com/go-ini/ini"
packages = ["."]
pruneopts = "NUT"
revision = "7b294651033cd7d9e7f0d9ffa1b75ed1e198e737"
version = "v1.38.3"
[[projects]]
digest = "1:cb4e216bd9f58866f42dc65893455b24f879b026fdaa1ecc3aafff625fdb5a66"
name = "github.com/go-ole/go-ole"
@ -119,6 +117,14 @@
revision = "a41e3c4b706f6ae8dfbff342b06e40fa4d2d0506"
version = "v1.2.1"
[[projects]]
digest = "1:c967237d234df97d560a82b9e3166234b88fb947676af4e3c897a7d613a287a6"
name = "github.com/gofrs/uuid"
packages = ["."]
pruneopts = "NUT"
revision = "7077aa61129615a0d7f45c49101cd011ab221c27"
version = "v3.1.2"
[[projects]]
digest = "1:5ae5b54d7cd695bafa92bf426b7c3d046f907260ec0fcb4fe5c368d937597c00"
name = "github.com/golang/protobuf"
@ -135,20 +141,20 @@
version = "v1.2.0"
[[projects]]
digest = "1:a1578f7323eca2b88021fdc9a79a99833d40b12c32a5ea4f284e2fad19ea2657"
digest = "1:56a1f3949ebb7fa22fa6b4e4ac0fe0f77cc4faee5b57413e6fa9199a8458faf1"
name = "github.com/google/uuid"
packages = ["."]
pruneopts = "NUT"
revision = "d460ce9f8df2e77fb1ba55ca87fafed96c607494"
version = "v1.0.0"
revision = "9b3b1e0f5f99ae461456d768e7d301a7acdaa2d8"
version = "v1.1.0"
[[projects]]
digest = "1:fe852c57b4fc4d11e6ef79bce1e930ee2f2f7d148b370afef8f8d012a80960ea"
digest = "1:fb9f5147a9d6fc69057d05b0739bcd79562c55db143c3bdacfe1c9c6129461f6"
name = "github.com/googleapis/gax-go"
packages = ["."]
pruneopts = "NUT"
revision = "317e0006254c44a0ac427cc52a0e083ff0b9622f"
version = "v2.0.0"
revision = "b001040cd31805261cbd978842099e326dfa857b"
version = "v2.0.2"
[[projects]]
digest = "1:1d879fa52622c803ae26e4960efd41e299067885b19c9c536352f0d41bcf8c7e"
@ -159,11 +165,11 @@
version = "v0.4.0"
[[projects]]
digest = "1:ac6d01547ec4f7f673311b4663909269bfb8249952de3279799289467837c3cc"
digest = "1:1f2aebae7e7c856562355ec0198d8ca2fa222fb05e5b1b66632a1fce39631885"
name = "github.com/jmespath/go-jmespath"
packages = ["."]
pruneopts = "NUT"
revision = "0b12d6b5"
revision = "c2b33e84"
[[projects]]
branch = "master"
@ -183,11 +189,11 @@
[[projects]]
branch = "master"
digest = "1:3b49114ad9128e431cf2e14ca21f753d523a7e5ce6393f59aaf6d886cce216f5"
digest = "1:91ee858ec7fc39208597251d5c4a1f2ef80524b51ed7e04eb1764f462d79dbe7"
name = "github.com/matoous/go-nanoid"
packages = ["."]
pruneopts = "NUT"
revision = "3de1538a83bcb1749aa54482113e5cd06e4fb938"
revision = "eab626deece6c9806cfbe2290859eb31a934edb0"
[[projects]]
digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"
@ -230,7 +236,7 @@
version = "v1.0.0"
[[projects]]
digest = "1:aa2da1df3327c3a338bb42f978407c07de74cd0a5bef35e9411881dffd444214"
digest = "1:7c7cfeecd2b7147bcfec48a4bf622b4879e26aec145a9e373ce51d0c23b16f6b"
name = "github.com/prometheus/client_golang"
packages = [
"prometheus",
@ -238,8 +244,8 @@
"prometheus/promhttp",
]
pruneopts = "NUT"
revision = "1cafe34db7fdec6022e17e00e1c1ea501022f3e4"
version = "v0.9.0"
revision = "505eaef017263e299324067d40ca2c48f6a2cf50"
version = "v0.9.2"
[[projects]]
branch = "master"
@ -259,7 +265,7 @@
"model",
]
pruneopts = "NUT"
revision = "7e9e6cabbd393fc208072eedef99188d0ce788b6"
revision = "4724e9255275ce38f7179b2478abeae4e28c904f"
[[projects]]
branch = "master"
@ -272,7 +278,7 @@
"xfs",
]
pruneopts = "NUT"
revision = "185b4288413d2a0dd0806f78c90dde719829e5ae"
revision = "1dc9a6cbc91aacc3e8b2d63db4d2e957a5394ac4"
[[projects]]
digest = "1:d0c064d8db7da0a052c5a24ceae9c36f2ed345f26f55e58268ee8a5308b5a103"
@ -283,8 +289,8 @@
"mem",
]
pruneopts = "NUT"
revision = "3ec50d2876a36047b2ca39f955ba88fb7a455e92"
version = "v2.18.10"
revision = "0f70a4a06f7a1039305ec9b3ba9c2800f8929fba"
version = "v2.18.11"
[[projects]]
digest = "1:b31059dac028ff111793a8345eacf0f99d0f1150ead34ebf32fdd2b7d54c2d45"
@ -322,7 +328,8 @@
version = "v0.18.0"
[[projects]]
digest = "1:16d8d746fb42f8318537e77dbf22745c180db4f5b61b59a89e0fb8101bb3303e"
branch = "master"
digest = "1:45cffd5d7beb358f820cf86c094126bb55d9e3f022bc2fe429086742aa70d620"
name = "golang.org/x/image"
packages = [
"bmp",
@ -332,28 +339,29 @@
"webp",
]
pruneopts = "NUT"
revision = "334384d9e19178a0488c9360d94d183c1ef0f711"
revision = "cd38e8056d9b27bb2f265effa37fb0ea6b8a7f0f"
[[projects]]
digest = "1:cb4725f57703f163dbafad68e93645976cedb9e609cf180b6b39667e1c96489b"
branch = "master"
digest = "1:f9eb627e1450a0417d409f65fe6efdc8b8c2394cbe31975e2829a3b07ad8e525"
name = "golang.org/x/net"
packages = [
"context",
"context/ctxhttp",
"http/httpguts",
"http2",
"http2/hpack",
"idna",
"internal/timeseries",
"lex/httplex",
"netutil",
"trace",
]
pruneopts = "NUT"
revision = "0a9397675ba34b2845f758fe3cd68828369c6517"
revision = "610586996380ceef02dd726cc09df7e00a3f8e56"
[[projects]]
branch = "master"
digest = "1:b0fef33b00740f7eeb5198f67ee1642d8d2560e9b428df7fb5f69fb140f5c4d0"
digest = "1:5364323a31838682c280a6bae0efe47991de96b9cbfeef4f27b3ca2353eb7ff8"
name = "golang.org/x/oauth2"
packages = [
".",
@ -363,7 +371,7 @@
"jwt",
]
pruneopts = "NUT"
revision = "9dcd33a902f40452422c2367fefcb95b54f9f8f8"
revision = "d668ce993890a79bda886613ee587a69dd5da7a6"
[[projects]]
branch = "master"
@ -375,14 +383,14 @@
[[projects]]
branch = "master"
digest = "1:c6c74d739f3e96cf1cb83c9ffdb8d68581d18dcc458c65428b6befa08830f4ed"
digest = "1:b9652309d4d5820d01ef9aed88f46dd6b8021b3eaa4d5f0f6808e2d84386040d"
name = "golang.org/x/sys"
packages = [
"unix",
"windows",
]
pruneopts = "NUT"
revision = "d69651ed3497faee15a5363a89578e9991f6d5e2"
revision = "ad97f365e15054527b467150f898ec66d7f81e14"
[[projects]]
digest = "1:e7071ed636b5422cc51c0e3a6cebc229d6c9fffc528814b519a980641422d619"
@ -409,7 +417,7 @@
[[projects]]
branch = "master"
digest = "1:909df9070de231de88b68b826a91025fd9fddc412f6906f999925d42a472b4cd"
digest = "1:23d9fc277640dc6c36702437a57f0ee033b9f7c9f05a7a425abc7df28f6dd259"
name = "google.golang.org/api"
packages = [
"gensupport",
@ -424,10 +432,10 @@
"transport/http/internal/propagation",
]
pruneopts = "NUT"
revision = "1d582fd0359e47ca7d8183d23e4a4166bfe20bc1"
revision = "1a5ef82f9af45ef51c486291ef2b0a16d82fdb95"
[[projects]]
digest = "1:e2da54c7866453ac5831c61c7ec5d887f39328cac088c806553303bff4048e6f"
digest = "1:b63b351b57e64ae8aec1af030dc5e74a2676fcda62102f006ae4411fac1b04c8"
name = "google.golang.org/appengine"
packages = [
".",
@ -442,12 +450,12 @@
"urlfetch",
]
pruneopts = "NUT"
revision = "ae0ab99deb4dc413a2b4bd6c8bdd0eb67f1e4d06"
version = "v1.2.0"
revision = "4a4468ece617fc8205e99368fa2200e9d1fad421"
version = "v1.3.0"
[[projects]]
branch = "master"
digest = "1:a58c1a53a58f69e1b9396b18b2a9cf7dd591a12d0a8f6a57322a7a6571a4c610"
digest = "1:a7d48ca460ca1b4f6ccd8c95502443afa05df88aee84de7dbeb667a8754e8fa6"
name = "google.golang.org/genproto"
packages = [
"googleapis/api/annotations",
@ -456,27 +464,32 @@
"googleapis/rpc/status",
]
pruneopts = "NUT"
revision = "b69ba1387ce2108ac9bc8e8e5e5a46e7d5c72313"
revision = "bd91e49a0898e27abb88c339b432fa53d7497ac0"
[[projects]]
digest = "1:2f91d3e11b666570f8c923912f1cc8cf2f0c6b7371b2687ee67a8f73f08c6272"
digest = "1:638e6e596d67d0a0c8aeb76ebdcf73561b701ea43f21963b1db231d96ed7db68"
name = "google.golang.org/grpc"
packages = [
".",
"balancer",
"balancer/base",
"balancer/roundrobin",
"binarylog/grpc_binarylog_v1",
"codes",
"connectivity",
"credentials",
"credentials/internal",
"encoding",
"encoding/proto",
"grpclog",
"internal",
"internal/backoff",
"internal/binarylog",
"internal/channelz",
"internal/envconfig",
"internal/grpcrand",
"internal/grpcsync",
"internal/syscall",
"internal/transport",
"keepalive",
"metadata",
@ -490,8 +503,8 @@
"tap",
]
pruneopts = "NUT"
revision = "2e463a05d100327ca47ac218281906921038fd95"
version = "v1.16.0"
revision = "df014850f6dee74ba2fc94874043a9f3f75fbfd8"
version = "v1.17.0"
[solve-meta]
analyzer-name = "dep"


@ -20,6 +20,7 @@
package metadata // import "cloud.google.com/go/compute/metadata"
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
@ -31,9 +32,6 @@ import (
"strings"
"sync"
"time"
"golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
)
const (
@ -143,7 +141,7 @@ func testOnGCE() bool {
go func() {
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
req.Header.Set("User-Agent", userAgent)
res, err := ctxhttp.Do(ctx, defaultClient.hc, req)
res, err := defaultClient.hc.Do(req.WithContext(ctx))
if err != nil {
resc <- false
return
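
For reference, a minimal sketch (not part of this commit) of the plain net/http pattern the vendored metadata client switches to now that golang.org/x/net/context and ctxhttp are no longer imported:

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// fetchStatus binds a context to a standard *http.Request; client.Do then
// honors the context's deadline, which is what ctxhttp.Do used to wrap.
func fetchStatus(client *http.Client, url string) (int, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return 0, err
	}
	res, err := client.Do(req.WithContext(ctx))
	if err != nil {
		return 0, err
	}
	defer res.Body.Close()
	return res.StatusCode, nil
}

func main() {
	// Placeholder URL; the vendored code targets the GCE metadata endpoint.
	code, err := fetchStatus(http.DefaultClient, "http://example.com")
	fmt.Println(code, err)
}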


@ -22,10 +22,10 @@
package iam
import (
"context"
"time"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
pb "google.golang.org/genproto/googleapis/iam/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"


@ -15,10 +15,10 @@
package internal
import (
"context"
"time"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
)
// Retry calls the supplied function f repeatedly according to the provided


@ -1,30 +0,0 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.8
package trace
import (
"golang.org/x/net/context"
)
// OpenCensus only supports go 1.8 and higher.
func StartSpan(ctx context.Context, _ string) context.Context {
return ctx
}
func EndSpan(context.Context, error) {
}


@ -12,13 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.8
package trace
import (
"context"
"go.opencensus.io/trace"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
"google.golang.org/genproto/googleapis/rpc/code"
"google.golang.org/grpc/status"


@ -15,11 +15,11 @@
package storage
import (
"context"
"net/http"
"reflect"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
raw "google.golang.org/api/storage/v1"
)


@ -15,6 +15,7 @@
package storage
import (
"context"
"fmt"
"net/http"
"reflect"
@ -22,7 +23,6 @@ import (
"cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
raw "google.golang.org/api/storage/v1"


@ -15,11 +15,11 @@
package storage
import (
"context"
"errors"
"fmt"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
raw "google.golang.org/api/storage/v1"
)


@ -1,30 +0,0 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.7
package storage
import (
"context"
"net/http"
)
func withContext(ctx context.Context, r *http.Request) *http.Request {
return r.WithContext(ctx)
}
func goHTTPUncompressed(res *http.Response) bool {
return res.Uncompressed
}


@ -15,9 +15,10 @@
package storage
import (
"context"
"cloud.google.com/go/iam"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
raw "google.golang.org/api/storage/v1"
iampb "google.golang.org/genproto/googleapis/iam/v1"
)


@ -15,9 +15,10 @@
package storage
import (
"context"
"cloud.google.com/go/internal"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
)
// runWithRetry calls the function until it returns nil or a non-retryable error, or


@ -1,33 +0,0 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.7
package storage
import (
"net/http"
)
func withContext(_ interface{}, r *http.Request) *http.Request {
// In Go 1.6 and below, ignore the context.
return r
}
// Go 1.6 doesn't have http.Response.Uncompressed, so we can't know whether the Go
// HTTP stack uncompressed a gzip file. As a good approximation, assume that
// the lack of a Content-Length header means that it did uncompress.
func goHTTPUncompressed(res *http.Response) bool {
return res.Header.Get("Content-Length") == ""
}


@ -15,12 +15,12 @@
package storage
import (
"context"
"errors"
"fmt"
"regexp"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
raw "google.golang.org/api/storage/v1"
)


@ -15,6 +15,7 @@
package storage
import (
"context"
"errors"
"fmt"
"hash/crc32"
@ -28,7 +29,6 @@ import (
"time"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
)
@ -107,7 +107,7 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
if err != nil {
return nil, err
}
req = withContext(ctx, req)
req = req.WithContext(ctx)
if o.userProject != "" {
req.Header.Set("X-Goog-User-Project", o.userProject)
}
@ -202,7 +202,7 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
// The problem with the last two cases is that the CRC will not match -- GCS
// computes it on the compressed contents, but we compute it on the
// uncompressed contents.
if length != 0 && !goHTTPUncompressed(res) && !uncompressedByServer(res) {
if length != 0 && !res.Uncompressed && !uncompressedByServer(res) {
crc, checkCRC = parseCRC32c(res)
}
}
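
The deleted goHTTPUncompressed shim only existed for pre-1.7 Go; a brief self-contained illustration (placeholder URL, not part of the commit) of the field the reader now checks directly:

package main

import (
	"fmt"
	"net/http"
)

// wasTransparentlyGunzipped reports whether the Go HTTP transport decompressed
// the body itself, in which case a stored CRC refers to the compressed bytes,
// so the reader above skips checksum verification.
func wasTransparentlyGunzipped(url string) (bool, error) {
	res, err := http.Get(url)
	if err != nil {
		return false, err
	}
	defer res.Body.Close()
	return res.Uncompressed, nil
}

func main() {
	ok, err := wasTransparentlyGunzipped("http://example.com")
	fmt.Println(ok, err)
}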


@ -16,6 +16,7 @@ package storage
import (
"bytes"
"context"
"crypto"
"crypto/rand"
"crypto/rsa"
@ -39,7 +40,6 @@ import (
"cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/trace"
"cloud.google.com/go/internal/version"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
"google.golang.org/api/option"
raw "google.golang.org/api/storage/v1"


@ -15,6 +15,7 @@
package storage
import (
"context"
"encoding/base64"
"errors"
"fmt"
@ -22,7 +23,6 @@ import (
"sync"
"unicode/utf8"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
raw "google.golang.org/api/storage/v1"
)


@ -23,28 +23,27 @@ func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
case reflect.Struct:
buf.WriteString("{\n")
names := []string{}
for i := 0; i < v.Type().NumField(); i++ {
name := v.Type().Field(i).Name
f := v.Field(i)
if name[0:1] == strings.ToLower(name[0:1]) {
ft := v.Type().Field(i)
fv := v.Field(i)
if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
continue // ignore unexported fields
}
if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() {
if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
continue // ignore unset fields
}
names = append(names, name)
}
for i, n := range names {
val := v.FieldByName(n)
buf.WriteString(strings.Repeat(" ", indent+2))
buf.WriteString(n + ": ")
stringValue(val, indent+2, buf)
buf.WriteString(ft.Name + ": ")
if i < len(names)-1 {
buf.WriteString(",\n")
if tag := ft.Tag.Get("sensitive"); tag == "true" {
buf.WriteString("<sensitive>")
} else {
stringValue(fv, indent+2, buf)
}
buf.WriteString(",\n")
}
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
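
A self-contained sketch of how the sensitive struct tag checked above can be declared and read via reflection (the LoginInput type is invented for illustration; aws-sdk-go's generated API models tag their secret fields the same way):

package main

import (
	"fmt"
	"reflect"
)

type LoginInput struct {
	Username string
	Password string `sensitive:"true"`
}

func main() {
	t := reflect.TypeOf(LoginInput{})
	for i := 0; i < t.NumField(); i++ {
		ft := t.Field(i)
		if ft.Tag.Get("sensitive") == "true" {
			// Mirrors the masking branch in stringValue above.
			fmt.Printf("%s: <sensitive>\n", ft.Name)
		} else {
			fmt.Printf("%s: printed as usual\n", ft.Name)
		}
	}
}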


@ -45,8 +45,8 @@ type Config struct {
// that overrides the default generated endpoint for a client. Set this
// to `""` to use the default generated endpoint.
//
// @note You must still provide a `Region` value when specifying an
// endpoint for a client.
// Note: You must still provide a `Region` value when specifying an
// endpoint for a client.
Endpoint *string
// The resolver to use for looking up endpoints for AWS service clients
@ -65,8 +65,8 @@ type Config struct {
// noted. A full list of regions is found in the "Regions and Endpoints"
// document.
//
// @see http://docs.aws.amazon.com/general/latest/gr/rande.html
// AWS Regions and Endpoints
// See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS
// Regions and Endpoints.
Region *string
// Set this to `true` to disable SSL when sending requests. Defaults
@ -120,9 +120,10 @@ type Config struct {
// will use virtual hosted bucket addressing when possible
// (`http://BUCKET.s3.amazonaws.com/KEY`).
//
// @note This configuration option is specific to the Amazon S3 service.
// @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
// Amazon S3: Virtual Hosting of Buckets
// Note: This configuration option is specific to the Amazon S3 service.
//
// See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
// for Amazon S3: Virtual Hosting of Buckets
S3ForcePathStyle *bool
// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
@ -223,6 +224,28 @@ type Config struct {
// Key: aws.String("//foo//bar//moo"),
// })
DisableRestProtocolURICleaning *bool
// EnableEndpointDiscovery will allow for endpoint discovery on operations that
// have the definition in its model. By default, endpoint discovery is off.
//
// Example:
// sess := session.Must(session.NewSession(&aws.Config{
// EnableEndpointDiscovery: aws.Bool(true),
// }))
//
// svc := s3.New(sess)
// out, err := svc.GetObject(&s3.GetObjectInput {
// Bucket: aws.String("bucketname"),
// Key: aws.String("/foo/bar/moo"),
// })
EnableEndpointDiscovery *bool
// DisableEndpointHostPrefix will disable the SDK's behavior of prefixing
// request endpoint hosts with modeled information.
//
// Disabling this feature is useful when you want to use local endpoints
// for testing that do not support the modeled host prefix pattern.
DisableEndpointHostPrefix *bool
}
// NewConfig returns a new Config pointer that can be chained with builder
@ -377,6 +400,19 @@ func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
return c
}
// WithEndpointDiscovery will set whether or not to use endpoint discovery.
func (c *Config) WithEndpointDiscovery(t bool) *Config {
c.EnableEndpointDiscovery = &t
return c
}
// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix
// when making requests.
func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config {
c.DisableEndpointHostPrefix = &t
return c
}
// MergeIn merges the passed in configs into the existing config object.
func (c *Config) MergeIn(cfgs ...*Config) {
for _, other := range cfgs {
@ -476,6 +512,14 @@ func mergeInConfig(dst *Config, other *Config) {
if other.EnforceShouldRetryCheck != nil {
dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
}
if other.EnableEndpointDiscovery != nil {
dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery
}
if other.DisableEndpointHostPrefix != nil {
dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix
}
}
// Copy will return a shallow copy of the Config object. If any additional
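
A short usage sketch for the two new options (region and option values are arbitrary; the builder methods are the ones added above, and the environment-variable toggle appears in the session changes later in this commit):

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Endpoint discovery can also be enabled per environment via
	// AWS_ENABLE_ENDPOINT_DISCOVERY (see the session/env_config changes below).
	cfg := aws.NewConfig().
		WithRegion("us-east-1").
		WithEndpointDiscovery(true).
		WithDisableEndpointHostPrefix(false)

	sess := session.Must(session.NewSession(cfg))
	_ = sess // pass sess to a service client constructor, e.g. s3.New(sess)
}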


@ -72,9 +72,9 @@ var ValidateReqSigHandler = request.NamedHandler{
signedTime = r.LastSignedAt
}
// 10 minutes to allow for some clock skew/delays in transmission.
// 5 minutes to allow for some clock skew/delays in transmission.
// Would be improved with aws/aws-sdk-go#423
if signedTime.Add(10 * time.Minute).After(time.Now()) {
if signedTime.Add(5 * time.Minute).After(time.Now()) {
return
}


@ -9,9 +9,7 @@ var (
// providers in the ChainProvider.
//
// This has been deprecated. For verbose error messaging set
// aws.Config.CredentialsChainVerboseErrors to true
//
// @readonly
// aws.Config.CredentialsChainVerboseErrors to true.
ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
`no valid providers in chain. Deprecated.
For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,


@ -64,8 +64,6 @@ import (
// Credentials: credentials.AnonymousCredentials,
// })))
// // Access public S3 buckets.
//
// @readonly
var AnonymousCredentials = NewStaticCredentials("", "", "")
// A Value is the AWS credentials value for individual credential fields.


@ -12,14 +12,10 @@ const EnvProviderName = "EnvProvider"
var (
// ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
// found in the process's environment.
//
// @readonly
ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
// ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
// can't be found in the process's environment.
//
// @readonly
ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
)


@ -0,0 +1,425 @@
/*
Package processcreds is a credential Provider to retrieve `credential_process`
credentials.
WARNING: The following describes a method of sourcing credentials from an external
process. This can potentially be dangerous, so proceed with caution. Other
credential providers should be preferred if at all possible. If using this
option, you should make sure that the config file is as locked down as possible
using security best practices for your operating system.
You can use credentials from a `credential_process` in a variety of ways.
One way is to setup your shared config file, located in the default
location, with the `credential_process` key and the command you want to be
called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable
(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file.
[default]
credential_process = /command/to/call
Creating a new session will use the credential process to retrieve credentials.
NOTE: If there are credentials in the profile you are using, the credential
process will not be used.
// Initialize a session to load credentials.
sess, _ := session.NewSession(&aws.Config{
Region: aws.String("us-east-1")},
)
// Create S3 service client to use the credentials.
svc := s3.New(sess)
Another way to use the `credential_process` method is by using
`credentials.NewCredentials()` and providing a command to be executed to
retrieve credentials:
// Create credentials using the ProcessProvider.
creds := processcreds.NewCredentials("/path/to/command")
// Create service client value configured for credentials.
svc := s3.New(sess, &aws.Config{Credentials: creds})
You can set a non-default timeout for the `credential_process` with another
constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To
set a one minute timeout:
// Create credentials using the ProcessProvider.
creds := processcreds.NewCredentialsTimeout(
"/path/to/command",
time.Duration(500) * time.Millisecond)
If you need more control, you can set any configurable options in the
credentials using one or more option functions. For example, you can set a two
minute timeout, a credential duration of 60 minutes, and a maximum stdout
buffer size of 2k.
creds := processcreds.NewCredentials(
"/path/to/command",
func(opt *ProcessProvider) {
opt.Timeout = time.Duration(2) * time.Minute
opt.Duration = time.Duration(60) * time.Minute
opt.MaxBufSize = 2048
})
You can also use your own `exec.Cmd`:
// Create an exec.Cmd
myCommand := exec.Command("/path/to/command")
// Create credentials using your exec.Cmd and custom timeout
creds := processcreds.NewCredentialsCommand(
myCommand,
func(opt *processcreds.ProcessProvider) {
opt.Timeout = time.Duration(1) * time.Second
})
*/
package processcreds
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"runtime"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
)
const (
// ProviderName is the name this credentials provider will label any
// returned credentials Value with.
ProviderName = `ProcessProvider`
// ErrCodeProcessProviderParse error parsing process output
ErrCodeProcessProviderParse = "ProcessProviderParseError"
// ErrCodeProcessProviderVersion version error in output
ErrCodeProcessProviderVersion = "ProcessProviderVersionError"
// ErrCodeProcessProviderRequired required attribute missing in output
ErrCodeProcessProviderRequired = "ProcessProviderRequiredError"
// ErrCodeProcessProviderExecution execution of command failed
ErrCodeProcessProviderExecution = "ProcessProviderExecutionError"
// errMsgProcessProviderTimeout process took longer than allowed
errMsgProcessProviderTimeout = "credential process timed out"
// errMsgProcessProviderProcess process error
errMsgProcessProviderProcess = "error in credential_process"
// errMsgProcessProviderParse problem parsing output
errMsgProcessProviderParse = "parse failed of credential_process output"
// errMsgProcessProviderVersion version error in output
errMsgProcessProviderVersion = "wrong version in process output (not 1)"
// errMsgProcessProviderMissKey missing access key id in output
errMsgProcessProviderMissKey = "missing AccessKeyId in process output"
// errMsgProcessProviderMissSecret missing secret acess key in output
errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output"
// errMsgProcessProviderPrepareCmd prepare of command failed
errMsgProcessProviderPrepareCmd = "failed to prepare command"
// errMsgProcessProviderEmptyCmd command must not be empty
errMsgProcessProviderEmptyCmd = "command must not be empty"
// errMsgProcessProviderPipe failed to initialize pipe
errMsgProcessProviderPipe = "failed to initialize pipe"
// DefaultDuration is the default amount of time in minutes that the
// credentials will be valid for.
DefaultDuration = time.Duration(15) * time.Minute
// DefaultBufSize limits buffer size from growing to an enormous
// amount due to a faulty process.
DefaultBufSize = 1024
// DefaultTimeout default limit on time a process can run.
DefaultTimeout = time.Duration(1) * time.Minute
)
// ProcessProvider satisfies the credentials.Provider interface, and is a
// client to retrieve credentials from a process.
type ProcessProvider struct {
staticCreds bool
credentials.Expiry
originalCommand []string
// Expiry duration of the credentials. Defaults to 15 minutes if not set.
Duration time.Duration
// ExpiryWindow will allow the credentials to trigger refreshing prior to
// the credentials actually expiring. This is beneficial so race conditions
// with expiring credentials do not cause request to fail unexpectedly
// due to ExpiredTokenException exceptions.
//
// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
// 10 seconds before the credentials are actually expired.
//
// If ExpiryWindow is 0 or less it will be ignored.
ExpiryWindow time.Duration
// A string representing an os command that should return a JSON with
// credential information.
command *exec.Cmd
// MaxBufSize limits memory usage from growing to an enormous
// amount due to a faulty process.
MaxBufSize int
// Timeout limits the time a process can run.
Timeout time.Duration
}
// NewCredentials returns a pointer to a new Credentials object wrapping the
// ProcessProvider. The credentials will expire every 15 minutes by default.
func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials {
p := &ProcessProvider{
command: exec.Command(command),
Duration: DefaultDuration,
Timeout: DefaultTimeout,
MaxBufSize: DefaultBufSize,
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
// NewCredentialsTimeout returns a pointer to a new Credentials object with
// the specified command and timeout, and default duration and max buffer size.
func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials {
p := NewCredentials(command, func(opt *ProcessProvider) {
opt.Timeout = timeout
})
return p
}
// NewCredentialsCommand returns a pointer to a new Credentials object with
// the specified command, and default timeout, duration and max buffer size.
func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials {
p := &ProcessProvider{
command: command,
Duration: DefaultDuration,
Timeout: DefaultTimeout,
MaxBufSize: DefaultBufSize,
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
type credentialProcessResponse struct {
Version int
AccessKeyID string `json:"AccessKeyId"`
SecretAccessKey string
SessionToken string
Expiration *time.Time
}
// Retrieve executes the 'credential_process' and returns the credentials.
func (p *ProcessProvider) Retrieve() (credentials.Value, error) {
out, err := p.executeCredentialProcess()
if err != nil {
return credentials.Value{ProviderName: ProviderName}, err
}
// Serialize and validate response
resp := &credentialProcessResponse{}
if err = json.Unmarshal(out, resp); err != nil {
return credentials.Value{ProviderName: ProviderName}, awserr.New(
ErrCodeProcessProviderParse,
fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)),
err)
}
if resp.Version != 1 {
return credentials.Value{ProviderName: ProviderName}, awserr.New(
ErrCodeProcessProviderVersion,
errMsgProcessProviderVersion,
nil)
}
if len(resp.AccessKeyID) == 0 {
return credentials.Value{ProviderName: ProviderName}, awserr.New(
ErrCodeProcessProviderRequired,
errMsgProcessProviderMissKey,
nil)
}
if len(resp.SecretAccessKey) == 0 {
return credentials.Value{ProviderName: ProviderName}, awserr.New(
ErrCodeProcessProviderRequired,
errMsgProcessProviderMissSecret,
nil)
}
// Handle expiration
p.staticCreds = resp.Expiration == nil
if resp.Expiration != nil {
p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
}
return credentials.Value{
ProviderName: ProviderName,
AccessKeyID: resp.AccessKeyID,
SecretAccessKey: resp.SecretAccessKey,
SessionToken: resp.SessionToken,
}, nil
}
// IsExpired returns true if the credentials retrieved are expired, or not yet
// retrieved.
func (p *ProcessProvider) IsExpired() bool {
if p.staticCreds {
return false
}
return p.Expiry.IsExpired()
}
// prepareCommand prepares the command to be executed.
func (p *ProcessProvider) prepareCommand() error {
var cmdArgs []string
if runtime.GOOS == "windows" {
cmdArgs = []string{"cmd.exe", "/C"}
} else {
cmdArgs = []string{"sh", "-c"}
}
if len(p.originalCommand) == 0 {
p.originalCommand = make([]string, len(p.command.Args))
copy(p.originalCommand, p.command.Args)
// check for empty command because it succeeds
if len(strings.TrimSpace(p.originalCommand[0])) < 1 {
return awserr.New(
ErrCodeProcessProviderExecution,
fmt.Sprintf(
"%s: %s",
errMsgProcessProviderPrepareCmd,
errMsgProcessProviderEmptyCmd),
nil)
}
}
cmdArgs = append(cmdArgs, p.originalCommand...)
p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...)
p.command.Env = os.Environ()
return nil
}
// executeCredentialProcess starts the credential process on the OS and
// returns the results or an error.
func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) {
if err := p.prepareCommand(); err != nil {
return nil, err
}
// Setup the pipes
outReadPipe, outWritePipe, err := os.Pipe()
if err != nil {
return nil, awserr.New(
ErrCodeProcessProviderExecution,
errMsgProcessProviderPipe,
err)
}
p.command.Stderr = os.Stderr // display stderr on console for MFA
p.command.Stdout = outWritePipe // get creds json on process's stdout
p.command.Stdin = os.Stdin // enable stdin for MFA
output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize))
stdoutCh := make(chan error, 1)
go readInput(
io.LimitReader(outReadPipe, int64(p.MaxBufSize)),
output,
stdoutCh)
execCh := make(chan error, 1)
go executeCommand(*p.command, execCh)
finished := false
var errors []error
for !finished {
select {
case readError := <-stdoutCh:
errors = appendError(errors, readError)
finished = true
case execError := <-execCh:
err := outWritePipe.Close()
errors = appendError(errors, err)
errors = appendError(errors, execError)
if errors != nil {
return output.Bytes(), awserr.NewBatchError(
ErrCodeProcessProviderExecution,
errMsgProcessProviderProcess,
errors)
}
case <-time.After(p.Timeout):
finished = true
return output.Bytes(), awserr.NewBatchError(
ErrCodeProcessProviderExecution,
errMsgProcessProviderTimeout,
errors) // errors can be nil
}
}
out := output.Bytes()
if runtime.GOOS == "windows" {
// windows adds slashes to quotes
out = []byte(strings.Replace(string(out), `\"`, `"`, -1))
}
return out, nil
}
// appendError conveniently checks for nil before appending slice
func appendError(errors []error, err error) []error {
if err != nil {
return append(errors, err)
}
return errors
}
func executeCommand(cmd exec.Cmd, exec chan error) {
// Start the command
err := cmd.Start()
if err == nil {
err = cmd.Wait()
}
exec <- err
}
func readInput(r io.Reader, w io.Writer, read chan error) {
tee := io.TeeReader(r, w)
_, err := ioutil.ReadAll(tee)
if err == io.EOF {
err = nil
}
read <- err // will only arrive here when write end of pipe is closed
}
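
To make the JSON contract concrete, here is a toy credential_process helper (illustrative only; the key values are placeholders) that prints the shape credentialProcessResponse expects on stdout:

package main

import (
	"encoding/json"
	"os"
	"time"
)

type processOutput struct {
	Version         int
	AccessKeyId     string
	SecretAccessKey string
	SessionToken    string `json:",omitempty"`
	Expiration      string `json:",omitempty"` // RFC 3339; omit to signal static credentials
}

func main() {
	out := processOutput{
		Version:         1,
		AccessKeyId:     "AKIDEXAMPLE",
		SecretAccessKey: "secretExample",
		SessionToken:    "tokenExample",
		Expiration:      time.Now().Add(15 * time.Minute).UTC().Format(time.RFC3339),
	}
	// The provider reads this JSON from the process's stdout.
	json.NewEncoder(os.Stdout).Encode(out)
}

Pointing credential_process at a binary like this in the shared config profile (with AWS_SDK_LOAD_CONFIG set, as described in the package documentation above) is enough for the provider to pick the credentials up.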


@ -4,9 +4,8 @@ import (
"fmt"
"os"
"github.com/go-ini/ini"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/internal/ini"
"github.com/aws/aws-sdk-go/internal/shareddefaults"
)
@ -77,36 +76,37 @@ func (p *SharedCredentialsProvider) IsExpired() bool {
// The credentials retrieved from the profile will be returned or error. Error will be
// returned if it fails to read from the file, or the data is invalid.
func loadProfile(filename, profile string) (Value, error) {
config, err := ini.Load(filename)
config, err := ini.OpenFile(filename)
if err != nil {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
}
iniProfile, err := config.GetSection(profile)
if err != nil {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err)
iniProfile, ok := config.GetSection(profile)
if !ok {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil)
}
id, err := iniProfile.GetKey("aws_access_key_id")
if err != nil {
id := iniProfile.String("aws_access_key_id")
if len(id) == 0 {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
err)
nil)
}
secret, err := iniProfile.GetKey("aws_secret_access_key")
if err != nil {
secret := iniProfile.String("aws_secret_access_key")
if len(secret) == 0 {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
nil)
}
// Default to empty string if not found
token := iniProfile.Key("aws_session_token")
token := iniProfile.String("aws_session_token")
return Value{
AccessKeyID: id.String(),
SecretAccessKey: secret.String(),
SessionToken: token.String(),
AccessKeyID: id,
SecretAccessKey: secret,
SessionToken: token,
ProviderName: SharedCredsProviderName,
}, nil
}
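
A hedged usage sketch (profile name and key values are placeholders) tying the rewritten loader to the public credentials API:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Assumes a shared credentials file containing the keys loadProfile reads:
	//
	//   [default]
	//   aws_access_key_id     = AKIDEXAMPLE
	//   aws_secret_access_key = secretExample
	//   aws_session_token     = tokenExample   (optional)
	//
	creds := credentials.NewSharedCredentials("", "default") // "" = default file location
	v, err := creds.Get()
	if err != nil {
		fmt.Println("load failed:", err)
		return
	}
	fmt.Println("loaded by provider:", v.ProviderName)
}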


@ -9,8 +9,6 @@ const StaticProviderName = "StaticProvider"
var (
// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
//
// @readonly
ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
)


@ -48,4 +48,6 @@ type metric struct {
DNSLatency *int `json:"DnsLatency,omitempty"`
TCPLatency *int `json:"TcpLatency,omitempty"`
SSLLatency *int `json:"SslLatency,omitempty"`
MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"`
}


@ -112,15 +112,16 @@ func (rep *Reporter) sendAPICallMetric(r *request.Request) {
now := time.Now()
m := metric{
ClientID: aws.String(rep.clientID),
API: aws.String(r.Operation.Name),
Service: aws.String(r.ClientInfo.ServiceID),
Timestamp: (*metricTime)(&now),
Type: aws.String("ApiCall"),
AttemptCount: aws.Int(r.RetryCount + 1),
Region: r.Config.Region,
Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)),
XAmzRequestID: aws.String(r.RequestID),
ClientID: aws.String(rep.clientID),
API: aws.String(r.Operation.Name),
Service: aws.String(r.ClientInfo.ServiceID),
Timestamp: (*metricTime)(&now),
Type: aws.String("ApiCall"),
AttemptCount: aws.Int(r.RetryCount + 1),
Region: r.Config.Region,
Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)),
XAmzRequestID: aws.String(r.RequestID),
MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())),
}
// TODO: Probably want to figure something out for logging dropped
@ -226,7 +227,14 @@ func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
apiCallAttemptHandler := request.NamedHandler{Name: APICallAttemptMetricHandlerName, Fn: rep.sendAPICallAttemptMetric}
handlers.Complete.PushFrontNamed(apiCallHandler)
handlers.Complete.PushFrontNamed(apiCallAttemptHandler)
handlers.AfterRetry.PushFrontNamed(apiCallAttemptHandler)
handlers.CompleteAttempt.PushFrontNamed(apiCallAttemptHandler)
}
// boolIntValue return 1 for true and 0 for false.
func boolIntValue(b bool) int {
if b {
return 1
}
return 0
}


@ -24,6 +24,7 @@ import (
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/shareddefaults"
)
// A Defaults provides a collection of default values for SDK clients.
@ -114,7 +115,6 @@ func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Pro
const (
httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
)
// RemoteCredProvider returns a credentials provider for the default remote
@ -124,8 +124,8 @@ func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.P
return localHTTPCredProvider(cfg, handlers, u)
}
if uri := os.Getenv(ecsCredsProviderEnvVar); len(uri) > 0 {
u := fmt.Sprintf("http://169.254.170.2%s", uri)
if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 {
u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri)
return httpCredProvider(cfg, handlers, u)
}


@ -95,7 +95,12 @@ func custAddS3DualStack(p *partition) {
return
}
s, ok := p.Services["s3"]
custAddDualstack(p, "s3")
custAddDualstack(p, "s3-control")
}
func custAddDualstack(p *partition, svcName string) {
s, ok := p.Services[svcName]
if !ok {
return
}
@ -103,7 +108,7 @@ func custAddS3DualStack(p *partition) {
s.Defaults.HasDualStack = boxedTrue
s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}"
p.Services["s3"] = s
p.Services[svcName] = s
}
func custAddEC2Metadata(p *partition) {

File diff suppressed because it is too large.


@ -0,0 +1,141 @@
package endpoints
// Service identifiers
//
// Deprecated: Use client package's EndpointID value instead of these
// ServiceIDs. These IDs are not maintained, and are out of date.
const (
A4bServiceID = "a4b" // A4b.
AcmServiceID = "acm" // Acm.
AcmPcaServiceID = "acm-pca" // AcmPca.
ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor.
ApiPricingServiceID = "api.pricing" // ApiPricing.
ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker.
ApigatewayServiceID = "apigateway" // Apigateway.
ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling.
Appstream2ServiceID = "appstream2" // Appstream2.
AppsyncServiceID = "appsync" // Appsync.
AthenaServiceID = "athena" // Athena.
AutoscalingServiceID = "autoscaling" // Autoscaling.
AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans.
BatchServiceID = "batch" // Batch.
BudgetsServiceID = "budgets" // Budgets.
CeServiceID = "ce" // Ce.
ChimeServiceID = "chime" // Chime.
Cloud9ServiceID = "cloud9" // Cloud9.
ClouddirectoryServiceID = "clouddirectory" // Clouddirectory.
CloudformationServiceID = "cloudformation" // Cloudformation.
CloudfrontServiceID = "cloudfront" // Cloudfront.
CloudhsmServiceID = "cloudhsm" // Cloudhsm.
Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2.
CloudsearchServiceID = "cloudsearch" // Cloudsearch.
CloudtrailServiceID = "cloudtrail" // Cloudtrail.
CodebuildServiceID = "codebuild" // Codebuild.
CodecommitServiceID = "codecommit" // Codecommit.
CodedeployServiceID = "codedeploy" // Codedeploy.
CodepipelineServiceID = "codepipeline" // Codepipeline.
CodestarServiceID = "codestar" // Codestar.
CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity.
CognitoIdpServiceID = "cognito-idp" // CognitoIdp.
CognitoSyncServiceID = "cognito-sync" // CognitoSync.
ComprehendServiceID = "comprehend" // Comprehend.
ConfigServiceID = "config" // Config.
CurServiceID = "cur" // Cur.
DatapipelineServiceID = "datapipeline" // Datapipeline.
DaxServiceID = "dax" // Dax.
DevicefarmServiceID = "devicefarm" // Devicefarm.
DirectconnectServiceID = "directconnect" // Directconnect.
DiscoveryServiceID = "discovery" // Discovery.
DmsServiceID = "dms" // Dms.
DsServiceID = "ds" // Ds.
DynamodbServiceID = "dynamodb" // Dynamodb.
Ec2ServiceID = "ec2" // Ec2.
Ec2metadataServiceID = "ec2metadata" // Ec2metadata.
EcrServiceID = "ecr" // Ecr.
EcsServiceID = "ecs" // Ecs.
ElasticacheServiceID = "elasticache" // Elasticache.
ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk.
ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem.
ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing.
ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce.
ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder.
EmailServiceID = "email" // Email.
EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace.
EsServiceID = "es" // Es.
EventsServiceID = "events" // Events.
FirehoseServiceID = "firehose" // Firehose.
FmsServiceID = "fms" // Fms.
GameliftServiceID = "gamelift" // Gamelift.
GlacierServiceID = "glacier" // Glacier.
GlueServiceID = "glue" // Glue.
GreengrassServiceID = "greengrass" // Greengrass.
GuarddutyServiceID = "guardduty" // Guardduty.
HealthServiceID = "health" // Health.
IamServiceID = "iam" // Iam.
ImportexportServiceID = "importexport" // Importexport.
InspectorServiceID = "inspector" // Inspector.
IotServiceID = "iot" // Iot.
IotanalyticsServiceID = "iotanalytics" // Iotanalytics.
KinesisServiceID = "kinesis" // Kinesis.
KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics.
KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo.
KmsServiceID = "kms" // Kms.
LambdaServiceID = "lambda" // Lambda.
LightsailServiceID = "lightsail" // Lightsail.
LogsServiceID = "logs" // Logs.
MachinelearningServiceID = "machinelearning" // Machinelearning.
MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics.
MediaconvertServiceID = "mediaconvert" // Mediaconvert.
MedialiveServiceID = "medialive" // Medialive.
MediapackageServiceID = "mediapackage" // Mediapackage.
MediastoreServiceID = "mediastore" // Mediastore.
MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace.
MghServiceID = "mgh" // Mgh.
MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics.
ModelsLexServiceID = "models.lex" // ModelsLex.
MonitoringServiceID = "monitoring" // Monitoring.
MturkRequesterServiceID = "mturk-requester" // MturkRequester.
NeptuneServiceID = "neptune" // Neptune.
OpsworksServiceID = "opsworks" // Opsworks.
OpsworksCmServiceID = "opsworks-cm" // OpsworksCm.
OrganizationsServiceID = "organizations" // Organizations.
PinpointServiceID = "pinpoint" // Pinpoint.
PollyServiceID = "polly" // Polly.
RdsServiceID = "rds" // Rds.
RedshiftServiceID = "redshift" // Redshift.
RekognitionServiceID = "rekognition" // Rekognition.
ResourceGroupsServiceID = "resource-groups" // ResourceGroups.
Route53ServiceID = "route53" // Route53.
Route53domainsServiceID = "route53domains" // Route53domains.
RuntimeLexServiceID = "runtime.lex" // RuntimeLex.
RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker.
S3ServiceID = "s3" // S3.
S3ControlServiceID = "s3-control" // S3Control.
SagemakerServiceID = "api.sagemaker" // Sagemaker.
SdbServiceID = "sdb" // Sdb.
SecretsmanagerServiceID = "secretsmanager" // Secretsmanager.
ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo.
ServicecatalogServiceID = "servicecatalog" // Servicecatalog.
ServicediscoveryServiceID = "servicediscovery" // Servicediscovery.
ShieldServiceID = "shield" // Shield.
SmsServiceID = "sms" // Sms.
SnowballServiceID = "snowball" // Snowball.
SnsServiceID = "sns" // Sns.
SqsServiceID = "sqs" // Sqs.
SsmServiceID = "ssm" // Ssm.
StatesServiceID = "states" // States.
StoragegatewayServiceID = "storagegateway" // Storagegateway.
StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb.
StsServiceID = "sts" // Sts.
SupportServiceID = "support" // Support.
SwfServiceID = "swf" // Swf.
TaggingServiceID = "tagging" // Tagging.
TransferServiceID = "transfer" // Transfer.
TranslateServiceID = "translate" // Translate.
WafServiceID = "waf" // Waf.
WafRegionalServiceID = "waf-regional" // WafRegional.
WorkdocsServiceID = "workdocs" // Workdocs.
WorkmailServiceID = "workmail" // Workmail.
WorkspacesServiceID = "workspaces" // Workspaces.
XrayServiceID = "xray" // Xray.
)


@ -16,6 +16,10 @@ import (
type CodeGenOptions struct {
// Options for how the model will be decoded.
DecodeModelOptions DecodeModelOptions
// Disables code generation of the service endpoint prefix IDs defined in
// the model.
DisableGenerateServiceIDs bool
}
// Set combines all of the option functions together
@ -39,8 +43,16 @@ func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGe
return err
}
v := struct {
Resolver
CodeGenOptions
}{
Resolver: resolver,
CodeGenOptions: opts,
}
tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl))
if err := tmpl.ExecuteTemplate(outFile, "defaults", resolver); err != nil {
if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil {
return fmt.Errorf("failed to execute template, %v", err)
}
@ -166,15 +178,17 @@ import (
"regexp"
)
{{ template "partition consts" . }}
{{ template "partition consts" $.Resolver }}
{{ range $_, $partition := . }}
{{ range $_, $partition := $.Resolver }}
{{ template "partition region consts" $partition }}
{{ end }}
{{ template "service consts" . }}
{{ if not $.DisableGenerateServiceIDs -}}
{{ template "service consts" $.Resolver }}
{{- end }}
{{ template "endpoint resolvers" . }}
{{ template "endpoint resolvers" $.Resolver }}
{{- end }}
{{ define "partition consts" }}


@ -5,13 +5,9 @@ import "github.com/aws/aws-sdk-go/aws/awserr"
var (
// ErrMissingRegion is an error that is returned if region configuration is
// not found.
//
// @readonly
ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)
// ErrMissingEndpoint is an error that is returned if an endpoint cannot be
// resolved for a service.
//
// @readonly
ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
)


@ -19,6 +19,7 @@ type Handlers struct {
UnmarshalError HandlerList
Retry HandlerList
AfterRetry HandlerList
CompleteAttempt HandlerList
Complete HandlerList
}
@ -36,6 +37,7 @@ func (h *Handlers) Copy() Handlers {
UnmarshalMeta: h.UnmarshalMeta.copy(),
Retry: h.Retry.copy(),
AfterRetry: h.AfterRetry.copy(),
CompleteAttempt: h.CompleteAttempt.copy(),
Complete: h.Complete.copy(),
}
}
@ -53,6 +55,7 @@ func (h *Handlers) Clear() {
h.ValidateResponse.Clear()
h.Retry.Clear()
h.AfterRetry.Clear()
h.CompleteAttempt.Clear()
h.Complete.Clear()
}
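
The new CompleteAttempt list runs once per attempt, whether or not the request will be retried; a small sketch (handler name and log line are made up) mirroring how the CSM reporter earlier in this commit registers its attempt-metric handler:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())

	attemptLogger := request.NamedHandler{
		Name: "example.CompleteAttemptLogger", // hypothetical handler name
		Fn: func(r *request.Request) {
			log.Printf("%s/%s attempt %d done, retryable=%v",
				r.ClientInfo.ServiceName, r.Operation.Name,
				r.RetryCount+1, aws.BoolValue(r.Retryable))
		},
	}
	sess.Handlers.CompleteAttempt.PushFrontNamed(attemptLogger)
}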


@ -122,7 +122,6 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
Handlers: handlers.Copy(),
Retryer: retryer,
AttemptTime: time.Now(),
Time: time.Now(),
ExpireTime: 0,
Operation: operation,
@ -266,7 +265,9 @@ func (r *Request) SetReaderBody(reader io.ReadSeeker) {
}
// Presign returns the request's signed URL. Error will be returned
// if the signing fails.
// if the signing fails. The expire parameter is only used for presigned Amazon
// S3 API requests. All other AWS services will use a fixed expriation
// time of 15 minutes.
//
// It is invalid to create a presigned URL with a expire duration 0 or less. An
// error is returned if expire duration is 0 or less.
@ -283,7 +284,9 @@ func (r *Request) Presign(expire time.Duration) (string, error) {
}
// PresignRequest behaves just like presign, with the addition of returning a
// set of headers that were signed.
// set of headers that were signed. The expire parameter is only used for
// presigned Amazon S3 API requests. All other AWS services will use a fixed
// expriation time of 15 minutes.
//
// It is invalid to create a presigned URL with a expire duration 0 or less. An
// error is returned if expire duration is 0 or less.
@ -462,80 +465,78 @@ func (r *Request) Send() error {
r.Handlers.Complete.Run(r)
}()
if err := r.Error; err != nil {
return err
}
for {
r.Error = nil
r.AttemptTime = time.Now()
if aws.BoolValue(r.Retryable) {
if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
}
// The previous http.Request will have a reference to the r.Body
// and the HTTP Client's Transport may still be reading from
// the request's body even though the Client's Do returned.
r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
r.ResetBody()
// Closing response body to ensure that no response body is leaked
// between retry attempts.
if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
r.HTTPResponse.Body.Close()
}
if err := r.Sign(); err != nil {
debugLogReqError(r, "Sign Request", false, err)
return err
}
r.Sign()
if r.Error != nil {
return r.Error
}
r.Retryable = nil
r.Handlers.Send.Run(r)
if r.Error != nil {
if !shouldRetryCancel(r) {
return r.Error
}
err := r.Error
if err := r.sendRequest(); err == nil {
return nil
} else if !shouldRetryCancel(r) {
return err
} else {
r.Handlers.Retry.Run(r)
r.Handlers.AfterRetry.Run(r)
if r.Error != nil {
debugLogReqError(r, "Send Request", false, err)
if r.Error != nil || !aws.BoolValue(r.Retryable) {
return r.Error
}
debugLogReqError(r, "Send Request", true, err)
r.prepareRetry()
continue
}
r.Handlers.UnmarshalMeta.Run(r)
r.Handlers.ValidateResponse.Run(r)
if r.Error != nil {
r.Handlers.UnmarshalError.Run(r)
err := r.Error
}
}
r.Handlers.Retry.Run(r)
r.Handlers.AfterRetry.Run(r)
if r.Error != nil {
debugLogReqError(r, "Validate Response", false, err)
return r.Error
}
debugLogReqError(r, "Validate Response", true, err)
continue
}
func (r *Request) prepareRetry() {
if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
}
r.Handlers.Unmarshal.Run(r)
if r.Error != nil {
err := r.Error
r.Handlers.Retry.Run(r)
r.Handlers.AfterRetry.Run(r)
if r.Error != nil {
debugLogReqError(r, "Unmarshal Response", false, err)
return r.Error
}
debugLogReqError(r, "Unmarshal Response", true, err)
continue
}
// The previous http.Request will have a reference to the r.Body
// and the HTTP Client's Transport may still be reading from
// the request's body even though the Client's Do returned.
r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
r.ResetBody()
break
// Closing response body to ensure that no response body is leaked
// between retry attempts.
if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
r.HTTPResponse.Body.Close()
}
}
func (r *Request) sendRequest() (sendErr error) {
defer r.Handlers.CompleteAttempt.Run(r)
r.Retryable = nil
r.Handlers.Send.Run(r)
if r.Error != nil {
debugLogReqError(r, "Send Request", r.WillRetry(), r.Error)
return r.Error
}
r.Handlers.UnmarshalMeta.Run(r)
r.Handlers.ValidateResponse.Run(r)
if r.Error != nil {
r.Handlers.UnmarshalError.Run(r)
debugLogReqError(r, "Validate Response", r.WillRetry(), r.Error)
return r.Error
}
r.Handlers.Unmarshal.Run(r)
if r.Error != nil {
debugLogReqError(r, "Unmarshal Response", r.WillRetry(), r.Error)
return r.Error
}
return nil
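
For orientation, a sketch of driving the refactored send loop through an explicitly built request (bucket and key are placeholders; GetObjectRequest is the usual generated request constructor, shown here only as an example caller):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// req.Send exercises the prepareRetry/sendRequest loop above, including
	// the new CompleteAttempt handlers on every attempt.
	req, out := svc.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	})
	if err := req.Send(); err != nil {
		fmt.Println("failed after", req.RetryCount, "retries:", err)
		return
	}
	fmt.Println("content length:", aws.Int64Value(out.ContentLength))
}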


@ -40,6 +40,7 @@ var throttleCodes = map[string]struct{}{
"RequestThrottled": {},
"TooManyRequestsException": {}, // Lambda functions
"PriorRequestNotComplete": {}, // Route53
"TransactionInProgressException": {},
}
// credsExpiredCodes is a collection of error codes which signify the credentials


@ -17,6 +17,12 @@ const (
ParamMinValueErrCode = "ParamMinValueError"
// ParamMinLenErrCode is the error code for fields without enough elements.
ParamMinLenErrCode = "ParamMinLenError"
// ParamMaxLenErrCode is the error code for value being too long.
ParamMaxLenErrCode = "ParamMaxLenError"
// ParamFormatErrCode is the error code for a field with invalid
// format or characters.
ParamFormatErrCode = "ParamFormatInvalidError"
)
// Validator provides a way for types to perform validation logic on their
@ -232,3 +238,49 @@ func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
func (e *ErrParamMinLen) MinLen() int {
return e.min
}
// An ErrParamMaxLen represents a maximum length parameter error.
type ErrParamMaxLen struct {
errInvalidParam
max int
}
// NewErrParamMaxLen creates a new maximum length parameter error.
func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen {
return &ErrParamMaxLen{
errInvalidParam: errInvalidParam{
code: ParamMaxLenErrCode,
field: field,
msg: fmt.Sprintf("maximum size of %v, %v", max, value),
},
max: max,
}
}
// MaxLen returns the field's required minimum length.
func (e *ErrParamMaxLen) MaxLen() int {
return e.max
}
// An ErrParamFormat represents a invalid format parameter error.
type ErrParamFormat struct {
errInvalidParam
format string
}
// NewErrParamFormat creates a new invalid format parameter error.
func NewErrParamFormat(field string, format, value string) *ErrParamFormat {
return &ErrParamFormat{
errInvalidParam: errInvalidParam{
code: ParamFormatErrCode,
field: field,
msg: fmt.Sprintf("format %v, %v", format, value),
},
format: format,
}
}
// Format returns the field's required format.
func (e *ErrParamFormat) Format() string {
return e.format
}
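As an aside on the two error types added above, here is a minimal sketch of how client-side validation might surface them; the field name, length limit, and format label are made up for illustration and are not taken from the SDK:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
)

// validateAccountID is a hypothetical validator; the 12-character limit and
// the "numeric" format label are illustrative only.
func validateAccountID(id string) error {
	if len(id) > 12 {
		return request.NewErrParamMaxLen("AccountId", 12, id)
	}
	for _, r := range id {
		if r < '0' || r > '9' {
			return request.NewErrParamFormat("AccountId", "numeric", id)
		}
	}
	return nil
}

func main() {
	fmt.Println(validateAccountID("not-a-number"))
}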


@ -99,7 +99,7 @@ handler logs every request and its payload made by a service client:
sess.Handlers.Send.PushFront(func(r *request.Request) {
// Log every request made and its payload
logger.Println("Request: %s/%s, Payload: %s",
logger.Printf("Request: %s/%s, Payload: %s",
r.ClientInfo.ServiceName, r.Operation, r.Params)
})


@ -4,6 +4,7 @@ import (
"os"
"strconv"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/defaults"
)
@ -101,6 +102,12 @@ type envConfig struct {
CSMEnabled bool
CSMPort string
CSMClientID string
enableEndpointDiscovery string
// Enables endpoint discovery via environment variables.
//
// AWS_ENABLE_ENDPOINT_DISCOVERY=true
EnableEndpointDiscovery *bool
}
var (
@ -125,6 +132,10 @@ var (
"AWS_SESSION_TOKEN",
}
enableEndpointDiscoveryEnvKey = []string{
"AWS_ENABLE_ENDPOINT_DISCOVERY",
}
regionEnvKeys = []string{
"AWS_REGION",
"AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set
@ -194,6 +205,12 @@ func envConfigLoad(enableSharedConfig bool) envConfig {
setFromEnvVal(&cfg.Region, regionKeys)
setFromEnvVal(&cfg.Profile, profileKeys)
// endpoint discovery is in reference to it being enabled.
setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey)
if len(cfg.enableEndpointDiscovery) > 0 {
cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false")
}
setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey)
setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey)
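A minimal sketch of the new environment switch in use (not part of the diff; it assumes shared config loading is enabled so the environment value is merged into the session config):

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Illustrative value; endpoint discovery is read from the environment
	// and merged when the session config is built.
	os.Setenv("AWS_ENABLE_ENDPOINT_DISCOVERY", "true")

	sess := session.Must(session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
	}))

	if sess.Config.EnableEndpointDiscovery != nil {
		fmt.Println("endpoint discovery enabled:", *sess.Config.EnableEndpointDiscovery)
	}
}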


@ -14,13 +14,32 @@ import (
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/processcreds"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/csm"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/shareddefaults"
)
const (
// ErrCodeSharedConfig represents an error that occurs in the shared
// configuration logic
ErrCodeSharedConfig = "SharedConfigErr"
)
// ErrSharedConfigSourceCollision will be returned if a section contains both
// source_profile and credential_source
var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil)
// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment
// variables are empty and Environment was set as the credential source
var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil)
// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided
var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil)
// A Session provides a central location to create service clients from and
// store configurations and request handlers for those services.
//
@ -434,8 +453,67 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share
}
}
if cfg.EnableEndpointDiscovery == nil {
if envCfg.EnableEndpointDiscovery != nil {
cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery)
} else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil {
cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery)
}
}
// Configure credentials if not already set
if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
// inspect the profile to see if a credential source has been specified.
if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.CredentialSource) > 0 {
// if both credential_source and source_profile have been set, return an error
// as this is undefined behavior.
if len(sharedCfg.AssumeRole.SourceProfile) > 0 {
return ErrSharedConfigSourceCollision
}
// valid credential source values
const (
credSourceEc2Metadata = "Ec2InstanceMetadata"
credSourceEnvironment = "Environment"
credSourceECSContainer = "EcsContainer"
)
switch sharedCfg.AssumeRole.CredentialSource {
case credSourceEc2Metadata:
cfgCp := *cfg
p := defaults.RemoteCredProvider(cfgCp, handlers)
cfgCp.Credentials = credentials.NewCredentials(p)
if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
// AssumeRole Token provider is required if doing Assume Role
// with MFA.
return AssumeRoleTokenProviderNotSetError{}
}
cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts)
case credSourceEnvironment:
cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
envCfg.Creds,
)
case credSourceECSContainer:
if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 {
return ErrSharedConfigECSContainerEnvVarEmpty
}
cfgCp := *cfg
p := defaults.RemoteCredProvider(cfgCp, handlers)
creds := credentials.NewCredentials(p)
cfg.Credentials = creds
default:
return ErrSharedConfigInvalidCredSource
}
return nil
}
if len(envCfg.Creds.AccessKeyID) > 0 {
cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
envCfg.Creds,
@ -445,36 +523,22 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share
cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds(
sharedCfg.AssumeRoleSource.Creds,
)
if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
// AssumeRole Token provider is required if doing Assume Role
// with MFA.
return AssumeRoleTokenProviderNotSetError{}
}
cfg.Credentials = stscreds.NewCredentials(
&Session{
Config: &cfgCp,
Handlers: handlers.Copy(),
},
sharedCfg.AssumeRole.RoleARN,
func(opt *stscreds.AssumeRoleProvider) {
opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
// Assume role with external ID
if len(sharedCfg.AssumeRole.ExternalID) > 0 {
opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
}
// Assume role with MFA
if len(sharedCfg.AssumeRole.MFASerial) > 0 {
opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial)
opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
}
},
)
cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts)
} else if len(sharedCfg.Creds.AccessKeyID) > 0 {
cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
sharedCfg.Creds,
)
} else if len(sharedCfg.CredentialProcess) > 0 {
cfg.Credentials = processcreds.NewCredentials(
sharedCfg.CredentialProcess,
)
} else {
// Fallback to default credentials provider, include mock errors
// for the credential chain so user can identify why credentials
@ -493,6 +557,30 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share
return nil
}
func assumeRoleCredentials(cfg aws.Config, handlers request.Handlers, sharedCfg sharedConfig, sessOpts Options) *credentials.Credentials {
return stscreds.NewCredentials(
&Session{
Config: &cfg,
Handlers: handlers.Copy(),
},
sharedCfg.AssumeRole.RoleARN,
func(opt *stscreds.AssumeRoleProvider) {
opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
// Assume role with external ID
if len(sharedCfg.AssumeRole.ExternalID) > 0 {
opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
}
// Assume role with MFA
if len(sharedCfg.AssumeRole.MFASerial) > 0 {
opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial)
opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
}
},
)
}
// AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the
// MFAToken option is not set when shared config is configured load assume a
// role with an MFA token.


@ -2,11 +2,11 @@ package session
import (
"fmt"
"io/ioutil"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/go-ini/ini"
"github.com/aws/aws-sdk-go/internal/ini"
)
const (
@ -16,15 +16,21 @@ const (
sessionTokenKey = `aws_session_token` // optional
// Assume Role Credentials group
roleArnKey = `role_arn` // group required
sourceProfileKey = `source_profile` // group required
externalIDKey = `external_id` // optional
mfaSerialKey = `mfa_serial` // optional
roleSessionNameKey = `role_session_name` // optional
roleArnKey = `role_arn` // group required
sourceProfileKey = `source_profile` // group required (or credential_source)
credentialSourceKey = `credential_source` // group required (or source_profile)
externalIDKey = `external_id` // optional
mfaSerialKey = `mfa_serial` // optional
roleSessionNameKey = `role_session_name` // optional
// Additional Config fields
regionKey = `region`
// endpoint discovery group
enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional
// External Credential Process
credentialProcessKey = `credential_process`
// DefaultSharedConfigProfile is the default profile to be used when
// loading configuration from the config files if another profile name
// is not provided.
@ -32,11 +38,12 @@ const (
)
type assumeRoleConfig struct {
RoleARN string
SourceProfile string
ExternalID string
MFASerial string
RoleSessionName string
RoleARN string
SourceProfile string
CredentialSource string
ExternalID string
MFASerial string
RoleSessionName string
}
// sharedConfig represents the configuration fields of the SDK config files.
@ -55,16 +62,25 @@ type sharedConfig struct {
AssumeRole assumeRoleConfig
AssumeRoleSource *sharedConfig
// An external process to request credentials
CredentialProcess string
// Region is the region the SDK should use for looking up AWS service endpoints
// and signing requests.
//
// region
Region string
// EnableEndpointDiscovery can be enabled in the shared config by setting
// endpoint_discovery_enabled to true
//
// endpoint_discovery_enabled = true
EnableEndpointDiscovery *bool
}
type sharedConfigFile struct {
Filename string
IniData *ini.File
IniData ini.Sections
}
// loadSharedConfig retrieves the configuration from the list of files
@ -105,19 +121,16 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
files := make([]sharedConfigFile, 0, len(filenames))
for _, filename := range filenames {
b, err := ioutil.ReadFile(filename)
if err != nil {
sections, err := ini.OpenFile(filename)
if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile {
// Skip files which can't be opened and read for whatever reason
continue
}
f, err := ini.Load(b)
if err != nil {
} else if err != nil {
return nil, SharedConfigLoadError{Filename: filename, Err: err}
}
files = append(files, sharedConfigFile{
Filename: filename, IniData: f,
Filename: filename, IniData: sections,
})
}
@ -127,6 +140,13 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error {
var assumeRoleSrc sharedConfig
if len(cfg.AssumeRole.CredentialSource) > 0 {
// setAssumeRoleSource is only called when source_profile is found.
// If both source_profile and credential_source are set, then
// ErrSharedConfigSourceCollision will be returned
return ErrSharedConfigSourceCollision
}
// Multiple level assume role chains are not supported
if cfg.AssumeRole.SourceProfile == origProfile {
assumeRoleSrc = *cfg
@ -171,45 +191,59 @@ func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFil
// if a config file only includes aws_access_key_id but no aws_secret_access_key
// the aws_access_key_id will be ignored.
func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error {
section, err := file.IniData.GetSection(profile)
if err != nil {
section, ok := file.IniData.GetSection(profile)
if !ok {
// Fallback to the alternate profile name: profile <name>
section, err = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
if err != nil {
return SharedConfigProfileNotExistsError{Profile: profile, Err: err}
section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
if !ok {
return SharedConfigProfileNotExistsError{Profile: profile, Err: nil}
}
}
// Shared Credentials
akid := section.Key(accessKeyIDKey).String()
secret := section.Key(secretAccessKey).String()
akid := section.String(accessKeyIDKey)
secret := section.String(secretAccessKey)
if len(akid) > 0 && len(secret) > 0 {
cfg.Creds = credentials.Value{
AccessKeyID: akid,
SecretAccessKey: secret,
SessionToken: section.Key(sessionTokenKey).String(),
SessionToken: section.String(sessionTokenKey),
ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
}
}
// Assume Role
roleArn := section.Key(roleArnKey).String()
srcProfile := section.Key(sourceProfileKey).String()
if len(roleArn) > 0 && len(srcProfile) > 0 {
roleArn := section.String(roleArnKey)
srcProfile := section.String(sourceProfileKey)
credentialSource := section.String(credentialSourceKey)
hasSource := len(srcProfile) > 0 || len(credentialSource) > 0
if len(roleArn) > 0 && hasSource {
cfg.AssumeRole = assumeRoleConfig{
RoleARN: roleArn,
SourceProfile: srcProfile,
ExternalID: section.Key(externalIDKey).String(),
MFASerial: section.Key(mfaSerialKey).String(),
RoleSessionName: section.Key(roleSessionNameKey).String(),
RoleARN: roleArn,
SourceProfile: srcProfile,
CredentialSource: credentialSource,
ExternalID: section.String(externalIDKey),
MFASerial: section.String(mfaSerialKey),
RoleSessionName: section.String(roleSessionNameKey),
}
}
// `credential_process`
if credProc := section.String(credentialProcessKey); len(credProc) > 0 {
cfg.CredentialProcess = credProc
}
// Region
if v := section.Key(regionKey).String(); len(v) > 0 {
if v := section.String(regionKey); len(v) > 0 {
cfg.Region = v
}
// Endpoint discovery
if section.Has(enableEndpointDiscoveryKey) {
v := section.Bool(enableEndpointDiscoveryKey)
cfg.EnableEndpointDiscovery = &v
}
return nil
}
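For orientation, an illustrative shared config profile exercising the keys read above; the profile names, role ARN, and command path are placeholders, and a real profile must choose either source_profile or credential_source, since combining them now returns ErrSharedConfigSourceCollision:

[profile example-discovery]
region = us-east-1
endpoint_discovery_enabled = true
role_arn = arn:aws:iam::123456789012:role/example-role
credential_source = Ec2InstanceMetadata

[profile example-process]
region = us-east-1
credential_process = /usr/local/bin/example-credential-helper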


@ -98,25 +98,25 @@ var ignoredHeaders = rules{
var requiredSignedHeaders = rules{
whitelist{
mapRule{
"Cache-Control": struct{}{},
"Content-Disposition": struct{}{},
"Content-Encoding": struct{}{},
"Content-Language": struct{}{},
"Content-Md5": struct{}{},
"Content-Type": struct{}{},
"Expires": struct{}{},
"If-Match": struct{}{},
"If-Modified-Since": struct{}{},
"If-None-Match": struct{}{},
"If-Unmodified-Since": struct{}{},
"Range": struct{}{},
"X-Amz-Acl": struct{}{},
"X-Amz-Copy-Source": struct{}{},
"X-Amz-Copy-Source-If-Match": struct{}{},
"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
"X-Amz-Copy-Source-If-None-Match": struct{}{},
"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
"X-Amz-Copy-Source-Range": struct{}{},
"Cache-Control": struct{}{},
"Content-Disposition": struct{}{},
"Content-Encoding": struct{}{},
"Content-Language": struct{}{},
"Content-Md5": struct{}{},
"Content-Type": struct{}{},
"Expires": struct{}{},
"If-Match": struct{}{},
"If-Modified-Since": struct{}{},
"If-None-Match": struct{}{},
"If-Unmodified-Since": struct{}{},
"Range": struct{}{},
"X-Amz-Acl": struct{}{},
"X-Amz-Copy-Source": struct{}{},
"X-Amz-Copy-Source-If-Match": struct{}{},
"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
"X-Amz-Copy-Source-If-None-Match": struct{}{},
"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
"X-Amz-Copy-Source-Range": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
@ -134,6 +134,7 @@ var requiredSignedHeaders = rules{
"X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
"X-Amz-Storage-Class": struct{}{},
"X-Amz-Tagging": struct{}{},
"X-Amz-Website-Redirect-Location": struct{}{},
"X-Amz-Content-Sha256": struct{}{},
},
@ -421,7 +422,7 @@ var SignRequestHandler = request.NamedHandler{
// If the credentials of the request's config are set to
// credentials.AnonymousCredentials the request will not be signed.
func SignSDKRequest(req *request.Request) {
signSDKRequestWithCurrTime(req, time.Now)
SignSDKRequestWithCurrentTime(req, time.Now)
}
// BuildNamedHandler will build a generic handler for signing.
@ -429,12 +430,15 @@ func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler
return request.NamedHandler{
Name: name,
Fn: func(req *request.Request) {
signSDKRequestWithCurrTime(req, time.Now, opts...)
SignSDKRequestWithCurrentTime(req, time.Now, opts...)
},
}
}
func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
// SignSDKRequestWithCurrentTime will sign the SDK's request using the time
// function passed in. Behaves the same as SignSDKRequest with the exception
// the request is signed with the value returned by the current time function.
func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
// If the request does not need to be signed ignore the signing of the
// request if the AnonymousCredentials object is used.
if req.Config.Credentials == credentials.AnonymousCredentials {
@ -470,13 +474,9 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time
opt(v4)
}
signingTime := req.Time
if !req.LastSignedAt.IsZero() {
signingTime = req.LastSignedAt
}
curTime := curTimeFn()
signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
name, region, req.ExpireTime, req.ExpireTime > 0, signingTime,
name, region, req.ExpireTime, req.ExpireTime > 0, curTime,
)
if err != nil {
req.Error = err
@ -485,7 +485,7 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time
}
req.SignedHeaderVals = signedHeaders
req.LastSignedAt = curTimeFn()
req.LastSignedAt = curTime
}
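Since the signer entry point is now exported as SignSDKRequestWithCurrentTime, callers can install a signing handler that uses their own clock. A minimal sketch; the S3 client, handler name, and fixed timestamp are assumptions for illustration:

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/aws/signer/v4"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Sign requests with a caller-supplied clock; the timestamp is made up.
	fixedClock := func() time.Time { return time.Date(2018, 12, 9, 0, 0, 0, 0, time.UTC) }

	svc.Handlers.Sign.Clear()
	svc.Handlers.Sign.PushBackNamed(request.NamedHandler{
		Name: "custom.v4SignerWithClock",
		Fn: func(r *request.Request) {
			v4.SignSDKRequestWithCurrentTime(r, fixedClock)
		},
	})
}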
const logSignInfoMsg = `DEBUG: Request Signature:


@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.15.47"
const SDKVersion = "1.16.2"

vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go (new vendored file, 120 lines)

@ -0,0 +1,120 @@
package ini
// ASTKind represents different states in the parse table
// and the type of AST that is being constructed
type ASTKind int
// ASTKind* is used in the parse table to transition between
// the different states
const (
ASTKindNone = ASTKind(iota)
ASTKindStart
ASTKindExpr
ASTKindEqualExpr
ASTKindStatement
ASTKindSkipStatement
ASTKindExprStatement
ASTKindSectionStatement
ASTKindNestedSectionStatement
ASTKindCompletedNestedSectionStatement
ASTKindCommentStatement
ASTKindCompletedSectionStatement
)
func (k ASTKind) String() string {
switch k {
case ASTKindNone:
return "none"
case ASTKindStart:
return "start"
case ASTKindExpr:
return "expr"
case ASTKindStatement:
return "stmt"
case ASTKindSectionStatement:
return "section_stmt"
case ASTKindExprStatement:
return "expr_stmt"
case ASTKindCommentStatement:
return "comment"
case ASTKindNestedSectionStatement:
return "nested_section_stmt"
case ASTKindCompletedSectionStatement:
return "completed_stmt"
case ASTKindSkipStatement:
return "skip"
default:
return ""
}
}
// The AST type allows us to determine what kind of node we
// are on without needing to cast.
//
// The root is always the first node in Children
type AST struct {
Kind ASTKind
Root Token
RootToken bool
Children []AST
}
func newAST(kind ASTKind, root AST, children ...AST) AST {
return AST{
Kind: kind,
Children: append([]AST{root}, children...),
}
}
func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST {
return AST{
Kind: kind,
Root: root,
RootToken: true,
Children: children,
}
}
// AppendChild will append to the list of children an AST has.
func (a *AST) AppendChild(child AST) {
a.Children = append(a.Children, child)
}
// GetRoot will return the root AST which can be the first entry
// in the children list or a token.
func (a *AST) GetRoot() AST {
if a.RootToken {
return *a
}
if len(a.Children) == 0 {
return AST{}
}
return a.Children[0]
}
// GetChildren will return the current AST's list of children
func (a *AST) GetChildren() []AST {
if len(a.Children) == 0 {
return []AST{}
}
if a.RootToken {
return a.Children
}
return a.Children[1:]
}
// SetChildren will set and override all children of the AST.
func (a *AST) SetChildren(children []AST) {
if a.RootToken {
a.Children = children
} else {
a.Children = append(a.Children[:1], children...)
}
}
// Start is used to indicate the starting state of the parse table.
var Start = newAST(ASTKindStart, AST{})


@ -0,0 +1,11 @@
package ini
var commaRunes = []rune(",")
func isComma(b rune) bool {
return b == ','
}
func newCommaToken() Token {
return newToken(TokenComma, commaRunes, NoneType)
}


@ -0,0 +1,35 @@
package ini
// isComment will return whether or not the next byte(s) form a
// comment.
func isComment(b []rune) bool {
if len(b) == 0 {
return false
}
switch b[0] {
case ';':
return true
case '#':
return true
}
return false
}
// newCommentToken will create a comment token and
// return how many bytes were read.
func newCommentToken(b []rune) (Token, int, error) {
i := 0
for ; i < len(b); i++ {
if b[i] == '\n' {
break
}
if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' {
break
}
}
return newToken(TokenComment, b[:i], NoneType), i, nil
}

vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go (new vendored file, 29 lines)

@ -0,0 +1,29 @@
// Package ini is an LL(1) parser for configuration files.
//
// Example:
// sections, err := ini.OpenFile("/path/to/file")
// if err != nil {
// panic(err)
// }
//
// profile := "foo"
// section, ok := sections.GetSection(profile)
// if !ok {
// fmt.Printf("section %q could not be found", profile)
// }
//
// Below is the BNF that describes this parser
// Grammar:
// stmt -> value stmt'
// stmt' -> epsilon | op stmt
// value -> number | string | boolean | quoted_string
//
// section -> [ section'
// section' -> value section_close
// section_close -> ]
//
// SkipState will skip (NL WS)+
//
// comment -> # comment' | ; comment'
// comment' -> epsilon | value
package ini


@ -0,0 +1,4 @@
package ini
// emptyToken is used to satisfy the Token interface
var emptyToken = newToken(TokenNone, []rune{}, NoneType)


@ -0,0 +1,24 @@
package ini
// newExpression will return an expression AST.
// Expr represents an expression
//
// grammar:
// expr -> string | number
func newExpression(tok Token) AST {
return newASTWithRootToken(ASTKindExpr, tok)
}
func newEqualExpr(left AST, tok Token) AST {
return newASTWithRootToken(ASTKindEqualExpr, tok, left)
}
// EqualExprKey will return the LHS value in the equal expr
func EqualExprKey(ast AST) string {
children := ast.GetChildren()
if len(children) == 0 || ast.Kind != ASTKindEqualExpr {
return ""
}
return string(children[0].Root.Raw())
}

vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go (new vendored file, 17 lines)

@ -0,0 +1,17 @@
// +build gofuzz
package ini
import (
"bytes"
)
func Fuzz(data []byte) int {
b := bytes.NewReader(data)
if _, err := Parse(b); err != nil {
return 0
}
return 1
}

vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go (new vendored file, 51 lines)

@ -0,0 +1,51 @@
package ini
import (
"io"
"os"
"github.com/aws/aws-sdk-go/aws/awserr"
)
// OpenFile takes a path to a given file, and will open and parse
// that file.
func OpenFile(path string) (Sections, error) {
f, err := os.Open(path)
if err != nil {
return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err)
}
defer f.Close()
return Parse(f)
}
// Parse will parse the given file using the shared config
// visitor.
func Parse(f io.Reader) (Sections, error) {
tree, err := ParseAST(f)
if err != nil {
return Sections{}, err
}
v := NewDefaultVisitor()
if err = Walk(tree, v); err != nil {
return Sections{}, err
}
return v.Sections, nil
}
// ParseBytes will parse the given bytes and return the parsed sections.
func ParseBytes(b []byte) (Sections, error) {
tree, err := ParseASTBytes(b)
if err != nil {
return Sections{}, err
}
v := NewDefaultVisitor()
if err = Walk(tree, v); err != nil {
return Sections{}, err
}
return v.Sections, nil
}
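A minimal sketch of the new parser's public surface; note the package lives under internal/, so it is only importable from within the SDK module, and the config contents are illustrative:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/internal/ini"
)

func main() {
	// Illustrative config contents.
	data := []byte("[profile example]\nregion = us-west-2\nendpoint_discovery_enabled = true\n")

	sections, err := ini.ParseBytes(data)
	if err != nil {
		panic(err)
	}

	section, ok := sections.GetSection("profile example")
	if !ok {
		panic("profile not found")
	}

	fmt.Println(section.String("region"))                   // prints us-west-2
	fmt.Println(section.Bool("endpoint_discovery_enabled")) // prints true
}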


@ -0,0 +1,165 @@
package ini
import (
"bytes"
"io"
"io/ioutil"
"github.com/aws/aws-sdk-go/aws/awserr"
)
const (
// ErrCodeUnableToReadFile is used when a file fails to be
// opened or read.
ErrCodeUnableToReadFile = "FailedRead"
)
// TokenType represents the different token types
type TokenType int
func (t TokenType) String() string {
switch t {
case TokenNone:
return "none"
case TokenLit:
return "literal"
case TokenSep:
return "sep"
case TokenOp:
return "op"
case TokenWS:
return "ws"
case TokenNL:
return "newline"
case TokenComment:
return "comment"
case TokenComma:
return "comma"
default:
return ""
}
}
// TokenType enums
const (
TokenNone = TokenType(iota)
TokenLit
TokenSep
TokenComma
TokenOp
TokenWS
TokenNL
TokenComment
)
type iniLexer struct{}
// Tokenize will return a list of tokens during lexical analysis of the
// io.Reader.
func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) {
b, err := ioutil.ReadAll(r)
if err != nil {
return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err)
}
return l.tokenize(b)
}
func (l *iniLexer) tokenize(b []byte) ([]Token, error) {
runes := bytes.Runes(b)
var err error
n := 0
tokenAmount := countTokens(runes)
tokens := make([]Token, tokenAmount)
count := 0
for len(runes) > 0 && count < tokenAmount {
switch {
case isWhitespace(runes[0]):
tokens[count], n, err = newWSToken(runes)
case isComma(runes[0]):
tokens[count], n = newCommaToken(), 1
case isComment(runes):
tokens[count], n, err = newCommentToken(runes)
case isNewline(runes):
tokens[count], n, err = newNewlineToken(runes)
case isSep(runes):
tokens[count], n, err = newSepToken(runes)
case isOp(runes):
tokens[count], n, err = newOpToken(runes)
default:
tokens[count], n, err = newLitToken(runes)
}
if err != nil {
return nil, err
}
count++
runes = runes[n:]
}
return tokens[:count], nil
}
func countTokens(runes []rune) int {
count, n := 0, 0
var err error
for len(runes) > 0 {
switch {
case isWhitespace(runes[0]):
_, n, err = newWSToken(runes)
case isComma(runes[0]):
_, n = newCommaToken(), 1
case isComment(runes):
_, n, err = newCommentToken(runes)
case isNewline(runes):
_, n, err = newNewlineToken(runes)
case isSep(runes):
_, n, err = newSepToken(runes)
case isOp(runes):
_, n, err = newOpToken(runes)
default:
_, n, err = newLitToken(runes)
}
if err != nil {
return 0
}
count++
runes = runes[n:]
}
return count + 1
}
// Token holds metadata about a given value.
type Token struct {
t TokenType
ValueType ValueType
base int
raw []rune
}
var emptyValue = Value{}
func newToken(t TokenType, raw []rune, v ValueType) Token {
return Token{
t: t,
raw: raw,
ValueType: v,
}
}
// Raw returns the raw runes that were consumed
func (tok Token) Raw() []rune {
return tok.raw
}
// Type returns the token type
func (tok Token) Type() TokenType {
return tok.t
}


@ -0,0 +1,347 @@
package ini
import (
"fmt"
"io"
)
// State enums for the parse table
const (
InvalidState = iota
// stmt -> value stmt'
StatementState
// stmt' -> MarkComplete | op stmt
StatementPrimeState
// value -> number | string | boolean | quoted_string
ValueState
// section -> [ section'
OpenScopeState
// section' -> value section_close
SectionState
// section_close -> ]
CloseScopeState
// SkipState will skip (NL WS)+
SkipState
// SkipTokenState will skip any token and push the previous
// state onto the stack.
SkipTokenState
// comment -> # comment' | ; comment'
// comment' -> MarkComplete | value
CommentState
// MarkComplete state will complete statements and move that
// to the completed AST list
MarkCompleteState
// TerminalState signifies that the tokens have been fully parsed
TerminalState
)
// parseTable is a state machine to dictate the grammar above.
var parseTable = map[ASTKind]map[TokenType]int{
ASTKindStart: map[TokenType]int{
TokenLit: StatementState,
TokenSep: OpenScopeState,
TokenWS: SkipTokenState,
TokenNL: SkipTokenState,
TokenComment: CommentState,
TokenNone: TerminalState,
},
ASTKindCommentStatement: map[TokenType]int{
TokenLit: StatementState,
TokenSep: OpenScopeState,
TokenWS: SkipTokenState,
TokenNL: SkipTokenState,
TokenComment: CommentState,
TokenNone: MarkCompleteState,
},
ASTKindExpr: map[TokenType]int{
TokenOp: StatementPrimeState,
TokenLit: ValueState,
TokenSep: OpenScopeState,
TokenWS: ValueState,
TokenNL: SkipState,
TokenComment: CommentState,
TokenNone: MarkCompleteState,
},
ASTKindEqualExpr: map[TokenType]int{
TokenLit: ValueState,
TokenWS: SkipTokenState,
TokenNL: SkipState,
},
ASTKindStatement: map[TokenType]int{
TokenLit: SectionState,
TokenSep: CloseScopeState,
TokenWS: SkipTokenState,
TokenNL: SkipTokenState,
TokenComment: CommentState,
TokenNone: MarkCompleteState,
},
ASTKindExprStatement: map[TokenType]int{
TokenLit: ValueState,
TokenSep: OpenScopeState,
TokenOp: ValueState,
TokenWS: ValueState,
TokenNL: MarkCompleteState,
TokenComment: CommentState,
TokenNone: TerminalState,
TokenComma: SkipState,
},
ASTKindSectionStatement: map[TokenType]int{
TokenLit: SectionState,
TokenOp: SectionState,
TokenSep: CloseScopeState,
TokenWS: SectionState,
TokenNL: SkipTokenState,
},
ASTKindCompletedSectionStatement: map[TokenType]int{
TokenWS: SkipTokenState,
TokenNL: SkipTokenState,
TokenLit: StatementState,
TokenSep: OpenScopeState,
TokenComment: CommentState,
TokenNone: MarkCompleteState,
},
ASTKindSkipStatement: map[TokenType]int{
TokenLit: StatementState,
TokenSep: OpenScopeState,
TokenWS: SkipTokenState,
TokenNL: SkipTokenState,
TokenComment: CommentState,
TokenNone: TerminalState,
},
}
// ParseAST will parse input from an io.Reader using
// an LL(1) parser.
func ParseAST(r io.Reader) ([]AST, error) {
lexer := iniLexer{}
tokens, err := lexer.Tokenize(r)
if err != nil {
return []AST{}, err
}
return parse(tokens)
}
// ParseASTBytes will parse input from a byte slice using
// an LL(1) parser.
func ParseASTBytes(b []byte) ([]AST, error) {
lexer := iniLexer{}
tokens, err := lexer.tokenize(b)
if err != nil {
return []AST{}, err
}
return parse(tokens)
}
func parse(tokens []Token) ([]AST, error) {
start := Start
stack := newParseStack(3, len(tokens))
stack.Push(start)
s := newSkipper()
loop:
for stack.Len() > 0 {
k := stack.Pop()
var tok Token
if len(tokens) == 0 {
// this occurs when all the tokens have been processed
// but reduction of what's left on the stack needs to
// occur.
tok = emptyToken
} else {
tok = tokens[0]
}
step := parseTable[k.Kind][tok.Type()]
if s.ShouldSkip(tok) {
// being in a skip state with no tokens will break out of
// the parse loop since there is nothing left to process.
if len(tokens) == 0 {
break loop
}
step = SkipTokenState
}
switch step {
case TerminalState:
// Finished parsing. Push what should be the last
// statement to the stack. If there is anything left
// on the stack, an error in parsing has occurred.
if k.Kind != ASTKindStart {
stack.MarkComplete(k)
}
break loop
case SkipTokenState:
// When skipping a token, the previous state was popped off the stack.
// To maintain the correct state, the previous state will be pushed
// onto the stack.
stack.Push(k)
case StatementState:
if k.Kind != ASTKindStart {
stack.MarkComplete(k)
}
expr := newExpression(tok)
stack.Push(expr)
case StatementPrimeState:
if tok.Type() != TokenOp {
stack.MarkComplete(k)
continue
}
if k.Kind != ASTKindExpr {
return nil, NewParseError(
fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k),
)
}
k = trimSpaces(k)
expr := newEqualExpr(k, tok)
stack.Push(expr)
case ValueState:
// ValueState requires the previous state to either be an equal expression
// or an expression statement.
//
// This grammar occurs when the RHS is a number, word, or quoted string.
// equal_expr -> lit op equal_expr'
// equal_expr' -> number | string | quoted_string
// quoted_string -> " quoted_string'
// quoted_string' -> string quoted_string_end
// quoted_string_end -> "
//
// otherwise
// expr_stmt -> equal_expr (expr_stmt')*
// expr_stmt' -> ws S | op S | MarkComplete
// S -> equal_expr' expr_stmt'
switch k.Kind {
case ASTKindEqualExpr:
// assigning a value to some key
k.AppendChild(newExpression(tok))
stack.Push(newExprStatement(k))
case ASTKindExpr:
k.Root.raw = append(k.Root.raw, tok.Raw()...)
stack.Push(k)
case ASTKindExprStatement:
root := k.GetRoot()
children := root.GetChildren()
if len(children) == 0 {
return nil, NewParseError(
fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind),
)
}
rhs := children[len(children)-1]
if rhs.Root.ValueType != QuotedStringType {
rhs.Root.ValueType = StringType
rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...)
}
children[len(children)-1] = rhs
k.SetChildren(children)
stack.Push(k)
}
case OpenScopeState:
if !runeCompare(tok.Raw(), openBrace) {
return nil, NewParseError("expected '['")
}
stmt := newStatement()
stack.Push(stmt)
case CloseScopeState:
if !runeCompare(tok.Raw(), closeBrace) {
return nil, NewParseError("expected ']'")
}
k = trimSpaces(k)
stack.Push(newCompletedSectionStatement(k))
case SectionState:
var stmt AST
switch k.Kind {
case ASTKindStatement:
// If there are multiple literals inside of a scope declaration,
// then the current token's raw value will be appended to the Name.
//
// This handles cases like [ profile default ]
//
// k will represent a SectionStatement with the children representing
// the label of the section
stmt = newSectionStatement(tok)
case ASTKindSectionStatement:
k.Root.raw = append(k.Root.raw, tok.Raw()...)
stmt = k
default:
return nil, NewParseError(
fmt.Sprintf("invalid statement: expected statement: %v", k.Kind),
)
}
stack.Push(stmt)
case MarkCompleteState:
if k.Kind != ASTKindStart {
stack.MarkComplete(k)
}
if stack.Len() == 0 {
stack.Push(start)
}
case SkipState:
stack.Push(newSkipStatement(k))
s.Skip()
case CommentState:
if k.Kind == ASTKindStart {
stack.Push(k)
} else {
stack.MarkComplete(k)
}
stmt := newCommentStatement(tok)
stack.Push(stmt)
default:
return nil, NewParseError(fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", k, tok))
}
if len(tokens) > 0 {
tokens = tokens[1:]
}
}
// this occurs when a statement has not been completed
if stack.top > 1 {
return nil, NewParseError(fmt.Sprintf("incomplete expression: %v", stack.container))
}
// returns a sublist which excludes the start symbol
return stack.List(), nil
}
// trimSpaces will trim spaces on the left and right hand side of
// the literal.
func trimSpaces(k AST) AST {
// trim left hand side of spaces
for i := 0; i < len(k.Root.raw); i++ {
if !isWhitespace(k.Root.raw[i]) {
break
}
k.Root.raw = k.Root.raw[1:]
i--
}
// trim right hand side of spaces
for i := len(k.Root.raw) - 1; i >= 0; i-- {
if !isWhitespace(k.Root.raw[i]) {
break
}
k.Root.raw = k.Root.raw[:len(k.Root.raw)-1]
}
return k
}


@ -0,0 +1,324 @@
package ini
import (
"fmt"
"strconv"
"strings"
)
var (
runesTrue = []rune("true")
runesFalse = []rune("false")
)
var literalValues = [][]rune{
runesTrue,
runesFalse,
}
func isBoolValue(b []rune) bool {
for _, lv := range literalValues {
if isLitValue(lv, b) {
return true
}
}
return false
}
func isLitValue(want, have []rune) bool {
if len(have) < len(want) {
return false
}
for i := 0; i < len(want); i++ {
if want[i] != have[i] {
return false
}
}
return true
}
// isNumberValue will return whether or not the leading characters in
// a byte slice are a number. A number is delimited by whitespace or
// the newline token.
//
// A number is defined to be in a binary, octal, decimal (int | float), hex format,
// or in scientific notation.
func isNumberValue(b []rune) bool {
negativeIndex := 0
helper := numberHelper{}
needDigit := false
for i := 0; i < len(b); i++ {
negativeIndex++
switch b[i] {
case '-':
if helper.IsNegative() || negativeIndex != 1 {
return false
}
helper.Determine(b[i])
needDigit = true
continue
case 'e', 'E':
if err := helper.Determine(b[i]); err != nil {
return false
}
negativeIndex = 0
needDigit = true
continue
case 'b':
if helper.numberFormat == hex {
break
}
fallthrough
case 'o', 'x':
needDigit = true
if i == 0 {
return false
}
fallthrough
case '.':
if err := helper.Determine(b[i]); err != nil {
return false
}
needDigit = true
continue
}
if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) {
return !needDigit
}
if !helper.CorrectByte(b[i]) {
return false
}
needDigit = false
}
return !needDigit
}
func isValid(b []rune) (bool, int, error) {
if len(b) == 0 {
// TODO: should probably return an error
return false, 0, nil
}
return isValidRune(b[0]), 1, nil
}
func isValidRune(r rune) bool {
return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n'
}
// ValueType is an enum that will signify what type
// the Value is
type ValueType int
func (v ValueType) String() string {
switch v {
case NoneType:
return "NONE"
case DecimalType:
return "FLOAT"
case IntegerType:
return "INT"
case StringType:
return "STRING"
case BoolType:
return "BOOL"
}
return ""
}
// ValueType enums
const (
NoneType = ValueType(iota)
DecimalType
IntegerType
StringType
QuotedStringType
BoolType
)
// Value is a union container
type Value struct {
Type ValueType
raw []rune
integer int64
decimal float64
boolean bool
str string
}
func newValue(t ValueType, base int, raw []rune) (Value, error) {
v := Value{
Type: t,
raw: raw,
}
var err error
switch t {
case DecimalType:
v.decimal, err = strconv.ParseFloat(string(raw), 64)
case IntegerType:
if base != 10 {
raw = raw[2:]
}
v.integer, err = strconv.ParseInt(string(raw), base, 64)
case StringType:
v.str = string(raw)
case QuotedStringType:
v.str = string(raw[1 : len(raw)-1])
case BoolType:
v.boolean = runeCompare(v.raw, runesTrue)
}
// issue 2253
//
// if the value trying to be parsed is too large, then we will use
// the 'StringType' and raw value instead.
if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange {
v.Type = StringType
v.str = string(raw)
err = nil
}
return v, err
}
// Append will append values and change the type to a string
// type.
func (v *Value) Append(tok Token) {
r := tok.Raw()
if v.Type != QuotedStringType {
v.Type = StringType
r = tok.raw[1 : len(tok.raw)-1]
}
if tok.Type() != TokenLit {
v.raw = append(v.raw, tok.Raw()...)
} else {
v.raw = append(v.raw, r...)
}
}
func (v Value) String() string {
switch v.Type {
case DecimalType:
return fmt.Sprintf("decimal: %f", v.decimal)
case IntegerType:
return fmt.Sprintf("integer: %d", v.integer)
case StringType:
return fmt.Sprintf("string: %s", string(v.raw))
case QuotedStringType:
return fmt.Sprintf("quoted string: %s", string(v.raw))
case BoolType:
return fmt.Sprintf("bool: %t", v.boolean)
default:
return "union not set"
}
}
func newLitToken(b []rune) (Token, int, error) {
n := 0
var err error
token := Token{}
if b[0] == '"' {
n, err = getStringValue(b)
if err != nil {
return token, n, err
}
token = newToken(TokenLit, b[:n], QuotedStringType)
} else if isNumberValue(b) {
var base int
base, n, err = getNumericalValue(b)
if err != nil {
return token, 0, err
}
value := b[:n]
vType := IntegerType
if contains(value, '.') || hasExponent(value) {
vType = DecimalType
}
token = newToken(TokenLit, value, vType)
token.base = base
} else if isBoolValue(b) {
n, err = getBoolValue(b)
token = newToken(TokenLit, b[:n], BoolType)
} else {
n, err = getValue(b)
token = newToken(TokenLit, b[:n], StringType)
}
return token, n, err
}
// IntValue returns an integer value
func (v Value) IntValue() int64 {
return v.integer
}
// FloatValue returns a float value
func (v Value) FloatValue() float64 {
return v.decimal
}
// BoolValue returns a bool value
func (v Value) BoolValue() bool {
return v.boolean
}
func isTrimmable(r rune) bool {
switch r {
case '\n', ' ':
return true
}
return false
}
// StringValue returns the string value
func (v Value) StringValue() string {
switch v.Type {
case StringType:
return strings.TrimFunc(string(v.raw), isTrimmable)
case QuotedStringType:
// preserve all characters in the quotes
return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1]))
default:
return strings.TrimFunc(string(v.raw), isTrimmable)
}
}
func contains(runes []rune, c rune) bool {
for i := 0; i < len(runes); i++ {
if runes[i] == c {
return true
}
}
return false
}
func runeCompare(v1 []rune, v2 []rune) bool {
if len(v1) != len(v2) {
return false
}
for i := 0; i < len(v1); i++ {
if v1[i] != v2[i] {
return false
}
}
return true
}


@ -0,0 +1,30 @@
package ini
func isNewline(b []rune) bool {
if len(b) == 0 {
return false
}
if b[0] == '\n' {
return true
}
if len(b) < 2 {
return false
}
return b[0] == '\r' && b[1] == '\n'
}
func newNewlineToken(b []rune) (Token, int, error) {
i := 1
if b[0] == '\r' && isNewline(b[1:]) {
i++
}
if !isNewline([]rune(b[:i])) {
return emptyToken, 0, NewParseError("invalid new line token")
}
return newToken(TokenNL, b[:i], NoneType), i, nil
}


@ -0,0 +1,152 @@
package ini
import (
"bytes"
"fmt"
"strconv"
)
const (
none = numberFormat(iota)
binary
octal
decimal
hex
exponent
)
type numberFormat int
// numberHelper is used to dictate what format a number is in
// and what to do for negative values. Since -1e-4 is a valid
// number, we cannot just simply check for duplicate negatives.
type numberHelper struct {
numberFormat numberFormat
negative bool
negativeExponent bool
}
func (b numberHelper) Exists() bool {
return b.numberFormat != none
}
func (b numberHelper) IsNegative() bool {
return b.negative || b.negativeExponent
}
func (b *numberHelper) Determine(c rune) error {
if b.Exists() {
return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c)))
}
switch c {
case 'b':
b.numberFormat = binary
case 'o':
b.numberFormat = octal
case 'x':
b.numberFormat = hex
case 'e', 'E':
b.numberFormat = exponent
case '-':
if b.numberFormat != exponent {
b.negative = true
} else {
b.negativeExponent = true
}
case '.':
b.numberFormat = decimal
default:
return NewParseError(fmt.Sprintf("invalid number character: %v", string(c)))
}
return nil
}
func (b numberHelper) CorrectByte(c rune) bool {
switch {
case b.numberFormat == binary:
if !isBinaryByte(c) {
return false
}
case b.numberFormat == octal:
if !isOctalByte(c) {
return false
}
case b.numberFormat == hex:
if !isHexByte(c) {
return false
}
case b.numberFormat == decimal:
if !isDigit(c) {
return false
}
case b.numberFormat == exponent:
if !isDigit(c) {
return false
}
case b.negativeExponent:
if !isDigit(c) {
return false
}
case b.negative:
if !isDigit(c) {
return false
}
default:
if !isDigit(c) {
return false
}
}
return true
}
func (b numberHelper) Base() int {
switch b.numberFormat {
case binary:
return 2
case octal:
return 8
case hex:
return 16
default:
return 10
}
}
func (b numberHelper) String() string {
buf := bytes.Buffer{}
i := 0
switch b.numberFormat {
case binary:
i++
buf.WriteString(strconv.Itoa(i) + ": binary format\n")
case octal:
i++
buf.WriteString(strconv.Itoa(i) + ": octal format\n")
case hex:
i++
buf.WriteString(strconv.Itoa(i) + ": hex format\n")
case exponent:
i++
buf.WriteString(strconv.Itoa(i) + ": exponent format\n")
default:
i++
buf.WriteString(strconv.Itoa(i) + ": integer format\n")
}
if b.negative {
i++
buf.WriteString(strconv.Itoa(i) + ": negative format\n")
}
if b.negativeExponent {
i++
buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n")
}
return buf.String()
}


@ -0,0 +1,39 @@
package ini
import (
"fmt"
)
var (
equalOp = []rune("=")
equalColonOp = []rune(":")
)
func isOp(b []rune) bool {
if len(b) == 0 {
return false
}
switch b[0] {
case '=':
return true
case ':':
return true
default:
return false
}
}
func newOpToken(b []rune) (Token, int, error) {
tok := Token{}
switch b[0] {
case '=':
tok = newToken(TokenOp, equalOp, NoneType)
case ':':
tok = newToken(TokenOp, equalColonOp, NoneType)
default:
return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0]))
}
return tok, 1, nil
}

View File

@ -0,0 +1,43 @@
package ini
import "fmt"
const (
// ErrCodeParseError is returned when a parsing error
// has occurred.
ErrCodeParseError = "INIParseError"
)
// ParseError is an error which is returned during any part of
// the parsing process.
type ParseError struct {
msg string
}
// NewParseError will return a new ParseError where message
// is the description of the error.
func NewParseError(message string) *ParseError {
return &ParseError{
msg: message,
}
}
// Code will return the ErrCodeParseError
func (err *ParseError) Code() string {
return ErrCodeParseError
}
// Message returns the error's message
func (err *ParseError) Message() string {
return err.msg
}
// OrigError returns nothing since there will never be any
// original error.
func (err *ParseError) OrigError() error {
return nil
}
func (err *ParseError) Error() string {
return fmt.Sprintf("%s: %s", err.Code(), err.Message())
}


@ -0,0 +1,60 @@
package ini
import (
"bytes"
"fmt"
)
// ParseStack is a stack that contains a container (the stack portion)
// and the list of ASTs that have been successfully parsed.
type ParseStack struct {
top int
container []AST
list []AST
index int
}
func newParseStack(sizeContainer, sizeList int) ParseStack {
return ParseStack{
container: make([]AST, sizeContainer),
list: make([]AST, sizeList),
}
}
// Pop will return and truncate the last container element.
func (s *ParseStack) Pop() AST {
s.top--
return s.container[s.top]
}
// Push will add the new AST to the container
func (s *ParseStack) Push(ast AST) {
s.container[s.top] = ast
s.top++
}
// MarkComplete will append the AST to the list of completed statements
func (s *ParseStack) MarkComplete(ast AST) {
s.list[s.index] = ast
s.index++
}
// List will return the completed statements
func (s ParseStack) List() []AST {
return s.list[:s.index]
}
// Len will return the length of the container
func (s *ParseStack) Len() int {
return s.top
}
func (s ParseStack) String() string {
buf := bytes.Buffer{}
for i, node := range s.list {
buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node))
}
return buf.String()
}

View File

@ -0,0 +1,41 @@
package ini
import (
"fmt"
)
var (
emptyRunes = []rune{}
)
func isSep(b []rune) bool {
if len(b) == 0 {
return false
}
switch b[0] {
case '[', ']':
return true
default:
return false
}
}
var (
openBrace = []rune("[")
closeBrace = []rune("]")
)
func newSepToken(b []rune) (Token, int, error) {
tok := Token{}
switch b[0] {
case '[':
tok = newToken(TokenSep, openBrace, NoneType)
case ']':
tok = newToken(TokenSep, closeBrace, NoneType)
default:
return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0]))
}
return tok, 1, nil
}


@ -0,0 +1,45 @@
package ini
// skipper is used to skip certain blocks of an ini file.
// Currently skipper is used to skip nested blocks of ini
// files. See example below
//
// [ foo ]
// nested = ; this section will be skipped
// a=b
// c=d
// bar=baz ; this will be included
type skipper struct {
shouldSkip bool
TokenSet bool
prevTok Token
}
func newSkipper() skipper {
return skipper{
prevTok: emptyToken,
}
}
func (s *skipper) ShouldSkip(tok Token) bool {
if s.shouldSkip &&
s.prevTok.Type() == TokenNL &&
tok.Type() != TokenWS {
s.Continue()
return false
}
s.prevTok = tok
return s.shouldSkip
}
func (s *skipper) Skip() {
s.shouldSkip = true
s.prevTok = emptyToken
}
func (s *skipper) Continue() {
s.shouldSkip = false
s.prevTok = emptyToken
}


@ -0,0 +1,35 @@
package ini
// Statement is an empty AST mostly used for transitioning states.
func newStatement() AST {
return newAST(ASTKindStatement, AST{})
}
// SectionStatement represents a section AST
func newSectionStatement(tok Token) AST {
return newASTWithRootToken(ASTKindSectionStatement, tok)
}
// ExprStatement represents a completed expression AST
func newExprStatement(ast AST) AST {
return newAST(ASTKindExprStatement, ast)
}
// CommentStatement represents a comment in the ini definition.
//
// grammar:
// comment -> #comment' | ;comment'
// comment' -> epsilon | value
func newCommentStatement(tok Token) AST {
return newAST(ASTKindCommentStatement, newExpression(tok))
}
// CompletedSectionStatement represents a completed section
func newCompletedSectionStatement(ast AST) AST {
return newAST(ASTKindCompletedSectionStatement, ast)
}
// SkipStatement is used to skip whole statements
func newSkipStatement(ast AST) AST {
return newAST(ASTKindSkipStatement, ast)
}


@ -0,0 +1,284 @@
package ini
import (
"fmt"
)
// getStringValue will return a quoted string and the number
// of bytes read
//
// an error will be returned if the string is not properly formatted
func getStringValue(b []rune) (int, error) {
if b[0] != '"' {
return 0, NewParseError("strings must start with '\"'")
}
endQuote := false
i := 1
for ; i < len(b) && !endQuote; i++ {
if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped {
endQuote = true
break
} else if escaped {
/*c, err := getEscapedByte(b[i])
if err != nil {
return 0, err
}
b[i-1] = c
b = append(b[:i], b[i+1:]...)
i--*/
continue
}
}
if !endQuote {
return 0, NewParseError("missing '\"' in string value")
}
return i + 1, nil
}
// getBoolValue will return a boolean and the number
// of bytes read
//
// an error will be returned if the boolean is not of a correct
// value
func getBoolValue(b []rune) (int, error) {
if len(b) < 4 {
return 0, NewParseError("invalid boolean value")
}
n := 0
for _, lv := range literalValues {
if len(lv) > len(b) {
continue
}
if isLitValue(lv, b) {
n = len(lv)
}
}
if n == 0 {
return 0, NewParseError("invalid boolean value")
}
return n, nil
}
// getNumericalValue will return a numerical string, the number
// of bytes read, and the base of the number
//
// an error will be returned if the number is not of a correct
// value
func getNumericalValue(b []rune) (int, int, error) {
if !isDigit(b[0]) {
return 0, 0, NewParseError("invalid digit value")
}
i := 0
helper := numberHelper{}
loop:
for negativeIndex := 0; i < len(b); i++ {
negativeIndex++
if !isDigit(b[i]) {
switch b[i] {
case '-':
if helper.IsNegative() || negativeIndex != 1 {
return 0, 0, NewParseError("parse error '-'")
}
n := getNegativeNumber(b[i:])
i += (n - 1)
helper.Determine(b[i])
continue
case '.':
if err := helper.Determine(b[i]); err != nil {
return 0, 0, err
}
case 'e', 'E':
if err := helper.Determine(b[i]); err != nil {
return 0, 0, err
}
negativeIndex = 0
case 'b':
if helper.numberFormat == hex {
break
}
fallthrough
case 'o', 'x':
if i == 0 && b[i] != '0' {
return 0, 0, NewParseError("incorrect base format, expected leading '0'")
}
if i != 1 {
return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i))
}
if err := helper.Determine(b[i]); err != nil {
return 0, 0, err
}
default:
if isWhitespace(b[i]) {
break loop
}
if isNewline(b[i:]) {
break loop
}
if !(helper.numberFormat == hex && isHexByte(b[i])) {
if i+2 < len(b) && !isNewline(b[i:i+2]) {
return 0, 0, NewParseError("invalid numerical character")
} else if !isNewline([]rune{b[i]}) {
return 0, 0, NewParseError("invalid numerical character")
}
break loop
}
}
}
}
return helper.Base(), i, nil
}
// isDigit will return whether or not the rune is a digit
func isDigit(b rune) bool {
return b >= '0' && b <= '9'
}
func hasExponent(v []rune) bool {
return contains(v, 'e') || contains(v, 'E')
}
func isBinaryByte(b rune) bool {
switch b {
case '0', '1':
return true
default:
return false
}
}
func isOctalByte(b rune) bool {
switch b {
case '0', '1', '2', '3', '4', '5', '6', '7':
return true
default:
return false
}
}
func isHexByte(b rune) bool {
if isDigit(b) {
return true
}
return (b >= 'A' && b <= 'F') ||
(b >= 'a' && b <= 'f')
}
func getValue(b []rune) (int, error) {
i := 0
for i < len(b) {
if isNewline(b[i:]) {
break
}
if isOp(b[i:]) {
break
}
valid, n, err := isValid(b[i:])
if err != nil {
return 0, err
}
if !valid {
break
}
i += n
}
return i, nil
}
// getNegativeNumber will return a negative number from a
// byte slice. This will iterate through all characters until
// a non-digit has been found.
func getNegativeNumber(b []rune) int {
if b[0] != '-' {
return 0
}
i := 1
for ; i < len(b); i++ {
if !isDigit(b[i]) {
return i
}
}
return i
}
// isEscaped will return whether or not the character is an escaped
// character.
func isEscaped(value []rune, b rune) bool {
if len(value) == 0 {
return false
}
switch b {
case '\'': // single quote
case '"': // quote
case 'n': // newline
case 't': // tab
case '\\': // backslash
default:
return false
}
return value[len(value)-1] == '\\'
}
func getEscapedByte(b rune) (rune, error) {
switch b {
case '\'': // single quote
return '\'', nil
case '"': // quote
return '"', nil
case 'n': // newline
return '\n', nil
case 't': // tab
return '\t', nil
case '\\': // backslash
return '\\', nil
default:
return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b))
}
}
func removeEscapedCharacters(b []rune) []rune {
for i := 0; i < len(b); i++ {
if isEscaped(b[:i], b[i]) {
c, err := getEscapedByte(b[i])
if err != nil {
return b
}
b[i-1] = c
b = append(b[:i], b[i+1:]...)
i--
}
}
return b
}


@ -0,0 +1,166 @@
package ini
import (
"fmt"
"sort"
)
// Visitor is an interface used by walkers that will
// traverse an array of ASTs.
type Visitor interface {
VisitExpr(AST) error
VisitStatement(AST) error
}
// DefaultVisitor is used to visit statements and expressions
// and ensure that they are both of the correct format.
// In addition, visiting will build sections and populate
// the Sections field, which can be used to retrieve profile
// configuration.
type DefaultVisitor struct {
scope string
Sections Sections
}
// NewDefaultVisitor return a DefaultVisitor
func NewDefaultVisitor() *DefaultVisitor {
return &DefaultVisitor{
Sections: Sections{
container: map[string]Section{},
},
}
}
// VisitExpr visits expressions...
func (v *DefaultVisitor) VisitExpr(expr AST) error {
t := v.Sections.container[v.scope]
if t.values == nil {
t.values = values{}
}
switch expr.Kind {
case ASTKindExprStatement:
opExpr := expr.GetRoot()
switch opExpr.Kind {
case ASTKindEqualExpr:
children := opExpr.GetChildren()
if len(children) <= 1 {
return NewParseError("unexpected token type")
}
rhs := children[1]
if rhs.Root.Type() != TokenLit {
return NewParseError("unexpected token type")
}
key := EqualExprKey(opExpr)
v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw())
if err != nil {
return err
}
t.values[key] = v
default:
return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
}
default:
return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
}
v.Sections.container[v.scope] = t
return nil
}
// VisitStatement visits statements...
func (v *DefaultVisitor) VisitStatement(stmt AST) error {
switch stmt.Kind {
case ASTKindCompletedSectionStatement:
child := stmt.GetRoot()
if child.Kind != ASTKindSectionStatement {
return NewParseError(fmt.Sprintf("unsupported child statement: %T", child))
}
name := string(child.Root.Raw())
v.Sections.container[name] = Section{}
v.scope = name
default:
return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind))
}
return nil
}
// Sections is a map of Section structures that represent
// a configuration.
type Sections struct {
container map[string]Section
}
// GetSection will return section p. If section p does not exist,
// false will be returned in the second parameter.
func (t Sections) GetSection(p string) (Section, bool) {
v, ok := t.container[p]
return v, ok
}
// values represents a map of union values.
type values map[string]Value
// List will return a list of all sections that were successfully
// parsed.
func (t Sections) List() []string {
keys := make([]string, len(t.container))
i := 0
for k := range t.container {
keys[i] = k
i++
}
sort.Strings(keys)
return keys
}
// Section contains a name and values. This represent
// a sectioned entry in a configuration file.
type Section struct {
Name string
values values
}
// Has will return whether or not an entry exists in a given section
func (t Section) Has(k string) bool {
_, ok := t.values[k]
return ok
}
// ValueType will return what type the union is set to. If
// k was not found, the NoneType will be returned.
func (t Section) ValueType(k string) (ValueType, bool) {
v, ok := t.values[k]
return v.Type, ok
}
// Bool returns a bool value at k
func (t Section) Bool(k string) bool {
return t.values[k].BoolValue()
}
// Int returns an integer value at k
func (t Section) Int(k string) int64 {
return t.values[k].IntValue()
}
// Float64 returns a float value at k
func (t Section) Float64(k string) float64 {
return t.values[k].FloatValue()
}
// String returns the string value at k
func (t Section) String(k string) string {
_, ok := t.values[k]
if !ok {
return ""
}
return t.values[k].StringValue()
}


@ -0,0 +1,25 @@
package ini
// Walk will traverse the AST using the v, the Visitor.
func Walk(tree []AST, v Visitor) error {
for _, node := range tree {
switch node.Kind {
case ASTKindExpr,
ASTKindExprStatement:
if err := v.VisitExpr(node); err != nil {
return err
}
case ASTKindStatement,
ASTKindCompletedSectionStatement,
ASTKindNestedSectionStatement,
ASTKindCompletedNestedSectionStatement:
if err := v.VisitStatement(node); err != nil {
return err
}
}
}
return nil
}
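Taken together, the visitor and Walk above turn a parsed AST into a Sections lookup. A minimal usage sketch from inside this package, assuming the []AST tree has already been produced by the package's lexer/parser; the helper name and the "default"/"region" keys are placeholders:

func exampleReadRegion(tree []AST) (string, error) {
    v := NewDefaultVisitor()
    if err := Walk(tree, v); err != nil {
        return "", err
    }
    sec, ok := v.Sections.GetSection("default")
    if !ok || !sec.Has("region") {
        return "", NewParseError("no region key in the default section")
    }
    return sec.String("region"), nil
}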


@ -0,0 +1,24 @@
package ini
import (
"unicode"
)
// isWhitespace will return whether or not the character is
// a whitespace character.
//
// Whitespace is defined as a space or tab.
func isWhitespace(c rune) bool {
return unicode.IsSpace(c) && c != '\n' && c != '\r'
}
func newWSToken(b []rune) (Token, int, error) {
i := 0
for ; i < len(b); i++ {
if !isWhitespace(b[i]) {
break
}
}
return newToken(TokenWS, b[:i], NoneType), i, nil
}


@ -0,0 +1,57 @@
package s3err
import (
"fmt"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
// RequestFailure provides additional S3 specific metadata for the request
// failure.
type RequestFailure struct {
awserr.RequestFailure
hostID string
}
// NewRequestFailure returns a request failure error decorated with S3
// specific metadata.
func NewRequestFailure(err awserr.RequestFailure, hostID string) *RequestFailure {
return &RequestFailure{RequestFailure: err, hostID: hostID}
}
func (r RequestFailure) Error() string {
extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
r.StatusCode(), r.RequestID(), r.hostID)
return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
}
func (r RequestFailure) String() string {
return r.Error()
}
// HostID returns the HostID request response value.
func (r RequestFailure) HostID() string {
return r.hostID
}
// RequestFailureWrapperHandler returns a handler that wraps an
// awserr.RequestFailure with the S3 Request ID 2 (host ID) from the response.
func RequestFailureWrapperHandler() request.NamedHandler {
return request.NamedHandler{
Name: "awssdk.s3.errorHandler",
Fn: func(req *request.Request) {
reqErr, ok := req.Error.(awserr.RequestFailure)
if !ok || reqErr == nil {
return
}
hostID := req.HTTPResponse.Header.Get("X-Amz-Id-2")
if req.Error == nil {
return
}
req.Error = NewRequestFailure(reqErr, hostID)
},
}
}
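On the caller side, the wrapping performed by the handler above is what makes the host ID recoverable from a failed S3 call. A hedged sketch, assuming err came back from an aws-sdk-go S3 API call; the function name and log format are placeholders:

import (
    "log"

    "github.com/aws/aws-sdk-go/service/s3"
)

// logS3Failure inspects an error returned by an S3 operation and, when it is
// an s3.RequestFailure, logs the S3 request ID and host ID for support.
func logS3Failure(err error) {
    if reqErr, ok := err.(s3.RequestFailure); ok {
        log.Printf("s3 request failed: code %s, request id %s, host id %s",
            reqErr.Code(), reqErr.RequestID(), reqErr.HostID())
    }
}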


@ -0,0 +1,12 @@
package shareddefaults
const (
// ECSCredsProviderEnvVar is an environment variable key used to
// determine which path needs to be hit.
ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
)
// ECSContainerCredentialsURI is the endpoint to retrieve container
// credentials. This can be overridden in tests to ensure the credential process
// is behaving correctly.
var ECSContainerCredentialsURI = "http://169.254.170.2"


@ -0,0 +1,68 @@
package protocol
import (
"strings"
"github.com/aws/aws-sdk-go/aws/request"
)
// ValidateEndpointHostHandler is a request handler that validates that the
// request endpoint's host is a valid RFC 3986 host.
var ValidateEndpointHostHandler = request.NamedHandler{
Name: "awssdk.protocol.ValidateEndpointHostHandler",
Fn: func(r *request.Request) {
err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host)
if err != nil {
r.Error = err
}
},
}
// ValidateEndpointHost validates that the host string passed in is a valid RFC
// 3986 host. Returns error if the host is not valid.
func ValidateEndpointHost(opName, host string) error {
paramErrs := request.ErrInvalidParams{Context: opName}
labels := strings.Split(host, ".")
for i, label := range labels {
if i == len(labels)-1 && len(label) == 0 {
// Allow trailing dot for FQDN hosts.
continue
}
if !ValidHostLabel(label) {
paramErrs.Add(request.NewErrParamFormat(
"endpoint host label", "[a-zA-Z0-9-]{1,63}", label))
}
}
if len(host) > 255 {
paramErrs.Add(request.NewErrParamMaxLen(
"endpoint host", 255, host,
))
}
if paramErrs.Len() > 0 {
return paramErrs
}
return nil
}
// ValidHostLabel returns whether the label is a valid RFC 3986 host label.
func ValidHostLabel(label string) bool {
if l := len(label); l == 0 || l > 63 {
return false
}
for _, r := range label {
switch {
case r >= '0' && r <= '9':
case r >= 'A' && r <= 'Z':
case r >= 'a' && r <= 'z':
case r == '-':
default:
return false
}
}
return true
}
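A small sketch of what the validation above accepts and rejects when called directly (the SDK normally invokes it through ValidateEndpointHostHandler); the operation name and hosts are placeholders:

func exampleValidateHosts() {
    // "exa_mple" contains an underscore, which falls outside [a-zA-Z0-9-],
    // so a request.ErrInvalidParams error is returned.
    bad := ValidateEndpointHost("GetObject", "exa_mple.amazonaws.com")

    // Well-formed labels pass, and a trailing dot is tolerated for FQDNs,
    // so this returns nil.
    good := ValidateEndpointHost("GetObject", "s3.us-west-2.amazonaws.com.")

    _, _ = bad, good
}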


@ -0,0 +1,54 @@
package protocol
import (
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
)
// HostPrefixHandlerName is the handler name for the host prefix request
// handler.
const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler"
// NewHostPrefixHandler constructs a build handler that expands the given host
// prefix template and prepends it to the request's endpoint host.
func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler {
builder := HostPrefixBuilder{
Prefix: prefix,
LabelsFn: labelsFn,
}
return request.NamedHandler{
Name: HostPrefixHandlerName,
Fn: builder.Build,
}
}
// HostPrefixBuilder provides the request handler to expand and prepend
// the host prefix into the operation's request endpoint host.
type HostPrefixBuilder struct {
Prefix string
LabelsFn func() map[string]string
}
// Build updates the passed in Request with the HostPrefix template expanded.
func (h HostPrefixBuilder) Build(r *request.Request) {
if aws.BoolValue(r.Config.DisableEndpointHostPrefix) {
return
}
var labels map[string]string
if h.LabelsFn != nil {
labels = h.LabelsFn()
}
prefix := h.Prefix
for name, value := range labels {
prefix = strings.Replace(prefix, "{"+name+"}", value, -1)
}
r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host
if len(r.HTTPRequest.Host) > 0 {
r.HTTPRequest.Host = prefix + r.HTTPRequest.Host
}
}
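To illustrate the expansion above: a prefix template such as "{AccountId}." with a labels function returning {"AccountId": "123456789012"} turns an endpoint host of service.region.amazonaws.com into 123456789012.service.region.amazonaws.com. A hedged sketch of registering such a handler on a request; the template, label name, and value are placeholders:

func exampleHostPrefix(req *request.Request) {
    h := NewHostPrefixHandler("{AccountId}.", func() map[string]string {
        return map[string]string{"AccountId": "123456789012"}
    })
    // Operations with a modeled host prefix push the handler onto the
    // request's build phase so the host is rewritten before the request is sent.
    req.Handlers.Build.PushBackNamed(h)
}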


@ -87,7 +87,7 @@ func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag refle
}
}
// buildStruct adds a struct and its fields to the current XMLNode. All fields any any nested
// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested
// types are converted to XMLNodes also.
func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
if !value.IsValid() {

File diff suppressed because it is too large.


@ -3,6 +3,7 @@ package s3
import (
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/s3err"
)
func init() {
@ -21,6 +22,7 @@ func defaultInitClientFn(c *client.Client) {
// S3 uses custom error unmarshaling logic
c.Handlers.UnmarshalError.Clear()
c.Handlers.UnmarshalError.PushBack(unmarshalError)
c.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler())
}
func defaultInitRequestFn(r *request.Request) {
@ -31,6 +33,7 @@ func defaultInitRequestFn(r *request.Request) {
switch r.Operation.Name {
case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy,
opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration,
opPutObjectLegalHold, opPutObjectRetention, opPutObjectLockConfiguration,
opPutBucketReplication:
// These S3 operations require Content-MD5 to be set
r.Handlers.Build.PushBack(contentMD5)
@ -42,6 +45,7 @@ func defaultInitRequestFn(r *request.Request) {
r.Handlers.Validate.PushFront(populateLocationConstraint)
case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
r.Handlers.Unmarshal.PushBackNamed(s3err.RequestFailureWrapperHandler())
case opPutObject, opUploadPart:
r.Handlers.Build.PushBack(computeBodyHashes)
// Disabled until #1837 root issue is resolved.


@ -13,7 +13,11 @@ import (
func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {
r.Error = awserr.New("SerializationError", "unable to read response body", err)
r.Error = awserr.NewRequestFailure(
awserr.New("SerializationError", "unable to read response body", err),
r.HTTPResponse.StatusCode,
r.RequestID,
)
return
}
body := bytes.NewReader(b)


@ -23,22 +23,17 @@ func unmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
defer io.Copy(ioutil.Discard, r.HTTPResponse.Body)
hostID := r.HTTPResponse.Header.Get("X-Amz-Id-2")
// Bucket exists in a different region, and request needs
// to be made to the correct region.
if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
r.Error = requestFailure{
RequestFailure: awserr.NewRequestFailure(
awserr.New("BucketRegionError",
fmt.Sprintf("incorrect region, the bucket is not in '%s' region",
aws.StringValue(r.Config.Region)),
nil),
r.HTTPResponse.StatusCode,
r.RequestID,
),
hostID: hostID,
}
r.Error = awserr.NewRequestFailure(
awserr.New("BucketRegionError",
fmt.Sprintf("incorrect region, the bucket is not in '%s' region",
aws.StringValue(r.Config.Region)),
nil),
r.HTTPResponse.StatusCode,
r.RequestID,
)
return
}
@ -63,14 +58,11 @@ func unmarshalError(r *request.Request) {
errMsg = statusText
}
r.Error = requestFailure{
RequestFailure: awserr.NewRequestFailure(
awserr.New(errCode, errMsg, err),
r.HTTPResponse.StatusCode,
r.RequestID,
),
hostID: hostID,
}
r.Error = awserr.NewRequestFailure(
awserr.New(errCode, errMsg, err),
r.HTTPResponse.StatusCode,
r.RequestID,
)
}
// A RequestFailure provides access to the S3 Request ID and Host ID values
@ -83,21 +75,3 @@ type RequestFailure interface {
// Host ID is the S3 Host ID needed for debug, and contacting support
HostID() string
}
type requestFailure struct {
awserr.RequestFailure
hostID string
}
func (r requestFailure) Error() string {
extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
r.StatusCode(), r.RequestID(), r.hostID)
return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
}
func (r requestFailure) String() string {
return r.Error()
}
func (r requestFailure) HostID() string {
return r.hostID
}


@ -15,7 +15,7 @@ const opAssumeRole = "AssumeRole"
// AssumeRoleRequest generates a "aws/request.Request" representing the
// client's request for the AssumeRole operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -209,7 +209,7 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
// client's request for the AssumeRoleWithSAML operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -391,7 +391,7 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the
// client's request for the AssumeRoleWithWebIdentity operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -602,7 +602,7 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the
// client's request for the DecodeAuthorizationMessage operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -714,7 +714,7 @@ const opGetCallerIdentity = "GetCallerIdentity"
// GetCallerIdentityRequest generates a "aws/request.Request" representing the
// client's request for the GetCallerIdentity operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -789,7 +789,7 @@ const opGetFederationToken = "GetFederationToken"
// GetFederationTokenRequest generates a "aws/request.Request" representing the
// client's request for the GetFederationToken operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
@ -958,7 +958,7 @@ const opGetSessionToken = "GetSessionToken"
// GetSessionTokenRequest generates a "aws/request.Request" representing the
// client's request for the GetSessionToken operation. The "output" return
// value will be populated with the request's response once the request completes
// successfuly.
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.


@ -1,29 +1,42 @@
package bugsnag
import (
"github.com/bugsnag/bugsnag-go/errors"
"context"
"fmt"
"log"
"net/http"
"os"
"path/filepath"
"runtime"
"sync"
"time"
"github.com/bugsnag/bugsnag-go/device"
"github.com/bugsnag/bugsnag-go/errors"
"github.com/bugsnag/bugsnag-go/sessions"
// Fixes a bug with SHA-384 intermediate certs on some platforms.
// - https://github.com/bugsnag/bugsnag-go/issues/9
_ "crypto/sha512"
)
// The current version of bugsnag-go.
const VERSION = "1.3.2"
// VERSION defines the version of this Bugsnag notifier
const VERSION = "1.4.0"
var once sync.Once
var panicHandlerOnce sync.Once
var sessionTrackerOnce sync.Once
var middleware middlewareStack
// The configuration for the default bugsnag notifier.
// Config is the configuration for the default bugsnag notifier.
var Config Configuration
var sessionTrackingConfig sessions.SessionTrackingConfiguration
// DefaultSessionPublishInterval defines how often sessions should be sent to
// Bugsnag.
// Deprecated: Exposed for developer sanity in testing. Modify at own risk.
var DefaultSessionPublishInterval = 60 * time.Second
var defaultNotifier = Notifier{&Config, nil}
var sessionTracker sessions.SessionTracker
// Configure Bugsnag. The only required setting is the APIKey, which can be
// obtained by clicking on "Settings" in your Bugsnag dashboard. This function
@ -31,24 +44,53 @@ var defaultNotifier = Notifier{&Config, nil}
// called as early as possible in your initialization process.
func Configure(config Configuration) {
Config.update(&config)
once.Do(Config.PanicHandler)
updateSessionConfig()
// Only do once in case the user overrides the default panichandler, and
// configures multiple times.
panicHandlerOnce.Do(Config.PanicHandler)
}
// Notify sends an error to Bugsnag along with the current stack trace. The
// rawData is used to send extra information along with the error. For example
// you can pass the current http.Request to Bugsnag to see information about it
// in the dashboard, or set the severity of the notification.
// StartSession creates a new context from the given context.Context instance
// with Bugsnag session data attached. It will start the session tracker if it
// has not already been started.
func StartSession(ctx context.Context) context.Context {
sessionTrackerOnce.Do(startSessionTracking)
return sessionTracker.StartSession(ctx)
}
// Notify sends an error.Error to Bugsnag along with the current stack trace.
// If at all possible, it is recommended to pass in a context.Context, e.g.
// from an http.Request or bugsnag.StartSession(), as Bugsnag will be able to
// extract additional information in some cases. The rawData is used to send
// extra information along with the error. For example you can pass the current
// http.Request to Bugsnag to see information about it in the dashboard, or set
// the severity of the notification. For a detailed list of the information
// that can be extracted, see
// https://docs.bugsnag.com/platforms/go/reporting-handled-errors/
func Notify(err error, rawData ...interface{}) error {
return defaultNotifier.Notify(errors.New(err, 1), rawData...)
if e := checkForEmptyError(err); e != nil {
return e
}
// Stripping one stackframe to not include this function in the stacktrace
// for a manual notification.
skipFrames := 1
return defaultNotifier.Notify(errors.New(err, skipFrames), rawData...)
}
// AutoNotify logs a panic on a goroutine and then repanics.
// It should only be used in places that have existing panic handlers further
// up the stack. The rawData is used to send extra information along with any
// up the stack.
// Although it's not strictly enforced, it's highly recommended to pass a
// context.Context object that has at one point been returned from
// bugsnag.StartSession. Doing so ensures your stability score remains accurate,
// and future versions of Bugsnag may extract more useful information from this
// context.
// The rawData is used to send extra information along with any
// panics that are handled this way.
// Usage:
// go func() {
// defer bugsnag.AutoNotify()
// ctx := bugsnag.StartSession(context.Background())
// defer bugsnag.AutoNotify(ctx)
// // (possibly crashy code)
// }()
// See also: bugsnag.Recover()
@ -57,21 +99,50 @@ func AutoNotify(rawData ...interface{}) {
severity := defaultNotifier.getDefaultSeverity(rawData, SeverityError)
state := HandledState{SeverityReasonHandledPanic, severity, true, ""}
rawData = append([]interface{}{state}, rawData...)
defaultNotifier.NotifySync(errors.New(err, 2), true, rawData...)
// We strip the following stackframes as they don't add much info
// - runtime/$arch - e.g. runtime/asm_amd64.s#call32
// - runtime/panic.go#gopanic
// Panics have their own stacktrace, so no stripping of the current stack
skipFrames := 2
defaultNotifier.NotifySync(errors.New(err, skipFrames), true, rawData...)
sessionTracker.FlushSessions()
panic(err)
}
}
// Recover logs a panic on a goroutine and then recovers.
// Although it's not strictly enforced, it's highly recommended to pass a
// context.Context object that has at one point been returned from
// bugsnag.StartSession. Doing so ensures your stability score remains accurate,
// and future versions of Bugsnag may extract more useful information from this
// context.
// The rawData is used to send extra information along with
// any panics that are handled this way
// Usage: defer bugsnag.Recover()
// Usage:
// go func() {
// ctx := bugsnag.StartSession(context.Background())
// defer bugsnag.Recover(ctx)
// // (possibly crashy code)
// }()
// If you wish that any panics caught by the call to Recover shall affect your
// stability score (it does not by default):
// go func() {
// ctx := bugsnag.StartSession(context.Background())
// defer bugsnag.Recover(ctx, bugsnag.HandledState{Unhandled: true})
// // (possibly crashy code)
// }()
// See also: bugsnag.AutoNotify()
func Recover(rawData ...interface{}) {
if err := recover(); err != nil {
severity := defaultNotifier.getDefaultSeverity(rawData, SeverityWarning)
state := HandledState{SeverityReasonHandledPanic, severity, false, ""}
rawData = append([]interface{}{state}, rawData...)
defaultNotifier.Notify(errors.New(err, 2), rawData...)
// We strip the following stackframes as they don't add much info
// - runtime/$arch - e.g. runtime/asm_amd64.s#call32
// - runtime/panic.go#gopanic
// Panics have their own stacktrace, so no stripping of the current stack
skipFrames := 2
defaultNotifier.Notify(errors.New(err, skipFrames), rawData...)
}
}
@ -95,8 +166,17 @@ func Handler(h http.Handler, rawData ...interface{}) http.Handler {
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer notifier.AutoNotify(r)
h.ServeHTTP(w, r)
request := r
// Record a session if auto notify session is enabled
ctx := r.Context()
if Config.IsAutoCaptureSessions() {
ctx = StartSession(ctx)
}
ctx = AttachRequestData(ctx, request)
request = r.WithContext(ctx)
defer notifier.AutoNotify(ctx, request)
h.ServeHTTP(w, request)
})
}
@ -110,11 +190,31 @@ func HandlerFunc(h http.HandlerFunc, rawData ...interface{}) http.HandlerFunc {
notifier := New(rawData...)
return func(w http.ResponseWriter, r *http.Request) {
defer notifier.AutoNotify(r)
h(w, r)
request := r
// Record a session if auto notify session is enabled
ctx := request.Context()
if notifier.Config.IsAutoCaptureSessions() {
ctx = StartSession(ctx)
}
ctx = AttachRequestData(ctx, request)
request = request.WithContext(ctx)
defer notifier.AutoNotify(ctx)
h(w, request)
}
}
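Putting the two wrappers above to use is a one-line change around an existing handler or mux. A hedged usage sketch with placeholder API key, route, and address; with AutoCaptureSessions left at its default, each wrapped request also records a session:

package main

import (
    "net/http"

    "github.com/bugsnag/bugsnag-go"
)

func main() {
    bugsnag.Configure(bugsnag.Configuration{APIKey: "your-api-key-here"})

    mux := http.NewServeMux()
    mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("ok"))
    })

    // Handler reports panics from the wrapped handler and attaches request
    // and session data to the context seen by the notifier.
    http.ListenAndServe(":8080", bugsnag.Handler(mux))
}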
// checkForEmptyError checks if the given error (to be reported to Bugsnag) is
// nil. If it is, then log an error message and return another error wrapping
// this error message.
func checkForEmptyError(err error) error {
if err != nil {
return nil
}
msg := "attempted to notify Bugsnag without supplying an error. Bugsnag not notified"
Config.Logger.Printf("ERROR: " + msg)
return fmt.Errorf(msg)
}
func init() {
// Set up builtin middleware
OnBeforeNotify(httpRequestMiddleware)
@ -127,24 +227,50 @@ func init() {
sourceRoot = filepath.Join(runtime.GOROOT(), "src") + "/"
}
Config.update(&Configuration{
APIKey: "",
Endpoint: "https://notify.bugsnag.com/",
Hostname: "",
AppType: "",
AppVersion: "",
ReleaseStage: "",
ParamsFilters: []string{"password", "secret"},
SourceRoot: sourceRoot,
APIKey: "",
Endpoints: Endpoints{
Notify: "https://notify.bugsnag.com",
Sessions: "https://sessions.bugsnag.com",
},
Hostname: device.GetHostname(),
AppType: "",
AppVersion: "",
AutoCaptureSessions: true,
ReleaseStage: "",
ParamsFilters: []string{"password", "secret", "authorization", "cookie"},
SourceRoot: sourceRoot,
// * for app-engine
ProjectPackages: []string{"main*"},
NotifyReleaseStages: nil,
Logger: log.New(os.Stdout, log.Prefix(), log.Flags()),
PanicHandler: defaultPanicHandler,
Transport: http.DefaultTransport,
})
hostname, err := os.Hostname()
if err == nil {
Config.Hostname = hostname
flushSessionsOnRepanic: true,
})
updateSessionConfig()
}
func startSessionTracking() {
if sessionTracker == nil {
updateSessionConfig()
sessionTracker = sessions.NewSessionTracker(&sessionTrackingConfig)
}
}
func updateSessionConfig() {
sessionTrackingConfig.Update(&sessions.SessionTrackingConfiguration{
APIKey: Config.APIKey,
AutoCaptureSessions: Config.AutoCaptureSessions,
Endpoint: Config.Endpoints.Sessions,
Version: VERSION,
PublishInterval: DefaultSessionPublishInterval,
Transport: Config.Transport,
ReleaseStage: Config.ReleaseStage,
Hostname: Config.Hostname,
AppType: Config.AppType,
AppVersion: Config.AppVersion,
NotifyReleaseStages: Config.NotifyReleaseStages,
Logger: Config.Logger,
})
}


@ -7,15 +7,32 @@ import (
"strings"
)
// Endpoints hold the HTTP endpoints of the notifier.
type Endpoints struct {
Sessions string
Notify string
}
// Configuration sets up and customizes communication with the Bugsnag API.
type Configuration struct {
// Your Bugsnag API key, e.g. "c9d60ae4c7e70c4b6c4ebd3e8056d2b8". You can
// find this by clicking Settings on https://bugsnag.com/.
APIKey string
// Deprecated: Use Endpoints (with an 's') instead.
// The Endpoint to notify about crashes. This defaults to
// "https://notify.bugsnag.com/", if you're using Bugsnag Enterprise then
// set it to your internal Bugsnag endpoint.
Endpoint string
// Endpoints define the HTTP endpoints that the notifier should notify
// about crashes and sessions. These default to notify.bugsnag.com for
// error reports and sessions.bugsnag.com for sessions.
// If you are using bugsnag on-premise you will have to set these to your
// Event Server and Session Server endpoints. If the notify endpoint is set
// but the sessions endpoint is not, session tracking will be disabled
// automatically to avoid leaking session information outside of your
// server configuration, and a warning will be logged.
Endpoints Endpoints
// The current release stage. This defaults to "production" and is used to
// filter errors in the Bugsnag dashboard.
@ -27,6 +44,17 @@ type Configuration struct {
// in the Bugsnag dashboard. If you set this then Bugsnag will only re-open
// resolved errors if they happen in different app versions.
AppVersion string
// AutoCaptureSessions can be set to false to disable automatic session
// tracking. If you want control over what is deemed a session, you can
// switch off automatic session tracking with this configuration, and call
// bugsnag.StartSession() when appropriate for your application. See the
// official docs for instructions and examples of associating handled
// errors with sessions and ensuring error rate accuracy on the Bugsnag
// dashboard. This will default to true, but is stored as an interface to enable
// us to detect when this option has not been set.
AutoCaptureSessions interface{}
// The hostname of the current server. This defaults to the return value of
// os.Hostname() and is graphed in the Bugsnag dashboard.
Hostname string
@ -51,7 +79,7 @@ type Configuration struct {
// build.
SourceRoot string
// Any meta-data that matches these filters will be marked as [REDACTED]
// Any meta-data that matches these filters will be marked as [FILTERED]
// before sending a Notification to Bugsnag. It defaults to
// []string{"password", "secret"} so that request parameters like password,
// password_confirmation and auth_secret will not be sent to Bugsnag.
@ -75,6 +103,10 @@ type Configuration struct {
// Whether bugsnag should notify synchronously. This defaults to false which
// causes bugsnag-go to spawn a new goroutine for each notification.
Synchronous bool
// Whether the notifier should send all sessions recorded so far to Bugsnag
// when repanicking to ensure that no session information is lost in a
// fatal crash.
flushSessionsOnRepanic bool
// TODO: remember to update the update() function when modifying this struct
}
@ -82,9 +114,6 @@ func (config *Configuration) update(other *Configuration) *Configuration {
if other.APIKey != "" {
config.APIKey = other.APIKey
}
if other.Endpoint != "" {
config.Endpoint = other.Endpoint
}
if other.Hostname != "" {
config.Hostname = other.Hostname
}
@ -122,9 +151,50 @@ func (config *Configuration) update(other *Configuration) *Configuration {
config.Synchronous = true
}
if other.AutoCaptureSessions != nil {
config.AutoCaptureSessions = other.AutoCaptureSessions
}
config.updateEndpoints(other.Endpoint, &other.Endpoints)
return config
}
// IsAutoCaptureSessions identifies whether or not the notifier should
// automatically capture sessions as requests come in. It's a convenience
// wrapper that allows automatic session capturing to be enabled by default.
func (config *Configuration) IsAutoCaptureSessions() bool {
if config.AutoCaptureSessions == nil {
return true // enabled by default
}
if val, ok := config.AutoCaptureSessions.(bool); ok {
return val
}
// It has been configured to *something* (although not a valid value)
// assume the user wanted to disable this option.
return false
}
func (config *Configuration) updateEndpoints(endpoint string, endpoints *Endpoints) {
if endpoint != "" {
config.Logger.Printf("WARNING: the 'Endpoint' Bugsnag configuration parameter is deprecated in favor of 'Endpoints'")
config.Endpoints.Notify = endpoint
config.Endpoints.Sessions = ""
}
if endpoints.Notify != "" {
config.Endpoints.Notify = endpoints.Notify
if endpoints.Sessions == "" {
config.Logger.Printf("WARNING: Bugsnag notify endpoint configured without also configuring the sessions endpoint. No sessions will be recorded")
config.Endpoints.Sessions = ""
}
}
if endpoints.Sessions != "" {
if endpoints.Notify == "" {
panic("FATAL: Bugsnag sessions endpoint configured without also changing the notify endpoint. Bugsnag cannot identify where to report errors")
}
config.Endpoints.Sessions = endpoints.Sessions
}
}
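The branching above maps onto two configuration patterns. A hedged caller-side sketch with placeholder API key and URLs: setting both fields of Endpoints keeps error reports and sessions flowing to an on-premise install, while the deprecated single Endpoint field logs a warning and disables session tracking:

import "github.com/bugsnag/bugsnag-go"

func configureOnPremise() {
    // Both endpoints set: errors and sessions go to the on-premise servers.
    bugsnag.Configure(bugsnag.Configuration{
        APIKey: "your-api-key-here",
        Endpoints: bugsnag.Endpoints{
            Notify:   "https://notify.example.com",
            Sessions: "https://sessions.example.com",
        },
    })

    // Deprecated form: bugsnag.Configuration{Endpoint: "https://notify.example.com"}
    // warns and records no sessions; setting only Endpoints.Sessions panics.
}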
func (config *Configuration) merge(other *Configuration) *Configuration {
return config.clone().update(other)
}
@ -182,6 +252,9 @@ func (config *Configuration) notifyInReleaseStage() bool {
if config.NotifyReleaseStages == nil {
return true
}
if config.ReleaseStage == "" {
return true
}
for _, r := range config.NotifyReleaseStages {
if r == config.ReleaseStage {
return true


@ -0,0 +1,15 @@
package device
import "os"
var hostname string
// GetHostname returns the hostname of the current device. Caches the hostname
// between calls to ensure this is performant. Returns a blank string if
// the hostname cannot be identified.
func GetHostname() string {
if hostname == "" {
hostname, _ = os.Hostname()
}
return hostname
}


@ -1,6 +1,8 @@
package bugsnag
import (
"context"
"net/http"
"strings"
"github.com/bugsnag/bugsnag-go/errors"
@ -98,35 +100,43 @@ type Event struct {
User *User
// Other MetaData to send to Bugsnag. Appears as a set of tabbed tables in the dashboard.
MetaData MetaData
// Ctx is the context of the session the event occurred in. This allows Bugsnag to associate the event with the session.
Ctx context.Context
// Request is the request information that populates the Request tab in the dashboard.
Request *RequestJSON
// The reason for the severity and original value
handledState HandledState
}
func newEvent(err *errors.Error, rawData []interface{}, notifier *Notifier) (*Event, *Configuration) {
func newEvent(rawData []interface{}, notifier *Notifier) (*Event, *Configuration) {
config := notifier.Config
event := &Event{
Error: err,
RawData: append(notifier.RawData, rawData...),
ErrorClass: err.TypeName(),
Message: err.Error(),
Stacktrace: make([]stackFrame, len(err.StackFrames())),
RawData: append(notifier.RawData, rawData...),
Severity: SeverityWarning,
MetaData: make(MetaData),
handledState: HandledState{
SeverityReasonHandledError,
SeverityWarning,
false,
"",
SeverityReason: SeverityReasonHandledError,
OriginalSeverity: SeverityWarning,
Unhandled: false,
Framework: "",
},
}
var err *errors.Error
for _, datum := range event.RawData {
switch datum := datum.(type) {
case error, errors.Error:
err = errors.New(datum.(error), 1)
event.Error = err
event.ErrorClass = err.TypeName()
event.Message = err.Error()
event.Stacktrace = make([]stackFrame, len(err.StackFrames()))
case bool:
config = config.merge(&Configuration{Synchronous: bool(datum)})
case severity:
event.Severity = datum
event.handledState.OriginalSeverity = datum
@ -135,6 +145,12 @@ func newEvent(err *errors.Error, rawData []interface{}, notifier *Notifier) (*Ev
case Context:
event.Context = datum.String
case context.Context:
populateEventWithContext(datum, event)
case *http.Request:
populateEventWithRequest(datum, event)
case Configuration:
config = config.merge(&datum)
@ -175,3 +191,34 @@ func newEvent(err *errors.Error, rawData []interface{}, notifier *Notifier) (*Ev
return event, config
}
func populateEventWithContext(ctx context.Context, event *Event) {
event.Ctx = ctx
reqJSON, req := extractRequestInfo(ctx)
if event.Request == nil {
event.Request = reqJSON
}
populateEventWithRequest(req, event)
}
func populateEventWithRequest(req *http.Request, event *Event) {
if req == nil {
return
}
event.Request = extractRequestInfoFromReq(req)
if event.Context == "" {
event.Context = req.URL.Path
}
// Default user.id to IP so that the count of users affected works.
if event.User == nil {
ip := req.RemoteAddr
if idx := strings.LastIndex(ip, ":"); idx != -1 {
ip = ip[:idx]
}
event.User = &User{Id: ip}
}
}


@ -0,0 +1,14 @@
package headers
import "time"
// PrefixedHeaders returns a map of Content-Type and the 'Bugsnag-' headers for
// API key, payload version, and the time at which the request is being sent.
func PrefixedHeaders(apiKey, payloadVersion string) map[string]string {
return map[string]string{
"Content-Type": "application/json",
"Bugsnag-Api-Key": apiKey,
"Bugsnag-Payload-Version": payloadVersion,
"Bugsnag-Sent-At": time.Now().UTC().Format(time.RFC3339),
}
}


@ -136,7 +136,7 @@ func (s sanitizer) sanitizeMap(v reflect.Value) interface{} {
newKey := fmt.Sprintf("%v", key.Interface())
if s.shouldRedact(newKey) {
val = "[REDACTED]"
val = "[FILTERED]"
}
ret[newKey] = val
@ -165,7 +165,7 @@ func (s sanitizer) sanitizeStruct(v reflect.Value, t reflect.Type) interface{} {
}
if s.shouldRedact(name) {
ret[name] = "[REDACTED]"
ret[name] = "[FILTERED]"
} else {
sanitized := s.Sanitize(val.Interface())
if str, ok := sanitized.(string); ok {
@ -184,7 +184,7 @@ func (s sanitizer) sanitizeStruct(v reflect.Value, t reflect.Type) interface{} {
func (s sanitizer) shouldRedact(key string) bool {
for _, filter := range s.Filters {
if strings.Contains(strings.ToLower(filter), strings.ToLower(key)) {
if strings.Contains(strings.ToLower(key), strings.ToLower(filter)) {
return true
}
}


@ -2,7 +2,6 @@ package bugsnag
import (
"net/http"
"strings"
)
type (
@ -64,34 +63,11 @@ func catchMiddlewarePanic(event *Event, config *Configuration, next func() error
func httpRequestMiddleware(event *Event, config *Configuration) error {
for _, datum := range event.RawData {
if request, ok := datum.(*http.Request); ok {
proto := "http://"
if request.TLS != nil {
proto = "https://"
}
event.MetaData.Update(MetaData{
"request": {
"clientIp": request.RemoteAddr,
"httpMethod": request.Method,
"url": proto + request.Host + request.RequestURI,
"params": request.URL.Query(),
"headers": request.Header,
"params": request.URL.Query(),
},
})
// Default context to Path
if event.Context == "" {
event.Context = request.URL.Path
}
// Default user.id to IP so that users-affected works.
if event.User == nil {
ip := request.RemoteAddr
if idx := strings.LastIndex(ip, ":"); idx != -1 {
ip = ip[:idx]
}
event.User = &User{Id: ip}
}
}
}
return nil


@ -1,11 +1,11 @@
package bugsnag
import (
"fmt"
"github.com/bugsnag/bugsnag-go/errors"
)
var publisher reportPublisher = new(defaultReportPublisher)
// Notifier sends errors to Bugsnag.
type Notifier struct {
Config *Configuration
@ -30,39 +30,49 @@ func New(rawData ...interface{}) *Notifier {
}
}
// Notify sends an error to Bugsnag. Any rawData you pass here will be sent to
// Bugsnag after being converted to JSON. e.g. bugsnag.SeverityError, bugsnag.Context,
// or bugsnag.MetaData.
func (notifier *Notifier) Notify(err error, rawData ...interface{}) (e error) {
config := notifier.Config
return notifier.NotifySync(err, config.Synchronous, rawData...)
// FlushSessionsOnRepanic takes a boolean that indicates whether sessions
// should be flushed when AutoNotify repanics. In the case of a fatal panic the
// sessions might not get sent to Bugsnag before the application shuts down.
// Many frameworks will have their own error handler, and for these frameworks
// there is no need to flush sessions as the application will survive the panic
// and the sessions can be sent off later. The default value is true, so this
// needs only be called if you wish to inform Bugsnag that there is an error
// handler that will take care of panics that AutoNotify will re-raise.
func (notifier *Notifier) FlushSessionsOnRepanic(shouldFlush bool) {
notifier.Config.flushSessionsOnRepanic = shouldFlush
}
// NotifySync sends an error to Bugsnag. The synchronous parameter specifies
// whether to send the report in the current context. Any rawData you pass here
// will be sent to Bugsnag after being converted to JSON. e.g.
// bugsnag.SeverityError, bugsnag.Context, or bugsnag.MetaData.
func (notifier *Notifier) NotifySync(err error, synchronous bool, rawData ...interface{}) (e error) {
event, config := newEvent(errors.New(err, 1), rawData, notifier)
// Notify sends an error to Bugsnag. Any rawData you pass here will be sent to
// Bugsnag after being converted to JSON. e.g. bugsnag.SeverityError, bugsnag.Context,
// or bugsnag.MetaData. Any bools in rawData overrides the
// notifier.Config.Synchronous flag.
func (notifier *Notifier) Notify(err error, rawData ...interface{}) (e error) {
if e := checkForEmptyError(err); e != nil {
return e
}
// Stripping one stackframe to not include this function in the stacktrace
// for a manual notification.
skipFrames := 1
return notifier.NotifySync(errors.New(err, skipFrames), notifier.Config.Synchronous, rawData...)
}
// NotifySync sends an error to Bugsnag. A boolean parameter specifies whether
// to send the report in the current context (by default false, i.e.
// asynchronous). Any other rawData you pass here will be sent to Bugsnag after
// being converted to JSON. E.g. bugsnag.SeverityError, bugsnag.Context, or
// bugsnag.MetaData.
func (notifier *Notifier) NotifySync(err error, sync bool, rawData ...interface{}) error {
if e := checkForEmptyError(err); e != nil {
return e
}
// Stripping one stackframe to not include this function in the stacktrace
// for a manual notification.
skipFrames := 1
event, config := newEvent(append(rawData, errors.New(err, skipFrames), sync), notifier)
// Never block, start throwing away errors if we have too many.
e = middleware.Run(event, config, func() error {
config.logf("notifying bugsnag: %s", event.Message)
if config.notifyInReleaseStage() {
if synchronous {
return (&payload{event, config}).deliver()
}
// Ensure that any errors are logged if they occur in a goroutine.
go func(event *Event, config *Configuration) {
err := (&payload{event, config}).deliver()
if err != nil {
config.logf("bugsnag.Notify: %v", err)
}
}(event, config)
return nil
}
return fmt.Errorf("not notifying in %s", config.ReleaseStage)
e := middleware.Run(event, config, func() error {
return publisher.publishReport(&payload{event, config})
})
if e != nil {
@ -82,8 +92,13 @@ func (notifier *Notifier) AutoNotify(rawData ...interface{}) {
if err := recover(); err != nil {
severity := notifier.getDefaultSeverity(rawData, SeverityError)
state := HandledState{SeverityReasonHandledPanic, severity, true, ""}
notifier.appendStateIfNeeded(rawData, state)
notifier.Notify(errors.New(err, 2), rawData...)
rawData = notifier.appendStateIfNeeded(rawData, state)
// We strip the following stackframes as they don't add much
// information but would mess with the grouping algorithm
// { "file": "github.com/bugsnag/bugsnag-go/notifier.go", "lineNumber": 116, "method": "(*Notifier).AutoNotify" },
// { "file": "runtime/asm_amd64.s", "lineNumber": 573, "method": "call32" },
skipFrames := 2
notifier.NotifySync(errors.New(err, skipFrames), true, rawData...)
panic(err)
}
}
@ -95,7 +110,7 @@ func (notifier *Notifier) Recover(rawData ...interface{}) {
if err := recover(); err != nil {
severity := notifier.getDefaultSeverity(rawData, SeverityWarning)
state := HandledState{SeverityReasonHandledPanic, severity, false, ""}
notifier.appendStateIfNeeded(rawData, state)
rawData = notifier.appendStateIfNeeded(rawData, state)
notifier.Notify(errors.New(err, 2), rawData...)
}
}


@ -4,6 +4,7 @@ package bugsnag
import (
"github.com/bugsnag/bugsnag-go/errors"
"github.com/bugsnag/bugsnag-go/sessions"
"github.com/bugsnag/panicwrap"
)
@ -14,6 +15,7 @@ import (
// Related: https://godoc.org/github.com/bugsnag/panicwrap#BasicMonitor
func defaultPanicHandler() {
defer defaultNotifier.dontPanic()
ctx := sessions.SendStartupSession(&sessionTrackingConfig)
err := panicwrap.BasicMonitor(func(output string) {
toNotify, err := errors.ParsePanic(output)
@ -22,7 +24,8 @@ func defaultPanicHandler() {
defaultNotifier.Config.logf("bugsnag.handleUncaughtPanic: %v", err)
}
state := HandledState{SeverityReasonUnhandledPanic, SeverityError, true, ""}
defaultNotifier.NotifySync(toNotify, true, state)
defaultNotifier.NotifySync(toNotify, true, state, ctx)
})
if err != nil {


@ -5,8 +5,17 @@ import (
"encoding/json"
"fmt"
"net/http"
"sync"
"time"
"github.com/bugsnag/bugsnag-go/headers"
"github.com/bugsnag/bugsnag-go/sessions"
)
const notifyPayloadVersion = "4"
var sessionMutex sync.Mutex
type payload struct {
*Event
*Configuration
@ -17,10 +26,10 @@ type hash map[string]interface{}
func (p *payload) deliver() error {
if len(p.APIKey) != 32 {
return fmt.Errorf("bugsnag/payload.deliver: invalid api key")
return fmt.Errorf("bugsnag/payload.deliver: invalid api key: '%s'", p.APIKey)
}
buf, err := json.Marshal(p)
buf, err := p.MarshalJSON()
if err != nil {
return fmt.Errorf("bugsnag/payload.deliver: %v", err)
@ -29,82 +38,91 @@ func (p *payload) deliver() error {
client := http.Client{
Transport: p.Transport,
}
resp, err := client.Post(p.Endpoint, "application/json", bytes.NewBuffer(buf))
req, err := http.NewRequest("POST", p.Endpoints.Notify, bytes.NewBuffer(buf))
if err != nil {
return fmt.Errorf("bugsnag/payload.deliver unable to create request: %v", err)
}
for k, v := range headers.PrefixedHeaders(p.APIKey, notifyPayloadVersion) {
req.Header.Add(k, v)
}
resp, err := client.Do(req)
if err != nil {
return fmt.Errorf("bugsnag/payload.deliver: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return fmt.Errorf("bugsnag/payload.deliver: Got HTTP %s\n", resp.Status)
return fmt.Errorf("bugsnag/payload.deliver: Got HTTP %s", resp.Status)
}
return nil
}
func (p *payload) MarshalJSON() ([]byte, error) {
severityReason := hash{
"type": p.handledState.SeverityReason,
}
if p.handledState.Framework != "" {
severityReason["attributes"] = hash{
"framework": p.handledState.Framework,
}
}
data := hash{
"apiKey": p.APIKey,
"notifier": hash{
"name": "Bugsnag Go",
"url": "https://github.com/bugsnag/bugsnag-go",
"version": VERSION,
},
"events": []hash{
{
"payloadVersion": "2",
"exceptions": []hash{
{
"errorClass": p.ErrorClass,
"message": p.Message,
"stacktrace": p.Stacktrace,
return json.Marshal(reportJSON{
APIKey: p.APIKey,
Events: []eventJSON{
eventJSON{
App: &appJSON{
ReleaseStage: p.ReleaseStage,
Type: p.AppType,
Version: p.AppVersion,
},
Context: p.Context,
Device: &deviceJSON{Hostname: p.Hostname},
Request: p.Request,
Exceptions: []exceptionJSON{
exceptionJSON{
ErrorClass: p.ErrorClass,
Message: p.Message,
Stacktrace: p.Stacktrace,
},
},
"severity": p.Severity.String,
"severityReason": severityReason,
"unhandled": p.handledState.Unhandled,
"app": hash{
"releaseStage": p.ReleaseStage,
},
"user": p.User,
"metaData": p.MetaData.sanitize(p.ParamsFilters),
GroupingHash: p.GroupingHash,
Metadata: p.MetaData.sanitize(p.ParamsFilters),
PayloadVersion: notifyPayloadVersion,
Session: p.makeSession(),
Severity: p.Severity.String,
SeverityReason: p.severityReasonPayload(),
Unhandled: p.handledState.Unhandled,
User: p.User,
},
},
Notifier: notifierJSON{
Name: "Bugsnag Go",
URL: "https://github.com/bugsnag/bugsnag-go",
Version: VERSION,
},
})
}
func (p *payload) makeSession() *sessionJSON {
// If a context has not been applied to the payload then assume that no
// session has started either
if p.Ctx == nil {
return nil
}
event := data["events"].([]hash)[0]
if p.Context != "" {
event["context"] = p.Context
}
if p.GroupingHash != "" {
event["groupingHash"] = p.GroupingHash
}
if p.Hostname != "" {
event["device"] = hash{
"hostname": p.Hostname,
sessionMutex.Lock()
defer sessionMutex.Unlock()
session := sessions.IncrementEventCountAndGetSession(p.Ctx, p.handledState.Unhandled)
if session != nil {
s := *session
return &sessionJSON{
ID: s.ID,
StartedAt: s.StartedAt.UTC().Format(time.RFC3339),
Events: sessions.EventCounts{
Handled: s.EventCounts.Handled,
Unhandled: s.EventCounts.Unhandled,
},
}
}
if p.AppType != "" {
event["app"].(hash)["type"] = p.AppType
}
if p.AppVersion != "" {
event["app"].(hash)["version"] = p.AppVersion
}
return json.Marshal(data)
return nil
}
func (p *payload) severityReasonPayload() *severityReasonJSON {
if reason := p.handledState.SeverityReason; reason != "" {
return &severityReasonJSON{Type: reason}
}
return nil
}

vendor/github.com/bugsnag/bugsnag-go/report.go generated vendored Normal file

@ -0,0 +1,69 @@
package bugsnag
import (
"github.com/bugsnag/bugsnag-go/sessions"
uuid "github.com/gofrs/uuid"
)
type reportJSON struct {
APIKey string `json:"apiKey"`
Events []eventJSON `json:"events"`
Notifier notifierJSON `json:"notifier"`
}
type notifierJSON struct {
Name string `json:"name"`
URL string `json:"url"`
Version string `json:"version"`
}
type eventJSON struct {
App *appJSON `json:"app"`
Context string `json:"context,omitempty"`
Device *deviceJSON `json:"device,omitempty"`
Request *RequestJSON `json:"request,omitempty"`
Exceptions []exceptionJSON `json:"exceptions"`
GroupingHash string `json:"groupingHash,omitempty"`
Metadata interface{} `json:"metaData"`
PayloadVersion string `json:"payloadVersion"`
Session *sessionJSON `json:"session,omitempty"`
Severity string `json:"severity"`
SeverityReason *severityReasonJSON `json:"severityReason,omitempty"`
Unhandled bool `json:"unhandled"`
User *User `json:"user,omitempty"`
}
type sessionJSON struct {
StartedAt string `json:"startedAt"`
ID uuid.UUID `json:"id"`
Events sessions.EventCounts `json:"events"`
}
type appJSON struct {
ReleaseStage string `json:"releaseStage"`
Type string `json:"type,omitempty"`
Version string `json:"version,omitempty"`
}
type exceptionJSON struct {
ErrorClass string `json:"errorClass"`
Message string `json:"message"`
Stacktrace []stackFrame `json:"stacktrace"`
}
type severityReasonJSON struct {
Type SeverityReason `json:"type,omitempty"`
}
type deviceJSON struct {
Hostname string `json:"hostname,omitempty"`
}
// RequestJSON is the request information that populates the Request tab in the dashboard.
type RequestJSON struct {
ClientIP string `json:"clientIp,omitempty"`
Headers map[string]string `json:"headers,omitempty"`
HTTPMethod string `json:"httpMethod,omitempty"`
URL string `json:"url,omitempty"`
Referer string `json:"referer,omitempty"`
}


@ -0,0 +1,27 @@
package bugsnag
import "fmt"
type reportPublisher interface {
publishReport(*payload) error
}
type defaultReportPublisher struct{}
func (*defaultReportPublisher) publishReport(p *payload) error {
p.logf("notifying bugsnag: %s", p.Message)
if !p.notifyInReleaseStage() {
return fmt.Errorf("not notifying in %s", p.ReleaseStage)
}
if p.Synchronous {
return p.deliver()
}
go func(p *payload) {
if err := p.deliver(); err != nil {
// Ensure that any errors are logged if they occur in a goroutine.
p.logf("bugsnag/defaultReportPublisher.publishReport: %v", err)
}
}(p)
return nil
}


@ -0,0 +1,79 @@
package bugsnag
import (
"context"
"net/http"
"strings"
)
const requestContextKey requestKey = iota
type requestKey int
// AttachRequestData returns a child of the given context with the request
// object attached for later extraction by the notifier in order to
// automatically record request data
func AttachRequestData(ctx context.Context, r *http.Request) context.Context {
return context.WithValue(ctx, requestContextKey, r)
}
// extractRequestInfo looks for the request object that the notifier
// automatically attaches to the context when using any of the supported
// frameworks or bugsnag.HandlerFunc or bugsnag.Handler, and returns the
// sub-object supported by the notify API.
func extractRequestInfo(ctx context.Context) (*RequestJSON, *http.Request) {
if req := getRequestIfPresent(ctx); req != nil {
return extractRequestInfoFromReq(req), req
}
return nil, nil
}
// extractRequestInfoFromReq extracts the request information the notify API
// understands from the given HTTP request. Returns the sub-object supported by
// the notify API.
func extractRequestInfoFromReq(req *http.Request) *RequestJSON {
proto := "http://"
if req.TLS != nil {
proto = "https://"
}
return &RequestJSON{
ClientIP: req.RemoteAddr,
HTTPMethod: req.Method,
URL: proto + req.Host + req.RequestURI,
Referer: req.Referer(),
Headers: parseRequestHeaders(req.Header),
}
}
func parseRequestHeaders(header map[string][]string) map[string]string {
headers := make(map[string]string)
for k, v := range header {
// Headers can have multiple values, in which case we report them as csv
if contains(Config.ParamsFilters, k) {
headers[k] = "[FILTERED]"
} else {
headers[k] = strings.Join(v, ",")
}
}
return headers
}
func contains(slice []string, e string) bool {
for _, s := range slice {
if strings.ToLower(s) == strings.ToLower(e) {
return true
}
}
return false
}
func getRequestIfPresent(ctx context.Context) *http.Request {
if ctx == nil {
return nil
}
val := ctx.Value(requestContextKey)
if val == nil {
return nil
}
return val.(*http.Request)
}
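For code paths that are not wrapped by bugsnag.Handler or bugsnag.HandlerFunc, the same request extraction can be driven manually from application code. A hedged sketch; reportWithRequest and doSomething are hypothetical placeholders:

import (
    "net/http"

    "github.com/bugsnag/bugsnag-go"
)

// doSomething stands in for application logic that may fail.
func doSomething(*http.Request) error { return nil }

func reportWithRequest(w http.ResponseWriter, r *http.Request) {
    if err := doSomething(r); err != nil {
        // Attaching the request lets Notify populate the Request tab and
        // default the event context and user ID, as extracted above.
        ctx := bugsnag.AttachRequestData(r.Context(), r)
        bugsnag.Notify(err, ctx)
    }
}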

vendor/github.com/bugsnag/bugsnag-go/sessions/config.go generated vendored Normal file

@ -0,0 +1,127 @@
package sessions
import (
"log"
"net/http"
"sync"
"time"
)
// SessionTrackingConfiguration defines the configuration options relevant for session tracking.
// These are likely a subset of the global bugsnag.Configuration. Users should
// not modify this struct directly but rather call
// `bugsnag.Configure(bugsnag.Configuration)`, which will update this configuration accordingly.
type SessionTrackingConfiguration struct {
// PublishInterval defines how often the sessions are sent off to the session server.
PublishInterval time.Duration
// AutoCaptureSessions can be set to false to disable automatic session
// tracking. If you want control over what is deemed a session, you can
// switch off automatic session tracking with this configuration, and call
// bugsnag.StartSession() when appropriate for your application. See the
// official docs for instructions and examples of associating handled
// errors with sessions and ensuring error rate accuracy on the Bugsnag
// dashboard. This will default to true, but is stored as an interface to enable
// us to detect when this option has not been set.
AutoCaptureSessions interface{}
// APIKey defines the API key for the Bugsnag project. Same value as for reporting errors.
APIKey string
// Endpoint is the URI of the session server to receive session payloads.
Endpoint string
// Version defines the current version of the notifier.
Version string
// ReleaseStage defines the release stage, e.g. "production" or "staging",
// that this session occurred in. The release stage, in combination with
// the app version make up the release that Bugsnag tracks.
ReleaseStage string
// Hostname defines the host of the server this application is running on.
Hostname string
// AppType defines the type of the application.
AppType string
// AppVersion defines the version of the application.
AppVersion string
// Transport defines the http.RoundTripper to be used for managing HTTP requests.
Transport http.RoundTripper
// The release stages to notify about sessions in. If you set this then
// bugsnag-go will only send sessions to Bugsnag if the release stage
// is listed here.
NotifyReleaseStages []string
// Logger is the logger that Bugsnag should log to. Uses the same defaults
// as go's builtin logging package. This logger gets invoked when any error
// occurs inside the library itself.
Logger interface {
Printf(format string, v ...interface{})
}
mutex sync.Mutex
}
// Update modifies the values inside the receiver to match the non-default properties of the given config.
// Existing properties will not be cleared when given empty fields.
func (c *SessionTrackingConfiguration) Update(config *SessionTrackingConfiguration) {
c.mutex.Lock()
defer c.mutex.Unlock()
if config.PublishInterval != 0 {
c.PublishInterval = config.PublishInterval
}
if config.APIKey != "" {
c.APIKey = config.APIKey
}
if config.Endpoint != "" {
c.Endpoint = config.Endpoint
}
if config.Version != "" {
c.Version = config.Version
}
if config.ReleaseStage != "" {
c.ReleaseStage = config.ReleaseStage
}
if config.Hostname != "" {
c.Hostname = config.Hostname
}
if config.AppType != "" {
c.AppType = config.AppType
}
if config.AppVersion != "" {
c.AppVersion = config.AppVersion
}
if config.Transport != nil {
c.Transport = config.Transport
}
if config.Logger != nil {
c.Logger = config.Logger
}
if config.NotifyReleaseStages != nil {
c.NotifyReleaseStages = config.NotifyReleaseStages
}
if config.AutoCaptureSessions != nil {
c.AutoCaptureSessions = config.AutoCaptureSessions
}
}
func (c *SessionTrackingConfiguration) logf(fmt string, args ...interface{}) {
if c != nil && c.Logger != nil {
c.Logger.Printf(fmt, args...)
} else {
log.Printf(fmt, args...)
}
}
// IsAutoCaptureSessions identifies whether or not the notifier should
// automatically capture sessions as requests come in. It's a convenience
// wrapper that allows automatic session capturing to be enabled by default.
func (c *SessionTrackingConfiguration) IsAutoCaptureSessions() bool {
if c.AutoCaptureSessions == nil {
return true // enabled by default
}
if val, ok := c.AutoCaptureSessions.(bool); ok {
return val
}
// It has been configured to *something* (although not a valid value)
// assume the user wanted to disable this option.
return false
}


@ -0,0 +1,78 @@
package sessions
import (
"runtime"
"time"
"github.com/bugsnag/bugsnag-go/device"
)
// notifierPayload defines the .notifier subobject of the payload
type notifierPayload struct {
Name string `json:"name"`
URL string `json:"url"`
Version string `json:"version"`
}
// appPayload defines the .app subobject of the payload
type appPayload struct {
Type string `json:"type,omitempty"`
ReleaseStage string `json:"releaseStage,omitempty"`
Version string `json:"version,omitempty"`
}
// devicePayload defines the .device subobject of the payload
type devicePayload struct {
OsName string `json:"osName,omitempty"`
Hostname string `json:"hostname,omitempty"`
}
// sessionCountsPayload defines the .sessionCounts subobject of the payload
type sessionCountsPayload struct {
StartedAt string `json:"startedAt"`
SessionsStarted int `json:"sessionsStarted"`
}
// sessionPayload defines the top level payload object
type sessionPayload struct {
Notifier *notifierPayload `json:"notifier"`
App *appPayload `json:"app"`
Device *devicePayload `json:"device"`
SessionCounts []sessionCountsPayload `json:"sessionCounts"`
}
// makeSessionPayload creates a sessionPayload based off of the given sessions and config
func makeSessionPayload(sessions []*Session, config *SessionTrackingConfiguration) *sessionPayload {
releaseStage := config.ReleaseStage
if releaseStage == "" {
releaseStage = "production"
}
hostname := config.Hostname
if hostname == "" {
hostname = device.GetHostname()
}
return &sessionPayload{
Notifier: &notifierPayload{
Name: "Bugsnag Go",
URL: "https://github.com/bugsnag/bugsnag-go",
Version: config.Version,
},
App: &appPayload{
Type: config.AppType,
Version: config.AppVersion,
ReleaseStage: releaseStage,
},
Device: &devicePayload{
OsName: runtime.GOOS,
Hostname: hostname,
},
SessionCounts: []sessionCountsPayload{
{
// This timestamp assumes that we're sending these off once a minute
StartedAt: sessions[0].StartedAt.UTC().Format(time.RFC3339),
SessionsStarted: len(sessions),
},
},
}
}


@ -0,0 +1,87 @@
package sessions
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"github.com/bugsnag/bugsnag-go/headers"
)
// sessionPayloadVersion defines the current version of the payload that's
// being sent to the session server.
const sessionPayloadVersion = "1.0"
type sessionPublisher interface {
publish(sessions []*Session) error
}
type httpClient interface {
Do(*http.Request) (*http.Response, error)
}
type publisher struct {
config *SessionTrackingConfiguration
client httpClient
}
// publish builds a payload from the given sessions and publishes them to the
// session server. Returns any errors that happened as part of publishing.
func (p *publisher) publish(sessions []*Session) error {
if p.config.Endpoint == "" {
// Session tracking is disabled, likely because the notify endpoint was
// changed without changing the sessions endpoint
// We've already logged a warning in this case, so no need to spam the
// log every minute
return nil
}
if apiKey := p.config.APIKey; len(apiKey) != 32 {
return fmt.Errorf("bugsnag/sessions/publisher.publish invalid API key: '%s'", apiKey)
}
nrs, rs := p.config.NotifyReleaseStages, p.config.ReleaseStage
if rs != "" && (nrs != nil && !contains(nrs, rs)) {
// Always send sessions if the release stage is not set, but don't send any
// sessions when notify release stages don't match the current release stage
return nil
}
if len(sessions) == 0 {
return fmt.Errorf("bugsnag/sessions/publisher.publish requested publication of 0")
}
p.config.mutex.Lock()
defer p.config.mutex.Unlock()
payload := makeSessionPayload(sessions, p.config)
buf, err := json.Marshal(payload)
if err != nil {
return fmt.Errorf("bugsnag/sessions/publisher.publish unable to marshal json: %v", err)
}
req, err := http.NewRequest("POST", p.config.Endpoint, bytes.NewBuffer(buf))
if err != nil {
return fmt.Errorf("bugsnag/sessions/publisher.publish unable to create request: %v", err)
}
for k, v := range headers.PrefixedHeaders(p.config.APIKey, sessionPayloadVersion) {
req.Header.Add(k, v)
}
res, err := p.client.Do(req)
if err != nil {
return fmt.Errorf("bugsnag/sessions/publisher.publish unable to deliver session: %v", err)
}
defer func(res *http.Response) {
if err := res.Body.Close(); err != nil {
p.config.logf("%v", err)
}
}(res)
if res.StatusCode != 202 {
return fmt.Errorf("bugsnag/session.publish expected 202 response status, got HTTP %s", res.Status)
}
return nil
}
func contains(coll []string, e string) bool {
for _, s := range coll {
if s == e {
return true
}
}
return false
}
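
Since publish only talks to the session server through the small httpClient interface above, it can be exercised without any network I/O. A minimal in-package sketch of a test double (hypothetical; the type and file names are made up for illustration):

// publisher_double_test.go (hypothetical, in package sessions)
package sessions

import (
    "bytes"
    "fmt"
    "io/ioutil"
    "net/http"
)

// recordingClient satisfies the httpClient interface and captures the outgoing
// request so a test can assert on the delivered headers and JSON body.
type recordingClient struct {
    req    *http.Request
    status int
}

func (c *recordingClient) Do(req *http.Request) (*http.Response, error) {
    c.req = req
    return &http.Response{
        StatusCode: c.status,
        Status:     fmt.Sprintf("%d Status", c.status),
        Body:       ioutil.NopCloser(bytes.NewReader(nil)),
    }, nil
}

A test would wire this in as publisher{config: cfg, client: &recordingClient{status: 202}} with a non-empty Endpoint and a 32-character APIKey, call publish, and then inspect recordingClient.req.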


@ -0,0 +1,30 @@
package sessions
import (
"time"
uuid "github.com/gofrs/uuid"
)
// EventCounts register how many handled/unhandled events have happened for
// this session
type EventCounts struct {
Handled int `json:"handled"`
Unhandled int `json:"unhandled"`
}
// Session represents a start time and a unique ID that identifies the session.
type Session struct {
StartedAt time.Time
ID uuid.UUID
EventCounts *EventCounts
}
func newSession() *Session {
sessionID, _ := uuid.NewV4()
return &Session{
StartedAt: time.Now(),
ID: sessionID,
EventCounts: &EventCounts{},
}
}


@ -0,0 +1,35 @@
package sessions
import (
"context"
"net/http"
"os"
"github.com/bugsnag/panicwrap"
)
// SendStartupSession is called by Bugsnag on startup. It sends a session to
// Bugsnag and returns a context representing the session of the main
// goroutine. This is the session associated with any fatal panics that are
// caught by panicwrap.
func SendStartupSession(config *SessionTrackingConfiguration) context.Context {
ctx := context.Background()
session := newSession()
if !config.IsAutoCaptureSessions() || isApplicationProcess() {
return ctx
}
publisher := &publisher{
config: config,
client: &http.Client{Transport: config.Transport},
}
go publisher.publish([]*Session{session})
return context.WithValue(ctx, contextSessionKey, session)
}
// Checks to see if this is the application process, as opposed to the process
// that monitors for panics
func isApplicationProcess() bool {
// Application process is run first, and this will only have been set when
// the monitoring process runs
return "" == os.Getenv(panicwrap.DEFAULT_COOKIE_KEY)
}
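
From application code the call is a one-liner. A hedged sketch (the configuration literal only uses exported fields referenced elsewhere in this package; the values are placeholders, and in practice the top-level bugsnag package performs this wiring for you):

// startup_sketch.go (hypothetical)
package main

import (
    "net/http"
    "time"

    "github.com/bugsnag/bugsnag-go/sessions"
)

func main() {
    cfg := &sessions.SessionTrackingConfiguration{
        APIKey:          "0123456789abcdef0123456789abcdef", // placeholder 32-char key
        Endpoint:        "",                                 // empty here, so no session is actually delivered
        PublishInterval: time.Minute,
        Transport:       http.DefaultTransport,
    }
    // ctx represents the main goroutine's session (when one is started) and can
    // be threaded through the program so events are attributed to it.
    ctx := sessions.SendStartupSession(cfg)
    _ = ctx
}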


@ -0,0 +1,153 @@
package sessions
import (
"context"
"net/http"
"os"
"os/signal"
"sync"
"syscall"
"time"
)
const (
//contextSessionKey is a unique key for accessing and setting Bugsnag
//session data on a context.Context object
contextSessionKey ctxKey = 1
)
// ctxKey is a type alias that ensures uniqueness as a context.Context key
type ctxKey int
// SessionTracker exposes a method for starting sessions that are used for
// gauging your application's health
type SessionTracker interface {
StartSession(context.Context) context.Context
FlushSessions()
}
type sessionTracker struct {
sessionChannel chan *Session
sessions []*Session
config *SessionTrackingConfiguration
publisher sessionPublisher
sessionsMutex sync.Mutex
}
// NewSessionTracker creates a new SessionTracker based on the provided config.
func NewSessionTracker(config *SessionTrackingConfiguration) SessionTracker {
publisher := publisher{
config: config,
client: &http.Client{Transport: config.Transport},
}
st := sessionTracker{
sessionChannel: make(chan *Session, 1),
sessions: []*Session{},
config: config,
publisher: &publisher,
}
go st.processSessions()
return &st
}
// IncrementEventCountAndGetSession extracts a Bugsnag session from the given
// context, increments its count of handled or unhandled events, and returns
// the session.
func IncrementEventCountAndGetSession(ctx context.Context, unhandled bool) *Session {
if s := ctx.Value(contextSessionKey); s != nil {
if session, ok := s.(*Session); ok && !session.StartedAt.IsZero() {
// It is not just getting back a default value
ec := session.EventCounts
if unhandled {
ec.Unhandled++
} else {
ec.Handled++
}
return session
}
}
return nil
}
func (s *sessionTracker) StartSession(ctx context.Context) context.Context {
session := newSession()
s.sessionChannel <- session
return context.WithValue(ctx, contextSessionKey, session)
}
func (s *sessionTracker) interval() time.Duration {
s.config.mutex.Lock()
defer s.config.mutex.Unlock()
return s.config.PublishInterval
}
func (s *sessionTracker) processSessions() {
tic := time.Tick(s.interval())
shutdown := shutdownSignals()
for {
select {
case session := <-s.sessionChannel:
s.appendSession(session)
case <-tic:
s.publishCollectedSessions()
case sig := <-shutdown:
s.flushSessionsAndRepeatSignal(shutdown, sig.(syscall.Signal))
}
}
}
func (s *sessionTracker) appendSession(session *Session) {
s.sessionsMutex.Lock()
defer s.sessionsMutex.Unlock()
s.sessions = append(s.sessions, session)
}
func (s *sessionTracker) publishCollectedSessions() {
s.sessionsMutex.Lock()
defer s.sessionsMutex.Unlock()
oldSessions := s.sessions
s.sessions = nil
if len(oldSessions) > 0 {
go func(s *sessionTracker) {
err := s.publisher.publish(oldSessions)
if err != nil {
s.config.logf("%v", err)
}
}(s)
}
}
func (s *sessionTracker) flushSessionsAndRepeatSignal(shutdown chan<- os.Signal, sig syscall.Signal) {
s.sessionsMutex.Lock()
defer s.sessionsMutex.Unlock()
signal.Stop(shutdown)
if len(s.sessions) > 0 {
err := s.publisher.publish(s.sessions)
if err != nil {
s.config.logf("%v", err)
}
}
syscall.Kill(syscall.Getpid(), sig)
}
func (s *sessionTracker) FlushSessions() {
s.sessionsMutex.Lock()
defer s.sessionsMutex.Unlock()
sessions := s.sessions
s.sessions = nil
if len(sessions) != 0 {
if err := s.publisher.publish(sessions); err != nil {
s.config.logf("%v", err)
}
}
}
func shutdownSignals() chan os.Signal {
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGTERM, syscall.SIGINT)
return c
}
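
Putting the tracker, publisher, and session pieces together, here is a hedged end-to-end sketch of how calling code drives this API. It assumes the exported configuration fields referenced in this package are enough to build the configuration directly; Endpoint is deliberately left empty so publish is a no-op and the sketch generates no network traffic.

// tracker_sketch.go (hypothetical)
package main

import (
    "context"
    "net/http"
    "time"

    "github.com/bugsnag/bugsnag-go/sessions"
)

func main() {
    cfg := &sessions.SessionTrackingConfiguration{
        APIKey:          "0123456789abcdef0123456789abcdef", // placeholder 32-char key
        Endpoint:        "",                                 // empty => publish is a no-op (see publisher.go above)
        PublishInterval: time.Minute,                        // batched sessions are flushed on this interval
        Transport:       http.DefaultTransport,
    }
    tracker := sessions.NewSessionTracker(cfg)

    // Start a session for a unit of work (e.g. an incoming HTTP request) and
    // keep the returned context so later events can be attributed to it.
    ctx := tracker.StartSession(context.Background())

    // When an event is reported, bump the session's handled/unhandled counts.
    if s := sessions.IncrementEventCountAndGetSession(ctx, false); s != nil {
        _ = s.EventCounts.Handled // now 1 for this session
    }

    // Flush anything still buffered before the process exits.
    tracker.FlushSessions()
}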

vendor/github.com/go-ini/ini/LICENSE generated vendored

@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright 2014 Unknwon
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -1,32 +0,0 @@
// Copyright 2016 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"fmt"
)
type ErrDelimiterNotFound struct {
Line string
}
func IsErrDelimiterNotFound(err error) bool {
_, ok := err.(ErrDelimiterNotFound)
return ok
}
func (err ErrDelimiterNotFound) Error() string {
return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
}

vendor/github.com/go-ini/ini/file.go generated vendored

@ -1,415 +0,0 @@
// Copyright 2017 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"sync"
)
// File represents a combination of one or more INI files in memory.
type File struct {
options LoadOptions
dataSources []dataSource
// Should make things safe, but sometimes doesn't matter.
BlockMode bool
lock sync.RWMutex
// To keep data in order.
sectionList []string
// Actual data is stored here.
sections map[string]*Section
NameMapper
ValueMapper
}
// newFile initializes File object with given data sources.
func newFile(dataSources []dataSource, opts LoadOptions) *File {
return &File{
BlockMode: true,
dataSources: dataSources,
sections: make(map[string]*Section),
sectionList: make([]string, 0, 10),
options: opts,
}
}
// Empty returns an empty file object.
func Empty() *File {
// Ignore error here, we sure our data is good.
f, _ := Load([]byte(""))
return f
}
// NewSection creates a new section.
func (f *File) NewSection(name string) (*Section, error) {
if len(name) == 0 {
return nil, errors.New("error creating new section: empty section name")
} else if f.options.Insensitive && name != DEFAULT_SECTION {
name = strings.ToLower(name)
}
if f.BlockMode {
f.lock.Lock()
defer f.lock.Unlock()
}
if inSlice(name, f.sectionList) {
return f.sections[name], nil
}
f.sectionList = append(f.sectionList, name)
f.sections[name] = newSection(f, name)
return f.sections[name], nil
}
// NewRawSection creates a new section with an unparseable body.
func (f *File) NewRawSection(name, body string) (*Section, error) {
section, err := f.NewSection(name)
if err != nil {
return nil, err
}
section.isRawSection = true
section.rawBody = body
return section, nil
}
// NewSections creates a list of sections.
func (f *File) NewSections(names ...string) (err error) {
for _, name := range names {
if _, err = f.NewSection(name); err != nil {
return err
}
}
return nil
}
// GetSection returns section by given name.
func (f *File) GetSection(name string) (*Section, error) {
if len(name) == 0 {
name = DEFAULT_SECTION
}
if f.options.Insensitive {
name = strings.ToLower(name)
}
if f.BlockMode {
f.lock.RLock()
defer f.lock.RUnlock()
}
sec := f.sections[name]
if sec == nil {
return nil, fmt.Errorf("section '%s' does not exist", name)
}
return sec, nil
}
// Section assumes named section exists and returns a zero-value when not.
func (f *File) Section(name string) *Section {
sec, err := f.GetSection(name)
if err != nil {
// Note: It's OK here because the only possible error is empty section name,
// but if it's empty, this piece of code won't be executed.
sec, _ = f.NewSection(name)
return sec
}
return sec
}
// Sections returns a list of Section.
func (f *File) Sections() []*Section {
if f.BlockMode {
f.lock.RLock()
defer f.lock.RUnlock()
}
sections := make([]*Section, len(f.sectionList))
for i, name := range f.sectionList {
sections[i] = f.sections[name]
}
return sections
}
// ChildSections returns a list of child sections of given section name.
func (f *File) ChildSections(name string) []*Section {
return f.Section(name).ChildSections()
}
// SectionStrings returns list of section names.
func (f *File) SectionStrings() []string {
list := make([]string, len(f.sectionList))
copy(list, f.sectionList)
return list
}
// DeleteSection deletes a section.
func (f *File) DeleteSection(name string) {
if f.BlockMode {
f.lock.Lock()
defer f.lock.Unlock()
}
if len(name) == 0 {
name = DEFAULT_SECTION
}
for i, s := range f.sectionList {
if s == name {
f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
delete(f.sections, name)
return
}
}
}
func (f *File) reload(s dataSource) error {
r, err := s.ReadCloser()
if err != nil {
return err
}
defer r.Close()
return f.parse(r)
}
// Reload reloads and parses all data sources.
func (f *File) Reload() (err error) {
for _, s := range f.dataSources {
if err = f.reload(s); err != nil {
// In loose mode, we create an empty default section for nonexistent files.
if os.IsNotExist(err) && f.options.Loose {
f.parse(bytes.NewBuffer(nil))
continue
}
return err
}
}
return nil
}
// Append appends one or more data sources and reloads automatically.
func (f *File) Append(source interface{}, others ...interface{}) error {
ds, err := parseDataSource(source)
if err != nil {
return err
}
f.dataSources = append(f.dataSources, ds)
for _, s := range others {
ds, err = parseDataSource(s)
if err != nil {
return err
}
f.dataSources = append(f.dataSources, ds)
}
return f.Reload()
}
func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
equalSign := DefaultFormatLeft + "=" + DefaultFormatRight
if PrettyFormat || PrettyEqual {
equalSign = " = "
}
// Use buffer to make sure target is safe until finish encoding.
buf := bytes.NewBuffer(nil)
for i, sname := range f.sectionList {
sec := f.Section(sname)
if len(sec.Comment) > 0 {
// Support multiline comments
lines := strings.Split(sec.Comment, LineBreak)
for i := range lines {
if lines[i][0] != '#' && lines[i][0] != ';' {
lines[i] = "; " + lines[i]
} else {
lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
}
if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
return nil, err
}
}
}
if i > 0 || DefaultHeader {
if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
return nil, err
}
} else {
// Write nothing if default section is empty
if len(sec.keyList) == 0 {
continue
}
}
if sec.isRawSection {
if _, err := buf.WriteString(sec.rawBody); err != nil {
return nil, err
}
if PrettySection {
// Put a line between sections
if _, err := buf.WriteString(LineBreak); err != nil {
return nil, err
}
}
continue
}
// Count and generate alignment length and buffer spaces using the
// longest key. Keys may be modified if they contain certain characters so
// we need to take that into account in our calculation.
alignLength := 0
if PrettyFormat {
for _, kname := range sec.keyList {
keyLength := len(kname)
// First case will surround key by ` and second by """
if strings.ContainsAny(kname, "\"=:") {
keyLength += 2
} else if strings.Contains(kname, "`") {
keyLength += 6
}
if keyLength > alignLength {
alignLength = keyLength
}
}
}
alignSpaces := bytes.Repeat([]byte(" "), alignLength)
KEY_LIST:
for _, kname := range sec.keyList {
key := sec.Key(kname)
if len(key.Comment) > 0 {
if len(indent) > 0 && sname != DEFAULT_SECTION {
buf.WriteString(indent)
}
// Support multiline comments
lines := strings.Split(key.Comment, LineBreak)
for i := range lines {
if lines[i][0] != '#' && lines[i][0] != ';' {
lines[i] = "; " + lines[i]
} else {
lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
}
if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
return nil, err
}
}
}
if len(indent) > 0 && sname != DEFAULT_SECTION {
buf.WriteString(indent)
}
switch {
case key.isAutoIncrement:
kname = "-"
case strings.ContainsAny(kname, "\"=:"):
kname = "`" + kname + "`"
case strings.Contains(kname, "`"):
kname = `"""` + kname + `"""`
}
for _, val := range key.ValueWithShadows() {
if _, err := buf.WriteString(kname); err != nil {
return nil, err
}
if key.isBooleanType {
if kname != sec.keyList[len(sec.keyList)-1] {
buf.WriteString(LineBreak)
}
continue KEY_LIST
}
// Write out alignment spaces before "=" sign
if PrettyFormat {
buf.Write(alignSpaces[:alignLength-len(kname)])
}
// In case key value contains "\n", "`", "\"", "#" or ";"
if strings.ContainsAny(val, "\n`") {
val = `"""` + val + `"""`
} else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
val = "`" + val + "`"
}
if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil {
return nil, err
}
}
for _, val := range key.nestedValues {
if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil {
return nil, err
}
}
}
if PrettySection {
// Put a line between sections
if _, err := buf.WriteString(LineBreak); err != nil {
return nil, err
}
}
}
return buf, nil
}
// WriteToIndent writes content into io.Writer with given indention.
// If PrettyFormat has been set to be true,
// it will align "=" sign with spaces under each section.
func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) {
buf, err := f.writeToBuffer(indent)
if err != nil {
return 0, err
}
return buf.WriteTo(w)
}
// WriteTo writes file content into io.Writer.
func (f *File) WriteTo(w io.Writer) (int64, error) {
return f.WriteToIndent(w, "")
}
// SaveToIndent writes content to file system with given value indention.
func (f *File) SaveToIndent(filename, indent string) error {
// Note: Because we are truncating with os.Create,
// it's safer to save to a temporary file location and rename after we're done.
buf, err := f.writeToBuffer(indent)
if err != nil {
return err
}
return ioutil.WriteFile(filename, buf.Bytes(), 0666)
}
// SaveTo writes content to file system.
func (f *File) SaveTo(filename string) error {
return f.SaveToIndent(filename, "")
}

vendor/github.com/go-ini/ini/ini.go generated vendored

@ -1,215 +0,0 @@
// +build go1.6
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
// Package ini provides INI file read and write functionality in Go.
package ini
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"regexp"
"runtime"
)
const (
// Name for default section. You can use this constant or the string literal.
// In most of cases, an empty string is all you need to access the section.
DEFAULT_SECTION = "DEFAULT"
// Maximum allowed depth when recursively substituting variable names.
_DEPTH_VALUES = 99
_VERSION = "1.38.3"
)
// Version returns current package version literal.
func Version() string {
return _VERSION
}
var (
// Delimiter to determine or compose a new line.
// This variable will be changed to "\r\n" automatically on Windows
// at package init time.
LineBreak = "\n"
// Place custom spaces when PrettyFormat and PrettyEqual are both disabled
DefaultFormatLeft = ""
DefaultFormatRight = ""
// Variable regexp pattern: %(variable)s
varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
// Indicate whether to align "=" sign with spaces to produce pretty output
// or reduce all possible spaces for compact format.
PrettyFormat = true
// Place spaces around "=" sign even when PrettyFormat is false
PrettyEqual = false
// Explicitly write DEFAULT section header
DefaultHeader = false
// Indicate whether to put a line between sections
PrettySection = true
)
func init() {
if runtime.GOOS == "windows" {
LineBreak = "\r\n"
}
}
func inSlice(str string, s []string) bool {
for _, v := range s {
if str == v {
return true
}
}
return false
}
// dataSource is an interface that returns object which can be read and closed.
type dataSource interface {
ReadCloser() (io.ReadCloser, error)
}
// sourceFile represents an object that contains content on the local file system.
type sourceFile struct {
name string
}
func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
return os.Open(s.name)
}
// sourceData represents an object that contains content in memory.
type sourceData struct {
data []byte
}
func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(s.data)), nil
}
// sourceReadCloser represents an input stream with Close method.
type sourceReadCloser struct {
reader io.ReadCloser
}
func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
return s.reader, nil
}
func parseDataSource(source interface{}) (dataSource, error) {
switch s := source.(type) {
case string:
return sourceFile{s}, nil
case []byte:
return &sourceData{s}, nil
case io.ReadCloser:
return &sourceReadCloser{s}, nil
default:
return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s)
}
}
type LoadOptions struct {
// Loose indicates whether the parser should ignore nonexistent files or return error.
Loose bool
// Insensitive indicates whether the parser forces all section and key names to lowercase.
Insensitive bool
// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
IgnoreContinuation bool
// IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value.
IgnoreInlineComment bool
// SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs.
SkipUnrecognizableLines bool
// AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing.
// This type of keys are mostly used in my.cnf.
AllowBooleanKeys bool
// AllowShadows indicates whether to keep track of keys with same name under same section.
AllowShadows bool
// AllowNestedValues indicates whether to allow AWS-like nested values.
// Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values
AllowNestedValues bool
// AllowPythonMultilineValues indicates whether to allow Python-like multi-line values.
// Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure
// Relevant quote: Values can also span multiple lines, as long as they are indented deeper
// than the first line of the value.
AllowPythonMultilineValues bool
// SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value.
// Docs: https://docs.python.org/2/library/configparser.html
// Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names.
// In the latter case, they need to be preceded by a whitespace character to be recognized as a comment.
SpaceBeforeInlineComment bool
// UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format
// when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value"
UnescapeValueDoubleQuotes bool
// UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format
// when value is NOT surrounded by any quotes.
// Note: UNSTABLE, behavior might change to only unescape inside double quotes, or it may not be necessary at all.
UnescapeValueCommentSymbols bool
// UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise
// conform to key/value pairs. Specify the names of those blocks here.
UnparseableSections []string
}
func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
sources := make([]dataSource, len(others)+1)
sources[0], err = parseDataSource(source)
if err != nil {
return nil, err
}
for i := range others {
sources[i+1], err = parseDataSource(others[i])
if err != nil {
return nil, err
}
}
f := newFile(sources, opts)
if err = f.Reload(); err != nil {
return nil, err
}
return f, nil
}
// Load loads and parses from INI data sources.
// Arguments can be mixed of file name with string type, or raw data in []byte.
// It will return error if list contains nonexistent files.
func Load(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{}, source, others...)
}
// LooseLoad has exactly same functionality as Load function
// except it ignores nonexistent files instead of returning error.
func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{Loose: true}, source, others...)
}
// InsensitiveLoad has exactly same functionality as Load function
// except it forces all section and key names to be lowercased.
func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{Insensitive: true}, source, others...)
}
// ShadowLoad has exactly same functionality as Load function
// except it allows having shadow keys.
func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
}
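
For context on what this removal drops, here is a hedged sketch of how the go-ini entry points shown above were typically used. It only calls functions visible in this file, and the INI content is illustrative.

// ini_usage_sketch.go (hypothetical)
package main

import (
    "fmt"
    "os"

    "github.com/go-ini/ini"
)

func main() {
    // Load accepts file names or raw []byte data sources.
    cfg, err := ini.Load([]byte("[profile dev]\nregion = us-east-1\n"))
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        return
    }
    fmt.Println(cfg.SectionStrings()) // e.g. [DEFAULT profile dev]
    // Write the (re-formatted) INI back out.
    if _, err := cfg.WriteTo(os.Stdout); err != nil {
        fmt.Fprintln(os.Stderr, err)
    }
}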

Some files were not shown because too many files have changed in this diff.