diff --git a/.circleci/config.yml b/.circleci/config.yml
new file mode 100644
index 000000000..c5f520b70
--- /dev/null
+++ b/.circleci/config.yml
@@ -0,0 +1,26 @@
+# Golang CircleCI 2.0 configuration file
+#
+# Check https://circleci.com/docs/2.0/language-go/ for more details
+version: 2
+jobs:
+  build:
+    docker:
+      # specify the version
+      - image: circleci/golang:1.9
+
+      # Specify service dependencies here if necessary
+      # CircleCI maintains a library of pre-built images
+      # documented at https://circleci.com/docs/2.0/circleci-images/
+      # - image: circleci/postgres:9.4
+
+    #### TEMPLATE_NOTE: go expects specific checkout path representing url
+    #### expecting it in the form of
+    ####   /go/src/github.com/circleci/go-tool
+    ####   /go/src/bitbucket.org/circleci/go-tool
+    working_directory: /go/src/github.com/jesseduffield/lazygit
+    steps:
+      - checkout
+
+      # specify any bash command here prefixed with `run: `
+      - run: go test -v ./...
+      - run: bash <(curl -s https://codecov.io/bash)
diff --git a/.goreleaser.yml b/.goreleaser.yml
index 6e1b1b1bc..a7b2bcfdb 100644
--- a/.goreleaser.yml
+++ b/.goreleaser.yml
@@ -1,23 +1,24 @@
 # This is an example goreleaser.yaml file with some sane defaults.
 # Make sure to check the documentation at http://goreleaser.com
 builds:
-- env:
-  - CGO_ENABLED=0
-  goos:
-  - freebsd
-  - windows
-  - darwin
-  - linux
-  goarch:
-  - amd64
-  - arm
-  - arm64
+  - env:
+      - CGO_ENABLED=0
+    goos:
+      - freebsd
+      - windows
+      - darwin
+      - linux
+    goarch:
+      - amd64
+      - arm
+      - arm64
+      - 386
 archive:
   replacements:
     darwin: Darwin
     linux: Linux
     windows: Windows
-    386: i386
+    386: 32-bit
     amd64: x86_64
   format_overrides:
     - goos: windows
@@ -25,13 +26,13 @@ archive:
 checksum:
   name_template: 'checksums.txt'
 snapshot:
-  name_template: "{{ .Tag }}-next"
+  name_template: '{{ .Tag }}-next'
 changelog:
   sort: asc
   filters:
     exclude:
-    - '^docs:'
-    - '^test:'
+      - '^docs:'
+      - '^test:'
 brew:
   # Repository to push the tap to.
   github:
@@ -40,20 +41,19 @@ brew:
 
   # Your app's homepage.
   # Default is empty.
-  homepage: "https://github.com/jesseduffield/lazygit/"
+  homepage: 'https://github.com/jesseduffield/lazygit/'
 
   # Your app's description.
   # Default is empty.
-  description: "A simple terminal UI for git commands, written in Go"
+  description: 'A simple terminal UI for git commands, written in Go'
 
 # # Packages your package depends on.
 # dependencies:
 #   - git
 #   - zsh
-
 # # Packages that conflict with your package.
 # conflicts:
 #   - svn
 #   - bash
-# test comment to see if goreleaser only releases on new commits
\ No newline at end of file
+# test comment to see if goreleaser only releases on new commits
diff --git a/Gopkg.lock b/Gopkg.lock
index eef8e12ac..17bc8748c 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -41,6 +41,14 @@
   revision = "5b77d2a35fb0ede96d138fc9a99f5c9b6aef11b4"
   version = "v1.7.0"
 
+[[projects]]
+  digest = "1:1b91ae0dc69a41d4c2ed23ea5cffb721ea63f5037ca4b81e6d6771fbb8f45129"
+  name = "github.com/fsnotify/fsnotify"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
+  version = "v1.4.7"
+
 [[projects]]
   branch = "master"
   name = "github.com/golang-collections/collections"
@@ -49,6 +57,26 @@
 
 [[projects]]
   branch = "master"
+  digest = "1:11c6c696067d3127ecf332b10f89394d386d9083f82baf71f40f2da31841a009"
+  name = "github.com/hashicorp/hcl"
+  packages = [
+    ".",
+    "hcl/ast",
+    "hcl/parser",
+    "hcl/printer",
+    "hcl/scanner",
+    "hcl/strconv",
+    "hcl/token",
+    "json/parser",
+    "json/scanner",
+    "json/token",
+  ]
+  pruneopts = "NUT"
+  revision = "ef8a98b0bbce4a65b5aa4c368430a80ddc533168"
+
+[[projects]]
+  branch = "master"
+  digest = "1:62fe3a7ea2050ecbd753a71889026f83d73329337ada66325cbafd5dea5f713d"
   name = "github.com/jbenet/go-context"
   packages = ["io"]
   revision = "d14ea06fba99483203c19d92cfcd13ebe73135f4"
@@ -79,6 +107,15 @@
   version = "0.4"
 
 [[projects]]
+  digest = "1:d244f8666a838fe6ad70ec8fe77f50ebc29fdc3331a2729ba5886bef8435d10d"
+  name = "github.com/magiconair/properties"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "c2353362d570a7bfa228149c62842019201cfb71"
+  version = "v1.8.0"
+
+[[projects]]
+  digest = "1:08c231ec84231a7e23d67e4b58f975e1423695a32467a362ee55a803f9de8061"
   name = "github.com/mattn/go-colorable"
   packages = ["."]
   revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072"
@@ -108,6 +145,14 @@
   packages = ["."]
   revision = "58046073cbffe2f25d425fe1331102f55cf719de"
 
+[[projects]]
+  branch = "master"
+  digest = "1:5fe20cfe4ef484c237cec9f947b2a6fa90bad4b8610fd014f0e4211e13d82d5d"
+  name = "github.com/mitchellh/mapstructure"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "f15292f7a699fcc1a38a80977f80a046874ba8ac"
+
 [[projects]]
   digest = "1:2c34c77bf3ec848da26e48af58fc511ed52750961fa848399d122882b8890928"
   name = "github.com/nicksnyder/go-i18n"
@@ -133,6 +178,15 @@
   version = "v0.2.0"
 
 [[projects]]
+  digest = "1:51ea800cff51752ff68e12e04106f5887b4daec6f9356721238c28019f0b42db"
+  name = "github.com/pelletier/go-toml"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194"
+  version = "v1.2.0"
+
+[[projects]]
+  digest = "1:d917313f309bda80d27274d53985bc65651f81a5b66b820749ac7f8ef061fd04"
   name = "github.com/sergi/go-diff"
   packages = ["diffmatchpatch"]
   revision = "1744e2970ca51c86172c8190fadad617561ed6e7"
@@ -145,6 +199,57 @@
   revision = "59b7046e48ad6bac800c5e1dd5142282cbfcf154"
 
 [[projects]]
+  digest = "1:41618aee8828e62dfe62d44f579c06892d0e98907d1c6d5bcd83bfe8536ec5a3"
+  name = "github.com/shibukawa/configdir"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "e180dbdc8da04c4fa04272e875ce64949f38bd3e"
+
+[[projects]]
+  digest = "1:330e9062b308ac597e28485699c02223bd052437a6eed32a173c9227dcb9d95a"
+  name = "github.com/spf13/afero"
+  packages = [
+    ".",
+    "mem",
+  ]
+  pruneopts = "NUT"
+  revision = "787d034dfe70e44075ccc060d346146ef53270ad"
+  version = "v1.1.1"
+
+[[projects]]
+  digest = "1:3fa7947ca83b98ae553590d993886e845a4bff19b7b007e869c6e0dd3b9da9cd"
+  name = "github.com/spf13/cast"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "8965335b8c7107321228e3e3702cab9832751bac"
+  version = "v1.2.0"
+
+[[projects]]
+  branch = "master"
+  digest = "1:f29f83301ed096daed24a90f4af591b7560cb14b9cc3e1827abbf04db7269ab5"
+  name = "github.com/spf13/jwalterweatherman"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "14d3d4c518341bea657dd8a226f5121c0ff8c9f2"
+
+[[projects]]
+  digest = "1:e3707aeaccd2adc89eba6c062fec72116fe1fc1ba71097da85b4d8ae1668a675"
+  name = "github.com/spf13/pflag"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "9a97c102cda95a86cec2345a6f09f55a939babf5"
+  version = "v1.0.2"
+
+[[projects]]
+  digest = "1:454979540e2a1582f375a17c106cf4e11e3bcac4baffb4af23e515c87f87de13"
+  name = "github.com/spf13/viper"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "907c19d40d9a6c9bb55f040ff4ae45271a4754b9"
+  version = "v1.1.0"
+
+[[projects]]
+  digest = "1:ccca1dcd18bc54e23b517a3c5babeff2e3924a7d8fc1932162225876cfe4bfb0"
   name = "github.com/src-d/gcfg"
   packages = [
     ".",
@@ -286,6 +391,14 @@
   revision = "ec4a0fea49c7b46c2aeb0b51aac55779c607e52b"
   version = "v0.1.2"
 
+[[projects]]
+  digest = "1:7c95b35057a0ff2e19f707173cc1a947fa43a6eb5c4d300d196ece0334046082"
+  name = "gopkg.in/yaml.v2"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
+  version = "v2.2.1"
+
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
@@ -298,6 +411,8 @@
     "github.com/jesseduffield/gocui",
     "github.com/mgutz/str",
     "github.com/nicksnyder/go-i18n/v2/i18n",
+    "github.com/shibukawa/configdir",
+    "github.com/spf13/viper",
     "github.com/tcnksm/go-gitconfig",
     "golang.org/x/text/language",
     "gopkg.in/src-d/go-git.v4",
diff --git a/README.md b/README.md
index 0990db695..721dc0862 100644
--- a/README.md
+++ b/README.md
@@ -28,7 +28,19 @@ brew install lazygit
 
 ### Ubuntu
 
 Packages for Ubuntu 16.04, 18.04 and 18.10 are available via [Launchpad PPA](https://launchpad.net/~lazygit-team).
-They are built daily, straight from master branch.
+
+**Release builds**
+
+Built from git tags. These should be more stable.
+
+```sh
+sudo add-apt-repository ppa:lazygit-team/release
+sudo apt-get update
+sudo apt-get install lazygit
+```
+
+**Daily builds**
+
+Built from the master branch once every 24 hours (sometimes more often).
 
 ```sh
 sudo add-apt-repository ppa:lazygit-team/daily
diff --git a/VERSION b/VERSION
index e26c92f2c..60dc34827 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-v0.1.61
\ No newline at end of file
+v0.1.66
\ No newline at end of file
diff --git a/docs/Config.md b/docs/Config.md
new file mode 100644
index 000000000..e6e2c3974
--- /dev/null
+++ b/docs/Config.md
@@ -0,0 +1,39 @@
+# User Config:
+
+## Default:
+
+```
+  gui:
+    # stuff relating to the UI
+    scrollHeight: 2 # how many lines you scroll by
+    theme:
+      activeBorderColor:
+        - white
+        - bold
+      inactiveBorderColor:
+        - white
+      optionsTextColor:
+        - blue
+  git:
+    # stuff relating to git
+  os:
+    # stuff relating to the OS
+```
+
+## Color Attributes:
+
+For color attributes you can specify an array of attributes (with at most one color attribute).
+The available attributes are:
+
+- default
+- black
+- red
+- green
+- yellow
+- blue
+- magenta
+- cyan
+- white
+- bold
+- reverse # useful for high-contrast
+- underline
diff --git a/docs/Keybindings.md b/docs/Keybindings.md
index 2b9f9d952..2a1b1817f 100644
--- a/docs/Keybindings.md
+++ b/docs/Keybindings.md
@@ -1,16 +1,25 @@
 # Keybindings:
 
 ## Global:
+
   ←→↑↓/hjkl:                      navigate
-  PgUp/PgDn or ctrl+u/ctrl+d:   scroll diff panel 
+  PgUp/PgDn or ctrl+u/ctrl+d:   scroll diff panel
                                      (for PgUp and PgDn, use fn+up/fn+down on osx)
   q:                                quit
   p:                                pull
   shift+P:                         push
 
+## Status Panel:
+
+
+  e:        edit config file
+  o:        open config file
+
+
 ## Files Panel:
+
   space:    toggle staged
   c:        commit changes
@@ -27,6 +36,7 @@
 
 ## Branches Panel:
+
   space:   checkout branch
   f:       force checkout branch
@@ -37,6 +47,7 @@
 
 ## Commits Panel:
+
   s:       squash down (only available for topmost commit)
   r:       rename commit
@@ -44,6 +55,7 @@
 
 ## Stash Panel:
+
   space:   apply
   g:       pop
@@ -51,6 +63,7 @@
 
 ## Popup Panel:
+
   esc:     close/cancel
   enter:   confirm
@@ -58,6 +71,7 @@
 
 ## Resolving Merge Conflicts (Diff Panel):
+
   ←→/hl: navigate conflicts
   ↑↓/kj: select hunk
diff --git a/main.go b/main.go
index c7fec1ae1..85e0f36bb 100644
--- a/main.go
+++ b/main.go
@@ -4,9 +4,7 @@ import (
 	"flag"
 	"fmt"
 	"io/ioutil"
-	"log"
 	"os"
-	"os/user"
 	"path/filepath"
 
 	"github.com/jesseduffield/lazygit/pkg/app"
@@ -22,14 +20,6 @@ var (
 	versionFlag   = flag.Bool("v", false, "Print the current version")
 )
 
-func homeDirectory() string {
-	usr, err := user.Current()
-	if err != nil {
-		log.Fatal(err)
-	}
-	return usr.HomeDir
-}
-
 func projectPath(path string) string {
 	gopath := os.Getenv("GOPATH")
 	return filepath.FromSlash(gopath + "/src/github.com/jesseduffield/lazygit/" + path)
@@ -56,13 +46,11 @@ func main() {
 		fmt.Printf("commit=%s, build date=%s, version=%s\n", commit, date, version)
 		os.Exit(0)
 	}
-	appConfig := &config.AppConfig{
-		Name:      "lazygit",
-		Version:   version,
-		Commit:    commit,
-		BuildDate: date,
-		Debug:     *debuggingFlag,
+	appConfig, err := config.NewAppConfig("lazygit", version, commit, date, debuggingFlag)
+	if err != nil {
+		panic(err)
 	}
+
 	app, err := app.NewApp(appConfig)
 	app.Log.Info(err)
 	app.GitCommand.SetupGit()
diff --git a/pkg/app/app.go b/pkg/app/app.go
index 3e227a395..98da0b0fa 100644
--- a/pkg/app/app.go
+++ b/pkg/app/app.go
@@ -60,7 +60,7 @@ func NewApp(config config.AppConfigurer) (*App, error) {
 	if err != nil {
 		return nil, err
 	}
-	app.Gui, err = gui.NewGui(app.Log, app.GitCommand, app.OSCommand, app.Tr, config.GetVersion())
+	app.Gui, err = gui.NewGui(app.Log, app.GitCommand, app.OSCommand, app.Tr, config)
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/commands/git.go b/pkg/commands/git.go
index 44fd57f1c..3349e4860 100644
--- a/pkg/commands/git.go
+++ b/pkg/commands/git.go
@@ -359,7 +359,7 @@ func (c *GitCommand) IsInMergeState() (bool, error) {
 func (c *GitCommand) RemoveFile(file File) error {
 	// if the file isn't tracked, we assume you want to delete it
 	if !file.Tracked {
-		return c.OSCommand.RunCommand("rm -rf ./" + file.Name)
+		return os.RemoveAll(file.Name)
 	}
 	// if the file is tracked, we assume you want to just check it out
 	return c.OSCommand.RunCommand("git checkout " + file.Name)
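Review note on the RemoveFile change above: deleting an untracked file through the standard library instead of shelling out to `rm -rf ./<name>` sidesteps shell quoting entirely, since a name containing spaces or backticks would otherwise be split or expanded by the shell (this assumes git.go already imports `os`). A minimal standalone sketch of the difference, with a hypothetical filename:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

func main() {
	// A hypothetical untracked file whose name would confuse a shell:
	// the space splits arguments and the backticks trigger command
	// substitution if passed unquoted to `rm -rf ./<name>`.
	name := "notes `draft`.txt"
	if err := ioutil.WriteFile(name, []byte("scratch"), 0644); err != nil {
		panic(err)
	}

	// os.RemoveAll hands the name straight to the filesystem, so no
	// escaping is needed; it also handles directories, matching rm -rf.
	if err := os.RemoveAll(name); err != nil {
		panic(err)
	}
	fmt.Println("removed:", name)
}
```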
diff --git a/pkg/commands/os.go b/pkg/commands/os.go
index d4ea75e74..32514a419 100644
--- a/pkg/commands/os.go
+++ b/pkg/commands/os.go
@@ -5,6 +5,7 @@ import (
 	"os"
 	"os/exec"
 	"runtime"
+	"strings"
 
 	"github.com/davecgh/go-spew/spew"
 
@@ -130,7 +131,7 @@ func (c *OSCommand) OpenFile(filename string) (*exec.Cmd, error) {
 	if err != nil {
 		return nil, err
 	}
-	err = c.RunCommand(cmdName + " " + filename + cmdTrail) // TODO: test on linux
+	err = c.RunCommand(cmdName + " " + c.Quote(filename) + cmdTrail) // TODO: test on linux
 	return nil, err
 }
 
@@ -163,5 +164,6 @@ func (c *OSCommand) PrepareSubProcess(cmdName string, commandArgs ...string) (*e
 
 // Quote wraps a message in platform-specific quotation marks
 func (c *OSCommand) Quote(message string) string {
+	message = strings.Replace(message, "`", "\\`", -1)
 	return c.Platform.escapedQuote + message + c.Platform.escapedQuote
 }
diff --git a/pkg/commands/os_test.go b/pkg/commands/os_test.go
new file mode 100644
index 000000000..29540aff6
--- /dev/null
+++ b/pkg/commands/os_test.go
@@ -0,0 +1,16 @@
+package commands
+
+import "testing"
+
+func TestQuote(t *testing.T) {
+	osCommand := &OSCommand{
+		Log:      nil,
+		Platform: getPlatform(),
+	}
+	test := "hello `test`"
+	expected := osCommand.Platform.escapedQuote + "hello \\`test\\`" + osCommand.Platform.escapedQuote
+	test = osCommand.Quote(test)
+	if test != expected {
+		t.Error("Expected " + expected + ", got " + test)
+	}
+}
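The new test above pins down Quote's output; for context, the backtick escape matters because backticks start command substitution in POSIX shells even inside double quotes. A standalone sketch of the same escaping, assuming the platform's `escapedQuote` is a plain double quote as on Unix-like systems:

```go
package main

import (
	"fmt"
	"strings"
)

// quote mirrors OSCommand.Quote under the assumption that escapedQuote
// is a double quote. Inside double quotes a shell still expands `...`,
// so the backslash escape is what actually neutralizes it.
func quote(message string) string {
	message = strings.Replace(message, "`", "\\`", -1)
	return "\"" + message + "\""
}

func main() {
	fmt.Println(quote("hello `test`")) // "hello \`test\`"
}
```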
diff --git a/pkg/config/app_config.go b/pkg/config/app_config.go
index 98e56dea2..9f258d4a6 100644
--- a/pkg/config/app_config.go
+++ b/pkg/config/app_config.go
@@ -1,12 +1,20 @@
 package config
 
+import (
+	"bytes"
+
+	"github.com/shibukawa/configdir"
+	"github.com/spf13/viper"
+)
+
 // AppConfig contains the base configuration fields required for lazygit.
 type AppConfig struct {
-	Debug     bool   `long:"debug" env:"DEBUG" default:"false"`
-	Version   string `long:"version" env:"VERSION" default:"unversioned"`
-	Commit    string `long:"commit" env:"COMMIT"`
-	BuildDate string `long:"build-date" env:"BUILD_DATE"`
-	Name      string `long:"name" env:"NAME" default:"lazygit"`
+	Debug      bool   `long:"debug" env:"DEBUG" default:"false"`
+	Version    string `long:"version" env:"VERSION" default:"unversioned"`
+	Commit     string `long:"commit" env:"COMMIT"`
+	BuildDate  string `long:"build-date" env:"BUILD_DATE"`
+	Name       string `long:"name" env:"NAME" default:"lazygit"`
+	UserConfig *viper.Viper
 }
 
 // AppConfigurer interface allows individual app config structs to inherit Fields
@@ -17,6 +25,26 @@ type AppConfigurer interface {
 	GetCommit() string
 	GetBuildDate() string
 	GetName() string
+	GetUserConfig() *viper.Viper
+	InsertToUserConfig(string, string) error
+}
+
+// NewAppConfig makes a new app config
+func NewAppConfig(name, version, commit, date string, debuggingFlag *bool) (*AppConfig, error) {
+	userConfig, err := LoadUserConfig()
+	if err != nil {
+		panic(err)
+	}
+
+	appConfig := &AppConfig{
+		Name:       name,
+		Version:    version,
+		Commit:     commit,
+		BuildDate:  date,
+		Debug:      *debuggingFlag,
+		UserConfig: userConfig,
+	}
+	return appConfig, nil
 }
 
 // GetDebug returns debug flag
@@ -43,3 +71,106 @@ func (c *AppConfig) GetBuildDate() string {
 func (c *AppConfig) GetName() string {
 	return c.Name
 }
+
+// GetUserConfig returns the user config
+func (c *AppConfig) GetUserConfig() *viper.Viper {
+	return c.UserConfig
+}
+
+func newViper() (*viper.Viper, error) {
+	v := viper.New()
+	v.SetConfigType("yaml")
+	v.SetConfigName("config")
+	return v, nil
+}
+
+// LoadUserConfig gets the user's config
+func LoadUserConfig() (*viper.Viper, error) {
+	v, err := newViper()
+	if err != nil {
+		return nil, err
+	}
+	if err = LoadDefaultConfig(v); err != nil {
+		return nil, err
+	}
+	if err = LoadUserConfigFromFile(v); err != nil {
+		return nil, err
+	}
+	return v, nil
+}
+
+// LoadDefaultConfig loads in the defaults defined in this file
+func LoadDefaultConfig(v *viper.Viper) error {
+	defaults := getDefaultConfig()
+	return v.ReadConfig(bytes.NewBuffer(defaults))
+}
+
+// LoadUserConfigFromFile loads the user config from their config file, creating
+// the file as an empty config if it does not exist
+func LoadUserConfigFromFile(v *viper.Viper) error {
+	// chucking my name there is not for vanity purposes: the xdg spec (and that
+	// function) requires a vendor name, and it may as well line up with GitHub
+	configDirs := configdir.New("jesseduffield", "lazygit")
+	folder := configDirs.QueryFolderContainsFile("config.yml")
+	if folder == nil {
+		// create the file as an empty config and load it
+		folders := configDirs.QueryFolders(configdir.Global)
+		if err := folders[0].WriteFile("config.yml", []byte{}); err != nil {
+			return err
+		}
+		folder = configDirs.QueryFolderContainsFile("config.yml")
+	}
+	v.AddConfigPath(folder.Path)
+	if err := v.MergeInConfig(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// InsertToUserConfig adds a key/value pair to the user's config and saves it
+func (c *AppConfig) InsertToUserConfig(key, value string) error {
+	// making a new viper object so that we're not writing any defaults back
+	// to the user's config file
+	v, err := newViper()
+	if err != nil {
+		return err
+	}
+	if err = LoadUserConfigFromFile(v); err != nil {
+		return err
+	}
+	v.Set(key, value)
+	if err := v.WriteConfig(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func getDefaultConfig() []byte {
+	return []byte(`
+  gui:
+    # stuff relating to the UI
+    scrollHeight: 2
+    theme:
+      activeBorderColor:
+        - white
+        - bold
+      inactiveBorderColor:
+        - white
+      optionsTextColor:
+        - blue
+  git:
+    # stuff relating to git
+  os:
+    # stuff relating to the OS
+
+`)
+}
+
+// // commenting this out until we use it again
+// func homeDirectory() string {
+// 	usr, err := user.Current()
+// 	if err != nil {
+// 		log.Fatal(err)
+// 	}
+// 	return usr.HomeDir
+// }
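A standalone sketch of the defaults-then-user-file layering that `LoadUserConfig` performs above, using `MergeConfig` (the reader-based sibling of the `MergeInConfig` call in `LoadUserConfigFromFile`) so it runs without touching the filesystem; the user YAML here is hypothetical:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	// Built-in defaults, as in getDefaultConfig above.
	defaults := []byte(`
gui:
  scrollHeight: 2
  theme:
    optionsTextColor:
      - blue
`)
	// Hypothetical contents of a user's config.yml.
	userConfig := []byte(`
gui:
  scrollHeight: 5
`)

	v := viper.New()
	v.SetConfigType("yaml")
	if err := v.ReadConfig(bytes.NewBuffer(defaults)); err != nil {
		panic(err)
	}
	// Later values win; anything the user leaves unset falls through.
	if err := v.MergeConfig(bytes.NewBuffer(userConfig)); err != nil {
		panic(err)
	}

	fmt.Println(v.GetInt("gui.scrollHeight"))                   // 5: user override
	fmt.Println(v.GetStringSlice("gui.theme.optionsTextColor")) // [blue]: default
}
```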
diff --git a/pkg/gui/files_panel.go b/pkg/gui/files_panel.go
index 27589bc19..166af4108 100644
--- a/pkg/gui/files_panel.go
+++ b/pkg/gui/files_panel.go
@@ -228,15 +228,9 @@ func (gui *Gui) PrepareSubProcess(g *gocui.Gui, commands ...string) error {
 	return nil
 }
 
-func (gui *Gui) genericFileOpen(g *gocui.Gui, v *gocui.View, open func(string) (*exec.Cmd, error)) error {
-	file, err := gui.getSelectedFile(g)
-	if err != nil {
-		if err != gui.Errors.ErrNoFiles {
-			return err
-		}
-		return nil
-	}
-	sub, err := open(file.Name)
+func (gui *Gui) genericFileOpen(g *gocui.Gui, v *gocui.View, filename string, open func(string) (*exec.Cmd, error)) error {
+
+	sub, err := open(filename)
 	if err != nil {
 		return gui.createErrorPanel(g, err.Error())
 	}
@@ -248,19 +242,35 @@ func (gui *Gui) genericFileOpen(g *gocui.Gui, v *gocui.View, open func(string) (
 }
 
 func (gui *Gui) handleFileEdit(g *gocui.Gui, v *gocui.View) error {
-	return gui.genericFileOpen(g, v, gui.OSCommand.EditFile)
+	file, err := gui.getSelectedFile(g)
+	if err != nil {
+		return err
+	}
+	return gui.genericFileOpen(g, v, file.Name, gui.OSCommand.EditFile)
 }
 
 func (gui *Gui) handleFileOpen(g *gocui.Gui, v *gocui.View) error {
-	return gui.genericFileOpen(g, v, gui.OSCommand.OpenFile)
+	file, err := gui.getSelectedFile(g)
+	if err != nil {
+		return err
+	}
+	return gui.genericFileOpen(g, v, file.Name, gui.OSCommand.OpenFile)
 }
 
 func (gui *Gui) handleSublimeFileOpen(g *gocui.Gui, v *gocui.View) error {
-	return gui.genericFileOpen(g, v, gui.OSCommand.SublimeOpenFile)
+	file, err := gui.getSelectedFile(g)
+	if err != nil {
+		return err
+	}
+	return gui.genericFileOpen(g, v, file.Name, gui.OSCommand.SublimeOpenFile)
 }
 
 func (gui *Gui) handleVsCodeFileOpen(g *gocui.Gui, v *gocui.View) error {
-	return gui.genericFileOpen(g, v, gui.OSCommand.VsCodeOpenFile)
+	file, err := gui.getSelectedFile(g)
+	if err != nil {
+		return err
+	}
+	return gui.genericFileOpen(g, v, file.Name, gui.OSCommand.VsCodeOpenFile)
 }
 
 func (gui *Gui) handleRefreshFiles(g *gocui.Gui, v *gocui.View) error {
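The four handlers above now repeat the same get-then-open preamble. A hypothetical follow-up (not part of this PR) could fold that back into a helper; sketched here with stub types standing in for gocui and exec:

```go
package main

import "fmt"

type file struct{ Name string }

// Stand-in for gui.getSelectedFile.
func getSelectedFile() (file, error) { return file{Name: "main.go"}, nil }

// withSelectedFile is a hypothetical helper folding the repeated
// "fetch selection, then open it" shape back into one place.
func withSelectedFile(open func(string) error) error {
	f, err := getSelectedFile()
	if err != nil {
		return err
	}
	return open(f.Name)
}

func main() {
	_ = withSelectedFile(func(name string) error {
		fmt.Println("opening", name)
		return nil
	})
}
```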
diff --git a/pkg/gui/gui.go b/pkg/gui/gui.go
index a5bdb9e07..1e7b6156b 100644
--- a/pkg/gui/gui.go
+++ b/pkg/gui/gui.go
@@ -19,6 +19,7 @@ import (
 	"github.com/golang-collections/collections/stack"
 	"github.com/jesseduffield/gocui"
 	"github.com/jesseduffield/lazygit/pkg/commands"
+	"github.com/jesseduffield/lazygit/pkg/config"
 	"github.com/jesseduffield/lazygit/pkg/i18n"
 )
 
@@ -58,9 +59,9 @@ type Gui struct {
 	Log        *logrus.Logger
 	GitCommand *commands.GitCommand
 	OSCommand  *commands.OSCommand
-	Version    string
 	SubProcess *exec.Cmd
 	State      guiState
+	Config     config.AppConfigurer
 	Tr         *i18n.Localizer
 	Errors     SentinelErrors
 }
@@ -77,11 +78,10 @@ type guiState struct {
 	Conflicts         []commands.Conflict
 	EditHistory       *stack.Stack
 	Platform          commands.Platform
-	Version           string
 }
 
 // NewGui builds a new gui handler
-func NewGui(log *logrus.Logger, gitCommand *commands.GitCommand, oSCommand *commands.OSCommand, tr *i18n.Localizer, version string) (*Gui, error) {
+func NewGui(log *logrus.Logger, gitCommand *commands.GitCommand, oSCommand *commands.OSCommand, tr *i18n.Localizer, config config.AppConfigurer) (*Gui, error) {
 	initialState := guiState{
 		Files:         make([]commands.File, 0),
 		PreviousView:  "files",
@@ -92,15 +92,14 @@ func NewGui(log *logrus.Logger, gitCommand *commands.GitCommand, oSCommand *comm
 		Conflicts:     make([]commands.Conflict, 0),
 		EditHistory:   stack.New(),
 		Platform:      *oSCommand.Platform,
-		Version:       version,
 	}
 
 	gui := &Gui{
 		Log:        log,
 		GitCommand: gitCommand,
 		OSCommand:  oSCommand,
-		Version:    version,
 		State:      initialState,
+		Config:     config,
 		Tr:         tr,
 	}
 
@@ -113,7 +112,7 @@ func (gui *Gui) scrollUpMain(g *gocui.Gui, v *gocui.View) error {
 	mainView, _ := g.View("main")
 	ox, oy := mainView.Origin()
 	if oy >= 1 {
-		return mainView.SetOrigin(ox, oy-1)
+		return mainView.SetOrigin(ox, max(0, oy-gui.Config.GetUserConfig().GetInt("gui.scrollHeight")))
 	}
 	return nil
 }
@@ -122,7 +121,7 @@ func (gui *Gui) scrollDownMain(g *gocui.Gui, v *gocui.View) error {
 	mainView, _ := g.View("main")
 	ox, oy := mainView.Origin()
 	if oy < len(mainView.BufferLines()) {
-		return mainView.SetOrigin(ox, oy+1)
+		return mainView.SetOrigin(ox, oy+gui.Config.GetUserConfig().GetInt("gui.scrollHeight"))
 	}
 	return nil
 }
@@ -141,7 +140,6 @@ func max(a, b int) int {
 // layout is called for every screen re-render e.g. when the screen is resized
 func (gui *Gui) layout(g *gocui.Gui) error {
 	g.Highlight = true
-	g.SelFgColor = gocui.ColorWhite | gocui.AttrBold
 	width, height := g.Size()
 	leftSideWidth := width / 3
 	statusFilesBoundary := 2
@@ -150,6 +148,7 @@ func (gui *Gui) layout(g *gocui.Gui) error {
 	commitsStashBoundary := height - 5        // height - 5
 	minimumHeight := 16
 	minimumWidth := 10
+	version := gui.Config.GetVersion()
 
 	panelSpacing := 1
 	if OverlappingEdges {
@@ -228,12 +227,14 @@ func (gui *Gui) layout(g *gocui.Gui) error {
 		v.FgColor = gocui.ColorWhite
 	}
 
-	if v, err := g.SetView("options", -1, optionsTop, width-len(gui.Version)-2, optionsTop+2, 0); err != nil {
+	if v, err := g.SetView("options", -1, optionsTop, width-len(version)-2, optionsTop+2, 0); err != nil {
 		if err != gocui.ErrUnknownView {
 			return err
 		}
-		v.FgColor = gocui.ColorBlue
 		v.Frame = false
+		if v.FgColor, err = gui.GetOptionsPanelTextColor(); err != nil {
+			return err
+		}
 	}
 
 	if gui.getCommitMessageView(g) == nil {
@@ -249,14 +250,16 @@ func (gui *Gui) layout(g *gocui.Gui) error {
 		}
 	}
 
-	if v, err := g.SetView("version", width-len(gui.Version)-1, optionsTop, width, optionsTop+2, 0); err != nil {
+	if v, err := g.SetView("version", width-len(version)-1, optionsTop, width, optionsTop+2, 0); err != nil {
 		if err != gocui.ErrUnknownView {
 			return err
 		}
 		v.BgColor = gocui.ColorDefault
 		v.FgColor = gocui.ColorGreen
 		v.Frame = false
-		gui.renderString(g, "version", gui.Version)
+		if err := gui.renderString(g, "version", version); err != nil {
+			return err
+		}
 
 		// these are only called once
 		gui.handleFileSelect(g, filesView)
@@ -264,7 +267,9 @@ func (gui *Gui) layout(g *gocui.Gui) error {
 		gui.refreshBranches(g)
 		gui.refreshCommits(g)
 		gui.refreshStashEntries(g)
-		gui.nextView(g, nil)
+		if err := gui.switchFocus(g, nil, filesView); err != nil {
+			return err
+		}
 	}
 
 	gui.resizePopupPanels(g)
@@ -315,7 +320,9 @@ func (gui *Gui) Run() error {
 
 	gui.g = g // TODO: always use gui.g rather than passing g around everywhere
 
-	g.FgColor = gocui.ColorDefault
+	if err := gui.SetColorScheme(); err != nil {
+		return err
+	}
 
 	gui.goEvery(g, time.Second*60, gui.fetch)
 	gui.goEvery(g, time.Second*10, gui.refreshFiles)
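`goEvery` itself is not part of this diff; a plausible standalone sketch of such a helper, assuming it simply fires the callback from a ticker goroutine (the real method presumably also threads the `*gocui.Gui` through to the callback):

```go
package main

import (
	"fmt"
	"time"
)

// goEvery runs function on a fixed interval in its own goroutine,
// mirroring the gui.goEvery(g, interval, fn) calls above. This is an
// assumed implementation, not the one in the lazygit codebase.
func goEvery(interval time.Duration, function func() error) {
	go func() {
		for range time.Tick(interval) {
			_ = function() // errors from background refreshes are dropped
		}
	}()
}

func main() {
	goEvery(500*time.Millisecond, func() error {
		fmt.Println("refresh")
		return nil
	})
	time.Sleep(2 * time.Second) // let a few ticks fire
}
```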
diff --git a/pkg/gui/keybindings.go b/pkg/gui/keybindings.go
index b4f2bdc57..68cccda6b 100644
--- a/pkg/gui/keybindings.go
+++ b/pkg/gui/keybindings.go
@@ -23,6 +23,8 @@ func (gui *Gui) keybindings(g *gocui.Gui) error {
 		{ViewName: "", Key: 'P', Modifier: gocui.ModNone, Handler: gui.pushFiles},
 		{ViewName: "", Key: 'p', Modifier: gocui.ModNone, Handler: gui.pullFiles},
 		{ViewName: "", Key: 'R', Modifier: gocui.ModNone, Handler: gui.handleRefresh},
+		{ViewName: "status", Key: 'e', Modifier: gocui.ModNone, Handler: gui.handleEditConfig},
+		{ViewName: "status", Key: 'o', Modifier: gocui.ModNone, Handler: gui.handleOpenConfig},
 		{ViewName: "files", Key: 'c', Modifier: gocui.ModNone, Handler: gui.handleCommitPress},
 		{ViewName: "files", Key: 'C', Modifier: gocui.ModNone, Handler: gui.handleCommitEditorPress},
 		{ViewName: "files", Key: gocui.KeySpace, Modifier: gocui.ModNone, Handler: gui.handleFilePress},
@@ -70,7 +72,7 @@ func (gui *Gui) keybindings(g *gocui.Gui) error {
 
 	// Would make these keybindings global but that interferes with editing
 	// input in the confirmation panel
-	for _, viewName := range []string{"files", "branches", "commits", "stash"} {
+	for _, viewName := range []string{"status", "files", "branches", "commits", "stash"} {
 		bindings = append(bindings, []Binding{
 			{ViewName: viewName, Key: gocui.KeyTab, Modifier: gocui.ModNone, Handler: gui.nextView},
 			{ViewName: viewName, Key: gocui.KeyArrowLeft, Modifier: gocui.ModNone, Handler: gui.previousView},
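The Binding entries above are presumably looped over and handed to gocui's SetKeybinding; a stub-based sketch of that registration step (the stub types stand in for the gocui types, and the registration callback is hypothetical):

```go
package main

import "fmt"

// binding mirrors the shape of the Binding struct above.
type binding struct {
	ViewName string
	Key      rune
	Handler  func() error
}

// installKeybindings sketches what gui.keybindings presumably does with
// the slice: iterate and register each entry. With gocui this would be
// g.SetKeybinding(b.ViewName, b.Key, b.Modifier, b.Handler).
func installKeybindings(bindings []binding, register func(view string, key rune, h func() error) error) error {
	for _, b := range bindings {
		if err := register(b.ViewName, b.Key, b.Handler); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	bindings := []binding{
		{ViewName: "status", Key: 'e', Handler: func() error { return nil }},
		{ViewName: "status", Key: 'o', Handler: func() error { return nil }},
	}
	_ = installKeybindings(bindings, func(view string, key rune, h func() error) error {
		fmt.Printf("registered %q on view %q\n", string(key), view)
		return nil
	})
}
```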
diff --git a/pkg/gui/status_panel.go b/pkg/gui/status_panel.go
index 67f133738..88134096e 100644
--- a/pkg/gui/status_panel.go
+++ b/pkg/gui/status_panel.go
@@ -40,3 +40,48 @@ func (gui *Gui) refreshStatus(g *gocui.Gui) error {
 
 	return nil
 }
+
+func (gui *Gui) renderStatusOptions(g *gocui.Gui) error {
+	return gui.renderOptionsMap(g, map[string]string{
+		"o": gui.Tr.SLocalize("OpenConfig"),
+		"e": gui.Tr.SLocalize("EditConfig"),
+	})
+}
+
+func (gui *Gui) handleStatusSelect(g *gocui.Gui, v *gocui.View) error {
+	dashboardString := fmt.Sprintf(
+		"%s\n\n%s\n\n%s\n\n%s\n\n%s",
+		lazygitTitle(),
+		"Keybindings: https://github.com/jesseduffield/lazygit/blob/master/docs/Keybindings.md",
+		"Config Options: https://github.com/jesseduffield/lazygit/blob/master/docs/Config.md",
+		"Tutorial: https://www.youtube.com/watch?v=VDXvbHZYeKY",
+		"Raise an Issue: https://github.com/jesseduffield/lazygit/issues",
+	)
+
+	if err := gui.renderString(g, "main", dashboardString); err != nil {
+		return err
+	}
+	return gui.renderStatusOptions(g)
+}
+
+func (gui *Gui) handleOpenConfig(g *gocui.Gui, v *gocui.View) error {
+	filename := gui.Config.GetUserConfig().ConfigFileUsed()
+	return gui.genericFileOpen(g, v, filename, gui.OSCommand.OpenFile)
+}
+
+func (gui *Gui) handleEditConfig(g *gocui.Gui, v *gocui.View) error {
+	filename := gui.Config.GetUserConfig().ConfigFileUsed()
+	return gui.genericFileOpen(g, v, filename, gui.OSCommand.EditFile)
+}
+
+func lazygitTitle() string {
+	return `
+   _                       _ _
+  | |                     (_) |
+  | | __ _ _____   _  __ _ _| |_
+  | |/ _` + "`" + ` |_  / | | |/ _` + "`" + ` | | __|
+  | | (_| |/ /| |_| | (_| | | |_
+  |_|\__,_/___|\__, |\__, |_|\__|
+                __/ | __/ |
+               |___/ |___/       `
+}
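A side note on `lazygitTitle` above: Go raw string literals cannot contain a backtick, so the ASCII art splices them in by closing the raw string, concatenating a backtick as an interpreted string, and reopening the raw string. The same idiom in miniature:

```go
package main

import "fmt"

func main() {
	// A raw string can't contain a backtick, so we stitch one in:
	s := `run ` + "`" + `go build` + "`" + ` to compile`
	fmt.Println(s) // run `go build` to compile
}
```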
diff --git a/pkg/gui/theme.go b/pkg/gui/theme.go
new file mode 100644
index 000000000..dbc8b904b
--- /dev/null
+++ b/pkg/gui/theme.go
@@ -0,0 +1,54 @@
+package gui
+
+import (
+	"github.com/jesseduffield/gocui"
+)
+
+// GetAttribute gets the gocui color attribute from the string
+func (gui *Gui) GetAttribute(key string) gocui.Attribute {
+	colorMap := map[string]gocui.Attribute{
+		"default":   gocui.ColorDefault,
+		"black":     gocui.ColorBlack,
+		"red":       gocui.ColorRed,
+		"green":     gocui.ColorGreen,
+		"yellow":    gocui.ColorYellow,
+		"blue":      gocui.ColorBlue,
+		"magenta":   gocui.ColorMagenta,
+		"cyan":      gocui.ColorCyan,
+		"white":     gocui.ColorWhite,
+		"bold":      gocui.AttrBold,
+		"reverse":   gocui.AttrReverse,
+		"underline": gocui.AttrUnderline,
+	}
+	value, present := colorMap[key]
+	if present {
+		return value
+	}
+	return gocui.ColorWhite
+}
+
+// GetColor bitwise OR's a list of attributes obtained via the given keys
+func (gui *Gui) GetColor(keys []string) gocui.Attribute {
+	var attribute gocui.Attribute
+	for _, key := range keys {
+		attribute = attribute | gui.GetAttribute(key)
+	}
+	return attribute
+}
+
+// GetOptionsPanelTextColor gets the color of the options panel text
+func (gui *Gui) GetOptionsPanelTextColor() (gocui.Attribute, error) {
+	userConfig := gui.Config.GetUserConfig()
+	optionsColor := userConfig.GetStringSlice("gui.theme.optionsTextColor")
+	return gui.GetColor(optionsColor), nil
+}
+
+// SetColorScheme sets the color scheme for the app based on the user config
+func (gui *Gui) SetColorScheme() error {
+	userConfig := gui.Config.GetUserConfig()
+	activeBorderColor := userConfig.GetStringSlice("gui.theme.activeBorderColor")
+	inactiveBorderColor := userConfig.GetStringSlice("gui.theme.inactiveBorderColor")
+	gui.g.FgColor = gui.GetColor(inactiveBorderColor)
+	gui.g.SelFgColor = gui.GetColor(activeBorderColor)
+	return nil
+}
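Why Config.md says "max one color attribute": in gocui (as in termbox) the low bits of an `Attribute` hold a color enum while bold/reverse/underline are single-bit flags, so `GetColor` can OR one color together with any number of modifiers, but OR-ing two colors silently produces a third color. A sketch with stand-in values (loosely modeled on termbox; the real constants live in `github.com/jesseduffield/gocui`):

```go
package main

import "fmt"

type attribute uint16

// Colors are an enum in the low bits: OR-ing two of them yields a
// different color value, which is why at most one color may be chosen.
const (
	colorDefault attribute = iota
	colorBlack
	colorRed
	colorGreen
	colorYellow
	colorBlue
)

// Modifiers are single-bit flags in the high bits, so any number of
// them can be OR-ed in safely.
const (
	attrBold attribute = 1 << (9 + iota)
	attrUnderline
	attrReverse
)

func getColor(attrs []attribute) attribute {
	var result attribute
	for _, a := range attrs {
		result |= a
	}
	return result
}

func main() {
	ok := getColor([]attribute{colorBlue, attrBold})
	fmt.Println(ok&attrBold != 0) // true: bold flag is set
	fmt.Println(ok &^ attrBold)   // 5: still colorBlue underneath

	bad := getColor([]attribute{colorRed, colorGreen}) // 2 | 3 == 3
	fmt.Println(bad == colorGreen)                     // true: the red is silently lost
}
```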
diff --git a/pkg/gui/view_helpers.go b/pkg/gui/view_helpers.go
index 3bcf97570..b870508b1 100644
--- a/pkg/gui/view_helpers.go
+++ b/pkg/gui/view_helpers.go
@@ -10,7 +10,7 @@ import (
 	"github.com/spkg/bom"
 )
 
-var cyclableViews = []string{"files", "branches", "commits", "stash"}
+var cyclableViews = []string{"status", "files", "branches", "commits", "stash"}
 
 func (gui *Gui) refreshSidePanels(g *gocui.Gui) error {
 	gui.refreshBranches(g)
@@ -82,6 +82,8 @@ func (gui *Gui) newLineFocused(g *gocui.Gui, v *gocui.View) error {
 	mainView.SetOrigin(0, 0)
 
 	switch v.Name() {
+	case "status":
+		return gui.handleStatusSelect(g, v)
 	case "files":
 		return gui.handleFileSelect(g, v)
 	case "branches":
diff --git a/pkg/i18n/english.go b/pkg/i18n/english.go
index 6e2af8e22..c6d1d1379 100644
--- a/pkg/i18n/english.go
+++ b/pkg/i18n/english.go
@@ -68,7 +68,7 @@ func addEnglish(i18nObject *i18n.Bundle) {
 			Other: "refresh",
 		}, &i18n.Message{
 			ID:    "addPatch",
-			Other: "add path",
+			Other: "add patch",
 		}, &i18n.Message{
 			ID:    "edit",
 			Other: "edit",
@@ -294,6 +294,12 @@ func addEnglish(i18nObject *i18n.Bundle) {
 		}, &i18n.Message{
 			ID:    "MergeAborted",
 			Other: "Merge aborted",
+		}, &i18n.Message{
+			ID:    "OpenConfig",
+			Other: "open config file",
+		}, &i18n.Message{
+			ID:    "EditConfig",
+			Other: "edit config file",
 		},
 	)
 }
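A standalone sketch of how the two messages added above get resolved at runtime through go-i18n v2 (both it and golang.org/x/text/language already appear in Gopkg.lock), assuming the v2 API as vendored here; `SLocalize` in the lazygit codebase wraps a localizer like this one:

```go
package main

import (
	"fmt"

	"github.com/nicksnyder/go-i18n/v2/i18n"
	"golang.org/x/text/language"
)

func main() {
	bundle := i18n.NewBundle(language.English)
	bundle.AddMessages(language.English, &i18n.Message{
		ID:    "OpenConfig",
		Other: "open config file",
	})

	localizer := i18n.NewLocalizer(bundle, "en")
	fmt.Println(localizer.MustLocalize(&i18n.LocalizeConfig{
		MessageID: "OpenConfig",
	})) // open config file
}
```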
diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS
new file mode 100644
index 000000000..5ab5d41c5
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS
@@ -0,0 +1,52 @@
+# Names should be added to this file as
+#	Name or Organization <email address>
+# The email address is not required for organizations.
+
+# You can update this list using the following command:
+#
+#   $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
+
+# Please keep the list sorted.
+
+Aaron L 
+Adrien Bustany 
+Amit Krishnan 
+Anmol Sethi 
+Bjørn Erik Pedersen 
+Bruno Bigras 
+Caleb Spare 
+Case Nelson 
+Chris Howey  
+Christoffer Buchholz 
+Daniel Wagner-Hall 
+Dave Cheney 
+Evan Phoenix 
+Francisco Souza 
+Hari haran 
+John C Barstow
+Kelvin Fo 
+Ken-ichirou MATSUZAWA 
+Matt Layher 
+Nathan Youngman 
+Nickolai Zeldovich 
+Patrick 
+Paul Hammond 
+Pawel Knap 
+Pieter Droogendijk 
+Pursuit92 
+Riku Voipio 
+Rob Figueiredo 
+Rodrigo Chiossi 
+Slawek Ligus 
+Soge Zhang 
+Tiffany Jernigan 
+Tilak Sharma 
+Tom Payne 
+Travis Cline 
+Tudor Golubenco 
+Vahe Khachikyan 
+Yukang 
+bronze1man 
+debrando 
+henrikedwards 
+铁哥 
diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE
new file mode 100644
index 000000000..f21e54080
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2012 fsnotify Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go
new file mode 100644
index 000000000..ced39cb88
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fen.go
@@ -0,0 +1,37 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build solaris
+
+package fsnotify
+
+import (
+	"errors"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events chan Event
+	Errors chan error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	return nil
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
new file mode 100644
index 000000000..190bf0de5
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
@@ -0,0 +1,66 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+// Package fsnotify provides a platform-independent interface for file system notifications.
+package fsnotify
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+)
+
+// Event represents a single file system notification.
+type Event struct {
+	Name string // Relative path to the file or directory.
+	Op   Op     // File operation that triggered the event.
+}
+
+// Op describes a set of file operations.
+type Op uint32
+
+// These are the generalized file operations that can trigger a notification.
+const (
+	Create Op = 1 << iota
+	Write
+	Remove
+	Rename
+	Chmod
+)
+
+func (op Op) String() string {
+	// Use a buffer for efficient string concatenation
+	var buffer bytes.Buffer
+
+	if op&Create == Create {
+		buffer.WriteString("|CREATE")
+	}
+	if op&Remove == Remove {
+		buffer.WriteString("|REMOVE")
+	}
+	if op&Write == Write {
+		buffer.WriteString("|WRITE")
+	}
+	if op&Rename == Rename {
+		buffer.WriteString("|RENAME")
+	}
+	if op&Chmod == Chmod {
+		buffer.WriteString("|CHMOD")
+	}
+	if buffer.Len() == 0 {
+		return ""
+	}
+	return buffer.String()[1:] // Strip leading pipe
+}
+
+// String returns a string representation of the event in the form
+// "file: REMOVE|WRITE|..."
+func (e Event) String() string {
+	return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
+}
+
+// Common errors that can be reported by a watcher
+var ErrEventOverflow = errors.New("fsnotify queue overflow")
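fsnotify enters the dependency tree via viper (see Gopkg.lock above). A minimal sketch of the watcher API this vendored package defines, using only the exported surface shown in fsnotify.go and the platform files:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Watch the current directory (non-recursively, per Add's contract).
	if err := watcher.Add("."); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				return
			}
			log.Println(event) // e.g. "config.yml": WRITE
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		}
	}
}
```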
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go
new file mode 100644
index 000000000..d9fd1b88a
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/inotify.go
@@ -0,0 +1,337 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events   chan Event
+	Errors   chan error
+	mu       sync.Mutex // Map access
+	fd       int
+	poller   *fdPoller
+	watches  map[string]*watch // Map of inotify watches (key: path)
+	paths    map[int]string    // Map of watched paths (key: watch descriptor)
+	done     chan struct{}     // Channel for sending a "quit message" to the reader goroutine
+	doneResp chan struct{}     // Channel to respond to Close
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	// Create inotify fd
+	fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
+	if fd == -1 {
+		return nil, errno
+	}
+	// Create epoll
+	poller, err := newFdPoller(fd)
+	if err != nil {
+		unix.Close(fd)
+		return nil, err
+	}
+	w := &Watcher{
+		fd:       fd,
+		poller:   poller,
+		watches:  make(map[string]*watch),
+		paths:    make(map[int]string),
+		Events:   make(chan Event),
+		Errors:   make(chan error),
+		done:     make(chan struct{}),
+		doneResp: make(chan struct{}),
+	}
+
+	go w.readEvents()
+	return w, nil
+}
+
+func (w *Watcher) isClosed() bool {
+	select {
+	case <-w.done:
+		return true
+	default:
+		return false
+	}
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	if w.isClosed() {
+		return nil
+	}
+
+	// Send 'close' signal to goroutine, and set the Watcher to closed.
+	close(w.done)
+
+	// Wake up goroutine
+	w.poller.wake()
+
+	// Wait for goroutine to close
+	<-w.doneResp
+
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	name = filepath.Clean(name)
+	if w.isClosed() {
+		return errors.New("inotify instance already closed")
+	}
+
+	const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
+		unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
+		unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
+
+	var flags uint32 = agnosticEvents
+
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	watchEntry := w.watches[name]
+	if watchEntry != nil {
+		flags |= watchEntry.flags | unix.IN_MASK_ADD
+	}
+	wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
+	if wd == -1 {
+		return errno
+	}
+
+	if watchEntry == nil {
+		w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+		w.paths[wd] = name
+	} else {
+		watchEntry.wd = uint32(wd)
+		watchEntry.flags = flags
+	}
+
+	return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	name = filepath.Clean(name)
+
+	// Fetch the watch.
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	watch, ok := w.watches[name]
+
+	// Remove it from inotify.
+	if !ok {
+		return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
+	}
+
+	// We successfully removed the watch if InotifyRmWatch doesn't return an
+	// error, we need to clean up our internal state to ensure it matches
+	// inotify's kernel state.
+	delete(w.paths, int(watch.wd))
+	delete(w.watches, name)
+
+	// inotify_rm_watch will return EINVAL if the file has been deleted;
+	// the inotify will already have been removed.
+	// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
+	// by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
+	// so that EINVAL means that the wd is being rm_watch()ed or its file removed
+	// by another thread and we have not received IN_IGNORE event.
+	success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
+	if success == -1 {
+		// TODO: Perhaps it's not helpful to return an error here in every case.
+		// the only two possible errors are:
+		// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
+		// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
+		// Watch descriptors are invalidated when they are removed explicitly or implicitly;
+		// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
+		return errno
+	}
+
+	return nil
+}
+
+type watch struct {
+	wd    uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+	flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+}
+
+// readEvents reads from the inotify file descriptor, converts the
+// received events into Event objects and sends them via the Events channel
+func (w *Watcher) readEvents() {
+	var (
+		buf   [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
+		n     int                                  // Number of bytes read with read()
+		errno error                                // Syscall errno
+		ok    bool                                 // For poller.wait
+	)
+
+	defer close(w.doneResp)
+	defer close(w.Errors)
+	defer close(w.Events)
+	defer unix.Close(w.fd)
+	defer w.poller.close()
+
+	for {
+		// See if we have been closed.
+		if w.isClosed() {
+			return
+		}
+
+		ok, errno = w.poller.wait()
+		if errno != nil {
+			select {
+			case w.Errors <- errno:
+			case <-w.done:
+				return
+			}
+			continue
+		}
+
+		if !ok {
+			continue
+		}
+
+		n, errno = unix.Read(w.fd, buf[:])
+		// If a signal interrupted execution, see if we've been asked to close, and try again.
+		// http://man7.org/linux/man-pages/man7/signal.7.html :
+		// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
+		if errno == unix.EINTR {
+			continue
+		}
+
+		// unix.Read might have been woken up by Close. If so, we're done.
+		if w.isClosed() {
+			return
+		}
+
+		if n < unix.SizeofInotifyEvent {
+			var err error
+			if n == 0 {
+				// If EOF is received. This should really never happen.
+				err = io.EOF
+			} else if n < 0 {
+				// If an error occurred while reading.
+				err = errno
+			} else {
+				// Read was too short.
+				err = errors.New("notify: short read in readEvents()")
+			}
+			select {
+			case w.Errors <- err:
+			case <-w.done:
+				return
+			}
+			continue
+		}
+
+		var offset uint32
+		// We don't know how many events we just read into the buffer
+		// While the offset points to at least one whole event...
+		for offset <= uint32(n-unix.SizeofInotifyEvent) {
+			// Point "raw" to the event in the buffer
+			raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+
+			mask := uint32(raw.Mask)
+			nameLen := uint32(raw.Len)
+
+			if mask&unix.IN_Q_OVERFLOW != 0 {
+				select {
+				case w.Errors <- ErrEventOverflow:
+				case <-w.done:
+					return
+				}
+			}
+
+			// If the event happened to the watched directory or the watched file, the kernel
+			// doesn't append the filename to the event, but we would like to always fill
+			// the "Name" field with a valid filename. We retrieve the path of the watch from
+			// the "paths" map.
+			w.mu.Lock()
+			name, ok := w.paths[int(raw.Wd)]
+			// IN_DELETE_SELF occurs when the file/directory being watched is removed.
+			// This is a sign to clean up the maps, otherwise we are no longer in sync
+			// with the inotify kernel state which has already deleted the watch
+			// automatically.
+			if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+				delete(w.paths, int(raw.Wd))
+				delete(w.watches, name)
+			}
+			w.mu.Unlock()
+
+			if nameLen > 0 {
+				// Point "bytes" at the first byte of the filename
+				bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
+				// The filename is padded with NULL bytes. TrimRight() gets rid of those.
+				name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+			}
+
+			event := newEvent(name, mask)
+
+			// Send the events that are not ignored on the events channel
+			if !event.ignoreLinux(mask) {
+				select {
+				case w.Events <- event:
+				case <-w.done:
+					return
+				}
+			}
+
+			// Move to the next event in the buffer
+			offset += unix.SizeofInotifyEvent + nameLen
+		}
+	}
+}
+
+// Certain types of events can be "ignored" and not sent over the Events
+// channel. Such as events marked ignore by the kernel, or MODIFY events
+// against files that do not exist.
+func (e *Event) ignoreLinux(mask uint32) bool {
+	// Ignore anything the inotify API says to ignore
+	if mask&unix.IN_IGNORED == unix.IN_IGNORED {
+		return true
+	}
+
+	// If the event is not a DELETE or RENAME, the file must exist.
+	// Otherwise the event is ignored.
+	// *Note*: this was put in place because it was seen that a MODIFY
+	// event was sent after the DELETE. This ignores that MODIFY and
+	// assumes a DELETE will come or has come if the file doesn't exist.
+	if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
+		_, statErr := os.Lstat(e.Name)
+		return os.IsNotExist(statErr)
+	}
+	return false
+}
+
+// newEvent returns a platform-independent Event based on an inotify mask.
+func newEvent(name string, mask uint32) Event {
+	e := Event{Name: name}
+	if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
+		e.Op |= Create
+	}
+	if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
+		e.Op |= Remove
+	}
+	if mask&unix.IN_MODIFY == unix.IN_MODIFY {
+		e.Op |= Write
+	}
+	if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
+		e.Op |= Rename
+	}
+	if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
+		e.Op |= Chmod
+	}
+	return e
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
new file mode 100644
index 000000000..cc7db4b22
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
@@ -0,0 +1,187 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+	"errors"
+
+	"golang.org/x/sys/unix"
+)
+
+type fdPoller struct {
+	fd   int    // File descriptor (as returned by the inotify_init() syscall)
+	epfd int    // Epoll file descriptor
+	pipe [2]int // Pipe for waking up
+}
+
+func emptyPoller(fd int) *fdPoller {
+	poller := new(fdPoller)
+	poller.fd = fd
+	poller.epfd = -1
+	poller.pipe[0] = -1
+	poller.pipe[1] = -1
+	return poller
+}
+
+// Create a new inotify poller.
+// This creates an inotify handler, and an epoll handler.
+func newFdPoller(fd int) (*fdPoller, error) {
+	var errno error
+	poller := emptyPoller(fd)
+	defer func() {
+		if errno != nil {
+			poller.close()
+		}
+	}()
+	poller.fd = fd
+
+	// Create epoll fd
+	poller.epfd, errno = unix.EpollCreate1(0)
+	if poller.epfd == -1 {
+		return nil, errno
+	}
+	// Create pipe; pipe[0] is the read end, pipe[1] the write end.
+	errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK)
+	if errno != nil {
+		return nil, errno
+	}
+
+	// Register inotify fd with epoll
+	event := unix.EpollEvent{
+		Fd:     int32(poller.fd),
+		Events: unix.EPOLLIN,
+	}
+	errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
+	if errno != nil {
+		return nil, errno
+	}
+
+	// Register pipe fd with epoll
+	event = unix.EpollEvent{
+		Fd:     int32(poller.pipe[0]),
+		Events: unix.EPOLLIN,
+	}
+	errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
+	if errno != nil {
+		return nil, errno
+	}
+
+	return poller, nil
+}
+
+// Wait using epoll.
+// Returns true if something is ready to be read,
+// false if there is not.
+func (poller *fdPoller) wait() (bool, error) {
+	// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
+	// I don't know whether epoll_wait returns the number of events returned,
+	// or the total number of events ready.
+	// I decided to catch both by making the buffer one larger than the maximum.
+	events := make([]unix.EpollEvent, 7)
+	for {
+		n, errno := unix.EpollWait(poller.epfd, events, -1)
+		if n == -1 {
+			if errno == unix.EINTR {
+				continue
+			}
+			return false, errno
+		}
+		if n == 0 {
+			// If there are no events, try again.
+			continue
+		}
+		if n > 6 {
+			// This should never happen. More events were returned than should be possible.
+			return false, errors.New("epoll_wait returned more events than I know what to do with")
+		}
+		ready := events[:n]
+		epollhup := false
+		epollerr := false
+		epollin := false
+		for _, event := range ready {
+			if event.Fd == int32(poller.fd) {
+				if event.Events&unix.EPOLLHUP != 0 {
+					// This should not happen, but if it does, treat it as a wakeup.
+					epollhup = true
+				}
+				if event.Events&unix.EPOLLERR != 0 {
+					// If an error is waiting on the file descriptor, we should pretend
+					// something is ready to read, and let unix.Read pick up the error.
+					epollerr = true
+				}
+				if event.Events&unix.EPOLLIN != 0 {
+					// There is data to read.
+					epollin = true
+				}
+			}
+			if event.Fd == int32(poller.pipe[0]) {
+				if event.Events&unix.EPOLLHUP != 0 {
+					// Write pipe descriptor was closed, by us. This means we're closing down the
+					// watcher, and we should wake up.
+				}
+				if event.Events&unix.EPOLLERR != 0 {
+					// If an error is waiting on the pipe file descriptor.
+					// This is an absolute mystery, and should never ever happen.
+					return false, errors.New("Error on the pipe descriptor.")
+				}
+				if event.Events&unix.EPOLLIN != 0 {
+					// This is a regular wakeup, so we have to clear the buffer.
+					err := poller.clearWake()
+					if err != nil {
+						return false, err
+					}
+				}
+			}
+		}
+
+		if epollhup || epollerr || epollin {
+			return true, nil
+		}
+		return false, nil
+	}
+}
+
+// Close the write end of the poller.
+func (poller *fdPoller) wake() error {
+	buf := make([]byte, 1)
+	n, errno := unix.Write(poller.pipe[1], buf)
+	if n == -1 {
+		if errno == unix.EAGAIN {
+			// Buffer is full, poller will wake.
+			return nil
+		}
+		return errno
+	}
+	return nil
+}
+
+func (poller *fdPoller) clearWake() error {
+	// You have to be woken up a LOT in order to get to 100!
+	buf := make([]byte, 100)
+	n, errno := unix.Read(poller.pipe[0], buf)
+	if n == -1 {
+		if errno == unix.EAGAIN {
+			// Buffer is empty, someone else cleared our wake.
+			return nil
+		}
+		return errno
+	}
+	return nil
+}
+
+// Close all poller file descriptors, but not the one passed to it.
+func (poller *fdPoller) close() {
+	if poller.pipe[1] != -1 {
+		unix.Close(poller.pipe[1])
+	}
+	if poller.pipe[0] != -1 {
+		unix.Close(poller.pipe[0])
+	}
+	if poller.epfd != -1 {
+		unix.Close(poller.epfd)
+	}
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go
new file mode 100644
index 000000000..86e76a3d6
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go
@@ -0,0 +1,521 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly darwin
+
+package fsnotify
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+	"time"
+
+	"golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events chan Event
+	Errors chan error
+	done   chan struct{} // Channel for sending a "quit message" to the reader goroutine
+
+	kq int // File descriptor (as returned by the kqueue() syscall).
+
+	mu              sync.Mutex        // Protects access to watcher data
+	watches         map[string]int    // Map of watched file descriptors (key: path).
+	externalWatches map[string]bool   // Map of watches added by user of the library.
+	dirFlags        map[string]uint32 // Map of watched directories to fflags used in kqueue.
+	paths           map[int]pathInfo  // Map file descriptors to path names for processing kqueue events.
+	fileExists      map[string]bool   // Keep track of if we know this file exists (to stop duplicate create events).
+	isClosed        bool              // Set to true when Close() is first called
+}
+
+type pathInfo struct {
+	name  string
+	isDir bool
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	kq, err := kqueue()
+	if err != nil {
+		return nil, err
+	}
+
+	w := &Watcher{
+		kq:              kq,
+		watches:         make(map[string]int),
+		dirFlags:        make(map[string]uint32),
+		paths:           make(map[int]pathInfo),
+		fileExists:      make(map[string]bool),
+		externalWatches: make(map[string]bool),
+		Events:          make(chan Event),
+		Errors:          make(chan error),
+		done:            make(chan struct{}),
+	}
+
+	go w.readEvents()
+	return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	w.mu.Lock()
+	if w.isClosed {
+		w.mu.Unlock()
+		return nil
+	}
+	w.isClosed = true
+
+	// copy paths to remove while locked
+	var pathsToRemove = make([]string, 0, len(w.watches))
+	for name := range w.watches {
+		pathsToRemove = append(pathsToRemove, name)
+	}
+	w.mu.Unlock()
+	// unlock before calling Remove, which also locks
+
+	for _, name := range pathsToRemove {
+		w.Remove(name)
+	}
+
+	// send a "quit" message to the reader goroutine
+	close(w.done)
+
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	w.mu.Lock()
+	w.externalWatches[name] = true
+	w.mu.Unlock()
+	_, err := w.addWatch(name, noteAllEvents)
+	return err
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	name = filepath.Clean(name)
+	w.mu.Lock()
+	watchfd, ok := w.watches[name]
+	w.mu.Unlock()
+	if !ok {
+		return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
+	}
+
+	const registerRemove = unix.EV_DELETE
+	if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
+		return err
+	}
+
+	unix.Close(watchfd)
+
+	w.mu.Lock()
+	isDir := w.paths[watchfd].isDir
+	delete(w.watches, name)
+	delete(w.paths, watchfd)
+	delete(w.dirFlags, name)
+	w.mu.Unlock()
+
+	// Find all watched paths that are in this directory that are not external.
+	if isDir {
+		var pathsToRemove []string
+		w.mu.Lock()
+		for _, path := range w.paths {
+			wdir, _ := filepath.Split(path.name)
+			if filepath.Clean(wdir) == name {
+				if !w.externalWatches[path.name] {
+					pathsToRemove = append(pathsToRemove, path.name)
+				}
+			}
+		}
+		w.mu.Unlock()
+		for _, name := range pathsToRemove {
+			// Since these are internal, not much sense in propagating error
+			// to the user, as that will just confuse them with an error about
+			// a path they did not explicitly watch themselves.
+			w.Remove(name)
+		}
+	}
+
+	return nil
+}
+
+// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
+const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
+
+// keventWaitTime is the time to block on each read from kevent.
+var keventWaitTime = durationToTimespec(100 * time.Millisecond)
+
+// addWatch adds name to the watched file set.
+// The flags are interpreted as described in kevent(2).
+// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
+func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
+	var isDir bool
+	// Make ./name and name equivalent
+	name = filepath.Clean(name)
+
+	w.mu.Lock()
+	if w.isClosed {
+		w.mu.Unlock()
+		return "", errors.New("kevent instance already closed")
+	}
+	watchfd, alreadyWatching := w.watches[name]
+	// We already have a watch, but we can still override flags.
+	if alreadyWatching {
+		isDir = w.paths[watchfd].isDir
+	}
+	w.mu.Unlock()
+
+	if !alreadyWatching {
+		fi, err := os.Lstat(name)
+		if err != nil {
+			return "", err
+		}
+
+		// Don't watch sockets.
+		if fi.Mode()&os.ModeSocket == os.ModeSocket {
+			return "", nil
+		}
+
+		// Don't watch named pipes.
+		if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
+			return "", nil
+		}
+
+		// Follow symlinks.
+		// Unfortunately, Linux can add bogus symlinks to the watch list
+		// without issue, and Windows (as far as we know) can't watch
+		// symlinks at all. To maintain consistency, we act as if everything
+		// is fine: there will simply be no file events for broken symlinks.
+		// Hence the returns of nil on errors below.
+		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
+			name, err = filepath.EvalSymlinks(name)
+			if err != nil {
+				return "", nil
+			}
+
+			w.mu.Lock()
+			_, alreadyWatching = w.watches[name]
+			w.mu.Unlock()
+
+			if alreadyWatching {
+				return name, nil
+			}
+
+			fi, err = os.Lstat(name)
+			if err != nil {
+				return "", nil
+			}
+		}
+
+		watchfd, err = unix.Open(name, openMode, 0700)
+		if watchfd == -1 {
+			return "", err
+		}
+
+		isDir = fi.IsDir()
+	}
+
+	const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
+	if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
+		unix.Close(watchfd)
+		return "", err
+	}
+
+	if !alreadyWatching {
+		w.mu.Lock()
+		w.watches[name] = watchfd
+		w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
+		w.mu.Unlock()
+	}
+
+	if isDir {
+		// Watch the directory if it has not been watched before,
+		// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
+		w.mu.Lock()
+
+		watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
+			(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
+		// Store flags so this watch can be updated later
+		w.dirFlags[name] = flags
+		w.mu.Unlock()
+
+		if watchDir {
+			if err := w.watchDirectoryFiles(name); err != nil {
+				return "", err
+			}
+		}
+	}
+	return name, nil
+}
+
+// readEvents reads from kqueue and converts the received kevents into
+// Event values that it sends down the Events channel.
+func (w *Watcher) readEvents() {
+	eventBuffer := make([]unix.Kevent_t, 10)
+
+loop:
+	for {
+		// See if there is a message on the "done" channel
+		select {
+		case <-w.done:
+			break loop
+		default:
+		}
+
+		// Get new events
+		kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
+		// EINTR is okay, the syscall was interrupted before timeout expired.
+		if err != nil && err != unix.EINTR {
+			select {
+			case w.Errors <- err:
+			case <-w.done:
+				break loop
+			}
+			continue
+		}
+
+		// Flush the events we received to the Events channel
+		for len(kevents) > 0 {
+			kevent := &kevents[0]
+			watchfd := int(kevent.Ident)
+			mask := uint32(kevent.Fflags)
+			w.mu.Lock()
+			path := w.paths[watchfd]
+			w.mu.Unlock()
+			event := newEvent(path.name, mask)
+
+			if path.isDir && !(event.Op&Remove == Remove) {
+				// Double-check that the directory still exists. This can happen when
+				// we do an rm -fr on a recursively watched folder: we may receive a
+				// modification event first, even though the folder has already been
+				// deleted, and only later receive the delete event.
+				if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
+					// mark it as a delete event
+					event.Op |= Remove
+				}
+			}
+
+			if event.Op&Rename == Rename || event.Op&Remove == Remove {
+				w.Remove(event.Name)
+				w.mu.Lock()
+				delete(w.fileExists, event.Name)
+				w.mu.Unlock()
+			}
+
+			if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
+				w.sendDirectoryChangeEvents(event.Name)
+			} else {
+				// Send the event on the Events channel.
+				select {
+				case w.Events <- event:
+				case <-w.done:
+					break loop
+				}
+			}
+
+			if event.Op&Remove == Remove {
+				// Look for a file that may have overwritten this.
+				// For example, mv f1 f2 will delete f2, then create f2.
+				if path.isDir {
+					fileDir := filepath.Clean(event.Name)
+					w.mu.Lock()
+					_, found := w.watches[fileDir]
+					w.mu.Unlock()
+					if found {
+						// make sure the directory exists before we watch for changes. When we
+						// do a recursive watch and perform rm -fr, the parent directory might
+						// have gone missing, ignore the missing directory and let the
+						// upcoming delete event remove the watch from the parent directory.
+						if _, err := os.Lstat(fileDir); err == nil {
+							w.sendDirectoryChangeEvents(fileDir)
+						}
+					}
+				} else {
+					filePath := filepath.Clean(event.Name)
+					if fileInfo, err := os.Lstat(filePath); err == nil {
+						w.sendFileCreatedEventIfNew(filePath, fileInfo)
+					}
+				}
+			}
+
+			// Move to next event
+			kevents = kevents[1:]
+		}
+	}
+
+	// cleanup
+	err := unix.Close(w.kq)
+	if err != nil {
+		// The only way the previous loop can break is if w.done was closed, so we need a non-blocking send to w.Errors.
+		select {
+		case w.Errors <- err:
+		default:
+		}
+	}
+	close(w.Events)
+	close(w.Errors)
+}
+
+// newEvent returns a platform-independent Event based on kqueue Fflags.
+func newEvent(name string, mask uint32) Event {
+	e := Event{Name: name}
+	if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
+		e.Op |= Remove
+	}
+	if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
+		e.Op |= Write
+	}
+	if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
+		e.Op |= Rename
+	}
+	if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
+		e.Op |= Chmod
+	}
+	return e
+}
+
+func newCreateEvent(name string) Event {
+	return Event{Name: name, Op: Create}
+}
+
+// watchDirectoryFiles watches all the files in a directory, to mimic inotify when adding a watch on a directory.
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
+	// Get all files
+	files, err := ioutil.ReadDir(dirPath)
+	if err != nil {
+		return err
+	}
+
+	for _, fileInfo := range files {
+		filePath := filepath.Join(dirPath, fileInfo.Name())
+		filePath, err = w.internalWatch(filePath, fileInfo)
+		if err != nil {
+			return err
+		}
+
+		w.mu.Lock()
+		w.fileExists[filePath] = true
+		w.mu.Unlock()
+	}
+
+	return nil
+}
+
+// sendDirectoryChangeEvents searches the directory for newly created files
+// and sends them over the event channel. This lets the BSD version of
+// fsnotify match Linux inotify, which provides a create event for files
+// created in a watched directory.
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
+	// Get all files
+	files, err := ioutil.ReadDir(dirPath)
+	if err != nil {
+		select {
+		case w.Errors <- err:
+		case <-w.done:
+			return
+		}
+	}
+
+	// Search for new files
+	for _, fileInfo := range files {
+		filePath := filepath.Join(dirPath, fileInfo.Name())
+		err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
+
+		if err != nil {
+			return
+		}
+	}
+}
+
+// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
+	w.mu.Lock()
+	_, doesExist := w.fileExists[filePath]
+	w.mu.Unlock()
+	if !doesExist {
+		// Send create event
+		select {
+		case w.Events <- newCreateEvent(filePath):
+		case <-w.done:
+			return
+		}
+	}
+
+	// like watchDirectoryFiles (but without doing another ReadDir)
+	filePath, err = w.internalWatch(filePath, fileInfo)
+	if err != nil {
+		return err
+	}
+
+	w.mu.Lock()
+	w.fileExists[filePath] = true
+	w.mu.Unlock()
+
+	return nil
+}
+
+func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
+	if fileInfo.IsDir() {
+		// mimic Linux providing delete events for subdirectories
+		// but preserve the flags used if currently watching subdirectory
+		w.mu.Lock()
+		flags := w.dirFlags[name]
+		w.mu.Unlock()
+
+		flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
+		return w.addWatch(name, flags)
+	}
+
+	// watch file to mimic Linux inotify
+	return w.addWatch(name, noteAllEvents)
+}
+
+// kqueue creates a new kernel event queue and returns a descriptor.
+func kqueue() (kq int, err error) {
+	kq, err = unix.Kqueue()
+	if kq == -1 {
+		return kq, err
+	}
+	return kq, nil
+}
+
+// register events with the queue
+func register(kq int, fds []int, flags int, fflags uint32) error {
+	changes := make([]unix.Kevent_t, len(fds))
+
+	for i, fd := range fds {
+		// SetKevent converts int to the platform-specific types:
+		unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
+		changes[i].Fflags = fflags
+	}
+
+	// register the events
+	success, err := unix.Kevent(kq, changes, nil, nil)
+	if success == -1 {
+		return err
+	}
+	return nil
+}
+
+// read retrieves pending events, or waits until an event occurs.
+// A timeout of nil blocks indefinitely, while 0 polls the queue.
+func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
+	n, err := unix.Kevent(kq, nil, events, timeout)
+	if err != nil {
+		return nil, err
+	}
+	return events[0:n], nil
+}
+
+// durationToTimespec prepares a timeout value
+func durationToTimespec(d time.Duration) unix.Timespec {
+	return unix.NsecToTimespec(d.Nanoseconds())
+}
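The file above is the entire BSD/darwin backend: Add feeds addWatch, readEvents drains the kernel queue, and the directory re-scans synthesize the Create events that kqueue itself never reports. For orientation, here is a minimal consumer of this API — a sketch only, with a hypothetical watch path, not part of the vendored sources:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// NewWatcher starts the readEvents goroutine shown above.
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Add registers a (non-recursive) kevent watch for the path.
	if err := w.Add("/tmp/watched"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return // both channels are closed by Close()
			}
			log.Println("event:", ev)
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```

Since Close closes both channels, the ok checks double as shutdown detection.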
diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
new file mode 100644
index 000000000..7d8de1451
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+const openMode = unix.O_NONBLOCK | unix.O_RDONLY
diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
new file mode 100644
index 000000000..9139e1716
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
@@ -0,0 +1,12 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+// note: this constant is not defined on BSD
+const openMode = unix.O_EVTONLY
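These two one-constant files are the standard Go build-constraint idiom: each declares the same openMode, and the +build lines ensure exactly one definition is compiled per platform (O_EVTONLY is darwin-only, as the comment notes). A minimal sketch of the same pattern, with hypothetical file and package names:

```go
// greeting_unix.go — compiled everywhere except Windows
// +build !windows

package demo

const greeting = "hello from a Unix-like system"
```

```go
// greeting_windows.go — compiled only on Windows
// +build windows

package demo

const greeting = "hello from Windows"
```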
diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go
new file mode 100644
index 000000000..09436f31d
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/windows.go
@@ -0,0 +1,561 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package fsnotify
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+	"sync"
+	"syscall"
+	"unsafe"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events   chan Event
+	Errors   chan error
+	isClosed bool           // Set to true when Close() is first called
+	mu       sync.Mutex     // Map access
+	port     syscall.Handle // Handle to completion port
+	watches  watchMap       // Map of watches (key: i-number)
+	input    chan *input    // Inputs to the reader are sent on this channel
+	quit     chan chan<- error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
+	if e != nil {
+		return nil, os.NewSyscallError("CreateIoCompletionPort", e)
+	}
+	w := &Watcher{
+		port:    port,
+		watches: make(watchMap),
+		input:   make(chan *input, 1),
+		Events:  make(chan Event, 50),
+		Errors:  make(chan error),
+		quit:    make(chan chan<- error, 1),
+	}
+	go w.readEvents()
+	return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	if w.isClosed {
+		return nil
+	}
+	w.isClosed = true
+
+	// Send "quit" message to the reader goroutine
+	ch := make(chan error)
+	w.quit <- ch
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-ch
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	if w.isClosed {
+		return errors.New("watcher already closed")
+	}
+	in := &input{
+		op:    opAddWatch,
+		path:  filepath.Clean(name),
+		flags: sysFSALLEVENTS,
+		reply: make(chan error),
+	}
+	w.input <- in
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-in.reply
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	in := &input{
+		op:    opRemoveWatch,
+		path:  filepath.Clean(name),
+		reply: make(chan error),
+	}
+	w.input <- in
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-in.reply
+}
+
+const (
+	// Options for AddWatch
+	sysFSONESHOT = 0x80000000
+	sysFSONLYDIR = 0x1000000
+
+	// Events
+	sysFSACCESS     = 0x1
+	sysFSALLEVENTS  = 0xfff
+	sysFSATTRIB     = 0x4
+	sysFSCLOSE      = 0x18
+	sysFSCREATE     = 0x100
+	sysFSDELETE     = 0x200
+	sysFSDELETESELF = 0x400
+	sysFSMODIFY     = 0x2
+	sysFSMOVE       = 0xc0
+	sysFSMOVEDFROM  = 0x40
+	sysFSMOVEDTO    = 0x80
+	sysFSMOVESELF   = 0x800
+
+	// Special events
+	sysFSIGNORED   = 0x8000
+	sysFSQOVERFLOW = 0x4000
+)
+
+func newEvent(name string, mask uint32) Event {
+	e := Event{Name: name}
+	if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
+		e.Op |= Create
+	}
+	if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
+		e.Op |= Remove
+	}
+	if mask&sysFSMODIFY == sysFSMODIFY {
+		e.Op |= Write
+	}
+	if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
+		e.Op |= Rename
+	}
+	if mask&sysFSATTRIB == sysFSATTRIB {
+		e.Op |= Chmod
+	}
+	return e
+}
+
+const (
+	opAddWatch = iota
+	opRemoveWatch
+)
+
+const (
+	provisional uint64 = 1 << (32 + iota)
+)
+
+type input struct {
+	op    int
+	path  string
+	flags uint32
+	reply chan error
+}
+
+type inode struct {
+	handle syscall.Handle
+	volume uint32
+	index  uint64
+}
+
+type watch struct {
+	ov     syscall.Overlapped
+	ino    *inode            // i-number
+	path   string            // Directory path
+	mask   uint64            // Directory itself is being watched with these notify flags
+	names  map[string]uint64 // Map of names being watched and their notify flags
+	rename string            // Remembers the old name while renaming a file
+	buf    [4096]byte
+}
+
+type indexMap map[uint64]*watch
+type watchMap map[uint32]indexMap
+
+func (w *Watcher) wakeupReader() error {
+	e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
+	if e != nil {
+		return os.NewSyscallError("PostQueuedCompletionStatus", e)
+	}
+	return nil
+}
+
+func getDir(pathname string) (dir string, err error) {
+	attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
+	if e != nil {
+		return "", os.NewSyscallError("GetFileAttributes", e)
+	}
+	if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
+		dir = pathname
+	} else {
+		dir, _ = filepath.Split(pathname)
+		dir = filepath.Clean(dir)
+	}
+	return
+}
+
+func getIno(path string) (ino *inode, err error) {
+	h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
+		syscall.FILE_LIST_DIRECTORY,
+		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
+		nil, syscall.OPEN_EXISTING,
+		syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
+	if e != nil {
+		return nil, os.NewSyscallError("CreateFile", e)
+	}
+	var fi syscall.ByHandleFileInformation
+	if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
+		syscall.CloseHandle(h)
+		return nil, os.NewSyscallError("GetFileInformationByHandle", e)
+	}
+	ino = &inode{
+		handle: h,
+		volume: fi.VolumeSerialNumber,
+		index:  uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
+	}
+	return ino, nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) get(ino *inode) *watch {
+	if i := m[ino.volume]; i != nil {
+		return i[ino.index]
+	}
+	return nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) set(ino *inode, watch *watch) {
+	i := m[ino.volume]
+	if i == nil {
+		i = make(indexMap)
+		m[ino.volume] = i
+	}
+	i[ino.index] = watch
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) addWatch(pathname string, flags uint64) error {
+	dir, err := getDir(pathname)
+	if err != nil {
+		return err
+	}
+	if flags&sysFSONLYDIR != 0 && pathname != dir {
+		return nil
+	}
+	ino, err := getIno(dir)
+	if err != nil {
+		return err
+	}
+	w.mu.Lock()
+	watchEntry := w.watches.get(ino)
+	w.mu.Unlock()
+	if watchEntry == nil {
+		if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
+			syscall.CloseHandle(ino.handle)
+			return os.NewSyscallError("CreateIoCompletionPort", e)
+		}
+		watchEntry = &watch{
+			ino:   ino,
+			path:  dir,
+			names: make(map[string]uint64),
+		}
+		w.mu.Lock()
+		w.watches.set(ino, watchEntry)
+		w.mu.Unlock()
+		flags |= provisional
+	} else {
+		syscall.CloseHandle(ino.handle)
+	}
+	if pathname == dir {
+		watchEntry.mask |= flags
+	} else {
+		watchEntry.names[filepath.Base(pathname)] |= flags
+	}
+	if err = w.startRead(watchEntry); err != nil {
+		return err
+	}
+	if pathname == dir {
+		watchEntry.mask &= ^provisional
+	} else {
+		watchEntry.names[filepath.Base(pathname)] &= ^provisional
+	}
+	return nil
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) remWatch(pathname string) error {
+	dir, err := getDir(pathname)
+	if err != nil {
+		return err
+	}
+	ino, err := getIno(dir)
+	if err != nil {
+		return err
+	}
+	w.mu.Lock()
+	watch := w.watches.get(ino)
+	w.mu.Unlock()
+	if watch == nil {
+		return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
+	}
+	if pathname == dir {
+		w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+		watch.mask = 0
+	} else {
+		name := filepath.Base(pathname)
+		w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
+		delete(watch.names, name)
+	}
+	return w.startRead(watch)
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) deleteWatch(watch *watch) {
+	for name, mask := range watch.names {
+		if mask&provisional == 0 {
+			w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
+		}
+		delete(watch.names, name)
+	}
+	if watch.mask != 0 {
+		if watch.mask&provisional == 0 {
+			w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+		}
+		watch.mask = 0
+	}
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) startRead(watch *watch) error {
+	if e := syscall.CancelIo(watch.ino.handle); e != nil {
+		w.Errors <- os.NewSyscallError("CancelIo", e)
+		w.deleteWatch(watch)
+	}
+	mask := toWindowsFlags(watch.mask)
+	for _, m := range watch.names {
+		mask |= toWindowsFlags(m)
+	}
+	if mask == 0 {
+		if e := syscall.CloseHandle(watch.ino.handle); e != nil {
+			w.Errors <- os.NewSyscallError("CloseHandle", e)
+		}
+		w.mu.Lock()
+		delete(w.watches[watch.ino.volume], watch.ino.index)
+		w.mu.Unlock()
+		return nil
+	}
+	e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
+		uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
+	if e != nil {
+		err := os.NewSyscallError("ReadDirectoryChanges", e)
+		if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
+			// Watched directory was probably removed
+			if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
+				if watch.mask&sysFSONESHOT != 0 {
+					watch.mask = 0
+				}
+			}
+			err = nil
+		}
+		w.deleteWatch(watch)
+		w.startRead(watch)
+		return err
+	}
+	return nil
+}
+
+// readEvents reads from the I/O completion port, converts the
+// received events into Event objects and sends them via the Events channel.
+// Entry point to the I/O thread.
+func (w *Watcher) readEvents() {
+	var (
+		n, key uint32
+		ov     *syscall.Overlapped
+	)
+	runtime.LockOSThread()
+
+	for {
+		e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
+		watch := (*watch)(unsafe.Pointer(ov))
+
+		if watch == nil {
+			select {
+			case ch := <-w.quit:
+				w.mu.Lock()
+				var indexes []indexMap
+				for _, index := range w.watches {
+					indexes = append(indexes, index)
+				}
+				w.mu.Unlock()
+				for _, index := range indexes {
+					for _, watch := range index {
+						w.deleteWatch(watch)
+						w.startRead(watch)
+					}
+				}
+				var err error
+				if e := syscall.CloseHandle(w.port); e != nil {
+					err = os.NewSyscallError("CloseHandle", e)
+				}
+				close(w.Events)
+				close(w.Errors)
+				ch <- err
+				return
+			case in := <-w.input:
+				switch in.op {
+				case opAddWatch:
+					in.reply <- w.addWatch(in.path, uint64(in.flags))
+				case opRemoveWatch:
+					in.reply <- w.remWatch(in.path)
+				}
+			default:
+			}
+			continue
+		}
+
+		switch e {
+		case syscall.ERROR_MORE_DATA:
+			if watch == nil {
+				w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
+			} else {
+				// The i/o succeeded but the buffer is full.
+				// In theory we should be building up a full packet.
+				// In practice we can get away with just carrying on.
+				n = uint32(unsafe.Sizeof(watch.buf))
+			}
+		case syscall.ERROR_ACCESS_DENIED:
+			// Watched directory was probably removed
+			w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+			w.deleteWatch(watch)
+			w.startRead(watch)
+			continue
+		case syscall.ERROR_OPERATION_ABORTED:
+			// CancelIo was called on this handle
+			continue
+		default:
+			w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
+			continue
+		case nil:
+		}
+
+		var offset uint32
+		for {
+			if n == 0 {
+				w.Events <- newEvent("", sysFSQOVERFLOW)
+				w.Errors <- errors.New("short read in readEvents()")
+				break
+			}
+
+			// Point "raw" to the event in the buffer
+			raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+			buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
+			name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
+			fullname := filepath.Join(watch.path, name)
+
+			var mask uint64
+			switch raw.Action {
+			case syscall.FILE_ACTION_REMOVED:
+				mask = sysFSDELETESELF
+			case syscall.FILE_ACTION_MODIFIED:
+				mask = sysFSMODIFY
+			case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+				watch.rename = name
+			case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+				if watch.names[watch.rename] != 0 {
+					watch.names[name] |= watch.names[watch.rename]
+					delete(watch.names, watch.rename)
+					mask = sysFSMOVESELF
+				}
+			}
+
+			sendNameEvent := func() {
+				if w.sendEvent(fullname, watch.names[name]&mask) {
+					if watch.names[name]&sysFSONESHOT != 0 {
+						delete(watch.names, name)
+					}
+				}
+			}
+			if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
+				sendNameEvent()
+			}
+			if raw.Action == syscall.FILE_ACTION_REMOVED {
+				w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
+				delete(watch.names, name)
+			}
+			if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
+				if watch.mask&sysFSONESHOT != 0 {
+					watch.mask = 0
+				}
+			}
+			if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
+				fullname = filepath.Join(watch.path, watch.rename)
+				sendNameEvent()
+			}
+
+			// Move to the next event in the buffer
+			if raw.NextEntryOffset == 0 {
+				break
+			}
+			offset += raw.NextEntryOffset
+
+			// Error!
+			if offset >= n {
+				w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
+				break
+			}
+		}
+
+		if err := w.startRead(watch); err != nil {
+			w.Errors <- err
+		}
+	}
+}
+
+func (w *Watcher) sendEvent(name string, mask uint64) bool {
+	if mask == 0 {
+		return false
+	}
+	event := newEvent(name, uint32(mask))
+	select {
+	case ch := <-w.quit:
+		w.quit <- ch
+	case w.Events <- event:
+	}
+	return true
+}
+
+func toWindowsFlags(mask uint64) uint32 {
+	var m uint32
+	if mask&sysFSACCESS != 0 {
+		m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
+	}
+	if mask&sysFSMODIFY != 0 {
+		m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
+	}
+	if mask&sysFSATTRIB != 0 {
+		m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
+	}
+	if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
+		m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
+	}
+	return m
+}
+
+func toFSnotifyFlags(action uint32) uint64 {
+	switch action {
+	case syscall.FILE_ACTION_ADDED:
+		return sysFSCREATE
+	case syscall.FILE_ACTION_REMOVED:
+		return sysFSDELETE
+	case syscall.FILE_ACTION_MODIFIED:
+		return sysFSMODIFY
+	case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+		return sysFSMOVEDFROM
+	case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+		return sysFSMOVEDTO
+	}
+	return 0
+}
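The private sysFS* masks above mirror inotify's bit values, and newEvent is the only place they surface as public Op bits. A hypothetical in-package test — a sketch, not part of the vendored sources, and it would only compile under the same windows build tag — illustrates the mapping:

```go
// +build windows

package fsnotify

import "testing"

// TestNewEventMapping sketches how the private sysFS* masks
// translate into the public Op bits via newEvent above.
func TestNewEventMapping(t *testing.T) {
	e := newEvent(`C:\tmp\file.txt`, sysFSCREATE)
	if e.Op&Create != Create {
		t.Fatalf("expected Create, got %v", e.Op)
	}

	e = newEvent(`C:\tmp\file.txt`, sysFSMODIFY|sysFSATTRIB)
	if e.Op&Write != Write || e.Op&Chmod != Chmod {
		t.Fatalf("expected Write|Chmod, got %v", e.Op)
	}
}
```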
diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE
new file mode 100644
index 000000000..c33dcc7c9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+     means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of version
+        1.1 or earlier of the License, but not also under the terms of a
+        Secondary License.
+
+1.6. “Executable Form”
+
+     means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+     means a work that combines Covered Software with other material, in a separate
+     file or files, that is not Covered Software.
+
+1.8. “License”
+
+     means this document.
+
+1.9. “Licensable”
+
+     means having the right to grant, to the maximum extent possible, whether at the
+     time of the initial grant or subsequently, any and all of the rights conveyed by
+     this License.
+
+1.10. “Modifications”
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to, deletion
+        from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+      means any patent claim(s), including without limitation, method, process,
+      and apparatus claims, in any patent Licensable by such Contributor that
+      would be infringed, but for the grant of the License, by the making,
+      using, selling, offering for sale, having made, import, or transfer of
+      either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+      means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, “You” includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, “control” means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or as
+        part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its Contributions
+        or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution become
+     effective for each Contribution on the date the Contributor first distributes
+     such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under this
+     License. No additional rights or licenses will be implied from the distribution
+     or licensing of Covered Software under this License. Notwithstanding Section
+     2.1(b) above, no patent license is granted by a Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party’s
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of its
+        Contributions.
+
+     This License does not grant any rights in the trademarks, service marks, or
+     logos of any Contributor (except as may be necessary to comply with the
+     notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this License
+     (see Section 10.2) or under the terms of a Secondary License (if permitted
+     under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its Contributions
+     are its original creation(s) or it has sufficient rights to grant the
+     rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under applicable
+     copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under the
+     terms of this License. You must inform recipients that the Source Code Form
+     of the Covered Software is governed by the terms of this License, and how
+     they can obtain a copy of this License. You may not attempt to alter or
+     restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this License,
+        or sublicense it under different terms, provided that the license for
+        the Executable Form does not attempt to limit or alter the recipients’
+        rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for the
+     Covered Software. If the Larger Work is a combination of Covered Software
+     with a work governed by one or more Secondary Licenses, and the Covered
+     Software is not Incompatible With Secondary Licenses, this License permits
+     You to additionally distribute such Covered Software under the terms of
+     such Secondary License(s), so that the recipient of the Larger Work may, at
+     their option, further distribute the Covered Software under the terms of
+     either this License or such Secondary License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices (including
+     copyright notices, patent notices, disclaimers of warranty, or limitations
+     of liability) contained within the Source Code Form of the Covered
+     Software, except that You may alter any license notices to the extent
+     required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on behalf
+     of any Contributor. You must make it absolutely clear that any such
+     warranty, support, indemnity, or liability obligation is offered by You
+     alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute, judicial
+   order, or regulation then You must: (a) comply with the terms of this License
+   to the maximum extent possible; and (b) describe the limitations and the code
+   they affect. Such description must be placed in a text file included with all
+   distributions of the Covered Software under this License. Except to the
+   extent prohibited by statute or regulation, such description must be
+   sufficiently detailed for a recipient of ordinary skill to be able to
+   understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+     if such Contributor fails to notify You of the non-compliance by some
+     reasonable means prior to 60 days after You have come back into compliance.
+     Moreover, Your grants from a particular Contributor are reinstated on an
+     ongoing basis if such Contributor notifies You of the non-compliance by
+     some reasonable means, this is the first time You have received notice of
+     non-compliance with this License from such Contributor, and You become
+     compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions, counter-claims,
+     and cross-claims) alleging that a Contributor Version directly or
+     indirectly infringes any patent, then the rights granted to You by any and
+     all Contributors for the Covered Software under Section 2.1 of this License
+     shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an “as is” basis, without
+   warranty of any kind, either expressed, implied, or statutory, including,
+   without limitation, warranties that the Covered Software is free of defects,
+   merchantable, fit for a particular purpose or non-infringing. The entire
+   risk as to the quality and performance of the Covered Software is with You.
+   Should any Covered Software prove defective in any respect, You (not any
+   Contributor) assume the cost of any necessary servicing, repair, or
+   correction. This disclaimer of warranty constitutes an essential part of this
+   License. No use of  any Covered Software is authorized under this License
+   except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from such
+   party’s negligence to the extent applicable law prohibits such limitation.
+   Some jurisdictions do not allow the exclusion or limitation of incidental or
+   consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts of
+   a jurisdiction where the defendant maintains its principal place of business
+   and such litigation shall be governed by laws of that jurisdiction, without
+   reference to its conflict-of-law provisions. Nothing in this Section shall
+   prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject matter
+   hereof. If any provision of this License is held to be unenforceable, such
+   provision shall be reformed only to the extent necessary to make it
+   enforceable. Any law or regulation which provides that the language of a
+   contract shall be construed against the drafter shall not be used to construe
+   this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version of
+      the License under which You originally received the Covered Software, or
+      under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
new file mode 100644
index 000000000..bed9ebbe1
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/decoder.go
@@ -0,0 +1,729 @@
+package hcl
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	"github.com/hashicorp/hcl/hcl/parser"
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+// tagName is the struct field tag used to configure decoding into structures.
+const tagName = "hcl"
+
+var (
+	// nodeType holds a reference to the type of ast.Node
+	nodeType reflect.Type = findNodeType()
+)
+
+// Unmarshal accepts a byte slice as input and writes the
+// data to the value pointed to by v.
+func Unmarshal(bs []byte, v interface{}) error {
+	root, err := parse(bs)
+	if err != nil {
+		return err
+	}
+
+	return DecodeObject(v, root)
+}
+
+// Decode reads the given input and decodes it into the structure
+// given by `out`.
+func Decode(out interface{}, in string) error {
+	obj, err := Parse(in)
+	if err != nil {
+		return err
+	}
+
+	return DecodeObject(out, obj)
+}
+
+// DecodeObject is a lower-level version of Decode. It decodes a
+// raw Object into the given output.
+func DecodeObject(out interface{}, n ast.Node) error {
+	val := reflect.ValueOf(out)
+	if val.Kind() != reflect.Ptr {
+		return errors.New("result must be a pointer")
+	}
+
+	// If we were given a whole file, decode from its root node
+	if f, ok := n.(*ast.File); ok {
+		n = f.Node
+	}
+
+	var d decoder
+	return d.decode("root", n, val.Elem())
+}
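Decode and DecodeObject are the public entry points for everything that follows. A minimal usage sketch — the config fields and values are hypothetical, and the hcl:"..." tags correspond to tagName above:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

// Config is a hypothetical target struct; the hcl tags name the keys.
type Config struct {
	Name string `hcl:"name"`
	Port int    `hcl:"port"`
}

func main() {
	input := `
name = "lazygit"
port = 8080
`
	var cfg Config
	// Decode parses the HCL text and walks it with the decoder below.
	if err := hcl.Decode(&cfg, input); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cfg) // {Name:lazygit Port:8080}
}
```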
+
+type decoder struct {
+	stack []reflect.Kind
+}
+
+func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
+	k := result
+
+	// If we have an interface with a valid value, we use that
+	// for the check.
+	if result.Kind() == reflect.Interface {
+		elem := result.Elem()
+		if elem.IsValid() {
+			k = elem
+		}
+	}
+
+	// Push current onto stack unless it is an interface.
+	if k.Kind() != reflect.Interface {
+		d.stack = append(d.stack, k.Kind())
+
+		// Schedule a pop
+		defer func() {
+			d.stack = d.stack[:len(d.stack)-1]
+		}()
+	}
+
+	switch k.Kind() {
+	case reflect.Bool:
+		return d.decodeBool(name, node, result)
+	case reflect.Float32, reflect.Float64:
+		return d.decodeFloat(name, node, result)
+	case reflect.Int, reflect.Int32, reflect.Int64:
+		return d.decodeInt(name, node, result)
+	case reflect.Interface:
+		// When we see an interface, we build a concrete value for it ourselves
+		return d.decodeInterface(name, node, result)
+	case reflect.Map:
+		return d.decodeMap(name, node, result)
+	case reflect.Ptr:
+		return d.decodePtr(name, node, result)
+	case reflect.Slice:
+		return d.decodeSlice(name, node, result)
+	case reflect.String:
+		return d.decodeString(name, node, result)
+	case reflect.Struct:
+		return d.decodeStruct(name, node, result)
+	default:
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()),
+		}
+	}
+}
+
+func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
+	switch n := node.(type) {
+	case *ast.LiteralType:
+		if n.Token.Type == token.BOOL {
+			v, err := strconv.ParseBool(n.Token.Text)
+			if err != nil {
+				return err
+			}
+
+			result.Set(reflect.ValueOf(v))
+			return nil
+		}
+	}
+
+	return &parser.PosError{
+		Pos: node.Pos(),
+		Err: fmt.Errorf("%s: unknown type %T", name, node),
+	}
+}
+
+func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
+	switch n := node.(type) {
+	case *ast.LiteralType:
+		if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER {
+			v, err := strconv.ParseFloat(n.Token.Text, 64)
+			if err != nil {
+				return err
+			}
+
+			result.Set(reflect.ValueOf(v).Convert(result.Type()))
+			return nil
+		}
+	}
+
+	return &parser.PosError{
+		Pos: node.Pos(),
+		Err: fmt.Errorf("%s: unknown type %T", name, node),
+	}
+}
+
+func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error {
+	switch n := node.(type) {
+	case *ast.LiteralType:
+		switch n.Token.Type {
+		case token.NUMBER:
+			v, err := strconv.ParseInt(n.Token.Text, 0, 0)
+			if err != nil {
+				return err
+			}
+
+			if result.Kind() == reflect.Interface {
+				result.Set(reflect.ValueOf(int(v)))
+			} else {
+				result.SetInt(v)
+			}
+			return nil
+		case token.STRING:
+			v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
+			if err != nil {
+				return err
+			}
+
+			if result.Kind() == reflect.Interface {
+				result.Set(reflect.ValueOf(int(v)))
+			} else {
+				result.SetInt(v)
+			}
+			return nil
+		}
+	}
+
+	return &parser.PosError{
+		Pos: node.Pos(),
+		Err: fmt.Errorf("%s: unknown type %T", name, node),
+	}
+}
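Note the token.STRING case above: quoted numbers decode into integer fields too, via strconv.ParseInt. A quick sketch of that coercion (hypothetical field name, errors elided for brevity):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

func main() {
	var a, b struct {
		Port int `hcl:"port"`
	}
	// decodeInt accepts token.NUMBER and token.STRING alike,
	// so both inputs populate Port with 8080.
	_ = hcl.Decode(&a, `port = 8080`)   // NUMBER token
	_ = hcl.Decode(&b, `port = "8080"`) // STRING token
	fmt.Println(a.Port, b.Port)         // 8080 8080
}
```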
+
+func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error {
+	// When we see an ast.Node, we retain the value to enable deferred decoding.
+	// This is very useful when we want to preserve ast.Node information,
+	// such as Pos.
+	if result.Type() == nodeType && result.CanSet() {
+		result.Set(reflect.ValueOf(node))
+		return nil
+	}
+
+	var set reflect.Value
+	redecode := true
+
+	// For the type switch below, an ObjectType should just be treated as its
+	// list. We use a temporary variable because we still want to pass the
+	// real node into the recursive decode.
+	testNode := node
+	if ot, ok := node.(*ast.ObjectType); ok {
+		testNode = ot.List
+	}
+
+	switch n := testNode.(type) {
+	case *ast.ObjectList:
+		// If we're at the root or we're directly within a slice, then we
+		// decode objects into map[string]interface{}, otherwise we decode
+		// them into lists.
+		if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+			var temp map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeMap(
+				reflect.MapOf(
+					reflect.TypeOf(""),
+					tempVal.Type().Elem()))
+
+			set = result
+		} else {
+			var temp []map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeSlice(
+				reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
+			set = result
+		}
+	case *ast.ObjectType:
+		// If we're at the root or we're directly within a slice, then we
+		// decode objects into map[string]interface{}, otherwise we decode
+		// them into lists.
+		if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+			var temp map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeMap(
+				reflect.MapOf(
+					reflect.TypeOf(""),
+					tempVal.Type().Elem()))
+
+			set = result
+		} else {
+			var temp []map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeSlice(
+				reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
+			set = result
+		}
+	case *ast.ListType:
+		var temp []interface{}
+		tempVal := reflect.ValueOf(temp)
+		result := reflect.MakeSlice(
+			reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
+		set = result
+	case *ast.LiteralType:
+		switch n.Token.Type {
+		case token.BOOL:
+			var result bool
+			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+		case token.FLOAT:
+			var result float64
+			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+		case token.NUMBER:
+			var result int
+			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+		case token.STRING, token.HEREDOC:
+			set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
+		default:
+			return &parser.PosError{
+				Pos: node.Pos(),
+				Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
+			}
+		}
+	default:
+		return fmt.Errorf(
+			"%s: cannot decode into interface: %T",
+			name, node)
+	}
+
+	// Set the result to what it's supposed to be, then reset
+	// result so we don't reflect into this method anymore.
+	result.Set(set)
+
+	if redecode {
+		// Revisit the node so that we can use the newly instantiated
+		// thing and populate it.
+		if err := d.decode(name, node, result); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
+	if item, ok := node.(*ast.ObjectItem); ok {
+		node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+	}
+
+	if ot, ok := node.(*ast.ObjectType); ok {
+		node = ot.List
+	}
+
+	n, ok := node.(*ast.ObjectList)
+	if !ok {
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
+		}
+	}
+
+	// If we have an interface, then we can address the interface,
+	// but not the slice itself, so get the element but set the interface
+	set := result
+	if result.Kind() == reflect.Interface {
+		result = result.Elem()
+	}
+
+	resultType := result.Type()
+	resultElemType := resultType.Elem()
+	resultKeyType := resultType.Key()
+	if resultKeyType.Kind() != reflect.String {
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("%s: map must have string keys", name),
+		}
+	}
+
+	// Make a map if it is nil
+	resultMap := result
+	if result.IsNil() {
+		resultMap = reflect.MakeMap(
+			reflect.MapOf(resultKeyType, resultElemType))
+	}
+
+	// Go through each element and decode it.
+	done := make(map[string]struct{})
+	for _, item := range n.Items {
+		if item.Val == nil {
+			continue
+		}
+
+		// github.com/hashicorp/terraform/issue/5740
+		if len(item.Keys) == 0 {
+			return &parser.PosError{
+				Pos: node.Pos(),
+				Err: fmt.Errorf("%s: map must have string keys", name),
+			}
+		}
+
+		// Get the key we're dealing with, which is the first item
+		keyStr := item.Keys[0].Token.Value().(string)
+
+		// If we've already processed this key, then ignore it
+		if _, ok := done[keyStr]; ok {
+			continue
+		}
+
+		// Determine the value. If we have more than one key, then we
+		// get the objectlist of only these keys.
+		itemVal := item.Val
+		if len(item.Keys) > 1 {
+			itemVal = n.Filter(keyStr)
+			done[keyStr] = struct{}{}
+		}
+
+		// Make the field name
+		fieldName := fmt.Sprintf("%s.%s", name, keyStr)
+
+		// Get the key/value as reflection values
+		key := reflect.ValueOf(keyStr)
+		val := reflect.Indirect(reflect.New(resultElemType))
+
+		// If we have a pre-existing value in the map, use that
+		oldVal := resultMap.MapIndex(key)
+		if oldVal.IsValid() {
+			val.Set(oldVal)
+		}
+
+		// Decode!
+		if err := d.decode(fieldName, itemVal, val); err != nil {
+			return err
+		}
+
+		// Set the value on the map
+		resultMap.SetMapIndex(key, val)
+	}
+
+	// Set the final map if we can
+	set.Set(resultMap)
+	return nil
+}
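decodeMap insists on string keys and, when an item has more than one key, merges repeated entries via n.Filter. A minimal sketch of decoding into a map, assuming a hypothetical tags object:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

func main() {
	input := `
tags = {
	env  = "dev"
	team = "tools"
}
`
	var out struct {
		Tags map[string]string `hcl:"tags"`
	}
	if err := hcl.Decode(&out, input); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Tags["env"], out.Tags["team"]) // dev tools
}
```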
+
+func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error {
+	// Create an element of the concrete (non pointer) type and decode
+	// into that. Then set the value of the pointer to this type.
+	resultType := result.Type()
+	resultElemType := resultType.Elem()
+	val := reflect.New(resultElemType)
+	if err := d.decode(name, node, reflect.Indirect(val)); err != nil {
+		return err
+	}
+
+	result.Set(val)
+	return nil
+}
+
+func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
+	// If we have an interface, then we can address the interface,
+	// but not the slice itself, so get the element but set the interface
+	set := result
+	if result.Kind() == reflect.Interface {
+		result = result.Elem()
+	}
+	// Create the slice if it is nil
+	resultType := result.Type()
+	resultElemType := resultType.Elem()
+	if result.IsNil() {
+		resultSliceType := reflect.SliceOf(resultElemType)
+		result = reflect.MakeSlice(
+			resultSliceType, 0, 0)
+	}
+
+	// Figure out the items we'll be copying into the slice
+	var items []ast.Node
+	switch n := node.(type) {
+	case *ast.ObjectList:
+		items = make([]ast.Node, len(n.Items))
+		for i, item := range n.Items {
+			items[i] = item
+		}
+	case *ast.ObjectType:
+		items = []ast.Node{n}
+	case *ast.ListType:
+		items = n.List
+	default:
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("unknown slice type: %T", node),
+		}
+	}
+
+	for i, item := range items {
+		fieldName := fmt.Sprintf("%s[%d]", name, i)
+
+		// Decode
+		val := reflect.Indirect(reflect.New(resultElemType))
+
+		// if item is an object that was decoded from ambiguous JSON and
+		// flattened, make sure it's expanded if it needs to decode into a
+		// defined structure.
+		item := expandObject(item, val)
+
+		if err := d.decode(fieldName, item, val); err != nil {
+			return err
+		}
+
+		// Append it onto the slice
+		result = reflect.Append(result, val)
+	}
+
+	set.Set(result)
+	return nil
+}
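decodeSlice is what lets repeated blocks accumulate into a slice of structs. A sketch, assuming a hypothetical service block:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

// Service is a hypothetical element type for the repeated block.
type Service struct {
	Name string `hcl:"name"`
}

func main() {
	input := `
service {
	name = "api"
}
service {
	name = "worker"
}
`
	var out struct {
		Services []Service `hcl:"service"`
	}
	if err := hcl.Decode(&out, input); err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(out.Services)) // 2
}
```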
+
+// expandObject detects if an ambiguous JSON object was flattened to a List which
+// should be decoded into a struct, and expands the ast to properly decode it.
+func expandObject(node ast.Node, result reflect.Value) ast.Node {
+	item, ok := node.(*ast.ObjectItem)
+	if !ok {
+		return node
+	}
+
+	elemType := result.Type()
+
+	// our target type must be a struct
+	switch elemType.Kind() {
+	case reflect.Ptr:
+		switch elemType.Elem().Kind() {
+		case reflect.Struct:
+			//OK
+		default:
+			return node
+		}
+	case reflect.Struct:
+		//OK
+	default:
+		return node
+	}
+
+	// A list value will have a key and field name. If it had more fields,
+	// it wouldn't have been flattened.
+	if len(item.Keys) != 2 {
+		return node
+	}
+
+	keyToken := item.Keys[0].Token
+	item.Keys = item.Keys[1:]
+
+	// we need to un-flatten the ast enough to decode
+	newNode := &ast.ObjectItem{
+		Keys: []*ast.ObjectKey{
+			&ast.ObjectKey{
+				Token: keyToken,
+			},
+		},
+		Val: &ast.ObjectType{
+			List: &ast.ObjectList{
+				Items: []*ast.ObjectItem{item},
+			},
+		},
+	}
+
+	return newNode
+}
+
+func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
+	switch n := node.(type) {
+	case *ast.LiteralType:
+		switch n.Token.Type {
+		case token.NUMBER:
+			result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type()))
+			return nil
+		case token.STRING, token.HEREDOC:
+			result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type()))
+			return nil
+		}
+	}
+
+	return &parser.PosError{
+		Pos: node.Pos(),
+		Err: fmt.Errorf("%s: unknown type for string %T", name, node),
+	}
+}
+
+func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error {
+	var item *ast.ObjectItem
+	if it, ok := node.(*ast.ObjectItem); ok {
+		item = it
+		node = it.Val
+	}
+
+	if ot, ok := node.(*ast.ObjectType); ok {
+		node = ot.List
+	}
+
+	// Handle the special case where the object itself is a literal. Previously
+	// the yacc parser would always ensure top-level elements were arrays. The new
+	// parser does not make the same guarantees, thus we need to convert any
+	// top-level literal elements into a list.
+	if _, ok := node.(*ast.LiteralType); ok && item != nil {
+		node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+	}
+
+	list, ok := node.(*ast.ObjectList)
+	if !ok {
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node),
+		}
+	}
+
+	// This slice will keep track of all the structs we'll be decoding.
+	// There can be more than one struct if there are embedded structs
+	// that are squashed.
+	structs := make([]reflect.Value, 1, 5)
+	structs[0] = result
+
+	// Compile the list of all the fields that we're going to be decoding
+	// from all the structs.
+	type field struct {
+		field reflect.StructField
+		val   reflect.Value
+	}
+	fields := []field{}
+	for len(structs) > 0 {
+		structVal := structs[0]
+		structs = structs[1:]
+
+		structType := structVal.Type()
+		for i := 0; i < structType.NumField(); i++ {
+			fieldType := structType.Field(i)
+			tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
+
+			// Ignore fields with tag name "-"
+			if tagParts[0] == "-" {
+				continue
+			}
+
+			if fieldType.Anonymous {
+				fieldKind := fieldType.Type.Kind()
+				if fieldKind != reflect.Struct {
+					return &parser.PosError{
+						Pos: node.Pos(),
+						Err: fmt.Errorf("%s: unsupported type to struct: %s",
+							fieldType.Name, fieldKind),
+					}
+				}
+
+				// We have an embedded field. We "squash" the fields down
+				// if specified in the tag.
+				squash := false
+				for _, tag := range tagParts[1:] {
+					if tag == "squash" {
+						squash = true
+						break
+					}
+				}
+
+				if squash {
+					structs = append(
+						structs, result.FieldByName(fieldType.Name))
+					continue
+				}
+			}
+
+			// Normal struct field, store it away
+			fields = append(fields, field{fieldType, structVal.Field(i)})
+		}
+	}
+
+	usedKeys := make(map[string]struct{})
+	decodedFields := make([]string, 0, len(fields))
+	decodedFieldsVal := make([]reflect.Value, 0)
+	unusedKeysVal := make([]reflect.Value, 0)
+	for _, f := range fields {
+		field, fieldValue := f.field, f.val
+		if !fieldValue.IsValid() {
+			// This should never happen
+			panic("field is not valid")
+		}
+
+		// If we can't set the field, then it is unexported or something,
+		// and we just continue onwards.
+		if !fieldValue.CanSet() {
+			continue
+		}
+
+		fieldName := field.Name
+
+		tagValue := field.Tag.Get(tagName)
+		tagParts := strings.SplitN(tagValue, ",", 2)
+		if len(tagParts) >= 2 {
+			switch tagParts[1] {
+			case "decodedFields":
+				decodedFieldsVal = append(decodedFieldsVal, fieldValue)
+				continue
+			case "key":
+				if item == nil {
+					return &parser.PosError{
+						Pos: node.Pos(),
+						Err: fmt.Errorf("%s: %s asked for 'key', impossible",
+							name, fieldName),
+					}
+				}
+
+				fieldValue.SetString(item.Keys[0].Token.Value().(string))
+				continue
+			case "unusedKeys":
+				unusedKeysVal = append(unusedKeysVal, fieldValue)
+				continue
+			}
+		}
+
+		if tagParts[0] != "" {
+			fieldName = tagParts[0]
+		}
+
+		// Determine the element we'll use to decode. If it is a single
+		// match (only object with the field), then we decode it exactly.
+		// If it is a prefix match, then we decode the matches.
+		filter := list.Filter(fieldName)
+
+		prefixMatches := filter.Children()
+		matches := filter.Elem()
+		if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
+			continue
+		}
+
+		// Track the used key
+		usedKeys[fieldName] = struct{}{}
+
+		// Create the field name and decode. We range over the elements
+		// because we actually want the value.
+		fieldName = fmt.Sprintf("%s.%s", name, fieldName)
+		if len(prefixMatches.Items) > 0 {
+			if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil {
+				return err
+			}
+		}
+		for _, match := range matches.Items {
+			var decodeNode ast.Node = match.Val
+			if ot, ok := decodeNode.(*ast.ObjectType); ok {
+				decodeNode = &ast.ObjectList{Items: ot.List.Items}
+			}
+
+			if err := d.decode(fieldName, decodeNode, fieldValue); err != nil {
+				return err
+			}
+		}
+
+		decodedFields = append(decodedFields, field.Name)
+	}
+
+	if len(decodedFieldsVal) > 0 {
+		// Sort it so that it is deterministic
+		sort.Strings(decodedFields)
+
+		for _, v := range decodedFieldsVal {
+			v.Set(reflect.ValueOf(decodedFields))
+		}
+	}
+
+	return nil
+}
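+
+// For illustration, a struct using the tags recognized above (a sketch; the
+// type and field names are hypothetical):
+//
+//	type Service struct {
+//		Name    string   `hcl:",key"`           // filled with the block's key
+//		Ports   []int    `hcl:"ports"`          // decoded from the "ports" key
+//		Ignored string   `hcl:"-"`              // skipped entirely
+//		Decoded []string `hcl:",decodedFields"` // receives the decoded field names
+//	}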
+
+// findNodeType returns the type of ast.Node
+func findNodeType() reflect.Type {
+	var nodeContainer struct {
+		Node ast.Node
+	}
+	value := reflect.ValueOf(nodeContainer).FieldByName("Node")
+	return value.Type()
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go
new file mode 100644
index 000000000..575a20b50
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl.go
@@ -0,0 +1,11 @@
+// Package hcl decodes HCL into usable Go structures.
+//
+// hcl input can come in either pure HCL format or JSON format.
+// It can be parsed into an AST, and then decoded into a structure,
+// or it can be decoded directly from a string into a structure.
+//
+// If you choose to parse HCL into a raw AST, the benefit is that you
+// can write custom visitor implementations to implement custom
+// semantic checks. By default, HCL does not perform any semantic
+// checks.
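+//
+// A minimal decoding sketch (the Config struct and the input string are
+// illustrative, not part of this package):
+//
+//	type Config struct {
+//		Name string `hcl:"name"`
+//	}
+//
+//	var c Config
+//	if err := Decode(&c, `name = "example"`); err != nil {
+//		// handle the parse/decode error
+//	}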
+package hcl
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
new file mode 100644
index 000000000..6e5ef654b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
@@ -0,0 +1,219 @@
+// Package ast declares the types used to represent syntax trees for HCL
+// (HashiCorp Configuration Language)
+package ast
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+// Node is an element in the abstract syntax tree.
+type Node interface {
+	node()
+	Pos() token.Pos
+}
+
+func (File) node()         {}
+func (ObjectList) node()   {}
+func (ObjectKey) node()    {}
+func (ObjectItem) node()   {}
+func (Comment) node()      {}
+func (CommentGroup) node() {}
+func (ObjectType) node()   {}
+func (LiteralType) node()  {}
+func (ListType) node()     {}
+
+// File represents a single HCL file
+type File struct {
+	Node     Node            // usually a *ObjectList
+	Comments []*CommentGroup // list of all comments in the source
+}
+
+func (f *File) Pos() token.Pos {
+	return f.Node.Pos()
+}
+
+// ObjectList represents a list of ObjectItems. An HCL file itself is an
+// ObjectList.
+type ObjectList struct {
+	Items []*ObjectItem
+}
+
+func (o *ObjectList) Add(item *ObjectItem) {
+	o.Items = append(o.Items, item)
+}
+
+// Filter filters out the objects with the given key list as a prefix.
+//
+// The returned list of objects contains ObjectItems where the keys have
+// this prefix already stripped off. This might result in objects with
+// zero-length key lists if they have no children.
+//
+// If no matches are found, an empty ObjectList (non-nil) is returned.
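+//
+// For example, given items with keys ["foo" "bar"] and ["foo" "baz"],
+// Filter("foo") returns an ObjectList whose two items have keys ["bar"]
+// and ["baz"] respectively.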
+func (o *ObjectList) Filter(keys ...string) *ObjectList {
+	var result ObjectList
+	for _, item := range o.Items {
+		// If there aren't enough keys, then ignore this
+		if len(item.Keys) < len(keys) {
+			continue
+		}
+
+		match := true
+		for i, key := range item.Keys[:len(keys)] {
+			key := key.Token.Value().(string)
+			if key != keys[i] && !strings.EqualFold(key, keys[i]) {
+				match = false
+				break
+			}
+		}
+		if !match {
+			continue
+		}
+
+		// Strip off the prefix from the children
+		newItem := *item
+		newItem.Keys = newItem.Keys[len(keys):]
+		result.Add(&newItem)
+	}
+
+	return &result
+}
+
+// Children returns further nested objects (key length > 0) within this
+// ObjectList. This should be used with Filter to get at child items.
+func (o *ObjectList) Children() *ObjectList {
+	var result ObjectList
+	for _, item := range o.Items {
+		if len(item.Keys) > 0 {
+			result.Add(item)
+		}
+	}
+
+	return &result
+}
+
+// Elem returns items in the list that are direct element assignments
+// (key length == 0). This should be used with Filter to get at elements.
+func (o *ObjectList) Elem() *ObjectList {
+	var result ObjectList
+	for _, item := range o.Items {
+		if len(item.Keys) == 0 {
+			result.Add(item)
+		}
+	}
+
+	return &result
+}
+
+func (o *ObjectList) Pos() token.Pos {
+	// returns the position of the first item
+	return o.Items[0].Pos()
+}
+
+// ObjectItem represents an HCL Object Item. An item is represented with a key
+// (or keys). It can be an assignment or an object (both normal and nested)
+type ObjectItem struct {
+	// keys is only one element long if it's of type assignment. If it's a
+	// nested object it can be larger than one. In that case "assign" is
+	// invalid as there are no assignments for a nested object.
+	Keys []*ObjectKey
+
+	// assign contains the position of "=", if any
+	Assign token.Pos
+
+	// val is the item itself. It can be an object, list, number, bool or a
+	// string. If key length is larger than one, val can only be of type
+	// Object.
+	Val Node
+
+	LeadComment *CommentGroup // associated lead comment
+	LineComment *CommentGroup // associated line comment
+}
+
+func (o *ObjectItem) Pos() token.Pos {
+	// I'm not entirely sure what causes this, but removing this causes
+	// a test failure. We should investigate at some point.
+	if len(o.Keys) == 0 {
+		return token.Pos{}
+	}
+
+	return o.Keys[0].Pos()
+}
+
+// ObjectKey represents an object key, which is either an identifier or a string.
+type ObjectKey struct {
+	Token token.Token
+}
+
+func (o *ObjectKey) Pos() token.Pos {
+	return o.Token.Pos
+}
+
+// LiteralType represents a literal of basic type. Valid types are:
+// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
+type LiteralType struct {
+	Token token.Token
+
+	// comment types, only used when in a list
+	LeadComment *CommentGroup
+	LineComment *CommentGroup
+}
+
+func (l *LiteralType) Pos() token.Pos {
+	return l.Token.Pos
+}
+
+// ListType represents an HCL list type
+type ListType struct {
+	Lbrack token.Pos // position of "["
+	Rbrack token.Pos // position of "]"
+	List   []Node    // the elements in lexical order
+}
+
+func (l *ListType) Pos() token.Pos {
+	return l.Lbrack
+}
+
+func (l *ListType) Add(node Node) {
+	l.List = append(l.List, node)
+}
+
+// ObjectType represents an HCL Object Type
+type ObjectType struct {
+	Lbrace token.Pos   // position of "{"
+	Rbrace token.Pos   // position of "}"
+	List   *ObjectList // the nodes in lexical order
+}
+
+func (o *ObjectType) Pos() token.Pos {
+	return o.Lbrace
+}
+
+// Comment node represents a single //, #-style or /* */-style comment
+type Comment struct {
+	Start token.Pos // position of / or #
+	Text  string
+}
+
+func (c *Comment) Pos() token.Pos {
+	return c.Start
+}
+
+// CommentGroup node represents a sequence of comments with no other tokens and
+// no empty lines between.
+type CommentGroup struct {
+	List []*Comment // len(List) > 0
+}
+
+func (c *CommentGroup) Pos() token.Pos {
+	return c.List[0].Pos()
+}
+
+//-------------------------------------------------------------------
+// GoStringer
+//-------------------------------------------------------------------
+
+func (o *ObjectKey) GoString() string  { return fmt.Sprintf("*%#v", *o) }
+func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
new file mode 100644
index 000000000..ba07ad42b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
@@ -0,0 +1,52 @@
+package ast
+
+import "fmt"
+
+// WalkFunc describes a function to be called for each node during a Walk. The
+// returned node can be used to rewrite the AST. Walking stops if the returned
+// bool is false.
+type WalkFunc func(Node) (Node, bool)
+
+// Walk traverses an AST in depth-first order: It starts by calling fn(node);
+// node must not be nil. If fn returns true, Walk invokes fn recursively for
+// each of the non-nil children of node, followed by a call of fn(nil). The
+// returned node of fn can be used to rewrite the passed node to fn.
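+//
+// A minimal rewrite sketch (illustrative; file is an already parsed *File):
+// upper-case the text of every literal token while keeping the tree intact.
+//
+//	rewritten := Walk(file.Node, func(n Node) (Node, bool) {
+//		if lit, ok := n.(*LiteralType); ok {
+//			lit.Token.Text = strings.ToUpper(lit.Token.Text)
+//		}
+//		return n, true
+//	})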
+func Walk(node Node, fn WalkFunc) Node {
+	rewritten, ok := fn(node)
+	if !ok {
+		return rewritten
+	}
+
+	switch n := node.(type) {
+	case *File:
+		n.Node = Walk(n.Node, fn)
+	case *ObjectList:
+		for i, item := range n.Items {
+			n.Items[i] = Walk(item, fn).(*ObjectItem)
+		}
+	case *ObjectKey:
+		// nothing to do
+	case *ObjectItem:
+		for i, k := range n.Keys {
+			n.Keys[i] = Walk(k, fn).(*ObjectKey)
+		}
+
+		if n.Val != nil {
+			n.Val = Walk(n.Val, fn)
+		}
+	case *LiteralType:
+		// nothing to do
+	case *ListType:
+		for i, l := range n.List {
+			n.List[i] = Walk(l, fn)
+		}
+	case *ObjectType:
+		n.List = Walk(n.List, fn).(*ObjectList)
+	default:
+		// should we panic here?
+		fmt.Printf("unknown type: %T\n", n)
+	}
+
+	fn(nil)
+	return rewritten
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
new file mode 100644
index 000000000..5c99381df
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
@@ -0,0 +1,17 @@
+package parser
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+// PosError is a parse error that contains a position.
+type PosError struct {
+	Pos token.Pos
+	Err error
+}
+
+func (e *PosError) Error() string {
+	return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
new file mode 100644
index 000000000..64c83bcfb
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
@@ -0,0 +1,532 @@
+// Package parser implements a parser for HCL (HashiCorp Configuration
+// Language)
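+//
+// A minimal usage sketch (the input string is illustrative):
+//
+//	f, err := Parse([]byte(`gui { theme = "dark" }`))
+//	if err != nil {
+//		// handle the error (typically a *PosError carrying a position)
+//	}
+//	_ = f.Node // usually an *ast.ObjectList of the parsed items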
+package parser
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	"github.com/hashicorp/hcl/hcl/scanner"
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+type Parser struct {
+	sc *scanner.Scanner
+
+	// Last read token
+	tok       token.Token
+	commaPrev token.Token
+
+	comments    []*ast.CommentGroup
+	leadComment *ast.CommentGroup // last lead comment
+	lineComment *ast.CommentGroup // last line comment
+
+	enableTrace bool
+	indent      int
+	n           int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+	return &Parser{
+		sc: scanner.New(src),
+	}
+}
+
+// Parse parses the given source and returns the resulting abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+	// normalize all line endings
+	// since the scanner and output only work with "\n" line endings, we may
+	// end up with dangling "\r" characters in the parsed data.
+	src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
+
+	p := newParser(src)
+	return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse parses the parser's source and returns the abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+	f := &ast.File{}
+	var err, scerr error
+	p.sc.Error = func(pos token.Pos, msg string) {
+		scerr = &PosError{Pos: pos, Err: errors.New(msg)}
+	}
+
+	f.Node, err = p.objectList(false)
+	if scerr != nil {
+		return nil, scerr
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	f.Comments = p.comments
+	return f, nil
+}
+
+// objectList parses a list of items within an object (generally k/v pairs).
+// The parameter" obj" tells this whether to we are within an object (braces:
+// '{', '}') or just at the top level. If we're within an object, we end
+// at an RBRACE.
+func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
+	defer un(trace(p, "ParseObjectList"))
+	node := &ast.ObjectList{}
+
+	for {
+		if obj {
+			tok := p.scan()
+			p.unscan()
+			if tok.Type == token.RBRACE {
+				break
+			}
+		}
+
+		n, err := p.objectItem()
+		if err == errEofToken {
+			break // we are finished
+		}
+
+		// we don't return a nil node, because we might want to use the
+		// already collected items.
+		if err != nil {
+			return node, err
+		}
+
+		node.Add(n)
+
+		// object lists can be optionally comma-delimited e.g. when a list of maps
+		// is being expressed, so a comma is allowed here - it's simply consumed
+		tok := p.scan()
+		if tok.Type != token.COMMA {
+			p.unscan()
+		}
+	}
+	return node, nil
+}
+
+func (p *Parser) consumeComment() (comment *ast.Comment, endline int) {
+	endline = p.tok.Pos.Line
+
+	// count the endline if it's a multiline comment, i.e. starting with /*
+	if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' {
+		// don't use range here - no need to decode Unicode code points
+		for i := 0; i < len(p.tok.Text); i++ {
+			if p.tok.Text[i] == '\n' {
+				endline++
+			}
+		}
+	}
+
+	comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text}
+	p.tok = p.sc.Scan()
+	return
+}
+
+func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
+	var list []*ast.Comment
+	endline = p.tok.Pos.Line
+
+	for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n {
+		var comment *ast.Comment
+		comment, endline = p.consumeComment()
+		list = append(list, comment)
+	}
+
+	// add comment group to the comments list
+	comments = &ast.CommentGroup{List: list}
+	p.comments = append(p.comments, comments)
+
+	return
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+	defer un(trace(p, "ParseObjectItem"))
+
+	keys, err := p.objectKey()
+	if len(keys) > 0 && err == errEofToken {
+		// We ignore eof token here since it is an error if we didn't
+		// receive a value (but we did receive a key) for the item.
+		err = nil
+	}
+	if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
+		// This is a strange boolean statement, but what it means is:
+		// We have keys with no value, and we're likely in an object
+		// (since RBrace ends an object). For this, we set err to nil so
+		// we continue and get the error below of having the wrong value
+		// type.
+		err = nil
+
+		// Reset the token type so we don't think it completed fine. See
+		// objectType which uses p.tok.Type to check if we're done with
+		// the object.
+		p.tok.Type = token.EOF
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	o := &ast.ObjectItem{
+		Keys: keys,
+	}
+
+	if p.leadComment != nil {
+		o.LeadComment = p.leadComment
+		p.leadComment = nil
+	}
+
+	switch p.tok.Type {
+	case token.ASSIGN:
+		o.Assign = p.tok.Pos
+		o.Val, err = p.object()
+		if err != nil {
+			return nil, err
+		}
+	case token.LBRACE:
+		o.Val, err = p.objectType()
+		if err != nil {
+			return nil, err
+		}
+	default:
+		keyStr := make([]string, 0, len(keys))
+		for _, k := range keys {
+			keyStr = append(keyStr, k.Token.Text)
+		}
+
+		return nil, &PosError{
+			Pos: p.tok.Pos,
+			Err: fmt.Errorf(
+				"key '%s' expected start of object ('{') or assignment ('=')",
+				strings.Join(keyStr, " ")),
+		}
+	}
+
+	// key=#comment
+	// val
+	if p.lineComment != nil {
+		o.LineComment, p.lineComment = p.lineComment, nil
+	}
+
+	// do a look-ahead for line comment
+	p.scan()
+	if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
+		o.LineComment = p.lineComment
+		p.lineComment = nil
+	}
+	p.unscan()
+	return o, nil
+}
+
+// objectKey parses an object key and returns an ObjectKey AST
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+	keyCount := 0
+	keys := make([]*ast.ObjectKey, 0)
+
+	for {
+		tok := p.scan()
+		switch tok.Type {
+		case token.EOF:
+			// It is very important to also return the keys here as well as
+			// the error. This is because we need to be able to tell if we
+			// did parse keys prior to finding the EOF, or if we just found
+			// a bare EOF.
+			return keys, errEofToken
+		case token.ASSIGN:
+			// assignment or object only, but not nested objects. This is not
+			// allowed: `foo bar = {}`
+			if keyCount > 1 {
+				return nil, &PosError{
+					Pos: p.tok.Pos,
+					Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
+				}
+			}
+
+			if keyCount == 0 {
+				return nil, &PosError{
+					Pos: p.tok.Pos,
+					Err: errors.New("no object keys found!"),
+				}
+			}
+
+			return keys, nil
+		case token.LBRACE:
+			var err error
+
+			// If we have no keys, then it is a syntax error. i.e. {{}} is not
+			// allowed.
+			if len(keys) == 0 {
+				err = &PosError{
+					Pos: p.tok.Pos,
+					Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
+				}
+			}
+
+			// object
+			return keys, err
+		case token.IDENT, token.STRING:
+			keyCount++
+			keys = append(keys, &ast.ObjectKey{Token: p.tok})
+		case token.ILLEGAL:
+			return keys, &PosError{
+				Pos: p.tok.Pos,
+				Err: fmt.Errorf("illegal character"),
+			}
+		default:
+			return keys, &PosError{
+				Pos: p.tok.Pos,
+				Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
+			}
+		}
+	}
+}
+
+// object parses any type of object, such as number, bool, string, object or
+// list.
+func (p *Parser) object() (ast.Node, error) {
+	defer un(trace(p, "ParseType"))
+	tok := p.scan()
+
+	switch tok.Type {
+	case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
+		return p.literalType()
+	case token.LBRACE:
+		return p.objectType()
+	case token.LBRACK:
+		return p.listType()
+	case token.COMMENT:
+		// implement comment
+	case token.EOF:
+		return nil, errEofToken
+	}
+
+	return nil, &PosError{
+		Pos: tok.Pos,
+		Err: fmt.Errorf("Unknown token: %+v", tok),
+	}
+}
+
+// objectType parses an object type and returns a ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+	defer un(trace(p, "ParseObjectType"))
+
+	// we assume that the currently scanned token is a LBRACE
+	o := &ast.ObjectType{
+		Lbrace: p.tok.Pos,
+	}
+
+	l, err := p.objectList(true)
+
+	// if we hit RBRACE, we are good to go (it means we parsed all items); if
+	// it's not an RBRACE, it's a syntax error and we just return it.
+	if err != nil && p.tok.Type != token.RBRACE {
+		return nil, err
+	}
+
+	// No error, scan and expect the ending to be a brace
+	if tok := p.scan(); tok.Type != token.RBRACE {
+		return nil, &PosError{
+			Pos: tok.Pos,
+			Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
+		}
+	}
+
+	o.List = l
+	o.Rbrace = p.tok.Pos // advanced via parseObjectList
+	return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+	defer un(trace(p, "ParseListType"))
+
+	// we assume that the currently scanned token is a LBRACK
+	l := &ast.ListType{
+		Lbrack: p.tok.Pos,
+	}
+
+	needComma := false
+	for {
+		tok := p.scan()
+		if needComma {
+			switch tok.Type {
+			case token.COMMA, token.RBRACK:
+			default:
+				return nil, &PosError{
+					Pos: tok.Pos,
+					Err: fmt.Errorf(
+						"error parsing list, expected comma or list end, got: %s",
+						tok.Type),
+				}
+			}
+		}
+		switch tok.Type {
+		case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
+			node, err := p.literalType()
+			if err != nil {
+				return nil, err
+			}
+
+			// If there is a lead comment, apply it
+			if p.leadComment != nil {
+				node.LeadComment = p.leadComment
+				p.leadComment = nil
+			}
+
+			l.Add(node)
+			needComma = true
+		case token.COMMA:
+			// get next list item or we are at the end
+			// do a look-ahead for line comment
+			p.scan()
+			if p.lineComment != nil && len(l.List) > 0 {
+				lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
+				if ok {
+					lit.LineComment = p.lineComment
+					l.List[len(l.List)-1] = lit
+					p.lineComment = nil
+				}
+			}
+			p.unscan()
+
+			needComma = false
+			continue
+		case token.LBRACE:
+			// Looks like a nested object, so parse it out
+			node, err := p.objectType()
+			if err != nil {
+				return nil, &PosError{
+					Pos: tok.Pos,
+					Err: fmt.Errorf(
+						"error while trying to parse object within list: %s", err),
+				}
+			}
+			l.Add(node)
+			needComma = true
+		case token.LBRACK:
+			node, err := p.listType()
+			if err != nil {
+				return nil, &PosError{
+					Pos: tok.Pos,
+					Err: fmt.Errorf(
+						"error while trying to parse list within list: %s", err),
+				}
+			}
+			l.Add(node)
+		case token.RBRACK:
+			// finished
+			l.Rbrack = p.tok.Pos
+			return l, nil
+		default:
+			return nil, &PosError{
+				Pos: tok.Pos,
+				Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type),
+			}
+		}
+	}
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+	defer un(trace(p, "ParseLiteral"))
+
+	return &ast.LiteralType{
+		Token: p.tok,
+	}, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead. In the process, it collects any
+// comment groups encountered, and remembers the last lead and line comments.
+func (p *Parser) scan() token.Token {
+	// If we have a token on the buffer, then return it.
+	if p.n != 0 {
+		p.n = 0
+		return p.tok
+	}
+
+	// Otherwise read the next token from the scanner and save it to the buffer
+	// in case we unscan later.
+	prev := p.tok
+	p.tok = p.sc.Scan()
+
+	if p.tok.Type == token.COMMENT {
+		var comment *ast.CommentGroup
+		var endline int
+
+		// fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n",
+		// p.tok.Pos.Line, prev.Pos.Line, endline)
+		if p.tok.Pos.Line == prev.Pos.Line {
+			// The comment is on same line as the previous token; it
+			// cannot be a lead comment but may be a line comment.
+			comment, endline = p.consumeCommentGroup(0)
+			if p.tok.Pos.Line != endline {
+				// The next token is on a different line, thus
+				// the last comment group is a line comment.
+				p.lineComment = comment
+			}
+		}
+
+		// consume successor comments, if any
+		endline = -1
+		for p.tok.Type == token.COMMENT {
+			comment, endline = p.consumeCommentGroup(1)
+		}
+
+		if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE {
+			switch p.tok.Type {
+			case token.RBRACE, token.RBRACK:
+				// Do not count for these cases
+			default:
+				// The next token is following on the line immediately after the
+				// comment group, thus the last comment group is a lead comment.
+				p.leadComment = comment
+			}
+		}
+
+	}
+
+	return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+	p.n = 1
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+	if !p.enableTrace {
+		return
+	}
+
+	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+	const n = len(dots)
+	fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+	i := 2 * p.indent
+	for i > n {
+		fmt.Print(dots)
+		i -= n
+	}
+	// i <= n
+	fmt.Print(dots[0:i])
+	fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+	p.printTrace(msg, "(")
+	p.indent++
+	return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+	p.indent--
+	p.printTrace(")")
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
new file mode 100644
index 000000000..7c038d12a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
@@ -0,0 +1,789 @@
+package printer
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+const (
+	blank    = byte(' ')
+	newline  = byte('\n')
+	tab      = byte('\t')
+	infinity = 1 << 30 // offset or line
+)
+
+var (
+	unindent = []byte("\uE123") // in the private use space
+)
+
+type printer struct {
+	cfg  Config
+	prev token.Pos
+
+	comments           []*ast.CommentGroup // may be nil, contains all comments
+	standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node)
+
+	enableTrace bool
+	indentTrace int
+}
+
+type ByPosition []*ast.CommentGroup
+
+func (b ByPosition) Len() int           { return len(b) }
+func (b ByPosition) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
+func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) }
+
+// collectComments collects all standalone comments, i.e. comments which are
+// not lead or line comments
+func (p *printer) collectComments(node ast.Node) {
+	// first collect all comments. These are already stored in
+	// ast.File.Comments.
+	ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
+		switch t := nn.(type) {
+		case *ast.File:
+			p.comments = t.Comments
+			return nn, false
+		}
+		return nn, true
+	})
+
+	standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0)
+	for _, c := range p.comments {
+		standaloneComments[c.Pos()] = c
+	}
+
+	// next remove all lead and line comments from the overall comment map.
+	// This will give us comments which are standalone, comments which are not
+	// assigned to any kind of node.
+	ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
+		switch t := nn.(type) {
+		case *ast.LiteralType:
+			if t.LeadComment != nil {
+				for _, comment := range t.LeadComment.List {
+					if _, ok := standaloneComments[comment.Pos()]; ok {
+						delete(standaloneComments, comment.Pos())
+					}
+				}
+			}
+
+			if t.LineComment != nil {
+				for _, comment := range t.LineComment.List {
+					if _, ok := standaloneComments[comment.Pos()]; ok {
+						delete(standaloneComments, comment.Pos())
+					}
+				}
+			}
+		case *ast.ObjectItem:
+			if t.LeadComment != nil {
+				for _, comment := range t.LeadComment.List {
+					if _, ok := standaloneComments[comment.Pos()]; ok {
+						delete(standaloneComments, comment.Pos())
+					}
+				}
+			}
+
+			if t.LineComment != nil {
+				for _, comment := range t.LineComment.List {
+					if _, ok := standaloneComments[comment.Pos()]; ok {
+						delete(standaloneComments, comment.Pos())
+					}
+				}
+			}
+		}
+
+		return nn, true
+	})
+
+	for _, c := range standaloneComments {
+		p.standaloneComments = append(p.standaloneComments, c)
+	}
+
+	sort.Sort(ByPosition(p.standaloneComments))
+}
+
+// output produces the printable HCL output for the given node and returns it.
+func (p *printer) output(n interface{}) []byte {
+	var buf bytes.Buffer
+
+	switch t := n.(type) {
+	case *ast.File:
+		// File doesn't trace so we add the tracing here
+		defer un(trace(p, "File"))
+		return p.output(t.Node)
+	case *ast.ObjectList:
+		defer un(trace(p, "ObjectList"))
+
+		var index int
+		for {
+			// Determine the location of the next actual non-comment
+			// item. If we're at the end, the next item is at "infinity"
+			var nextItem token.Pos
+			if index != len(t.Items) {
+				nextItem = t.Items[index].Pos()
+			} else {
+				nextItem = token.Pos{Offset: infinity, Line: infinity}
+			}
+
+			// Go through the standalone comments in the file and print out
+			// the comments that should come before this object item.
+			for _, c := range p.standaloneComments {
+				// Go through all the comments in the group. The group
+				// should be printed together, not separated by double newlines.
+				printed := false
+				newlinePrinted := false
+				for _, comment := range c.List {
+					// We only care about comments after the previous item
+					// we've printed so that comments are printed in the
+					// correct locations (between two objects for example).
+					// And before the next item.
+					if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
+						// if we hit the end, add newlines so we can print the
+						// comment. We don't do this if prev is invalid, which
+						// means we're at the beginning of the file, since the
+						// first comment should be at the first line.
+						if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) {
+							buf.Write([]byte{newline, newline})
+							newlinePrinted = true
+						}
+
+						// Write the actual comment.
+						buf.WriteString(comment.Text)
+						buf.WriteByte(newline)
+
+						// Set printed to true to note that we printed something
+						printed = true
+					}
+				}
+
+				// If we're not at the last item, write a new line so
+				// that there is a newline separating this comment from
+				// the next object.
+				if printed && index != len(t.Items) {
+					buf.WriteByte(newline)
+				}
+			}
+
+			if index == len(t.Items) {
+				break
+			}
+
+			buf.Write(p.output(t.Items[index]))
+			if index != len(t.Items)-1 {
+				// Always write a newline to separate us from the next item
+				buf.WriteByte(newline)
+
+				// Need to determine if we're going to separate the next item
+				// with a blank line. The logic here is simple, though there
+				// are a few conditions:
+				//
+				//   1. The next object is more than one line away anyway,
+				//      so we need an empty line.
+				//
+				//   2. The next object is not a "single line" object, so
+				//      we need an empty line.
+				//
+				//   3. This current object is not a single line object,
+				//      so we need an empty line.
+				current := t.Items[index]
+				next := t.Items[index+1]
+				if next.Pos().Line != t.Items[index].Pos().Line+1 ||
+					!p.isSingleLineObject(next) ||
+					!p.isSingleLineObject(current) {
+					buf.WriteByte(newline)
+				}
+			}
+			index++
+		}
+	case *ast.ObjectKey:
+		buf.WriteString(t.Token.Text)
+	case *ast.ObjectItem:
+		p.prev = t.Pos()
+		buf.Write(p.objectItem(t))
+	case *ast.LiteralType:
+		buf.Write(p.literalType(t))
+	case *ast.ListType:
+		buf.Write(p.list(t))
+	case *ast.ObjectType:
+		buf.Write(p.objectType(t))
+	default:
+		fmt.Printf(" unknown type: %T\n", n)
+	}
+
+	return buf.Bytes()
+}
+
+func (p *printer) literalType(lit *ast.LiteralType) []byte {
+	result := []byte(lit.Token.Text)
+	switch lit.Token.Type {
+	case token.HEREDOC:
+		// Clear the trailing newline from heredocs
+		if result[len(result)-1] == '\n' {
+			result = result[:len(result)-1]
+		}
+
+		// Poison lines 2+ so that we don't indent them
+		result = p.heredocIndent(result)
+	case token.STRING:
+		// If this is a multiline string, poison lines 2+ so we don't
+		// indent them.
+		if bytes.IndexRune(result, '\n') >= 0 {
+			result = p.heredocIndent(result)
+		}
+	}
+
+	return result
+}
+
+// objectItem returns the printable HCL form of an object item. An object type
+// starts with one/multiple keys and has a value. The value might be of any
+// type.
+func (p *printer) objectItem(o *ast.ObjectItem) []byte {
+	defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text)))
+	var buf bytes.Buffer
+
+	if o.LeadComment != nil {
+		for _, comment := range o.LeadComment.List {
+			buf.WriteString(comment.Text)
+			buf.WriteByte(newline)
+		}
+	}
+
+	// If key and val are on different lines, treat line comments like lead comments.
+	if o.LineComment != nil && o.Val.Pos().Line != o.Keys[0].Pos().Line {
+		for _, comment := range o.LineComment.List {
+			buf.WriteString(comment.Text)
+			buf.WriteByte(newline)
+		}
+	}
+
+	for i, k := range o.Keys {
+		buf.WriteString(k.Token.Text)
+		buf.WriteByte(blank)
+
+		// reach end of key
+		if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 {
+			buf.WriteString("=")
+			buf.WriteByte(blank)
+		}
+	}
+
+	buf.Write(p.output(o.Val))
+
+	if o.LineComment != nil && o.Val.Pos().Line == o.Keys[0].Pos().Line {
+		buf.WriteByte(blank)
+		for _, comment := range o.LineComment.List {
+			buf.WriteString(comment.Text)
+		}
+	}
+
+	return buf.Bytes()
+}
+
+// objectType returns the printable HCL form of an object type. An object type
+// begins with a brace and ends with a brace.
+func (p *printer) objectType(o *ast.ObjectType) []byte {
+	defer un(trace(p, "ObjectType"))
+	var buf bytes.Buffer
+	buf.WriteString("{")
+
+	var index int
+	var nextItem token.Pos
+	var commented, newlinePrinted bool
+	for {
+		// Determine the location of the next actual non-comment
+		// item. If we're at the end, the next item is the closing brace
+		if index != len(o.List.Items) {
+			nextItem = o.List.Items[index].Pos()
+		} else {
+			nextItem = o.Rbrace
+		}
+
+		// Go through the standalone comments in the file and print out
+		// the comments that should come before this object item.
+		for _, c := range p.standaloneComments {
+			printed := false
+			var lastCommentPos token.Pos
+			for _, comment := range c.List {
+				// We only care about comments after the previous item
+				// we've printed so that comments are printed in the
+				// correct locations (between two objects for example).
+				// And before the next item.
+				if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
+					// If there are standalone comments and the initial newline has not
+					// been printed yet, do it now.
+					if !newlinePrinted {
+						newlinePrinted = true
+						buf.WriteByte(newline)
+					}
+
+					// add newline if it's between other printed nodes
+					if index > 0 {
+						commented = true
+						buf.WriteByte(newline)
+					}
+
+					// Store this position
+					lastCommentPos = comment.Pos()
+
+					// output the comment itself
+					buf.Write(p.indent(p.heredocIndent([]byte(comment.Text))))
+
+					// Set printed to true to note that we printed something
+					printed = true
+
+					/*
+						if index != len(o.List.Items) {
+							buf.WriteByte(newline) // do not print on the end
+						}
+					*/
+				}
+			}
+
+			// Stuff to do if we had comments
+			if printed {
+				// Always write a newline
+				buf.WriteByte(newline)
+
+				// If there is another item in the object and our comment
+				// didn't hug it directly, then make sure there is a blank
+				// line separating them.
+				if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 {
+					buf.WriteByte(newline)
+				}
+			}
+		}
+
+		if index == len(o.List.Items) {
+			p.prev = o.Rbrace
+			break
+		}
+
+		// At this point we are sure that it's not a totally empty block: print
+		// the initial newline if it hasn't been printed yet by the previous
+		// block about standalone comments.
+		if !newlinePrinted {
+			buf.WriteByte(newline)
+			newlinePrinted = true
+		}
+
+		// check if we have adjacent one-liner items. If so, we'll align
+		// their comments.
+		var aligned []*ast.ObjectItem
+		for _, item := range o.List.Items[index:] {
+			// we don't group one line lists
+			if len(o.List.Items) == 1 {
+				break
+			}
+
+			// one means a one-liner without any lead comment
+			// two means a one-liner with a lead comment
+			// anything else might be something else
+			cur := lines(string(p.objectItem(item)))
+			if cur > 2 {
+				break
+			}
+
+			curPos := item.Pos()
+
+			nextPos := token.Pos{}
+			if index != len(o.List.Items)-1 {
+				nextPos = o.List.Items[index+1].Pos()
+			}
+
+			prevPos := token.Pos{}
+			if index != 0 {
+				prevPos = o.List.Items[index-1].Pos()
+			}
+
+			// fmt.Println("DEBUG ----------------")
+			// fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos)
+			// fmt.Printf("cur = %+v curPos: %s\n", cur, curPos)
+			// fmt.Printf("next = %+v nextPos: %s\n", next, nextPos)
+
+			if curPos.Line+1 == nextPos.Line {
+				aligned = append(aligned, item)
+				index++
+				continue
+			}
+
+			if curPos.Line-1 == prevPos.Line {
+				aligned = append(aligned, item)
+				index++
+
+				// finish if we have a new line or comment next. This happens
+				// if the next item is not adjacent
+				if curPos.Line+1 != nextPos.Line {
+					break
+				}
+				continue
+			}
+
+			break
+		}
+
+		// put newlines if the items are between other non aligned items.
+		// newlines are also added if there is a standalone comment already, so
+		// check it too
+		if !commented && index != len(aligned) {
+			buf.WriteByte(newline)
+		}
+
+		if len(aligned) >= 1 {
+			p.prev = aligned[len(aligned)-1].Pos()
+
+			items := p.alignedItems(aligned)
+			buf.Write(p.indent(items))
+		} else {
+			p.prev = o.List.Items[index].Pos()
+
+			buf.Write(p.indent(p.objectItem(o.List.Items[index])))
+			index++
+		}
+
+		buf.WriteByte(newline)
+	}
+
+	buf.WriteString("}")
+	return buf.Bytes()
+}
+
+func (p *printer) alignedItems(items []*ast.ObjectItem) []byte {
+	var buf bytes.Buffer
+
+	// find the longest key and value length, needed for alignment
+	var longestKeyLen int // longest key length
+	var longestValLen int // longest value length
+	for _, item := range items {
+		key := len(item.Keys[0].Token.Text)
+		val := len(p.output(item.Val))
+
+		if key > longestKeyLen {
+			longestKeyLen = key
+		}
+
+		if val > longestValLen {
+			longestValLen = val
+		}
+	}
+
+	for i, item := range items {
+		if item.LeadComment != nil {
+			for _, comment := range item.LeadComment.List {
+				buf.WriteString(comment.Text)
+				buf.WriteByte(newline)
+			}
+		}
+
+		for i, k := range item.Keys {
+			keyLen := len(k.Token.Text)
+			buf.WriteString(k.Token.Text)
+			for i := 0; i < longestKeyLen-keyLen+1; i++ {
+				buf.WriteByte(blank)
+			}
+
+			// reach end of key
+			if i == len(item.Keys)-1 && len(item.Keys) == 1 {
+				buf.WriteString("=")
+				buf.WriteByte(blank)
+			}
+		}
+
+		val := p.output(item.Val)
+		valLen := len(val)
+		buf.Write(val)
+
+		if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil {
+			for i := 0; i < longestValLen-valLen+1; i++ {
+				buf.WriteByte(blank)
+			}
+
+			for _, comment := range item.LineComment.List {
+				buf.WriteString(comment.Text)
+			}
+		}
+
+		// do not print for the last item
+		if i != len(items)-1 {
+			buf.WriteByte(newline)
+		}
+	}
+
+	return buf.Bytes()
+}
+
+// list returns the printable HCL form of a list type.
+func (p *printer) list(l *ast.ListType) []byte {
+	if p.isSingleLineList(l) {
+		return p.singleLineList(l)
+	}
+
+	var buf bytes.Buffer
+	buf.WriteString("[")
+	buf.WriteByte(newline)
+
+	var longestLine int
+	for _, item := range l.List {
+		// for now we assume that the list only contains literal types
+		if lit, ok := item.(*ast.LiteralType); ok {
+			lineLen := len(lit.Token.Text)
+			if lineLen > longestLine {
+				longestLine = lineLen
+			}
+		}
+	}
+
+	haveEmptyLine := false
+	for i, item := range l.List {
+		// If we have a lead comment, then we want to write that first
+		leadComment := false
+		if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil {
+			leadComment = true
+
+			// Ensure an empty line before every element with a
+			// lead comment (except the first item in a list).
+			if !haveEmptyLine && i != 0 {
+				buf.WriteByte(newline)
+			}
+
+			for _, comment := range lit.LeadComment.List {
+				buf.Write(p.indent([]byte(comment.Text)))
+				buf.WriteByte(newline)
+			}
+		}
+
+		// also indent each line
+		val := p.output(item)
+		curLen := len(val)
+		buf.Write(p.indent(val))
+
+		// if this item is a heredoc, then we output the comma on
+		// the next line. This is the only case this happens.
+		comma := []byte{','}
+		if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
+			buf.WriteByte(newline)
+			comma = p.indent(comma)
+		}
+
+		buf.Write(comma)
+
+		if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
+			// if the next item doesn't have any comments, do not align
+			buf.WriteByte(blank) // align one space
+			for i := 0; i < longestLine-curLen; i++ {
+				buf.WriteByte(blank)
+			}
+
+			for _, comment := range lit.LineComment.List {
+				buf.WriteString(comment.Text)
+			}
+		}
+
+		buf.WriteByte(newline)
+
+		// Ensure an empty line after every element with a
+		// lead comment (except the first item in a list).
+		haveEmptyLine = leadComment && i != len(l.List)-1
+		if haveEmptyLine {
+			buf.WriteByte(newline)
+		}
+	}
+
+	buf.WriteString("]")
+	return buf.Bytes()
+}
+
+// isSingleLineList returns true if:
+// * the list was previously formatted entirely on one line
+// * it consists entirely of literals
+// * there are either no heredoc strings or the list has exactly one element
+// * there are no line comments
+func (printer) isSingleLineList(l *ast.ListType) bool {
+	for _, item := range l.List {
+		if item.Pos().Line != l.Lbrack.Line {
+			return false
+		}
+
+		lit, ok := item.(*ast.LiteralType)
+		if !ok {
+			return false
+		}
+
+		if lit.Token.Type == token.HEREDOC && len(l.List) != 1 {
+			return false
+		}
+
+		if lit.LineComment != nil {
+			return false
+		}
+	}
+
+	return true
+}
+
+// singleLineList prints a simple single line list.
+// For a definition of "simple", see isSingleLineList above.
+func (p *printer) singleLineList(l *ast.ListType) []byte {
+	buf := &bytes.Buffer{}
+
+	buf.WriteString("[")
+	for i, item := range l.List {
+		if i != 0 {
+			buf.WriteString(", ")
+		}
+
+		// Output the item itself
+		buf.Write(p.output(item))
+
+		// The heredoc marker needs to be at the end of line.
+		if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
+			buf.WriteByte(newline)
+		}
+	}
+
+	buf.WriteString("]")
+	return buf.Bytes()
+}
+
+// indent prefixes each non-empty line of the given buffer with the configured indentation
+func (p *printer) indent(buf []byte) []byte {
+	var prefix []byte
+	if p.cfg.SpacesWidth != 0 {
+		for i := 0; i < p.cfg.SpacesWidth; i++ {
+			prefix = append(prefix, blank)
+		}
+	} else {
+		prefix = []byte{tab}
+	}
+
+	var res []byte
+	bol := true
+	for _, c := range buf {
+		if bol && c != '\n' {
+			res = append(res, prefix...)
+		}
+
+		res = append(res, c)
+		bol = c == '\n'
+	}
+	return res
+}
+
+// unindent removes all the indentation from the tombstoned lines
+func (p *printer) unindent(buf []byte) []byte {
+	var res []byte
+	for i := 0; i < len(buf); i++ {
+		skip := len(buf)-i <= len(unindent)
+		if !skip {
+			skip = !bytes.Equal(unindent, buf[i:i+len(unindent)])
+		}
+		if skip {
+			res = append(res, buf[i])
+			continue
+		}
+
+		// We have a marker. we have to backtrace here and clean out
+		// any whitespace ahead of our tombstone up to a \n
+		for j := len(res) - 1; j >= 0; j-- {
+			if res[j] == '\n' {
+				break
+			}
+
+			res = res[:j]
+		}
+
+		// Skip the entire unindent marker
+		i += len(unindent) - 1
+	}
+
+	return res
+}
+
+// heredocIndent marks the second and all further lines as unindentable
+func (p *printer) heredocIndent(buf []byte) []byte {
+	var res []byte
+	bol := false
+	for _, c := range buf {
+		if bol && c != '\n' {
+			res = append(res, unindent...)
+		}
+		res = append(res, c)
+		bol = c == '\n'
+	}
+	return res
+}
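+
+// Taken together these helpers form a round trip (a sketch, writing the
+// marker rune \uE123 as <M>): heredocIndent("a\nb\n") yields "a\n<M>b\n",
+// indent then yields "\ta\n\t<M>b\n", and unindent removes the marker plus
+// the indentation inserted in front of it, yielding "\ta\nb\n", so heredoc
+// body lines keep their original column.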
+
+// isSingleLineObject tells whether the given object item is a single
+// line object such as "obj {}".
+//
+// A single line object:
+//
+//   * has no lead comments (hence multi-line)
+//   * has no assignment
+//   * has no values in the stanza (within {})
+//
+func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool {
+	// If there is a lead comment, can't be one line
+	if val.LeadComment != nil {
+		return false
+	}
+
+	// If there is assignment, we always break by line
+	if val.Assign.IsValid() {
+		return false
+	}
+
+	// If it isn't an object type, then its not a single line object
+	ot, ok := val.Val.(*ast.ObjectType)
+	if !ok {
+		return false
+	}
+
+	// If the object has no items, it is single line!
+	return len(ot.List.Items) == 0
+}
+
+func lines(txt string) int {
+	endline := 1
+	for i := 0; i < len(txt); i++ {
+		if txt[i] == '\n' {
+			endline++
+		}
+	}
+	return endline
+}
+
+// ----------------------------------------------------------------------------
+// Tracing support
+
+func (p *printer) printTrace(a ...interface{}) {
+	if !p.enableTrace {
+		return
+	}
+
+	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+	const n = len(dots)
+	i := 2 * p.indentTrace
+	for i > n {
+		fmt.Print(dots)
+		i -= n
+	}
+	// i <= n
+	fmt.Print(dots[0:i])
+	fmt.Println(a...)
+}
+
+func trace(p *printer, msg string) *printer {
+	p.printTrace(msg, "(")
+	p.indentTrace++
+	return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *printer) {
+	p.indentTrace--
+	p.printTrace(")")
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
new file mode 100644
index 000000000..6617ab8e7
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
@@ -0,0 +1,66 @@
+// Package printer implements printing of AST nodes to HCL format.
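+//
+// A minimal usage sketch (the input is illustrative):
+//
+//	out, err := Format([]byte(`foo="bar"`))
+//	if err != nil {
+//		// handle the parse error
+//	}
+//	// out should now hold the normalized form `foo = "bar"` plus a newline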
+package printer
+
+import (
+	"bytes"
+	"io"
+	"text/tabwriter"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	"github.com/hashicorp/hcl/hcl/parser"
+)
+
+var DefaultConfig = Config{
+	SpacesWidth: 2,
+}
+
+// A Config node controls the output of Fprint.
+type Config struct {
+	SpacesWidth int // if set, it will use spaces instead of tabs for alignment
+}
+
+func (c *Config) Fprint(output io.Writer, node ast.Node) error {
+	p := &printer{
+		cfg:                *c,
+		comments:           make([]*ast.CommentGroup, 0),
+		standaloneComments: make([]*ast.CommentGroup, 0),
+		// enableTrace:        true,
+	}
+
+	p.collectComments(node)
+
+	if _, err := output.Write(p.unindent(p.output(node))); err != nil {
+		return err
+	}
+
+	// flush tabwriter, if any
+	var err error
+	if tw, _ := output.(*tabwriter.Writer); tw != nil {
+		err = tw.Flush()
+	}
+
+	return err
+}
+
+// Fprint "pretty-prints" an HCL node to output
+// It calls Config.Fprint with default settings.
+func Fprint(output io.Writer, node ast.Node) error {
+	return DefaultConfig.Fprint(output, node)
+}
+
+// Format formats src HCL and returns the result.
+func Format(src []byte) ([]byte, error) {
+	node, err := parser.Parse(src)
+	if err != nil {
+		return nil, err
+	}
+
+	var buf bytes.Buffer
+	if err := DefaultConfig.Fprint(&buf, node); err != nil {
+		return nil, err
+	}
+
+	// Add trailing newline to result
+	buf.WriteString("\n")
+	return buf.Bytes(), nil
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
new file mode 100644
index 000000000..624a18fe3
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
@@ -0,0 +1,652 @@
+// Package scanner implements a scanner for HCL (HashiCorp Configuration
+// Language) source text.
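+//
+// A minimal scanning loop (sketch):
+//
+//	s := New([]byte(`count = 1`))
+//	for {
+//		tok := s.Scan()
+//		if tok.Type == token.EOF {
+//			break
+//		}
+//		fmt.Printf("%s %q\n", tok.Type, tok.Text)
+//	}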
+package scanner
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"regexp"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+	buf *bytes.Buffer // Source buffer for advancing and scanning
+	src []byte        // Source buffer for immutable access
+
+	// Source Position
+	srcPos  token.Pos // current position
+	prevPos token.Pos // previous position, used for peek() method
+
+	lastCharLen int // length of last character in bytes
+	lastLineLen int // length of last line in characters (for correct column reporting)
+
+	tokStart int // token text start position
+	tokEnd   int // token text end position
+
+	// Error is called for each error encountered. If no Error
+	// function is set, the error is reported to os.Stderr.
+	Error func(pos token.Pos, msg string)
+
+	// ErrorCount is incremented by one for each error encountered.
+	ErrorCount int
+
+	// tokPos is the start position of most recently scanned token; set by
+	// Scan. The Filename field is always left untouched by the Scanner.  If
+	// an error is reported (via Error) and Position is invalid, the scanner is
+	// not inside a token.
+	tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
+func New(src []byte) *Scanner {
+	// even though we accept a src, we read from an io.Reader-compatible type
+	// (*bytes.Buffer). So in the future we might easily change it to a
+	// streaming read.
+	b := bytes.NewBuffer(src)
+	s := &Scanner{
+		buf: b,
+		src: src,
+	}
+
+	// srcPosition always starts with 1
+	s.srcPos.Line = 1
+	return s
+}
+
+// next reads the next rune from the buffered reader. Returns rune(0) if
+// an error occurs (or io.EOF is returned).
+func (s *Scanner) next() rune {
+	ch, size, err := s.buf.ReadRune()
+	if err != nil {
+		// advance for error reporting
+		s.srcPos.Column++
+		s.srcPos.Offset += size
+		s.lastCharLen = size
+		return eof
+	}
+
+	// remember last position
+	s.prevPos = s.srcPos
+
+	s.srcPos.Column++
+	s.lastCharLen = size
+	s.srcPos.Offset += size
+
+	if ch == utf8.RuneError && size == 1 {
+		s.err("illegal UTF-8 encoding")
+		return ch
+	}
+
+	if ch == '\n' {
+		s.srcPos.Line++
+		s.lastLineLen = s.srcPos.Column
+		s.srcPos.Column = 0
+	}
+
+	if ch == '\x00' {
+		s.err("unexpected null character (0x00)")
+		return eof
+	}
+
+	if ch == '\uE123' {
+		s.err("unicode code point U+E123 reserved for internal use")
+		return utf8.RuneError
+	}
+
+	// debug
+	// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+	return ch
+}
+
+// unread unreads the previously read rune and updates the source position
+func (s *Scanner) unread() {
+	if err := s.buf.UnreadRune(); err != nil {
+		panic(err) // this is user fault, we should catch it
+	}
+	s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+	peek, _, err := s.buf.ReadRune()
+	if err != nil {
+		return eof
+	}
+
+	s.buf.UnreadRune()
+	return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+	ch := s.next()
+
+	// skip white space
+	for isWhitespace(ch) {
+		ch = s.next()
+	}
+
+	var tok token.Type
+
+	// token text markings
+	s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+	// token position: the initial next() moves the offset by one (the size of
+	// the rune, actually), but we are interested in the starting point
+	s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+	if s.srcPos.Column > 0 {
+		// common case: last character was not a '\n'
+		s.tokPos.Line = s.srcPos.Line
+		s.tokPos.Column = s.srcPos.Column
+	} else {
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		s.tokPos.Line = s.srcPos.Line - 1
+		s.tokPos.Column = s.lastLineLen
+	}
+
+	switch {
+	case isLetter(ch):
+		tok = token.IDENT
+		lit := s.scanIdentifier()
+		if lit == "true" || lit == "false" {
+			tok = token.BOOL
+		}
+	case isDecimal(ch):
+		tok = s.scanNumber(ch)
+	default:
+		switch ch {
+		case eof:
+			tok = token.EOF
+		case '"':
+			tok = token.STRING
+			s.scanString()
+		case '#', '/':
+			tok = token.COMMENT
+			s.scanComment(ch)
+		case '.':
+			tok = token.PERIOD
+			ch = s.peek()
+			if isDecimal(ch) {
+				tok = token.FLOAT
+				ch = s.scanMantissa(ch)
+				ch = s.scanExponent(ch)
+			}
+		case '<':
+			tok = token.HEREDOC
+			s.scanHeredoc()
+		case '[':
+			tok = token.LBRACK
+		case ']':
+			tok = token.RBRACK
+		case '{':
+			tok = token.LBRACE
+		case '}':
+			tok = token.RBRACE
+		case ',':
+			tok = token.COMMA
+		case '=':
+			tok = token.ASSIGN
+		case '+':
+			tok = token.ADD
+		case '-':
+			if isDecimal(s.peek()) {
+				ch := s.next()
+				tok = s.scanNumber(ch)
+			} else {
+				tok = token.SUB
+			}
+		default:
+			s.err("illegal char")
+		}
+	}
+
+	// finish token ending
+	s.tokEnd = s.srcPos.Offset
+
+	// create token literal
+	var tokenText string
+	if s.tokStart >= 0 {
+		tokenText = string(s.src[s.tokStart:s.tokEnd])
+	}
+	s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+	return token.Token{
+		Type: tok,
+		Pos:  s.tokPos,
+		Text: tokenText,
+	}
+}
+
+func (s *Scanner) scanComment(ch rune) {
+	// single line comments
+	if ch == '#' || (ch == '/' && s.peek() != '*') {
+		if ch == '/' && s.peek() != '/' {
+			s.err("expected '/' for comment")
+			return
+		}
+
+		ch = s.next()
+		for ch != '\n' && ch >= 0 && ch != eof {
+			ch = s.next()
+		}
+		if ch != eof && ch >= 0 {
+			s.unread()
+		}
+		return
+	}
+
+	// be sure we get the character after /*. This allows us to find comments
+	// that are not terminated
+	if ch == '/' {
+		s.next()
+		ch = s.next() // read character after "/*"
+	}
+
+	// look for /* - style comments
+	for {
+		if ch < 0 || ch == eof {
+			s.err("comment not terminated")
+			break
+		}
+
+		ch0 := ch
+		ch = s.next()
+		if ch0 == '*' && ch == '/' {
+			break
+		}
+	}
+}
+
+// scanNumber scans an HCL number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+	if ch == '0' {
+		// check for hexadecimal, octal or float
+		ch = s.next()
+		if ch == 'x' || ch == 'X' {
+			// hexadecimal
+			ch = s.next()
+			found := false
+			for isHexadecimal(ch) {
+				ch = s.next()
+				found = true
+			}
+
+			if !found {
+				s.err("illegal hexadecimal number")
+			}
+
+			if ch != eof {
+				s.unread()
+			}
+
+			return token.NUMBER
+		}
+
+		// now it's either something like 0421 (octal) or 0.1231 (float)
+		illegalOctal := false
+		for isDecimal(ch) {
+			ch = s.next()
+			if ch == '8' || ch == '9' {
+				// this is just a possibility. For example 0159 is illegal, but
+				// 0159.23 is valid. So we mark a possible illegal octal. If
+				// the next character is not a period, we'll print the error.
+				illegalOctal = true
+			}
+		}
+
+		if ch == 'e' || ch == 'E' {
+			ch = s.scanExponent(ch)
+			return token.FLOAT
+		}
+
+		if ch == '.' {
+			ch = s.scanFraction(ch)
+
+			if ch == 'e' || ch == 'E' {
+				ch = s.next()
+				ch = s.scanExponent(ch)
+			}
+			return token.FLOAT
+		}
+
+		if illegalOctal {
+			s.err("illegal octal number")
+		}
+
+		if ch != eof {
+			s.unread()
+		}
+		return token.NUMBER
+	}
+
+	s.scanMantissa(ch)
+	ch = s.next() // seek forward
+	if ch == 'e' || ch == 'E' {
+		ch = s.scanExponent(ch)
+		return token.FLOAT
+	}
+
+	if ch == '.' {
+		ch = s.scanFraction(ch)
+		if ch == 'e' || ch == 'E' {
+			ch = s.next()
+			ch = s.scanExponent(ch)
+		}
+		return token.FLOAT
+	}
+
+	if ch != eof {
+		s.unread()
+	}
+	return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the given rune. It returns
+// the next non-decimal rune, which is used to determine whether a fraction or
+// an exponent follows.
+func (s *Scanner) scanMantissa(ch rune) rune {
+	scanned := false
+	for isDecimal(ch) {
+		ch = s.next()
+		scanned = true
+	}
+
+	if scanned && ch != eof {
+		s.unread()
+	}
+	return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+	if ch == '.' {
+		ch = s.peek() // we peek just to see if we can move forward
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+	if ch == 'e' || ch == 'E' {
+		ch = s.next()
+		if ch == '-' || ch == '+' {
+			ch = s.next()
+		}
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanHeredoc scans a heredoc string
+func (s *Scanner) scanHeredoc() {
+	// Scan the second '<' in example: '<<EOF'
+	if s.next() != '<' {
+		s.err("heredoc expected second '<', didn't see it")
+		return
+	}
+
+	// Get the original offset so we can read just the heredoc ident
+	offs := s.srcPos.Offset
+
+	// Scan the identifier
+	ch := s.next()
+
+	// Indented heredoc syntax
+	if ch == '-' {
+		ch = s.next()
+	}
+
+	for isLetter(ch) || isDigit(ch) {
+		ch = s.next()
+	}
+
+	// If we reached an EOF then that is not good
+	if ch == eof {
+		s.err("heredoc not terminated")
+		return
+	}
+
+	// Ignore the '\r' in Windows line endings
+	if ch == '\r' {
+		if s.peek() == '\n' {
+			ch = s.next()
+		}
+	}
+
+	// If we didn't reach a newline then that is also not good
+	if ch != '\n' {
+		s.err("invalid characters in heredoc anchor")
+		return
+	}
+
+	// Read the identifier
+	identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
+	if len(identBytes) == 0 {
+		s.err("zero-length heredoc anchor")
+		return
+	}
+
+	var identRegexp *regexp.Regexp
+	if identBytes[0] == '-' {
+		identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes[1:]))
+	} else {
+		identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes))
+	}
+
+	// Read the actual string value
+	lineStart := s.srcPos.Offset
+	for {
+		ch := s.next()
+
+		// Skip the identifier check if this is not a line end
+		if ch == '\n' {
+			// Great, check if we hit the end marker
+			lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
+			if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
+				break
+			}
+
+			// Not an anchor match, record the start of a new line
+			lineStart = s.srcPos.Offset
+		}
+
+		if ch == eof {
+			s.err("heredoc not terminated")
+			return
+		}
+	}
+
+	return
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+	braces := 0
+	for {
+		// '"' opening already consumed
+		// read character after quote
+		ch := s.next()
+
+		if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
+			s.err("literal not terminated")
+			return
+		}
+
+		if ch == '"' && braces == 0 {
+			break
+		}
+
+		// If we're going into a ${} then we can ignore quotes for a while
+		if braces == 0 && ch == '$' && s.peek() == '{' {
+			braces++
+			s.next()
+		} else if braces > 0 && ch == '{' {
+			braces++
+		}
+		if braces > 0 && ch == '}' {
+			braces--
+		}
+
+		if ch == '\\' {
+			s.scanEscape()
+		}
+	}
+
+	return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+	// http://en.cppreference.com/w/cpp/language/escape
+	ch := s.next() // read character after '\'
+	switch ch {
+	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+		// nothing to do
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		// octal notation
+		ch = s.scanDigits(ch, 8, 3)
+	case 'x':
+		// hexadecimal notation
+		ch = s.scanDigits(s.next(), 16, 2)
+	case 'u':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 4)
+	case 'U':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 8)
+	default:
+		s.err("illegal char escape")
+	}
+	return ch
+}
+
+// scanDigits scans up to n digits of the given base. For example, the octal
+// escape \123 results in the call scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+	start := n
+	for n > 0 && digitVal(ch) < base {
+		ch = s.next()
+		if ch == eof {
+			// If we see an EOF, we halt any more scanning of digits
+			// immediately.
+			break
+		}
+
+		n--
+	}
+	if n > 0 {
+		s.err("illegal char escape")
+	}
+
+	if n != start && ch != eof {
+		// we scanned all digits, put the last non-digit char back,
+		// only if we read anything at all
+		s.unread()
+	}
+
+	return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+	offs := s.srcPos.Offset - s.lastCharLen
+	ch := s.next()
+	for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
+		ch = s.next()
+	}
+
+	if ch != eof {
+		s.unread() // we got identifier, put back latest char
+	}
+
+	return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+	pos.Offset = s.srcPos.Offset - s.lastCharLen
+	switch {
+	case s.srcPos.Column > 0:
+		// common case: last character was not a '\n'
+		pos.Line = s.srcPos.Line
+		pos.Column = s.srcPos.Column
+	case s.lastLineLen > 0:
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		pos.Line = s.srcPos.Line - 1
+		pos.Column = s.lastLineLen
+	default:
+		// at the beginning of the source
+		pos.Line = 1
+		pos.Column = 1
+	}
+	return
+}
+
+// err reports a scanning error to the s.Error function. If no Error function
+// is set, the error is printed to os.Stderr by default
+func (s *Scanner) err(msg string) {
+	s.ErrorCount++
+	pos := s.recentPosition()
+
+	if s.Error != nil {
+		s.Error(pos, msg)
+		return
+	}
+
+	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal digit
+func isDecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal or hexadecimal rune
+func digitVal(ch rune) int {
+	switch {
+	case '0' <= ch && ch <= '9':
+		return int(ch - '0')
+	case 'a' <= ch && ch <= 'f':
+		return int(ch - 'a' + 10)
+	case 'A' <= ch && ch <= 'F':
+		return int(ch - 'A' + 10)
+	}
+	return 16 // larger than any legal digit val
+}
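
A minimal sketch of how the scanner above is driven: New wraps the source and Scan returns one token per call until token.EOF. The import paths are assumed to resolve against this vendor tree, and the token type's String method comes from the token package added later in this change.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/scanner"
	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	src := []byte(`count = 3 # how many`)

	s := scanner.New(src)
	for {
		tok := s.Scan()
		if tok.Type == token.EOF {
			break
		}
		// Prints lines such as "1:1 IDENT count" and "1:7 ASSIGN =".
		fmt.Println(tok.Pos, tok.Type, tok.Text)
	}
}
```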
diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
new file mode 100644
index 000000000..5f981eaa2
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
@@ -0,0 +1,241 @@
+package strconv
+
+import (
+	"errors"
+	"unicode/utf8"
+)
+
+// ErrSyntax indicates that a value does not have the right syntax for the target type.
+var ErrSyntax = errors.New("invalid syntax")
+
+// Unquote interprets s as a double-quoted HCL string literal, returning the
+// string value that s quotes. Escape sequences are decoded, except that the
+// contents of interpolation sequences (${ ... }) are passed through verbatim.
+func Unquote(s string) (t string, err error) {
+	n := len(s)
+	if n < 2 {
+		return "", ErrSyntax
+	}
+	quote := s[0]
+	if quote != s[n-1] {
+		return "", ErrSyntax
+	}
+	s = s[1 : n-1]
+
+	if quote != '"' {
+		return "", ErrSyntax
+	}
+	if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') {
+		return "", ErrSyntax
+	}
+
+	// Is it trivial?  Avoid allocation.
+	if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {
+		switch quote {
+		case '"':
+			return s, nil
+		case '\'':
+			r, size := utf8.DecodeRuneInString(s)
+			if size == len(s) && (r != utf8.RuneError || size != 1) {
+				return s, nil
+			}
+		}
+	}
+
+	var runeTmp [utf8.UTFMax]byte
+	buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
+	for len(s) > 0 {
+		// If we're starting a '${}' then let it through un-unquoted.
+		// Specifically: we don't unquote any characters within the `${}`
+		// section.
+		if s[0] == '$' && len(s) > 1 && s[1] == '{' {
+			buf = append(buf, '$', '{')
+			s = s[2:]
+
+			// Continue reading until we find the closing brace, copying as-is
+			braces := 1
+			for len(s) > 0 && braces > 0 {
+				r, size := utf8.DecodeRuneInString(s)
+				if r == utf8.RuneError {
+					return "", ErrSyntax
+				}
+
+				s = s[size:]
+
+				n := utf8.EncodeRune(runeTmp[:], r)
+				buf = append(buf, runeTmp[:n]...)
+
+				switch r {
+				case '{':
+					braces++
+				case '}':
+					braces--
+				}
+			}
+			if braces != 0 {
+				return "", ErrSyntax
+			}
+			if len(s) == 0 {
+				// If there's no string left, we're done!
+				break
+			} else {
+				// If there's more left, we need to pop back up to the top of the loop
+				// in case there's another interpolation in this string.
+				continue
+			}
+		}
+
+		if s[0] == '\n' {
+			return "", ErrSyntax
+		}
+
+		c, multibyte, ss, err := unquoteChar(s, quote)
+		if err != nil {
+			return "", err
+		}
+		s = ss
+		if c < utf8.RuneSelf || !multibyte {
+			buf = append(buf, byte(c))
+		} else {
+			n := utf8.EncodeRune(runeTmp[:], c)
+			buf = append(buf, runeTmp[:n]...)
+		}
+		if quote == '\'' && len(s) != 0 {
+			// single-quoted must be single character
+			return "", ErrSyntax
+		}
+	}
+	return string(buf), nil
+}
+
+// contains reports whether the string contains the byte c.
+func contains(s string, c byte) bool {
+	for i := 0; i < len(s); i++ {
+		if s[i] == c {
+			return true
+		}
+	}
+	return false
+}
+
+func unhex(b byte) (v rune, ok bool) {
+	c := rune(b)
+	switch {
+	case '0' <= c && c <= '9':
+		return c - '0', true
+	case 'a' <= c && c <= 'f':
+		return c - 'a' + 10, true
+	case 'A' <= c && c <= 'F':
+		return c - 'A' + 10, true
+	}
+	return
+}
+
+func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
+	// easy cases
+	switch c := s[0]; {
+	case c == quote && (quote == '\'' || quote == '"'):
+		err = ErrSyntax
+		return
+	case c >= utf8.RuneSelf:
+		r, size := utf8.DecodeRuneInString(s)
+		return r, true, s[size:], nil
+	case c != '\\':
+		return rune(s[0]), false, s[1:], nil
+	}
+
+	// hard case: c is backslash
+	if len(s) <= 1 {
+		err = ErrSyntax
+		return
+	}
+	c := s[1]
+	s = s[2:]
+
+	switch c {
+	case 'a':
+		value = '\a'
+	case 'b':
+		value = '\b'
+	case 'f':
+		value = '\f'
+	case 'n':
+		value = '\n'
+	case 'r':
+		value = '\r'
+	case 't':
+		value = '\t'
+	case 'v':
+		value = '\v'
+	case 'x', 'u', 'U':
+		n := 0
+		switch c {
+		case 'x':
+			n = 2
+		case 'u':
+			n = 4
+		case 'U':
+			n = 8
+		}
+		var v rune
+		if len(s) < n {
+			err = ErrSyntax
+			return
+		}
+		for j := 0; j < n; j++ {
+			x, ok := unhex(s[j])
+			if !ok {
+				err = ErrSyntax
+				return
+			}
+			v = v<<4 | x
+		}
+		s = s[n:]
+		if c == 'x' {
+			// single-byte string, possibly not UTF-8
+			value = v
+			break
+		}
+		if v > utf8.MaxRune {
+			err = ErrSyntax
+			return
+		}
+		value = v
+		multibyte = true
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		v := rune(c) - '0'
+		if len(s) < 2 {
+			err = ErrSyntax
+			return
+		}
+		for j := 0; j < 2; j++ { // one digit already; two more
+			x := rune(s[j]) - '0'
+			if x < 0 || x > 7 {
+				err = ErrSyntax
+				return
+			}
+			v = (v << 3) | x
+		}
+		s = s[2:]
+		if v > 255 {
+			err = ErrSyntax
+			return
+		}
+		value = v
+	case '\\':
+		value = '\\'
+	case '\'', '"':
+		if c != quote {
+			err = ErrSyntax
+			return
+		}
+		value = rune(c)
+	default:
+		err = ErrSyntax
+		return
+	}
+	tail = s
+	return
+}
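
To make the special-casing in Unquote concrete: ordinary escape sequences are decoded, while anything between ${ and its matching } is copied through untouched so HCL interpolations survive unquoting. A small sketch, assuming the hclstrconv alias for this vendored package:

```go
package main

import (
	"fmt"

	hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
)

func main() {
	// Escape sequences outside interpolations are decoded as usual.
	s, err := hclstrconv.Unquote(`"line1\nline2"`)
	fmt.Printf("%q %v\n", s, err) // "line1\nline2" <nil>

	// Everything inside ${ } passes through verbatim, braces and all.
	s, err = hclstrconv.Unquote(`"ami-${var.region}-web"`)
	fmt.Printf("%q %v\n", s, err) // "ami-${var.region}-web" <nil>
}
```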
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go
new file mode 100644
index 000000000..59c1bb72d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+type Pos struct {
+	Filename string // filename, if any
+	Offset   int    // offset, starting at 0
+	Line     int    // line number, starting at 1
+	Column   int    // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+//	file:line:column    valid position with file name
+//	line:column         valid position without file name
+//	file                invalid position with file name
+//	-                   invalid position without file name
+func (p Pos) String() string {
+	s := p.Filename
+	if p.IsValid() {
+		if s != "" {
+			s += ":"
+		}
+		s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+	}
+	if s == "" {
+		s = "-"
+	}
+	return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+	return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
+func (p Pos) After(u Pos) bool {
+	return u.Offset < p.Offset || u.Line < p.Line
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go
new file mode 100644
index 000000000..e37c0664e
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/token/token.go
@@ -0,0 +1,219 @@
+// Package token defines constants representing the lexical tokens for HCL
+// (HashiCorp Configuration Language)
+package token
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
+)
+
+// Token defines a single HCL token which can be obtained via the Scanner
+type Token struct {
+	Type Type
+	Pos  Pos
+	Text string
+	JSON bool
+}
+
+// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
+type Type int
+
+const (
+	// Special tokens
+	ILLEGAL Type = iota
+	EOF
+	COMMENT
+
+	identifier_beg
+	IDENT // literals
+	literal_beg
+	NUMBER  // 12345
+	FLOAT   // 123.45
+	BOOL    // true,false
+	STRING  // "abc"
+	HEREDOC // <<FOO\nbar\nFOO
+	literal_end
+	identifier_end
+
+	operator_beg
+	LBRACK // [
+	LBRACE // {
+	COMMA  // ,
+	PERIOD // .
+
+	RBRACK // ]
+	RBRACE // }
+
+	ASSIGN // =
+	ADD    // +
+	SUB    // -
+	operator_end
+)
+
+var tokens = [...]string{
+	ILLEGAL: "ILLEGAL",
+
+	EOF:     "EOF",
+	COMMENT: "COMMENT",
+
+	IDENT:  "IDENT",
+	NUMBER: "NUMBER",
+	FLOAT:  "FLOAT",
+	BOOL:   "BOOL",
+	STRING: "STRING",
+
+	LBRACK:  "LBRACK",
+	LBRACE:  "LBRACE",
+	COMMA:   "COMMA",
+	PERIOD:  "PERIOD",
+	HEREDOC: "HEREDOC",
+
+	RBRACK: "RBRACK",
+	RBRACE: "RBRACE",
+
+	ASSIGN: "ASSIGN",
+	ADD:    "ADD",
+	SUB:    "SUB",
+}
+
+// String returns the string corresponding to the token type t.
+func (t Type) String() string {
+	s := ""
+	if 0 <= t && t < Type(len(tokens)) {
+		s = tokens[t]
+	}
+	if s == "" {
+		s = "token(" + strconv.Itoa(int(t)) + ")"
+	}
+	return s
+}
+
+// IsIdentifier returns true for tokens corresponding to identifiers and basic
+// type literals; it returns false otherwise.
+func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
+
+// IsLiteral returns true for tokens corresponding to basic type literals; it
+// returns false otherwise.
+func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
+
+// String returns the token's literal text. Note that this is only
+// applicable for certain token types, such as token.IDENT,
+// token.STRING, etc..
+func (t Token) String() string {
+	return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
+}
+
+// Value returns the properly typed value for this token. The type of
+// the returned interface{} is guaranteed based on the Type field.
+//
+// This can only be called for literal types. If it is called for any other
+// type, this will panic.
+func (t Token) Value() interface{} {
+	switch t.Type {
+	case BOOL:
+		if t.Text == "true" {
+			return true
+		} else if t.Text == "false" {
+			return false
+		}
+
+		panic("unknown bool value: " + t.Text)
+	case FLOAT:
+		v, err := strconv.ParseFloat(t.Text, 64)
+		if err != nil {
+			panic(err)
+		}
+
+		return float64(v)
+	case NUMBER:
+		v, err := strconv.ParseInt(t.Text, 0, 64)
+		if err != nil {
+			panic(err)
+		}
+
+		return int64(v)
+	case IDENT:
+		return t.Text
+	case HEREDOC:
+		return unindentHeredoc(t.Text)
+	case STRING:
+		// Determine the Unquote method to use. If it came from JSON,
+		// then we need to use the built-in unquote since we have to
+		// escape interpolations there.
+		f := hclstrconv.Unquote
+		if t.JSON {
+			f = strconv.Unquote
+		}
+
+		// This case occurs if json null is used
+		if t.Text == "" {
+			return ""
+		}
+
+		v, err := f(t.Text)
+		if err != nil {
+			panic(fmt.Sprintf("unquote %s err: %s", t.Text, err))
+		}
+
+		return v
+	default:
+		panic(fmt.Sprintf("unimplemented Value for type: %s", t.Type))
+	}
+}
+
+// unindentHeredoc returns the content of a heredoc, removing the leading
+// indentation if the indented heredoc form ('<<-') was used.
+func unindentHeredoc(heredoc string) string {
+	// We need to find the end of the marker
+	idx := strings.IndexByte(heredoc, '\n')
+	if idx == -1 {
+		panic("heredoc doesn't contain newline")
+	}
+
+	unindent := heredoc[2] == '-'
+
+	// We can optimize if the heredoc isn't marked for indentation
+	if !unindent {
+		return string(heredoc[idx+1 : len(heredoc)-idx+1])
+	}
+
+	// We need to unindent each line based on the indentation level of the
+	// marker
+	lines := strings.Split(string(heredoc[idx+1:len(heredoc)-idx+2]), "\n")
+	whitespacePrefix := lines[len(lines)-1]
+
+	isIndented := true
+	for _, v := range lines {
+		if strings.HasPrefix(v, whitespacePrefix) {
+			continue
+		}
+
+		isIndented = false
+		break
+	}
+
+	// If all lines are not at least as indented as the terminating mark,
+	// return the heredoc as is, but trim the leading space from the marker
+	// on the final line.
+	if !isIndented {
+		return strings.TrimRight(string(heredoc[idx+1:len(heredoc)-idx+1]), " \t")
+	}
+
+	unindentedLines := make([]string, len(lines))
+	for k, v := range lines {
+		if k == len(lines)-1 {
+			unindentedLines[k] = ""
+			break
+		}
+
+		unindentedLines[k] = strings.TrimPrefix(v, whitespacePrefix)
+	}
+
+	return strings.Join(unindentedLines, "\n")
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/flatten.go b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go
@@ -0,0 +1,117 @@
+package parser
+
+import "github.com/hashicorp/hcl/hcl/ast"
+
+// flattenObjects takes an AST node, walks it, and flattens
+func flattenObjects(node ast.Node) {
+	ast.Walk(node, func(n ast.Node) (ast.Node, bool) {
+		// We only care about lists, because this is what we modify
+		list, ok := n.(*ast.ObjectList)
+		if !ok {
+			return n, true
+		}
+
+		// Rebuild the item list
+		items := make([]*ast.ObjectItem, 0, len(list.Items))
+		frontier := make([]*ast.ObjectItem, 0, len(list.Items))
+		frontier = append(frontier, list.Items...)
+		for len(frontier) > 0 {
+			// Pop the current item
+			n := len(frontier)
+			item := frontier[n-1]
+			frontier = frontier[:n-1]
+
+			switch v := item.Val.(type) {
+			case *ast.ObjectType:
+				items, frontier = flattenObjectType(v, item, items, frontier)
+			case *ast.ListType:
+				items, frontier = flattenListType(v, item, items, frontier)
+			default:
+				items = append(items, item)
+			}
+		}
+
+		// Reverse the list since the frontier model runs things backwards
+		for i := len(items)/2 - 1; i >= 0; i-- {
+			opp := len(items) - 1 - i
+			items[i], items[opp] = items[opp], items[i]
+		}
+
+		// Done! Set the original items
+		list.Items = items
+		return n, true
+	})
+}
+
+func flattenListType(
+	ot *ast.ListType,
+	item *ast.ObjectItem,
+	items []*ast.ObjectItem,
+	frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+	// If the list is empty, keep the original list
+	if len(ot.List) == 0 {
+		items = append(items, item)
+		return items, frontier
+	}
+
+	// All the elements of this object must also be objects!
+	for _, subitem := range ot.List {
+		if _, ok := subitem.(*ast.ObjectType); !ok {
+			items = append(items, item)
+			return items, frontier
+		}
+	}
+
+	// Great! We have a match go through all the items and flatten
+	for _, elem := range ot.List {
+		// Add it to the frontier so that we can recurse
+		frontier = append(frontier, &ast.ObjectItem{
+			Keys:        item.Keys,
+			Assign:      item.Assign,
+			Val:         elem,
+			LeadComment: item.LeadComment,
+			LineComment: item.LineComment,
+		})
+	}
+
+	return items, frontier
+}
+
+func flattenObjectType(
+	ot *ast.ObjectType,
+	item *ast.ObjectItem,
+	items []*ast.ObjectItem,
+	frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+	// If the list has no items we do not have to flatten anything
+	if ot.List.Items == nil {
+		items = append(items, item)
+		return items, frontier
+	}
+
+	// All the elements of this object must also be objects!
+	for _, subitem := range ot.List.Items {
+		if _, ok := subitem.Val.(*ast.ObjectType); !ok {
+			items = append(items, item)
+			return items, frontier
+		}
+	}
+
+	// Great! We have a match go through all the items and flatten
+	for _, subitem := range ot.List.Items {
+		// Copy the new key
+		keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys))
+		copy(keys, item.Keys)
+		copy(keys[len(item.Keys):], subitem.Keys)
+
+		// Add it to the frontier so that we can recurse
+		frontier = append(frontier, &ast.ObjectItem{
+			Keys:        keys,
+			Assign:      item.Assign,
+			Val:         subitem.Val,
+			LeadComment: item.LeadComment,
+			LineComment: item.LineComment,
+		})
+	}
+
+	return items, frontier
+}
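
The effect of this flattening pass is easiest to see end to end. A sketch using the JSON parser added just below (package paths as vendored here): three nested single-key objects collapse into one item whose key chain mirrors HCL's `resource "aws_instance" "web" { ... }` block form.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/json/parser"
)

func main() {
	src := []byte(`{"resource": {"aws_instance": {"web": {"ami": "abc"}}}}`)

	f, err := parser.Parse(src)
	if err != nil {
		panic(err)
	}

	// After flattenObjects has run, the root list holds a single item
	// whose keys are "resource", "aws_instance" and "web".
	for _, item := range f.Node.(*ast.ObjectList).Items {
		for _, k := range item.Keys {
			fmt.Print(k.Token.Text, " ")
		}
		fmt.Println()
	}
}
```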
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
new file mode 100644
index 000000000..125a5f072
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
@@ -0,0 +1,313 @@
+package parser
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	hcltoken "github.com/hashicorp/hcl/hcl/token"
+	"github.com/hashicorp/hcl/json/scanner"
+	"github.com/hashicorp/hcl/json/token"
+)
+
+type Parser struct {
+	sc *scanner.Scanner
+
+	// Last read token
+	tok       token.Token
+	commaPrev token.Token
+
+	enableTrace bool
+	indent      int
+	n           int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+	return &Parser{
+		sc: scanner.New(src),
+	}
+}
+
+// Parse parses the given source and returns the abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+	p := newParser(src)
+	return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse parses the given source and returns the abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+	f := &ast.File{}
+	var err, scerr error
+	p.sc.Error = func(pos token.Pos, msg string) {
+		scerr = fmt.Errorf("%s: %s", pos, msg)
+	}
+
+	// The root must be an object in JSON
+	object, err := p.object()
+	if scerr != nil {
+		return nil, scerr
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	// We make our final node an object list so it is more HCL compatible
+	f.Node = object.List
+
+	// Flatten it, which finds patterns and turns them into more HCL-like
+	// AST trees.
+	flattenObjects(f.Node)
+
+	return f, nil
+}
+
+func (p *Parser) objectList() (*ast.ObjectList, error) {
+	defer un(trace(p, "ParseObjectList"))
+	node := &ast.ObjectList{}
+
+	for {
+		n, err := p.objectItem()
+		if err == errEofToken {
+			break // we are finished
+		}
+
+		// we don't return a nil node, because the caller might want to use the
+		// already collected items.
+		if err != nil {
+			return node, err
+		}
+
+		node.Add(n)
+
+		// Check for a followup comma. If it isn't a comma, then we're done
+		if tok := p.scan(); tok.Type != token.COMMA {
+			break
+		}
+	}
+
+	return node, nil
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+	defer un(trace(p, "ParseObjectItem"))
+
+	keys, err := p.objectKey()
+	if err != nil {
+		return nil, err
+	}
+
+	o := &ast.ObjectItem{
+		Keys: keys,
+	}
+
+	switch p.tok.Type {
+	case token.COLON:
+		pos := p.tok.Pos
+		o.Assign = hcltoken.Pos{
+			Filename: pos.Filename,
+			Offset:   pos.Offset,
+			Line:     pos.Line,
+			Column:   pos.Column,
+		}
+
+		o.Val, err = p.objectValue()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return o, nil
+}
+
+// objectKey parses an object key and returns an ObjectKey AST
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+	keyCount := 0
+	keys := make([]*ast.ObjectKey, 0)
+
+	for {
+		tok := p.scan()
+		switch tok.Type {
+		case token.EOF:
+			return nil, errEofToken
+		case token.STRING:
+			keyCount++
+			keys = append(keys, &ast.ObjectKey{
+				Token: p.tok.HCLToken(),
+			})
+		case token.COLON:
+			// If we have a zero keycount it means that we never got
+			// an object key, i.e. `{ :`. This is a syntax error.
+			if keyCount == 0 {
+				return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+			}
+
+			// Done
+			return keys, nil
+		case token.ILLEGAL:
+			return nil, errors.New("illegal")
+		default:
+			return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+		}
+	}
+}
+
+// objectValue parses any type of value, such as number, bool, string, object,
+// or list.
+func (p *Parser) objectValue() (ast.Node, error) {
+	defer un(trace(p, "ParseObjectValue"))
+	tok := p.scan()
+
+	switch tok.Type {
+	case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING:
+		return p.literalType()
+	case token.LBRACE:
+		return p.objectType()
+	case token.LBRACK:
+		return p.listType()
+	case token.EOF:
+		return nil, errEofToken
+	}
+
+	return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok)
+}
+
+// object parses the root object; in JSON the root of a document must be an
+// object type.
+func (p *Parser) object() (*ast.ObjectType, error) {
+	defer un(trace(p, "ParseType"))
+	tok := p.scan()
+
+	switch tok.Type {
+	case token.LBRACE:
+		return p.objectType()
+	case token.EOF:
+		return nil, errEofToken
+	}
+
+	return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok)
+}
+
+// objectType parses an object type and returns a ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+	defer un(trace(p, "ParseObjectType"))
+
+	// we assume that the currently scanned token is a LBRACE
+	o := &ast.ObjectType{}
+
+	l, err := p.objectList()
+
+	// if we hit RBRACE, we are good to go (it means we parsed all items); if
+	// it's not a RBRACE, it's a syntax error and we just return it.
+	if err != nil && p.tok.Type != token.RBRACE {
+		return nil, err
+	}
+
+	o.List = l
+	return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+	defer un(trace(p, "ParseListType"))
+
+	// we assume that the currently scanned token is a LBRACK
+	l := &ast.ListType{}
+
+	for {
+		tok := p.scan()
+		switch tok.Type {
+		case token.NUMBER, token.FLOAT, token.STRING:
+			node, err := p.literalType()
+			if err != nil {
+				return nil, err
+			}
+
+			l.Add(node)
+		case token.COMMA:
+			continue
+		case token.LBRACE:
+			node, err := p.objectType()
+			if err != nil {
+				return nil, err
+			}
+
+			l.Add(node)
+		case token.BOOL:
+			// TODO(arslan) should we support? not supported by HCL yet
+		case token.LBRACK:
+			// TODO(arslan) should we support nested lists? Even though it's
+			// written in README of HCL, it's not a part of the grammar
+			// (not defined in parse.y)
+		case token.RBRACK:
+			// finished
+			return l, nil
+		default:
+			return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type)
+		}
+
+	}
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+	defer un(trace(p, "ParseLiteral"))
+
+	return &ast.LiteralType{
+		Token: p.tok.HCLToken(),
+	}, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead.
+func (p *Parser) scan() token.Token {
+	// If we have a token on the buffer, then return it.
+	if p.n != 0 {
+		p.n = 0
+		return p.tok
+	}
+
+	p.tok = p.sc.Scan()
+	return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+	p.n = 1
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+	if !p.enableTrace {
+		return
+	}
+
+	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+	const n = len(dots)
+	fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+	i := 2 * p.indent
+	for i > n {
+		fmt.Print(dots)
+		i -= n
+	}
+	// i <= n
+	fmt.Print(dots[0:i])
+	fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+	p.printTrace(msg, "(")
+	p.indent++
+	return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+	p.indent--
+	p.printTrace(")")
+}
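
The scan/unscan pair above implements one token of lookahead with nothing more than the last token read and a 0/1 flag (p.n). The same pattern in isolation, as a self-contained sketch with a stub token source standing in for the real scanner:

```go
package main

import "fmt"

// lookahead reproduces the parser's one-token pushback: scan returns the
// buffered token when unscan was called since the last read; otherwise it
// pulls a fresh token from the stub source.
type lookahead struct {
	src []string // stand-in for the real scanner
	pos int
	tok string // last token read
	n   int    // buffer size (max = 1)
}

func (l *lookahead) scan() string {
	if l.n != 0 {
		l.n = 0
		return l.tok
	}
	if l.pos < len(l.src) {
		l.tok = l.src[l.pos]
		l.pos++
	} else {
		l.tok = "EOF"
	}
	return l.tok
}

func (l *lookahead) unscan() { l.n = 1 }

func main() {
	l := &lookahead{src: []string{"{", `"key"`, ":", `"value"`, "}"}}
	fmt.Println(l.scan())           // {
	peeked := l.scan()              // "key"
	l.unscan()                      // push it back
	fmt.Println(l.scan() == peeked) // true: the same token comes back
}
```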
diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
new file mode 100644
index 000000000..fe3f0f095
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
@@ -0,0 +1,451 @@
+package scanner
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/hashicorp/hcl/json/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+	buf *bytes.Buffer // Source buffer for advancing and scanning
+	src []byte        // Source buffer for immutable access
+
+	// Source Position
+	srcPos  token.Pos // current position
+	prevPos token.Pos // previous position, used for peek() method
+
+	lastCharLen int // length of last character in bytes
+	lastLineLen int // length of last line in characters (for correct column reporting)
+
+	tokStart int // token text start position
+	tokEnd   int // token text end position
+
+	// Error is called for each error encountered. If no Error
+	// function is set, the error is reported to os.Stderr.
+	Error func(pos token.Pos, msg string)
+
+	// ErrorCount is incremented by one for each error encountered.
+	ErrorCount int
+
+	// tokPos is the start position of the most recently scanned token; set by
+	// Scan. The Filename field is always left untouched by the Scanner. If
+	// an error is reported (via Error) and the position is invalid, the
+	// scanner was not inside a token.
+	tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
+func New(src []byte) *Scanner {
+	// even though we accept a src, we read from an io.Reader-compatible type
+	// (*bytes.Buffer), so in the future we could easily switch to a streaming
+	// read.
+	b := bytes.NewBuffer(src)
+	s := &Scanner{
+		buf: b,
+		src: src,
+	}
+
+	// source positions always start at line 1
+	s.srcPos.Line = 1
+	return s
+}
+
+// next reads the next rune from the buffered reader. It returns rune(0) if
+// an error occurs (or io.EOF is reached).
+func (s *Scanner) next() rune {
+	ch, size, err := s.buf.ReadRune()
+	if err != nil {
+		// advance for error reporting
+		s.srcPos.Column++
+		s.srcPos.Offset += size
+		s.lastCharLen = size
+		return eof
+	}
+
+	if ch == utf8.RuneError && size == 1 {
+		s.srcPos.Column++
+		s.srcPos.Offset += size
+		s.lastCharLen = size
+		s.err("illegal UTF-8 encoding")
+		return ch
+	}
+
+	// remember last position
+	s.prevPos = s.srcPos
+
+	s.srcPos.Column++
+	s.lastCharLen = size
+	s.srcPos.Offset += size
+
+	if ch == '\n' {
+		s.srcPos.Line++
+		s.lastLineLen = s.srcPos.Column
+		s.srcPos.Column = 0
+	}
+
+	// debug
+	// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+	return ch
+}
+
+// unread puts the previously read rune back and restores the previous source position
+func (s *Scanner) unread() {
+	if err := s.buf.UnreadRune(); err != nil {
+		panic(err) // UnreadRune only fails if the previous operation was not a successful ReadRune; that is a programmer error
+	}
+	s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+	peek, _, err := s.buf.ReadRune()
+	if err != nil {
+		return eof
+	}
+
+	s.buf.UnreadRune()
+	return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+	ch := s.next()
+
+	// skip white space
+	for isWhitespace(ch) {
+		ch = s.next()
+	}
+
+	var tok token.Type
+
+	// token text markings
+	s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+	// token position: the initial next() has already moved the offset by the
+	// size of one rune, but we are interested in the token's starting point
+	s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+	if s.srcPos.Column > 0 {
+		// common case: last character was not a '\n'
+		s.tokPos.Line = s.srcPos.Line
+		s.tokPos.Column = s.srcPos.Column
+	} else {
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		s.tokPos.Line = s.srcPos.Line - 1
+		s.tokPos.Column = s.lastLineLen
+	}
+
+	switch {
+	case isLetter(ch):
+		lit := s.scanIdentifier()
+		if lit == "true" || lit == "false" {
+			tok = token.BOOL
+		} else if lit == "null" {
+			tok = token.NULL
+		} else {
+			s.err("illegal char")
+		}
+	case isDecimal(ch):
+		tok = s.scanNumber(ch)
+	default:
+		switch ch {
+		case eof:
+			tok = token.EOF
+		case '"':
+			tok = token.STRING
+			s.scanString()
+		case '.':
+			tok = token.PERIOD
+			ch = s.peek()
+			if isDecimal(ch) {
+				tok = token.FLOAT
+				ch = s.scanMantissa(ch)
+				ch = s.scanExponent(ch)
+			}
+		case '[':
+			tok = token.LBRACK
+		case ']':
+			tok = token.RBRACK
+		case '{':
+			tok = token.LBRACE
+		case '}':
+			tok = token.RBRACE
+		case ',':
+			tok = token.COMMA
+		case ':':
+			tok = token.COLON
+		case '-':
+			if isDecimal(s.peek()) {
+				ch := s.next()
+				tok = s.scanNumber(ch)
+			} else {
+				s.err("illegal char")
+			}
+		default:
+			s.err("illegal char: " + string(ch))
+		}
+	}
+
+	// finish token ending
+	s.tokEnd = s.srcPos.Offset
+
+	// create token literal
+	var tokenText string
+	if s.tokStart >= 0 {
+		tokenText = string(s.src[s.tokStart:s.tokEnd])
+	}
+	s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+	return token.Token{
+		Type: tok,
+		Pos:  s.tokPos,
+		Text: tokenText,
+	}
+}
+
+// scanNumber scans a JSON number literal starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+	zero := ch == '0'
+	pos := s.srcPos
+
+	s.scanMantissa(ch)
+	ch = s.next() // seek forward
+	if ch == 'e' || ch == 'E' {
+		ch = s.scanExponent(ch)
+		return token.FLOAT
+	}
+
+	if ch == '.' {
+		ch = s.scanFraction(ch)
+		if ch == 'e' || ch == 'E' {
+			ch = s.next()
+			ch = s.scanExponent(ch)
+		}
+		return token.FLOAT
+	}
+
+	if ch != eof {
+		s.unread()
+	}
+
+	// JSON does not allow leading zeros: if the literal started with 0 and
+	// more digits followed, report an error
+	if zero && pos != s.srcPos {
+		s.err("numbers cannot start with 0")
+	}
+
+	return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the given rune. It returns
+// the next non-decimal rune, which is used to determine whether a fraction or
+// an exponent follows.
+func (s *Scanner) scanMantissa(ch rune) rune {
+	scanned := false
+	for isDecimal(ch) {
+		ch = s.next()
+		scanned = true
+	}
+
+	if scanned && ch != eof {
+		s.unread()
+	}
+	return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+	if ch == '.' {
+		ch = s.peek() // we peek just to see if we can move forward
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+	if ch == 'e' || ch == 'E' {
+		ch = s.next()
+		if ch == '-' || ch == '+' {
+			ch = s.next()
+		}
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+	braces := 0
+	for {
+		// '"' opening already consumed
+		// read character after quote
+		ch := s.next()
+
+		if ch == '\n' || ch < 0 || ch == eof {
+			s.err("literal not terminated")
+			return
+		}
+
+		if ch == '"' {
+			break
+		}
+
+		// If we're going into a ${} then we can ignore quotes for a while
+		if braces == 0 && ch == '$' && s.peek() == '{' {
+			braces++
+			s.next()
+		} else if braces > 0 && ch == '{' {
+			braces++
+		}
+		if braces > 0 && ch == '}' {
+			braces--
+		}
+
+		if ch == '\\' {
+			s.scanEscape()
+		}
+	}
+
+	return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+	// http://en.cppreference.com/w/cpp/language/escape
+	ch := s.next() // read character after '\'
+	switch ch {
+	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+		// nothing to do
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		// octal notation
+		ch = s.scanDigits(ch, 8, 3)
+	case 'x':
+		// hexadecimal notation
+		ch = s.scanDigits(s.next(), 16, 2)
+	case 'u':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 4)
+	case 'U':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 8)
+	default:
+		s.err("illegal char escape")
+	}
+	return ch
+}
+
+// scanDigits scans up to n digits of the given base. For example, the octal
+// escape \123 results in the call scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+	start := n
+	for n > 0 && digitVal(ch) < base {
+		ch = s.next()
+		if ch == eof {
+			// If we see an EOF, we halt any more scanning of digits
+			// immediately.
+			break
+		}
+
+		n--
+	}
+	if n > 0 {
+		s.err("illegal char escape")
+	}
+
+	if n != start && ch != eof {
+		// we scanned all digits, put the last non-digit char back,
+		// only if we read anything at all
+		s.unread()
+	}
+	return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+	offs := s.srcPos.Offset - s.lastCharLen
+	ch := s.next()
+	for isLetter(ch) || isDigit(ch) || ch == '-' {
+		ch = s.next()
+	}
+
+	if ch != eof {
+		s.unread() // we got identifier, put back latest char
+	}
+
+	return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+	pos.Offset = s.srcPos.Offset - s.lastCharLen
+	switch {
+	case s.srcPos.Column > 0:
+		// common case: last character was not a '\n'
+		pos.Line = s.srcPos.Line
+		pos.Column = s.srcPos.Column
+	case s.lastLineLen > 0:
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		pos.Line = s.srcPos.Line - 1
+		pos.Column = s.lastLineLen
+	default:
+		// at the beginning of the source
+		pos.Line = 1
+		pos.Column = 1
+	}
+	return
+}
+
+// err reports a scanning error to the s.Error function. If no Error function
+// is set, the error is printed to os.Stderr by default
+func (s *Scanner) err(msg string) {
+	s.ErrorCount++
+	pos := s.recentPosition()
+
+	if s.Error != nil {
+		s.Error(pos, msg)
+		return
+	}
+
+	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a digit
+func isDigit(ch rune) bool {
+	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal digit
+func isDecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal or hexadecimal rune
+func digitVal(ch rune) int {
+	switch {
+	case '0' <= ch && ch <= '9':
+		return int(ch - '0')
+	case 'a' <= ch && ch <= 'f':
+		return int(ch - 'a' + 10)
+	case 'A' <= ch && ch <= 'F':
+		return int(ch - 'A' + 10)
+	}
+	return 16 // larger than any legal digit val
+}
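
One place this JSON scanner diverges from the HCL scanner earlier in this change is scanNumber: JSON forbids leading zeros, and the failure is reported through the pluggable Error hook. A short sketch of that behavior:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/json/scanner"
	"github.com/hashicorp/hcl/json/token"
)

func main() {
	s := scanner.New([]byte(`0123`))

	// Capture errors instead of letting them go to os.Stderr.
	s.Error = func(pos token.Pos, msg string) {
		// Prints something like "1:6: numbers cannot start with 0".
		fmt.Printf("%s: %s\n", pos, msg)
	}

	s.Scan()
	fmt.Println("errors:", s.ErrorCount) // errors: 1
}
```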
diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go
new file mode 100644
index 000000000..59c1bb72d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+type Pos struct {
+	Filename string // filename, if any
+	Offset   int    // offset, starting at 0
+	Line     int    // line number, starting at 1
+	Column   int    // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+//	file:line:column    valid position with file name
+//	line:column         valid position without file name
+//	file                invalid position with file name
+//	-                   invalid position without file name
+func (p Pos) String() string {
+	s := p.Filename
+	if p.IsValid() {
+		if s != "" {
+			s += ":"
+		}
+		s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+	}
+	if s == "" {
+		s = "-"
+	}
+	return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+	return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
+func (p Pos) After(u Pos) bool {
+	return u.Offset < p.Offset || u.Line < p.Line
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go
new file mode 100644
index 000000000..95a0c3eee
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/token.go
@@ -0,0 +1,118 @@
+package token
+
+import (
+	"fmt"
+	"strconv"
+
+	hcltoken "github.com/hashicorp/hcl/hcl/token"
+)
+
+// Token defines a single HCL token which can be obtained via the Scanner
+type Token struct {
+	Type Type
+	Pos  Pos
+	Text string
+}
+
+// Type is the set of lexical tokens of the JSON variant of HCL (HashiCorp Configuration Language)
+type Type int
+
+const (
+	// Special tokens
+	ILLEGAL Type = iota
+	EOF
+
+	identifier_beg
+	literal_beg
+	NUMBER // 12345
+	FLOAT  // 123.45
+	BOOL   // true,false
+	STRING // "abc"
+	NULL   // null
+	literal_end
+	identifier_end
+
+	operator_beg
+	LBRACK // [
+	LBRACE // {
+	COMMA  // ,
+	PERIOD // .
+	COLON  // :
+
+	RBRACK // ]
+	RBRACE // }
+
+	operator_end
+)
+
+var tokens = [...]string{
+	ILLEGAL: "ILLEGAL",
+
+	EOF: "EOF",
+
+	NUMBER: "NUMBER",
+	FLOAT:  "FLOAT",
+	BOOL:   "BOOL",
+	STRING: "STRING",
+	NULL:   "NULL",
+
+	LBRACK: "LBRACK",
+	LBRACE: "LBRACE",
+	COMMA:  "COMMA",
+	PERIOD: "PERIOD",
+	COLON:  "COLON",
+
+	RBRACK: "RBRACK",
+	RBRACE: "RBRACE",
+}
+
+// String returns the string corresponding to the token type t.
+func (t Type) String() string {
+	s := ""
+	if 0 <= t && t < Type(len(tokens)) {
+		s = tokens[t]
+	}
+	if s == "" {
+		s = "token(" + strconv.Itoa(int(t)) + ")"
+	}
+	return s
+}
+
+// IsIdentifier returns true for tokens corresponding to identifiers and basic
+// type literals; it returns false otherwise.
+func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
+
+// IsLiteral returns true for tokens corresponding to basic type literals; it
+// returns false otherwise.
+func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
+
+// String returns the token's literal text. Note that this is only
+// applicable for certain token types, such as token.IDENT,
+// token.STRING, etc..
+func (t Token) String() string {
+	return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
+}
+
+// HCLToken converts this token to an HCL token.
+//
+// The token type must be a literal type or this will panic.
+func (t Token) HCLToken() hcltoken.Token {
+	switch t.Type {
+	case BOOL:
+		return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
+	case FLOAT:
+		return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
+	case NULL:
+		return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
+	case NUMBER:
+		return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
+	case STRING:
+		return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
+	default:
+		panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
+	}
+}
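
HCLToken is the bridge between the two token vocabularies; note that JSON strings are flagged with JSON: true so the HCL side later unquotes them with the standard library instead of the interpolation-aware Unquote. A small sketch (the jsontoken alias is an assumption for readability):

```go
package main

import (
	"fmt"

	jsontoken "github.com/hashicorp/hcl/json/token"
)

func main() {
	t := jsontoken.Token{Type: jsontoken.STRING, Text: `"hello"`}
	h := t.HCLToken()
	fmt.Println(h.Type, h.Text, h.JSON) // STRING "hello" true

	// null carries no value: it maps to an empty HCL string.
	n := jsontoken.Token{Type: jsontoken.NULL}
	fmt.Println(n.HCLToken().Type, n.HCLToken().Text == "") // STRING true
}
```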
diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go
new file mode 100644
index 000000000..d9993c292
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/lex.go
@@ -0,0 +1,38 @@
+package hcl
+
+import (
+	"unicode"
+	"unicode/utf8"
+)
+
+type lexModeValue byte
+
+const (
+	lexModeUnknown lexModeValue = iota
+	lexModeHcl
+	lexModeJson
+)
+
+// lexMode returns whether we're going to be parsing in JSON
+// mode or HCL mode.
+func lexMode(v []byte) lexModeValue {
+	var (
+		r      rune
+		w      int
+		offset int
+	)
+
+	for {
+		r, w = utf8.DecodeRune(v[offset:])
+		offset += w
+		if unicode.IsSpace(r) {
+			continue
+		}
+		if r == '{' {
+			return lexModeJson
+		}
+		break
+	}
+
+	return lexModeHcl
+}
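
Since lexMode is unexported, the dispatch rule is easiest to exercise from inside the package. A hypothetical test file in package hcl illustrating it: the first non-space rune decides the format, and '{' means JSON.

```go
package hcl

import "testing"

func TestLexMode(t *testing.T) {
	if lexMode([]byte(`  {"a": 1}`)) != lexModeJson {
		t.Fatal("expected JSON mode for input starting with '{'")
	}
	if lexMode([]byte("a = 1\n")) != lexModeHcl {
		t.Fatal("expected HCL mode for non-'{' input")
	}
}
```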
diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go
new file mode 100644
index 000000000..1fca53c4c
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/parse.go
@@ -0,0 +1,39 @@
+package hcl
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	hclParser "github.com/hashicorp/hcl/hcl/parser"
+	jsonParser "github.com/hashicorp/hcl/json/parser"
+)
+
+// ParseBytes accepts a byte slice as input and returns the AST tree.
+//
+// Input can be either JSON or HCL.
+func ParseBytes(in []byte) (*ast.File, error) {
+	return parse(in)
+}
+
+// ParseString accepts a string as input and returns the AST tree.
+func ParseString(input string) (*ast.File, error) {
+	return parse([]byte(input))
+}
+
+func parse(in []byte) (*ast.File, error) {
+	switch lexMode(in) {
+	case lexModeHcl:
+		return hclParser.Parse(in)
+	case lexModeJson:
+		return jsonParser.Parse(in)
+	}
+
+	return nil, fmt.Errorf("unknown config format")
+}
+
+// Parse parses the given input and returns the root object.
+//
+// The input format can be either HCL or JSON.
+func Parse(input string) (*ast.File, error) {
+	return parse([]byte(input))
+}
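
Putting lex.go and parse.go together: the one entry point accepts either syntax and hands off to the matching parser. A minimal sketch against the vendored package:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

func main() {
	for _, src := range []string{
		`name = "lazygit"`,    // HCL
		`{"name": "lazygit"}`, // JSON
	} {
		f, err := hcl.Parse(src)
		if err != nil {
			panic(err)
		}
		// Both inputs yield the same root shape: *ast.ObjectList.
		fmt.Printf("root node: %T\n", f.Node)
	}
}
```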
diff --git a/vendor/github.com/magiconair/properties/LICENSE b/vendor/github.com/magiconair/properties/LICENSE
new file mode 100644
index 000000000..b387087c5
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/LICENSE
@@ -0,0 +1,25 @@
+goproperties - properties file decoder for Go
+
+Copyright (c) 2013-2018 - Frank Schroeder
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/magiconair/properties/decode.go b/vendor/github.com/magiconair/properties/decode.go
new file mode 100644
index 000000000..3ebf8049c
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/decode.go
@@ -0,0 +1,289 @@
+// Copyright 2018 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Decode assigns property values to exported fields of a struct.
+//
+// Decode traverses v recursively and returns an error if a value cannot be
+// converted to the field type or a required value is missing for a field.
+//
+// The following type dependent decodings are used:
+//
+// String, boolean, numeric fields have the value of the property key assigned.
+// The property key name is the name of the field. A different key and a default
+// value can be set in the field's tag. Fields without default value are
+// required. If the value cannot be converted to the field type an error is
+// returned.
+//
+// time.Duration fields have the result of time.ParseDuration() assigned.
+//
+// time.Time fields have the value of time.Parse() assigned. The default layout
+// is time.RFC3339 but can be set in the field's tag.
+//
+// Arrays and slices of string, boolean, numeric, time.Duration and time.Time
+// fields have the value interpreted as a comma separated list of values. The
+// individual values are trimmed of whitespace and empty values are ignored. A
+// default value can be provided as a semicolon separated list in the field's
+// tag.
+//
+// Struct fields are decoded recursively using the field name plus "." as
+// prefix. The prefix (without dot) can be overridden in the field's tag.
+// Default values are not supported in the field's tag. Specify them on the
+// fields of the inner struct instead.
+//
+// Map fields must have a key of type string and are decoded recursively by
+// using the field's name plus "." as prefix and the next element of the key
+// name as map key. The prefix (without dot) can be overridden in the field's
+// tag. Default values are not supported.
+//
+// Examples:
+//
+//     // Field is ignored.
+//     Field int `properties:"-"`
+//
+//     // Field is assigned value of 'Field'.
+//     Field int
+//
+//     // Field is assigned value of 'myName'.
+//     Field int `properties:"myName"`
+//
+//     // Field is assigned value of key 'myName' and has a default
+//     // value 15 if the key does not exist.
+//     Field int `properties:"myName,default=15"`
+//
+//     // Field is assigned value of key 'Field' and has a default
+//     // value 15 if the key does not exist.
+//     Field int `properties:",default=15"`
+//
+//     // Field is assigned value of key 'date' and the date
+//     // is in format 2006-01-02
+//     Field time.Time `properties:"date,layout=2006-01-02"`
+//
+//     // Field is assigned the non-empty and whitespace trimmed
+//     // values of key 'Field' split by commas.
+//     Field []string
+//
+//     // Field is assigned the non-empty and whitespace trimmed
+//     // values of key 'Field' split by commas and has a default
+//     // value ["a", "b", "c"] if the key does not exist.
+//     Field []string `properties:",default=a;b;c"`
+//
+//     // Field is decoded recursively with "Field." as key prefix.
+//     Field SomeStruct
+//
+//     // Field is decoded recursively with "myName." as key prefix.
+//     Field SomeStruct `properties:"myName"`
+//
+//     // Field is decoded recursively with "Field." as key prefix
+//     // and the next dotted element of the key as map key.
+//     Field map[string]string
+//
+//     // Field is decoded recursively with "myName." as key prefix
+//     // and the next dotted element of the key as map key.
+//     Field map[string]string `properties:"myName"`
+func (p *Properties) Decode(x interface{}) error {
+	t, v := reflect.TypeOf(x), reflect.ValueOf(x)
+	if t.Kind() != reflect.Ptr || v.Elem().Type().Kind() != reflect.Struct {
+		return fmt.Errorf("not a pointer to struct: %s", t)
+	}
+	if err := dec(p, "", nil, nil, v); err != nil {
+		return err
+	}
+	return nil
+}
+
+func dec(p *Properties, key string, def *string, opts map[string]string, v reflect.Value) error {
+	t := v.Type()
+
+	// value returns the property value for key or the default if provided.
+	value := func() (string, error) {
+		if val, ok := p.Get(key); ok {
+			return val, nil
+		}
+		if def != nil {
+			return *def, nil
+		}
+		return "", fmt.Errorf("missing required key %s", key)
+	}
+
+	// conv converts a string to a value of the given type.
+	conv := func(s string, t reflect.Type) (val reflect.Value, err error) {
+		var v interface{}
+
+		switch {
+		case isDuration(t):
+			v, err = time.ParseDuration(s)
+
+		case isTime(t):
+			layout := opts["layout"]
+			if layout == "" {
+				layout = time.RFC3339
+			}
+			v, err = time.Parse(layout, s)
+
+		case isBool(t):
+			v, err = boolVal(s), nil
+
+		case isString(t):
+			v, err = s, nil
+
+		case isFloat(t):
+			v, err = strconv.ParseFloat(s, 64)
+
+		case isInt(t):
+			v, err = strconv.ParseInt(s, 10, 64)
+
+		case isUint(t):
+			v, err = strconv.ParseUint(s, 10, 64)
+
+		default:
+			return reflect.Zero(t), fmt.Errorf("unsupported type %s", t)
+		}
+		if err != nil {
+			return reflect.Zero(t), err
+		}
+		return reflect.ValueOf(v).Convert(t), nil
+	}
+
+	// keydef returns the property key and the default value based on the
+	// name of the struct field and the options in the tag.
+	keydef := func(f reflect.StructField) (string, *string, map[string]string) {
+		_key, _opts := parseTag(f.Tag.Get("properties"))
+
+		var _def *string
+		if d, ok := _opts["default"]; ok {
+			_def = &d
+		}
+		if _key != "" {
+			return _key, _def, _opts
+		}
+		return f.Name, _def, _opts
+	}
+
+	switch {
+	case isDuration(t) || isTime(t) || isBool(t) || isString(t) || isFloat(t) || isInt(t) || isUint(t):
+		s, err := value()
+		if err != nil {
+			return err
+		}
+		val, err := conv(s, t)
+		if err != nil {
+			return err
+		}
+		v.Set(val)
+
+	case isPtr(t):
+		return dec(p, key, def, opts, v.Elem())
+
+	case isStruct(t):
+		for i := 0; i < v.NumField(); i++ {
+			fv := v.Field(i)
+			fk, def, opts := keydef(t.Field(i))
+			if !fv.CanSet() {
+				return fmt.Errorf("cannot set %s", t.Field(i).Name)
+			}
+			if fk == "-" {
+				continue
+			}
+			if key != "" {
+				fk = key + "." + fk
+			}
+			if err := dec(p, fk, def, opts, fv); err != nil {
+				return err
+			}
+		}
+		return nil
+
+	case isArray(t):
+		val, err := value()
+		if err != nil {
+			return err
+		}
+		vals := split(val, ";")
+		a := reflect.MakeSlice(t, 0, len(vals))
+		for _, s := range vals {
+			val, err := conv(s, t.Elem())
+			if err != nil {
+				return err
+			}
+			a = reflect.Append(a, val)
+		}
+		v.Set(a)
+
+	case isMap(t):
+		valT := t.Elem()
+		m := reflect.MakeMap(t)
+		for postfix := range p.FilterStripPrefix(key + ".").m {
+			pp := strings.SplitN(postfix, ".", 2)
+			mk, mv := pp[0], reflect.New(valT)
+			if err := dec(p, key+"."+mk, nil, nil, mv); err != nil {
+				return err
+			}
+			m.SetMapIndex(reflect.ValueOf(mk), mv.Elem())
+		}
+		v.Set(m)
+
+	default:
+		return fmt.Errorf("unsupported type %s", t)
+	}
+	return nil
+}
+
+// split splits a string on sep, trims whitespace of elements
+// and omits empty elements
+func split(s string, sep string) []string {
+	var a []string
+	for _, v := range strings.Split(s, sep) {
+		if v = strings.TrimSpace(v); v != "" {
+			a = append(a, v)
+		}
+	}
+	return a
+}
+
+// parseTag parses a tag string of the form "key,k=v,k=v,..."
+func parseTag(tag string) (key string, opts map[string]string) {
+	opts = map[string]string{}
+	for i, s := range strings.Split(tag, ",") {
+		if i == 0 {
+			key = s
+			continue
+		}
+
+		pp := strings.SplitN(s, "=", 2)
+		if len(pp) == 1 {
+			opts[pp[0]] = ""
+		} else {
+			opts[pp[0]] = pp[1]
+		}
+	}
+	return key, opts
+}
+
+func isArray(t reflect.Type) bool    { return t.Kind() == reflect.Array || t.Kind() == reflect.Slice }
+func isBool(t reflect.Type) bool     { return t.Kind() == reflect.Bool }
+func isDuration(t reflect.Type) bool { return t == reflect.TypeOf(time.Second) }
+func isMap(t reflect.Type) bool      { return t.Kind() == reflect.Map }
+func isPtr(t reflect.Type) bool      { return t.Kind() == reflect.Ptr }
+func isString(t reflect.Type) bool   { return t.Kind() == reflect.String }
+func isStruct(t reflect.Type) bool   { return t.Kind() == reflect.Struct }
+func isTime(t reflect.Type) bool     { return t == reflect.TypeOf(time.Time{}) }
+func isFloat(t reflect.Type) bool {
+	return t.Kind() == reflect.Float32 || t.Kind() == reflect.Float64
+}
+func isInt(t reflect.Type) bool {
+	return t.Kind() == reflect.Int || t.Kind() == reflect.Int8 || t.Kind() == reflect.Int16 || t.Kind() == reflect.Int32 || t.Kind() == reflect.Int64
+}
+func isUint(t reflect.Type) bool {
+	return t.Kind() == reflect.Uint || t.Kind() == reflect.Uint8 || t.Kind() == reflect.Uint16 || t.Kind() == reflect.Uint32 || t.Kind() == reflect.Uint64
+}
diff --git a/vendor/github.com/magiconair/properties/doc.go b/vendor/github.com/magiconair/properties/doc.go
new file mode 100644
index 000000000..f8822da2b
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/doc.go
@@ -0,0 +1,156 @@
+// Copyright 2018 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package properties provides functions for reading and writing
+// ISO-8859-1 and UTF-8 encoded .properties files and has
+// support for recursive property expansion.
+//
+// Java properties files are ISO-8859-1 encoded and use Unicode
+// literals for characters outside the ISO character set. Unicode
+// literals can be used in UTF-8 encoded properties files but
+// aren't necessary.
+//
+// To load a single properties file use MustLoadFile():
+//
+//   p := properties.MustLoadFile(filename, properties.UTF8)
+//
+// To load multiple properties files use MustLoadFiles()
+// which loads the files in the given order and merges the
+// result. Missing properties files can be ignored if the
+// 'ignoreMissing' flag is set to true.
+//
+// Filenames can contain environment variables which are expanded
+// before loading.
+//
+//   f1 := "/etc/myapp/myapp.conf"
+//   f2 := "/home/${USER}/myapp.conf"
+//   p := MustLoadFiles([]string{f1, f2}, properties.UTF8, true)
+//
+// All of the different key/value delimiters ' ', ':' and '=' are
+// supported as well as the comment characters '!' and '#' and
+// multi-line values.
+//
+//   ! this is a comment
+//   # and so is this
+//
+//   # the following expressions are equal
+//   key value
+//   key=value
+//   key:value
+//   key = value
+//   key : value
+//   key = val\
+//         ue
+//
+// Properties stores all comments preceding a key and provides
+// GetComments() and SetComments() methods to retrieve and
+// update them. The convenience functions GetComment() and
+// SetComment() allow access to the last comment. The
+// WriteComment() method writes properties files including
+// the comments and with the keys in the original order.
+// This can be used for sanitizing properties files.
+//
+// Property expansion is recursive and circular references
+// and malformed expressions are not allowed and cause an
+// error. Expansion of environment variables is supported.
+//
+//   # standard property
+//   key = value
+//
+//   # property expansion: key2 = value
+//   key2 = ${key}
+//
+//   # recursive expansion: key3 = value
+//   key3 = ${key2}
+//
+//   # circular reference (error)
+//   key = ${key}
+//
+//   # malformed expression (error)
+//   key = ${ke
+//
+//   # refers to the users' home dir
+//   home = ${HOME}
+//
+//   # local key takes precedence over env var: u = foo
+//   USER = foo
+//   u = ${USER}
+//
+// The default property expansion format is ${key} but can be
+// changed by setting different pre- and postfix values on the
+// Properties object.
+//
+//   p := properties.NewProperties()
+//   p.Prefix = "#["
+//   p.Postfix = "]#"
+//
+// Properties provides convenience functions for getting typed
+// values with default values if the key does not exist or the
+// type conversion failed.
+//
+//   # Returns true if the value is either "1", "on", "yes" or "true"
+//   # Returns false for every other value and the default value if
+//   # the key does not exist.
+//   v = p.GetBool("key", false)
+//
+//   # Returns the value if the key exists and the format conversion
+//   # was successful. Otherwise, the default value is returned.
+//   v = p.GetInt64("key", 999)
+//   v = p.GetUint64("key", 999)
+//   v = p.GetFloat64("key", 123.0)
+//   v = p.GetString("key", "def")
+//   v = p.GetDuration("key", 999)
+//
+// As an alternative, properties may be applied with the standard
+// library's flag implementation at any time.
+//
+//   # Standard configuration
+//   v = flag.Int("key", 999, "help message")
+//   flag.Parse()
+//
+//   # Merge p into the flag set
+//   p.MustFlag(flag.CommandLine)
+//
+// Properties provides several MustXXX() convenience functions
+// which will terminate the app if an error occurs. The behavior
+// of the failure is configurable and the default is to call
+// log.Fatal(err). To have the MustXXX() functions panic instead
+// of logging the error, set a different ErrorHandler before
+// you use the Properties package.
+//
+//   properties.ErrorHandler = properties.PanicHandler
+//
+//   # Will panic instead of logging an error
+//   p := properties.MustLoadFile("config.properties")
+//
+// You can also provide your own ErrorHandler function. The only requirement
+// is that the error handler function must exit after handling the error.
+//
+//   properties.ErrorHandler = func(err error) {
+//       fmt.Println(err)
+//       os.Exit(1)
+//   }
+//
+//   # Will write to stdout and then exit
+//   p := properties.MustLoadFile("config.properties")
+//
+// Properties can also be loaded into a struct via the `Decode`
+// method, e.g.
+//
+//   type S struct {
+//       A string        `properties:"a,default=foo"`
+//       D time.Duration `properties:"timeout,default=5s"`
+//       E time.Time     `properties:"expires,layout=2006-01-02,default=2015-01-01"`
+//   }
+//
+// See `Decode()` method for the full documentation.
+//
+// The following documents provide a description of the properties
+// file format.
+//
+// http://en.wikipedia.org/wiki/.properties
+//
+// http://docs.oracle.com/javase/7/docs/api/java/util/Properties.html#load%28java.io.Reader%29
+//
+package properties
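As a quick orientation to this vendored package, here is a minimal sketch of the typed-getter workflow the doc comment above describes; the keys and values are made up for illustration:

package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	// MustLoadFile(name, properties.UTF8) works the same way for files
	// on disk; a string literal keeps the sketch self-contained.
	p := properties.MustLoadString("timeout = 5s\nretries = 3\nverbose = on")

	// Typed getters fall back to the given default on a missing key
	// or a failed conversion.
	fmt.Println(p.GetInt("retries", 1))             // 3
	fmt.Println(p.GetBool("verbose", false))        // true
	fmt.Println(p.GetParsedDuration("timeout", 0))  // 5s
	fmt.Println(p.GetString("missing", "fallback")) // fallback
}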
diff --git a/vendor/github.com/magiconair/properties/integrate.go b/vendor/github.com/magiconair/properties/integrate.go
new file mode 100644
index 000000000..74d38dc67
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/integrate.go
@@ -0,0 +1,34 @@
+// Copyright 2018 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import "flag"
+
+// MustFlag sets flags that are skipped by dst.Parse when p contains
+// the respective key for flag.Flag.Name.
+//
+// Its use is recommended with command-line arguments, as in:
+// 	flag.Parse()
+// 	p.MustFlag(flag.CommandLine)
+func (p *Properties) MustFlag(dst *flag.FlagSet) {
+	m := make(map[string]*flag.Flag)
+	dst.VisitAll(func(f *flag.Flag) {
+		m[f.Name] = f
+	})
+	dst.Visit(func(f *flag.Flag) {
+		delete(m, f.Name) // overridden
+	})
+
+	for name, f := range m {
+		v, ok := p.Get(name)
+		if !ok {
+			continue
+		}
+
+		if err := f.Value.Set(v); err != nil {
+			ErrorHandler(err)
+		}
+	}
+}
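A short sketch of how MustFlag composes with the standard flag package: values from the properties fill in flags that were not set explicitly on the command line. The port flag and key are hypothetical:

package main

import (
	"flag"
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	port := flag.Int("port", 8080, "listen port")
	flag.Parse()

	// Keys matching flag names fill in unset flags; flags given on the
	// command line are removed from consideration by dst.Visit above.
	p := properties.MustLoadString("port = 9090")
	p.MustFlag(flag.CommandLine)

	fmt.Println(*port) // 9090 unless -port was passed explicitly
}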
diff --git a/vendor/github.com/magiconair/properties/lex.go b/vendor/github.com/magiconair/properties/lex.go
new file mode 100644
index 000000000..367166d58
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/lex.go
@@ -0,0 +1,407 @@
+// Copyright 2018 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// Parts of the lexer are from the text/template/parse package.
+// For these parts the following applies:
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file of the go 1.2
+// distribution.
+
+package properties
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+// item represents a token or text string returned from the scanner.
+type item struct {
+	typ itemType // The type of this item.
+	pos int      // The starting position, in bytes, of this item in the input string.
+	val string   // The value of this item.
+}
+
+func (i item) String() string {
+	switch {
+	case i.typ == itemEOF:
+		return "EOF"
+	case i.typ == itemError:
+		return i.val
+	case len(i.val) > 10:
+		return fmt.Sprintf("%.10q...", i.val)
+	}
+	return fmt.Sprintf("%q", i.val)
+}
+
+// itemType identifies the type of lex items.
+type itemType int
+
+const (
+	itemError itemType = iota // error occurred; value is text of error
+	itemEOF
+	itemKey     // a key
+	itemValue   // a value
+	itemComment // a comment
+)
+
+// defines a constant for EOF
+const eof = -1
+
+// permitted whitespace characters: space, FF and TAB
+const whitespace = " \f\t"
+
+// stateFn represents the state of the scanner as a function that returns the next state.
+type stateFn func(*lexer) stateFn
+
+// lexer holds the state of the scanner.
+type lexer struct {
+	input   string    // the string being scanned
+	state   stateFn   // the next lexing function to enter
+	pos     int       // current position in the input
+	start   int       // start position of this item
+	width   int       // width of last rune read from input
+	lastPos int       // position of most recent item returned by nextItem
+	runes   []rune    // scanned runes for this item
+	items   chan item // channel of scanned items
+}
+
+// next returns the next rune in the input.
+func (l *lexer) next() rune {
+	if l.pos >= len(l.input) {
+		l.width = 0
+		return eof
+	}
+	r, w := utf8.DecodeRuneInString(l.input[l.pos:])
+	l.width = w
+	l.pos += l.width
+	return r
+}
+
+// peek returns but does not consume the next rune in the input.
+func (l *lexer) peek() rune {
+	r := l.next()
+	l.backup()
+	return r
+}
+
+// backup steps back one rune. Can only be called once per call of next.
+func (l *lexer) backup() {
+	l.pos -= l.width
+}
+
+// emit passes an item back to the client.
+func (l *lexer) emit(t itemType) {
+	i := item{t, l.start, string(l.runes)}
+	l.items <- i
+	l.start = l.pos
+	l.runes = l.runes[:0]
+}
+
+// ignore skips over the pending input before this point.
+func (l *lexer) ignore() {
+	l.start = l.pos
+}
+
+// appends the rune to the current value
+func (l *lexer) appendRune(r rune) {
+	l.runes = append(l.runes, r)
+}
+
+// accept consumes the next rune if it's from the valid set.
+func (l *lexer) accept(valid string) bool {
+	if strings.ContainsRune(valid, l.next()) {
+		return true
+	}
+	l.backup()
+	return false
+}
+
+// acceptRun consumes a run of runes from the valid set.
+func (l *lexer) acceptRun(valid string) {
+	for strings.ContainsRune(valid, l.next()) {
+	}
+	l.backup()
+}
+
+// acceptRunUntil consumes a run of runes up to the terminator or EOF.
+func (l *lexer) acceptRunUntil(term rune) {
+	for r := l.next(); r != term && r != eof; r = l.next() {
+	}
+	l.backup()
+}
+
+// isNotEmpty reports whether the current parsed text is not empty.
+func (l *lexer) isNotEmpty() bool {
+	return l.pos > l.start
+}
+
+// lineNumber reports which line we're on, based on the position of
+// the previous item returned by nextItem. Doing it this way
+// means we don't have to worry about peek double counting.
+func (l *lexer) lineNumber() int {
+	return 1 + strings.Count(l.input[:l.lastPos], "\n")
+}
+
+// errorf returns an error token and terminates the scan by passing
+// back a nil pointer that will be the next state, terminating l.nextItem.
+func (l *lexer) errorf(format string, args ...interface{}) stateFn {
+	l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
+	return nil
+}
+
+// nextItem returns the next item from the input.
+func (l *lexer) nextItem() item {
+	i := <-l.items
+	l.lastPos = i.pos
+	return i
+}
+
+// lex creates a new scanner for the input string.
+func lex(input string) *lexer {
+	l := &lexer{
+		input: input,
+		items: make(chan item),
+		runes: make([]rune, 0, 32),
+	}
+	go l.run()
+	return l
+}
+
+// run runs the state machine for the lexer.
+func (l *lexer) run() {
+	for l.state = lexBeforeKey(l); l.state != nil; {
+		l.state = l.state(l)
+	}
+}
+
+// state functions
+
+// lexBeforeKey scans until a key begins.
+func lexBeforeKey(l *lexer) stateFn {
+	switch r := l.next(); {
+	case isEOF(r):
+		l.emit(itemEOF)
+		return nil
+
+	case isEOL(r):
+		l.ignore()
+		return lexBeforeKey
+
+	case isComment(r):
+		return lexComment
+
+	case isWhitespace(r):
+		l.ignore()
+		return lexBeforeKey
+
+	default:
+		l.backup()
+		return lexKey
+	}
+}
+
+// lexComment scans a comment line. The comment character has already been scanned.
+func lexComment(l *lexer) stateFn {
+	l.acceptRun(whitespace)
+	l.ignore()
+	for {
+		switch r := l.next(); {
+		case isEOF(r):
+			l.ignore()
+			l.emit(itemEOF)
+			return nil
+		case isEOL(r):
+			l.emit(itemComment)
+			return lexBeforeKey
+		default:
+			l.appendRune(r)
+		}
+	}
+}
+
+// lexKey scans the key up to a delimiter
+func lexKey(l *lexer) stateFn {
+	var r rune
+
+Loop:
+	for {
+		switch r = l.next(); {
+
+		case isEscape(r):
+			err := l.scanEscapeSequence()
+			if err != nil {
+				return l.errorf(err.Error())
+			}
+
+		case isEndOfKey(r):
+			l.backup()
+			break Loop
+
+		case isEOF(r):
+			break Loop
+
+		default:
+			l.appendRune(r)
+		}
+	}
+
+	if len(l.runes) > 0 {
+		l.emit(itemKey)
+	}
+
+	if isEOF(r) {
+		l.emit(itemEOF)
+		return nil
+	}
+
+	return lexBeforeValue
+}
+
+// lexBeforeValue scans the delimiter between key and value.
+// Leading and trailing whitespace is ignored.
+// We expect to be just after the key.
+func lexBeforeValue(l *lexer) stateFn {
+	l.acceptRun(whitespace)
+	l.accept(":=")
+	l.acceptRun(whitespace)
+	l.ignore()
+	return lexValue
+}
+
+// lexValue scans text until the end of the line. We expect to be just after the delimiter.
+func lexValue(l *lexer) stateFn {
+	for {
+		switch r := l.next(); {
+		case isEscape(r):
+			if isEOL(l.peek()) {
+				l.next()
+				l.acceptRun(whitespace)
+			} else {
+				err := l.scanEscapeSequence()
+				if err != nil {
+					return l.errorf(err.Error())
+				}
+			}
+
+		case isEOL(r):
+			l.emit(itemValue)
+			l.ignore()
+			return lexBeforeKey
+
+		case isEOF(r):
+			l.emit(itemValue)
+			l.emit(itemEOF)
+			return nil
+
+		default:
+			l.appendRune(r)
+		}
+	}
+}
+
+// scanEscapeSequence scans either one of the escaped characters
+// or a unicode literal. We expect to be after the escape character.
+func (l *lexer) scanEscapeSequence() error {
+	switch r := l.next(); {
+
+	case isEscapedCharacter(r):
+		l.appendRune(decodeEscapedCharacter(r))
+		return nil
+
+	case atUnicodeLiteral(r):
+		return l.scanUnicodeLiteral()
+
+	case isEOF(r):
+		return fmt.Errorf("premature EOF")
+
+	// silently drop the escape character and append the rune as is
+	default:
+		l.appendRune(r)
+		return nil
+	}
+}
+
+// scans a unicode literal in the form \uXXXX. We expect to be after the \u.
+func (l *lexer) scanUnicodeLiteral() error {
+	// scan the digits
+	d := make([]rune, 4)
+	for i := 0; i < 4; i++ {
+		d[i] = l.next()
+		if d[i] == eof || !strings.ContainsRune("0123456789abcdefABCDEF", d[i]) {
+			return fmt.Errorf("invalid unicode literal")
+		}
+	}
+
+	// decode the digits into a rune
+	r, err := strconv.ParseInt(string(d), 16, 0)
+	if err != nil {
+		return err
+	}
+
+	l.appendRune(rune(r))
+	return nil
+}
+
+// decodeEscapedCharacter returns the unescaped rune. We expect to be after the escape character.
+func decodeEscapedCharacter(r rune) rune {
+	switch r {
+	case 'f':
+		return '\f'
+	case 'n':
+		return '\n'
+	case 'r':
+		return '\r'
+	case 't':
+		return '\t'
+	default:
+		return r
+	}
+}
+
+// atUnicodeLiteral reports whether we are at a unicode literal.
+// The escape character has already been consumed.
+func atUnicodeLiteral(r rune) bool {
+	return r == 'u'
+}
+
+// isComment reports whether we are at the start of a comment.
+func isComment(r rune) bool {
+	return r == '#' || r == '!'
+}
+
+// isEndOfKey reports whether the rune terminates the current key.
+func isEndOfKey(r rune) bool {
+	return strings.ContainsRune(" \f\t\r\n:=", r)
+}
+
+// isEOF reports whether we are at EOF.
+func isEOF(r rune) bool {
+	return r == eof
+}
+
+// isEOL reports whether we are at a new line character.
+func isEOL(r rune) bool {
+	return r == '\n' || r == '\r'
+}
+
+// isEscape reports whether the rune is the escape character which
+// prefixes unicode literals and other escaped characters.
+func isEscape(r rune) bool {
+	return r == '\\'
+}
+
+// isEscapedCharacter reports whether we are at one of the characters that need escaping.
+// The escape character has already been consumed.
+func isEscapedCharacter(r rune) bool {
+	return strings.ContainsRune(" :=fnrt", r)
+}
+
+// isWhitespace reports whether the rune is a whitespace character.
+func isWhitespace(r rune) bool {
+	return strings.ContainsRune(whitespace, r)
+}
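Since the lexer is unexported, it can only be exercised from inside the package (for example in a test). A hypothetical in-package helper illustrating the item stream it emits:

package properties

import "fmt"

// lexTokens is a hypothetical helper: the scanner runs in its own
// goroutine and hands items over a channel until itemEOF or
// itemError is reached.
func lexTokens(input string) {
	l := lex(input)
	for {
		i := l.nextItem()
		fmt.Printf("%d %s\n", i.typ, i)
		if i.typ == itemEOF || i.typ == itemError {
			return
		}
	}
}

// lexTokens("# note\nkey = value\n") prints an itemComment ("note"),
// an itemKey ("key"), an itemValue ("value") and finally itemEOF.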
diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go
new file mode 100644
index 000000000..c8e1b5804
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/load.go
@@ -0,0 +1,292 @@
+// Copyright 2018 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strings"
+)
+
+// Encoding specifies encoding of the input data.
+type Encoding uint
+
+const (
+	// utf8Default is a private placeholder for the zero value of Encoding to
+	// ensure that it has the correct meaning. UTF8 is the default encoding but
+	// was assigned a non-zero value which cannot be changed without breaking
+	// existing code. Clients should continue to use the public constants.
+	utf8Default Encoding = iota
+
+	// UTF8 interprets the input data as UTF-8.
+	UTF8
+
+	// ISO_8859_1 interprets the input data as ISO-8859-1.
+	ISO_8859_1
+)
+
+type Loader struct {
+	// Encoding determines how the data from files and byte buffers
+	// is interpreted. For URLs the Content-Type header is used
+	// to determine the encoding of the data.
+	Encoding Encoding
+
+	// DisableExpansion configures the property expansion of the
+	// returned property object. When set to true, the property values
+	// will not be expanded and the Property object will not be checked
+	// for invalid expansion expressions.
+	DisableExpansion bool
+
+	// IgnoreMissing configures whether missing files or URLs which return
+	// 404 are reported as errors. When set to true, missing files and 404
+	// status codes are not reported as errors.
+	IgnoreMissing bool
+}
+
+// LoadBytes reads a buffer into a Properties struct.
+func (l *Loader) LoadBytes(buf []byte) (*Properties, error) {
+	return l.loadBytes(buf, l.Encoding)
+}
+
+// LoadAll reads the content of multiple URLs or files in the given order into
+// a Properties struct. If IgnoreMissing is true then a 404 status code or a
+// missing file will not be reported as an error. Encoding sets the encoding for
+// files. For the URLs see LoadURL for the Content-Type header and the
+// encoding.
+func (l *Loader) LoadAll(names []string) (*Properties, error) {
+	all := NewProperties()
+	for _, name := range names {
+		n, err := expandName(name)
+		if err != nil {
+			return nil, err
+		}
+
+		var p *Properties
+		switch {
+		case strings.HasPrefix(n, "http://"):
+			p, err = l.LoadURL(n)
+		case strings.HasPrefix(n, "https://"):
+			p, err = l.LoadURL(n)
+		default:
+			p, err = l.LoadFile(n)
+		}
+		if err != nil {
+			return nil, err
+		}
+		all.Merge(p)
+	}
+
+	all.DisableExpansion = l.DisableExpansion
+	if all.DisableExpansion {
+		return all, nil
+	}
+	return all, all.check()
+}
+
+// LoadFile reads a file into a Properties struct.
+// If IgnoreMissing is true then a missing file will not be
+// reported as an error.
+func (l *Loader) LoadFile(filename string) (*Properties, error) {
+	data, err := ioutil.ReadFile(filename)
+	if err != nil {
+		if l.IgnoreMissing && os.IsNotExist(err) {
+			LogPrintf("properties: %s not found. skipping", filename)
+			return NewProperties(), nil
+		}
+		return nil, err
+	}
+	return l.loadBytes(data, l.Encoding)
+}
+
+// LoadURL reads the content of the URL into a Properties struct.
+//
+// The encoding is determined via the Content-Type header which
+// should be set to 'text/plain'. If the 'charset' parameter is
+// missing, 'iso-8859-1' or 'latin1' the encoding is set to
+// ISO-8859-1. If the 'charset' parameter is set to 'utf-8' the
+// encoding is set to UTF-8. A missing content type header is
+// interpreted as 'text/plain; charset=utf-8'.
+func (l *Loader) LoadURL(url string) (*Properties, error) {
+	resp, err := http.Get(url)
+	if err != nil {
+		return nil, fmt.Errorf("properties: error fetching %q. %s", url, err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode == 404 && l.IgnoreMissing {
+		LogPrintf("properties: %s returned %d. skipping", url, resp.StatusCode)
+		return NewProperties(), nil
+	}
+
+	if resp.StatusCode != 200 {
+		return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode)
+	}
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("properties: %s error reading response. %s", url, err)
+	}
+
+	ct := resp.Header.Get("Content-Type")
+	var enc Encoding
+	switch strings.ToLower(ct) {
+	case "text/plain", "text/plain; charset=iso-8859-1", "text/plain; charset=latin1":
+		enc = ISO_8859_1
+	case "", "text/plain; charset=utf-8":
+		enc = UTF8
+	default:
+		return nil, fmt.Errorf("properties: invalid content type %s", ct)
+	}
+
+	return l.loadBytes(body, enc)
+}
+
+func (l *Loader) loadBytes(buf []byte, enc Encoding) (*Properties, error) {
+	p, err := parse(convert(buf, enc))
+	if err != nil {
+		return nil, err
+	}
+	p.DisableExpansion = l.DisableExpansion
+	if p.DisableExpansion {
+		return p, nil
+	}
+	return p, p.check()
+}
+
+// Load reads a buffer into a Properties struct.
+func Load(buf []byte, enc Encoding) (*Properties, error) {
+	l := &Loader{Encoding: enc}
+	return l.LoadBytes(buf)
+}
+
+// LoadString reads a UTF-8 string into a Properties struct.
+func LoadString(s string) (*Properties, error) {
+	l := &Loader{Encoding: UTF8}
+	return l.LoadBytes([]byte(s))
+}
+
+// LoadMap creates a new Properties struct from a string map.
+func LoadMap(m map[string]string) *Properties {
+	p := NewProperties()
+	for k, v := range m {
+		p.Set(k, v)
+	}
+	return p
+}
+
+// LoadFile reads a file into a Properties struct.
+func LoadFile(filename string, enc Encoding) (*Properties, error) {
+	l := &Loader{Encoding: enc}
+	return l.LoadAll([]string{filename})
+}
+
+// LoadFiles reads multiple files in the given order into
+// a Properties struct. If 'ignoreMissing' is true then
+// non-existent files will not be reported as an error.
+func LoadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) {
+	l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing}
+	return l.LoadAll(filenames)
+}
+
+// LoadURL reads the content of the URL into a Properties struct.
+// See Loader#LoadURL for details.
+func LoadURL(url string) (*Properties, error) {
+	l := &Loader{Encoding: UTF8}
+	return l.LoadAll([]string{url})
+}
+
+// LoadURLs reads the content of multiple URLs in the given order into a
+// Properties struct. If IgnoreMissing is true then a 404 status code will
+// not be reported as an error. See Loader#LoadURL for the Content-Type header
+// and the encoding.
+func LoadURLs(urls []string, ignoreMissing bool) (*Properties, error) {
+	l := &Loader{Encoding: UTF8, IgnoreMissing: ignoreMissing}
+	return l.LoadAll(urls)
+}
+
+// LoadAll reads the content of multiple URLs or files in the given order into a
+// Properties struct. If 'ignoreMissing' is true then a 404 status code or a missing file will
+// not be reported as an error. Encoding sets the encoding for files. For the URLs please see
+// LoadURL for the Content-Type header and the encoding.
+func LoadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) {
+	l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing}
+	return l.LoadAll(names)
+}
+
+// MustLoadString reads a UTF-8 string into a Properties struct and
+// panics on error.
+func MustLoadString(s string) *Properties {
+	return must(LoadString(s))
+}
+
+// MustLoadFile reads a file into a Properties struct and
+// panics on error.
+func MustLoadFile(filename string, enc Encoding) *Properties {
+	return must(LoadFile(filename, enc))
+}
+
+// MustLoadFiles reads multiple files in the given order into
+// a Properties struct and panics on error. If 'ignoreMissing'
+// is true then non-existent files will not be reported as an error.
+func MustLoadFiles(filenames []string, enc Encoding, ignoreMissing bool) *Properties {
+	return must(LoadFiles(filenames, enc, ignoreMissing))
+}
+
+// MustLoadURL reads the content of a URL into a Properties struct and
+// panics on error.
+func MustLoadURL(url string) *Properties {
+	return must(LoadURL(url))
+}
+
+// MustLoadURLs reads the content of multiple URLs in the given order into a
+// Properties struct and panics on error. If 'ignoreMissing' is true then a 404
+// status code will not be reported as an error.
+func MustLoadURLs(urls []string, ignoreMissing bool) *Properties {
+	return must(LoadURLs(urls, ignoreMissing))
+}
+
+// MustLoadAll reads the content of multiple URLs or files in the given order into a
+// Properties struct. If 'ignoreMissing' is true then a 404 status code or a missing file will
+// not be reported as an error. Encoding sets the encoding for files. For the URLs please see
+// LoadURL for the Content-Type header and the encoding. It panics on error.
+func MustLoadAll(names []string, enc Encoding, ignoreMissing bool) *Properties {
+	return must(LoadAll(names, enc, ignoreMissing))
+}
+
+func must(p *Properties, err error) *Properties {
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return p
+}
+
+// expandName expands ${ENV_VAR} expressions in a name.
+// If the environment variable does not exist then it will be replaced
+// with an empty string. Malformed expressions like "${ENV_VAR" will
+// be reported as error.
+func expandName(name string) (string, error) {
+	return expand(name, []string{}, "${", "}", make(map[string]string))
+}
+
+// Interprets a byte buffer either as an ISO-8859-1 or UTF-8 encoded string.
+// For ISO-8859-1 we can convert each byte straight into a rune since the
+// first 256 unicode code points cover ISO-8859-1.
+func convert(buf []byte, enc Encoding) string {
+	switch enc {
+	case utf8Default, UTF8:
+		return string(buf)
+	case ISO_8859_1:
+		runes := make([]rune, len(buf))
+		for i, b := range buf {
+			runes[i] = rune(b)
+		}
+		return string(runes)
+	default:
+		ErrorHandler(fmt.Errorf("unsupported encoding %v", enc))
+	}
+	panic("ErrorHandler should exit")
+}
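A brief sketch of the Loader in action; the file paths are hypothetical, and with IgnoreMissing set a missing file simply contributes nothing instead of failing the load:

package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	l := &properties.Loader{Encoding: properties.UTF8, IgnoreMissing: true}

	// Names may contain ${ENV_VAR} expressions, which expandName
	// resolves before loading; later files override earlier ones.
	p, err := l.LoadAll([]string{"/etc/myapp/myapp.conf", "${HOME}/myapp.conf"})
	if err != nil {
		panic(err)
	}
	fmt.Println(p.Len(), "keys loaded")
}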
diff --git a/vendor/github.com/magiconair/properties/parser.go b/vendor/github.com/magiconair/properties/parser.go
new file mode 100644
index 000000000..cdc4a8034
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/parser.go
@@ -0,0 +1,95 @@
+// Copyright 2018 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+	"fmt"
+	"runtime"
+)
+
+type parser struct {
+	lex *lexer
+}
+
+func parse(input string) (properties *Properties, err error) {
+	p := &parser{lex: lex(input)}
+	defer p.recover(&err)
+
+	properties = NewProperties()
+	key := ""
+	comments := []string{}
+
+	for {
+		token := p.expectOneOf(itemComment, itemKey, itemEOF)
+		switch token.typ {
+		case itemEOF:
+			goto done
+		case itemComment:
+			comments = append(comments, token.val)
+			continue
+		case itemKey:
+			key = token.val
+			if _, ok := properties.m[key]; !ok {
+				properties.k = append(properties.k, key)
+			}
+		}
+
+		token = p.expectOneOf(itemValue, itemEOF)
+		if len(comments) > 0 {
+			properties.c[key] = comments
+			comments = []string{}
+		}
+		switch token.typ {
+		case itemEOF:
+			properties.m[key] = ""
+			goto done
+		case itemValue:
+			properties.m[key] = token.val
+		}
+	}
+
+done:
+	return properties, nil
+}
+
+func (p *parser) errorf(format string, args ...interface{}) {
+	format = fmt.Sprintf("properties: Line %d: %s", p.lex.lineNumber(), format)
+	panic(fmt.Errorf(format, args...))
+}
+
+func (p *parser) expect(expected itemType) (token item) {
+	token = p.lex.nextItem()
+	if token.typ != expected {
+		p.unexpected(token)
+	}
+	return token
+}
+
+func (p *parser) expectOneOf(expected ...itemType) (token item) {
+	token = p.lex.nextItem()
+	for _, v := range expected {
+		if token.typ == v {
+			return token
+		}
+	}
+	p.unexpected(token)
+	panic("unexpected token")
+}
+
+func (p *parser) unexpected(token item) {
+	p.errorf(token.String())
+}
+
+// recover is the handler that turns panics into returns from the top level of Parse.
+func (p *parser) recover(errp *error) {
+	e := recover()
+	if e != nil {
+		if _, ok := e.(runtime.Error); ok {
+			panic(e)
+		}
+		*errp = e.(error)
+	}
+	return
+}
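The parser itself is unexported, but its comment handling shows through the public API: consecutive comment lines are collected and attached to the next key. A minimal sketch:

package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	p := properties.MustLoadString("# first\n! second\nkey = value\n")
	fmt.Println(p.GetComments("key")) // [first second]
	fmt.Println(p.GetComment("key"))  // second (the last comment)
}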
diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go
new file mode 100644
index 000000000..cb3d1a332
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/properties.go
@@ -0,0 +1,833 @@
+// Copyright 2018 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+// BUG(frank): Set() does not check for invalid unicode literals since this is currently handled by the lexer.
+// BUG(frank): Write() does not allow configuring the newline character. Therefore, on Windows LF is used.
+
+import (
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+)
+
+const maxExpansionDepth = 64
+
+// ErrorHandlerFunc defines the type of function which handles failures
+// of the MustXXX() functions. An error handler function must exit
+// the application after handling the error.
+type ErrorHandlerFunc func(error)
+
+// ErrorHandler is the function which handles failures of the MustXXX()
+// functions. The default is LogFatalHandler.
+var ErrorHandler ErrorHandlerFunc = LogFatalHandler
+
+// LogHandlerFunc defines the function prototype for logging errors.
+type LogHandlerFunc func(fmt string, args ...interface{})
+
+// LogPrintf defines a log handler which uses log.Printf.
+var LogPrintf LogHandlerFunc = log.Printf
+
+// LogFatalHandler handles the error by logging a fatal error and exiting.
+func LogFatalHandler(err error) {
+	log.Fatal(err)
+}
+
+// PanicHandler handles the error by panicking.
+func PanicHandler(err error) {
+	panic(err)
+}
+
+// -----------------------------------------------------------------------------
+
+// A Properties contains the key/value pairs from the properties input.
+// All values are stored in unexpanded form and are expanded at runtime.
+type Properties struct {
+	// Pre-/Postfix for property expansion.
+	Prefix  string
+	Postfix string
+
+	// DisableExpansion controls the expansion of properties on Get()
+	// and the check for circular references on Set(). When set to
+	// true Properties behaves like a simple key/value store and does
+	// not check for circular references on Get() or on Set().
+	DisableExpansion bool
+
+	// Stores the key/value pairs
+	m map[string]string
+
+	// Stores the comments per key.
+	c map[string][]string
+
+	// Stores the keys in order of appearance.
+	k []string
+}
+
+// NewProperties creates a new Properties struct with the default
+// configuration for "${key}" expressions.
+func NewProperties() *Properties {
+	return &Properties{
+		Prefix:  "${",
+		Postfix: "}",
+		m:       map[string]string{},
+		c:       map[string][]string{},
+		k:       []string{},
+	}
+}
+
+// Load reads a buffer into the given Properties struct.
+func (p *Properties) Load(buf []byte, enc Encoding) error {
+	l := &Loader{Encoding: enc, DisableExpansion: p.DisableExpansion}
+	newProperties, err := l.LoadBytes(buf)
+	if err != nil {
+		return err
+	}
+	p.Merge(newProperties)
+	return nil
+}
+
+// Get returns the expanded value for the given key if it exists.
+// Otherwise, ok is false.
+func (p *Properties) Get(key string) (value string, ok bool) {
+	v, ok := p.m[key]
+	if p.DisableExpansion {
+		return v, ok
+	}
+	if !ok {
+		return "", false
+	}
+
+	expanded, err := p.expand(key, v)
+
+	// we guarantee that the expanded value is free of
+	// circular references and malformed expressions
+	// so we panic if we still get an error here.
+	if err != nil {
+		ErrorHandler(fmt.Errorf("%s in %q", err, key+" = "+v))
+	}
+
+	return expanded, true
+}
+
+// MustGet returns the expanded value for the given key if it exists.
+// Otherwise, it panics.
+func (p *Properties) MustGet(key string) string {
+	if v, ok := p.Get(key); ok {
+		return v
+	}
+	ErrorHandler(invalidKeyError(key))
+	panic("ErrorHandler should exit")
+}
+
+// ----------------------------------------------------------------------------
+
+// ClearComments removes the comments for all keys.
+func (p *Properties) ClearComments() {
+	p.c = map[string][]string{}
+}
+
+// ----------------------------------------------------------------------------
+
+// GetComment returns the last comment before the given key or an empty string.
+func (p *Properties) GetComment(key string) string {
+	comments, ok := p.c[key]
+	if !ok || len(comments) == 0 {
+		return ""
+	}
+	return comments[len(comments)-1]
+}
+
+// ----------------------------------------------------------------------------
+
+// GetComments returns all comments that appeared before the given key or nil.
+func (p *Properties) GetComments(key string) []string {
+	if comments, ok := p.c[key]; ok {
+		return comments
+	}
+	return nil
+}
+
+// ----------------------------------------------------------------------------
+
+// SetComment sets the comment for the key.
+func (p *Properties) SetComment(key, comment string) {
+	p.c[key] = []string{comment}
+}
+
+// ----------------------------------------------------------------------------
+
+// SetComments sets the comments for the key. If the comments are nil then
+// all comments for this key are deleted.
+func (p *Properties) SetComments(key string, comments []string) {
+	if comments == nil {
+		delete(p.c, key)
+		return
+	}
+	p.c[key] = comments
+}
+
+// ----------------------------------------------------------------------------
+
+// GetBool checks if the expanded value is one of '1', 'yes',
+// 'true' or 'on' if the key exists. The comparison is case-insensitive.
+// If the key does not exist the default value is returned.
+func (p *Properties) GetBool(key string, def bool) bool {
+	v, err := p.getBool(key)
+	if err != nil {
+		return def
+	}
+	return v
+}
+
+// MustGetBool checks if the expanded value is one of '1', 'yes',
+// 'true' or 'on' if the key exists. The comparison is case-insensitive.
+// If the key does not exist the function panics.
+func (p *Properties) MustGetBool(key string) bool {
+	v, err := p.getBool(key)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return v
+}
+
+func (p *Properties) getBool(key string) (value bool, err error) {
+	if v, ok := p.Get(key); ok {
+		return boolVal(v), nil
+	}
+	return false, invalidKeyError(key)
+}
+
+func boolVal(v string) bool {
+	v = strings.ToLower(v)
+	return v == "1" || v == "true" || v == "yes" || v == "on"
+}
+
+// ----------------------------------------------------------------------------
+
+// GetDuration parses the expanded value as a time.Duration (in ns) if the
+// key exists. If key does not exist or the value cannot be parsed the default
+// value is returned. In almost all cases you want to use GetParsedDuration().
+func (p *Properties) GetDuration(key string, def time.Duration) time.Duration {
+	v, err := p.getInt64(key)
+	if err != nil {
+		return def
+	}
+	return time.Duration(v)
+}
+
+// MustGetDuration parses the expanded value as a time.Duration (in ns) if
+// the key exists. If key does not exist or the value cannot be parsed the
+// function panics. In almost all cases you want to use MustGetParsedDuration().
+func (p *Properties) MustGetDuration(key string) time.Duration {
+	v, err := p.getInt64(key)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return time.Duration(v)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetParsedDuration parses the expanded value with time.ParseDuration() if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned.
+func (p *Properties) GetParsedDuration(key string, def time.Duration) time.Duration {
+	s, ok := p.Get(key)
+	if !ok {
+		return def
+	}
+	v, err := time.ParseDuration(s)
+	if err != nil {
+		return def
+	}
+	return v
+}
+
+// MustGetParsedDuration parses the expanded value with time.ParseDuration() if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetParsedDuration(key string) time.Duration {
+	s, ok := p.Get(key)
+	if !ok {
+		ErrorHandler(invalidKeyError(key))
+	}
+	v, err := time.ParseDuration(s)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return v
+}
+
+// ----------------------------------------------------------------------------
+
+// GetFloat64 parses the expanded value as a float64 if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned.
+func (p *Properties) GetFloat64(key string, def float64) float64 {
+	v, err := p.getFloat64(key)
+	if err != nil {
+		return def
+	}
+	return v
+}
+
+// MustGetFloat64 parses the expanded value as a float64 if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetFloat64(key string) float64 {
+	v, err := p.getFloat64(key)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return v
+}
+
+func (p *Properties) getFloat64(key string) (value float64, err error) {
+	if v, ok := p.Get(key); ok {
+		value, err = strconv.ParseFloat(v, 64)
+		if err != nil {
+			return 0, err
+		}
+		return value, nil
+	}
+	return 0, invalidKeyError(key)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetInt parses the expanded value as an int if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned. If the value does not fit into an int the
+// function panics with an out of range error.
+func (p *Properties) GetInt(key string, def int) int {
+	v, err := p.getInt64(key)
+	if err != nil {
+		return def
+	}
+	return intRangeCheck(key, v)
+}
+
+// MustGetInt parses the expanded value as an int if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+// If the value does not fit into an int the function panics with
+// an out of range error.
+func (p *Properties) MustGetInt(key string) int {
+	v, err := p.getInt64(key)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return intRangeCheck(key, v)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetInt64 parses the expanded value as an int64 if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned.
+func (p *Properties) GetInt64(key string, def int64) int64 {
+	v, err := p.getInt64(key)
+	if err != nil {
+		return def
+	}
+	return v
+}
+
+// MustGetInt64 parses the expanded value as an int64 if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetInt64(key string) int64 {
+	v, err := p.getInt64(key)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return v
+}
+
+func (p *Properties) getInt64(key string) (value int64, err error) {
+	if v, ok := p.Get(key); ok {
+		value, err = strconv.ParseInt(v, 10, 64)
+		if err != nil {
+			return 0, err
+		}
+		return value, nil
+	}
+	return 0, invalidKeyError(key)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetUint parses the expanded value as a uint if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned. If the value does not fit into a uint the
+// function panics with an out of range error.
+func (p *Properties) GetUint(key string, def uint) uint {
+	v, err := p.getUint64(key)
+	if err != nil {
+		return def
+	}
+	return uintRangeCheck(key, v)
+}
+
+// MustGetUint parses the expanded value as a uint if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+// If the value does not fit into a uint the function panics with
+// an out of range error.
+func (p *Properties) MustGetUint(key string) uint {
+	v, err := p.getUint64(key)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return uintRangeCheck(key, v)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetUint64 parses the expanded value as a uint64 if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned.
+func (p *Properties) GetUint64(key string, def uint64) uint64 {
+	v, err := p.getUint64(key)
+	if err != nil {
+		return def
+	}
+	return v
+}
+
+// MustGetUint64 parses the expanded value as a uint64 if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetUint64(key string) uint64 {
+	v, err := p.getUint64(key)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return v
+}
+
+func (p *Properties) getUint64(key string) (value uint64, err error) {
+	if v, ok := p.Get(key); ok {
+		value, err = strconv.ParseUint(v, 10, 64)
+		if err != nil {
+			return 0, err
+		}
+		return value, nil
+	}
+	return 0, invalidKeyError(key)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetString returns the expanded value for the given key if it exists or
+// the default value otherwise.
+func (p *Properties) GetString(key, def string) string {
+	if v, ok := p.Get(key); ok {
+		return v
+	}
+	return def
+}
+
+// MustGetString returns the expanded value for the given key if it exists or
+// panics otherwise.
+func (p *Properties) MustGetString(key string) string {
+	if v, ok := p.Get(key); ok {
+		return v
+	}
+	ErrorHandler(invalidKeyError(key))
+	panic("ErrorHandler should exit")
+}
+
+// ----------------------------------------------------------------------------
+
+// Filter returns a new properties object which contains all properties
+// for which the key matches the pattern.
+func (p *Properties) Filter(pattern string) (*Properties, error) {
+	re, err := regexp.Compile(pattern)
+	if err != nil {
+		return nil, err
+	}
+
+	return p.FilterRegexp(re), nil
+}
+
+// FilterRegexp returns a new properties object which contains all properties
+// for which the key matches the regular expression.
+func (p *Properties) FilterRegexp(re *regexp.Regexp) *Properties {
+	pp := NewProperties()
+	for _, k := range p.k {
+		if re.MatchString(k) {
+			// TODO(fs): we are ignoring the error which flags a circular reference.
+			// TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed)
+			pp.Set(k, p.m[k])
+		}
+	}
+	return pp
+}
+
+// FilterPrefix returns a new properties object with a subset of all keys
+// with the given prefix.
+func (p *Properties) FilterPrefix(prefix string) *Properties {
+	pp := NewProperties()
+	for _, k := range p.k {
+		if strings.HasPrefix(k, prefix) {
+			// TODO(fs): we are ignoring the error which flags a circular reference.
+			// TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed)
+			pp.Set(k, p.m[k])
+		}
+	}
+	return pp
+}
+
+// FilterStripPrefix returns a new properties object with a subset of all keys
+// with the given prefix and the prefix removed from the keys.
+func (p *Properties) FilterStripPrefix(prefix string) *Properties {
+	pp := NewProperties()
+	n := len(prefix)
+	for _, k := range p.k {
+		if len(k) > len(prefix) && strings.HasPrefix(k, prefix) {
+			// TODO(fs): we are ignoring the error which flags a circular reference.
+			// TODO(fs): since we are modifying keys I am not entirely sure whether we can create a circular reference
+			// TODO(fs): this function should probably return an error but the signature is fixed
+			pp.Set(k[n:], p.m[k])
+		}
+	}
+	return pp
+}
+
+// Len returns the number of keys.
+func (p *Properties) Len() int {
+	return len(p.m)
+}
+
+// Keys returns all keys in the same order as in the input.
+func (p *Properties) Keys() []string {
+	keys := make([]string, len(p.k))
+	copy(keys, p.k)
+	return keys
+}
+
+// Set sets the property key to the corresponding value.
+// If a value for key existed before then ok is true and prev
+// contains the previous value. If the value contains a
+// circular reference or a malformed expression then
+// an error is returned.
+// An empty key is silently ignored.
+func (p *Properties) Set(key, value string) (prev string, ok bool, err error) {
+	if key == "" {
+		return "", false, nil
+	}
+
+	// if expansion is disabled we allow circular references
+	if p.DisableExpansion {
+		prev, ok = p.Get(key)
+		p.m[key] = value
+		if !ok {
+			p.k = append(p.k, key)
+		}
+		return prev, ok, nil
+	}
+
+	// to check for a circular reference we temporarily need
+	// to set the new value. If there is an error then revert
+	// to the previous state. Only if all tests are successful
+	// then we add the key to the p.k list.
+	prev, ok = p.Get(key)
+	p.m[key] = value
+
+	// now check for a circular reference
+	_, err = p.expand(key, value)
+	if err != nil {
+
+		// revert to the previous state
+		if ok {
+			p.m[key] = prev
+		} else {
+			delete(p.m, key)
+		}
+
+		return "", false, err
+	}
+
+	if !ok {
+		p.k = append(p.k, key)
+	}
+
+	return prev, ok, nil
+}
+
+// SetValue sets property key to the default string value
+// as defined by fmt.Sprintf("%v").
+func (p *Properties) SetValue(key string, value interface{}) error {
+	_, _, err := p.Set(key, fmt.Sprintf("%v", value))
+	return err
+}
+
+// MustSet sets the property key to the corresponding value.
+// If a value for key existed before then ok is true and prev
+// contains the previous value. An empty key is silently ignored.
+func (p *Properties) MustSet(key, value string) (prev string, ok bool) {
+	prev, ok, err := p.Set(key, value)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return prev, ok
+}
+
+// String returns a string of all expanded 'key = value' pairs.
+func (p *Properties) String() string {
+	var s string
+	for _, key := range p.k {
+		value, _ := p.Get(key)
+		s = fmt.Sprintf("%s%s = %s\n", s, key, value)
+	}
+	return s
+}
+
+// Write writes all unexpanded 'key = value' pairs to the given writer.
+// Write returns the number of bytes written and any write error encountered.
+func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) {
+	return p.WriteComment(w, "", enc)
+}
+
+// WriteComment writes all unexpanded 'key = value' pairs to the given writer.
+// If prefix is not empty then comments are written with a blank line and the
+// given prefix. The prefix should be either "# " or "! " to be compatible with
+// the properties file format. Otherwise, the properties parser will not be
+// able to read the file back in. It returns the number of bytes written and
+// any write error encountered.
+func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n int, err error) {
+	var x int
+
+	for _, key := range p.k {
+		value := p.m[key]
+
+		if prefix != "" {
+			if comments, ok := p.c[key]; ok {
+				// don't print comments if they are all empty
+				allEmpty := true
+				for _, c := range comments {
+					if c != "" {
+						allEmpty = false
+						break
+					}
+				}
+
+				if !allEmpty {
+					// add a blank line between entries but not at the top
+					if len(comments) > 0 && n > 0 {
+						x, err = fmt.Fprintln(w)
+						if err != nil {
+							return
+						}
+						n += x
+					}
+
+					for _, c := range comments {
+						x, err = fmt.Fprintf(w, "%s%s\n", prefix, encode(c, "", enc))
+						if err != nil {
+							return
+						}
+						n += x
+					}
+				}
+			}
+		}
+
+		x, err = fmt.Fprintf(w, "%s = %s\n", encode(key, " :", enc), encode(value, "", enc))
+		if err != nil {
+			return
+		}
+		n += x
+	}
+	return
+}
+
+// Map returns a copy of the properties as a map.
+func (p *Properties) Map() map[string]string {
+	m := make(map[string]string)
+	for k, v := range p.m {
+		m[k] = v
+	}
+	return m
+}
+
+// FilterFunc returns a copy of the properties which includes only the values that pass all filters.
+func (p *Properties) FilterFunc(filters ...func(k, v string) bool) *Properties {
+	pp := NewProperties()
+outer:
+	for k, v := range p.m {
+		for _, f := range filters {
+			if !f(k, v) {
+				continue outer
+			}
+		}
+		pp.Set(k, v)
+	}
+	return pp
+}
+
+// ----------------------------------------------------------------------------
+
+// Delete removes the key and its comments.
+func (p *Properties) Delete(key string) {
+	delete(p.m, key)
+	delete(p.c, key)
+	newKeys := []string{}
+	for _, k := range p.k {
+		if k != key {
+			newKeys = append(newKeys, k)
+		}
+	}
+	p.k = newKeys
+}
+
+// Merge merges properties, comments and keys from the other *Properties into p.
+func (p *Properties) Merge(other *Properties) {
+	for k, v := range other.m {
+		p.m[k] = v
+	}
+	for k, v := range other.c {
+		p.c[k] = v
+	}
+
+outer:
+	for _, otherKey := range other.k {
+		for _, key := range p.k {
+			if otherKey == key {
+				continue outer
+			}
+		}
+		p.k = append(p.k, otherKey)
+	}
+}
+
+// ----------------------------------------------------------------------------
+
+// check expands all values and returns an error if a circular reference or
+// a malformed expression was found.
+func (p *Properties) check() error {
+	for key, value := range p.m {
+		if _, err := p.expand(key, value); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (p *Properties) expand(key, input string) (string, error) {
+	// no pre/postfix -> nothing to expand
+	if p.Prefix == "" && p.Postfix == "" {
+		return input, nil
+	}
+
+	return expand(input, []string{key}, p.Prefix, p.Postfix, p.m)
+}
+
+// expand recursively expands expressions of '(prefix)key(postfix)' to their corresponding values.
+// The function keeps track of the keys that were already expanded and stops if it
+// detects a circular reference or a malformed expression of the form '(prefix)key'.
+func expand(s string, keys []string, prefix, postfix string, values map[string]string) (string, error) {
+	if len(keys) > maxExpansionDepth {
+		return "", fmt.Errorf("expansion too deep")
+	}
+
+	for {
+		start := strings.Index(s, prefix)
+		if start == -1 {
+			return s, nil
+		}
+
+		keyStart := start + len(prefix)
+		keyLen := strings.Index(s[keyStart:], postfix)
+		if keyLen == -1 {
+			return "", fmt.Errorf("malformed expression")
+		}
+
+		end := keyStart + keyLen + len(postfix) - 1
+		key := s[keyStart : keyStart+keyLen]
+
+		// fmt.Printf("s:%q pp:%q start:%d end:%d keyStart:%d keyLen:%d key:%q\n", s, prefix + "..." + postfix, start, end, keyStart, keyLen, key)
+
+		for _, k := range keys {
+			if key == k {
+				return "", fmt.Errorf("circular reference")
+			}
+		}
+
+		val, ok := values[key]
+		if !ok {
+			val = os.Getenv(key)
+		}
+		newVal, err := expand(val, append(keys, key), prefix, postfix, values)
+		if err != nil {
+			return "", err
+		}
+		s = s[:start] + newVal + s[end+1:]
+	}
+	return s, nil
+}
+
+// encode encodes a UTF-8 string to ISO-8859-1 and escapes some characters.
+func encode(s string, special string, enc Encoding) string {
+	switch enc {
+	case UTF8:
+		return encodeUtf8(s, special)
+	case ISO_8859_1:
+		return encodeIso(s, special)
+	default:
+		panic(fmt.Sprintf("unsupported encoding %v", enc))
+	}
+}
+
+func encodeUtf8(s string, special string) string {
+	v := ""
+	for pos := 0; pos < len(s); {
+		r, w := utf8.DecodeRuneInString(s[pos:])
+		pos += w
+		v += escape(r, special)
+	}
+	return v
+}
+
+func encodeIso(s string, special string) string {
+	var r rune
+	var w int
+	var v string
+	for pos := 0; pos < len(s); {
+		switch r, w = utf8.DecodeRuneInString(s[pos:]); {
+		case r < 1<<8: // single byte rune -> escape special chars only
+			v += escape(r, special)
+		case r < 1<<16: // two byte rune -> unicode literal
+			v += fmt.Sprintf("\\u%04x", r)
+		default: // more than two bytes per rune -> can't encode
+			v += "?"
+		}
+		pos += w
+	}
+	return v
+}
+
+func escape(r rune, special string) string {
+	switch r {
+	case '\f':
+		return "\\f"
+	case '\n':
+		return "\\n"
+	case '\r':
+		return "\\r"
+	case '\t':
+		return "\\t"
+	default:
+		if strings.ContainsRune(special, r) {
+			return "\\" + string(r)
+		}
+		return string(r)
+	}
+}
+
+func invalidKeyError(key string) error {
+	return fmt.Errorf("unknown property: %s", key)
+}
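To tie the expansion and filtering behavior documented above together, a short sketch using made-up keys:

package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	p := properties.NewProperties()
	p.MustSet("host", "localhost")
	p.MustSet("url", "http://${host}:8080")

	// Get expands recursively at read time.
	fmt.Println(p.MustGetString("url")) // http://localhost:8080

	// Set rejects circular references and restores the previous state.
	if _, _, err := p.Set("loop", "${loop}"); err != nil {
		fmt.Println(err) // circular reference
	}

	// FilterStripPrefix copies the keys under a prefix, prefix removed.
	p.MustSet("db.user", "admin")
	fmt.Println(p.FilterStripPrefix("db.").MustGetString("user")) // admin
}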
diff --git a/vendor/github.com/magiconair/properties/rangecheck.go b/vendor/github.com/magiconair/properties/rangecheck.go
new file mode 100644
index 000000000..b013a2e5e
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/rangecheck.go
@@ -0,0 +1,31 @@
+// Copyright 2018 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+	"fmt"
+	"math"
+)
+
+// make this a var to overwrite it in a test
+var is32Bit = ^uint(0) == math.MaxUint32
+
+// intRangeCheck checks if the value fits into the int type and
+// panics if it does not.
+func intRangeCheck(key string, v int64) int {
+	if is32Bit && (v < math.MinInt32 || v > math.MaxInt32) {
+		panic(fmt.Sprintf("Value %d for key %s out of range", v, key))
+	}
+	return int(v)
+}
+
+// uintRangeCheck checks if the value fits into the uint type and
+// panics if it does not.
+func uintRangeCheck(key string, v uint64) uint {
+	if is32Bit && v > math.MaxUint32 {
+		panic(fmt.Sprintf("Value %d for key %s out of range", v, key))
+	}
+	return uint(v)
+}
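The is32Bit check relies on ^uint(0) having all bits set: it equals math.MaxUint32 on 32-bit platforms and math.MaxUint64 on 64-bit ones. A one-line probe for the current platform:

package main

import (
	"fmt"
	"math"
)

func main() {
	// ^uint(0) sets every bit of a uint; comparing it against
	// MaxUint32 reveals the platform word size.
	fmt.Println("32-bit uint:", ^uint(0) == math.MaxUint32)
}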
diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE
new file mode 100644
index 000000000..f9c841a51
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
new file mode 100644
index 000000000..2a727575a
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
@@ -0,0 +1,171 @@
+package mapstructure
+
+import (
+	"errors"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
+// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
+func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
+	// Create variables here so we can reference them with the reflect pkg
+	var f1 DecodeHookFuncType
+	var f2 DecodeHookFuncKind
+
+	// Fill in the variables into this interface and the rest is done
+	// automatically using the reflect package.
+	potential := []interface{}{f1, f2}
+
+	v := reflect.ValueOf(h)
+	vt := v.Type()
+	for _, raw := range potential {
+		pt := reflect.ValueOf(raw).Type()
+		if vt.ConvertibleTo(pt) {
+			return v.Convert(pt).Interface()
+		}
+	}
+
+	return nil
+}
+
+// DecodeHookExec executes the given decode hook. It should be used instead of
+// calling the hook directly, since it transparently handles the older
+// backwards-compatible DecodeHookFunc variant that takes reflect.Kind instead of reflect.Type.
+func DecodeHookExec(
+	raw DecodeHookFunc,
+	from reflect.Type, to reflect.Type,
+	data interface{}) (interface{}, error) {
+	switch f := typedDecodeHook(raw).(type) {
+	case DecodeHookFuncType:
+		return f(from, to, data)
+	case DecodeHookFuncKind:
+		return f(from.Kind(), to.Kind(), data)
+	default:
+		return nil, errors.New("invalid decode hook signature")
+	}
+}
+
+// ComposeDecodeHookFunc creates a single DecodeHookFunc that
+// automatically composes multiple DecodeHookFuncs.
+//
+// The composed funcs are called in order, with the result of the
+// previous transformation.
+func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		var err error
+		for _, f1 := range fs {
+			data, err = DecodeHookExec(f1, f, t, data)
+			if err != nil {
+				return nil, err
+			}
+
+			// Modify the from kind to be correct with the new data
+			f = nil
+			if val := reflect.ValueOf(data); val.IsValid() {
+				f = val.Type()
+			}
+		}
+
+		return data, nil
+	}
+}
+
+// StringToSliceHookFunc returns a DecodeHookFunc that converts
+// string to []string by splitting on the given sep.
+func StringToSliceHookFunc(sep string) DecodeHookFunc {
+	return func(
+		f reflect.Kind,
+		t reflect.Kind,
+		data interface{}) (interface{}, error) {
+		if f != reflect.String || t != reflect.Slice {
+			return data, nil
+		}
+
+		raw := data.(string)
+		if raw == "" {
+			return []string{}, nil
+		}
+
+		return strings.Split(raw, sep), nil
+	}
+}
+
+// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
+// strings to time.Duration.
+func StringToTimeDurationHookFunc() DecodeHookFunc {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		if f.Kind() != reflect.String {
+			return data, nil
+		}
+		if t != reflect.TypeOf(time.Duration(5)) {
+			return data, nil
+		}
+
+		// Convert it by parsing
+		return time.ParseDuration(data.(string))
+	}
+}
+
+// StringToTimeHookFunc returns a DecodeHookFunc that converts
+// strings to time.Time.
+func StringToTimeHookFunc(layout string) DecodeHookFunc {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		if f.Kind() != reflect.String {
+			return data, nil
+		}
+		if t != reflect.TypeOf(time.Time{}) {
+			return data, nil
+		}
+
+		// Convert it by parsing
+		return time.Parse(layout, data.(string))
+	}
+}
+
+// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
+// the decoder.
+//
+// Note that this is significantly different from the WeaklyTypedInput option
+// of the DecoderConfig.
+func WeaklyTypedHook(
+	f reflect.Kind,
+	t reflect.Kind,
+	data interface{}) (interface{}, error) {
+	dataVal := reflect.ValueOf(data)
+	switch t {
+	case reflect.String:
+		switch f {
+		case reflect.Bool:
+			if dataVal.Bool() {
+				return "1", nil
+			}
+			return "0", nil
+		case reflect.Float32:
+			return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
+		case reflect.Int:
+			return strconv.FormatInt(dataVal.Int(), 10), nil
+		case reflect.Slice:
+			dataType := dataVal.Type()
+			elemKind := dataType.Elem().Kind()
+			if elemKind == reflect.Uint8 {
+				return string(dataVal.Interface().([]uint8)), nil
+			}
+		case reflect.Uint:
+			return strconv.FormatUint(dataVal.Uint(), 10), nil
+		}
+	}
+
+	return data, nil
+}
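A sketch of wiring these hooks into a decode. NewDecoder and the DecoderConfig Result field are defined in mapstructure.go below (its hunk is truncated in this diff); the Config struct and input map are made up:

package main

import (
	"fmt"
	"time"

	"github.com/mitchellh/mapstructure"
)

type Config struct {
	Timeout time.Duration
	Tags    []string
}

func main() {
	input := map[string]interface{}{
		"timeout": "5s",
		"tags":    "a,b,c",
	}

	var c Config
	d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result: &c,
		// Each composed hook runs in order on every value; hooks that
		// don't match the source/target types pass the data through.
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToSliceHookFunc(","),
		),
	})
	if err != nil {
		panic(err)
	}
	if err := d.Decode(input); err != nil {
		panic(err)
	}
	fmt.Println(c.Timeout, c.Tags) // 5s [a b c]
}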
diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go
new file mode 100644
index 000000000..47a99e5af
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/error.go
@@ -0,0 +1,50 @@
+package mapstructure
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// Error implements the error interface and can represent multiple
+// errors that occur in the course of a single decode.
+type Error struct {
+	Errors []string
+}
+
+func (e *Error) Error() string {
+	points := make([]string, len(e.Errors))
+	for i, err := range e.Errors {
+		points[i] = fmt.Sprintf("* %s", err)
+	}
+
+	sort.Strings(points)
+	return fmt.Sprintf(
+		"%d error(s) decoding:\n\n%s",
+		len(e.Errors), strings.Join(points, "\n"))
+}
+
+// WrappedErrors implements the errwrap.Wrapper interface to make this
+// return value more useful with the errwrap and go-multierror libraries.
+func (e *Error) WrappedErrors() []error {
+	if e == nil {
+		return nil
+	}
+
+	result := make([]error, len(e.Errors))
+	for i, e := range e.Errors {
+		result[i] = errors.New(e)
+	}
+
+	return result
+}
+
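+// appendErrors flattens a *Error into its component messages; any other
+// error is appended as a single message.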
+func appendErrors(errors []string, err error) []string {
+	switch e := err.(type) {
+	case *Error:
+		return append(errors, e.Errors...)
+	default:
+		return append(errors, e.Error())
+	}
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
new file mode 100644
index 000000000..d3222b8f4
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
@@ -0,0 +1,1064 @@
+// Package mapstructure exposes functionality to convert an arbitrary
+// map[string]interface{} into a native Go structure.
+//
+// The Go structure can be arbitrarily complex, containing slices,
+// other structs, etc., and the decoder will correctly decode nested
+// maps and similar values into the corresponding fields of the native
+// Go struct. See the examples to see what the decoder is capable of.
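+//
+// A minimal sketch of typical usage (the Person type and values are
+// illustrative):
+//
+//   type Person struct {
+//       Name string
+//       Age  int
+//   }
+//
+//   input := map[string]interface{}{"name": "Mitchell", "age": 91}
+//   var result Person
+//   if err := Decode(input, &result); err != nil {
+//       panic(err)
+//   }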
+package mapstructure
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// DecodeHookFunc is the callback function that can be used for
+// data transformations. See "DecodeHook" in the DecoderConfig
+// struct.
+//
+// The type should be DecodeHookFuncType or DecodeHookFuncKind.
+// Either is accepted. Types are a superset of Kinds (Types can return
+// Kinds) and are generally richer to work with, but Kinds are simpler
+// if that is all you need.
+//
+// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
+// we started with Kinds and then realized Types were the better solution,
+// but have a promise to not break backwards compat so we now support
+// both.
+type DecodeHookFunc interface{}
+
+// DecodeHookFuncType is a DecodeHookFunc which has complete information about
+// the source and target types.
+type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
+
+// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
+// source and target types.
+type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
+
+// DecoderConfig is the configuration that is used to create a new decoder
+// and allows customization of various aspects of decoding.
+type DecoderConfig struct {
+	// DecodeHook, if set, will be called before any decoding and any
+	// type conversion (if WeaklyTypedInput is on). This lets you modify
+	// the values before they're set down onto the resulting struct.
+	//
+	// If an error is returned, the entire decode will fail with that
+	// error.
+	DecodeHook DecodeHookFunc
+
+	// If ErrorUnused is true, then it is an error for there to exist
+	// keys in the original map that were unused in the decoding process
+	// (extra keys).
+	ErrorUnused bool
+
+	// ZeroFields, if set to true, will zero fields before writing them.
+	// For example, a map will be emptied before decoded values are put in
+	// it. If this is false, a map will be merged.
+	ZeroFields bool
+
+	// If WeaklyTypedInput is true, the decoder will make the following
+	// "weak" conversions:
+	//
+	//   - bools to string (true = "1", false = "0")
+	//   - numbers to string (base 10)
+	//   - bools to int/uint (true = 1, false = 0)
+	//   - strings to int/uint (base implied by prefix)
+	//   - int to bool (true if value != 0)
+	//   - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
+	//     FALSE, false, False. Anything else is an error)
+	//   - empty array = empty map and vice versa
+	//   - negative numbers to overflowed uint values (base 10)
+	//   - slice of maps to a merged map
+	//   - single values are converted to slices if required. Each
+	//     element is weakly decoded. For example: "4" can become []int{4}
+	//     if the target type is an int slice.
+	//
+	WeaklyTypedInput bool
+
+	// Metadata is the struct that will contain extra metadata about
+	// the decoding. If this is nil, then no metadata will be tracked.
+	Metadata *Metadata
+
+	// Result is a pointer to the struct that will contain the decoded
+	// value.
+	Result interface{}
+
+	// The tag name that mapstructure reads for field names. This
+	// defaults to "mapstructure"
+	TagName string
+}
+
+// A Decoder takes a raw interface value and turns it into structured
+// data, keeping track of rich error information along the way in case
+// anything goes wrong. Unlike the basic top-level Decode method, you can
+// more finely control how the Decoder behaves using the DecoderConfig
+// structure. The top-level Decode method is just a convenience that sets
+// up the most basic Decoder.
+type Decoder struct {
+	config *DecoderConfig
+}
+
+// Metadata contains information about decoding a structure that
+// is tedious or difficult to get otherwise.
+type Metadata struct {
+	// Keys are the keys of the structure which were successfully decoded
+	Keys []string
+
+	// Unused is a slice of keys that were found in the raw value but
+	// weren't decoded since there was no matching field in the result interface
+	Unused []string
+}
+
+// Decode takes an input structure and uses reflection to translate it to
+// the output structure. output must be a pointer to a map or struct.
+func Decode(input interface{}, output interface{}) error {
+	config := &DecoderConfig{
+		Metadata: nil,
+		Result:   output,
+	}
+
+	decoder, err := NewDecoder(config)
+	if err != nil {
+		return err
+	}
+
+	return decoder.Decode(input)
+}
+
+// WeakDecode is the same as Decode but is shorthand to enable
+// WeaklyTypedInput. See DecoderConfig for more info.
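+//
+// For instance, with weak typing enabled, the string "8080" can decode
+// into an int field, and a bool true into the string "1" (see the
+// conversion list on DecoderConfig.WeaklyTypedInput).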
+func WeakDecode(input, output interface{}) error {
+	config := &DecoderConfig{
+		Metadata:         nil,
+		Result:           output,
+		WeaklyTypedInput: true,
+	}
+
+	decoder, err := NewDecoder(config)
+	if err != nil {
+		return err
+	}
+
+	return decoder.Decode(input)
+}
+
+// DecodeMetadata is the same as Decode, but is shorthand to
+// enable metadata collection. See DecoderConfig for more info.
+func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
+	config := &DecoderConfig{
+		Metadata: metadata,
+		Result:   output,
+	}
+
+	decoder, err := NewDecoder(config)
+	if err != nil {
+		return err
+	}
+
+	return decoder.Decode(input)
+}
+
+// WeakDecodeMetadata is the same as Decode, but is shorthand to
+// enable both WeaklyTypedInput and metadata collection. See
+// DecoderConfig for more info.
+func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
+	config := &DecoderConfig{
+		Metadata:         metadata,
+		Result:           output,
+		WeaklyTypedInput: true,
+	}
+
+	decoder, err := NewDecoder(config)
+	if err != nil {
+		return err
+	}
+
+	return decoder.Decode(input)
+}
+
+// NewDecoder returns a new decoder for the given configuration. Once
+// a decoder has been returned, the same configuration must not be used
+// again.
+func NewDecoder(config *DecoderConfig) (*Decoder, error) {
+	val := reflect.ValueOf(config.Result)
+	if val.Kind() != reflect.Ptr {
+		return nil, errors.New("result must be a pointer")
+	}
+
+	val = val.Elem()
+	if !val.CanAddr() {
+		return nil, errors.New("result must be addressable (a pointer)")
+	}
+
+	if config.Metadata != nil {
+		if config.Metadata.Keys == nil {
+			config.Metadata.Keys = make([]string, 0)
+		}
+
+		if config.Metadata.Unused == nil {
+			config.Metadata.Unused = make([]string, 0)
+		}
+	}
+
+	if config.TagName == "" {
+		config.TagName = "mapstructure"
+	}
+
+	result := &Decoder{
+		config: config,
+	}
+
+	return result, nil
+}
+
+// Decode decodes the given raw interface to the target pointer specified
+// by the configuration.
+func (d *Decoder) Decode(input interface{}) error {
+	return d.decode("", input, reflect.ValueOf(d.config.Result).Elem())
+}
+
+// decode decodes an unknown data type into a specific reflection value.
+func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error {
+	if input == nil {
+		// If the data is nil, then we don't set anything, unless ZeroFields is set
+		// to true.
+		if d.config.ZeroFields {
+			outVal.Set(reflect.Zero(outVal.Type()))
+
+			if d.config.Metadata != nil && name != "" {
+				d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+			}
+		}
+		return nil
+	}
+
+	inputVal := reflect.ValueOf(input)
+	if !inputVal.IsValid() {
+		// If the input value is invalid, then we just set the value
+		// to be the zero value.
+		outVal.Set(reflect.Zero(outVal.Type()))
+		if d.config.Metadata != nil && name != "" {
+			d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+		}
+		return nil
+	}
+
+	if d.config.DecodeHook != nil {
+		// We have a DecodeHook, so let's pre-process the input.
+		var err error
+		input, err = DecodeHookExec(
+			d.config.DecodeHook,
+			inputVal.Type(), outVal.Type(), input)
+		if err != nil {
+			return fmt.Errorf("error decoding '%s': %s", name, err)
+		}
+	}
+
+	var err error
+	inputKind := getKind(outVal)
+	switch inputKind {
+	case reflect.Bool:
+		err = d.decodeBool(name, input, outVal)
+	case reflect.Interface:
+		err = d.decodeBasic(name, input, outVal)
+	case reflect.String:
+		err = d.decodeString(name, input, outVal)
+	case reflect.Int:
+		err = d.decodeInt(name, input, outVal)
+	case reflect.Uint:
+		err = d.decodeUint(name, input, outVal)
+	case reflect.Float32:
+		err = d.decodeFloat(name, input, outVal)
+	case reflect.Struct:
+		err = d.decodeStruct(name, input, outVal)
+	case reflect.Map:
+		err = d.decodeMap(name, input, outVal)
+	case reflect.Ptr:
+		err = d.decodePtr(name, input, outVal)
+	case reflect.Slice:
+		err = d.decodeSlice(name, input, outVal)
+	case reflect.Array:
+		err = d.decodeArray(name, input, outVal)
+	case reflect.Func:
+		err = d.decodeFunc(name, input, outVal)
+	default:
+		// If we reached this point then we weren't able to decode it
+		return fmt.Errorf("%s: unsupported type: %s", name, inputKind)
+	}
+
+	// If we reached here, then we successfully decoded SOMETHING, so
+	// mark the key as used if we're tracking metadata.
+	if d.config.Metadata != nil && name != "" {
+		d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+	}
+
+	return err
+}
+
+// decodeBasic decodes a basic type (bool, int, string, etc.) and sets
+// the target value to "data" of that type.
+func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
+	if val.IsValid() && val.Elem().IsValid() {
+		return d.decode(name, data, val.Elem())
+	}
+	dataVal := reflect.ValueOf(data)
+	if !dataVal.IsValid() {
+		dataVal = reflect.Zero(val.Type())
+	}
+
+	dataValType := dataVal.Type()
+	if !dataValType.AssignableTo(val.Type()) {
+		return fmt.Errorf(
+			"'%s' expected type '%s', got '%s'",
+			name, val.Type(), dataValType)
+	}
+
+	val.Set(dataVal)
+	return nil
+}
+
+func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.ValueOf(data)
+	dataKind := getKind(dataVal)
+
+	converted := true
+	switch {
+	case dataKind == reflect.String:
+		val.SetString(dataVal.String())
+	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+		if dataVal.Bool() {
+			val.SetString("1")
+		} else {
+			val.SetString("0")
+		}
+	case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+		val.SetString(strconv.FormatInt(dataVal.Int(), 10))
+	case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+		val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
+	case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+		val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
+	case dataKind == reflect.Slice && d.config.WeaklyTypedInput,
+		dataKind == reflect.Array && d.config.WeaklyTypedInput:
+		dataType := dataVal.Type()
+		elemKind := dataType.Elem().Kind()
+		switch elemKind {
+		case reflect.Uint8:
+			var uints []uint8
+			if dataKind == reflect.Array {
+				uints = make([]uint8, dataVal.Len(), dataVal.Len())
+				for i := range uints {
+					uints[i] = dataVal.Index(i).Interface().(uint8)
+				}
+			} else {
+				uints = dataVal.Interface().([]uint8)
+			}
+			val.SetString(string(uints))
+		default:
+			converted = false
+		}
+	default:
+		converted = false
+	}
+
+	if !converted {
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.ValueOf(data)
+	dataKind := getKind(dataVal)
+	dataType := dataVal.Type()
+
+	switch {
+	case dataKind == reflect.Int:
+		val.SetInt(dataVal.Int())
+	case dataKind == reflect.Uint:
+		val.SetInt(int64(dataVal.Uint()))
+	case dataKind == reflect.Float32:
+		val.SetInt(int64(dataVal.Float()))
+	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+		if dataVal.Bool() {
+			val.SetInt(1)
+		} else {
+			val.SetInt(0)
+		}
+	case dataKind == reflect.String && d.config.WeaklyTypedInput:
+		i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits())
+		if err == nil {
+			val.SetInt(i)
+		} else {
+			return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
+		}
+	case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+		jn := data.(json.Number)
+		i, err := jn.Int64()
+		if err != nil {
+			return fmt.Errorf(
+				"error decoding json.Number into %s: %s", name, err)
+		}
+		val.SetInt(i)
+	default:
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.ValueOf(data)
+	dataKind := getKind(dataVal)
+
+	switch {
+	case dataKind == reflect.Int:
+		i := dataVal.Int()
+		if i < 0 && !d.config.WeaklyTypedInput {
+			return fmt.Errorf("cannot parse '%s', %d overflows uint",
+				name, i)
+		}
+		val.SetUint(uint64(i))
+	case dataKind == reflect.Uint:
+		val.SetUint(dataVal.Uint())
+	case dataKind == reflect.Float32:
+		f := dataVal.Float()
+		if f < 0 && !d.config.WeaklyTypedInput {
+			return fmt.Errorf("cannot parse '%s', %f overflows uint",
+				name, f)
+		}
+		val.SetUint(uint64(f))
+	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+		if dataVal.Bool() {
+			val.SetUint(1)
+		} else {
+			val.SetUint(0)
+		}
+	case dataKind == reflect.String && d.config.WeaklyTypedInput:
+		i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits())
+		if err == nil {
+			val.SetUint(i)
+		} else {
+			return fmt.Errorf("cannot parse '%s' as uint: %s", name, err)
+		}
+	default:
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.ValueOf(data)
+	dataKind := getKind(dataVal)
+
+	switch {
+	case dataKind == reflect.Bool:
+		val.SetBool(dataVal.Bool())
+	case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+		val.SetBool(dataVal.Int() != 0)
+	case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+		val.SetBool(dataVal.Uint() != 0)
+	case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+		val.SetBool(dataVal.Float() != 0)
+	case dataKind == reflect.String && d.config.WeaklyTypedInput:
+		b, err := strconv.ParseBool(dataVal.String())
+		if err == nil {
+			val.SetBool(b)
+		} else if dataVal.String() == "" {
+			val.SetBool(false)
+		} else {
+			return fmt.Errorf("cannot parse '%s' as bool: %s", name, err)
+		}
+	default:
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.ValueOf(data)
+	dataKind := getKind(dataVal)
+	dataType := dataVal.Type()
+
+	switch {
+	case dataKind == reflect.Int:
+		val.SetFloat(float64(dataVal.Int()))
+	case dataKind == reflect.Uint:
+		val.SetFloat(float64(dataVal.Uint()))
+	case dataKind == reflect.Float32:
+		val.SetFloat(dataVal.Float())
+	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+		if dataVal.Bool() {
+			val.SetFloat(1)
+		} else {
+			val.SetFloat(0)
+		}
+	case dataKind == reflect.String && d.config.WeaklyTypedInput:
+		f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits())
+		if err == nil {
+			val.SetFloat(f)
+		} else {
+			return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
+		}
+	case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+		jn := data.(json.Number)
+		i, err := jn.Float64()
+		if err != nil {
+			return fmt.Errorf(
+				"error decoding json.Number into %s: %s", name, err)
+		}
+		val.SetFloat(i)
+	default:
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error {
+	valType := val.Type()
+	valKeyType := valType.Key()
+	valElemType := valType.Elem()
+
+	// By default we overwrite keys in the current map
+	valMap := val
+
+	// If the map is nil or we're purposely zeroing fields, make a new map
+	if valMap.IsNil() || d.config.ZeroFields {
+		// Make a new map to hold our result
+		mapType := reflect.MapOf(valKeyType, valElemType)
+		valMap = reflect.MakeMap(mapType)
+	}
+
+	// Check input type and based on the input type jump to the proper func
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	switch dataVal.Kind() {
+	case reflect.Map:
+		return d.decodeMapFromMap(name, dataVal, val, valMap)
+
+	case reflect.Struct:
+		return d.decodeMapFromStruct(name, dataVal, val, valMap)
+
+	case reflect.Array, reflect.Slice:
+		if d.config.WeaklyTypedInput {
+			return d.decodeMapFromSlice(name, dataVal, val, valMap)
+		}
+
+		fallthrough
+
+	default:
+		return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
+	}
+}
+
+func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
+	// Special case for backwards-compatibility reasons (covered by tests)
+	if dataVal.Len() == 0 {
+		val.Set(valMap)
+		return nil
+	}
+
+	for i := 0; i < dataVal.Len(); i++ {
+		err := d.decode(
+			fmt.Sprintf("%s[%d]", name, i),
+			dataVal.Index(i).Interface(), val)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
+	valType := val.Type()
+	valKeyType := valType.Key()
+	valElemType := valType.Elem()
+
+	// Accumulate errors
+	errors := make([]string, 0)
+
+	for _, k := range dataVal.MapKeys() {
+		fieldName := fmt.Sprintf("%s[%s]", name, k)
+
+		// First decode the key into the proper type
+		currentKey := reflect.Indirect(reflect.New(valKeyType))
+		if err := d.decode(fieldName, k.Interface(), currentKey); err != nil {
+			errors = appendErrors(errors, err)
+			continue
+		}
+
+		// Next decode the data into the proper type
+		v := dataVal.MapIndex(k).Interface()
+		currentVal := reflect.Indirect(reflect.New(valElemType))
+		if err := d.decode(fieldName, v, currentVal); err != nil {
+			errors = appendErrors(errors, err)
+			continue
+		}
+
+		valMap.SetMapIndex(currentKey, currentVal)
+	}
+
+	// Set the built up map to the value
+	val.Set(valMap)
+
+	// If we had errors, return those
+	if len(errors) > 0 {
+		return &Error{errors}
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
+	typ := dataVal.Type()
+	for i := 0; i < typ.NumField(); i++ {
+		// Get the StructField first since this is a cheap operation. If the
+		// field is unexported, then ignore it.
+		f := typ.Field(i)
+		if f.PkgPath != "" {
+			continue
+		}
+
+		// Next get the actual value of this field and verify it is assignable
+		// to the map value.
+		v := dataVal.Field(i)
+		if !v.Type().AssignableTo(valMap.Type().Elem()) {
+			return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem())
+		}
+
+		tagValue := f.Tag.Get(d.config.TagName)
+		tagParts := strings.Split(tagValue, ",")
+
+		// Determine the name of the key in the map
+		keyName := f.Name
+		if tagParts[0] != "" {
+			if tagParts[0] == "-" {
+				continue
+			}
+			keyName = tagParts[0]
+		}
+
+		// If "squash" is specified in the tag, we squash the field down.
+		squash := false
+		for _, tag := range tagParts[1:] {
+			if tag == "squash" {
+				squash = true
+				break
+			}
+		}
+		if squash && v.Kind() != reflect.Struct {
+			return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
+		}
+
+		switch v.Kind() {
+		// this is an embedded struct, so handle it differently
+		case reflect.Struct:
+			x := reflect.New(v.Type())
+			x.Elem().Set(v)
+
+			vType := valMap.Type()
+			vKeyType := vType.Key()
+			vElemType := vType.Elem()
+			mType := reflect.MapOf(vKeyType, vElemType)
+			vMap := reflect.MakeMap(mType)
+
+			err := d.decode(keyName, x.Interface(), vMap)
+			if err != nil {
+				return err
+			}
+
+			if squash {
+				for _, k := range vMap.MapKeys() {
+					valMap.SetMapIndex(k, vMap.MapIndex(k))
+				}
+			} else {
+				valMap.SetMapIndex(reflect.ValueOf(keyName), vMap)
+			}
+
+		default:
+			valMap.SetMapIndex(reflect.ValueOf(keyName), v)
+		}
+	}
+
+	if val.CanAddr() {
+		val.Set(valMap)
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error {
+	// Create an element of the concrete (non pointer) type and decode
+	// into that. Then set the value of the pointer to this type.
+	valType := val.Type()
+	valElemType := valType.Elem()
+
+	if val.CanSet() {
+		realVal := val
+		if realVal.IsNil() || d.config.ZeroFields {
+			realVal = reflect.New(valElemType)
+		}
+
+		if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
+			return err
+		}
+
+		val.Set(realVal)
+	} else {
+		if err := d.decode(name, data, reflect.Indirect(val)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error {
+	// Create an element of the concrete (non pointer) type and decode
+	// into that. Then set the value of the pointer to this type.
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	if val.Type() != dataVal.Type() {
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+	val.Set(dataVal)
+	return nil
+}
+
+func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	dataValKind := dataVal.Kind()
+	valType := val.Type()
+	valElemType := valType.Elem()
+	sliceType := reflect.SliceOf(valElemType)
+
+	valSlice := val
+	if valSlice.IsNil() || d.config.ZeroFields {
+		// Check input type
+		if dataValKind != reflect.Array && dataValKind != reflect.Slice {
+			if d.config.WeaklyTypedInput {
+				switch {
+				// Empty maps turn into empty slices
+				case dataValKind == reflect.Map:
+					if dataVal.Len() == 0 {
+						val.Set(reflect.MakeSlice(sliceType, 0, 0))
+						return nil
+					}
+					// Wrap a non-empty map in a single-element slice and re-decode
+					return d.decodeSlice(name, []interface{}{data}, val)
+
+				case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8:
+					return d.decodeSlice(name, []byte(dataVal.String()), val)
+				// All other types we try to convert to the slice type
+				// and "lift" it into it. i.e. a string becomes a string slice.
+				default:
+					// Just re-try this function with data as a slice.
+					return d.decodeSlice(name, []interface{}{data}, val)
+				}
+			}
+			return fmt.Errorf(
+				"'%s': source data must be an array or slice, got %s", name, dataValKind)
+
+		}
+
+		// Make a new slice to hold our result, same size as the original data.
+		valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
+	}
+
+	// Accumulate any errors
+	errors := make([]string, 0)
+
+	for i := 0; i < dataVal.Len(); i++ {
+		currentData := dataVal.Index(i).Interface()
+		for valSlice.Len() <= i {
+			valSlice = reflect.Append(valSlice, reflect.Zero(valElemType))
+		}
+		currentField := valSlice.Index(i)
+
+		fieldName := fmt.Sprintf("%s[%d]", name, i)
+		if err := d.decode(fieldName, currentData, currentField); err != nil {
+			errors = appendErrors(errors, err)
+		}
+	}
+
+	// Finally, set the value to the slice we built up
+	val.Set(valSlice)
+
+	// If there were errors, we return those
+	if len(errors) > 0 {
+		return &Error{errors}
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	dataValKind := dataVal.Kind()
+	valType := val.Type()
+	valElemType := valType.Elem()
+	arrayType := reflect.ArrayOf(valType.Len(), valElemType)
+
+	valArray := val
+
+	if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields {
+		// Check input type
+		if dataValKind != reflect.Array && dataValKind != reflect.Slice {
+			if d.config.WeaklyTypedInput {
+				switch {
+				// Empty maps turn into empty arrays
+				case dataValKind == reflect.Map:
+					if dataVal.Len() == 0 {
+						val.Set(reflect.Zero(arrayType))
+						return nil
+					}
+
+				// All other types we try to convert to the array type
+				// and "lift" it into it. i.e. a string becomes a string array.
+				default:
+					// Just re-try this function with data as a slice.
+					return d.decodeArray(name, []interface{}{data}, val)
+				}
+			}
+
+			return fmt.Errorf(
+				"'%s': source data must be an array or slice, got %s", name, dataValKind)
+
+		}
+		if dataVal.Len() > arrayType.Len() {
+			return fmt.Errorf(
+				"'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len())
+
+		}
+
+		// Make a new array to hold our result, same size as the original data.
+		valArray = reflect.New(arrayType).Elem()
+	}
+
+	// Accumulate any errors
+	errors := make([]string, 0)
+
+	for i := 0; i < dataVal.Len(); i++ {
+		currentData := dataVal.Index(i).Interface()
+		currentField := valArray.Index(i)
+
+		fieldName := fmt.Sprintf("%s[%d]", name, i)
+		if err := d.decode(fieldName, currentData, currentField); err != nil {
+			errors = appendErrors(errors, err)
+		}
+	}
+
+	// Finally, set the value to the array we built up
+	val.Set(valArray)
+
+	// If there were errors, we return those
+	if len(errors) > 0 {
+		return &Error{errors}
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+
+	// If the type of the value to write to and the data match directly,
+	// then we just set it directly instead of recursing into the structure.
+	if dataVal.Type() == val.Type() {
+		val.Set(dataVal)
+		return nil
+	}
+
+	dataValKind := dataVal.Kind()
+	if dataValKind != reflect.Map {
+		return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind)
+	}
+
+	dataValType := dataVal.Type()
+	if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
+		return fmt.Errorf(
+			"'%s' needs a map with string keys, has '%s' keys",
+			name, dataValType.Key().Kind())
+	}
+
+	dataValKeys := make(map[reflect.Value]struct{})
+	dataValKeysUnused := make(map[interface{}]struct{})
+	for _, dataValKey := range dataVal.MapKeys() {
+		dataValKeys[dataValKey] = struct{}{}
+		dataValKeysUnused[dataValKey.Interface()] = struct{}{}
+	}
+
+	errors := make([]string, 0)
+
+	// This slice will keep track of all the structs we'll be decoding.
+	// There can be more than one struct if there are embedded structs
+	// that are squashed.
+	structs := make([]reflect.Value, 1, 5)
+	structs[0] = val
+
+	// Compile the list of all the fields that we're going to be decoding
+	// from all the structs.
+	type field struct {
+		field reflect.StructField
+		val   reflect.Value
+	}
+	fields := []field{}
+	for len(structs) > 0 {
+		structVal := structs[0]
+		structs = structs[1:]
+
+		structType := structVal.Type()
+
+		for i := 0; i < structType.NumField(); i++ {
+			fieldType := structType.Field(i)
+			fieldKind := fieldType.Type.Kind()
+
+			// If "squash" is specified in the tag, we squash the field down.
+			squash := false
+			tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
+			for _, tag := range tagParts[1:] {
+				if tag == "squash" {
+					squash = true
+					break
+				}
+			}
+
+			if squash {
+				if fieldKind != reflect.Struct {
+					errors = appendErrors(errors,
+						fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind))
+				} else {
+					structs = append(structs, structVal.FieldByName(fieldType.Name))
+				}
+				continue
+			}
+
+			// Normal struct field, store it away
+			fields = append(fields, field{fieldType, structVal.Field(i)})
+		}
+	}
+
+	// for fieldType, field := range fields {
+	for _, f := range fields {
+		field, fieldValue := f.field, f.val
+		fieldName := field.Name
+
+		tagValue := field.Tag.Get(d.config.TagName)
+		tagValue = strings.SplitN(tagValue, ",", 2)[0]
+		if tagValue != "" {
+			fieldName = tagValue
+		}
+
+		rawMapKey := reflect.ValueOf(fieldName)
+		rawMapVal := dataVal.MapIndex(rawMapKey)
+		if !rawMapVal.IsValid() {
+			// Do a slower search by iterating over each key and
+			// doing case-insensitive search.
+			for dataValKey := range dataValKeys {
+				mK, ok := dataValKey.Interface().(string)
+				if !ok {
+					// Not a string key
+					continue
+				}
+
+				if strings.EqualFold(mK, fieldName) {
+					rawMapKey = dataValKey
+					rawMapVal = dataVal.MapIndex(dataValKey)
+					break
+				}
+			}
+
+			if !rawMapVal.IsValid() {
+				// There was no matching key in the map for the value in
+				// the struct. Just ignore.
+				continue
+			}
+		}
+
+		// Delete the key we're using from the unused map so we stop tracking
+		delete(dataValKeysUnused, rawMapKey.Interface())
+
+		if !fieldValue.IsValid() {
+			// This should never happen
+			panic("field is not valid")
+		}
+
+		// If we can't set the field, then it is unexported or something,
+		// and we just continue onwards.
+		if !fieldValue.CanSet() {
+			continue
+		}
+
+		// If the name is the empty string, then we're at the root, and we
+		// don't dot-join the fields.
+		if name != "" {
+			fieldName = fmt.Sprintf("%s.%s", name, fieldName)
+		}
+
+		if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil {
+			errors = appendErrors(errors, err)
+		}
+	}
+
+	if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
+		keys := make([]string, 0, len(dataValKeysUnused))
+		for rawKey := range dataValKeysUnused {
+			keys = append(keys, rawKey.(string))
+		}
+		sort.Strings(keys)
+
+		err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", "))
+		errors = appendErrors(errors, err)
+	}
+
+	if len(errors) > 0 {
+		return &Error{errors}
+	}
+
+	// Add the unused keys to the list of unused keys if we're tracking metadata
+	if d.config.Metadata != nil {
+		for rawKey := range dataValKeysUnused {
+			key := rawKey.(string)
+			if name != "" {
+				key = fmt.Sprintf("%s.%s", name, key)
+			}
+
+			d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
+		}
+	}
+
+	return nil
+}
+
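+// getKind normalizes sized kinds to a base kind: every int kind maps to
+// reflect.Int, every uint kind to reflect.Uint, and both float kinds to
+// reflect.Float32.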
+func getKind(val reflect.Value) reflect.Kind {
+	kind := val.Kind()
+
+	switch {
+	case kind >= reflect.Int && kind <= reflect.Int64:
+		return reflect.Int
+	case kind >= reflect.Uint && kind <= reflect.Uint64:
+		return reflect.Uint
+	case kind >= reflect.Float32 && kind <= reflect.Float64:
+		return reflect.Float32
+	default:
+		return kind
+	}
+}
diff --git a/vendor/github.com/pelletier/go-toml/LICENSE b/vendor/github.com/pelletier/go-toml/LICENSE
new file mode 100644
index 000000000..583bdae62
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go
new file mode 100644
index 000000000..d5fd98c02
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/doc.go
@@ -0,0 +1,23 @@
+// Package toml is a TOML parser and manipulation library.
+//
+// This version supports the specification as described in
+// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md
+//
+// Marshaling
+//
+// Go-toml can marshal and unmarshal TOML documents from and to data
+// structures.
+//
+// TOML document as a tree
+//
+// Go-toml can operate on a TOML document as a tree. Use one of the Load*
+// functions to parse TOML data and obtain a Tree instance, then one of its
+// methods to manipulate the tree.
+//
+// JSONPath-like queries
+//
+// The package github.com/pelletier/go-toml/query implements a system
+// similar to JSONPath to quickly retrieve elements of a TOML document using a
+// single expression. See the package documentation for more information.
+//
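+// A minimal tree-based usage sketch (the document and key are
+// illustrative):
+//
+//   tree, err := Load("[server]\nport = 8080")
+//   if err != nil {
+//       // handle the parse error
+//   }
+//   port := tree.Get("server.port").(int64)
+//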
+package toml
diff --git a/vendor/github.com/pelletier/go-toml/fuzz.go b/vendor/github.com/pelletier/go-toml/fuzz.go
new file mode 100644
index 000000000..14570c8d3
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/fuzz.go
@@ -0,0 +1,31 @@
+// +build gofuzz
+
+package toml
+
+func Fuzz(data []byte) int {
+	tree, err := LoadBytes(data)
+	if err != nil {
+		if tree != nil {
+			panic("tree must be nil if there is an error")
+		}
+		return 0
+	}
+
+	str, err := tree.ToTomlString()
+	if err != nil {
+		if str != "" {
+			panic(`str must be "" if there is an error`)
+		}
+		panic(err)
+	}
+
+	tree, err = Load(str)
+	if err != nil {
+		if tree != nil {
+			panic("tree must be nil if there is an error")
+		}
+		return 0
+	}
+
+	return 1
+}
diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go
new file mode 100644
index 000000000..284db6467
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/keysparsing.go
@@ -0,0 +1,85 @@
+// Key parsing, handling both bare and quoted keys.
+
+package toml
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"unicode"
+)
+
+// parseKey converts a key group string to an array of parts.
+// The input supports double quotes to allow "." inside the key name,
+// but escape sequences are not supported; lexers must unescape them beforehand.
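+// For example, the key `a."b.c".d` parses into []string{"a", "b.c", "d"}.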
+func parseKey(key string) ([]string, error) {
+	groups := []string{}
+	var buffer bytes.Buffer
+	inQuotes := false
+	wasInQuotes := false
+	ignoreSpace := true
+	expectDot := false
+
+	for _, char := range key {
+		if ignoreSpace {
+			if char == ' ' {
+				continue
+			}
+			ignoreSpace = false
+		}
+		switch char {
+		case '"':
+			if inQuotes {
+				groups = append(groups, buffer.String())
+				buffer.Reset()
+				wasInQuotes = true
+			}
+			inQuotes = !inQuotes
+			expectDot = false
+		case '.':
+			if inQuotes {
+				buffer.WriteRune(char)
+			} else {
+				if !wasInQuotes {
+					if buffer.Len() == 0 {
+						return nil, errors.New("empty table key")
+					}
+					groups = append(groups, buffer.String())
+					buffer.Reset()
+				}
+				ignoreSpace = true
+				expectDot = false
+				wasInQuotes = false
+			}
+		case ' ':
+			if inQuotes {
+				buffer.WriteRune(char)
+			} else {
+				expectDot = true
+			}
+		default:
+			if !inQuotes && !isValidBareChar(char) {
+				return nil, fmt.Errorf("invalid bare character: %c", char)
+			}
+			if !inQuotes && expectDot {
+				return nil, errors.New("invalid key: expected '.' after key segment")
+			}
+			buffer.WriteRune(char)
+			expectDot = false
+		}
+	}
+	if inQuotes {
+		return nil, errors.New("mismatched quotes")
+	}
+	if buffer.Len() > 0 {
+		groups = append(groups, buffer.String())
+	}
+	if len(groups) == 0 {
+		return nil, errors.New("empty key")
+	}
+	return groups, nil
+}
+
+func isValidBareChar(r rune) bool {
+	return isAlphanumeric(r) || r == '-' || unicode.IsNumber(r)
+}
diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go
new file mode 100644
index 000000000..d11de4285
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/lexer.go
@@ -0,0 +1,750 @@
+// TOML lexer.
+//
+// Written using the principles developed by Rob Pike in
+// http://www.youtube.com/watch?v=HxaD_trXwRE
+
+package toml
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var dateRegexp *regexp.Regexp
+
+// Define state functions
+type tomlLexStateFn func() tomlLexStateFn
+
+// Define lexer
+type tomlLexer struct {
+	inputIdx          int
+	input             []rune // Textual source
+	currentTokenStart int
+	currentTokenStop  int
+	tokens            []token
+	depth             int
+	line              int
+	col               int
+	endbufferLine     int
+	endbufferCol      int
+}
+
+// Basic read operations on input
+
+func (l *tomlLexer) read() rune {
+	r := l.peek()
+	if r == '\n' {
+		l.endbufferLine++
+		l.endbufferCol = 1
+	} else {
+		l.endbufferCol++
+	}
+	l.inputIdx++
+	return r
+}
+
+func (l *tomlLexer) next() rune {
+	r := l.read()
+
+	if r != eof {
+		l.currentTokenStop++
+	}
+	return r
+}
+
+func (l *tomlLexer) ignore() {
+	l.currentTokenStart = l.currentTokenStop
+	l.line = l.endbufferLine
+	l.col = l.endbufferCol
+}
+
+func (l *tomlLexer) skip() {
+	l.next()
+	l.ignore()
+}
+
+func (l *tomlLexer) fastForward(n int) {
+	for i := 0; i < n; i++ {
+		l.next()
+	}
+}
+
+func (l *tomlLexer) emitWithValue(t tokenType, value string) {
+	l.tokens = append(l.tokens, token{
+		Position: Position{l.line, l.col},
+		typ:      t,
+		val:      value,
+	})
+	l.ignore()
+}
+
+func (l *tomlLexer) emit(t tokenType) {
+	l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop]))
+}
+
+func (l *tomlLexer) peek() rune {
+	if l.inputIdx >= len(l.input) {
+		return eof
+	}
+	return l.input[l.inputIdx]
+}
+
+func (l *tomlLexer) peekString(size int) string {
+	maxIdx := len(l.input)
+	upperIdx := l.inputIdx + size // FIXME: potential overflow
+	if upperIdx > maxIdx {
+		upperIdx = maxIdx
+	}
+	return string(l.input[l.inputIdx:upperIdx])
+}
+
+func (l *tomlLexer) follow(next string) bool {
+	return next == l.peekString(len(next))
+}
+
+// Error management
+
+func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn {
+	l.tokens = append(l.tokens, token{
+		Position: Position{l.line, l.col},
+		typ:      tokenError,
+		val:      fmt.Sprintf(format, args...),
+	})
+	return nil
+}
+
+// State functions
+
+func (l *tomlLexer) lexVoid() tomlLexStateFn {
+	for {
+		next := l.peek()
+		switch next {
+		case '[':
+			return l.lexTableKey
+		case '#':
+			return l.lexComment(l.lexVoid)
+		case '=':
+			return l.lexEqual
+		case '\r':
+			fallthrough
+		case '\n':
+			l.skip()
+			continue
+		}
+
+		if isSpace(next) {
+			l.skip()
+		}
+
+		if l.depth > 0 {
+			return l.lexRvalue
+		}
+
+		if isKeyStartChar(next) {
+			return l.lexKey
+		}
+
+		if next == eof {
+			l.next()
+			break
+		}
+	}
+
+	l.emit(tokenEOF)
+	return nil
+}
+
+func (l *tomlLexer) lexRvalue() tomlLexStateFn {
+	for {
+		next := l.peek()
+		switch next {
+		case '.':
+			return l.errorf("cannot start float with a dot")
+		case '=':
+			return l.lexEqual
+		case '[':
+			l.depth++
+			return l.lexLeftBracket
+		case ']':
+			l.depth--
+			return l.lexRightBracket
+		case '{':
+			return l.lexLeftCurlyBrace
+		case '}':
+			return l.lexRightCurlyBrace
+		case '#':
+			return l.lexComment(l.lexRvalue)
+		case '"':
+			return l.lexString
+		case '\'':
+			return l.lexLiteralString
+		case ',':
+			return l.lexComma
+		case '\r':
+			fallthrough
+		case '\n':
+			l.skip()
+			if l.depth == 0 {
+				return l.lexVoid
+			}
+			return l.lexRvalue
+		case '_':
+			return l.errorf("cannot start number with underscore")
+		}
+
+		if l.follow("true") {
+			return l.lexTrue
+		}
+
+		if l.follow("false") {
+			return l.lexFalse
+		}
+
+		if l.follow("inf") {
+			return l.lexInf
+		}
+
+		if l.follow("nan") {
+			return l.lexNan
+		}
+
+		if isSpace(next) {
+			l.skip()
+			continue
+		}
+
+		if next == eof {
+			l.next()
+			break
+		}
+
+		possibleDate := l.peekString(35)
+		dateMatch := dateRegexp.FindString(possibleDate)
+		if dateMatch != "" {
+			l.fastForward(len(dateMatch))
+			return l.lexDate
+		}
+
+		if next == '+' || next == '-' || isDigit(next) {
+			return l.lexNumber
+		}
+
+		if isAlphanumeric(next) {
+			return l.lexKey
+		}
+
+		return l.errorf("no value can start with %c", next)
+	}
+
+	l.emit(tokenEOF)
+	return nil
+}
+
+func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn {
+	l.next()
+	l.emit(tokenLeftCurlyBrace)
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn {
+	l.next()
+	l.emit(tokenRightCurlyBrace)
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) lexDate() tomlLexStateFn {
+	l.emit(tokenDate)
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) lexTrue() tomlLexStateFn {
+	l.fastForward(4)
+	l.emit(tokenTrue)
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) lexFalse() tomlLexStateFn {
+	l.fastForward(5)
+	l.emit(tokenFalse)
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) lexInf() tomlLexStateFn {
+	l.fastForward(3)
+	l.emit(tokenInf)
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) lexNan() tomlLexStateFn {
+	l.fastForward(3)
+	l.emit(tokenNan)
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) lexEqual() tomlLexStateFn {
+	l.next()
+	l.emit(tokenEqual)
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) lexComma() tomlLexStateFn {
+	l.next()
+	l.emit(tokenComma)
+	return l.lexRvalue
+}
+
+// Parse the key and emit its value without escape sequences.
+// Bare keys, basic string keys, and literal string keys are supported.
+func (l *tomlLexer) lexKey() tomlLexStateFn {
+	growingString := ""
+
+	for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() {
+		if r == '"' {
+			l.next()
+			str, err := l.lexStringAsString(`"`, false, true)
+			if err != nil {
+				return l.errorf(err.Error())
+			}
+			growingString += str
+			l.next()
+			continue
+		} else if r == '\'' {
+			l.next()
+			str, err := l.lexLiteralStringAsString(`'`, false)
+			if err != nil {
+				return l.errorf(err.Error())
+			}
+			growingString += str
+			l.next()
+			continue
+		} else if r == '\n' {
+			return l.errorf("keys cannot contain new lines")
+		} else if isSpace(r) {
+			break
+		} else if !isValidBareChar(r) {
+			return l.errorf("keys cannot contain %c character", r)
+		}
+		growingString += string(r)
+		l.next()
+	}
+	l.emitWithValue(tokenKey, growingString)
+	return l.lexVoid
+}
+
+func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn {
+	return func() tomlLexStateFn {
+		for next := l.peek(); next != '\n' && next != eof; next = l.peek() {
+			if next == '\r' && l.follow("\r\n") {
+				break
+			}
+			l.next()
+		}
+		l.ignore()
+		return previousState
+	}
+}
+
+func (l *tomlLexer) lexLeftBracket() tomlLexStateFn {
+	l.next()
+	l.emit(tokenLeftBracket)
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) {
+	growingString := ""
+
+	if discardLeadingNewLine {
+		if l.follow("\r\n") {
+			l.skip()
+			l.skip()
+		} else if l.peek() == '\n' {
+			l.skip()
+		}
+	}
+
+	// find end of string
+	for {
+		if l.follow(terminator) {
+			return growingString, nil
+		}
+
+		next := l.peek()
+		if next == eof {
+			break
+		}
+		growingString += string(l.next())
+	}
+
+	return "", errors.New("unclosed string")
+}
+
+func (l *tomlLexer) lexLiteralString() tomlLexStateFn {
+	l.skip()
+
+	// handle special case for triple-quote
+	terminator := "'"
+	discardLeadingNewLine := false
+	if l.follow("''") {
+		l.skip()
+		l.skip()
+		terminator = "'''"
+		discardLeadingNewLine = true
+	}
+
+	str, err := l.lexLiteralStringAsString(terminator, discardLeadingNewLine)
+	if err != nil {
+		return l.errorf(err.Error())
+	}
+
+	l.emitWithValue(tokenString, str)
+	l.fastForward(len(terminator))
+	l.ignore()
+	return l.lexRvalue
+}
+
+// Lex a string and return the results as a string.
+// Terminator is the substring indicating the end of the token.
+// The resulting string does not include the terminator.
+func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) {
+	growingString := ""
+
+	if discardLeadingNewLine {
+		if l.follow("\r\n") {
+			l.skip()
+			l.skip()
+		} else if l.peek() == '\n' {
+			l.skip()
+		}
+	}
+
+	for {
+		if l.follow(terminator) {
+			return growingString, nil
+		}
+
+		if l.follow("\\") {
+			l.next()
+			switch l.peek() {
+			case '\r':
+				fallthrough
+			case '\n':
+				fallthrough
+			case '\t':
+				fallthrough
+			case ' ':
+				// skip all whitespace chars following backslash
+				for strings.ContainsRune("\r\n\t ", l.peek()) {
+					l.next()
+				}
+			case '"':
+				growingString += "\""
+				l.next()
+			case 'n':
+				growingString += "\n"
+				l.next()
+			case 'b':
+				growingString += "\b"
+				l.next()
+			case 'f':
+				growingString += "\f"
+				l.next()
+			case '/':
+				growingString += "/"
+				l.next()
+			case 't':
+				growingString += "\t"
+				l.next()
+			case 'r':
+				growingString += "\r"
+				l.next()
+			case '\\':
+				growingString += "\\"
+				l.next()
+			case 'u':
+				l.next()
+				code := ""
+				for i := 0; i < 4; i++ {
+					c := l.peek()
+					if !isHexDigit(c) {
+						return "", errors.New("unfinished unicode escape")
+					}
+					l.next()
+					code = code + string(c)
+				}
+				intcode, err := strconv.ParseInt(code, 16, 32)
+				if err != nil {
+					return "", errors.New("invalid unicode escape: \\u" + code)
+				}
+				growingString += string(rune(intcode))
+			case 'U':
+				l.next()
+				code := ""
+				for i := 0; i < 8; i++ {
+					c := l.peek()
+					if !isHexDigit(c) {
+						return "", errors.New("unfinished unicode escape")
+					}
+					l.next()
+					code = code + string(c)
+				}
+				intcode, err := strconv.ParseInt(code, 16, 64)
+				if err != nil {
+					return "", errors.New("invalid unicode escape: \\U" + code)
+				}
+				growingString += string(rune(intcode))
+			default:
+				return "", errors.New("invalid escape sequence: \\" + string(l.peek()))
+			}
+		} else {
+			r := l.peek()
+
+			if 0x00 <= r && r <= 0x1F && !(acceptNewLines && (r == '\n' || r == '\r')) {
+				return "", fmt.Errorf("unescaped control character %U", r)
+			}
+			l.next()
+			growingString += string(r)
+		}
+
+		if l.peek() == eof {
+			break
+		}
+	}
+
+	return "", errors.New("unclosed string")
+}
+
+func (l *tomlLexer) lexString() tomlLexStateFn {
+	l.skip()
+
+	// handle special case for triple-quote
+	terminator := `"`
+	discardLeadingNewLine := false
+	acceptNewLines := false
+	if l.follow(`""`) {
+		l.skip()
+		l.skip()
+		terminator = `"""`
+		discardLeadingNewLine = true
+		acceptNewLines = true
+	}
+
+	str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines)
+
+	if err != nil {
+		return l.errorf(err.Error())
+	}
+
+	l.emitWithValue(tokenString, str)
+	l.fastForward(len(terminator))
+	l.ignore()
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) lexTableKey() tomlLexStateFn {
+	l.next()
+
+	if l.peek() == '[' {
+		// token '[[' signifies an array of tables
+		l.next()
+		l.emit(tokenDoubleLeftBracket)
+		return l.lexInsideTableArrayKey
+	}
+	// vanilla table key
+	l.emit(tokenLeftBracket)
+	return l.lexInsideTableKey
+}
+
+// Parse the key until "]]"; only bare keys are supported
+func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn {
+	for r := l.peek(); r != eof; r = l.peek() {
+		switch r {
+		case ']':
+			if l.currentTokenStop > l.currentTokenStart {
+				l.emit(tokenKeyGroupArray)
+			}
+			l.next()
+			if l.peek() != ']' {
+				break
+			}
+			l.next()
+			l.emit(tokenDoubleRightBracket)
+			return l.lexVoid
+		case '[':
+			return l.errorf("table array key cannot contain '['")
+		default:
+			l.next()
+		}
+	}
+	return l.errorf("unclosed table array key")
+}
+
+// Parse the key until "]"; only bare keys are supported
+func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn {
+	for r := l.peek(); r != eof; r = l.peek() {
+		switch r {
+		case ']':
+			if l.currentTokenStop > l.currentTokenStart {
+				l.emit(tokenKeyGroup)
+			}
+			l.next()
+			l.emit(tokenRightBracket)
+			return l.lexVoid
+		case '[':
+			return l.errorf("table key cannot contain '['")
+		default:
+			l.next()
+		}
+	}
+	return l.errorf("unclosed table key")
+}
+
+func (l *tomlLexer) lexRightBracket() tomlLexStateFn {
+	l.next()
+	l.emit(tokenRightBracket)
+	return l.lexRvalue
+}
+
+type validRuneFn func(r rune) bool
+
+func isValidHexRune(r rune) bool {
+	return r >= 'a' && r <= 'f' ||
+		r >= 'A' && r <= 'F' ||
+		r >= '0' && r <= '9' ||
+		r == '_'
+}
+
+func isValidOctalRune(r rune) bool {
+	return r >= '0' && r <= '7' || r == '_'
+}
+
+func isValidBinaryRune(r rune) bool {
+	return r == '0' || r == '1' || r == '_'
+}
+
+func (l *tomlLexer) lexNumber() tomlLexStateFn {
+	r := l.peek()
+
+	if r == '0' {
+		follow := l.peekString(2)
+		if len(follow) == 2 {
+			var isValidRune validRuneFn
+			switch follow[1] {
+			case 'x':
+				isValidRune = isValidHexRune
+			case 'o':
+				isValidRune = isValidOctalRune
+			case 'b':
+				isValidRune = isValidBinaryRune
+			default:
+				if follow[1] >= 'a' && follow[1] <= 'z' || follow[1] >= 'A' && follow[1] <= 'Z' {
+					return l.errorf("unknown number base: %s. possible options are x (hex), o (octal), b (binary)", string(follow[1]))
+				}
+			}
+
+			if isValidRune != nil {
+				l.next()
+				l.next()
+				digitSeen := false
+				for {
+					next := l.peek()
+					if !isValidRune(next) {
+						break
+					}
+					digitSeen = true
+					l.next()
+				}
+
+				if !digitSeen {
+					return l.errorf("number needs at least one digit")
+				}
+
+				l.emit(tokenInteger)
+
+				return l.lexRvalue
+			}
+		}
+	}
+
+	if r == '+' || r == '-' {
+		l.next()
+		if l.follow("inf") {
+			return l.lexInf
+		}
+		if l.follow("nan") {
+			return l.lexNan
+		}
+	}
+
+	pointSeen := false
+	expSeen := false
+	digitSeen := false
+	for {
+		next := l.peek()
+		if next == '.' {
+			if pointSeen {
+				return l.errorf("cannot have two dots in one float")
+			}
+			l.next()
+			if !isDigit(l.peek()) {
+				return l.errorf("float cannot end with a dot")
+			}
+			pointSeen = true
+		} else if next == 'e' || next == 'E' {
+			expSeen = true
+			l.next()
+			r := l.peek()
+			if r == '+' || r == '-' {
+				l.next()
+			}
+		} else if isDigit(next) {
+			digitSeen = true
+			l.next()
+		} else if next == '_' {
+			l.next()
+		} else {
+			break
+		}
+		if pointSeen && !digitSeen {
+			return l.errorf("cannot start float with a dot")
+		}
+	}
+
+	if !digitSeen {
+		return l.errorf("no digit in that number")
+	}
+	if pointSeen || expSeen {
+		l.emit(tokenFloat)
+	} else {
+		l.emit(tokenInteger)
+	}
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) run() {
+	for state := l.lexVoid; state != nil; {
+		state = state()
+	}
+}
+
+func init() {
+	dateRegexp = regexp.MustCompile(`^\d{1,4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})`)
+}
+
+// lexToml is the lexer entry point: it converts an entire TOML document into a flat stream of tokens.
+func lexToml(inputBytes []byte) []token {
+	runes := bytes.Runes(inputBytes)
+	l := &tomlLexer{
+		input:         runes,
+		tokens:        make([]token, 0, 256),
+		line:          1,
+		col:           1,
+		endbufferLine: 1,
+		endbufferCol:  1,
+	}
+	l.run()
+	return l.tokens
+}
diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go
new file mode 100644
index 000000000..671da5564
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/marshal.go
@@ -0,0 +1,609 @@
+package toml
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const tagKeyMultiline = "multiline"
+
+type tomlOpts struct {
+	name      string
+	comment   string
+	commented bool
+	multiline bool
+	include   bool
+	omitempty bool
+}
+
+type encOpts struct {
+	quoteMapKeys            bool
+	arraysOneElementPerLine bool
+}
+
+var encOptsDefaults = encOpts{
+	quoteMapKeys: false,
+}
+
+var timeType = reflect.TypeOf(time.Time{})
+var marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
+
+// Check if the given marshal type maps to a Tree primitive
+func isPrimitive(mtype reflect.Type) bool {
+	switch mtype.Kind() {
+	case reflect.Ptr:
+		return isPrimitive(mtype.Elem())
+	case reflect.Bool:
+		return true
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return true
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return true
+	case reflect.Float32, reflect.Float64:
+		return true
+	case reflect.String:
+		return true
+	case reflect.Struct:
+		return mtype == timeType || isCustomMarshaler(mtype)
+	default:
+		return false
+	}
+}
+
+// Check if the given marshal type maps to a Tree slice
+func isTreeSlice(mtype reflect.Type) bool {
+	switch mtype.Kind() {
+	case reflect.Slice:
+		return !isOtherSlice(mtype)
+	default:
+		return false
+	}
+}
+
+// Check if the given marshal type maps to a non-Tree slice
+func isOtherSlice(mtype reflect.Type) bool {
+	switch mtype.Kind() {
+	case reflect.Ptr:
+		return isOtherSlice(mtype.Elem())
+	case reflect.Slice:
+		return isPrimitive(mtype.Elem()) || isOtherSlice(mtype.Elem())
+	default:
+		return false
+	}
+}
+
+// Check if the given marshal type maps to a Tree
+func isTree(mtype reflect.Type) bool {
+	switch mtype.Kind() {
+	case reflect.Map:
+		return true
+	case reflect.Struct:
+		return !isPrimitive(mtype)
+	default:
+		return false
+	}
+}
+
+func isCustomMarshaler(mtype reflect.Type) bool {
+	return mtype.Implements(marshalerType)
+}
+
+func callCustomMarshaler(mval reflect.Value) ([]byte, error) {
+	return mval.Interface().(Marshaler).MarshalTOML()
+}
+
+// Marshaler is the interface implemented by types that
+// can marshal themselves into valid TOML.
+type Marshaler interface {
+	MarshalTOML() ([]byte, error)
+}
+
+/*
+Marshal returns the TOML encoding of v.  Behavior is similar to the Go json
+encoder, except that there is no concept of a Marshaler interface or MarshalTOML
+function for sub-structs, and currently only definite types can be marshaled
+(i.e. no `interface{}`).
+
+The following struct annotations are supported:
+
+  toml:"Field"      Overrides the field's name to output.
+  omitempty         When set, empty values and groups are not emitted.
+  comment:"comment" Emits a # comment on the same line. This supports new lines.
+  commented:"true"  Emits the value as commented.
+
+Note that pointers are automatically assigned the "omitempty" option, as TOML
+explicitly does not handle null values (saying instead the label should be
+dropped).
+
+Tree structural types and corresponding marshal types:
+
+  *Tree                            (*)struct, (*)map[string]interface{}
+  []*Tree                          (*)[](*)struct, (*)[](*)map[string]interface{}
+  []interface{} (as interface{})   (*)[]primitive, (*)[]([]interface{})
+  interface{}                      (*)primitive
+
+Tree primitive types and corresponding marshal types:
+
+  uint64     uint, uint8-uint64, pointers to same
+  int64      int, int8-int64, pointers to same
+  float64    float32, float64, pointers to same
+  string     string, pointers to same
+  bool       bool, pointers to same
+  time.Time  time.Time{}, pointers to same
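+
+For example (illustrative; the Config type is hypothetical):
+
+  type Config struct {
+      Name string `toml:"name"`
+      Port int    `toml:"port,omitempty"`
+  }
+
+  b, err := Marshal(Config{Name: "server", Port: 8080})
+  // b now contains:
+  //   name = "server"
+  //   port = 8080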
+*/
+func Marshal(v interface{}) ([]byte, error) {
+	return NewEncoder(nil).marshal(v)
+}
+
+// Encoder writes TOML values to an output stream.
+type Encoder struct {
+	w io.Writer
+	encOpts
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		w:       w,
+		encOpts: encOptsDefaults,
+	}
+}
+
+// Encode writes the TOML encoding of v to the stream.
+//
+// See the documentation for Marshal for details.
+func (e *Encoder) Encode(v interface{}) error {
+	b, err := e.marshal(v)
+	if err != nil {
+		return err
+	}
+	if _, err := e.w.Write(b); err != nil {
+		return err
+	}
+	return nil
+}
+
+// QuoteMapKeys sets up the encoder to encode
+// maps with string type keys with quoted TOML keys.
+//
+// This lifts the character restrictions that otherwise apply to map keys.
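+//
+// For example (illustrative; the wrapper struct is hypothetical), a map key
+// containing a dot would otherwise be written as a dotted path:
+//
+//   type T struct{ M map[string]string }
+//   buf := new(bytes.Buffer)
+//   err := NewEncoder(buf).QuoteMapKeys(true).
+//       Encode(T{M: map[string]string{"a.b": "c"}})
+//   // buf now contains a [M] table with the entry: "a.b" = "c"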
+func (e *Encoder) QuoteMapKeys(v bool) *Encoder {
+	e.quoteMapKeys = v
+	return e
+}
+
+// ArraysWithOneElementPerLine sets up the encoder to encode arrays
+// with more than one element on multiple lines instead of one.
+//
+// For example:
+//
+//   A = [1,2,3]
+//
+// Becomes
+//
+//   A = [
+//     1,
+//     2,
+//     3,
+//   ]
+func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder {
+	e.arraysOneElementPerLine = v
+	return e
+}
+
+func (e *Encoder) marshal(v interface{}) ([]byte, error) {
+	mtype := reflect.TypeOf(v)
+	if mtype.Kind() != reflect.Struct {
+		return []byte{}, errors.New("Only a struct can be marshaled to TOML")
+	}
+	sval := reflect.ValueOf(v)
+	if isCustomMarshaler(mtype) {
+		return callCustomMarshaler(sval)
+	}
+	t, err := e.valueToTree(mtype, sval)
+	if err != nil {
+		return []byte{}, err
+	}
+
+	var buf bytes.Buffer
+	_, err = t.writeTo(&buf, "", "", 0, e.arraysOneElementPerLine)
+
+	return buf.Bytes(), err
+}
+
+// Convert given marshal struct or map value to toml tree
+func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) {
+	if mtype.Kind() == reflect.Ptr {
+		return e.valueToTree(mtype.Elem(), mval.Elem())
+	}
+	tval := newTree()
+	switch mtype.Kind() {
+	case reflect.Struct:
+		for i := 0; i < mtype.NumField(); i++ {
+			mtypef, mvalf := mtype.Field(i), mval.Field(i)
+			opts := tomlOptions(mtypef)
+			if opts.include && (!opts.omitempty || !isZero(mvalf)) {
+				val, err := e.valueToToml(mtypef.Type, mvalf)
+				if err != nil {
+					return nil, err
+				}
+
+				tval.SetWithOptions(opts.name, SetOptions{
+					Comment:   opts.comment,
+					Commented: opts.commented,
+					Multiline: opts.multiline,
+				}, val)
+			}
+		}
+	case reflect.Map:
+		for _, key := range mval.MapKeys() {
+			mvalf := mval.MapIndex(key)
+			val, err := e.valueToToml(mtype.Elem(), mvalf)
+			if err != nil {
+				return nil, err
+			}
+			if e.quoteMapKeys {
+				keyStr, err := tomlValueStringRepresentation(key.String(), "", e.arraysOneElementPerLine)
+				if err != nil {
+					return nil, err
+				}
+				tval.SetPath([]string{keyStr}, val)
+			} else {
+				tval.Set(key.String(), val)
+			}
+		}
+	}
+	return tval, nil
+}
+
+// Convert given marshal slice to slice of Toml trees
+func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) {
+	tval := make([]*Tree, mval.Len())
+	for i := 0; i < mval.Len(); i++ {
+		val, err := e.valueToTree(mtype.Elem(), mval.Index(i))
+		if err != nil {
+			return nil, err
+		}
+		tval[i] = val
+	}
+	return tval, nil
+}
+
+// Convert given marshal slice to slice of toml values
+func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
+	tval := make([]interface{}, mval.Len())
+	for i := 0; i < mval.Len(); i++ {
+		val, err := e.valueToToml(mtype.Elem(), mval.Index(i))
+		if err != nil {
+			return nil, err
+		}
+		tval[i] = val
+	}
+	return tval, nil
+}
+
+// Convert given marshal value to toml value
+func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
+	if mtype.Kind() == reflect.Ptr {
+		return e.valueToToml(mtype.Elem(), mval.Elem())
+	}
+	switch {
+	case isCustomMarshaler(mtype):
+		return callCustomMarshaler(mval)
+	case isTree(mtype):
+		return e.valueToTree(mtype, mval)
+	case isTreeSlice(mtype):
+		return e.valueToTreeSlice(mtype, mval)
+	case isOtherSlice(mtype):
+		return e.valueToOtherSlice(mtype, mval)
+	default:
+		switch mtype.Kind() {
+		case reflect.Bool:
+			return mval.Bool(), nil
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			return mval.Int(), nil
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+			return mval.Uint(), nil
+		case reflect.Float32, reflect.Float64:
+			return mval.Float(), nil
+		case reflect.String:
+			return mval.String(), nil
+		case reflect.Struct:
+			return mval.Interface().(time.Time), nil
+		default:
+			return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind())
+		}
+	}
+}
+
+// Unmarshal attempts to unmarshal the Tree into a Go struct pointed by v.
+// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for
+// sub-structs, and only definite types can be unmarshaled.
+func (t *Tree) Unmarshal(v interface{}) error {
+	d := Decoder{tval: t}
+	return d.unmarshal(v)
+}
+
+// Marshal returns the TOML encoding of Tree.
+// See Marshal() documentation for types mapping table.
+func (t *Tree) Marshal() ([]byte, error) {
+	var buf bytes.Buffer
+	err := NewEncoder(&buf).Encode(t)
+	return buf.Bytes(), err
+}
+
+// Unmarshal parses the TOML-encoded data and stores the result in the value
+// pointed to by v. Behavior is similar to the Go json encoder, except that there
+// is no concept of an Unmarshaler interface or UnmarshalTOML function for
+// sub-structs, and currently only definite types can be unmarshaled to (i.e. no
+// `interface{}`).
+//
+// The following struct annotations are supported:
+//
+//   toml:"Field" Overrides the field's name to map to.
+//
+// See Marshal() documentation for types mapping table.
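+//
+// For example (illustrative; the Config type is hypothetical):
+//
+//   type Config struct {
+//       Name string `toml:"name"`
+//   }
+//
+//   var conf Config
+//   err := Unmarshal([]byte(`name = "server"`), &conf)
+//   // conf.Name == "server"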
+func Unmarshal(data []byte, v interface{}) error {
+	t, err := LoadReader(bytes.NewReader(data))
+	if err != nil {
+		return err
+	}
+	return t.Unmarshal(v)
+}
+
+// Decoder reads and decodes TOML values from an input stream.
+type Decoder struct {
+	r    io.Reader
+	tval *Tree
+	encOpts
+}
+
+// NewDecoder returns a new decoder that reads from r.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		r:       r,
+		encOpts: encOptsDefaults,
+	}
+}
+
+// Decode reads a TOML-encoded value from its input
+// and unmarshals it in the value pointed at by v.
+//
+// See the documentation for Marshal for details.
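+//
+// For example (illustrative; reuses the hypothetical Config struct from the
+// Unmarshal example and assumes the strings package is imported):
+//
+//   var conf Config
+//   err := NewDecoder(strings.NewReader(`name = "server"`)).Decode(&conf)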
+func (d *Decoder) Decode(v interface{}) error {
+	var err error
+	d.tval, err = LoadReader(d.r)
+	if err != nil {
+		return err
+	}
+	return d.unmarshal(v)
+}
+
+func (d *Decoder) unmarshal(v interface{}) error {
+	mtype := reflect.TypeOf(v)
+	if mtype.Kind() != reflect.Ptr || mtype.Elem().Kind() != reflect.Struct {
+		return errors.New("Only a pointer to struct can be unmarshaled from TOML")
+	}
+
+	sval, err := d.valueFromTree(mtype.Elem(), d.tval)
+	if err != nil {
+		return err
+	}
+	reflect.ValueOf(v).Elem().Set(sval)
+	return nil
+}
+
+// Convert toml tree to marshal struct or map, using marshal type
+func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) {
+	if mtype.Kind() == reflect.Ptr {
+		return d.unwrapPointer(mtype, tval)
+	}
+	var mval reflect.Value
+	switch mtype.Kind() {
+	case reflect.Struct:
+		mval = reflect.New(mtype).Elem()
+		for i := 0; i < mtype.NumField(); i++ {
+			mtypef := mtype.Field(i)
+			opts := tomlOptions(mtypef)
+			if opts.include {
+				baseKey := opts.name
+				keysToTry := []string{baseKey, strings.ToLower(baseKey), strings.ToTitle(baseKey)}
+				for _, key := range keysToTry {
+					exists := tval.Has(key)
+					if !exists {
+						continue
+					}
+					val := tval.Get(key)
+					mvalf, err := d.valueFromToml(mtypef.Type, val)
+					if err != nil {
+						return mval, formatError(err, tval.GetPosition(key))
+					}
+					mval.Field(i).Set(mvalf)
+					break
+				}
+			}
+		}
+	case reflect.Map:
+		mval = reflect.MakeMap(mtype)
+		for _, key := range tval.Keys() {
+			// TODO: path splits key
+			val := tval.GetPath([]string{key})
+			mvalf, err := d.valueFromToml(mtype.Elem(), val)
+			if err != nil {
+				return mval, formatError(err, tval.GetPosition(key))
+			}
+			mval.SetMapIndex(reflect.ValueOf(key), mvalf)
+		}
+	}
+	return mval, nil
+}
+
+// Convert toml value to marshal struct/map slice, using marshal type
+func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) {
+	mval := reflect.MakeSlice(mtype, len(tval), len(tval))
+	for i := 0; i < len(tval); i++ {
+		val, err := d.valueFromTree(mtype.Elem(), tval[i])
+		if err != nil {
+			return mval, err
+		}
+		mval.Index(i).Set(val)
+	}
+	return mval, nil
+}
+
+// Convert toml value to marshal primitive slice, using marshal type
+func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) {
+	mval := reflect.MakeSlice(mtype, len(tval), len(tval))
+	for i := 0; i < len(tval); i++ {
+		val, err := d.valueFromToml(mtype.Elem(), tval[i])
+		if err != nil {
+			return mval, err
+		}
+		mval.Index(i).Set(val)
+	}
+	return mval, nil
+}
+
+// Convert toml value to marshal value, using marshal type
+func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error) {
+	if mtype.Kind() == reflect.Ptr {
+		return d.unwrapPointer(mtype, tval)
+	}
+
+	switch tval.(type) {
+	case *Tree:
+		if isTree(mtype) {
+			return d.valueFromTree(mtype, tval.(*Tree))
+		}
+		return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval)
+	case []*Tree:
+		if isTreeSlice(mtype) {
+			return d.valueFromTreeSlice(mtype, tval.([]*Tree))
+		}
+		return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval)
+	case []interface{}:
+		if isOtherSlice(mtype) {
+			return d.valueFromOtherSlice(mtype, tval.([]interface{}))
+		}
+		return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval)
+	default:
+		switch mtype.Kind() {
+		case reflect.Bool, reflect.Struct:
+			val := reflect.ValueOf(tval)
+			// if this conversion succeeds when mtype is reflect.Struct, tval must be a time.Time
+			if !val.Type().ConvertibleTo(mtype) {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
+			}
+
+			return val.Convert(mtype), nil
+		case reflect.String:
+			val := reflect.ValueOf(tval)
+			// stupidly, int64 is convertible to string. So special case this.
+			if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
+			}
+
+			return val.Convert(mtype), nil
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			val := reflect.ValueOf(tval)
+			if !val.Type().ConvertibleTo(mtype) {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
+			}
+			if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Int()) {
+				return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String())
+			}
+
+			return val.Convert(mtype), nil
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			val := reflect.ValueOf(tval)
+			if !val.Type().ConvertibleTo(mtype) {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
+			}
+			if val.Int() < 0 {
+				return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String())
+			}
+			if reflect.Indirect(reflect.New(mtype)).OverflowUint(uint64(val.Int())) {
+				return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String())
+			}
+
+			return val.Convert(mtype), nil
+		case reflect.Float32, reflect.Float64:
+			val := reflect.ValueOf(tval)
+			if !val.Type().ConvertibleTo(mtype) {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
+			}
+			if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Float()) {
+				return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String())
+			}
+
+			return val.Convert(mtype), nil
+		default:
+			return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind())
+		}
+	}
+}
+
+func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.Value, error) {
+	val, err := d.valueFromToml(mtype.Elem(), tval)
+	if err != nil {
+		return reflect.ValueOf(nil), err
+	}
+	mval := reflect.New(mtype.Elem())
+	mval.Elem().Set(val)
+	return mval, nil
+}
+
+func tomlOptions(vf reflect.StructField) tomlOpts {
+	tag := vf.Tag.Get("toml")
+	parse := strings.Split(tag, ",")
+	var comment string
+	if c := vf.Tag.Get("comment"); c != "" {
+		comment = c
+	}
+	commented, _ := strconv.ParseBool(vf.Tag.Get("commented"))
+	multiline, _ := strconv.ParseBool(vf.Tag.Get(tagKeyMultiline))
+	result := tomlOpts{name: vf.Name, comment: comment, commented: commented, multiline: multiline, include: true, omitempty: false}
+	if parse[0] != "" {
+		if parse[0] == "-" && len(parse) == 1 {
+			result.include = false
+		} else {
+			result.name = strings.Trim(parse[0], " ")
+		}
+	}
+	if vf.PkgPath != "" {
+		result.include = false
+	}
+	if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" {
+		result.omitempty = true
+	}
+	if vf.Type.Kind() == reflect.Ptr {
+		result.omitempty = true
+	}
+	return result
+}
+
+func isZero(val reflect.Value) bool {
+	switch val.Type().Kind() {
+	case reflect.Map:
+		fallthrough
+	case reflect.Array:
+		fallthrough
+	case reflect.Slice:
+		return val.Len() == 0
+	default:
+		return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface())
+	}
+}
+
+func formatError(err error, pos Position) error {
+	if err.Error()[0] == '(' { // Error already contains position information
+		return err
+	}
+	return fmt.Errorf("%s: %s", pos, err)
+}
diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go
new file mode 100644
index 000000000..2d27599a9
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/parser.go
@@ -0,0 +1,430 @@
+// TOML Parser.
+
+package toml
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type tomlParser struct {
+	flowIdx       int
+	flow          []token
+	tree          *Tree
+	currentTable  []string
+	seenTableKeys []string
+}
+
+type tomlParserStateFn func() tomlParserStateFn
+
+// raiseError formats an error message based on a token and panics with it
+func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) {
+	panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...))
+}
+
+func (p *tomlParser) run() {
+	for state := p.parseStart; state != nil; {
+		state = state()
+	}
+}
+
+func (p *tomlParser) peek() *token {
+	if p.flowIdx >= len(p.flow) {
+		return nil
+	}
+	return &p.flow[p.flowIdx]
+}
+
+func (p *tomlParser) assume(typ tokenType) {
+	tok := p.getToken()
+	if tok == nil {
+		p.raiseError(tok, "was expecting token %s, but token stream is empty", tok)
+	}
+	if tok.typ != typ {
+		p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok)
+	}
+}
+
+func (p *tomlParser) getToken() *token {
+	tok := p.peek()
+	if tok == nil {
+		return nil
+	}
+	p.flowIdx++
+	return tok
+}
+
+func (p *tomlParser) parseStart() tomlParserStateFn {
+	tok := p.peek()
+
+	// end of stream, parsing is finished
+	if tok == nil {
+		return nil
+	}
+
+	switch tok.typ {
+	case tokenDoubleLeftBracket:
+		return p.parseGroupArray
+	case tokenLeftBracket:
+		return p.parseGroup
+	case tokenKey:
+		return p.parseAssign
+	case tokenEOF:
+		return nil
+	default:
+		p.raiseError(tok, "unexpected token")
+	}
+	return nil
+}
+
+func (p *tomlParser) parseGroupArray() tomlParserStateFn {
+	startToken := p.getToken() // discard the [[
+	key := p.getToken()
+	if key.typ != tokenKeyGroupArray {
+		p.raiseError(key, "unexpected token %s, was expecting a table array key", key)
+	}
+
+	// get or create table array element at the indicated part in the path
+	keys, err := parseKey(key.val)
+	if err != nil {
+		p.raiseError(key, "invalid table array key: %s", err)
+	}
+	p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries
+	destTree := p.tree.GetPath(keys)
+	var array []*Tree
+	if destTree == nil {
+		array = make([]*Tree, 0)
+	} else if target, ok := destTree.([]*Tree); ok && target != nil {
+		array = destTree.([]*Tree)
+	} else {
+		p.raiseError(key, "key %s is already assigned and not of type table array", key)
+	}
+	p.currentTable = keys
+
+	// add a new tree to the end of the table array
+	newTree := newTree()
+	newTree.position = startToken.Position
+	array = append(array, newTree)
+	p.tree.SetPath(p.currentTable, array)
+
+	// remove all keys that were children of this table array
+	prefix := key.val + "."
+	found := false
+	for ii := 0; ii < len(p.seenTableKeys); {
+		tableKey := p.seenTableKeys[ii]
+		if strings.HasPrefix(tableKey, prefix) {
+			p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...)
+		} else {
+			found = (tableKey == key.val)
+			ii++
+		}
+	}
+
+	// keep this key name from use by other kinds of assignments
+	if !found {
+		p.seenTableKeys = append(p.seenTableKeys, key.val)
+	}
+
+	// move to next parser state
+	p.assume(tokenDoubleRightBracket)
+	return p.parseStart
+}
+
+func (p *tomlParser) parseGroup() tomlParserStateFn {
+	startToken := p.getToken() // discard the [
+	key := p.getToken()
+	if key.typ != tokenKeyGroup {
+		p.raiseError(key, "unexpected token %s, was expecting a table key", key)
+	}
+	for _, item := range p.seenTableKeys {
+		if item == key.val {
+			p.raiseError(key, "duplicated tables")
+		}
+	}
+
+	p.seenTableKeys = append(p.seenTableKeys, key.val)
+	keys, err := parseKey(key.val)
+	if err != nil {
+		p.raiseError(key, "invalid table array key: %s", err)
+	}
+	if err := p.tree.createSubTree(keys, startToken.Position); err != nil {
+		p.raiseError(key, "%s", err)
+	}
+	p.assume(tokenRightBracket)
+	p.currentTable = keys
+	return p.parseStart
+}
+
+func (p *tomlParser) parseAssign() tomlParserStateFn {
+	key := p.getToken()
+	p.assume(tokenEqual)
+
+	value := p.parseRvalue()
+	var tableKey []string
+	if len(p.currentTable) > 0 {
+		tableKey = p.currentTable
+	} else {
+		tableKey = []string{}
+	}
+
+	// find the table to assign, looking out for arrays of tables
+	var targetNode *Tree
+	switch node := p.tree.GetPath(tableKey).(type) {
+	case []*Tree:
+		targetNode = node[len(node)-1]
+	case *Tree:
+		targetNode = node
+	default:
+		p.raiseError(key, "Unknown table type for path: %s",
+			strings.Join(tableKey, "."))
+	}
+
+	// assign value to the found table
+	keyVals := []string{key.val}
+	if len(keyVals) != 1 {
+		p.raiseError(key, "Invalid key")
+	}
+	keyVal := keyVals[0]
+	localKey := []string{keyVal}
+	finalKey := append(tableKey, keyVal)
+	if targetNode.GetPath(localKey) != nil {
+		p.raiseError(key, "The following key was defined twice: %s",
+			strings.Join(finalKey, "."))
+	}
+	var toInsert interface{}
+
+	switch value.(type) {
+	case *Tree, []*Tree:
+		toInsert = value
+	default:
+		toInsert = &tomlValue{value: value, position: key.Position}
+	}
+	targetNode.values[keyVal] = toInsert
+	return p.parseStart
+}
+
+var numberUnderscoreInvalidRegexp *regexp.Regexp
+var hexNumberUnderscoreInvalidRegexp *regexp.Regexp
+
+func numberContainsInvalidUnderscore(value string) error {
+	if numberUnderscoreInvalidRegexp.MatchString(value) {
+		return errors.New("invalid use of _ in number")
+	}
+	return nil
+}
+
+func hexNumberContainsInvalidUnderscore(value string) error {
+	if hexNumberUnderscoreInvalidRegexp.MatchString(value) {
+		return errors.New("invalid use of _ in hex number")
+	}
+	return nil
+}
+
+func cleanupNumberToken(value string) string {
+	cleanedVal := strings.Replace(value, "_", "", -1)
+	return cleanedVal
+}
+
+func (p *tomlParser) parseRvalue() interface{} {
+	tok := p.getToken()
+	if tok == nil || tok.typ == tokenEOF {
+		p.raiseError(tok, "expecting a value")
+	}
+
+	switch tok.typ {
+	case tokenString:
+		return tok.val
+	case tokenTrue:
+		return true
+	case tokenFalse:
+		return false
+	case tokenInf:
+		if tok.val[0] == '-' {
+			return math.Inf(-1)
+		}
+		return math.Inf(1)
+	case tokenNan:
+		return math.NaN()
+	case tokenInteger:
+		cleanedVal := cleanupNumberToken(tok.val)
+		var err error
+		var val int64
+		if len(cleanedVal) >= 3 && cleanedVal[0] == '0' {
+			switch cleanedVal[1] {
+			case 'x':
+				err = hexNumberContainsInvalidUnderscore(tok.val)
+				if err != nil {
+					p.raiseError(tok, "%s", err)
+				}
+				val, err = strconv.ParseInt(cleanedVal[2:], 16, 64)
+			case 'o':
+				err = numberContainsInvalidUnderscore(tok.val)
+				if err != nil {
+					p.raiseError(tok, "%s", err)
+				}
+				val, err = strconv.ParseInt(cleanedVal[2:], 8, 64)
+			case 'b':
+				err = numberContainsInvalidUnderscore(tok.val)
+				if err != nil {
+					p.raiseError(tok, "%s", err)
+				}
+				val, err = strconv.ParseInt(cleanedVal[2:], 2, 64)
+			default:
+				panic("invalid base") // the lexer should catch this first
+			}
+		} else {
+			err = numberContainsInvalidUnderscore(tok.val)
+			if err != nil {
+				p.raiseError(tok, "%s", err)
+			}
+			val, err = strconv.ParseInt(cleanedVal, 10, 64)
+		}
+		if err != nil {
+			p.raiseError(tok, "%s", err)
+		}
+		return val
+	case tokenFloat:
+		err := numberContainsInvalidUnderscore(tok.val)
+		if err != nil {
+			p.raiseError(tok, "%s", err)
+		}
+		cleanedVal := cleanupNumberToken(tok.val)
+		val, err := strconv.ParseFloat(cleanedVal, 64)
+		if err != nil {
+			p.raiseError(tok, "%s", err)
+		}
+		return val
+	case tokenDate:
+		val, err := time.ParseInLocation(time.RFC3339Nano, tok.val, time.UTC)
+		if err != nil {
+			p.raiseError(tok, "%s", err)
+		}
+		return val
+	case tokenLeftBracket:
+		return p.parseArray()
+	case tokenLeftCurlyBrace:
+		return p.parseInlineTable()
+	case tokenEqual:
+		p.raiseError(tok, "cannot have multiple equals for the same key")
+	case tokenError:
+		p.raiseError(tok, "%s", tok)
+	}
+
+	p.raiseError(tok, "never reached")
+
+	return nil
+}
+
+func tokenIsComma(t *token) bool {
+	return t != nil && t.typ == tokenComma
+}
+
+func (p *tomlParser) parseInlineTable() *Tree {
+	tree := newTree()
+	var previous *token
+Loop:
+	for {
+		follow := p.peek()
+		if follow == nil || follow.typ == tokenEOF {
+			p.raiseError(follow, "unterminated inline table")
+		}
+		switch follow.typ {
+		case tokenRightCurlyBrace:
+			p.getToken()
+			break Loop
+		case tokenKey:
+			if !tokenIsComma(previous) && previous != nil {
+				p.raiseError(follow, "comma expected between fields in inline table")
+			}
+			key := p.getToken()
+			p.assume(tokenEqual)
+			value := p.parseRvalue()
+			tree.Set(key.val, value)
+		case tokenComma:
+			if previous == nil {
+				p.raiseError(follow, "inline table cannot start with a comma")
+			}
+			if tokenIsComma(previous) {
+				p.raiseError(follow, "need field between two commas in inline table")
+			}
+			p.getToken()
+		default:
+			p.raiseError(follow, "unexpected token type in inline table: %s", follow.String())
+		}
+		previous = follow
+	}
+	if tokenIsComma(previous) {
+		p.raiseError(previous, "trailing comma at the end of inline table")
+	}
+	return tree
+}
+
+func (p *tomlParser) parseArray() interface{} {
+	var array []interface{}
+	var arrayType reflect.Type // nil until the first element sets it
+	for {
+		follow := p.peek()
+		if follow == nil || follow.typ == tokenEOF {
+			p.raiseError(follow, "unterminated array")
+		}
+		if follow.typ == tokenRightBracket {
+			p.getToken()
+			break
+		}
+		val := p.parseRvalue()
+		if arrayType == nil {
+			arrayType = reflect.TypeOf(val)
+		}
+		if reflect.TypeOf(val) != arrayType {
+			p.raiseError(follow, "mixed types in array")
+		}
+		array = append(array, val)
+		follow = p.peek()
+		if follow == nil || follow.typ == tokenEOF {
+			p.raiseError(follow, "unterminated array")
+		}
+		if follow.typ != tokenRightBracket && follow.typ != tokenComma {
+			p.raiseError(follow, "missing comma")
+		}
+		if follow.typ == tokenComma {
+			p.getToken()
+		}
+	}
+	// An array of Trees is actually an array of inline
+	// tables, which is a shorthand for a table array. If the
+	// array was not converted from []interface{} to []*Tree,
+	// the two notations would not be equivalent.
+	if arrayType == reflect.TypeOf(newTree()) {
+		tomlArray := make([]*Tree, len(array))
+		for i, v := range array {
+			tomlArray[i] = v.(*Tree)
+		}
+		return tomlArray
+	}
+	return array
+}
+
+func parseToml(flow []token) *Tree {
+	result := newTree()
+	result.position = Position{1, 1}
+	parser := &tomlParser{
+		flowIdx:       0,
+		flow:          flow,
+		tree:          result,
+		currentTable:  make([]string, 0),
+		seenTableKeys: make([]string, 0),
+	}
+	parser.run()
+	return result
+}
+
+func init() {
+	numberUnderscoreInvalidRegexp = regexp.MustCompile(`([^\d]_|_[^\d])|_$|^_`)
+	hexNumberUnderscoreInvalidRegexp = regexp.MustCompile(`(^0x_)|([^\da-f]_|_[^\da-f])|_$|^_`)
+}
diff --git a/vendor/github.com/pelletier/go-toml/position.go b/vendor/github.com/pelletier/go-toml/position.go
new file mode 100644
index 000000000..c17bff87b
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/position.go
@@ -0,0 +1,29 @@
+// Position support for go-toml
+
+package toml
+
+import (
+	"fmt"
+)
+
+// Position of a document element within a TOML document.
+//
+// Line and Col are both 1-indexed positions for the element's line number and
+// column number, respectively. Values of zero or less will cause Invalid()
+// to return true.
+type Position struct {
+	Line int // line within the document
+	Col  int // column within the line
+}
+
+// String representation of the position.
+// Displays 1-indexed line and column numbers.
+func (p Position) String() string {
+	return fmt.Sprintf("(%d, %d)", p.Line, p.Col)
+}
+
+// Invalid reports whether the position is invalid, i.e. whether Line or
+// Col is zero or negative.
+func (p Position) Invalid() bool {
+	return p.Line <= 0 || p.Col <= 0
+}
diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go
new file mode 100644
index 000000000..1a9081346
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/token.go
@@ -0,0 +1,144 @@
+package toml
+
+import (
+	"fmt"
+	"strconv"
+	"unicode"
+)
+
+// Define tokens
+type tokenType int
+
+const (
+	eof = -(iota + 1)
+)
+
+const (
+	tokenError tokenType = iota
+	tokenEOF
+	tokenComment
+	tokenKey
+	tokenString
+	tokenInteger
+	tokenTrue
+	tokenFalse
+	tokenFloat
+	tokenInf
+	tokenNan
+	tokenEqual
+	tokenLeftBracket
+	tokenRightBracket
+	tokenLeftCurlyBrace
+	tokenRightCurlyBrace
+	tokenLeftParen
+	tokenRightParen
+	tokenDoubleLeftBracket
+	tokenDoubleRightBracket
+	tokenDate
+	tokenKeyGroup
+	tokenKeyGroupArray
+	tokenComma
+	tokenColon
+	tokenDollar
+	tokenStar
+	tokenQuestion
+	tokenDot
+	tokenDotDot
+	tokenEOL
+)
+
+var tokenTypeNames = []string{
+	"Error",
+	"EOF",
+	"Comment",
+	"Key",
+	"String",
+	"Integer",
+	"True",
+	"False",
+	"Float",
+	"Inf",
+	"NaN",
+	"=",
+	"[",
+	"]",
+	"{",
+	"}",
+	"(",
+	")",
+	"]]",
+	"[[",
+	"Date",
+	"KeyGroup",
+	"KeyGroupArray",
+	",",
+	":",
+	"$",
+	"*",
+	"?",
+	".",
+	"..",
+	"EOL",
+}
+
+type token struct {
+	Position
+	typ tokenType
+	val string
+}
+
+func (tt tokenType) String() string {
+	idx := int(tt)
+	if idx < len(tokenTypeNames) {
+		return tokenTypeNames[idx]
+	}
+	return "Unknown"
+}
+
+func (t token) Int() int {
+	if result, err := strconv.Atoi(t.val); err != nil {
+		panic(err)
+	} else {
+		return result
+	}
+}
+
+func (t token) String() string {
+	switch t.typ {
+	case tokenEOF:
+		return "EOF"
+	case tokenError:
+		return t.val
+	}
+
+	return fmt.Sprintf("%q", t.val)
+}
+
+func isSpace(r rune) bool {
+	return r == ' ' || r == '\t'
+}
+
+func isAlphanumeric(r rune) bool {
+	return unicode.IsLetter(r) || r == '_'
+}
+
+func isKeyChar(r rune) bool {
+	// Keys start with the first character that isn't whitespace or [ and end
+	// with the last non-whitespace character before the equals sign. Keys
+	// cannot contain a # character.
+	return !(r == '\r' || r == '\n' || r == eof || r == '=')
+}
+
+func isKeyStartChar(r rune) bool {
+	return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[')
+}
+
+func isDigit(r rune) bool {
+	return unicode.IsNumber(r)
+}
+
+func isHexDigit(r rune) bool {
+	return isDigit(r) ||
+		(r >= 'a' && r <= 'f') ||
+		(r >= 'A' && r <= 'F')
+}
diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go
new file mode 100644
index 000000000..98c185ad0
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/toml.go
@@ -0,0 +1,367 @@
+package toml
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"runtime"
+	"strings"
+)
+
+type tomlValue struct {
+	value     interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list
+	comment   string
+	commented bool
+	multiline bool
+	position  Position
+}
+
+// Tree is the result of the parsing of a TOML file.
+type Tree struct {
+	values    map[string]interface{} // string -> *tomlValue, *Tree, []*Tree
+	comment   string
+	commented bool
+	position  Position
+}
+
+func newTree() *Tree {
+	return &Tree{
+		values:   make(map[string]interface{}),
+		position: Position{},
+	}
+}
+
+// TreeFromMap initializes a new Tree object using the given map.
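+//
+// For example (illustrative):
+//
+//   tree, err := TreeFromMap(map[string]interface{}{"port": int64(8080)})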
+func TreeFromMap(m map[string]interface{}) (*Tree, error) {
+	result, err := toTree(m)
+	if err != nil {
+		return nil, err
+	}
+	return result.(*Tree), nil
+}
+
+// Position returns the position of the tree.
+func (t *Tree) Position() Position {
+	return t.position
+}
+
+// Has returns a boolean indicating if the given key exists.
+func (t *Tree) Has(key string) bool {
+	if key == "" {
+		return false
+	}
+	return t.HasPath(strings.Split(key, "."))
+}
+
+// HasPath returns true if the given path of keys exists, false otherwise.
+func (t *Tree) HasPath(keys []string) bool {
+	return t.GetPath(keys) != nil
+}
+
+// Keys returns the keys of the toplevel tree (does not recurse).
+func (t *Tree) Keys() []string {
+	keys := make([]string, len(t.values))
+	i := 0
+	for k := range t.values {
+		keys[i] = k
+		i++
+	}
+	return keys
+}
+
+// Get the value at key in the Tree.
+// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings.
+// If you need to retrieve non-bare keys, use GetPath.
+// Returns nil if the path does not exist in the tree.
+// If key is empty, the current tree is returned.
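+//
+// For example (illustrative):
+//
+//   tree, _ := Load("[server]\nport = 8080")
+//   port := tree.Get("server.port") // int64(8080)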
+func (t *Tree) Get(key string) interface{} {
+	if key == "" {
+		return t
+	}
+	return t.GetPath(strings.Split(key, "."))
+}
+
+// GetPath returns the element in the tree indicated by 'keys'.
+// If keys is of length zero, the current tree is returned.
+func (t *Tree) GetPath(keys []string) interface{} {
+	if len(keys) == 0 {
+		return t
+	}
+	subtree := t
+	for _, intermediateKey := range keys[:len(keys)-1] {
+		value, exists := subtree.values[intermediateKey]
+		if !exists {
+			return nil
+		}
+		switch node := value.(type) {
+		case *Tree:
+			subtree = node
+		case []*Tree:
+			// go to most recent element
+			if len(node) == 0 {
+				return nil
+			}
+			subtree = node[len(node)-1]
+		default:
+			return nil // cannot navigate through other node types
+		}
+	}
+	// branch based on final node type
+	switch node := subtree.values[keys[len(keys)-1]].(type) {
+	case *tomlValue:
+		return node.value
+	default:
+		return node
+	}
+}
+
+// GetPosition returns the position of the given key.
+func (t *Tree) GetPosition(key string) Position {
+	if key == "" {
+		return t.position
+	}
+	return t.GetPositionPath(strings.Split(key, "."))
+}
+
+// GetPositionPath returns the position of the element in the tree indicated
+// by 'keys'. If keys is of length zero, the position of the current tree is
+// returned.
+func (t *Tree) GetPositionPath(keys []string) Position {
+	if len(keys) == 0 {
+		return t.position
+	}
+	subtree := t
+	for _, intermediateKey := range keys[:len(keys)-1] {
+		value, exists := subtree.values[intermediateKey]
+		if !exists {
+			return Position{0, 0}
+		}
+		switch node := value.(type) {
+		case *Tree:
+			subtree = node
+		case []*Tree:
+			// go to most recent element
+			if len(node) == 0 {
+				return Position{0, 0}
+			}
+			subtree = node[len(node)-1]
+		default:
+			return Position{0, 0}
+		}
+	}
+	// branch based on final node type
+	switch node := subtree.values[keys[len(keys)-1]].(type) {
+	case *tomlValue:
+		return node.position
+	case *Tree:
+		return node.position
+	case []*Tree:
+		// go to most recent element
+		if len(node) == 0 {
+			return Position{0, 0}
+		}
+		return node[len(node)-1].position
+	default:
+		return Position{0, 0}
+	}
+}
+
+// GetDefault works like Get but with a default value
+func (t *Tree) GetDefault(key string, def interface{}) interface{} {
+	val := t.Get(key)
+	if val == nil {
+		return def
+	}
+	return val
+}
+
+// SetOptions arguments are supplied to the SetWithOptions and SetPathWithOptions functions to modify marshalling behaviour.
+// The default values within the struct are valid default options.
+type SetOptions struct {
+	Comment   string
+	Commented bool
+	Multiline bool
+}
+
+// SetWithOptions is the same as Set, but allows you to provide formatting
+// instructions for the key, which will be used by Marshal().
+func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) {
+	t.SetPathWithOptions(strings.Split(key, "."), opts, value)
+}
+
+// SetPathWithOptions is the same as SetPath, but allows you to provide
+// formatting instructions for the key, which will be reused by Marshal().
+func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) {
+	subtree := t
+	for _, intermediateKey := range keys[:len(keys)-1] {
+		nextTree, exists := subtree.values[intermediateKey]
+		if !exists {
+			nextTree = newTree()
+			subtree.values[intermediateKey] = nextTree // add new element here
+		}
+		switch node := nextTree.(type) {
+		case *Tree:
+			subtree = node
+		case []*Tree:
+			// go to most recent element
+			if len(node) == 0 {
+				// create element if it does not exist
+				subtree.values[intermediateKey] = append(node, newTree())
+			}
+			subtree = node[len(node)-1]
+		}
+	}
+
+	var toInsert interface{}
+
+	switch value.(type) {
+	case *Tree:
+		tt := value.(*Tree)
+		tt.comment = opts.Comment
+		toInsert = value
+	case []*Tree:
+		toInsert = value
+	case *tomlValue:
+		tt := value.(*tomlValue)
+		tt.comment = opts.Comment
+		toInsert = tt
+	default:
+		toInsert = &tomlValue{value: value, comment: opts.Comment, commented: opts.Commented, multiline: opts.Multiline}
+	}
+
+	subtree.values[keys[len(keys)-1]] = toInsert
+}
+
+// Set an element in the tree.
+// Key is a dot-separated path (e.g. a.b.c).
+// Creates all necessary intermediate trees, if needed.
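+//
+// For example (illustrative):
+//
+//   tree.Set("server.port", int64(8080)) // creates the "server" subtree if needed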
+func (t *Tree) Set(key string, value interface{}) {
+	t.SetWithComment(key, "", false, value)
+}
+
+// SetWithComment is the same as Set, but allows you to provide comment
+// information for the key, which will be reused by Marshal().
+func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) {
+	t.SetPathWithComment(strings.Split(key, "."), comment, commented, value)
+}
+
+// SetPath sets an element in the tree.
+// Keys is an array of path elements (e.g. {"a","b","c"}).
+// Creates all necessary intermediate trees, if needed.
+func (t *Tree) SetPath(keys []string, value interface{}) {
+	t.SetPathWithComment(keys, "", false, value)
+}
+
+// SetPathWithComment is the same as SetPath, but allows you to provide comment
+// information for the key, which will be reused by Marshal().
+func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) {
+	subtree := t
+	for _, intermediateKey := range keys[:len(keys)-1] {
+		nextTree, exists := subtree.values[intermediateKey]
+		if !exists {
+			nextTree = newTree()
+			subtree.values[intermediateKey] = nextTree // add new element here
+		}
+		switch node := nextTree.(type) {
+		case *Tree:
+			subtree = node
+		case []*Tree:
+			// go to most recent element
+			if len(node) == 0 {
+				// create element if it does not exist
+				subtree.values[intermediateKey] = append(node, newTree())
+			}
+			subtree = node[len(node)-1]
+		}
+	}
+
+	var toInsert interface{}
+
+	switch value.(type) {
+	case *Tree:
+		tt := value.(*Tree)
+		tt.comment = comment
+		toInsert = value
+	case []*Tree:
+		toInsert = value
+	case *tomlValue:
+		tt := value.(*tomlValue)
+		tt.comment = comment
+		toInsert = tt
+	default:
+		toInsert = &tomlValue{value: value, comment: comment, commented: commented}
+	}
+
+	subtree.values[keys[len(keys)-1]] = toInsert
+}
+
+// createSubTree takes a tree and a key and creates the necessary intermediate
+// subtrees to create a subtree at that point. In-place.
+//
+// e.g. passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b]
+// and tree[a][b][c]
+//
+// Returns nil on success, error object on failure
+func (t *Tree) createSubTree(keys []string, pos Position) error {
+	subtree := t
+	for _, intermediateKey := range keys {
+		nextTree, exists := subtree.values[intermediateKey]
+		if !exists {
+			tree := newTree()
+			tree.position = pos
+			subtree.values[intermediateKey] = tree
+			nextTree = tree
+		}
+
+		switch node := nextTree.(type) {
+		case []*Tree:
+			subtree = node[len(node)-1]
+		case *Tree:
+			subtree = node
+		default:
+			return fmt.Errorf("unknown type for path %s (%s): %T (%#v)",
+				strings.Join(keys, "."), intermediateKey, nextTree, nextTree)
+		}
+	}
+	return nil
+}
+
+// LoadBytes creates a Tree from a []byte.
+func LoadBytes(b []byte) (tree *Tree, err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if _, ok := r.(runtime.Error); ok {
+				panic(r)
+			}
+			err = errors.New(r.(string))
+		}
+	}()
+	tree = parseToml(lexToml(b))
+	return
+}
+
+// LoadReader creates a Tree from any io.Reader.
+func LoadReader(reader io.Reader) (tree *Tree, err error) {
+	inputBytes, err := ioutil.ReadAll(reader)
+	if err != nil {
+		return
+	}
+	tree, err = LoadBytes(inputBytes)
+	return
+}
+
+// Load creates a Tree from a string.
+func Load(content string) (tree *Tree, err error) {
+	return LoadBytes([]byte(content))
+}
+
+// LoadFile creates a Tree from a file.
+func LoadFile(path string) (tree *Tree, err error) {
+	file, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+	return LoadReader(file)
+}
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go
new file mode 100644
index 000000000..79610e9b3
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/tomltree_create.go
@@ -0,0 +1,142 @@
+package toml
+
+import (
+	"fmt"
+	"reflect"
+	"time"
+)
+
+var kindToType = [reflect.String + 1]reflect.Type{
+	reflect.Bool:    reflect.TypeOf(true),
+	reflect.String:  reflect.TypeOf(""),
+	reflect.Float32: reflect.TypeOf(float64(1)),
+	reflect.Float64: reflect.TypeOf(float64(1)),
+	reflect.Int:     reflect.TypeOf(int64(1)),
+	reflect.Int8:    reflect.TypeOf(int64(1)),
+	reflect.Int16:   reflect.TypeOf(int64(1)),
+	reflect.Int32:   reflect.TypeOf(int64(1)),
+	reflect.Int64:   reflect.TypeOf(int64(1)),
+	reflect.Uint:    reflect.TypeOf(uint64(1)),
+	reflect.Uint8:   reflect.TypeOf(uint64(1)),
+	reflect.Uint16:  reflect.TypeOf(uint64(1)),
+	reflect.Uint32:  reflect.TypeOf(uint64(1)),
+	reflect.Uint64:  reflect.TypeOf(uint64(1)),
+}
+
+// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found.
+// Supported values:
+// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32
+func typeFor(k reflect.Kind) reflect.Type {
+	if k > 0 && int(k) < len(kindToType) {
+		return kindToType[k]
+	}
+	return nil
+}
+
+func simpleValueCoercion(object interface{}) (interface{}, error) {
+	switch original := object.(type) {
+	case string, bool, int64, uint64, float64, time.Time:
+		return original, nil
+	case int:
+		return int64(original), nil
+	case int8:
+		return int64(original), nil
+	case int16:
+		return int64(original), nil
+	case int32:
+		return int64(original), nil
+	case uint:
+		return uint64(original), nil
+	case uint8:
+		return uint64(original), nil
+	case uint16:
+		return uint64(original), nil
+	case uint32:
+		return uint64(original), nil
+	case float32:
+		return float64(original), nil
+	case fmt.Stringer:
+		return original.String(), nil
+	default:
+		return nil, fmt.Errorf("cannot convert type %T to Tree", object)
+	}
+}
+
+func sliceToTree(object interface{}) (interface{}, error) {
+	// arrays are a bit tricky, since they can represent either a
+	// collection of simple values, which is represented by one
+	// *tomlValue, or an array of tables, which is represented by an
+	// array of *Tree.
+
+	// holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice
+	value := reflect.ValueOf(object)
+	insideType := value.Type().Elem()
+	length := value.Len()
+	if length > 0 {
+		insideType = reflect.ValueOf(value.Index(0).Interface()).Type()
+	}
+	if insideType.Kind() == reflect.Map {
+		// this is considered as an array of tables
+		tablesArray := make([]*Tree, 0, length)
+		for i := 0; i < length; i++ {
+			table := value.Index(i)
+			tree, err := toTree(table.Interface())
+			if err != nil {
+				return nil, err
+			}
+			tablesArray = append(tablesArray, tree.(*Tree))
+		}
+		return tablesArray, nil
+	}
+
+	sliceType := typeFor(insideType.Kind())
+	if sliceType == nil {
+		sliceType = insideType
+	}
+
+	arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length)
+
+	for i := 0; i < length; i++ {
+		val := value.Index(i).Interface()
+		simpleValue, err := simpleValueCoercion(val)
+		if err != nil {
+			return nil, err
+		}
+		arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue))
+	}
+	return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil
+}
+
+func toTree(object interface{}) (interface{}, error) {
+	value := reflect.ValueOf(object)
+
+	if value.Kind() == reflect.Map {
+		values := map[string]interface{}{}
+		keys := value.MapKeys()
+		for _, key := range keys {
+			if key.Kind() != reflect.String {
+				if _, ok := key.Interface().(string); !ok {
+					return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind())
+				}
+			}
+
+			v := value.MapIndex(key)
+			newValue, err := toTree(v.Interface())
+			if err != nil {
+				return nil, err
+			}
+			values[key.String()] = newValue
+		}
+		return &Tree{values: values, position: Position{}}, nil
+	}
+
+	if value.Kind() == reflect.Array || value.Kind() == reflect.Slice {
+		return sliceToTree(object)
+	}
+
+	simpleValue, err := simpleValueCoercion(object)
+	if err != nil {
+		return nil, err
+	}
+	return &tomlValue{value: simpleValue, position: Position{}}, nil
+}
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go
new file mode 100644
index 000000000..e4049e29f
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go
@@ -0,0 +1,333 @@
+package toml
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Encodes a string to a TOML-compliant multi-line string value.
+// This function is a clone of the existing encodeTomlString function, except that whitespace characters
+// are preserved. Quotation marks and backslashes are also not escaped.
+func encodeMultilineTomlString(value string) string {
+	var b bytes.Buffer
+
+	for _, rr := range value {
+		switch rr {
+		case '\b':
+			b.WriteString(`\b`)
+		case '\t':
+			b.WriteString("\t")
+		case '\n':
+			b.WriteString("\n")
+		case '\f':
+			b.WriteString(`\f`)
+		case '\r':
+			b.WriteString("\r")
+		case '"':
+			b.WriteString(`"`)
+		case '\\':
+			b.WriteString(`\`)
+		default:
+			intRr := uint16(rr)
+			if intRr < 0x001F {
+				b.WriteString(fmt.Sprintf("\\u%0.4X", intRr))
+			} else {
+				b.WriteRune(rr)
+			}
+		}
+	}
+	return b.String()
+}
+
+// Encodes a string to a TOML-compliant string value
+func encodeTomlString(value string) string {
+	var b bytes.Buffer
+
+	for _, rr := range value {
+		switch rr {
+		case '\b':
+			b.WriteString(`\b`)
+		case '\t':
+			b.WriteString(`\t`)
+		case '\n':
+			b.WriteString(`\n`)
+		case '\f':
+			b.WriteString(`\f`)
+		case '\r':
+			b.WriteString(`\r`)
+		case '"':
+			b.WriteString(`\"`)
+		case '\\':
+			b.WriteString(`\\`)
+		default:
+			intRr := uint16(rr)
+			if intRr < 0x001F {
+				b.WriteString(fmt.Sprintf("\\u%0.4X", intRr))
+			} else {
+				b.WriteRune(rr)
+			}
+		}
+	}
+	return b.String()
+}
+
+func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElementPerLine bool) (string, error) {
+	// this interface check lets writeTo pass a *tomlValue wrapper so that
+	// formatting options (such as multiline) are visible to this function.
+	tv, ok := v.(*tomlValue)
+	if ok {
+		v = tv.value
+	} else {
+		tv = &tomlValue{}
+	}
+
+	switch value := v.(type) {
+	case uint64:
+		return strconv.FormatUint(value, 10), nil
+	case int64:
+		return strconv.FormatInt(value, 10), nil
+	case float64:
+		// Ensure a round float still contains a decimal point. Otherwise feeding
+		// the output back to the parser would convert it to an integer.
+		if math.Trunc(value) == value {
+			return strings.ToLower(strconv.FormatFloat(value, 'f', 1, 64)), nil
+		}
+		return strings.ToLower(strconv.FormatFloat(value, 'f', -1, 64)), nil
+	case string:
+		if tv.multiline {
+			return "\"\"\"\n" + encodeMultilineTomlString(value) + "\"\"\"", nil
+		}
+		return "\"" + encodeTomlString(value) + "\"", nil
+	case []byte:
+		b, _ := v.([]byte)
+		return tomlValueStringRepresentation(string(b), indent, arraysOneElementPerLine)
+	case bool:
+		if value {
+			return "true", nil
+		}
+		return "false", nil
+	case time.Time:
+		return value.Format(time.RFC3339), nil
+	case nil:
+		return "", nil
+	}
+
+	rv := reflect.ValueOf(v)
+
+	if rv.Kind() == reflect.Slice {
+		var values []string
+		for i := 0; i < rv.Len(); i++ {
+			item := rv.Index(i).Interface()
+			itemRepr, err := tomlValueStringRepresentation(item, indent, arraysOneElementPerLine)
+			if err != nil {
+				return "", err
+			}
+			values = append(values, itemRepr)
+		}
+		if arraysOneElementPerLine && len(values) > 1 {
+			stringBuffer := bytes.Buffer{}
+			valueIndent := indent + `  ` // TODO: move that to a shared encoder state
+
+			stringBuffer.WriteString("[\n")
+
+			for _, value := range values {
+				stringBuffer.WriteString(valueIndent)
+				stringBuffer.WriteString(value)
+				stringBuffer.WriteString(`,`)
+				stringBuffer.WriteString("\n")
+			}
+
+			stringBuffer.WriteString(indent + "]")
+
+			return stringBuffer.String(), nil
+		}
+		return "[" + strings.Join(values, ",") + "]", nil
+	}
+	return "", fmt.Errorf("unsupported value type %T: %v", v, v)
+}
+
+func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) {
+	simpleValuesKeys := make([]string, 0)
+	complexValuesKeys := make([]string, 0)
+
+	for k := range t.values {
+		v := t.values[k]
+		switch v.(type) {
+		case *Tree, []*Tree:
+			complexValuesKeys = append(complexValuesKeys, k)
+		default:
+			simpleValuesKeys = append(simpleValuesKeys, k)
+		}
+	}
+
+	sort.Strings(simpleValuesKeys)
+	sort.Strings(complexValuesKeys)
+
+	for _, k := range simpleValuesKeys {
+		v, ok := t.values[k].(*tomlValue)
+		if !ok {
+			return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
+		}
+
+		repr, err := tomlValueStringRepresentation(v, indent, arraysOneElementPerLine)
+		if err != nil {
+			return bytesCount, err
+		}
+
+		if v.comment != "" {
+			comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1)
+			start := "# "
+			if strings.HasPrefix(comment, "#") {
+				start = ""
+			}
+			writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n")
+			bytesCount += int64(writtenBytesCountComment)
+			if errc != nil {
+				return bytesCount, errc
+			}
+		}
+
+		var commented string
+		if v.commented {
+			commented = "# "
+		}
+		writtenBytesCount, err := writeStrings(w, indent, commented, k, " = ", repr, "\n")
+		bytesCount += int64(writtenBytesCount)
+		if err != nil {
+			return bytesCount, err
+		}
+	}
+
+	for _, k := range complexValuesKeys {
+		v := t.values[k]
+
+		combinedKey := k
+		if keyspace != "" {
+			combinedKey = keyspace + "." + combinedKey
+		}
+		var commented string
+		if t.commented {
+			commented = "# "
+		}
+
+		switch node := v.(type) {
+		// node has to be of those two types given how keys are sorted above
+		case *Tree:
+			tv, ok := t.values[k].(*Tree)
+			if !ok {
+				return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
+			}
+			if tv.comment != "" {
+				comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1)
+				start := "# "
+				if strings.HasPrefix(comment, "#") {
+					start = ""
+				}
+				writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment)
+				bytesCount += int64(writtenBytesCountComment)
+				if errc != nil {
+					return bytesCount, errc
+				}
+			}
+			writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n")
+			bytesCount += int64(writtenBytesCount)
+			if err != nil {
+				return bytesCount, err
+			}
+			bytesCount, err = node.writeTo(w, indent+"  ", combinedKey, bytesCount, arraysOneElementPerLine)
+			if err != nil {
+				return bytesCount, err
+			}
+		case []*Tree:
+			for _, subTree := range node {
+				writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n")
+				bytesCount += int64(writtenBytesCount)
+				if err != nil {
+					return bytesCount, err
+				}
+
+				bytesCount, err = subTree.writeTo(w, indent+"  ", combinedKey, bytesCount, arraysOneElementPerLine)
+				if err != nil {
+					return bytesCount, err
+				}
+			}
+		}
+	}
+
+	return bytesCount, nil
+}
+
+func writeStrings(w io.Writer, s ...string) (int, error) {
+	var n int
+	for i := range s {
+		b, err := io.WriteString(w, s[i])
+		n += b
+		if err != nil {
+			return n, err
+		}
+	}
+	return n, nil
+}
+
+// WriteTo encodes the Tree as TOML and writes it to the writer w.
+// Returns the number of bytes written in case of success, or an error if anything happened.
+func (t *Tree) WriteTo(w io.Writer) (int64, error) {
+	return t.writeTo(w, "", "", 0, false)
+}
+
+// ToTomlString generates a human-readable representation of the current tree.
+// Output spans multiple lines, and is suitable for ingestion by a TOML parser.
+// If the conversion cannot be performed, ToTomlString returns a non-nil error.
+func (t *Tree) ToTomlString() (string, error) {
+	var buf bytes.Buffer
+	_, err := t.WriteTo(&buf)
+	if err != nil {
+		return "", err
+	}
+	return buf.String(), nil
+}
+
+// String generates a human-readable representation of the current tree.
+// Alias of ToTomlString. Present to implement the fmt.Stringer interface.
+func (t *Tree) String() string {
+	result, _ := t.ToTomlString()
+	return result
+}
+
+// ToMap recursively generates a representation of the tree using Go built-in structures.
+// The following types are used:
+//
+//	* bool
+//	* float64
+//	* int64
+//	* string
+//	* uint64
+//	* time.Time
+//	* map[string]interface{} (where interface{} is any of this list)
+//	* []interface{} (where interface{} is any of this list)
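+//
+// For example (illustrative):
+//
+//	tree, _ := Load("a = 1")
+//	m := tree.ToMap() // map[string]interface{}{"a": int64(1)}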
+func (t *Tree) ToMap() map[string]interface{} {
+	result := map[string]interface{}{}
+
+	for k, v := range t.values {
+		switch node := v.(type) {
+		case []*Tree:
+			var array []interface{}
+			for _, item := range node {
+				array = append(array, item.ToMap())
+			}
+			result[k] = array
+		case *Tree:
+			result[k] = node.ToMap()
+		case *tomlValue:
+			result[k] = node.value
+		}
+	}
+	return result
+}
diff --git a/vendor/github.com/shibukawa/configdir/LICENSE b/vendor/github.com/shibukawa/configdir/LICENSE
new file mode 100644
index 000000000..b20af456a
--- /dev/null
+++ b/vendor/github.com/shibukawa/configdir/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 shibukawa
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/shibukawa/configdir/config.go b/vendor/github.com/shibukawa/configdir/config.go
new file mode 100644
index 000000000..8a20e54b5
--- /dev/null
+++ b/vendor/github.com/shibukawa/configdir/config.go
@@ -0,0 +1,160 @@
+// configdir provides access to the configuration folders on each platform.
+//
+// System wide configuration folders:
+//
+//   - Windows: %PROGRAMDATA% (C:\ProgramData)
+//   - Linux/BSDs: ${XDG_CONFIG_DIRS} (/etc/xdg)
+//   - MacOSX: "/Library/Application Support"
+//
+// User wide configuration folders:
+//
+//   - Windows: %APPDATA% (C:\Users\<User>\AppData\Roaming)
+//   - Linux/BSDs: ${XDG_CONFIG_HOME} (${HOME}/.config)
+//   - MacOSX: "${HOME}/Library/Application Support"
+//
+// User wide cache folders:
+//
+//   - Windows: %LOCALAPPDATA% (C:\Users\<User>\AppData\Local)
+//   - Linux/BSDs: ${XDG_CACHE_HOME} (${HOME}/.cache)
+//   - MacOSX: "${HOME}/Library/Caches"
+//
+// configdir returns paths inside the above folders.
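+//
+// A minimal usage sketch (illustrative; vendor, application and file names
+// are arbitrary):
+//
+//   configDirs := configdir.New("vendor-name", "application-name")
+//   folder := configDirs.QueryFolderContainsFile("setting.json")
+//   if folder != nil {
+//       data, _ := folder.ReadFile("setting.json")
+//       _ = data // parse the settings here
+//   }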
+
+package configdir
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+)
+
+type ConfigType int
+
+const (
+	System ConfigType = iota
+	Global
+	All
+	Existing
+	Local
+	Cache
+)
+
+// Config represents a single configuration folder.
+type Config struct {
+	Path string
+	Type ConfigType
+}
+
+func (c Config) Open(fileName string) (*os.File, error) {
+	return os.Open(filepath.Join(c.Path, fileName))
+}
+
+func (c Config) Create(fileName string) (*os.File, error) {
+	err := c.CreateParentDir(fileName)
+	if err != nil {
+		return nil, err
+	}
+	return os.Create(filepath.Join(c.Path, fileName))
+}
+
+func (c Config) ReadFile(fileName string) ([]byte, error) {
+	return ioutil.ReadFile(filepath.Join(c.Path, fileName))
+}
+
+// CreateParentDir creates the parent directory of fileName inside c. fileName
+// is a relative path inside c, containing zero or more path separators.
+func (c Config) CreateParentDir(fileName string) error {
+	return os.MkdirAll(filepath.Dir(filepath.Join(c.Path, fileName)), 0755)
+}
+
+func (c Config) WriteFile(fileName string, data []byte) error {
+	err := c.CreateParentDir(fileName)
+	if err != nil {
+		return err
+	}
+	return ioutil.WriteFile(filepath.Join(c.Path, fileName), data, 0644)
+}
+
+func (c Config) MkdirAll() error {
+	return os.MkdirAll(c.Path, 0755)
+}
+
+func (c Config) Exists(fileName string) bool {
+	_, err := os.Stat(filepath.Join(c.Path, fileName))
+	return !os.IsNotExist(err)
+}
+
+// ConfigDir keeps the settings used when querying folders.
+type ConfigDir struct {
+	VendorName      string
+	ApplicationName string
+	LocalPath       string
+}
+
+func New(vendorName, applicationName string) ConfigDir {
+	return ConfigDir{
+		VendorName:      vendorName,
+		ApplicationName: applicationName,
+	}
+}
+
+func (c ConfigDir) joinPath(root string) string {
+	if c.VendorName != "" && hasVendorName {
+		return filepath.Join(root, c.VendorName, c.ApplicationName)
+	}
+	return filepath.Join(root, c.ApplicationName)
+}
+
+func (c ConfigDir) QueryFolders(configType ConfigType) []*Config {
+	if configType == Cache {
+		return []*Config{c.QueryCacheFolder()}
+	}
+	var result []*Config
+	if c.LocalPath != "" && configType != System && configType != Global {
+		result = append(result, &Config{
+			Path: c.LocalPath,
+			Type: Local,
+		})
+	}
+	if configType != System && configType != Local {
+		result = append(result, &Config{
+			Path: c.joinPath(globalSettingFolder),
+			Type: Global,
+		})
+	}
+	if configType != Global && configType != Local {
+		for _, root := range systemSettingFolders {
+			result = append(result, &Config{
+				Path: c.joinPath(root),
+				Type: System,
+			})
+		}
+	}
+	if configType != Existing {
+		return result
+	}
+	var existing []*Config
+	for _, entry := range result {
+		if _, err := os.Stat(entry.Path); !os.IsNotExist(err) {
+			existing = append(existing, entry)
+		}
+	}
+	return existing
+}
+
+func (c ConfigDir) QueryFolderContainsFile(fileName string) *Config {
+	configs := c.QueryFolders(Existing)
+	for _, config := range configs {
+		if _, err := os.Stat(filepath.Join(config.Path, fileName)); !os.IsNotExist(err) {
+			return config
+		}
+	}
+	return nil
+}
+
+func (c ConfigDir) QueryCacheFolder() *Config {
+	return &Config{
+		Path: c.joinPath(cacheFolder),
+		Type: Cache,
+	}
+}
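
For orientation, a minimal sketch of how the configdir API above is used. The vendor and application names here are illustrative assumptions, not a claim about how lazygit itself wires this up:

```go
package main

import (
	"fmt"

	"github.com/shibukawa/configdir"
)

func main() {
	// Hypothetical vendor/app names, for illustration only.
	configDirs := configdir.New("some-vendor", "some-app")

	// Search local, user (Global) and system folders, in that order,
	// for an existing config.yml.
	if folder := configDirs.QueryFolderContainsFile("config.yml"); folder != nil {
		data, err := folder.ReadFile("config.yml")
		if err == nil {
			fmt.Printf("found config in %s:\n%s", folder.Path, data)
		}
		return
	}

	// Otherwise create it in the user-wide folder. QueryFolders(Global)
	// returns exactly one entry, and WriteFile creates parent directories.
	folders := configDirs.QueryFolders(configdir.Global)
	if err := folders[0].WriteFile("config.yml", []byte("theme: default\n")); err != nil {
		fmt.Println("write failed:", err)
	}
}
```
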
diff --git a/vendor/github.com/shibukawa/configdir/config_darwin.go b/vendor/github.com/shibukawa/configdir/config_darwin.go
new file mode 100644
index 000000000..d668507a7
--- /dev/null
+++ b/vendor/github.com/shibukawa/configdir/config_darwin.go
@@ -0,0 +1,8 @@
+package configdir
+
+import "os"
+
+var hasVendorName = true
+var systemSettingFolders = []string{"/Library/Application Support"}
+var globalSettingFolder = os.Getenv("HOME") + "/Library/Application Support"
+var cacheFolder = os.Getenv("HOME") + "/Library/Caches"
diff --git a/vendor/github.com/shibukawa/configdir/config_windows.go b/vendor/github.com/shibukawa/configdir/config_windows.go
new file mode 100644
index 000000000..098482177
--- /dev/null
+++ b/vendor/github.com/shibukawa/configdir/config_windows.go
@@ -0,0 +1,8 @@
+package configdir
+
+import "os"
+
+var hasVendorName = true
+var systemSettingFolders = []string{os.Getenv("PROGRAMDATA")}
+var globalSettingFolder = os.Getenv("APPDATA")
+var cacheFolder = os.Getenv("LOCALAPPDATA")
diff --git a/vendor/github.com/shibukawa/configdir/config_xdg.go b/vendor/github.com/shibukawa/configdir/config_xdg.go
new file mode 100644
index 000000000..026ca68a0
--- /dev/null
+++ b/vendor/github.com/shibukawa/configdir/config_xdg.go
@@ -0,0 +1,34 @@
+// +build !windows,!darwin
+
+package configdir
+
+import (
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+// https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
+
+var hasVendorName = true
+var systemSettingFolders []string
+var globalSettingFolder string
+var cacheFolder string
+
+func init() {
+	if os.Getenv("XDG_CONFIG_HOME") != "" {
+		globalSettingFolder = os.Getenv("XDG_CONFIG_HOME")
+	} else {
+		globalSettingFolder = filepath.Join(os.Getenv("HOME"), ".config")
+	}
+	if os.Getenv("XDG_CONFIG_DIRS") != "" {
+		systemSettingFolders = strings.Split(os.Getenv("XDG_CONFIG_DIRS"), ":")
+	} else {
+		systemSettingFolders = []string{"/etc/xdg"}
+	}
+	if os.Getenv("XDG_CACHE_HOME") != "" {
+		cacheFolder = os.Getenv("XDG_CACHE_HOME")
+	} else {
+		cacheFolder = filepath.Join(os.Getenv("HOME"), ".cache")
+	}
+}
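
Given the fallbacks above, on a Linux machine with HOME=/home/alice and none of the XDG_* variables set, the resolved paths look like this (a sketch; the app name is an arbitrary assumption):

```go
package main

import (
	"fmt"

	"github.com/shibukawa/configdir"
)

func main() {
	// An empty vendor name makes joinPath use only the application name.
	cd := configdir.New("", "some-app")
	fmt.Println(cd.QueryFolders(configdir.Global)[0].Path) // /home/alice/.config/some-app
	fmt.Println(cd.QueryCacheFolder().Path)                // /home/alice/.cache/some-app
}
```
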
diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt
new file mode 100644
index 000000000..298f0e266
--- /dev/null
+++ b/vendor/github.com/spf13/afero/LICENSE.txt
@@ -0,0 +1,174 @@
+                                Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go
new file mode 100644
index 000000000..f5b5e127c
--- /dev/null
+++ b/vendor/github.com/spf13/afero/afero.go
@@ -0,0 +1,108 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+// Copyright 2013 tsuru authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package afero provides types and methods for interacting with the
+// filesystem, as an abstraction layer.
+//
+// Afero also provides a few implementations that are mostly interoperable: one
+// that uses the operating system's filesystem, one that uses memory to store
+// files (cross-platform), and an interface that should be implemented if you
+// want to provide your own filesystem.
+package afero
+
+import (
+	"errors"
+	"io"
+	"os"
+	"time"
+)
+
+type Afero struct {
+	Fs
+}
+
+// File represents a file in the filesystem.
+type File interface {
+	io.Closer
+	io.Reader
+	io.ReaderAt
+	io.Seeker
+	io.Writer
+	io.WriterAt
+
+	Name() string
+	Readdir(count int) ([]os.FileInfo, error)
+	Readdirnames(n int) ([]string, error)
+	Stat() (os.FileInfo, error)
+	Sync() error
+	Truncate(size int64) error
+	WriteString(s string) (ret int, err error)
+}
+
+// Fs is the filesystem interface.
+//
+// Any simulated or real filesystem should implement this interface.
+type Fs interface {
+	// Create creates a file in the filesystem, returning the file and an
+	// error, if any happens.
+	Create(name string) (File, error)
+
+	// Mkdir creates a directory in the filesystem, returning an error if any
+	// happens.
+	Mkdir(name string, perm os.FileMode) error
+
+	// MkdirAll creates a directory path and all parents that do not exist
+	// yet.
+	MkdirAll(path string, perm os.FileMode) error
+
+	// Open opens a file, returning it or an error, if any happens.
+	Open(name string) (File, error)
+
+	// OpenFile opens a file using the given flags and the given mode.
+	OpenFile(name string, flag int, perm os.FileMode) (File, error)
+
+	// Remove removes a file identified by name, returning an error, if any
+	// happens.
+	Remove(name string) error
+
+	// RemoveAll removes a directory path and any children it contains. It
+	// does not fail if the path does not exist (returns nil).
+	RemoveAll(path string) error
+
+	// Rename renames a file.
+	Rename(oldname, newname string) error
+
+	// Stat returns a FileInfo describing the named file, or an error, if any
+	// happens.
+	Stat(name string) (os.FileInfo, error)
+
+	// Name returns the name of this FileSystem.
+	Name() string
+
+	// Chmod changes the mode of the named file to mode.
+	Chmod(name string, mode os.FileMode) error
+
+	// Chtimes changes the access and modification times of the named file.
+	Chtimes(name string, atime time.Time, mtime time.Time) error
+}
+
+var (
+	ErrFileClosed        = errors.New("File is closed")
+	ErrOutOfRange        = errors.New("Out of range")
+	ErrTooLarge          = errors.New("Too large")
+	ErrFileNotFound      = os.ErrNotExist
+	ErrFileExists        = os.ErrExist
+	ErrDestinationExists = os.ErrExist
+)
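
The Fs interface above is the heart of the package; everything else is built against it. A minimal sketch using the in-memory implementation (NewMemMapFs lives in memmap.go elsewhere in this package, not in this hunk):

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	// Any value satisfying afero.Fs would do here.
	var fs afero.Fs = afero.NewMemMapFs()

	f, err := fs.Create("/hello.txt")
	if err != nil {
		panic(err)
	}
	f.WriteString("hello afero")
	f.Close()

	// The Afero struct embeds an Fs and adds ioutil-style helpers.
	a := &afero.Afero{Fs: fs}
	data, _ := a.ReadFile("/hello.txt")
	fmt.Println(string(data)) // hello afero
}
```
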
diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go
new file mode 100644
index 000000000..616ff8ff7
--- /dev/null
+++ b/vendor/github.com/spf13/afero/basepath.go
@@ -0,0 +1,180 @@
+package afero
+
+import (
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"time"
+)
+
+var _ Lstater = (*BasePathFs)(nil)
+
+// The BasePathFs restricts all operations to a given path within an Fs.
+// The file name passed to an operation on this Fs is prepended with the
+// base path before the call is delegated to the source Fs.
+// Any file name (after filepath.Clean()) outside this base path is
+// treated as a non-existent file.
+//
+// Note that it does not clean the error messages on return, so you may
+// reveal the real path on errors.
+type BasePathFs struct {
+	source Fs
+	path   string
+}
+
+type BasePathFile struct {
+	File
+	path string
+}
+
+func (f *BasePathFile) Name() string {
+	sourcename := f.File.Name()
+	return strings.TrimPrefix(sourcename, filepath.Clean(f.path))
+}
+
+func NewBasePathFs(source Fs, path string) Fs {
+	return &BasePathFs{source: source, path: path}
+}
+
+// RealPath returns the given file name with the base path prepended; for a
+// file outside the base path it returns the name unchanged and an error.
+func (b *BasePathFs) RealPath(name string) (path string, err error) {
+	if err := validateBasePathName(name); err != nil {
+		return name, err
+	}
+
+	bpath := filepath.Clean(b.path)
+	path = filepath.Clean(filepath.Join(bpath, name))
+	if !strings.HasPrefix(path, bpath) {
+		return name, os.ErrNotExist
+	}
+
+	return path, nil
+}
+
+func validateBasePathName(name string) error {
+	if runtime.GOOS != "windows" {
+		// Not much to do here;
+		// the virtual file paths all look absolute on *nix.
+		return nil
+	}
+
+	// On Windows a common mistake would be to provide an absolute OS path
+	// We could strip out the base part, but that would not be very portable.
+	if filepath.IsAbs(name) {
+		return os.ErrNotExist
+	}
+
+	return nil
+}
+
+func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) {
+	if name, err = b.RealPath(name); err != nil {
+		return &os.PathError{Op: "chtimes", Path: name, Err: err}
+	}
+	return b.source.Chtimes(name, atime, mtime)
+}
+
+func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) {
+	if name, err = b.RealPath(name); err != nil {
+		return &os.PathError{Op: "chmod", Path: name, Err: err}
+	}
+	return b.source.Chmod(name, mode)
+}
+
+func (b *BasePathFs) Name() string {
+	return "BasePathFs"
+}
+
+func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) {
+	if name, err = b.RealPath(name); err != nil {
+		return nil, &os.PathError{Op: "stat", Path: name, Err: err}
+	}
+	return b.source.Stat(name)
+}
+
+func (b *BasePathFs) Rename(oldname, newname string) (err error) {
+	if oldname, err = b.RealPath(oldname); err != nil {
+		return &os.PathError{Op: "rename", Path: oldname, Err: err}
+	}
+	if newname, err = b.RealPath(newname); err != nil {
+		return &os.PathError{Op: "rename", Path: newname, Err: err}
+	}
+	return b.source.Rename(oldname, newname)
+}
+
+func (b *BasePathFs) RemoveAll(name string) (err error) {
+	if name, err = b.RealPath(name); err != nil {
+		return &os.PathError{Op: "remove_all", Path: name, Err: err}
+	}
+	return b.source.RemoveAll(name)
+}
+
+func (b *BasePathFs) Remove(name string) (err error) {
+	if name, err = b.RealPath(name); err != nil {
+		return &os.PathError{Op: "remove", Path: name, Err: err}
+	}
+	return b.source.Remove(name)
+}
+
+func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) {
+	if name, err = b.RealPath(name); err != nil {
+		return nil, &os.PathError{Op: "openfile", Path: name, Err: err}
+	}
+	sourcef, err := b.source.OpenFile(name, flag, mode)
+	if err != nil {
+		return nil, err
+	}
+	return &BasePathFile{sourcef, b.path}, nil
+}
+
+func (b *BasePathFs) Open(name string) (f File, err error) {
+	if name, err = b.RealPath(name); err != nil {
+		return nil, &os.PathError{Op: "open", Path: name, Err: err}
+	}
+	sourcef, err := b.source.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	return &BasePathFile{File: sourcef, path: b.path}, nil
+}
+
+func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) {
+	if name, err = b.RealPath(name); err != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: err}
+	}
+	return b.source.Mkdir(name, mode)
+}
+
+func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) {
+	if name, err = b.RealPath(name); err != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: err}
+	}
+	return b.source.MkdirAll(name, mode)
+}
+
+func (b *BasePathFs) Create(name string) (f File, err error) {
+	if name, err = b.RealPath(name); err != nil {
+		return nil, &os.PathError{Op: "create", Path: name, Err: err}
+	}
+	sourcef, err := b.source.Create(name)
+	if err != nil {
+		return nil, err
+	}
+	return &BasePathFile{File: sourcef, path: b.path}, nil
+}
+
+func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
+	name, err := b.RealPath(name)
+	if err != nil {
+		return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err}
+	}
+	if lstater, ok := b.source.(Lstater); ok {
+		return lstater.LstatIfPossible(name)
+	}
+	fi, err := b.source.Stat(name)
+	return fi, false, err
+}
+
+// vim: ts=4 sw=4 noexpandtab nolist syn=go
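
A sketch of the jailing behaviour described above, using *nix-style paths: every name is resolved under the base, and anything that cleans to a path outside it is reported as non-existent:

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewMemMapFs()
	jail := afero.NewBasePathFs(base, "/tmp/jail")

	f, _ := jail.Create("/config.yml") // lands at /tmp/jail/config.yml on base
	fmt.Println(f.Name())              // "/config.yml": the base path is trimmed

	_, err := jail.Open("/../etc/passwd") // cleans to /etc/passwd, outside the jail
	fmt.Println(err)                      // wraps os.ErrNotExist
}
```
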
diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go
new file mode 100644
index 000000000..29a26c67d
--- /dev/null
+++ b/vendor/github.com/spf13/afero/cacheOnReadFs.go
@@ -0,0 +1,290 @@
+package afero
+
+import (
+	"os"
+	"syscall"
+	"time"
+)
+
+// If the cache duration is 0, cache time will be unlimited, i.e. once
+// a file is in the layer, the base will never be read again for this file.
+//
+// For cache times greater than 0, the modification time of a file is
+// checked. Note that a lot of file system implementations only allow a
+// resolution of a second for timestamps... or as the godoc for os.Chtimes()
+// states: "The underlying filesystem may truncate or round the values to a
+// less precise time unit."
+//
+// This caching union also forwards all write calls to the base file
+// system first. To prevent writing to the base Fs, wrap it in a read-only
+// filter. Note that this also makes the overlay read-only; to write files
+// in the overlay, use the overlay Fs directly, not via the union Fs.
+type CacheOnReadFs struct {
+	base      Fs
+	layer     Fs
+	cacheTime time.Duration
+}
+
+func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs {
+	return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime}
+}
+
+type cacheState int
+
+const (
+	// not present in the overlay, unknown if it exists in the base:
+	cacheMiss cacheState = iota
+	// present in the overlay and in base, base file is newer:
+	cacheStale
+	// present in the overlay - with cache time == 0 it may exist in the base,
+	// with cacheTime > 0 it exists in the base and is same age or newer in the
+	// overlay
+	cacheHit
+	// happens if someone writes directly to the overlay without
+	// going through this union
+	cacheLocal
+)
+
+func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
+	var lfi, bfi os.FileInfo
+	lfi, err = u.layer.Stat(name)
+	if err == nil {
+		if u.cacheTime == 0 {
+			return cacheHit, lfi, nil
+		}
+		if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
+			bfi, err = u.base.Stat(name)
+			if err != nil {
+				return cacheLocal, lfi, nil
+			}
+			if bfi.ModTime().After(lfi.ModTime()) {
+				return cacheStale, bfi, nil
+			}
+		}
+		return cacheHit, lfi, nil
+	}
+
+	if err == syscall.ENOENT || os.IsNotExist(err) {
+		return cacheMiss, nil, nil
+	}
+
+	return cacheMiss, nil, err
+}
+
+func (u *CacheOnReadFs) copyToLayer(name string) error {
+	return copyToLayer(u.base, u.layer, name)
+}
+
+func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error {
+	st, _, err := u.cacheStatus(name)
+	if err != nil {
+		return err
+	}
+	switch st {
+	case cacheLocal:
+	case cacheHit:
+		err = u.base.Chtimes(name, atime, mtime)
+	case cacheStale, cacheMiss:
+		if err := u.copyToLayer(name); err != nil {
+			return err
+		}
+		err = u.base.Chtimes(name, atime, mtime)
+	}
+	if err != nil {
+		return err
+	}
+	return u.layer.Chtimes(name, atime, mtime)
+}
+
+func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error {
+	st, _, err := u.cacheStatus(name)
+	if err != nil {
+		return err
+	}
+	switch st {
+	case cacheLocal:
+	case cacheHit:
+		err = u.base.Chmod(name, mode)
+	case cacheStale, cacheMiss:
+		if err := u.copyToLayer(name); err != nil {
+			return err
+		}
+		err = u.base.Chmod(name, mode)
+	}
+	if err != nil {
+		return err
+	}
+	return u.layer.Chmod(name, mode)
+}
+
+func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) {
+	st, fi, err := u.cacheStatus(name)
+	if err != nil {
+		return nil, err
+	}
+	switch st {
+	case cacheMiss:
+		return u.base.Stat(name)
+	default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo
+		return fi, nil
+	}
+}
+
+func (u *CacheOnReadFs) Rename(oldname, newname string) error {
+	st, _, err := u.cacheStatus(oldname)
+	if err != nil {
+		return err
+	}
+	switch st {
+	case cacheLocal:
+	case cacheHit:
+		err = u.base.Rename(oldname, newname)
+	case cacheStale, cacheMiss:
+		if err := u.copyToLayer(oldname); err != nil {
+			return err
+		}
+		err = u.base.Rename(oldname, newname)
+	}
+	if err != nil {
+		return err
+	}
+	return u.layer.Rename(oldname, newname)
+}
+
+func (u *CacheOnReadFs) Remove(name string) error {
+	st, _, err := u.cacheStatus(name)
+	if err != nil {
+		return err
+	}
+	switch st {
+	case cacheLocal:
+	case cacheHit, cacheStale, cacheMiss:
+		err = u.base.Remove(name)
+	}
+	if err != nil {
+		return err
+	}
+	return u.layer.Remove(name)
+}
+
+func (u *CacheOnReadFs) RemoveAll(name string) error {
+	st, _, err := u.cacheStatus(name)
+	if err != nil {
+		return err
+	}
+	switch st {
+	case cacheLocal:
+	case cacheHit, cacheStale, cacheMiss:
+		err = u.base.RemoveAll(name)
+	}
+	if err != nil {
+		return err
+	}
+	return u.layer.RemoveAll(name)
+}
+
+func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	st, _, err := u.cacheStatus(name)
+	if err != nil {
+		return nil, err
+	}
+	switch st {
+	case cacheLocal, cacheHit:
+	default:
+		if err := u.copyToLayer(name); err != nil {
+			return nil, err
+		}
+	}
+	if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
+		bfi, err := u.base.OpenFile(name, flag, perm)
+		if err != nil {
+			return nil, err
+		}
+		lfi, err := u.layer.OpenFile(name, flag, perm)
+		if err != nil {
+			bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...?
+			return nil, err
+		}
+		return &UnionFile{Base: bfi, Layer: lfi}, nil
+	}
+	return u.layer.OpenFile(name, flag, perm)
+}
+
+func (u *CacheOnReadFs) Open(name string) (File, error) {
+	st, fi, err := u.cacheStatus(name)
+	if err != nil {
+		return nil, err
+	}
+
+	switch st {
+	case cacheLocal:
+		return u.layer.Open(name)
+
+	case cacheMiss:
+		bfi, err := u.base.Stat(name)
+		if err != nil {
+			return nil, err
+		}
+		if bfi.IsDir() {
+			return u.base.Open(name)
+		}
+		if err := u.copyToLayer(name); err != nil {
+			return nil, err
+		}
+		return u.layer.Open(name)
+
+	case cacheStale:
+		if !fi.IsDir() {
+			if err := u.copyToLayer(name); err != nil {
+				return nil, err
+			}
+			return u.layer.Open(name)
+		}
+	case cacheHit:
+		if !fi.IsDir() {
+			return u.layer.Open(name)
+		}
+	}
+	// the dirs from cacheHit and cacheStale fall through to here:
+	bfile, _ := u.base.Open(name)
+	lfile, err := u.layer.Open(name)
+	if err != nil && bfile == nil {
+		return nil, err
+	}
+	return &UnionFile{Base: bfile, Layer: lfile}, nil
+}
+
+func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error {
+	err := u.base.Mkdir(name, perm)
+	if err != nil {
+		return err
+	}
+	return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache
+}
+
+func (u *CacheOnReadFs) Name() string {
+	return "CacheOnReadFs"
+}
+
+func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error {
+	err := u.base.MkdirAll(name, perm)
+	if err != nil {
+		return err
+	}
+	return u.layer.MkdirAll(name, perm)
+}
+
+func (u *CacheOnReadFs) Create(name string) (File, error) {
+	bfh, err := u.base.Create(name)
+	if err != nil {
+		return nil, err
+	}
+	lfh, err := u.layer.Create(name)
+	if err != nil {
+		// oops, see comment about OS_TRUNC above, should we remove? then we have to
+		// remember if the file did not exist before
+		bfh.Close()
+		return nil, err
+	}
+	return &UnionFile{Base: bfh, Layer: lfh}, nil
+}
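
A sketch of the union in action: a base filesystem fronted by an in-memory layer with a one-minute cache window. Both filesystems are MemMapFs here purely to keep the example self-contained:

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewMemMapFs()  // stand-in for a slow OS or network Fs
	layer := afero.NewMemMapFs() // the cache
	cached := afero.NewCacheOnReadFs(base, layer, time.Minute)

	afero.WriteFile(base, "/data.txt", []byte("from base"), 0644)

	// The first read is a cacheMiss: the file is copied into the layer,
	// and later reads within the cache window are served from there.
	data, _ := afero.ReadFile(cached, "/data.txt")
	fmt.Println(string(data))

	if ok, _ := afero.Exists(layer, "/data.txt"); ok {
		fmt.Println("now cached in the layer")
	}
}
```
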
diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go
new file mode 100644
index 000000000..5728243d9
--- /dev/null
+++ b/vendor/github.com/spf13/afero/const_bsds.go
@@ -0,0 +1,22 @@
+// Copyright © 2016 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build darwin openbsd freebsd netbsd dragonfly
+
+package afero
+
+import (
+	"syscall"
+)
+
+const BADFD = syscall.EBADF
diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go
new file mode 100644
index 000000000..968fc2783
--- /dev/null
+++ b/vendor/github.com/spf13/afero/const_win_unix.go
@@ -0,0 +1,25 @@
+// Copyright © 2016 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// +build !darwin
+// +build !openbsd
+// +build !freebsd
+// +build !dragonfly
+// +build !netbsd
+
+package afero
+
+import (
+	"syscall"
+)
+
+const BADFD = syscall.EBADFD
diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go
new file mode 100644
index 000000000..9aef39794
--- /dev/null
+++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go
@@ -0,0 +1,292 @@
+package afero
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"syscall"
+	"time"
+)
+
+var _ Lstater = (*CopyOnWriteFs)(nil)
+
+// The CopyOnWriteFs is a union filesystem: a read-only base file system with
+// a possibly writable layer on top. Changes to the file system are only
+// made in the overlay: changing an existing file in the base layer which
+// is not present in the overlay copies the file to the overlay ("changing"
+// also includes calls to e.g. Chtimes() and Chmod()).
+//
+// Reading directories is currently only supported via Open(), not OpenFile().
+type CopyOnWriteFs struct {
+	base  Fs
+	layer Fs
+}
+
+func NewCopyOnWriteFs(base Fs, layer Fs) Fs {
+	return &CopyOnWriteFs{base: base, layer: layer}
+}
+
+// isBaseFile returns true if the file is not in the overlay.
+func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) {
+	if _, err := u.layer.Stat(name); err == nil {
+		return false, nil
+	}
+	_, err := u.base.Stat(name)
+	if err != nil {
+		if oerr, ok := err.(*os.PathError); ok {
+			if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR {
+				return false, nil
+			}
+		}
+		if err == syscall.ENOENT {
+			return false, nil
+		}
+	}
+	return true, err
+}
+
+func (u *CopyOnWriteFs) copyToLayer(name string) error {
+	return copyToLayer(u.base, u.layer, name)
+}
+
+func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error {
+	b, err := u.isBaseFile(name)
+	if err != nil {
+		return err
+	}
+	if b {
+		if err := u.copyToLayer(name); err != nil {
+			return err
+		}
+	}
+	return u.layer.Chtimes(name, atime, mtime)
+}
+
+func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error {
+	b, err := u.isBaseFile(name)
+	if err != nil {
+		return err
+	}
+	if b {
+		if err := u.copyToLayer(name); err != nil {
+			return err
+		}
+	}
+	return u.layer.Chmod(name, mode)
+}
+
+func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) {
+	fi, err := u.layer.Stat(name)
+	if err != nil {
+		isNotExist := u.isNotExist(err)
+		if isNotExist {
+			return u.base.Stat(name)
+		}
+		return nil, err
+	}
+	return fi, nil
+}
+
+func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
+	llayer, ok1 := u.layer.(Lstater)
+	lbase, ok2 := u.base.(Lstater)
+
+	if ok1 {
+		fi, b, err := llayer.LstatIfPossible(name)
+		if err == nil {
+			return fi, b, nil
+		}
+
+		if !u.isNotExist(err) {
+			return nil, b, err
+		}
+	}
+
+	if ok2 {
+		fi, b, err := lbase.LstatIfPossible(name)
+		if err == nil {
+			return fi, b, nil
+		}
+		if !u.isNotExist(err) {
+			return nil, b, err
+		}
+	}
+
+	fi, err := u.Stat(name)
+
+	return fi, false, err
+}
+
+func (u *CopyOnWriteFs) isNotExist(err error) bool {
+	if e, ok := err.(*os.PathError); ok {
+		err = e.Err
+	}
+	if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR {
+		return true
+	}
+	return false
+}
+
+// Renaming files present only in the base layer is not permitted
+func (u *CopyOnWriteFs) Rename(oldname, newname string) error {
+	b, err := u.isBaseFile(oldname)
+	if err != nil {
+		return err
+	}
+	if b {
+		return syscall.EPERM
+	}
+	return u.layer.Rename(oldname, newname)
+}
+
+// Removing files present only in the base layer is not permitted. If
+// a file is present in the base layer and the overlay, only the overlay
+// will be removed.
+func (u *CopyOnWriteFs) Remove(name string) error {
+	err := u.layer.Remove(name)
+	switch err {
+	case syscall.ENOENT:
+		_, err = u.base.Stat(name)
+		if err == nil {
+			return syscall.EPERM
+		}
+		return syscall.ENOENT
+	default:
+		return err
+	}
+}
+
+func (u *CopyOnWriteFs) RemoveAll(name string) error {
+	err := u.layer.RemoveAll(name)
+	switch err {
+	case syscall.ENOENT:
+		_, err = u.base.Stat(name)
+		if err == nil {
+			return syscall.EPERM
+		}
+		return syscall.ENOENT
+	default:
+		return err
+	}
+}
+
+func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	b, err := u.isBaseFile(name)
+	if err != nil {
+		return nil, err
+	}
+
+	if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
+		if b {
+			if err = u.copyToLayer(name); err != nil {
+				return nil, err
+			}
+			return u.layer.OpenFile(name, flag, perm)
+		}
+
+		dir := filepath.Dir(name)
+		isaDir, err := IsDir(u.base, dir)
+		if err != nil && !os.IsNotExist(err) {
+			return nil, err
+		}
+		if isaDir {
+			if err = u.layer.MkdirAll(dir, 0777); err != nil {
+				return nil, err
+			}
+			return u.layer.OpenFile(name, flag, perm)
+		}
+
+		isaDir, err = IsDir(u.layer, dir)
+		if err != nil {
+			return nil, err
+		}
+		if isaDir {
+			return u.layer.OpenFile(name, flag, perm)
+		}
+
+		return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist?
+	}
+	if b {
+		return u.base.OpenFile(name, flag, perm)
+	}
+	return u.layer.OpenFile(name, flag, perm)
+}
+
+// Open handles the 9 different possibilities caused by the union, namely
+// the cross product of the following states...
+//  layer: doesn't exist, exists as a file, and exists as a directory
+//  base:  doesn't exist, exists as a file, and exists as a directory
+func (u *CopyOnWriteFs) Open(name string) (File, error) {
+	// Since the overlay overrides the base we check that first
+	b, err := u.isBaseFile(name)
+	if err != nil {
+		return nil, err
+	}
+
+	// If overlay doesn't exist, return the base (base state irrelevant)
+	if b {
+		return u.base.Open(name)
+	}
+
+	// If overlay is a file, return it (base state irrelevant)
+	dir, err := IsDir(u.layer, name)
+	if err != nil {
+		return nil, err
+	}
+	if !dir {
+		return u.layer.Open(name)
+	}
+
+	// Overlay is a directory, base state now matters.
+	// Base state has 3 states to check but 2 outcomes:
+	// A. It's a file or non-readable in the base (return just the overlay)
+	// B. It's an accessible directory in the base (return a UnionFile)
+
+	// If base is file or nonreadable, return overlay
+	dir, err = IsDir(u.base, name)
+	if !dir || err != nil {
+		return u.layer.Open(name)
+	}
+
+	// Both base & layer are directories
+	// Return union file (if opens are without error)
+	bfile, bErr := u.base.Open(name)
+	lfile, lErr := u.layer.Open(name)
+
+	// If either have errors at this point something is very wrong. Return nil and the errors
+	if bErr != nil || lErr != nil {
+		return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr)
+	}
+
+	return &UnionFile{Base: bfile, Layer: lfile}, nil
+}
+
+func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error {
+	dir, err := IsDir(u.base, name)
+	if err != nil {
+		return u.layer.MkdirAll(name, perm)
+	}
+	if dir {
+		return syscall.EEXIST
+	}
+	return u.layer.MkdirAll(name, perm)
+}
+
+func (u *CopyOnWriteFs) Name() string {
+	return "CopyOnWriteFs"
+}
+
+func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error {
+	dir, err := IsDir(u.base, name)
+	if err != nil {
+		return u.layer.MkdirAll(name, perm)
+	}
+	if dir {
+		return syscall.EEXIST
+	}
+	return u.layer.MkdirAll(name, perm)
+}
+
+func (u *CopyOnWriteFs) Create(name string) (File, error) {
+	return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666)
+}
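
A sketch of the copy-on-write semantics: the base is additionally wrapped in NewReadOnlyFs (defined elsewhere in the package) so that all mutation really does land in the overlay:

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewMemMapFs()
	afero.WriteFile(base, "/motd", []byte("original"), 0644)

	ufs := afero.NewCopyOnWriteFs(afero.NewReadOnlyFs(base), afero.NewMemMapFs())

	// Writing through the union copies /motd into the overlay first;
	// the base file is left untouched.
	afero.WriteFile(ufs, "/motd", []byte("patched"), 0644)

	over, _ := afero.ReadFile(ufs, "/motd")
	under, _ := afero.ReadFile(base, "/motd")
	fmt.Println(string(over), string(under)) // patched original
}
```
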
diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go
new file mode 100644
index 000000000..c42193688
--- /dev/null
+++ b/vendor/github.com/spf13/afero/httpFs.go
@@ -0,0 +1,110 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"errors"
+	"net/http"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+	"time"
+)
+
+type httpDir struct {
+	basePath string
+	fs       HttpFs
+}
+
+func (d httpDir) Open(name string) (http.File, error) {
+	if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
+		strings.Contains(name, "\x00") {
+		return nil, errors.New("http: invalid character in file path")
+	}
+	dir := string(d.basePath)
+	if dir == "" {
+		dir = "."
+	}
+
+	f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name))))
+	if err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+type HttpFs struct {
+	source Fs
+}
+
+func NewHttpFs(source Fs) *HttpFs {
+	return &HttpFs{source: source}
+}
+
+func (h HttpFs) Dir(s string) *httpDir {
+	return &httpDir{basePath: s, fs: h}
+}
+
+func (h HttpFs) Name() string { return "HttpFs" }
+
+func (h HttpFs) Create(name string) (File, error) {
+	return h.source.Create(name)
+}
+
+func (h HttpFs) Chmod(name string, mode os.FileMode) error {
+	return h.source.Chmod(name, mode)
+}
+
+func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
+	return h.source.Chtimes(name, atime, mtime)
+}
+
+func (h HttpFs) Mkdir(name string, perm os.FileMode) error {
+	return h.source.Mkdir(name, perm)
+}
+
+func (h HttpFs) MkdirAll(path string, perm os.FileMode) error {
+	return h.source.MkdirAll(path, perm)
+}
+
+func (h HttpFs) Open(name string) (http.File, error) {
+	f, err := h.source.Open(name)
+	if err == nil {
+		if httpfile, ok := f.(http.File); ok {
+			return httpfile, nil
+		}
+	}
+	return nil, err
+}
+
+func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	return h.source.OpenFile(name, flag, perm)
+}
+
+func (h HttpFs) Remove(name string) error {
+	return h.source.Remove(name)
+}
+
+func (h HttpFs) RemoveAll(path string) error {
+	return h.source.RemoveAll(path)
+}
+
+func (h HttpFs) Rename(oldname, newname string) error {
+	return h.source.Rename(oldname, newname)
+}
+
+func (h HttpFs) Stat(name string) (os.FileInfo, error) {
+	return h.source.Stat(name)
+}
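
HttpFs exists so that an afero.Fs can be handed to net/http: the Dir helper returns a value satisfying http.FileSystem. A sketch serving an in-memory filesystem (the port is arbitrary):

```go
package main

import (
	"log"
	"net/http"

	"github.com/spf13/afero"
)

func main() {
	memFs := afero.NewMemMapFs()
	afero.WriteFile(memFs, "/index.html", []byte("<h1>hello</h1>"), 0644)

	httpFs := afero.NewHttpFs(memFs)
	http.Handle("/", http.FileServer(httpFs.Dir("/")))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```
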
diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go
new file mode 100644
index 000000000..5c3a3d8ff
--- /dev/null
+++ b/vendor/github.com/spf13/afero/ioutil.go
@@ -0,0 +1,230 @@
+// Copyright ©2015 The Go Authors
+// Copyright ©2015 Steve Francia <spf@spf13.com>
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"bytes"
+	"io"
+	"os"
+	"path/filepath"
+	"sort"
+	"strconv"
+	"sync"
+	"time"
+)
+
+// byName implements sort.Interface.
+type byName []os.FileInfo
+
+func (f byName) Len() int           { return len(f) }
+func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() }
+func (f byName) Swap(i, j int)      { f[i], f[j] = f[j], f[i] }
+
+// ReadDir reads the directory named by dirname and returns
+// a list of sorted directory entries.
+func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) {
+	return ReadDir(a.Fs, dirname)
+}
+
+func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) {
+	f, err := fs.Open(dirname)
+	if err != nil {
+		return nil, err
+	}
+	list, err := f.Readdir(-1)
+	f.Close()
+	if err != nil {
+		return nil, err
+	}
+	sort.Sort(byName(list))
+	return list, nil
+}
+
+// ReadFile reads the file named by filename and returns the contents.
+// A successful call returns err == nil, not err == EOF. Because ReadFile
+// reads the whole file, it does not treat an EOF from Read as an error
+// to be reported.
+func (a Afero) ReadFile(filename string) ([]byte, error) {
+	return ReadFile(a.Fs, filename)
+}
+
+func ReadFile(fs Fs, filename string) ([]byte, error) {
+	f, err := fs.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	// It's a good but not certain bet that FileInfo will tell us exactly how much to
+	// read, so let's try it but be prepared for the answer to be wrong.
+	var n int64
+
+	if fi, err := f.Stat(); err == nil {
+		// Don't preallocate a huge buffer, just in case.
+		if size := fi.Size(); size < 1e9 {
+			n = size
+		}
+	}
+	// As initial capacity for readAll, use n + a little extra in case Size is zero,
+	// and to avoid another allocation after Read has filled the buffer.  The readAll
+	// call will read into its allocated internal buffer cheaply.  If the size was
+	// wrong, we'll either waste some space off the end or reallocate as needed, but
+	// in the overwhelmingly common case we'll get it just right.
+	return readAll(f, n+bytes.MinRead)
+}
+
+// readAll reads from r until an error or EOF and returns the data it read
+// from the internal buffer allocated with a specified capacity.
+func readAll(r io.Reader, capacity int64) (b []byte, err error) {
+	buf := bytes.NewBuffer(make([]byte, 0, capacity))
+	// If the buffer overflows, we will get bytes.ErrTooLarge.
+	// Return that as an error. Any other panic remains.
+	defer func() {
+		e := recover()
+		if e == nil {
+			return
+		}
+		if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge {
+			err = panicErr
+		} else {
+			panic(e)
+		}
+	}()
+	_, err = buf.ReadFrom(r)
+	return buf.Bytes(), err
+}
+
+// ReadAll reads from r until an error or EOF and returns the data it read.
+// A successful call returns err == nil, not err == EOF. Because ReadAll is
+// defined to read from src until EOF, it does not treat an EOF from Read
+// as an error to be reported.
+func ReadAll(r io.Reader) ([]byte, error) {
+	return readAll(r, bytes.MinRead)
+}
+
+// WriteFile writes data to a file named by filename.
+// If the file does not exist, WriteFile creates it with permissions perm;
+// otherwise WriteFile truncates it before writing.
+func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error {
+	return WriteFile(a.Fs, filename, data, perm)
+}
+
+func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error {
+	f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+	if err != nil {
+		return err
+	}
+	n, err := f.Write(data)
+	if err == nil && n < len(data) {
+		err = io.ErrShortWrite
+	}
+	if err1 := f.Close(); err == nil {
+		err = err1
+	}
+	return err
+}
+
+// Random number state.
+// We generate random temporary file names so that there's a good
+// chance the file doesn't exist yet - keeps the number of tries in
+// TempFile to a minimum.
+var rand uint32
+var randmu sync.Mutex
+
+func reseed() uint32 {
+	return uint32(time.Now().UnixNano() + int64(os.Getpid()))
+}
+
+func nextSuffix() string {
+	randmu.Lock()
+	r := rand
+	if r == 0 {
+		r = reseed()
+	}
+	r = r*1664525 + 1013904223 // constants from Numerical Recipes
+	rand = r
+	randmu.Unlock()
+	return strconv.Itoa(int(1e9 + r%1e9))[1:]
+}
+
+// TempFile creates a new temporary file in the directory dir
+// with a name beginning with prefix, opens the file for reading
+// and writing, and returns the resulting *File.
+// If dir is the empty string, TempFile uses the default directory
+// for temporary files (see os.TempDir).
+// Multiple programs calling TempFile simultaneously
+// will not choose the same file.  The caller can use f.Name()
+// to find the pathname of the file.  It is the caller's responsibility
+// to remove the file when no longer needed.
+func (a Afero) TempFile(dir, prefix string) (f File, err error) {
+	return TempFile(a.Fs, dir, prefix)
+}
+
+func TempFile(fs Fs, dir, prefix string) (f File, err error) {
+	if dir == "" {
+		dir = os.TempDir()
+	}
+
+	nconflict := 0
+	for i := 0; i < 10000; i++ {
+		name := filepath.Join(dir, prefix+nextSuffix())
+		f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
+		if os.IsExist(err) {
+			if nconflict++; nconflict > 10 {
+				randmu.Lock()
+				rand = reseed()
+				randmu.Unlock()
+			}
+			continue
+		}
+		break
+	}
+	return
+}
+
+// TempDir creates a new temporary directory in the directory dir
+// with a name beginning with prefix and returns the path of the
+// new directory.  If dir is the empty string, TempDir uses the
+// default directory for temporary files (see os.TempDir).
+// Multiple programs calling TempDir simultaneously
+// will not choose the same directory.  It is the caller's responsibility
+// to remove the directory when no longer needed.
+func (a Afero) TempDir(dir, prefix string) (name string, err error) {
+	return TempDir(a.Fs, dir, prefix)
+}
+func TempDir(fs Fs, dir, prefix string) (name string, err error) {
+	if dir == "" {
+		dir = os.TempDir()
+	}
+
+	nconflict := 0
+	for i := 0; i < 10000; i++ {
+		try := filepath.Join(dir, prefix+nextSuffix())
+		err = fs.Mkdir(try, 0700)
+		if os.IsExist(err) {
+			if nconflict++; nconflict > 10 {
+				randmu.Lock()
+				rand = reseed()
+				randmu.Unlock()
+			}
+			continue
+		}
+		if err == nil {
+			name = try
+		}
+		break
+	}
+	return
+}
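
These helpers mirror the io/ioutil API but run against any Fs, so temporary files can live entirely in memory during tests. A sketch (the prefixes are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()

	dir, err := afero.TempDir(fs, "", "scratch-")
	if err != nil {
		panic(err)
	}
	f, err := afero.TempFile(fs, dir, "session-")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// The name embeds a pseudo-random suffix from nextSuffix(),
	// e.g. /tmp/scratch-123456789/session-987654321.
	fmt.Println("temp file:", f.Name())
}
```
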
diff --git a/vendor/github.com/spf13/afero/lstater.go b/vendor/github.com/spf13/afero/lstater.go
new file mode 100644
index 000000000..89c1bfc0a
--- /dev/null
+++ b/vendor/github.com/spf13/afero/lstater.go
@@ -0,0 +1,27 @@
+// Copyright © 2018 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"os"
+)
+
+// Lstater is an optional interface in Afero. It is only implemented by the
+// filesystems saying so.
+// It will call Lstat if the filesystem itself is, or delegates to, the os
+// filesystem. Otherwise it will call Stat.
+// In addition to the FileInfo, it returns a boolean telling whether Lstat was
+// called or not.
+type Lstater interface {
+	LstatIfPossible(name string) (os.FileInfo, bool, error)
+}
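
Callers discover this optional interface with a type assertion; the OS-backed Fs implements it, while purely simulated ones generally do not. A sketch (assuming /tmp exists, so *nix only):

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	var fs afero.Fs = afero.NewOsFs()
	if lstater, ok := fs.(afero.Lstater); ok {
		fi, usedLstat, err := lstater.LstatIfPossible("/tmp")
		if err == nil {
			fmt.Println(fi.Name(), "inspected via Lstat:", usedLstat)
		}
	}
}
```
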
diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go
new file mode 100644
index 000000000..c18a87fb7
--- /dev/null
+++ b/vendor/github.com/spf13/afero/match.go
@@ -0,0 +1,110 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+// Copyright 2009 The Go Authors. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"path/filepath"
+	"sort"
+	"strings"
+)
+
+// Glob returns the names of all files matching pattern or nil
+// if there is no matching file. The syntax of patterns is the same
+// as in Match. The pattern may describe hierarchical names such as
+// /usr/*/bin/ed (assuming the Separator is '/').
+//
+// Glob ignores file system errors such as I/O errors reading directories.
+// The only possible returned error is ErrBadPattern, when pattern
+// is malformed.
+//
+// This was adapted from (http://golang.org/pkg/path/filepath) and uses several
+// built-ins from that package.
+func Glob(fs Fs, pattern string) (matches []string, err error) {
+	if !hasMeta(pattern) {
+		// Lstat is not supported by all filesystems.
+		if _, err = lstatIfPossible(fs, pattern); err != nil {
+			return nil, nil
+		}
+		return []string{pattern}, nil
+	}
+
+	dir, file := filepath.Split(pattern)
+	switch dir {
+	case "":
+		dir = "."
+	case string(filepath.Separator):
+		// nothing
+	default:
+		dir = dir[0 : len(dir)-1] // chop off trailing separator
+	}
+
+	if !hasMeta(dir) {
+		return glob(fs, dir, file, nil)
+	}
+
+	var m []string
+	m, err = Glob(fs, dir)
+	if err != nil {
+		return
+	}
+	for _, d := range m {
+		matches, err = glob(fs, d, file, matches)
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// glob searches for files matching pattern in the directory dir
+// and appends them to matches. If the directory cannot be
+// opened, it returns the existing matches. New matches are
+// added in lexicographical order.
+func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) {
+	m = matches
+	fi, err := fs.Stat(dir)
+	if err != nil {
+		return
+	}
+	if !fi.IsDir() {
+		return
+	}
+	d, err := fs.Open(dir)
+	if err != nil {
+		return
+	}
+	defer d.Close()
+
+	names, _ := d.Readdirnames(-1)
+	sort.Strings(names)
+
+	for _, n := range names {
+		matched, err := filepath.Match(pattern, n)
+		if err != nil {
+			return m, err
+		}
+		if matched {
+			m = append(m, filepath.Join(dir, n))
+		}
+	}
+	return
+}
+
+// hasMeta reports whether path contains any of the magic characters
+// recognized by Match.
+func hasMeta(path string) bool {
+	// TODO(niemeyer): Should other magic characters be added here?
+	return strings.IndexAny(path, "*?[") >= 0
+}
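
Glob behaves like path/filepath.Glob but walks whichever Fs it is given. A sketch against an in-memory tree (*nix-style separators assumed):

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	for _, name := range []string{"/logs/a.log", "/logs/b.log", "/logs/notes.txt"} {
		afero.WriteFile(fs, name, []byte("x"), 0644)
	}

	matches, err := afero.Glob(fs, "/logs/*.log")
	fmt.Println(matches, err) // [/logs/a.log /logs/b.log] <nil>
}
```
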
diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go
new file mode 100644
index 000000000..e104013f4
--- /dev/null
+++ b/vendor/github.com/spf13/afero/mem/dir.go
@@ -0,0 +1,37 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mem
+
+type Dir interface {
+	Len() int
+	Names() []string
+	Files() []*FileData
+	Add(*FileData)
+	Remove(*FileData)
+}
+
+func RemoveFromMemDir(dir *FileData, f *FileData) {
+	dir.memDir.Remove(f)
+}
+
+func AddToMemDir(dir *FileData, f *FileData) {
+	dir.memDir.Add(f)
+}
+
+func InitializeDir(d *FileData) {
+	if d.memDir == nil {
+		d.dir = true
+		d.memDir = &DirMap{}
+	}
+}
diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go
new file mode 100644
index 000000000..03a57ee5b
--- /dev/null
+++ b/vendor/github.com/spf13/afero/mem/dirmap.go
@@ -0,0 +1,43 @@
+// Copyright © 2015 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mem
+
+import "sort"
+
+type DirMap map[string]*FileData
+
+func (m DirMap) Len() int           { return len(m) }
+func (m DirMap) Add(f *FileData)    { m[f.name] = f }
+func (m DirMap) Remove(f *FileData) { delete(m, f.name) }
+func (m DirMap) Files() (files []*FileData) {
+	for _, f := range m {
+		files = append(files, f)
+	}
+	sort.Sort(filesSorter(files))
+	return files
+}
+
+// implement sort.Interface for []*FileData
+type filesSorter []*FileData
+
+func (s filesSorter) Len() int           { return len(s) }
+func (s filesSorter) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name }
+
+func (m DirMap) Names() (names []string) {
+	for x := range m {
+		names = append(names, x)
+	}
+	return names
+}
diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go
new file mode 100644
index 000000000..7af2fb56f
--- /dev/null
+++ b/vendor/github.com/spf13/afero/mem/file.go
@@ -0,0 +1,317 @@
+// Copyright © 2015 Steve Francia <spf@spf13.com>.
+// Copyright 2013 tsuru authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mem
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"os"
+	"path/filepath"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+const FilePathSeparator = string(filepath.Separator)
+
+type File struct {
+	// atomic requires 64-bit alignment for struct field access
+	at           int64
+	readDirCount int64
+	closed       bool
+	readOnly     bool
+	fileData     *FileData
+}
+
+func NewFileHandle(data *FileData) *File {
+	return &File{fileData: data}
+}
+
+func NewReadOnlyFileHandle(data *FileData) *File {
+	return &File{fileData: data, readOnly: true}
+}
+
+func (f File) Data() *FileData {
+	return f.fileData
+}
+
+type FileData struct {
+	sync.Mutex
+	name    string
+	data    []byte
+	memDir  Dir
+	dir     bool
+	mode    os.FileMode
+	modtime time.Time
+}
+
+func (d *FileData) Name() string {
+	d.Lock()
+	defer d.Unlock()
+	return d.name
+}
+
+func CreateFile(name string) *FileData {
+	return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()}
+}
+
+func CreateDir(name string) *FileData {
+	return &FileData{name: name, memDir: &DirMap{}, dir: true}
+}
+
+func ChangeFileName(f *FileData, newname string) {
+	f.Lock()
+	f.name = newname
+	f.Unlock()
+}
+
+func SetMode(f *FileData, mode os.FileMode) {
+	f.Lock()
+	f.mode = mode
+	f.Unlock()
+}
+
+func SetModTime(f *FileData, mtime time.Time) {
+	f.Lock()
+	setModTime(f, mtime)
+	f.Unlock()
+}
+
+func setModTime(f *FileData, mtime time.Time) {
+	f.modtime = mtime
+}
+
+func GetFileInfo(f *FileData) *FileInfo {
+	return &FileInfo{f}
+}
+
+func (f *File) Open() error {
+	atomic.StoreInt64(&f.at, 0)
+	atomic.StoreInt64(&f.readDirCount, 0)
+	f.fileData.Lock()
+	f.closed = false
+	f.fileData.Unlock()
+	return nil
+}
+
+func (f *File) Close() error {
+	f.fileData.Lock()
+	f.closed = true
+	if !f.readOnly {
+		setModTime(f.fileData, time.Now())
+	}
+	f.fileData.Unlock()
+	return nil
+}
+
+func (f *File) Name() string {
+	return f.fileData.Name()
+}
+
+func (f *File) Stat() (os.FileInfo, error) {
+	return &FileInfo{f.fileData}, nil
+}
+
+func (f *File) Sync() error {
+	return nil
+}
+
+func (f *File) Readdir(count int) (res []os.FileInfo, err error) {
+	if !f.fileData.dir {
+		return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")}
+	}
+	var outLength int64
+
+	f.fileData.Lock()
+	files := f.fileData.memDir.Files()[f.readDirCount:]
+	if count > 0 {
+		if len(files) < count {
+			outLength = int64(len(files))
+		} else {
+			outLength = int64(count)
+		}
+		if len(files) == 0 {
+			err = io.EOF
+		}
+	} else {
+		outLength = int64(len(files))
+	}
+	f.readDirCount += outLength
+	f.fileData.Unlock()
+
+	res = make([]os.FileInfo, outLength)
+	for i := range res {
+		res[i] = &FileInfo{files[i]}
+	}
+
+	return res, err
+}
+
+func (f *File) Readdirnames(n int) (names []string, err error) {
+	fi, err := f.Readdir(n)
+	names = make([]string, len(fi))
+	for i, f := range fi {
+		_, names[i] = filepath.Split(f.Name())
+	}
+	return names, err
+}
+
+func (f *File) Read(b []byte) (n int, err error) {
+	f.fileData.Lock()
+	defer f.fileData.Unlock()
+	if f.closed {
+		return 0, ErrFileClosed
+	}
+	if len(b) > 0 && int(f.at) == len(f.fileData.data) {
+		return 0, io.EOF
+	}
+	if int(f.at) > len(f.fileData.data) {
+		return 0, io.ErrUnexpectedEOF
+	}
+	if len(f.fileData.data)-int(f.at) >= len(b) {
+		n = len(b)
+	} else {
+		n = len(f.fileData.data) - int(f.at)
+	}
+	copy(b, f.fileData.data[f.at:f.at+int64(n)])
+	atomic.AddInt64(&f.at, int64(n))
+	return
+}
+
+func (f *File) ReadAt(b []byte, off int64) (n int, err error) {
+	atomic.StoreInt64(&f.at, off)
+	return f.Read(b)
+}
+
+func (f *File) Truncate(size int64) error {
+	if f.closed {
+		return ErrFileClosed
+	}
+	if f.readOnly {
+		return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")}
+	}
+	if size < 0 {
+		return ErrOutOfRange
+	}
+	if size > int64(len(f.fileData.data)) {
+		diff := size - int64(len(f.fileData.data))
+		f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...)
+	} else {
+		f.fileData.data = f.fileData.data[0:size]
+	}
+	setModTime(f.fileData, time.Now())
+	return nil
+}
+
+func (f *File) Seek(offset int64, whence int) (int64, error) {
+	if f.closed {
+		return 0, ErrFileClosed
+	}
+	switch whence {
+	case 0:
+		atomic.StoreInt64(&f.at, offset)
+	case 1:
+		atomic.AddInt64(&f.at, int64(offset))
+	case 2:
+		atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset)
+	}
+	return f.at, nil
+}
+
+func (f *File) Write(b []byte) (n int, err error) {
+	if f.readOnly {
+		return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")}
+	}
+	n = len(b)
+	cur := atomic.LoadInt64(&f.at)
+	f.fileData.Lock()
+	defer f.fileData.Unlock()
+	diff := cur - int64(len(f.fileData.data))
+	var tail []byte
+	if n+int(cur) < len(f.fileData.data) {
+		tail = f.fileData.data[n+int(cur):]
+	}
+	if diff > 0 {
+		// grow the existing data with zero padding up to cur, then append b;
+		// replacing the slice outright here would drop the file's prior contents
+		f.fileData.data = append(f.fileData.data, append(bytes.Repeat([]byte{00}, int(diff)), b...)...)
+		f.fileData.data = append(f.fileData.data, tail...)
+	} else {
+		f.fileData.data = append(f.fileData.data[:cur], b...)
+		f.fileData.data = append(f.fileData.data, tail...)
+	}
+	setModTime(f.fileData, time.Now())
+
+	atomic.StoreInt64(&f.at, int64(len(f.fileData.data)))
+	return
+}
+
+func (f *File) WriteAt(b []byte, off int64) (n int, err error) {
+	atomic.StoreInt64(&f.at, off)
+	return f.Write(b)
+}
+
+func (f *File) WriteString(s string) (ret int, err error) {
+	return f.Write([]byte(s))
+}
+
+func (f *File) Info() *FileInfo {
+	return &FileInfo{f.fileData}
+}
+
+type FileInfo struct {
+	*FileData
+}
+
+// Implements os.FileInfo
+func (s *FileInfo) Name() string {
+	s.Lock()
+	_, name := filepath.Split(s.name)
+	s.Unlock()
+	return name
+}
+func (s *FileInfo) Mode() os.FileMode {
+	s.Lock()
+	defer s.Unlock()
+	return s.mode
+}
+func (s *FileInfo) ModTime() time.Time {
+	s.Lock()
+	defer s.Unlock()
+	return s.modtime
+}
+func (s *FileInfo) IsDir() bool {
+	s.Lock()
+	defer s.Unlock()
+	return s.dir
+}
+func (s *FileInfo) Sys() interface{} { return nil }
+func (s *FileInfo) Size() int64 {
+	if s.IsDir() {
+		// directories report an arbitrary fixed size
+		return int64(42)
+	}
+	s.Lock()
+	defer s.Unlock()
+	return int64(len(s.data))
+}
+
+var (
+	ErrFileClosed        = errors.New("File is closed")
+	ErrOutOfRange        = errors.New("Out of range")
+	ErrTooLarge          = errors.New("Too large")
+	ErrFileNotFound      = os.ErrNotExist
+	ErrFileExists        = os.ErrExist
+	ErrDestinationExists = os.ErrExist
+)
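
The split between FileData (the shared contents) and File (per-handle state such as the read offset and the read-only flag) is the heart of this file. A minimal sketch using only the exported helpers above:

package main

import (
	"fmt"

	"github.com/spf13/afero/mem"
)

func main() {
	// One FileData can back many handles; offsets live in the handle.
	data := mem.CreateFile("/tmp/demo")

	w := mem.NewFileHandle(data)
	w.WriteString("hello")

	r := mem.NewReadOnlyFileHandle(data)
	buf := make([]byte, 5)
	n, _ := r.Read(buf)
	fmt.Println(string(buf[:n])) // hello

	if _, err := r.Write([]byte("x")); err != nil {
		fmt.Println(err) // write /tmp/demo: file handle is read only
	}
}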
diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go
new file mode 100644
index 000000000..09498e70f
--- /dev/null
+++ b/vendor/github.com/spf13/afero/memmap.go
@@ -0,0 +1,365 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/spf13/afero/mem"
+)
+
+type MemMapFs struct {
+	mu   sync.RWMutex
+	data map[string]*mem.FileData
+	init sync.Once
+}
+
+func NewMemMapFs() Fs {
+	return &MemMapFs{}
+}
+
+func (m *MemMapFs) getData() map[string]*mem.FileData {
+	m.init.Do(func() {
+		m.data = make(map[string]*mem.FileData)
+		// Root should always exist, right?
+		// TODO: what about windows?
+		m.data[FilePathSeparator] = mem.CreateDir(FilePathSeparator)
+	})
+	return m.data
+}
+
+func (*MemMapFs) Name() string { return "MemMapFS" }
+
+func (m *MemMapFs) Create(name string) (File, error) {
+	name = normalizePath(name)
+	m.mu.Lock()
+	file := mem.CreateFile(name)
+	m.getData()[name] = file
+	m.registerWithParent(file)
+	m.mu.Unlock()
+	return mem.NewFileHandle(file), nil
+}
+
+func (m *MemMapFs) unRegisterWithParent(fileName string) error {
+	f, err := m.lockfreeOpen(fileName)
+	if err != nil {
+		return err
+	}
+	parent := m.findParent(f)
+	if parent == nil {
+		log.Panic("parent of ", f.Name(), " is nil")
+	}
+
+	parent.Lock()
+	mem.RemoveFromMemDir(parent, f)
+	parent.Unlock()
+	return nil
+}
+
+func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData {
+	pdir, _ := filepath.Split(f.Name())
+	pdir = filepath.Clean(pdir)
+	pfile, err := m.lockfreeOpen(pdir)
+	if err != nil {
+		return nil
+	}
+	return pfile
+}
+
+func (m *MemMapFs) registerWithParent(f *mem.FileData) {
+	if f == nil {
+		return
+	}
+	parent := m.findParent(f)
+	if parent == nil {
+		pdir := filepath.Dir(filepath.Clean(f.Name()))
+		err := m.lockfreeMkdir(pdir, 0777)
+		if err != nil {
+			//log.Println("Mkdir error:", err)
+			return
+		}
+		parent, err = m.lockfreeOpen(pdir)
+		if err != nil {
+			//log.Println("Open after Mkdir error:", err)
+			return
+		}
+	}
+
+	parent.Lock()
+	mem.InitializeDir(parent)
+	mem.AddToMemDir(parent, f)
+	parent.Unlock()
+}
+
+func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error {
+	name = normalizePath(name)
+	x, ok := m.getData()[name]
+	if ok {
+		// Only return ErrFileExists if it's a file, not a directory.
+		i := mem.FileInfo{FileData: x}
+		if !i.IsDir() {
+			return ErrFileExists
+		}
+	} else {
+		item := mem.CreateDir(name)
+		m.getData()[name] = item
+		m.registerWithParent(item)
+	}
+	return nil
+}
+
+func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error {
+	name = normalizePath(name)
+
+	m.mu.RLock()
+	_, ok := m.getData()[name]
+	m.mu.RUnlock()
+	if ok {
+		return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists}
+	}
+
+	m.mu.Lock()
+	item := mem.CreateDir(name)
+	m.getData()[name] = item
+	m.registerWithParent(item)
+	m.mu.Unlock()
+
+	m.Chmod(name, perm|os.ModeDir)
+
+	return nil
+}
+
+func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error {
+	err := m.Mkdir(path, perm)
+	if err != nil {
+		if err.(*os.PathError).Err == ErrFileExists {
+			return nil
+		}
+		return err
+	}
+	return nil
+}
+
+// Handle some relative paths
+func normalizePath(path string) string {
+	path = filepath.Clean(path)
+
+	switch path {
+	case ".":
+		return FilePathSeparator
+	case "..":
+		return FilePathSeparator
+	default:
+		return path
+	}
+}
+
+func (m *MemMapFs) Open(name string) (File, error) {
+	f, err := m.open(name)
+	if f != nil {
+		return mem.NewReadOnlyFileHandle(f), err
+	}
+	return nil, err
+}
+
+func (m *MemMapFs) openWrite(name string) (File, error) {
+	f, err := m.open(name)
+	if f != nil {
+		return mem.NewFileHandle(f), err
+	}
+	return nil, err
+}
+
+func (m *MemMapFs) open(name string) (*mem.FileData, error) {
+	name = normalizePath(name)
+
+	m.mu.RLock()
+	f, ok := m.getData()[name]
+	m.mu.RUnlock()
+	if !ok {
+		return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound}
+	}
+	return f, nil
+}
+
+func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) {
+	name = normalizePath(name)
+	f, ok := m.getData()[name]
+	if ok {
+		return f, nil
+	} else {
+		return nil, ErrFileNotFound
+	}
+}
+
+func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	chmod := false
+	file, err := m.openWrite(name)
+	if os.IsNotExist(err) && (flag&os.O_CREATE > 0) {
+		file, err = m.Create(name)
+		chmod = true
+	}
+	if err != nil {
+		return nil, err
+	}
+	if flag == os.O_RDONLY {
+		file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data())
+	}
+	if flag&os.O_APPEND > 0 {
+		_, err = file.Seek(0, os.SEEK_END)
+		if err != nil {
+			file.Close()
+			return nil, err
+		}
+	}
+	if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 {
+		err = file.Truncate(0)
+		if err != nil {
+			file.Close()
+			return nil, err
+		}
+	}
+	if chmod {
+		m.Chmod(name, perm)
+	}
+	return file, nil
+}
+
+func (m *MemMapFs) Remove(name string) error {
+	name = normalizePath(name)
+
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	if _, ok := m.getData()[name]; ok {
+		err := m.unRegisterWithParent(name)
+		if err != nil {
+			return &os.PathError{Op: "remove", Path: name, Err: err}
+		}
+		delete(m.getData(), name)
+	} else {
+		return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist}
+	}
+	return nil
+}
+
+func (m *MemMapFs) RemoveAll(path string) error {
+	path = normalizePath(path)
+	m.mu.Lock()
+	m.unRegisterWithParent(path)
+	m.mu.Unlock()
+
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+
+	for p := range m.getData() {
+		if strings.HasPrefix(p, path) {
+			m.mu.RUnlock()
+			m.mu.Lock()
+			delete(m.getData(), p)
+			m.mu.Unlock()
+			m.mu.RLock()
+		}
+	}
+	return nil
+}
+
+func (m *MemMapFs) Rename(oldname, newname string) error {
+	oldname = normalizePath(oldname)
+	newname = normalizePath(newname)
+
+	if oldname == newname {
+		return nil
+	}
+
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+	if _, ok := m.getData()[oldname]; ok {
+		m.mu.RUnlock()
+		m.mu.Lock()
+		m.unRegisterWithParent(oldname)
+		fileData := m.getData()[oldname]
+		delete(m.getData(), oldname)
+		mem.ChangeFileName(fileData, newname)
+		m.getData()[newname] = fileData
+		m.registerWithParent(fileData)
+		m.mu.Unlock()
+		m.mu.RLock()
+	} else {
+		return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound}
+	}
+	return nil
+}
+
+func (m *MemMapFs) Stat(name string) (os.FileInfo, error) {
+	f, err := m.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	fi := mem.GetFileInfo(f.(*mem.File).Data())
+	return fi, nil
+}
+
+func (m *MemMapFs) Chmod(name string, mode os.FileMode) error {
+	name = normalizePath(name)
+
+	m.mu.RLock()
+	f, ok := m.getData()[name]
+	m.mu.RUnlock()
+	if !ok {
+		return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound}
+	}
+
+	m.mu.Lock()
+	mem.SetMode(f, mode)
+	m.mu.Unlock()
+
+	return nil
+}
+
+func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
+	name = normalizePath(name)
+
+	m.mu.RLock()
+	f, ok := m.getData()[name]
+	m.mu.RUnlock()
+	if !ok {
+		return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound}
+	}
+
+	m.mu.Lock()
+	mem.SetModTime(f, mtime)
+	m.mu.Unlock()
+
+	return nil
+}
+
+func (m *MemMapFs) List() {
+	for _, x := range m.data {
+		y := mem.FileInfo{FileData: x}
+		fmt.Println(x.Name(), y.Size())
+	}
+}
+
+// func debugMemMapList(fs Fs) {
+// 	if x, ok := fs.(*MemMapFs); ok {
+// 		x.List()
+// 	}
+// }
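
MemMapFs keeps every file in a single map keyed by normalized path, with parent directories created and linked on demand. A minimal usage sketch (handy for tests, since nothing touches the disk):

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	if err := fs.MkdirAll("/srv/data", 0755); err != nil {
		panic(err)
	}
	f, _ := fs.Create("/srv/data/hello.txt")
	f.WriteString("hi")
	f.Close()

	fi, _ := fs.Stat("/srv/data/hello.txt")
	fmt.Println(fi.Name(), fi.Size()) // hello.txt 2
}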
diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go
new file mode 100644
index 000000000..13cc1b84c
--- /dev/null
+++ b/vendor/github.com/spf13/afero/os.go
@@ -0,0 +1,101 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+// Copyright 2013 tsuru authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"os"
+	"time"
+)
+
+var _ Lstater = (*OsFs)(nil)
+
+// OsFs is a Fs implementation that uses functions provided by the os package.
+//
+// For details in any method, check the documentation of the os package
+// (http://golang.org/pkg/os/).
+type OsFs struct{}
+
+func NewOsFs() Fs {
+	return &OsFs{}
+}
+
+func (OsFs) Name() string { return "OsFs" }
+
+func (OsFs) Create(name string) (File, error) {
+	f, e := os.Create(name)
+	if f == nil {
+		// return an untyped nil rather than a nil *os.File: a nil *os.File
+		// stored in the File interface would not compare equal to nil
+		return nil, e
+	}
+	return f, e
+}
+
+func (OsFs) Mkdir(name string, perm os.FileMode) error {
+	return os.Mkdir(name, perm)
+}
+
+func (OsFs) MkdirAll(path string, perm os.FileMode) error {
+	return os.MkdirAll(path, perm)
+}
+
+func (OsFs) Open(name string) (File, error) {
+	f, e := os.Open(name)
+	if f == nil {
+		// return an untyped nil rather than a nil *os.File: a nil *os.File
+		// stored in the File interface would not compare equal to nil
+		return nil, e
+	}
+	return f, e
+}
+
+func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	f, e := os.OpenFile(name, flag, perm)
+	if f == nil {
+		// return an untyped nil rather than a nil *os.File: a nil *os.File
+		// stored in the File interface would not compare equal to nil
+		return nil, e
+	}
+	return f, e
+}
+
+func (OsFs) Remove(name string) error {
+	return os.Remove(name)
+}
+
+func (OsFs) RemoveAll(path string) error {
+	return os.RemoveAll(path)
+}
+
+func (OsFs) Rename(oldname, newname string) error {
+	return os.Rename(oldname, newname)
+}
+
+func (OsFs) Stat(name string) (os.FileInfo, error) {
+	return os.Stat(name)
+}
+
+func (OsFs) Chmod(name string, mode os.FileMode) error {
+	return os.Chmod(name, mode)
+}
+
+func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
+	return os.Chtimes(name, atime, mtime)
+}
+
+func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
+	fi, err := os.Lstat(name)
+	return fi, true, err
+}
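
Because OsFs just delegates to the os package, code written against the Fs interface runs unchanged on the real disk or in memory. A small sketch of that interchangeability:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/afero"
)

// stat works against any backend; the caller doesn't care whether
// the Fs is the real disk (OsFs) or an in-memory map.
func stat(fs afero.Fs, name string) {
	fi, err := fs.Stat(name)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(fi.Name(), fi.IsDir())
}

func main() {
	stat(afero.NewOsFs(), os.TempDir())
	stat(afero.NewMemMapFs(), "/missing") // open /missing: file does not exist
}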
diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go
new file mode 100644
index 000000000..18f60a0f6
--- /dev/null
+++ b/vendor/github.com/spf13/afero/path.go
@@ -0,0 +1,106 @@
+// Copyright ©2015 The Go Authors
+// Copyright ©2015 Steve Francia <spf@spf13.com>
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"os"
+	"path/filepath"
+	"sort"
+)
+
+// readDirNames reads the directory named by dirname and returns
+// a sorted list of directory entries.
+// adapted from https://golang.org/src/path/filepath/path.go
+func readDirNames(fs Fs, dirname string) ([]string, error) {
+	f, err := fs.Open(dirname)
+	if err != nil {
+		return nil, err
+	}
+	names, err := f.Readdirnames(-1)
+	f.Close()
+	if err != nil {
+		return nil, err
+	}
+	sort.Strings(names)
+	return names, nil
+}
+
+// walk recursively descends path, calling walkFn
+// adapted from https://golang.org/src/path/filepath/path.go
+func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
+	err := walkFn(path, info, nil)
+	if err != nil {
+		if info.IsDir() && err == filepath.SkipDir {
+			return nil
+		}
+		return err
+	}
+
+	if !info.IsDir() {
+		return nil
+	}
+
+	names, err := readDirNames(fs, path)
+	if err != nil {
+		return walkFn(path, info, err)
+	}
+
+	for _, name := range names {
+		filename := filepath.Join(path, name)
+		fileInfo, err := lstatIfPossible(fs, filename)
+		if err != nil {
+			if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
+				return err
+			}
+		} else {
+			err = walk(fs, filename, fileInfo, walkFn)
+			if err != nil {
+				if !fileInfo.IsDir() || err != filepath.SkipDir {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// if the filesystem supports it, use Lstat, else use fs.Stat
+func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) {
+	if lfs, ok := fs.(Lstater); ok {
+		fi, _, err := lfs.LstatIfPossible(path)
+		return fi, err
+	}
+	return fs.Stat(path)
+}
+
+// Walk walks the file tree rooted at root, calling walkFn for each file or
+// directory in the tree, including root. All errors that arise visiting files
+// and directories are filtered by walkFn. The files are walked in lexical
+// order, which makes the output deterministic but means that for very
+// large directories Walk can be inefficient.
+// Walk does not follow symbolic links.
+func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error {
+	return Walk(a.Fs, root, walkFn)
+}
+
+func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error {
+	info, err := lstatIfPossible(fs, root)
+	if err != nil {
+		return walkFn(root, nil, err)
+	}
+	return walk(fs, root, info, walkFn)
+}
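
A minimal sketch of the Walk entry points above, run over an in-memory tree:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	fs.MkdirAll("/a/b", 0755)
	fs.Create("/a/b/c.txt")

	// Visits /, /a, /a/b and /a/b/c.txt in lexical order;
	// returning filepath.SkipDir from walkFn prunes a directory.
	afero.Walk(fs, "/", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path, info.IsDir())
		return nil
	})
}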
diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go
new file mode 100644
index 000000000..c6376ec37
--- /dev/null
+++ b/vendor/github.com/spf13/afero/readonlyfs.go
@@ -0,0 +1,80 @@
+package afero
+
+import (
+	"os"
+	"syscall"
+	"time"
+)
+
+var _ Lstater = (*ReadOnlyFs)(nil)
+
+type ReadOnlyFs struct {
+	source Fs
+}
+
+func NewReadOnlyFs(source Fs) Fs {
+	return &ReadOnlyFs{source: source}
+}
+
+func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) {
+	return ReadDir(r.source, name)
+}
+
+func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Name() string {
+	return "ReadOnlyFilter"
+}
+
+func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) {
+	return r.source.Stat(name)
+}
+
+func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
+	if lsf, ok := r.source.(Lstater); ok {
+		return lsf.LstatIfPossible(name)
+	}
+	fi, err := r.Stat(name)
+	return fi, false, err
+}
+
+func (r *ReadOnlyFs) Rename(o, n string) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) RemoveAll(p string) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Remove(n string) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
+		return nil, syscall.EPERM
+	}
+	return r.source.OpenFile(name, flag, perm)
+}
+
+func (r *ReadOnlyFs) Open(n string) (File, error) {
+	return r.source.Open(n)
+}
+
+func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Create(n string) (File, error) {
+	return nil, syscall.EPERM
+}
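
ReadOnlyFs passes reads through to its source and rejects every mutating call with syscall.EPERM. A minimal sketch:

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewMemMapFs()
	f, _ := base.Create("/config.yml")
	f.Close()

	ro := afero.NewReadOnlyFs(base)

	if _, err := ro.Open("/config.yml"); err == nil {
		fmt.Println("reads pass through to the source")
	}
	if _, err := ro.Create("/new.yml"); err != nil {
		fmt.Println(err) // operation not permitted (syscall.EPERM)
	}
}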
diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go
new file mode 100644
index 000000000..9d92dbc05
--- /dev/null
+++ b/vendor/github.com/spf13/afero/regexpfs.go
@@ -0,0 +1,214 @@
+package afero
+
+import (
+	"os"
+	"regexp"
+	"syscall"
+	"time"
+)
+
+// RegexpFs filters files (not directories) by regular expression. Only
+// files matching the given regexp are allowed; all others get an ENOENT
+// ("no such file or directory") error.
+type RegexpFs struct {
+	re     *regexp.Regexp
+	source Fs
+}
+
+func NewRegexpFs(source Fs, re *regexp.Regexp) Fs {
+	return &RegexpFs{source: source, re: re}
+}
+
+type RegexpFile struct {
+	f  File
+	re *regexp.Regexp
+}
+
+func (r *RegexpFs) matchesName(name string) error {
+	if r.re == nil {
+		return nil
+	}
+	if r.re.MatchString(name) {
+		return nil
+	}
+	return syscall.ENOENT
+}
+
+func (r *RegexpFs) dirOrMatches(name string) error {
+	dir, err := IsDir(r.source, name)
+	if err != nil {
+		return err
+	}
+	if dir {
+		return nil
+	}
+	return r.matchesName(name)
+}
+
+func (r *RegexpFs) Chtimes(name string, a, m time.Time) error {
+	if err := r.dirOrMatches(name); err != nil {
+		return err
+	}
+	return r.source.Chtimes(name, a, m)
+}
+
+func (r *RegexpFs) Chmod(name string, mode os.FileMode) error {
+	if err := r.dirOrMatches(name); err != nil {
+		return err
+	}
+	return r.source.Chmod(name, mode)
+}
+
+func (r *RegexpFs) Name() string {
+	return "RegexpFs"
+}
+
+func (r *RegexpFs) Stat(name string) (os.FileInfo, error) {
+	if err := r.dirOrMatches(name); err != nil {
+		return nil, err
+	}
+	return r.source.Stat(name)
+}
+
+func (r *RegexpFs) Rename(oldname, newname string) error {
+	dir, err := IsDir(r.source, oldname)
+	if err != nil {
+		return err
+	}
+	if dir {
+		return nil
+	}
+	if err := r.matchesName(oldname); err != nil {
+		return err
+	}
+	if err := r.matchesName(newname); err != nil {
+		return err
+	}
+	return r.source.Rename(oldname, newname)
+}
+
+func (r *RegexpFs) RemoveAll(p string) error {
+	dir, err := IsDir(r.source, p)
+	if err != nil {
+		return err
+	}
+	if !dir {
+		if err := r.matchesName(p); err != nil {
+			return err
+		}
+	}
+	return r.source.RemoveAll(p)
+}
+
+func (r *RegexpFs) Remove(name string) error {
+	if err := r.dirOrMatches(name); err != nil {
+		return err
+	}
+	return r.source.Remove(name)
+}
+
+func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	if err := r.dirOrMatches(name); err != nil {
+		return nil, err
+	}
+	return r.source.OpenFile(name, flag, perm)
+}
+
+func (r *RegexpFs) Open(name string) (File, error) {
+	dir, err := IsDir(r.source, name)
+	if err != nil {
+		return nil, err
+	}
+	if !dir {
+		if err := r.matchesName(name); err != nil {
+			return nil, err
+		}
+	}
+	f, err := r.source.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	return &RegexpFile{f: f, re: r.re}, nil
+}
+
+func (r *RegexpFs) Mkdir(n string, p os.FileMode) error {
+	return r.source.Mkdir(n, p)
+}
+
+func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error {
+	return r.source.MkdirAll(n, p)
+}
+
+func (r *RegexpFs) Create(name string) (File, error) {
+	if err := r.matchesName(name); err != nil {
+		return nil, err
+	}
+	return r.source.Create(name)
+}
+
+func (f *RegexpFile) Close() error {
+	return f.f.Close()
+}
+
+func (f *RegexpFile) Read(s []byte) (int, error) {
+	return f.f.Read(s)
+}
+
+func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) {
+	return f.f.ReadAt(s, o)
+}
+
+func (f *RegexpFile) Seek(o int64, w int) (int64, error) {
+	return f.f.Seek(o, w)
+}
+
+func (f *RegexpFile) Write(s []byte) (int, error) {
+	return f.f.Write(s)
+}
+
+func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) {
+	return f.f.WriteAt(s, o)
+}
+
+func (f *RegexpFile) Name() string {
+	return f.f.Name()
+}
+
+func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) {
+	var rfi []os.FileInfo
+	rfi, err = f.f.Readdir(c)
+	if err != nil {
+		return nil, err
+	}
+	for _, i := range rfi {
+		if i.IsDir() || f.re.MatchString(i.Name()) {
+			fi = append(fi, i)
+		}
+	}
+	return fi, nil
+}
+
+func (f *RegexpFile) Readdirnames(c int) (n []string, err error) {
+	fi, err := f.Readdir(c)
+	if err != nil {
+		return nil, err
+	}
+	for _, s := range fi {
+		n = append(n, s.Name())
+	}
+	return n, nil
+}
+
+func (f *RegexpFile) Stat() (os.FileInfo, error) {
+	return f.f.Stat()
+}
+
+func (f *RegexpFile) Sync() error {
+	return f.f.Sync()
+}
+
+func (f *RegexpFile) Truncate(s int64) error {
+	return f.f.Truncate(s)
+}
+
+func (f *RegexpFile) WriteString(s string) (int, error) {
+	return f.f.WriteString(s)
+}
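
A minimal sketch of the filter in action: only names matching the regexp pass through, everything else fails with ENOENT:

package main

import (
	"fmt"
	"regexp"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewMemMapFs()
	fs := afero.NewRegexpFs(base, regexp.MustCompile(`\.txt$`))

	if _, err := fs.Create("/notes.txt"); err == nil {
		fmt.Println("matching name allowed")
	}
	if _, err := fs.Create("/image.png"); err != nil {
		fmt.Println(err) // no such file or directory (syscall.ENOENT)
	}
}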
diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go
new file mode 100644
index 000000000..1e78f7d1e
--- /dev/null
+++ b/vendor/github.com/spf13/afero/unionFile.go
@@ -0,0 +1,305 @@
+package afero
+
+import (
+	"io"
+	"os"
+	"path/filepath"
+	"syscall"
+)
+
+// The UnionFile implements the afero.File interface and will be returned
+// when reading a directory that is present in at least the overlay, or when
+// opening a file for writing.
+//
+// The calls to
+// Readdir() and Readdirnames() merge the file os.FileInfo / names from the
+// base and the overlay - for files present in both layers, only those
+// from the overlay will be used.
+//
+// When opening files for writing (Create() / OpenFile() with the right flags)
+// the operations will be done in both layers, starting with the overlay. A
+// successful read in the overlay will move the cursor position in the base layer
+// by the number of bytes read.
+type UnionFile struct {
+	Base   File
+	Layer  File
+	Merger DirsMerger
+	off    int
+	files  []os.FileInfo
+}
+
+func (f *UnionFile) Close() error {
+	// first close base, so we have a newer timestamp in the overlay. If we'd close
+	// the overlay first, we'd get a cacheStale the next time we access this file
+	// -> cache would be useless ;-)
+	if f.Base != nil {
+		f.Base.Close()
+	}
+	if f.Layer != nil {
+		return f.Layer.Close()
+	}
+	return BADFD
+}
+
+func (f *UnionFile) Read(s []byte) (int, error) {
+	if f.Layer != nil {
+		n, err := f.Layer.Read(s)
+		if (err == nil || err == io.EOF) && f.Base != nil {
+			// advance the file position also in the base file, the next
+			// call may be a write at this position (or a seek with SEEK_CUR)
+			if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil {
+				// only overwrite err in case the seek fails: we need to
+				// report an eventual io.EOF to the caller
+				err = seekErr
+			}
+		}
+		return n, err
+	}
+	if f.Base != nil {
+		return f.Base.Read(s)
+	}
+	return 0, BADFD
+}
+
+func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) {
+	if f.Layer != nil {
+		n, err := f.Layer.ReadAt(s, o)
+		if (err == nil || err == io.EOF) && f.Base != nil {
+			_, err = f.Base.Seek(o+int64(n), os.SEEK_SET)
+		}
+		return n, err
+	}
+	if f.Base != nil {
+		return f.Base.ReadAt(s, o)
+	}
+	return 0, BADFD
+}
+
+func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) {
+	if f.Layer != nil {
+		pos, err = f.Layer.Seek(o, w)
+		if (err == nil || err == io.EOF) && f.Base != nil {
+			_, err = f.Base.Seek(o, w)
+		}
+		return pos, err
+	}
+	if f.Base != nil {
+		return f.Base.Seek(o, w)
+	}
+	return 0, BADFD
+}
+
+func (f *UnionFile) Write(s []byte) (n int, err error) {
+	if f.Layer != nil {
+		n, err = f.Layer.Write(s)
+		if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark?
+			_, err = f.Base.Write(s)
+		}
+		return n, err
+	}
+	if f.Base != nil {
+		return f.Base.Write(s)
+	}
+	return 0, BADFD
+}
+
+func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) {
+	if f.Layer != nil {
+		n, err = f.Layer.WriteAt(s, o)
+		if err == nil && f.Base != nil {
+			_, err = f.Base.WriteAt(s, o)
+		}
+		return n, err
+	}
+	if f.Base != nil {
+		return f.Base.WriteAt(s, o)
+	}
+	return 0, BADFD
+}
+
+func (f *UnionFile) Name() string {
+	if f.Layer != nil {
+		return f.Layer.Name()
+	}
+	return f.Base.Name()
+}
+
+// DirsMerger is how UnionFile weaves two directories together.
+// It takes the FileInfo slices from the layer and the base and returns a
+// single view.
+type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error)
+
+var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) {
+	var files = make(map[string]os.FileInfo)
+
+	for _, fi := range lofi {
+		files[fi.Name()] = fi
+	}
+
+	for _, fi := range bofi {
+		if _, exists := files[fi.Name()]; !exists {
+			files[fi.Name()] = fi
+		}
+	}
+
+	rfi := make([]os.FileInfo, len(files))
+
+	i := 0
+	for _, fi := range files {
+		rfi[i] = fi
+		i++
+	}
+
+	return rfi, nil
+}
+
+// Readdir will weave the two directories together and
+// return a single view of the overlayed directories
+func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) {
+	var merge DirsMerger = f.Merger
+	if merge == nil {
+		merge = defaultUnionMergeDirsFn
+	}
+
+	if f.off == 0 {
+		var lfi []os.FileInfo
+		if f.Layer != nil {
+			lfi, err = f.Layer.Readdir(-1)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		var bfi []os.FileInfo
+		if f.Base != nil {
+			bfi, err = f.Base.Readdir(-1)
+			if err != nil {
+				return nil, err
+			}
+		}
+		merged, err := merge(lfi, bfi)
+		if err != nil {
+			return nil, err
+		}
+		f.files = append(f.files, merged...)
+	}
+	if c <= 0 {
+		return f.files[f.off:], nil
+	}
+	if f.off+c > len(f.files) {
+		c = len(f.files) - f.off
+	}
+	defer func() { f.off += c }()
+	return f.files[f.off : f.off+c], nil
+}
+
+func (f *UnionFile) Readdirnames(c int) ([]string, error) {
+	rfi, err := f.Readdir(c)
+	if err != nil {
+		return nil, err
+	}
+	var names []string
+	for _, fi := range rfi {
+		names = append(names, fi.Name())
+	}
+	return names, nil
+}
+
+func (f *UnionFile) Stat() (os.FileInfo, error) {
+	if f.Layer != nil {
+		return f.Layer.Stat()
+	}
+	if f.Base != nil {
+		return f.Base.Stat()
+	}
+	return nil, BADFD
+}
+
+func (f *UnionFile) Sync() (err error) {
+	if f.Layer != nil {
+		err = f.Layer.Sync()
+		if err == nil && f.Base != nil {
+			err = f.Base.Sync()
+		}
+		return err
+	}
+	if f.Base != nil {
+		return f.Base.Sync()
+	}
+	return BADFD
+}
+
+func (f *UnionFile) Truncate(s int64) (err error) {
+	if f.Layer != nil {
+		err = f.Layer.Truncate(s)
+		if err == nil && f.Base != nil {
+			err = f.Base.Truncate(s)
+		}
+		return err
+	}
+	if f.Base != nil {
+		return f.Base.Truncate(s)
+	}
+	return BADFD
+}
+
+func (f *UnionFile) WriteString(s string) (n int, err error) {
+	if f.Layer != nil {
+		n, err = f.Layer.WriteString(s)
+		if err == nil && f.Base != nil {
+			_, err = f.Base.WriteString(s)
+		}
+		return n, err
+	}
+	if f.Base != nil {
+		return f.Base.WriteString(s)
+	}
+	return 0, BADFD
+}
+
+func copyToLayer(base Fs, layer Fs, name string) error {
+	bfh, err := base.Open(name)
+	if err != nil {
+		return err
+	}
+	defer bfh.Close()
+
+	// First make sure the directory exists
+	exists, err := Exists(layer, filepath.Dir(name))
+	if err != nil {
+		return err
+	}
+	if !exists {
+		err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME?
+		if err != nil {
+			return err
+		}
+	}
+
+	// Create the file on the overlay
+	lfh, err := layer.Create(name)
+	if err != nil {
+		return err
+	}
+	n, err := io.Copy(lfh, bfh)
+	if err != nil {
+		// If anything fails, clean up the file
+		layer.Remove(name)
+		lfh.Close()
+		return err
+	}
+
+	bfi, err := bfh.Stat()
+	if err != nil || bfi.Size() != n {
+		layer.Remove(name)
+		lfh.Close()
+		return syscall.EIO
+	}
+
+	err = lfh.Close()
+	if err != nil {
+		layer.Remove(name)
+		lfh.Close()
+		return err
+	}
+	return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime())
+}
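
A minimal sketch of the merged directory view, building a UnionFile by hand from two in-memory layers (the higher-level copy-on-write filesystems construct these the same way):

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewMemMapFs()
	layer := afero.NewMemMapFs()
	base.Create("/dir/base.txt")
	layer.Create("/dir/layer.txt")

	bf, _ := base.Open("/dir")
	lf, _ := layer.Open("/dir")

	// Names present in either layer appear once; for files present
	// in both, the overlay's entry wins.
	u := &afero.UnionFile{Base: bf, Layer: lf}
	names, _ := u.Readdirnames(-1)
	fmt.Println(names) // [base.txt layer.txt] (order not guaranteed)
}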
diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go
new file mode 100644
index 000000000..4f253f481
--- /dev/null
+++ b/vendor/github.com/spf13/afero/util.go
@@ -0,0 +1,330 @@
+// Copyright ©2015 Steve Francia <spf@spf13.com>
+// Portions Copyright ©2015 The Hugo Authors
+// Portions Copyright 2016-present Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"unicode"
+
+	"golang.org/x/text/transform"
+	"golang.org/x/text/unicode/norm"
+)
+
+// Filepath separator defined by os.Separator.
+const FilePathSeparator = string(filepath.Separator)
+
+// WriteReader takes a reader and a path and writes the content.
+func (a Afero) WriteReader(path string, r io.Reader) (err error) {
+	return WriteReader(a.Fs, path, r)
+}
+
+func WriteReader(fs Fs, path string, r io.Reader) (err error) {
+	dir, _ := filepath.Split(path)
+	ospath := filepath.FromSlash(dir)
+
+	if ospath != "" {
+		err = fs.MkdirAll(ospath, 0777) // rwx, rw, r
+		if err != nil {
+			if err != os.ErrExist {
+				return err
+			}
+		}
+	}
+
+	file, err := fs.Create(path)
+	if err != nil {
+		return
+	}
+	defer file.Close()
+
+	_, err = io.Copy(file, r)
+	return
+}
+
+// SafeWriteReader is the same as WriteReader, but returns an error if the
+// file or directory already exists.
+func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) {
+	return SafeWriteReader(a.Fs, path, r)
+}
+
+func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) {
+	dir, _ := filepath.Split(path)
+	ospath := filepath.FromSlash(dir)
+
+	if ospath != "" {
+		err = fs.MkdirAll(ospath, 0777) // rwx, rw, r
+		if err != nil {
+			return
+		}
+	}
+
+	exists, err := Exists(fs, path)
+	if err != nil {
+		return
+	}
+	if exists {
+		return fmt.Errorf("%v already exists", path)
+	}
+
+	file, err := fs.Create(path)
+	if err != nil {
+		return
+	}
+	defer file.Close()
+
+	_, err = io.Copy(file, r)
+	return
+}
+
+func (a Afero) GetTempDir(subPath string) string {
+	return GetTempDir(a.Fs, subPath)
+}
+
+// GetTempDir returns the default temp directory with a trailing slash.
+// If subPath is not empty, it is created beneath it recursively with mode 0777 (rwxrwxrwx).
+func GetTempDir(fs Fs, subPath string) string {
+	addSlash := func(p string) string {
+		if FilePathSeparator != p[len(p)-1:] {
+			p = p + FilePathSeparator
+		}
+		return p
+	}
+	dir := addSlash(os.TempDir())
+
+	if subPath != "" {
+		// preserve windows backslash :-(
+		if FilePathSeparator == "\\" {
+			subPath = strings.Replace(subPath, "\\", "____", -1)
+		}
+		dir = dir + UnicodeSanitize(subPath)
+		if FilePathSeparator == "\\" {
+			dir = strings.Replace(dir, "____", "\\", -1)
+		}
+
+		if exists, _ := Exists(fs, dir); exists {
+			return addSlash(dir)
+		}
+
+		err := fs.MkdirAll(dir, 0777)
+		if err != nil {
+			panic(err)
+		}
+		dir = addSlash(dir)
+	}
+	return dir
+}
+
+// UnicodeSanitize rewrites a string to remove non-standard path characters.
+func UnicodeSanitize(s string) string {
+	source := []rune(s)
+	target := make([]rune, 0, len(source))
+
+	for _, r := range source {
+		if unicode.IsLetter(r) ||
+			unicode.IsDigit(r) ||
+			unicode.IsMark(r) ||
+			r == '.' ||
+			r == '/' ||
+			r == '\\' ||
+			r == '_' ||
+			r == '-' ||
+			r == '%' ||
+			r == ' ' ||
+			r == '#' {
+			target = append(target, r)
+		}
+	}
+
+	return string(target)
+}
+
+// NeuterAccents transforms accented characters into their plain forms.
+func NeuterAccents(s string) string {
+	t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC)
+	result, _, _ := transform.String(t, string(s))
+
+	return result
+}
+
+func isMn(r rune) bool {
+	return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks
+}
+
+func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) {
+	return FileContainsBytes(a.Fs, filename, subslice)
+}
+
+// FileContainsBytes checks whether a file contains the specified byte slice.
+func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) {
+	f, err := fs.Open(filename)
+	if err != nil {
+		return false, err
+	}
+	defer f.Close()
+
+	return readerContainsAny(f, subslice), nil
+}
+
+func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) {
+	return FileContainsAnyBytes(a.Fs, filename, subslices)
+}
+
+// FileContainsAnyBytes checks whether a file contains any of the specified byte slices.
+func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) {
+	f, err := fs.Open(filename)
+	if err != nil {
+		return false, err
+	}
+	defer f.Close()
+
+	return readerContainsAny(f, subslices...), nil
+}
+
+// readerContainsAny reports whether any of the subslices is within r.
+func readerContainsAny(r io.Reader, subslices ...[]byte) bool {
+	if r == nil || len(subslices) == 0 {
+		return false
+	}
+
+	largestSlice := 0
+
+	for _, sl := range subslices {
+		if len(sl) > largestSlice {
+			largestSlice = len(sl)
+		}
+	}
+
+	if largestSlice == 0 {
+		return false
+	}
+
+	bufflen := largestSlice * 4
+	halflen := bufflen / 2
+	buff := make([]byte, bufflen)
+	var err error
+	var n, i int
+
+	for {
+		i++
+		if i == 1 {
+			n, err = io.ReadAtLeast(r, buff[:halflen], halflen)
+		} else {
+			if i != 2 {
+				// shift left to catch overlapping matches
+				copy(buff[:], buff[halflen:])
+			}
+			n, err = io.ReadAtLeast(r, buff[halflen:], halflen)
+		}
+
+		if n > 0 {
+			for _, sl := range subslices {
+				if bytes.Contains(buff, sl) {
+					return true
+				}
+			}
+		}
+
+		if err != nil {
+			break
+		}
+	}
+	return false
+}
+
+func (a Afero) DirExists(path string) (bool, error) {
+	return DirExists(a.Fs, path)
+}
+
+// DirExists checks if a path exists and is a directory.
+func DirExists(fs Fs, path string) (bool, error) {
+	fi, err := fs.Stat(path)
+	if err == nil && fi.IsDir() {
+		return true, nil
+	}
+	if os.IsNotExist(err) {
+		return false, nil
+	}
+	return false, err
+}
+
+func (a Afero) IsDir(path string) (bool, error) {
+	return IsDir(a.Fs, path)
+}
+
+// IsDir checks if a given path is a directory.
+func IsDir(fs Fs, path string) (bool, error) {
+	fi, err := fs.Stat(path)
+	if err != nil {
+		return false, err
+	}
+	return fi.IsDir(), nil
+}
+
+func (a Afero) IsEmpty(path string) (bool, error) {
+	return IsEmpty(a.Fs, path)
+}
+
+// IsEmpty checks if a given file or directory is empty.
+func IsEmpty(fs Fs, path string) (bool, error) {
+	if b, _ := Exists(fs, path); !b {
+		return false, fmt.Errorf("%q path does not exist", path)
+	}
+	fi, err := fs.Stat(path)
+	if err != nil {
+		return false, err
+	}
+	if fi.IsDir() {
+		f, err := fs.Open(path)
+		if err != nil {
+			return false, err
+		}
+		defer f.Close()
+		list, err := f.Readdir(-1)
+		return len(list) == 0, err
+	}
+	return fi.Size() == 0, nil
+}
+
+func (a Afero) Exists(path string) (bool, error) {
+	return Exists(a.Fs, path)
+}
+
+// Exists checks whether a file or directory exists.
+func Exists(fs Fs, path string) (bool, error) {
+	_, err := fs.Stat(path)
+	if err == nil {
+		return true, nil
+	}
+	if os.IsNotExist(err) {
+		return false, nil
+	}
+	return false, err
+}
+
+func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string {
+	combinedPath := filepath.Join(basePathFs.path, relativePath)
+	if parent, ok := basePathFs.source.(*BasePathFs); ok {
+		return FullBaseFsPath(parent, combinedPath)
+	}
+
+	return combinedPath
+}
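
A minimal sketch tying a few of these helpers together over an in-memory filesystem:

package main

import (
	"fmt"
	"strings"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()

	// WriteReader creates parent directories as needed.
	err := afero.WriteReader(fs, "/etc/app/conf.yml", strings.NewReader("debug: true"))
	if err != nil {
		panic(err)
	}

	ok, _ := afero.Exists(fs, "/etc/app/conf.yml")
	empty, _ := afero.IsEmpty(fs, "/etc/app/conf.yml")
	found, _ := afero.FileContainsBytes(fs, "/etc/app/conf.yml", []byte("debug"))
	fmt.Println(ok, empty, found) // true false true
}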
diff --git a/vendor/github.com/spf13/cast/LICENSE b/vendor/github.com/spf13/cast/LICENSE
new file mode 100644
index 000000000..4527efb9c
--- /dev/null
+++ b/vendor/github.com/spf13/cast/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Steve Francia
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/spf13/cast/cast.go b/vendor/github.com/spf13/cast/cast.go
new file mode 100644
index 000000000..8b8c208be
--- /dev/null
+++ b/vendor/github.com/spf13/cast/cast.go
@@ -0,0 +1,159 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Package cast provides easy and safe casting in Go.
+package cast
+
+import "time"
+
+// ToBool casts an interface to a bool type.
+func ToBool(i interface{}) bool {
+	v, _ := ToBoolE(i)
+	return v
+}
+
+// ToTime casts an interface to a time.Time type.
+func ToTime(i interface{}) time.Time {
+	v, _ := ToTimeE(i)
+	return v
+}
+
+// ToDuration casts an interface to a time.Duration type.
+func ToDuration(i interface{}) time.Duration {
+	v, _ := ToDurationE(i)
+	return v
+}
+
+// ToFloat64 casts an interface to a float64 type.
+func ToFloat64(i interface{}) float64 {
+	v, _ := ToFloat64E(i)
+	return v
+}
+
+// ToFloat32 casts an interface to a float32 type.
+func ToFloat32(i interface{}) float32 {
+	v, _ := ToFloat32E(i)
+	return v
+}
+
+// ToInt64 casts an interface to an int64 type.
+func ToInt64(i interface{}) int64 {
+	v, _ := ToInt64E(i)
+	return v
+}
+
+// ToInt32 casts an interface to an int32 type.
+func ToInt32(i interface{}) int32 {
+	v, _ := ToInt32E(i)
+	return v
+}
+
+// ToInt16 casts an interface to an int16 type.
+func ToInt16(i interface{}) int16 {
+	v, _ := ToInt16E(i)
+	return v
+}
+
+// ToInt8 casts an interface to an int8 type.
+func ToInt8(i interface{}) int8 {
+	v, _ := ToInt8E(i)
+	return v
+}
+
+// ToInt casts an interface to an int type.
+func ToInt(i interface{}) int {
+	v, _ := ToIntE(i)
+	return v
+}
+
+// ToUint casts an interface to a uint type.
+func ToUint(i interface{}) uint {
+	v, _ := ToUintE(i)
+	return v
+}
+
+// ToUint64 casts an interface to a uint64 type.
+func ToUint64(i interface{}) uint64 {
+	v, _ := ToUint64E(i)
+	return v
+}
+
+// ToUint32 casts an interface to a uint32 type.
+func ToUint32(i interface{}) uint32 {
+	v, _ := ToUint32E(i)
+	return v
+}
+
+// ToUint16 casts an interface to a uint16 type.
+func ToUint16(i interface{}) uint16 {
+	v, _ := ToUint16E(i)
+	return v
+}
+
+// ToUint8 casts an interface to a uint8 type.
+func ToUint8(i interface{}) uint8 {
+	v, _ := ToUint8E(i)
+	return v
+}
+
+// ToString casts an interface to a string type.
+func ToString(i interface{}) string {
+	v, _ := ToStringE(i)
+	return v
+}
+
+// ToStringMapString casts an interface to a map[string]string type.
+func ToStringMapString(i interface{}) map[string]string {
+	v, _ := ToStringMapStringE(i)
+	return v
+}
+
+// ToStringMapStringSlice casts an interface to a map[string][]string type.
+func ToStringMapStringSlice(i interface{}) map[string][]string {
+	v, _ := ToStringMapStringSliceE(i)
+	return v
+}
+
+// ToStringMapBool casts an interface to a map[string]bool type.
+func ToStringMapBool(i interface{}) map[string]bool {
+	v, _ := ToStringMapBoolE(i)
+	return v
+}
+
+// ToStringMap casts an interface to a map[string]interface{} type.
+func ToStringMap(i interface{}) map[string]interface{} {
+	v, _ := ToStringMapE(i)
+	return v
+}
+
+// ToSlice casts an interface to a []interface{} type.
+func ToSlice(i interface{}) []interface{} {
+	v, _ := ToSliceE(i)
+	return v
+}
+
+// ToBoolSlice casts an interface to a []bool type.
+func ToBoolSlice(i interface{}) []bool {
+	v, _ := ToBoolSliceE(i)
+	return v
+}
+
+// ToStringSlice casts an interface to a []string type.
+func ToStringSlice(i interface{}) []string {
+	v, _ := ToStringSliceE(i)
+	return v
+}
+
+// ToIntSlice casts an interface to a []int type.
+func ToIntSlice(i interface{}) []int {
+	v, _ := ToIntSliceE(i)
+	return v
+}
+
+// ToDurationSlice casts an interface to a []time.Duration type.
+func ToDurationSlice(i interface{}) []time.Duration {
+	v, _ := ToDurationSliceE(i)
+	return v
+}
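
Each To* helper here wraps a To*E counterpart (defined in caste.go) and discards its error, so the pattern is: use To* when a zero value is an acceptable fallback, and the *E variant when you need to know the cast failed. A minimal sketch:

package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// The plain To* helpers swallow errors and fall back to zero values...
	fmt.Println(cast.ToInt("8"))     // 8
	fmt.Println(cast.ToInt("bad"))   // 0
	fmt.Println(cast.ToBool("true")) // true

	// ...while the *E variants report them.
	if _, err := cast.ToIntE("bad"); err != nil {
		fmt.Println(err) // unable to cast "bad" of type string to int
	}
}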
diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go
new file mode 100644
index 000000000..4fe192893
--- /dev/null
+++ b/vendor/github.com/spf13/cast/caste.go
@@ -0,0 +1,1166 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package cast
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"html/template"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var errNegativeNotAllowed = errors.New("unable to cast negative value")
+
+// ToTimeE casts an interface to a time.Time type.
+func ToTimeE(i interface{}) (tim time.Time, err error) {
+	i = indirect(i)
+
+	switch v := i.(type) {
+	case time.Time:
+		return v, nil
+	case string:
+		return StringToDate(v)
+	case int:
+		return time.Unix(int64(v), 0), nil
+	case int64:
+		return time.Unix(v, 0), nil
+	case int32:
+		return time.Unix(int64(v), 0), nil
+	case uint:
+		return time.Unix(int64(v), 0), nil
+	case uint64:
+		return time.Unix(int64(v), 0), nil
+	case uint32:
+		return time.Unix(int64(v), 0), nil
+	default:
+		return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i)
+	}
+}
+
+// ToDurationE casts an interface to a time.Duration type.
+func ToDurationE(i interface{}) (d time.Duration, err error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case time.Duration:
+		return s, nil
+	case int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8:
+		d = time.Duration(ToInt64(s))
+		return
+	case float32, float64:
+		d = time.Duration(ToFloat64(s))
+		return
+	case string:
+		if strings.ContainsAny(s, "nsuµmh") {
+			d, err = time.ParseDuration(s)
+		} else {
+			d, err = time.ParseDuration(s + "ns")
+		}
+		return
+	default:
+		err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i)
+		return
+	}
+}
+
+// ToBoolE casts an interface to a bool type.
+func ToBoolE(i interface{}) (bool, error) {
+	i = indirect(i)
+
+	switch b := i.(type) {
+	case bool:
+		return b, nil
+	case nil:
+		return false, nil
+	case int:
+		if b != 0 {
+			return true, nil
+		}
+		return false, nil
+	case string:
+		return strconv.ParseBool(b)
+	default:
+		return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i)
+	}
+}
+
+// ToFloat64E casts an interface to a float64 type.
+func ToFloat64E(i interface{}) (float64, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case float64:
+		return s, nil
+	case float32:
+		return float64(s), nil
+	case int:
+		return float64(s), nil
+	case int64:
+		return float64(s), nil
+	case int32:
+		return float64(s), nil
+	case int16:
+		return float64(s), nil
+	case int8:
+		return float64(s), nil
+	case uint:
+		return float64(s), nil
+	case uint64:
+		return float64(s), nil
+	case uint32:
+		return float64(s), nil
+	case uint16:
+		return float64(s), nil
+	case uint8:
+		return float64(s), nil
+	case string:
+		v, err := strconv.ParseFloat(s, 64)
+		if err == nil {
+			return v, nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
+	}
+}
+
+// ToFloat32E casts an interface to a float32 type.
+func ToFloat32E(i interface{}) (float32, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case float64:
+		return float32(s), nil
+	case float32:
+		return s, nil
+	case int:
+		return float32(s), nil
+	case int64:
+		return float32(s), nil
+	case int32:
+		return float32(s), nil
+	case int16:
+		return float32(s), nil
+	case int8:
+		return float32(s), nil
+	case uint:
+		return float32(s), nil
+	case uint64:
+		return float32(s), nil
+	case uint32:
+		return float32(s), nil
+	case uint16:
+		return float32(s), nil
+	case uint8:
+		return float32(s), nil
+	case string:
+		v, err := strconv.ParseFloat(s, 32)
+		if err == nil {
+			return float32(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
+	}
+}
+
+// ToInt64E casts an interface to an int64 type.
+func ToInt64E(i interface{}) (int64, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case int:
+		return int64(s), nil
+	case int64:
+		return s, nil
+	case int32:
+		return int64(s), nil
+	case int16:
+		return int64(s), nil
+	case int8:
+		return int64(s), nil
+	case uint:
+		return int64(s), nil
+	case uint64:
+		return int64(s), nil
+	case uint32:
+		return int64(s), nil
+	case uint16:
+		return int64(s), nil
+	case uint8:
+		return int64(s), nil
+	case float64:
+		return int64(s), nil
+	case float32:
+		return int64(s), nil
+	case string:
+		v, err := strconv.ParseInt(s, 0, 0)
+		if err == nil {
+			return v, nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i)
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i)
+	}
+}
+
+// ToInt32E casts an interface to an int32 type.
+func ToInt32E(i interface{}) (int32, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case int:
+		return int32(s), nil
+	case int64:
+		return int32(s), nil
+	case int32:
+		return s, nil
+	case int16:
+		return int32(s), nil
+	case int8:
+		return int32(s), nil
+	case uint:
+		return int32(s), nil
+	case uint64:
+		return int32(s), nil
+	case uint32:
+		return int32(s), nil
+	case uint16:
+		return int32(s), nil
+	case uint8:
+		return int32(s), nil
+	case float64:
+		return int32(s), nil
+	case float32:
+		return int32(s), nil
+	case string:
+		v, err := strconv.ParseInt(s, 0, 0)
+		if err == nil {
+			return int32(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i)
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i)
+	}
+}
+
+// ToInt16E casts an interface to an int16 type.
+func ToInt16E(i interface{}) (int16, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case int:
+		return int16(s), nil
+	case int64:
+		return int16(s), nil
+	case int32:
+		return int16(s), nil
+	case int16:
+		return s, nil
+	case int8:
+		return int16(s), nil
+	case uint:
+		return int16(s), nil
+	case uint64:
+		return int16(s), nil
+	case uint32:
+		return int16(s), nil
+	case uint16:
+		return int16(s), nil
+	case uint8:
+		return int16(s), nil
+	case float64:
+		return int16(s), nil
+	case float32:
+		return int16(s), nil
+	case string:
+		v, err := strconv.ParseInt(s, 0, 0)
+		if err == nil {
+			return int16(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i)
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i)
+	}
+}
+
+// ToInt8E casts an interface to an int8 type.
+func ToInt8E(i interface{}) (int8, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case int:
+		return int8(s), nil
+	case int64:
+		return int8(s), nil
+	case int32:
+		return int8(s), nil
+	case int16:
+		return int8(s), nil
+	case int8:
+		return s, nil
+	case uint:
+		return int8(s), nil
+	case uint64:
+		return int8(s), nil
+	case uint32:
+		return int8(s), nil
+	case uint16:
+		return int8(s), nil
+	case uint8:
+		return int8(s), nil
+	case float64:
+		return int8(s), nil
+	case float32:
+		return int8(s), nil
+	case string:
+		v, err := strconv.ParseInt(s, 0, 0)
+		if err == nil {
+			return int8(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i)
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i)
+	}
+}
+
+// ToIntE casts an interface to an int type.
+func ToIntE(i interface{}) (int, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case int:
+		return s, nil
+	case int64:
+		return int(s), nil
+	case int32:
+		return int(s), nil
+	case int16:
+		return int(s), nil
+	case int8:
+		return int(s), nil
+	case uint:
+		return int(s), nil
+	case uint64:
+		return int(s), nil
+	case uint32:
+		return int(s), nil
+	case uint16:
+		return int(s), nil
+	case uint8:
+		return int(s), nil
+	case float64:
+		return int(s), nil
+	case float32:
+		return int(s), nil
+	case string:
+		v, err := strconv.ParseInt(s, 0, 0)
+		if err == nil {
+			return int(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i)
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i)
+	}
+}
+
+// ToUintE casts an interface to a uint type.
+func ToUintE(i interface{}) (uint, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case string:
+		v, err := strconv.ParseUint(s, 0, 0)
+		if err == nil {
+			return uint(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v to uint: %s", i, err)
+	case int:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint(s), nil
+	case int64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint(s), nil
+	case int32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint(s), nil
+	case int16:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint(s), nil
+	case int8:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint(s), nil
+	case uint:
+		return s, nil
+	case uint64:
+		return uint(s), nil
+	case uint32:
+		return uint(s), nil
+	case uint16:
+		return uint(s), nil
+	case uint8:
+		return uint(s), nil
+	case float64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint(s), nil
+	case float32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint(s), nil
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i)
+	}
+}
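+
+// Illustrative behavior of the unsigned casts above (a sketch, not upstream
+// documentation): negative inputs are rejected rather than wrapped around.
+//
+//	u, err := ToUintE(42) // u == 42, err == nil
+//	_, err = ToUintE(-1)  // err == errNegativeNotAllowed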
+
+// ToUint64E casts an interface to a uint64 type.
+func ToUint64E(i interface{}) (uint64, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case string:
+		v, err := strconv.ParseUint(s, 0, 64)
+		if err == nil {
+			return v, nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v to uint64: %s", i, err)
+	case int:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint64(s), nil
+	case int64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint64(s), nil
+	case int32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint64(s), nil
+	case int16:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint64(s), nil
+	case int8:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint64(s), nil
+	case uint:
+		return uint64(s), nil
+	case uint64:
+		return s, nil
+	case uint32:
+		return uint64(s), nil
+	case uint16:
+		return uint64(s), nil
+	case uint8:
+		return uint64(s), nil
+	case float32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint64(s), nil
+	case float64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint64(s), nil
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i)
+	}
+}
+
+// ToUint32E casts an interface to a uint32 type.
+func ToUint32E(i interface{}) (uint32, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case string:
+		v, err := strconv.ParseUint(s, 0, 32)
+		if err == nil {
+			return uint32(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v to uint32: %s", i, err)
+	case int:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case int64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case int32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case int16:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case int8:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case uint:
+		return uint32(s), nil
+	case uint64:
+		return uint32(s), nil
+	case uint32:
+		return s, nil
+	case uint16:
+		return uint32(s), nil
+	case uint8:
+		return uint32(s), nil
+	case float64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case float32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i)
+	}
+}
+
+// ToUint16E casts an interface to a uint16 type.
+func ToUint16E(i interface{}) (uint16, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case string:
+		v, err := strconv.ParseUint(s, 0, 16)
+		if err == nil {
+			return uint16(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v to uint16: %s", i, err)
+	case int:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case int64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case int32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case int16:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case int8:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case uint:
+		return uint16(s), nil
+	case uint64:
+		return uint16(s), nil
+	case uint32:
+		return uint16(s), nil
+	case uint16:
+		return s, nil
+	case uint8:
+		return uint16(s), nil
+	case float64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case float32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i)
+	}
+}
+
+// ToUint8E casts an interface to a uint8 type.
+func ToUint8E(i interface{}) (uint8, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case string:
+		v, err := strconv.ParseUint(s, 0, 8)
+		if err == nil {
+			return uint8(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v to uint8: %s", i, err)
+	case int:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint8(s), nil
+	case int64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint8(s), nil
+	case int32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint8(s), nil
+	case int16:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint8(s), nil
+	case int8:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint8(s), nil
+	case uint:
+		return uint8(s), nil
+	case uint64:
+		return uint8(s), nil
+	case uint32:
+		return uint8(s), nil
+	case uint16:
+		return uint8(s), nil
+	case uint8:
+		return s, nil
+	case float64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint8(s), nil
+	case float32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint8(s), nil
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i)
+	}
+}
+
+// From html/template/content.go
+// Copyright 2011 The Go Authors. All rights reserved.
+// indirect returns the value, after dereferencing as many times
+// as necessary to reach the base type (or nil).
+func indirect(a interface{}) interface{} {
+	if a == nil {
+		return nil
+	}
+	if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr {
+		// Avoid creating a reflect.Value if it's not a pointer.
+		return a
+	}
+	v := reflect.ValueOf(a)
+	for v.Kind() == reflect.Ptr && !v.IsNil() {
+		v = v.Elem()
+	}
+	return v.Interface()
+}
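+
+// For illustration, indirect unwraps any depth of pointers:
+//
+//	x := 5
+//	p := &x
+//	indirect(p)  // interface{} holding int(5)
+//	indirect(&p) // also 5: the **int is dereferenced twice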
+
+// From html/template/content.go
+// Copyright 2011 The Go Authors. All rights reserved.
+// indirectToStringerOrError returns the value, after dereferencing as many times
+// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer
+// or error.
+func indirectToStringerOrError(a interface{}) interface{} {
+	if a == nil {
+		return nil
+	}
+
+	var errorType = reflect.TypeOf((*error)(nil)).Elem()
+	var fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
+
+	v := reflect.ValueOf(a)
+	for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() {
+		v = v.Elem()
+	}
+	return v.Interface()
+}
+
+// ToStringE casts an interface to a string type.
+func ToStringE(i interface{}) (string, error) {
+	i = indirectToStringerOrError(i)
+
+	switch s := i.(type) {
+	case string:
+		return s, nil
+	case bool:
+		return strconv.FormatBool(s), nil
+	case float64:
+		return strconv.FormatFloat(s, 'f', -1, 64), nil
+	case float32:
+		return strconv.FormatFloat(float64(s), 'f', -1, 32), nil
+	case int:
+		return strconv.Itoa(s), nil
+	case int64:
+		return strconv.FormatInt(s, 10), nil
+	case int32:
+		return strconv.Itoa(int(s)), nil
+	case int16:
+		return strconv.FormatInt(int64(s), 10), nil
+	case int8:
+		return strconv.FormatInt(int64(s), 10), nil
+	case uint:
+		return strconv.FormatInt(int64(s), 10), nil
+	case uint64:
+		return strconv.FormatInt(int64(s), 10), nil
+	case uint32:
+		return strconv.FormatInt(int64(s), 10), nil
+	case uint16:
+		return strconv.FormatInt(int64(s), 10), nil
+	case uint8:
+		return strconv.FormatInt(int64(s), 10), nil
+	case []byte:
+		return string(s), nil
+	case template.HTML:
+		return string(s), nil
+	case template.URL:
+		return string(s), nil
+	case template.JS:
+		return string(s), nil
+	case template.CSS:
+		return string(s), nil
+	case template.HTMLAttr:
+		return string(s), nil
+	case nil:
+		return "", nil
+	case fmt.Stringer:
+		return s.String(), nil
+	case error:
+		return s.Error(), nil
+	default:
+		return "", fmt.Errorf("unable to cast %#v of type %T to string", i, i)
+	}
+}
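+
+// A few illustrative conversions covering the cases above:
+//
+//	ToStringE(3.14)               // "3.14", via strconv.FormatFloat
+//	ToStringE([]byte("hi"))       // "hi"
+//	ToStringE(errors.New("boom")) // "boom", via the error branch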
+
+// ToStringMapStringE casts an interface to a map[string]string type.
+func ToStringMapStringE(i interface{}) (map[string]string, error) {
+	var m = map[string]string{}
+
+	switch v := i.(type) {
+	case map[string]string:
+		return v, nil
+	case map[string]interface{}:
+		for k, val := range v {
+			m[ToString(k)] = ToString(val)
+		}
+		return m, nil
+	case map[interface{}]string:
+		for k, val := range v {
+			m[ToString(k)] = ToString(val)
+		}
+		return m, nil
+	case map[interface{}]interface{}:
+		for k, val := range v {
+			m[ToString(k)] = ToString(val)
+		}
+		return m, nil
+	case string:
+		err := jsonStringToObject(v, &m)
+		return m, err
+	default:
+		return m, fmt.Errorf("unable to cast %#v of type %T to map[string]string", i, i)
+	}
+}
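+
+// Illustrative: a JSON string is accepted as input because the string case
+// above unmarshals it into the map (the keys here are hypothetical).
+//
+//	m, err := ToStringMapStringE(`{"name":"app","mode":"dark"}`)
+//	// m == map[string]string{"name": "app", "mode": "dark"}, err == nil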
+
+// ToStringMapStringSliceE casts an interface to a map[string][]string type.
+func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) {
+	var m = map[string][]string{}
+
+	switch v := i.(type) {
+	case map[string][]string:
+		return v, nil
+	case map[string][]interface{}:
+		for k, val := range v {
+			m[ToString(k)] = ToStringSlice(val)
+		}
+		return m, nil
+	case map[string]string:
+		for k, val := range v {
+			m[ToString(k)] = []string{val}
+		}
+	case map[string]interface{}:
+		for k, val := range v {
+			switch vt := val.(type) {
+			case []interface{}:
+				m[ToString(k)] = ToStringSlice(vt)
+			case []string:
+				m[ToString(k)] = vt
+			default:
+				m[ToString(k)] = []string{ToString(val)}
+			}
+		}
+		return m, nil
+	case map[interface{}][]string:
+		for k, val := range v {
+			m[ToString(k)] = ToStringSlice(val)
+		}
+		return m, nil
+	case map[interface{}]string:
+		for k, val := range v {
+			m[ToString(k)] = ToStringSlice(val)
+		}
+		return m, nil
+	case map[interface{}][]interface{}:
+		for k, val := range v {
+			m[ToString(k)] = ToStringSlice(val)
+		}
+		return m, nil
+	case map[interface{}]interface{}:
+		for k, val := range v {
+			key, err := ToStringE(k)
+			if err != nil {
+				return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
+			}
+			value, err := ToStringSliceE(val)
+			if err != nil {
+				return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
+			}
+			m[key] = value
+		}
+	case string:
+		err := jsonStringToObject(v, &m)
+		return m, err
+	default:
+		return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
+	}
+	return m, nil
+}
+
+// ToStringMapBoolE casts an interface to a map[string]bool type.
+func ToStringMapBoolE(i interface{}) (map[string]bool, error) {
+	var m = map[string]bool{}
+
+	switch v := i.(type) {
+	case map[interface{}]interface{}:
+		for k, val := range v {
+			m[ToString(k)] = ToBool(val)
+		}
+		return m, nil
+	case map[string]interface{}:
+		for k, val := range v {
+			m[ToString(k)] = ToBool(val)
+		}
+		return m, nil
+	case map[string]bool:
+		return v, nil
+	case string:
+		err := jsonStringToObject(v, &m)
+		return m, err
+	default:
+		return m, fmt.Errorf("unable to cast %#v of type %T to map[string]bool", i, i)
+	}
+}
+
+// ToStringMapE casts an interface to a map[string]interface{} type.
+func ToStringMapE(i interface{}) (map[string]interface{}, error) {
+	var m = map[string]interface{}{}
+
+	switch v := i.(type) {
+	case map[interface{}]interface{}:
+		for k, val := range v {
+			m[ToString(k)] = val
+		}
+		return m, nil
+	case map[string]interface{}:
+		return v, nil
+	case string:
+		err := jsonStringToObject(v, &m)
+		return m, err
+	default:
+		return m, fmt.Errorf("unable to cast %#v of type %T to map[string]interface{}", i, i)
+	}
+}
+
+// ToSliceE casts an interface to a []interface{} type.
+func ToSliceE(i interface{}) ([]interface{}, error) {
+	var s []interface{}
+
+	switch v := i.(type) {
+	case []interface{}:
+		return append(s, v...), nil
+	case []map[string]interface{}:
+		for _, u := range v {
+			s = append(s, u)
+		}
+		return s, nil
+	default:
+		return s, fmt.Errorf("unable to cast %#v of type %T to []interface{}", i, i)
+	}
+}
+
+// ToBoolSliceE casts an interface to a []bool type.
+func ToBoolSliceE(i interface{}) ([]bool, error) {
+	if i == nil {
+		return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
+	}
+
+	switch v := i.(type) {
+	case []bool:
+		return v, nil
+	}
+
+	kind := reflect.TypeOf(i).Kind()
+	switch kind {
+	case reflect.Slice, reflect.Array:
+		s := reflect.ValueOf(i)
+		a := make([]bool, s.Len())
+		for j := 0; j < s.Len(); j++ {
+			val, err := ToBoolE(s.Index(j).Interface())
+			if err != nil {
+				return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
+			}
+			a[j] = val
+		}
+		return a, nil
+	default:
+		return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
+	}
+}
+
+// ToStringSliceE casts an interface to a []string type.
+func ToStringSliceE(i interface{}) ([]string, error) {
+	var a []string
+
+	switch v := i.(type) {
+	case []interface{}:
+		for _, u := range v {
+			a = append(a, ToString(u))
+		}
+		return a, nil
+	case []string:
+		return v, nil
+	case string:
+		return strings.Fields(v), nil
+	case interface{}:
+		str, err := ToStringE(v)
+		if err != nil {
+			return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i)
+		}
+		return []string{str}, nil
+	default:
+		return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i)
+	}
+}
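+
+// Illustrative behavior of the cases above: plain strings are split on
+// whitespace, and any other scalar is wrapped in a one-element slice.
+//
+//	ToStringSliceE("a b c") // []string{"a", "b", "c"}, via strings.Fields
+//	ToStringSliceE(42)      // []string{"42"}, via the interface{} fallback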
+
+// ToIntSliceE casts an interface to a []int type.
+func ToIntSliceE(i interface{}) ([]int, error) {
+	if i == nil {
+		return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
+	}
+
+	switch v := i.(type) {
+	case []int:
+		return v, nil
+	}
+
+	kind := reflect.TypeOf(i).Kind()
+	switch kind {
+	case reflect.Slice, reflect.Array:
+		s := reflect.ValueOf(i)
+		a := make([]int, s.Len())
+		for j := 0; j < s.Len(); j++ {
+			val, err := ToIntE(s.Index(j).Interface())
+			if err != nil {
+				return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
+			}
+			a[j] = val
+		}
+		return a, nil
+	default:
+		return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
+	}
+}
+
+// ToDurationSliceE casts an interface to a []time.Duration type.
+func ToDurationSliceE(i interface{}) ([]time.Duration, error) {
+	if i == nil {
+		return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i)
+	}
+
+	switch v := i.(type) {
+	case []time.Duration:
+		return v, nil
+	}
+
+	kind := reflect.TypeOf(i).Kind()
+	switch kind {
+	case reflect.Slice, reflect.Array:
+		s := reflect.ValueOf(i)
+		a := make([]time.Duration, s.Len())
+		for j := 0; j < s.Len(); j++ {
+			val, err := ToDurationE(s.Index(j).Interface())
+			if err != nil {
+				return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i)
+			}
+			a[j] = val
+		}
+		return a, nil
+	default:
+		return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i)
+	}
+}
+
+// StringToDate attempts to parse a string into a time.Time type using a
+// predefined list of formats. If no suitable format is found, an error is
+// returned.
+func StringToDate(s string) (time.Time, error) {
+	return parseDateWith(s, []string{
+		time.RFC3339,
+		"2006-01-02T15:04:05", // iso8601 without timezone
+		time.RFC1123Z,
+		time.RFC1123,
+		time.RFC822Z,
+		time.RFC822,
+		time.RFC850,
+		time.ANSIC,
+		time.UnixDate,
+		time.RubyDate,
+		"2006-01-02 15:04:05.999999999 -0700 MST", // Time.String()
+		"2006-01-02",
+		"02 Jan 2006",
+		"2006-01-02 15:04:05 -07:00",
+		"2006-01-02 15:04:05 -0700",
+		"2006-01-02 15:04:05Z07:00", // RFC3339 without T
+		"2006-01-02 15:04:05",
+		time.Kitchen,
+		time.Stamp,
+		time.StampMilli,
+		time.StampMicro,
+		time.StampNano,
+	})
+}
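+
+// Illustrative: the formats are tried in order, so any of these parse, while
+// unrecognized input falls through to an error (dates here are hypothetical).
+//
+//	StringToDate("2018-07-28")  // matches the "2006-01-02" layout
+//	StringToDate("28 Jul 2018") // matches "02 Jan 2006"
+//	StringToDate("not a date")  // err: unable to parse date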
+
+func parseDateWith(s string, dates []string) (d time.Time, e error) {
+	for _, dateType := range dates {
+		if d, e = time.Parse(dateType, s); e == nil {
+			return
+		}
+	}
+	return d, fmt.Errorf("unable to parse date: %s", s)
+}
+
+// jsonStringToObject attempts to unmarshal a string as JSON into
+// the object passed as a pointer.
+func jsonStringToObject(s string, v interface{}) error {
+	data := []byte(s)
+	return json.Unmarshal(data, v)
+}
diff --git a/vendor/github.com/spf13/jwalterweatherman/LICENSE b/vendor/github.com/spf13/jwalterweatherman/LICENSE
new file mode 100644
index 000000000..4527efb9c
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Steve Francia
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/spf13/jwalterweatherman/default_notepad.go b/vendor/github.com/spf13/jwalterweatherman/default_notepad.go
new file mode 100644
index 000000000..bcb763403
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/default_notepad.go
@@ -0,0 +1,113 @@
+// Copyright © 2016 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package jwalterweatherman
+
+import (
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+)
+
+var (
+	TRACE    *log.Logger
+	DEBUG    *log.Logger
+	INFO     *log.Logger
+	WARN     *log.Logger
+	ERROR    *log.Logger
+	CRITICAL *log.Logger
+	FATAL    *log.Logger
+
+	LOG      *log.Logger
+	FEEDBACK *Feedback
+
+	defaultNotepad *Notepad
+)
+
+func reloadDefaultNotepad() {
+	TRACE = defaultNotepad.TRACE
+	DEBUG = defaultNotepad.DEBUG
+	INFO = defaultNotepad.INFO
+	WARN = defaultNotepad.WARN
+	ERROR = defaultNotepad.ERROR
+	CRITICAL = defaultNotepad.CRITICAL
+	FATAL = defaultNotepad.FATAL
+
+	LOG = defaultNotepad.LOG
+	FEEDBACK = defaultNotepad.FEEDBACK
+}
+
+func init() {
+	defaultNotepad = NewNotepad(LevelError, LevelWarn, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime)
+	reloadDefaultNotepad()
+}
+
+// SetLogThreshold sets the log threshold for the default notepad. Warn by default (see init).
+func SetLogThreshold(threshold Threshold) {
+	defaultNotepad.SetLogThreshold(threshold)
+	reloadDefaultNotepad()
+}
+
+// SetLogOutput sets the log output for the default notepad. Discarded by default.
+func SetLogOutput(handle io.Writer) {
+	defaultNotepad.SetLogOutput(handle)
+	reloadDefaultNotepad()
+}
+
+// SetStdoutThreshold sets the standard output threshold for the default notepad.
+// Error by default (see init).
+func SetStdoutThreshold(threshold Threshold) {
+	defaultNotepad.SetStdoutThreshold(threshold)
+	reloadDefaultNotepad()
+}
+
+// SetPrefix sets the prefix for the default logger. Empty by default.
+func SetPrefix(prefix string) {
+	defaultNotepad.SetPrefix(prefix)
+	reloadDefaultNotepad()
+}
+
+// SetFlags sets the flags for the default logger. "log.Ldate | log.Ltime" by default.
+func SetFlags(flags int) {
+	defaultNotepad.SetFlags(flags)
+	reloadDefaultNotepad()
+}
+
+// LogThreshold returns the current global log threshold.
+func LogThreshold() Threshold {
+	return defaultNotepad.logThreshold
+}
+
+// StdoutThreshold returns the current global stdout threshold.
+func StdoutThreshold() Threshold {
+	return defaultNotepad.stdoutThreshold
+}
+
+// GetLogThreshold returns the Threshold for the log logger.
+func GetLogThreshold() Threshold {
+	return defaultNotepad.GetLogThreshold()
+}
+
+// GetStdoutThreshold returns the Threshold for the stdout logger.
+func GetStdoutThreshold() Threshold {
+	return defaultNotepad.GetStdoutThreshold()
+}
+
+// LogCountForLevel returns the number of log invocations for a given threshold.
+func LogCountForLevel(l Threshold) uint64 {
+	return defaultNotepad.LogCountForLevel(l)
+}
+
+// LogCountForLevelsGreaterThanorEqualTo returns the number of log invocations
+// greater than or equal to a given threshold.
+func LogCountForLevelsGreaterThanorEqualTo(threshold Threshold) uint64 {
+	return defaultNotepad.LogCountForLevelsGreaterThanorEqualTo(threshold)
+}
+
+// ResetLogCounters resets the invocation counters for all levels.
+func ResetLogCounters() {
+	defaultNotepad.ResetLogCounters()
+}
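+
+// A minimal usage sketch, assuming the default thresholds set in init above
+// (log output is discarded until SetLogOutput is given a writer):
+//
+//	SetStdoutThreshold(LevelInfo)
+//	INFO.Println("now visible on stdout")
+//	TRACE.Println("below both thresholds: counted, but written nowhere")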
diff --git a/vendor/github.com/spf13/jwalterweatherman/log_counter.go b/vendor/github.com/spf13/jwalterweatherman/log_counter.go
new file mode 100644
index 000000000..11423ac41
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/log_counter.go
@@ -0,0 +1,55 @@
+// Copyright © 2016 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package jwalterweatherman
+
+import (
+	"sync/atomic"
+)
+
+type logCounter struct {
+	counter uint64
+}
+
+func (c *logCounter) incr() {
+	atomic.AddUint64(&c.counter, 1)
+}
+
+func (c *logCounter) resetCounter() {
+	atomic.StoreUint64(&c.counter, 0)
+}
+
+func (c *logCounter) getCount() uint64 {
+	return atomic.LoadUint64(&c.counter)
+}
+
+func (c *logCounter) Write(p []byte) (n int, err error) {
+	c.incr()
+	return len(p), nil
+}
+
+// LogCountForLevel returns the number of log invocations for a given threshold.
+func (n *Notepad) LogCountForLevel(l Threshold) uint64 {
+	return n.logCounters[l].getCount()
+}
+
+// LogCountForLevelsGreaterThanorEqualTo returns the number of log invocations
+// greater than or equal to a given threshold.
+func (n *Notepad) LogCountForLevelsGreaterThanorEqualTo(threshold Threshold) uint64 {
+	var cnt uint64
+
+	for i := int(threshold); i < len(n.logCounters); i++ {
+		cnt += n.LogCountForLevel(Threshold(i))
+	}
+
+	return cnt
+}
+
+// ResetLogCounters resets the invocation counters for all levels.
+func (n *Notepad) ResetLogCounters() {
+	for _, np := range n.logCounters {
+		np.resetCounter()
+	}
+}
diff --git a/vendor/github.com/spf13/jwalterweatherman/notepad.go b/vendor/github.com/spf13/jwalterweatherman/notepad.go
new file mode 100644
index 000000000..ae5aaf711
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/notepad.go
@@ -0,0 +1,194 @@
+// Copyright © 2016 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package jwalterweatherman
+
+import (
+	"fmt"
+	"io"
+	"log"
+)
+
+type Threshold int
+
+func (t Threshold) String() string {
+	return prefixes[t]
+}
+
+const (
+	LevelTrace Threshold = iota
+	LevelDebug
+	LevelInfo
+	LevelWarn
+	LevelError
+	LevelCritical
+	LevelFatal
+)
+
+var prefixes = map[Threshold]string{
+	LevelTrace:    "TRACE",
+	LevelDebug:    "DEBUG",
+	LevelInfo:     "INFO",
+	LevelWarn:     "WARN",
+	LevelError:    "ERROR",
+	LevelCritical: "CRITICAL",
+	LevelFatal:    "FATAL",
+}
+
+// Notepad is where you leave a note!
+type Notepad struct {
+	TRACE    *log.Logger
+	DEBUG    *log.Logger
+	INFO     *log.Logger
+	WARN     *log.Logger
+	ERROR    *log.Logger
+	CRITICAL *log.Logger
+	FATAL    *log.Logger
+
+	LOG      *log.Logger
+	FEEDBACK *Feedback
+
+	loggers         [7]**log.Logger
+	logHandle       io.Writer
+	outHandle       io.Writer
+	logThreshold    Threshold
+	stdoutThreshold Threshold
+	prefix          string
+	flags           int
+
+	// One per Threshold
+	logCounters [7]*logCounter
+}
+
+// NewNotepad creates a new notepad.
+func NewNotepad(outThreshold Threshold, logThreshold Threshold, outHandle, logHandle io.Writer, prefix string, flags int) *Notepad {
+	n := &Notepad{}
+
+	n.loggers = [7]**log.Logger{&n.TRACE, &n.DEBUG, &n.INFO, &n.WARN, &n.ERROR, &n.CRITICAL, &n.FATAL}
+	n.outHandle = outHandle
+	n.logHandle = logHandle
+	n.stdoutThreshold = outThreshold
+	n.logThreshold = logThreshold
+
+	if len(prefix) != 0 {
+		n.prefix = "[" + prefix + "] "
+	} else {
+		n.prefix = ""
+	}
+
+	n.flags = flags
+
+	n.LOG = log.New(n.logHandle,
+		"LOG:   ",
+		n.flags)
+	n.FEEDBACK = &Feedback{out: log.New(outHandle, "", 0), log: n.LOG}
+
+	n.init()
+	return n
+}
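+
+// For example (logFile stands in for any io.Writer you supply):
+//
+//	n := NewNotepad(LevelInfo, LevelTrace, os.Stdout, logFile, "app", log.Ldate|log.Ltime)
+//	n.WARN.Println("reaches both stdout and logFile")
+//	n.DEBUG.Println("reaches only logFile: below the stdout threshold")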
+
+// init creates the loggers for each level depending on the notepad thresholds.
+func (n *Notepad) init() {
+	logAndOut := io.MultiWriter(n.outHandle, n.logHandle)
+
+	for t, logger := range n.loggers {
+		threshold := Threshold(t)
+		counter := &logCounter{}
+		n.logCounters[t] = counter
+		prefix := n.prefix + threshold.String() + " "
+
+		switch {
+		case threshold >= n.logThreshold && threshold >= n.stdoutThreshold:
+			*logger = log.New(io.MultiWriter(counter, logAndOut), prefix, n.flags)
+
+		case threshold >= n.logThreshold:
+			*logger = log.New(io.MultiWriter(counter, n.logHandle), prefix, n.flags)
+
+		case threshold >= n.stdoutThreshold:
+			*logger = log.New(io.MultiWriter(counter, n.outHandle), prefix, n.flags)
+
+		default:
+			// counter doesn't care about prefix and flags, so don't use them
+			// for performance.
+			*logger = log.New(counter, "", 0)
+		}
+	}
+}
+
+// SetLogThreshold changes the threshold above which messages are written to the
+// log file.
+func (n *Notepad) SetLogThreshold(threshold Threshold) {
+	n.logThreshold = threshold
+	n.init()
+}
+
+// SetLogOutput changes the file where log messages are written.
+func (n *Notepad) SetLogOutput(handle io.Writer) {
+	n.logHandle = handle
+	n.init()
+}
+
+// GetLogThreshold returns the Threshold for the log logger.
+func (n *Notepad) GetLogThreshold() Threshold {
+	return n.logThreshold
+}
+
+// SetStdoutThreshold changes the threshold above which messages are written to the
+// standard output.
+func (n *Notepad) SetStdoutThreshold(threshold Threshold) {
+	n.stdoutThreshold = threshold
+	n.init()
+}
+
+// GetStdoutThreshold returns the Threshold for the stdout logger.
+func (n *Notepad) GetStdoutThreshold() Threshold {
+	return n.stdoutThreshold
+}
+
+// SetPrefix changes the prefix used by the notepad. Prefixes are displayed between
+// brackets at the beginning of the line. An empty prefix won't be displayed at all.
+func (n *Notepad) SetPrefix(prefix string) {
+	if len(prefix) != 0 {
+		n.prefix = "[" + prefix + "] "
+	} else {
+		n.prefix = ""
+	}
+	n.init()
+}
+
+// SetFlags chooses which flags the logger will display (after the prefix and
+// message level). See the standard library's log package for more information.
+func (n *Notepad) SetFlags(flags int) {
+	n.flags = flags
+	n.init()
+}
+
+// Feedback writes plainly to the outHandle while
+// logging with the standard extra information (date, file, etc).
+type Feedback struct {
+	out *log.Logger
+	log *log.Logger
+}
+
+func (fb *Feedback) Println(v ...interface{}) {
+	fb.output(fmt.Sprintln(v...))
+}
+
+func (fb *Feedback) Printf(format string, v ...interface{}) {
+	fb.output(fmt.Sprintf(format, v...))
+}
+
+func (fb *Feedback) Print(v ...interface{}) {
+	fb.output(fmt.Sprint(v...))
+}
+
+func (fb *Feedback) output(s string) {
+	if fb.out != nil {
+		fb.out.Output(2, s)
+	}
+	if fb.log != nil {
+		fb.log.Output(2, s)
+	}
+}
diff --git a/vendor/github.com/spf13/pflag/LICENSE b/vendor/github.com/spf13/pflag/LICENSE
new file mode 100644
index 000000000..63ed1cfea
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 Alex Ogier. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/spf13/pflag/bool.go b/vendor/github.com/spf13/pflag/bool.go
new file mode 100644
index 000000000..c4c5c0bfd
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool.go
@@ -0,0 +1,94 @@
+package pflag
+
+import "strconv"
+
+// optional interface to indicate boolean flags that can be
+// supplied without "=value" text
+type boolFlag interface {
+	Value
+	IsBoolFlag() bool
+}
+
+// -- bool Value
+type boolValue bool
+
+func newBoolValue(val bool, p *bool) *boolValue {
+	*p = val
+	return (*boolValue)(p)
+}
+
+func (b *boolValue) Set(s string) error {
+	v, err := strconv.ParseBool(s)
+	*b = boolValue(v)
+	return err
+}
+
+func (b *boolValue) Type() string {
+	return "bool"
+}
+
+func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) }
+
+func (b *boolValue) IsBoolFlag() bool { return true }
+
+func boolConv(sval string) (interface{}, error) {
+	return strconv.ParseBool(sval)
+}
+
+// GetBool returns the bool value of a flag with the given name.
+func (f *FlagSet) GetBool(name string) (bool, error) {
+	val, err := f.getFlagType(name, "bool", boolConv)
+	if err != nil {
+		return false, err
+	}
+	return val.(bool), nil
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
+	f.BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+	flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage)
+	flag.NoOptDefVal = "true"
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func BoolVar(p *bool, name string, value bool, usage string) {
+	BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+	flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage)
+	flag.NoOptDefVal = "true"
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func (f *FlagSet) Bool(name string, value bool, usage string) *bool {
+	return f.BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool {
+	p := new(bool)
+	f.BoolVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func Bool(name string, value bool, usage string) *bool {
+	return BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func BoolP(name, shorthand string, value bool, usage string) *bool {
+	b := CommandLine.BoolP(name, shorthand, value, usage)
+	return b
+}
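+
+// Illustrative usage on a standalone FlagSet (the flag name is hypothetical):
+//
+//	f := NewFlagSet("demo", ContinueOnError)
+//	v := f.BoolP("verbose", "v", false, "verbose output")
+//	f.Parse([]string{"--verbose"})
+//	// *v == true: NoOptDefVal lets the flag appear without "=value"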
diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go
new file mode 100644
index 000000000..5af02f1a7
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool_slice.go
@@ -0,0 +1,147 @@
+package pflag
+
+import (
+	"io"
+	"strconv"
+	"strings"
+)
+
+// -- boolSlice Value
+type boolSliceValue struct {
+	value   *[]bool
+	changed bool
+}
+
+func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue {
+	bsv := new(boolSliceValue)
+	bsv.value = p
+	*bsv.value = val
+	return bsv
+}
+
+// Set parses the comma-separated boolean argument string and assigns the result as the []bool value of this flag.
+// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended.
+func (s *boolSliceValue) Set(val string) error {
+
+	// remove all quote characters
+	rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+	// read flag arguments with CSV parser
+	boolStrSlice, err := readAsCSV(rmQuote.Replace(val))
+	if err != nil && err != io.EOF {
+		return err
+	}
+
+	// parse boolean values into slice
+	out := make([]bool, 0, len(boolStrSlice))
+	for _, boolStr := range boolStrSlice {
+		b, err := strconv.ParseBool(strings.TrimSpace(boolStr))
+		if err != nil {
+			return err
+		}
+		out = append(out, b)
+	}
+
+	if !s.changed {
+		*s.value = out
+	} else {
+		*s.value = append(*s.value, out...)
+	}
+
+	s.changed = true
+
+	return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *boolSliceValue) Type() string {
+	return "boolSlice"
+}
+
+// String defines a "native" format for this boolean slice flag value.
+func (s *boolSliceValue) String() string {
+
+	boolStrSlice := make([]string, len(*s.value))
+	for i, b := range *s.value {
+		boolStrSlice[i] = strconv.FormatBool(b)
+	}
+
+	out, _ := writeAsCSV(boolStrSlice)
+
+	return "[" + out + "]"
+}
+
+func boolSliceConv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// Empty string would cause a slice with one (empty) entry
+	if len(val) == 0 {
+		return []bool{}, nil
+	}
+	ss := strings.Split(val, ",")
+	out := make([]bool, len(ss))
+	for i, t := range ss {
+		var err error
+		out[i], err = strconv.ParseBool(t)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return out, nil
+}
+
+// GetBoolSlice returns the []bool value of a flag with the given name.
+func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) {
+	val, err := f.getFlagType(name, "boolSlice", boolSliceConv)
+	if err != nil {
+		return []bool{}, err
+	}
+	return val.([]bool), nil
+}
+
+// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string.
+// The argument p points to a []bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
+	f.VarP(newBoolSliceValue(value, p), name, "", usage)
+}
+
+// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
+	f.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
+}
+
+// BoolSliceVar defines a []bool flag with specified name, default value, and usage string.
+// The argument p points to a []bool variable in which to store the value of the flag.
+func BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
+	CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage)
+}
+
+// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
+	CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
+}
+
+// BoolSlice defines a []bool flag with specified name, default value, and usage string.
+// The return value is the address of a []bool variable that stores the value of the flag.
+func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool {
+	p := []bool{}
+	f.BoolSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+	p := []bool{}
+	f.BoolSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// BoolSlice defines a []bool flag with specified name, default value, and usage string.
+// The return value is the address of a []bool variable that stores the value of the flag.
+func BoolSlice(name string, value []bool, usage string) *[]bool {
+	return CommandLine.BoolSliceP(name, "", value, usage)
+}
+
+// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
+func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+	return CommandLine.BoolSliceP(name, shorthand, value, usage)
+}
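+
+// Illustrative: the first Set replaces the default, later Sets append (flag
+// name hypothetical).
+//
+//	f := NewFlagSet("demo", ContinueOnError)
+//	bs := f.BoolSlice("oks", []bool{}, "comma-separated booleans")
+//	f.Parse([]string{"--oks=true,false", "--oks=true"})
+//	// *bs == []bool{true, false, true}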
diff --git a/vendor/github.com/spf13/pflag/bytes.go b/vendor/github.com/spf13/pflag/bytes.go
new file mode 100644
index 000000000..67d530457
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bytes.go
@@ -0,0 +1,209 @@
+package pflag
+
+import (
+	"encoding/base64"
+	"encoding/hex"
+	"fmt"
+	"strings"
+)
+
+// BytesHex adapts []byte for use as a flag. The value of the flag is hex encoded.
+type bytesHexValue []byte
+
+// String implements pflag.Value.String.
+func (bytesHex bytesHexValue) String() string {
+	return fmt.Sprintf("%X", []byte(bytesHex))
+}
+
+// Set implements pflag.Value.Set.
+func (bytesHex *bytesHexValue) Set(value string) error {
+	bin, err := hex.DecodeString(strings.TrimSpace(value))
+
+	if err != nil {
+		return err
+	}
+
+	*bytesHex = bin
+
+	return nil
+}
+
+// Type implements pflag.Value.Type.
+func (*bytesHexValue) Type() string {
+	return "bytesHex"
+}
+
+func newBytesHexValue(val []byte, p *[]byte) *bytesHexValue {
+	*p = val
+	return (*bytesHexValue)(p)
+}
+
+func bytesHexConv(sval string) (interface{}, error) {
+
+	bin, err := hex.DecodeString(sval)
+
+	if err == nil {
+		return bin, nil
+	}
+
+	return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
+}
+
+// GetBytesHex returns the []byte value of a flag with the given name.
+func (f *FlagSet) GetBytesHex(name string) ([]byte, error) {
+	val, err := f.getFlagType(name, "bytesHex", bytesHexConv)
+
+	if err != nil {
+		return []byte{}, err
+	}
+
+	return val.([]byte), nil
+}
+
+// BytesHexVar defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func (f *FlagSet) BytesHexVar(p *[]byte, name string, value []byte, usage string) {
+	f.VarP(newBytesHexValue(value, p), name, "", usage)
+}
+
+// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+	f.VarP(newBytesHexValue(value, p), name, shorthand, usage)
+}
+
+// BytesHexVar defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func BytesHexVar(p *[]byte, name string, value []byte, usage string) {
+	CommandLine.VarP(newBytesHexValue(value, p), name, "", usage)
+}
+
+// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash.
+func BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+	CommandLine.VarP(newBytesHexValue(value, p), name, shorthand, usage)
+}
+
+// BytesHex defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func (f *FlagSet) BytesHex(name string, value []byte, usage string) *[]byte {
+	p := new([]byte)
+	f.BytesHexVarP(p, name, "", value, usage)
+	return p
+}
+
+// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
+	p := new([]byte)
+	f.BytesHexVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// BytesHex defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func BytesHex(name string, value []byte, usage string) *[]byte {
+	return CommandLine.BytesHexP(name, "", value, usage)
+}
+
+// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash.
+func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
+	return CommandLine.BytesHexP(name, shorthand, value, usage)
+}
+
+// BytesBase64 adapts []byte for use as a flag. The value of the flag is Base64 encoded.
+type bytesBase64Value []byte
+
+// String implements pflag.Value.String.
+func (bytesBase64 bytesBase64Value) String() string {
+	return base64.StdEncoding.EncodeToString([]byte(bytesBase64))
+}
+
+// Set implements pflag.Value.Set.
+func (bytesBase64 *bytesBase64Value) Set(value string) error {
+	bin, err := base64.StdEncoding.DecodeString(strings.TrimSpace(value))
+
+	if err != nil {
+		return err
+	}
+
+	*bytesBase64 = bin
+
+	return nil
+}
+
+// Type implements pflag.Value.Type.
+func (*bytesBase64Value) Type() string {
+	return "bytesBase64"
+}
+
+func newBytesBase64Value(val []byte, p *[]byte) *bytesBase64Value {
+	*p = val
+	return (*bytesBase64Value)(p)
+}
+
+func bytesBase64ValueConv(sval string) (interface{}, error) {
+
+	bin, err := base64.StdEncoding.DecodeString(sval)
+	if err == nil {
+		return bin, nil
+	}
+
+	return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
+}
+
+// GetBytesBase64 returns the []byte value of a flag with the given name.
+func (f *FlagSet) GetBytesBase64(name string) ([]byte, error) {
+	val, err := f.getFlagType(name, "bytesBase64", bytesBase64ValueConv)
+
+	if err != nil {
+		return []byte{}, err
+	}
+
+	return val.([]byte), nil
+}
+
+// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func (f *FlagSet) BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
+	f.VarP(newBytesBase64Value(value, p), name, "", usage)
+}
+
+// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+	f.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
+	CommandLine.VarP(newBytesBase64Value(value, p), name, "", usage)
+}
+
+// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+	CommandLine.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func (f *FlagSet) BytesBase64(name string, value []byte, usage string) *[]byte {
+	p := new([]byte)
+	f.BytesBase64VarP(p, name, "", value, usage)
+	return p
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+	p := new([]byte)
+	f.BytesBase64VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func BytesBase64(name string, value []byte, usage string) *[]byte {
+	return CommandLine.BytesBase64P(name, "", value, usage)
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+	return CommandLine.BytesBase64P(name, shorthand, value, usage)
+}
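+
+// Illustrative usage (flag name hypothetical): hex input is decoded into raw
+// bytes, which GetBytesHex can then retrieve by flag name.
+//
+//	f := NewFlagSet("demo", ContinueOnError)
+//	key := f.BytesHex("key", nil, "hex-encoded key")
+//	f.Parse([]string{"--key=DEADBEEF"})
+//	// *key == []byte{0xde, 0xad, 0xbe, 0xef}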
diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go
new file mode 100644
index 000000000..aa126e44d
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/count.go
@@ -0,0 +1,96 @@
+package pflag
+
+import "strconv"
+
+// -- count Value
+type countValue int
+
+func newCountValue(val int, p *int) *countValue {
+	*p = val
+	return (*countValue)(p)
+}
+
+func (i *countValue) Set(s string) error {
+	// "+1" means that no specific value was passed, so increment
+	if s == "+1" {
+		*i = countValue(*i + 1)
+		return nil
+	}
+	v, err := strconv.ParseInt(s, 0, 0)
+	*i = countValue(v)
+	return err
+}
+
+func (i *countValue) Type() string {
+	return "count"
+}
+
+func (i *countValue) String() string { return strconv.Itoa(int(*i)) }
+
+func countConv(sval string) (interface{}, error) {
+	i, err := strconv.Atoi(sval)
+	if err != nil {
+		return nil, err
+	}
+	return i, nil
+}
+
+// GetCount returns the int value of a flag with the given name.
+func (f *FlagSet) GetCount(name string) (int, error) {
+	val, err := f.getFlagType(name, "count", countConv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(int), nil
+}
+
+// CountVar defines a count flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line.
+func (f *FlagSet) CountVar(p *int, name string, usage string) {
+	f.CountVarP(p, name, "", usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) {
+	flag := f.VarPF(newCountValue(0, p), name, shorthand, usage)
+	flag.NoOptDefVal = "+1"
+}
+
+// CountVar is like FlagSet.CountVar, but the flag is placed on the CommandLine instead of a given flag set.
+func CountVar(p *int, name string, usage string) {
+	CommandLine.CountVar(p, name, usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func CountVarP(p *int, name, shorthand string, usage string) {
+	CommandLine.CountVarP(p, name, shorthand, usage)
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line.
+func (f *FlagSet) Count(name string, usage string) *int {
+	p := new(int)
+	f.CountVarP(p, name, "", usage)
+	return p
+}
+
+// CountP is like Count, but takes a shorthand for the flag name.
+func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
+	p := new(int)
+	f.CountVarP(p, name, shorthand, usage)
+	return p
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line.
+func Count(name string, usage string) *int {
+	return CommandLine.CountP(name, "", usage)
+}
+
+// CountP is like Count, but takes a shorthand for the flag name.
+func CountP(name, shorthand string, usage string) *int {
+	return CommandLine.CountP(name, shorthand, usage)
+}
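+
+// Illustrative: repeated occurrences accumulate, so stacked shorthands work
+// as a verbosity knob (flag name hypothetical).
+//
+//	f := NewFlagSet("demo", ContinueOnError)
+//	v := f.CountP("verbose", "v", "verbosity level")
+//	f.Parse([]string{"-vvv"})
+//	// *v == 3: each occurrence passes NoOptDefVal ("+1") to Set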
diff --git a/vendor/github.com/spf13/pflag/duration.go b/vendor/github.com/spf13/pflag/duration.go
new file mode 100644
index 000000000..e9debef88
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/duration.go
@@ -0,0 +1,86 @@
+package pflag
+
+import (
+	"time"
+)
+
+// -- time.Duration Value
+type durationValue time.Duration
+
+func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
+	*p = val
+	return (*durationValue)(p)
+}
+
+func (d *durationValue) Set(s string) error {
+	v, err := time.ParseDuration(s)
+	*d = durationValue(v)
+	return err
+}
+
+func (d *durationValue) Type() string {
+	return "duration"
+}
+
+func (d *durationValue) String() string { return (*time.Duration)(d).String() }
+
+func durationConv(sval string) (interface{}, error) {
+	return time.ParseDuration(sval)
+}
+
+// GetDuration returns the duration value of a flag with the given name.
+func (f *FlagSet) GetDuration(name string) (time.Duration, error) {
+	val, err := f.getFlagType(name, "duration", durationConv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(time.Duration), nil
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+	f.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+	f.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+	CommandLine.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+	CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration {
+	p := new(time.Duration)
+	f.DurationVarP(p, name, "", value, usage)
+	return p
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+	p := new(time.Duration)
+	f.DurationVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func Duration(name string, value time.Duration, usage string) *time.Duration {
+	return CommandLine.DurationP(name, "", value, usage)
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+	return CommandLine.DurationP(name, shorthand, value, usage)
+}
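+
+// Illustrative: values are parsed with time.ParseDuration, so compound units
+// are accepted (flag name hypothetical).
+//
+//	f := NewFlagSet("demo", ContinueOnError)
+//	d := f.Duration("timeout", 5*time.Second, "request timeout")
+//	f.Parse([]string{"--timeout=1m30s"})
+//	// *d == 90 * time.Second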
diff --git a/vendor/github.com/spf13/pflag/duration_slice.go b/vendor/github.com/spf13/pflag/duration_slice.go
new file mode 100644
index 000000000..52c6b6dc1
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/duration_slice.go
@@ -0,0 +1,128 @@
+package pflag
+
+import (
+	"fmt"
+	"strings"
+	"time"
+)
+
+// -- durationSlice Value
+type durationSliceValue struct {
+	value   *[]time.Duration
+	changed bool
+}
+
+func newDurationSliceValue(val []time.Duration, p *[]time.Duration) *durationSliceValue {
+	dsv := new(durationSliceValue)
+	dsv.value = p
+	*dsv.value = val
+	return dsv
+}
+
+func (s *durationSliceValue) Set(val string) error {
+	ss := strings.Split(val, ",")
+	out := make([]time.Duration, len(ss))
+	for i, d := range ss {
+		var err error
+		out[i], err = time.ParseDuration(d)
+		if err != nil {
+			return err
+		}
+
+	}
+	if !s.changed {
+		*s.value = out
+	} else {
+		*s.value = append(*s.value, out...)
+	}
+	s.changed = true
+	return nil
+}
+
+func (s *durationSliceValue) Type() string {
+	return "durationSlice"
+}
+
+func (s *durationSliceValue) String() string {
+	out := make([]string, len(*s.value))
+	for i, d := range *s.value {
+		out[i] = fmt.Sprintf("%s", d)
+	}
+	return "[" + strings.Join(out, ",") + "]"
+}
+
+func durationSliceConv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// Empty string would cause a slice with one (empty) entry
+	if len(val) == 0 {
+		return []time.Duration{}, nil
+	}
+	ss := strings.Split(val, ",")
+	out := make([]time.Duration, len(ss))
+	for i, d := range ss {
+		var err error
+		out[i], err = time.ParseDuration(d)
+		if err != nil {
+			return nil, err
+		}
+
+	}
+	return out, nil
+}
+
+// GetDurationSlice returns the []time.Duration value of a flag with the given name.
+func (f *FlagSet) GetDurationSlice(name string) ([]time.Duration, error) {
+	val, err := f.getFlagType(name, "durationSlice", durationSliceConv)
+	if err != nil {
+		return []time.Duration{}, err
+	}
+	return val.([]time.Duration), nil
+}
+
+// DurationSliceVar defines a durationSlice flag with specified name, default value, and usage string.
+// The argument p points to a []time.Duration variable in which to store the value of the flag.
+func (f *FlagSet) DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) {
+	f.VarP(newDurationSliceValue(value, p), name, "", usage)
+}
+
+// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) {
+	f.VarP(newDurationSliceValue(value, p), name, shorthand, usage)
+}
+
+// DurationSliceVar defines a []time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a []time.Duration variable in which to store the value of the flag.
+func DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) {
+	CommandLine.VarP(newDurationSliceValue(value, p), name, "", usage)
+}
+
+// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) {
+	CommandLine.VarP(newDurationSliceValue(value, p), name, shorthand, usage)
+}
+
+// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a []time.Duration variable that stores the value of the flag.
+func (f *FlagSet) DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration {
+	p := []time.Duration{}
+	f.DurationSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration {
+	p := []time.Duration{}
+	f.DurationSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a []time.Duration variable that stores the value of the flag.
+func DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration {
+	return CommandLine.DurationSliceP(name, "", value, usage)
+}
+
+// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash.
+func DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration {
+	return CommandLine.DurationSliceP(name, shorthand, value, usage)
+}
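A behavior of durationSliceValue.Set worth noting: the first call replaces the
default, and later calls append, so a comma-separated list and a repeated flag
compose. A hedged sketch (the --retry flag is invented for illustration):

	package main

	import (
		"fmt"
		"time"

		flag "github.com/spf13/pflag"
	)

	func main() {
		// "--retry=1s,2s --retry=5s" yields [1s 2s 5s]: the first Set call
		// replaces the default, the second appends.
		retries := flag.DurationSlice("retry", []time.Duration{time.Second}, "retry intervals")
		flag.Parse()
		fmt.Println(*retries)
	}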
diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go
new file mode 100644
index 000000000..5cc710ccd
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/flag.go
@@ -0,0 +1,1224 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the GNU extensions to the POSIX recommendations
+for command-line options. See
+http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+Usage:
+
+pflag is a drop-in replacement for Go's native flag package. If you import
+pflag under the name "flag" then all code should continue to function
+with no changes.
+
+	import flag "github.com/spf13/pflag"
+
+There is one exception to this: if you directly instantiate the Flag struct
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+	var ip = flag.Int("flagname", 1234, "help message for flagname")
+If you like, you can bind the flag to a variable using the Var() functions.
+	var flagvar int
+	func init() {
+		flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+	}
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+	flag.Var(&flagVal, "name", "help message for flagname")
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+	flag.Parse()
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+	fmt.Println("ip has value ", *ip)
+	fmt.Println("flagvar has value ", flagvar)
+
+After parsing, the arguments following the flags are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+	var ip = flag.IntP("flagname", "f", 1234, "help message")
+	var flagvar bool
+	func init() {
+		flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+	}
+	flag.VarP(&flagVal, "varname", "v", "help message")
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+Command line flag syntax:
+	--flag    // boolean flags only
+	--flag=x
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags.
+	// boolean flags
+	-f
+	-abc
+	// non-boolean flags
+	-n 1234
+	-Ifile
+	// mixed
+	-abcs "hello"
+	-abcn1234
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
+
+The default set of command-line flags is controlled by
+top-level functions.  The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+*/
+package pflag
+
+import (
+	"bytes"
+	"errors"
+	goflag "flag"
+	"fmt"
+	"io"
+	"os"
+	"sort"
+	"strings"
+)
+
+// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
+var ErrHelp = errors.New("pflag: help requested")
+
+// ErrorHandling defines how to handle flag parsing errors.
+type ErrorHandling int
+
+const (
+	// ContinueOnError will return an err from Parse() if an error is found
+	ContinueOnError ErrorHandling = iota
+	// ExitOnError will call os.Exit(2) if an error is found when parsing
+	ExitOnError
+	// PanicOnError will panic() if an error is found when parsing flags
+	PanicOnError
+)
+
+// ParseErrorsWhitelist defines the parsing errors that can be ignored
+type ParseErrorsWhitelist struct {
+	// UnknownFlags will ignore unknown-flag errors and continue parsing the rest of the flags
+	UnknownFlags bool
+}
+
+// NormalizedName is a flag name that has been normalized according to rules
+// for the FlagSet (e.g. making '-' and '_' equivalent).
+type NormalizedName string
+
+// A FlagSet represents a set of defined flags.
+type FlagSet struct {
+	// Usage is the function called when an error occurs while parsing flags.
+	// The field is a function (not a method) that may be changed to point to
+	// a custom error handler.
+	Usage func()
+
+	// SortFlags indicates whether flags should be sorted in
+	// help/usage messages.
+	SortFlags bool
+
+	// ParseErrorsWhitelist is used to configure a whitelist of errors
+	ParseErrorsWhitelist ParseErrorsWhitelist
+
+	name              string
+	parsed            bool
+	actual            map[NormalizedName]*Flag
+	orderedActual     []*Flag
+	sortedActual      []*Flag
+	formal            map[NormalizedName]*Flag
+	orderedFormal     []*Flag
+	sortedFormal      []*Flag
+	shorthands        map[byte]*Flag
+	args              []string // arguments after flags
+	argsLenAtDash     int      // len(args) when a '--' was located when parsing, or -1 if no --
+	errorHandling     ErrorHandling
+	output            io.Writer // nil means stderr; use out() accessor
+	interspersed      bool      // allow interspersed option/non-option args
+	normalizeNameFunc func(f *FlagSet, name string) NormalizedName
+
+	addedGoFlagSets []*goflag.FlagSet
+}
+
+// A Flag represents the state of a flag.
+type Flag struct {
+	Name                string              // name as it appears on command line
+	Shorthand           string              // one-letter abbreviated flag
+	Usage               string              // help message
+	Value               Value               // value as set
+	DefValue            string              // default value (as text); for usage message
+	Changed             bool                // true once the user has set the value (false while left at the default)
+	NoOptDefVal         string              // default value (as text); used if the flag is on the command line without any options
+	Deprecated          string              // If this flag is deprecated, this string names what to use instead
+	Hidden              bool                // used by cobra.Command to allow flags to be hidden from help/usage text
+	ShorthandDeprecated string              // If the shorthand of this flag is deprecated, this string names what to use instead
+	Annotations         map[string][]string // used by cobra.Command bash autocompletion code
+}
+
+// Value is the interface to the dynamic value stored in a flag.
+// (The default value is represented as a string.)
+type Value interface {
+	String() string
+	Set(string) error
+	Type() string
+}
+
+// sortFlags returns the flags as a slice in lexicographical sorted order.
+func sortFlags(flags map[NormalizedName]*Flag) []*Flag {
+	list := make(sort.StringSlice, len(flags))
+	i := 0
+	for k := range flags {
+		list[i] = string(k)
+		i++
+	}
+	list.Sort()
+	result := make([]*Flag, len(list))
+	for i, name := range list {
+		result[i] = flags[NormalizedName(name)]
+	}
+	return result
+}
+
+// SetNormalizeFunc allows you to add a function which can translate flag names.
+// Flags added to the FlagSet will be translated and then when anything tries to
+// look up the flag that will also be translated. So it would be possible to create
+// a flag named "getURL" and have it translated to "geturl".  A user could then pass
+// "--getUrl" which may also be translated to "geturl" and everything will work.
+func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) {
+	f.normalizeNameFunc = n
+	f.sortedFormal = f.sortedFormal[:0]
+	for fname, flag := range f.formal {
+		nname := f.normalizeFlagName(flag.Name)
+		if fname == nname {
+			continue
+		}
+		flag.Name = string(nname)
+		delete(f.formal, fname)
+		f.formal[nname] = flag
+		if _, set := f.actual[fname]; set {
+			delete(f.actual, fname)
+			f.actual[nname] = flag
+		}
+	}
+}
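// Illustrative sketch (assumed usage, not part of the patch): a normalizer
// that makes "-" and "_" interchangeable in flag names, in the spirit of the
// getURL/geturl example above. wordSepNormalizeFunc is a made-up name.
//
//	func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
//		return pflag.NormalizedName(strings.Replace(name, "_", "-", -1))
//	}
//
//	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
//	fs.SetNormalizeFunc(wordSepNormalizeFunc)
//	fs.Bool("my-flag", false, "demo flag")
//	_ = fs.Parse([]string{"--my_flag"}) // resolves to my-flag after normalization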
+
+// GetNormalizeFunc returns the previously set NormalizeFunc, or a function
+// that performs no translation if none has been set.
+func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName {
+	if f.normalizeNameFunc != nil {
+		return f.normalizeNameFunc
+	}
+	return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) }
+}
+
+func (f *FlagSet) normalizeFlagName(name string) NormalizedName {
+	n := f.GetNormalizeFunc()
+	return n(f, name)
+}
+
+func (f *FlagSet) out() io.Writer {
+	if f.output == nil {
+		return os.Stderr
+	}
+	return f.output
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+func (f *FlagSet) SetOutput(output io.Writer) {
+	f.output = output
+}
+
+// VisitAll visits the flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits all flags, even those not set.
+func (f *FlagSet) VisitAll(fn func(*Flag)) {
+	if len(f.formal) == 0 {
+		return
+	}
+
+	var flags []*Flag
+	if f.SortFlags {
+		if len(f.formal) != len(f.sortedFormal) {
+			f.sortedFormal = sortFlags(f.formal)
+		}
+		flags = f.sortedFormal
+	} else {
+		flags = f.orderedFormal
+	}
+
+	for _, flag := range flags {
+		fn(flag)
+	}
+}
+
+// HasFlags returns a bool to indicate if the FlagSet has any flags defined.
+func (f *FlagSet) HasFlags() bool {
+	return len(f.formal) > 0
+}
+
+// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags
+// that are not hidden.
+func (f *FlagSet) HasAvailableFlags() bool {
+	for _, flag := range f.formal {
+		if !flag.Hidden {
+			return true
+		}
+	}
+	return false
+}
+
+// VisitAll visits the command-line flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits all flags, even those not set.
+func VisitAll(fn func(*Flag)) {
+	CommandLine.VisitAll(fn)
+}
+
+// Visit visits the flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits only those flags that have been set.
+func (f *FlagSet) Visit(fn func(*Flag)) {
+	if len(f.actual) == 0 {
+		return
+	}
+
+	var flags []*Flag
+	if f.SortFlags {
+		if len(f.actual) != len(f.sortedActual) {
+			f.sortedActual = sortFlags(f.actual)
+		}
+		flags = f.sortedActual
+	} else {
+		flags = f.orderedActual
+	}
+
+	for _, flag := range flags {
+		fn(flag)
+	}
+}
+
+// Visit visits the command-line flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits only those flags that have been set.
+func Visit(fn func(*Flag)) {
+	CommandLine.Visit(fn)
+}
+
+// Lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) Lookup(name string) *Flag {
+	return f.lookup(f.normalizeFlagName(name))
+}
+
+// ShorthandLookup returns the Flag structure of the shorthand flag,
+// returning nil if none exists.
+// It panics if len(name) > 1.
+func (f *FlagSet) ShorthandLookup(name string) *Flag {
+	if name == "" {
+		return nil
+	}
+	if len(name) > 1 {
+		msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name)
+		fmt.Fprintf(f.out(), msg)
+		panic(msg)
+	}
+	c := name[0]
+	return f.shorthands[c]
+}
+
+// lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) lookup(name NormalizedName) *Flag {
+	return f.formal[name]
+}
+
+// getFlagType looks up the named flag, checks that its Value.Type() matches ftype, and returns the value converted via convFunc.
+func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) {
+	flag := f.Lookup(name)
+	if flag == nil {
+		err := fmt.Errorf("flag accessed but not defined: %s", name)
+		return nil, err
+	}
+
+	if flag.Value.Type() != ftype {
+		err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type())
+		return nil, err
+	}
+
+	sval := flag.Value.String()
+	result, err := convFunc(sval)
+	if err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// ArgsLenAtDash will return the length of f.Args at the moment when a -- was
+// found during arg parsing. This allows your program to know which args were
+// before the -- and which came after.
+func (f *FlagSet) ArgsLenAtDash() int {
+	return f.argsLenAtDash
+}
+
+// MarkDeprecated indicates that a flag is deprecated in your program. It will
+// continue to function but will not show up in help or usage messages. Using
+// this flag will also print the given usageMessage.
+func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
+	flag := f.Lookup(name)
+	if flag == nil {
+		return fmt.Errorf("flag %q does not exist", name)
+	}
+	if usageMessage == "" {
+		return fmt.Errorf("deprecated message for flag %q must be set", name)
+	}
+	flag.Deprecated = usageMessage
+	flag.Hidden = true
+	return nil
+}
+
+// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your
+// program. It will continue to function but will not show up in help or usage
+// messages. Using this flag will also print the given usageMessage.
+func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error {
+	flag := f.Lookup(name)
+	if flag == nil {
+		return fmt.Errorf("flag %q does not exist", name)
+	}
+	if usageMessage == "" {
+		return fmt.Errorf("deprecated message for flag %q must be set", name)
+	}
+	flag.ShorthandDeprecated = usageMessage
+	return nil
+}
+
+// MarkHidden sets a flag to 'hidden' in your program. It will continue to
+// function but will not show up in help or usage messages.
+func (f *FlagSet) MarkHidden(name string) error {
+	flag := f.Lookup(name)
+	if flag == nil {
+		return fmt.Errorf("flag %q does not exist", name)
+	}
+	flag.Hidden = true
+	return nil
+}
+
+// Lookup returns the Flag structure of the named command-line flag,
+// returning nil if none exists.
+func Lookup(name string) *Flag {
+	return CommandLine.Lookup(name)
+}
+
+// ShorthandLookup returns the Flag structure of the shorthand flag,
+// returning nil if none exists.
+func ShorthandLookup(name string) *Flag {
+	return CommandLine.ShorthandLookup(name)
+}
+
+// Set sets the value of the named flag.
+func (f *FlagSet) Set(name, value string) error {
+	normalName := f.normalizeFlagName(name)
+	flag, ok := f.formal[normalName]
+	if !ok {
+		return fmt.Errorf("no such flag -%v", name)
+	}
+
+	err := flag.Value.Set(value)
+	if err != nil {
+		var flagName string
+		if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
+			flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name)
+		} else {
+			flagName = fmt.Sprintf("--%s", flag.Name)
+		}
+		return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err)
+	}
+
+	if !flag.Changed {
+		if f.actual == nil {
+			f.actual = make(map[NormalizedName]*Flag)
+		}
+		f.actual[normalName] = flag
+		f.orderedActual = append(f.orderedActual, flag)
+
+		flag.Changed = true
+	}
+
+	if flag.Deprecated != "" {
+		fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
+	}
+	return nil
+}
+
+// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet.
+// This is sometimes used by spf13/cobra programs which want to generate additional
+// bash completion information.
+func (f *FlagSet) SetAnnotation(name, key string, values []string) error {
+	normalName := f.normalizeFlagName(name)
+	flag, ok := f.formal[normalName]
+	if !ok {
+		return fmt.Errorf("no such flag -%v", name)
+	}
+	if flag.Annotations == nil {
+		flag.Annotations = map[string][]string{}
+	}
+	flag.Annotations[key] = values
+	return nil
+}
+
+// Changed returns true if the flag was explicitly set during Parse() and false
+// otherwise
+func (f *FlagSet) Changed(name string) bool {
+	flag := f.Lookup(name)
+	// If a flag doesn't exist, it wasn't changed....
+	if flag == nil {
+		return false
+	}
+	return flag.Changed
+}
+
+// Set sets the value of the named command-line flag.
+func Set(name, value string) error {
+	return CommandLine.Set(name, value)
+}
+
+// PrintDefaults prints, to standard error unless configured
+// otherwise, the default values of all defined flags in the set.
+func (f *FlagSet) PrintDefaults() {
+	usages := f.FlagUsages()
+	fmt.Fprint(f.out(), usages)
+}
+
+// defaultIsZeroValue returns true if the default value for this flag represents
+// a zero value.
+func (f *Flag) defaultIsZeroValue() bool {
+	switch f.Value.(type) {
+	case boolFlag:
+		return f.DefValue == "false"
+	case *durationValue:
+		// Beginning in Go 1.7, duration zero values are "0s"
+		return f.DefValue == "0" || f.DefValue == "0s"
+	case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value:
+		return f.DefValue == "0"
+	case *stringValue:
+		return f.DefValue == ""
+	case *ipValue, *ipMaskValue, *ipNetValue:
+		return f.DefValue == ""
+	case *intSliceValue, *stringSliceValue, *stringArrayValue:
+		return f.DefValue == "[]"
+	default:
+		switch f.Value.String() {
+		case "false":
+			return true
+		case "":
+			return true
+		case "":
+			return true
+		case "0":
+			return true
+		}
+		return false
+	}
+}
+
+// UnquoteUsage extracts a back-quoted name from the usage
+// string for a flag and returns it and the un-quoted usage.
+// Given "a `name` to show" it returns ("name", "a name to show").
+// If there are no back quotes, the name is an educated guess of the
+// type of the flag's value, or the empty string if the flag is boolean.
+func UnquoteUsage(flag *Flag) (name string, usage string) {
+	// Look for a back-quoted name, but avoid the strings package.
+	usage = flag.Usage
+	for i := 0; i < len(usage); i++ {
+		if usage[i] == '`' {
+			for j := i + 1; j < len(usage); j++ {
+				if usage[j] == '`' {
+					name = usage[i+1 : j]
+					usage = usage[:i] + name + usage[j+1:]
+					return name, usage
+				}
+			}
+			break // Only one back quote; use type name.
+		}
+	}
+
+	name = flag.Value.Type()
+	switch name {
+	case "bool":
+		name = ""
+	case "float64":
+		name = "float"
+	case "int64":
+		name = "int"
+	case "uint64":
+		name = "uint"
+	case "stringSlice":
+		name = "strings"
+	case "intSlice":
+		name = "ints"
+	case "uintSlice":
+		name = "uints"
+	case "boolSlice":
+		name = "bools"
+	}
+
+	return
+}
+
+// Splits the string `s` on whitespace into an initial substring up to
+// `i` runes in length and the remainder. Will go `slop` over `i` if
+// that encompasses the entire string (which allows the caller to
+// avoid short orphan words on the final line).
+func wrapN(i, slop int, s string) (string, string) {
+	if i+slop > len(s) {
+		return s, ""
+	}
+
+	w := strings.LastIndexAny(s[:i], " \t\n")
+	if w <= 0 {
+		return s, ""
+	}
+	nlPos := strings.LastIndex(s[:i], "\n")
+	if nlPos > 0 && nlPos < w {
+		return s[:nlPos], s[nlPos+1:]
+	}
+	return s[:w], s[w+1:]
+}
+
+// Wraps the string `s` to a maximum width `w` with leading indent
+// `i`. The first line is not indented (this is assumed to be done by
+// caller). Pass `w` == 0 to do no wrapping
+func wrap(i, w int, s string) string {
+	if w == 0 {
+		return strings.Replace(s, "\n", "\n"+strings.Repeat(" ", i), -1)
+	}
+
+	// space between indent i and end of line width w into which
+	// we should wrap the text.
+	wrap := w - i
+
+	var r, l string
+
+	// Not enough space for sensible wrapping. Wrap as a block on
+	// the next line instead.
+	if wrap < 24 {
+		i = 16
+		wrap = w - i
+		r += "\n" + strings.Repeat(" ", i)
+	}
+	// If still not enough space then don't even try to wrap.
+	if wrap < 24 {
+		return strings.Replace(s, "\n", r, -1)
+	}
+
+	// Try to avoid short orphan words on the final line, by
+	// allowing wrapN to go a bit over if that would fit in the
+	// remainder of the line.
+	slop := 5
+	wrap = wrap - slop
+
+	// Handle first line, which is indented by the caller (or the
+	// special case above)
+	l, s = wrapN(wrap, slop, s)
+	r = r + strings.Replace(l, "\n", "\n"+strings.Repeat(" ", i), -1)
+
+	// Now wrap the rest
+	for s != "" {
+		var t string
+
+		t, s = wrapN(wrap, slop, s)
+		r = r + "\n" + strings.Repeat(" ", i) + strings.Replace(t, "\n", "\n"+strings.Repeat(" ", i), -1)
+	}
+
+	return r
+
+}
+
+// FlagUsagesWrapped returns a string containing the usage information
+// for all flags in the FlagSet. Wrapped to `cols` columns (0 for no
+// wrapping)
+func (f *FlagSet) FlagUsagesWrapped(cols int) string {
+	buf := new(bytes.Buffer)
+
+	lines := make([]string, 0, len(f.formal))
+
+	maxlen := 0
+	f.VisitAll(func(flag *Flag) {
+		if flag.Hidden {
+			return
+		}
+
+		line := ""
+		if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
+			line = fmt.Sprintf("  -%s, --%s", flag.Shorthand, flag.Name)
+		} else {
+			line = fmt.Sprintf("      --%s", flag.Name)
+		}
+
+		varname, usage := UnquoteUsage(flag)
+		if varname != "" {
+			line += " " + varname
+		}
+		if flag.NoOptDefVal != "" {
+			switch flag.Value.Type() {
+			case "string":
+				line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal)
+			case "bool":
+				if flag.NoOptDefVal != "true" {
+					line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+				}
+			case "count":
+				if flag.NoOptDefVal != "+1" {
+					line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+				}
+			default:
+				line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+			}
+		}
+
+		// This special character will be replaced with spacing once the
+		// correct alignment is calculated
+		line += "\x00"
+		if len(line) > maxlen {
+			maxlen = len(line)
+		}
+
+		line += usage
+		if !flag.defaultIsZeroValue() {
+			if flag.Value.Type() == "string" {
+				line += fmt.Sprintf(" (default %q)", flag.DefValue)
+			} else {
+				line += fmt.Sprintf(" (default %s)", flag.DefValue)
+			}
+		}
+		if len(flag.Deprecated) != 0 {
+			line += fmt.Sprintf(" (DEPRECATED: %s)", flag.Deprecated)
+		}
+
+		lines = append(lines, line)
+	})
+
+	for _, line := range lines {
+		sidx := strings.Index(line, "\x00")
+		spacing := strings.Repeat(" ", maxlen-sidx)
+		// maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx
+		fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:]))
+	}
+
+	return buf.String()
+}
+
+// FlagUsages returns a string containing the usage information for all flags in
+// the FlagSet
+func (f *FlagSet) FlagUsages() string {
+	return f.FlagUsagesWrapped(0)
+}
+
+// PrintDefaults prints to standard error the default values of all defined command-line flags.
+func PrintDefaults() {
+	CommandLine.PrintDefaults()
+}
+
+// defaultUsage is the default function to print a usage message.
+func defaultUsage(f *FlagSet) {
+	fmt.Fprintf(f.out(), "Usage of %s:\n", f.name)
+	f.PrintDefaults()
+}
+
+// NOTE: Usage is not just defaultUsage(CommandLine)
+// because it serves (via godoc flag Usage) as the example
+// for how to write your own usage function.
+
+// Usage prints to standard error a usage message documenting all defined command-line flags.
+// The function is a variable that may be changed to point to a custom function.
+// By default it prints a simple header and calls PrintDefaults; for details about the
+// format of the output and how to control it, see the documentation for PrintDefaults.
+var Usage = func() {
+	fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+	PrintDefaults()
+}
+
+// NFlag returns the number of flags that have been set.
+func (f *FlagSet) NFlag() int { return len(f.actual) }
+
+// NFlag returns the number of command-line flags that have been set.
+func NFlag() int { return len(CommandLine.actual) }
+
+// Arg returns the i'th argument.  Arg(0) is the first remaining argument
+// after flags have been processed.
+func (f *FlagSet) Arg(i int) string {
+	if i < 0 || i >= len(f.args) {
+		return ""
+	}
+	return f.args[i]
+}
+
+// Arg returns the i'th command-line argument.  Arg(0) is the first remaining argument
+// after flags have been processed.
+func Arg(i int) string {
+	return CommandLine.Arg(i)
+}
+
+// NArg is the number of arguments remaining after flags have been processed.
+func (f *FlagSet) NArg() int { return len(f.args) }
+
+// NArg is the number of arguments remaining after flags have been processed.
+func NArg() int { return len(CommandLine.args) }
+
+// Args returns the non-flag arguments.
+func (f *FlagSet) Args() []string { return f.args }
+
+// Args returns the non-flag command-line arguments.
+func Args() []string { return CommandLine.args }
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func (f *FlagSet) Var(value Value, name string, usage string) {
+	f.VarP(value, name, "", usage)
+}
+
+// VarPF is like VarP, but returns the flag created
+func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag {
+	// Remember the default value as a string; it won't change.
+	flag := &Flag{
+		Name:      name,
+		Shorthand: shorthand,
+		Usage:     usage,
+		Value:     value,
+		DefValue:  value.String(),
+	}
+	f.AddFlag(flag)
+	return flag
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) VarP(value Value, name, shorthand, usage string) {
+	f.VarPF(value, name, shorthand, usage)
+}
+
+// AddFlag will add the flag to the FlagSet
+func (f *FlagSet) AddFlag(flag *Flag) {
+	normalizedFlagName := f.normalizeFlagName(flag.Name)
+
+	_, alreadyThere := f.formal[normalizedFlagName]
+	if alreadyThere {
+		msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name)
+		fmt.Fprintln(f.out(), msg)
+		panic(msg) // Happens only if flags are declared with identical names
+	}
+	if f.formal == nil {
+		f.formal = make(map[NormalizedName]*Flag)
+	}
+
+	flag.Name = string(normalizedFlagName)
+	f.formal[normalizedFlagName] = flag
+	f.orderedFormal = append(f.orderedFormal, flag)
+
+	if flag.Shorthand == "" {
+		return
+	}
+	if len(flag.Shorthand) > 1 {
+		msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand)
+		fmt.Fprintf(f.out(), msg)
+		panic(msg)
+	}
+	if f.shorthands == nil {
+		f.shorthands = make(map[byte]*Flag)
+	}
+	c := flag.Shorthand[0]
+	used, alreadyThere := f.shorthands[c]
+	if alreadyThere {
+		msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name)
+		fmt.Fprintf(f.out(), msg)
+		panic(msg)
+	}
+	f.shorthands[c] = flag
+}
+
+// AddFlagSet adds one FlagSet to another. If a flag is already present in f
+// the flag from newSet will be ignored.
+func (f *FlagSet) AddFlagSet(newSet *FlagSet) {
+	if newSet == nil {
+		return
+	}
+	newSet.VisitAll(func(flag *Flag) {
+		if f.Lookup(flag.Name) == nil {
+			f.AddFlag(flag)
+		}
+	})
+}
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func Var(value Value, name string, usage string) {
+	CommandLine.VarP(value, name, "", usage)
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func VarP(value Value, name, shorthand, usage string) {
+	CommandLine.VarP(value, name, shorthand, usage)
+}
+
+// failf prints to standard error a formatted error and usage message and
+// returns the error.
+func (f *FlagSet) failf(format string, a ...interface{}) error {
+	err := fmt.Errorf(format, a...)
+	if f.errorHandling != ContinueOnError {
+		fmt.Fprintln(f.out(), err)
+		f.usage()
+	}
+	return err
+}
+
+// usage calls the Usage method for the flag set, or the usage function if
+// the flag set is CommandLine.
+func (f *FlagSet) usage() {
+	if f == CommandLine {
+		Usage()
+	} else if f.Usage == nil {
+		defaultUsage(f)
+	} else {
+		f.Usage()
+	}
+}
+
+//--unknown (args will be empty)
+//--unknown --next-flag ... (args will be --next-flag ...)
+//--unknown arg ... (args will be arg ...)
+func stripUnknownFlagValue(args []string) []string {
+	if len(args) == 0 {
+		//--unknown
+		return args
+	}
+
+	first := args[0]
+	if first[0] == '-' {
+		//--unknown --next-flag ...
+		return args
+	}
+
+	//--unknown arg ... (args will be arg ...)
+	return args[1:]
+}
+
+func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) {
+	a = args
+	name := s[2:]
+	if len(name) == 0 || name[0] == '-' || name[0] == '=' {
+		err = f.failf("bad flag syntax: %s", s)
+		return
+	}
+
+	split := strings.SplitN(name, "=", 2)
+	name = split[0]
+	flag, exists := f.formal[f.normalizeFlagName(name)]
+
+	if !exists {
+		switch {
+		case name == "help":
+			f.usage()
+			return a, ErrHelp
+		case f.ParseErrorsWhitelist.UnknownFlags:
+			// --unknown=unknownval arg ...
+			// we do not want to lose arg in this case
+			if len(split) >= 2 {
+				return a, nil
+			}
+
+			return stripUnknownFlagValue(a), nil
+		default:
+			err = f.failf("unknown flag: --%s", name)
+			return
+		}
+	}
+
+	var value string
+	if len(split) == 2 {
+		// '--flag=arg'
+		value = split[1]
+	} else if flag.NoOptDefVal != "" {
+		// '--flag' (arg was optional)
+		value = flag.NoOptDefVal
+	} else if len(a) > 0 {
+		// '--flag arg'
+		value = a[0]
+		a = a[1:]
+	} else {
+		// '--flag' (arg was required)
+		err = f.failf("flag needs an argument: %s", s)
+		return
+	}
+
+	err = fn(flag, value)
+	if err != nil {
+		f.failf(err.Error())
+	}
+	return
+}
+
+func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) {
+	outArgs = args
+
+	if strings.HasPrefix(shorthands, "test.") {
+		return
+	}
+
+	outShorts = shorthands[1:]
+	c := shorthands[0]
+
+	flag, exists := f.shorthands[c]
+	if !exists {
+		switch {
+		case c == 'h':
+			f.usage()
+			err = ErrHelp
+			return
+		case f.ParseErrorsWhitelist.UnknownFlags:
+			// '-f=arg arg ...'
+			// we do not want to lose arg in this case
+			if len(shorthands) > 2 && shorthands[1] == '=' {
+				outShorts = ""
+				return
+			}
+
+			outArgs = stripUnknownFlagValue(outArgs)
+			return
+		default:
+			err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands)
+			return
+		}
+	}
+
+	var value string
+	if len(shorthands) > 2 && shorthands[1] == '=' {
+		// '-f=arg'
+		value = shorthands[2:]
+		outShorts = ""
+	} else if flag.NoOptDefVal != "" {
+		// '-f' (arg was optional)
+		value = flag.NoOptDefVal
+	} else if len(shorthands) > 1 {
+		// '-farg'
+		value = shorthands[1:]
+		outShorts = ""
+	} else if len(args) > 0 {
+		// '-f arg'
+		value = args[0]
+		outArgs = args[1:]
+	} else {
+		// '-f' (arg was required)
+		err = f.failf("flag needs an argument: %q in -%s", c, shorthands)
+		return
+	}
+
+	if flag.ShorthandDeprecated != "" {
+		fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
+	}
+
+	err = fn(flag, value)
+	if err != nil {
+		f.failf(err.Error())
+	}
+	return
+}
+
+func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) {
+	a = args
+	shorthands := s[1:]
+
+	// "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv").
+	for len(shorthands) > 0 {
+		shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn)
+		if err != nil {
+			return
+		}
+	}
+
+	return
+}
+
+func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) {
+	for len(args) > 0 {
+		s := args[0]
+		args = args[1:]
+		if len(s) == 0 || s[0] != '-' || len(s) == 1 {
+			if !f.interspersed {
+				f.args = append(f.args, s)
+				f.args = append(f.args, args...)
+				return nil
+			}
+			f.args = append(f.args, s)
+			continue
+		}
+
+		if s[1] == '-' {
+			if len(s) == 2 { // "--" terminates the flags
+				f.argsLenAtDash = len(f.args)
+				f.args = append(f.args, args...)
+				break
+			}
+			args, err = f.parseLongArg(s, args, fn)
+		} else {
+			args, err = f.parseShortArg(s, args, fn)
+		}
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// Parse parses flag definitions from the argument list, which should not
+// include the command name.  Must be called after all flags in the FlagSet
+// are defined and before flags are accessed by the program.
+// The return value will be ErrHelp if -help was set but not defined.
+func (f *FlagSet) Parse(arguments []string) error {
+	if f.addedGoFlagSets != nil {
+		for _, goFlagSet := range f.addedGoFlagSets {
+			goFlagSet.Parse(nil)
+		}
+	}
+	f.parsed = true
+
+	if len(arguments) < 0 {
+		return nil
+	}
+
+	f.args = make([]string, 0, len(arguments))
+
+	set := func(flag *Flag, value string) error {
+		return f.Set(flag.Name, value)
+	}
+
+	err := f.parseArgs(arguments, set)
+	if err != nil {
+		switch f.errorHandling {
+		case ContinueOnError:
+			return err
+		case ExitOnError:
+			fmt.Println(err)
+			os.Exit(2)
+		case PanicOnError:
+			panic(err)
+		}
+	}
+	return nil
+}
+
+type parseFunc func(flag *Flag, value string) error
+
+// ParseAll parses flag definitions from the argument list, which should not
+// include the command name. The arguments for fn are flag and value. Must be
+// called after all flags in the FlagSet are defined and before flags are
+// accessed by the program. The return value will be ErrHelp if -help was set
+// but not defined.
+func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error {
+	f.parsed = true
+	f.args = make([]string, 0, len(arguments))
+
+	err := f.parseArgs(arguments, fn)
+	if err != nil {
+		switch f.errorHandling {
+		case ContinueOnError:
+			return err
+		case ExitOnError:
+			os.Exit(2)
+		case PanicOnError:
+			panic(err)
+		}
+	}
+	return nil
+}
+
+// Parsed reports whether f.Parse has been called.
+func (f *FlagSet) Parsed() bool {
+	return f.parsed
+}
+
+// Parse parses the command-line flags from os.Args[1:].  Must be called
+// after all flags are defined and before flags are accessed by the program.
+func Parse() {
+	// Ignore errors; CommandLine is set for ExitOnError.
+	CommandLine.Parse(os.Args[1:])
+}
+
+// ParseAll parses the command-line flags from os.Args[1:] and calls fn for each.
+// The arguments for fn are flag and value. Must be called after all flags are
+// defined and before flags are accessed by the program.
+func ParseAll(fn func(flag *Flag, value string) error) {
+	// Ignore errors; CommandLine is set for ExitOnError.
+	CommandLine.ParseAll(os.Args[1:], fn)
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func SetInterspersed(interspersed bool) {
+	CommandLine.SetInterspersed(interspersed)
+}
+
+// Parsed returns true if the command-line flags have been parsed.
+func Parsed() bool {
+	return CommandLine.Parsed()
+}
+
+// CommandLine is the default set of command-line flags, parsed from os.Args.
+var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
+
+// NewFlagSet returns a new, empty flag set with the specified name,
+// error handling property and SortFlags set to true.
+func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
+	f := &FlagSet{
+		name:          name,
+		errorHandling: errorHandling,
+		argsLenAtDash: -1,
+		interspersed:  true,
+		SortFlags:     true,
+	}
+	return f
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func (f *FlagSet) SetInterspersed(interspersed bool) {
+	f.interspersed = interspersed
+}
+
+// Init sets the name and error handling property for a flag set.
+// By default, the zero FlagSet uses an empty name and the
+// ContinueOnError error handling policy.
+func (f *FlagSet) Init(name string, errorHandling ErrorHandling) {
+	f.name = name
+	f.errorHandling = errorHandling
+	f.argsLenAtDash = -1
+}
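flag.go above is the parsing core of the package. A small sketch exercising a
few behaviors its doc comment promises: combined boolean shorthands,
interspersed arguments, and the "--" terminator (flag names and values are
invented for the example):

	package main

	import (
		"fmt"

		flag "github.com/spf13/pflag"
	)

	func main() {
		fs := flag.NewFlagSet("demo", flag.ContinueOnError)
		verbose := fs.BoolP("verbose", "v", false, "verbose output")
		name := fs.StringP("name", "n", "", "a name")

		// "-vn" combines the boolean shorthand -v with -n, which consumes
		// the following token as its value; "first" is interspersed, and
		// everything after "--" is kept as a positional argument.
		args := []string{"first", "-vn", "jess", "--", "--not-a-flag"}
		if err := fs.Parse(args); err != nil {
			fmt.Println("parse error:", err)
			return
		}
		fmt.Println(*verbose, *name, fs.Args(), fs.ArgsLenAtDash())
		// Output: true jess [first --not-a-flag] 1
	}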
diff --git a/vendor/github.com/spf13/pflag/float32.go b/vendor/github.com/spf13/pflag/float32.go
new file mode 100644
index 000000000..a243f81f7
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- float32 Value
+type float32Value float32
+
+func newFloat32Value(val float32, p *float32) *float32Value {
+	*p = val
+	return (*float32Value)(p)
+}
+
+func (f *float32Value) Set(s string) error {
+	v, err := strconv.ParseFloat(s, 32)
+	*f = float32Value(v)
+	return err
+}
+
+func (f *float32Value) Type() string {
+	return "float32"
+}
+
+func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) }
+
+func float32Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseFloat(sval, 32)
+	if err != nil {
+		return 0, err
+	}
+	return float32(v), nil
+}
+
+// GetFloat32 returns the float32 value of a flag with the given name
+func (f *FlagSet) GetFloat32(name string) (float32, error) {
+	val, err := f.getFlagType(name, "float32", float32Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(float32), nil
+}
+
+// Float32Var defines a float32 flag with specified name, default value, and usage string.
+// The argument p points to a float32 variable in which to store the value of the flag.
+func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) {
+	f.VarP(newFloat32Value(value, p), name, "", usage)
+}
+
+// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
+	f.VarP(newFloat32Value(value, p), name, shorthand, usage)
+}
+
+// Float32Var defines a float32 flag with specified name, default value, and usage string.
+// The argument p points to a float32 variable in which to store the value of the flag.
+func Float32Var(p *float32, name string, value float32, usage string) {
+	CommandLine.VarP(newFloat32Value(value, p), name, "", usage)
+}
+
+// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
+func Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
+	CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage)
+}
+
+// Float32 defines a float32 flag with specified name, default value, and usage string.
+// The return value is the address of a float32 variable that stores the value of the flag.
+func (f *FlagSet) Float32(name string, value float32, usage string) *float32 {
+	p := new(float32)
+	f.Float32VarP(p, name, "", value, usage)
+	return p
+}
+
+// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 {
+	p := new(float32)
+	f.Float32VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Float32 defines a float32 flag with specified name, default value, and usage string.
+// The return value is the address of a float32 variable that stores the value of the flag.
+func Float32(name string, value float32, usage string) *float32 {
+	return CommandLine.Float32P(name, "", value, usage)
+}
+
+// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
+func Float32P(name, shorthand string, value float32, usage string) *float32 {
+	return CommandLine.Float32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/float64.go b/vendor/github.com/spf13/pflag/float64.go
new file mode 100644
index 000000000..04b5492a7
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float64.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- float64 Value
+type float64Value float64
+
+func newFloat64Value(val float64, p *float64) *float64Value {
+	*p = val
+	return (*float64Value)(p)
+}
+
+func (f *float64Value) Set(s string) error {
+	v, err := strconv.ParseFloat(s, 64)
+	*f = float64Value(v)
+	return err
+}
+
+func (f *float64Value) Type() string {
+	return "float64"
+}
+
+func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) }
+
+func float64Conv(sval string) (interface{}, error) {
+	return strconv.ParseFloat(sval, 64)
+}
+
+// GetFloat64 returns the float64 value of a flag with the given name
+func (f *FlagSet) GetFloat64(name string) (float64, error) {
+	val, err := f.getFlagType(name, "float64", float64Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(float64), nil
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) {
+	f.VarP(newFloat64Value(value, p), name, "", usage)
+}
+
+// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
+	f.VarP(newFloat64Value(value, p), name, shorthand, usage)
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func Float64Var(p *float64, name string, value float64, usage string) {
+	CommandLine.VarP(newFloat64Value(value, p), name, "", usage)
+}
+
+// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
+func Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
+	CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage)
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func (f *FlagSet) Float64(name string, value float64, usage string) *float64 {
+	p := new(float64)
+	f.Float64VarP(p, name, "", value, usage)
+	return p
+}
+
+// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 {
+	p := new(float64)
+	f.Float64VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func Float64(name string, value float64, usage string) *float64 {
+	return CommandLine.Float64P(name, "", value, usage)
+}
+
+// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
+func Float64P(name, shorthand string, value float64, usage string) *float64 {
+	return CommandLine.Float64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go
new file mode 100644
index 000000000..d3dd72b7f
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/golangflag.go
@@ -0,0 +1,105 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+	goflag "flag"
+	"reflect"
+	"strings"
+)
+
+// flagValueWrapper implements pflag.Value around a flag.Value.  The main
+// difference here is the addition of the Type method that returns a string
+// name of the type.  As this is generally unknown, we approximate that with
+// reflection.
+type flagValueWrapper struct {
+	inner    goflag.Value
+	flagType string
+}
+
+// We are just copying the boolFlag interface out of goflag as that is what
+// they use to decide if a flag should get "true" when no arg is given.
+type goBoolFlag interface {
+	goflag.Value
+	IsBoolFlag() bool
+}
+
+func wrapFlagValue(v goflag.Value) Value {
+	// If the flag.Value happens to also be a pflag.Value, just use it directly.
+	if pv, ok := v.(Value); ok {
+		return pv
+	}
+
+	pv := &flagValueWrapper{
+		inner: v,
+	}
+
+	t := reflect.TypeOf(v)
+	if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+
+	pv.flagType = strings.TrimSuffix(t.Name(), "Value")
+	return pv
+}
+
+func (v *flagValueWrapper) String() string {
+	return v.inner.String()
+}
+
+func (v *flagValueWrapper) Set(s string) error {
+	return v.inner.Set(s)
+}
+
+func (v *flagValueWrapper) Type() string {
+	return v.flagType
+}
+
+// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag.
+// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessible
+// with both `-v` and `--v` in flags. If the golang flag was more than a single
+// character (ex: `verbose`) it will only be accessible via `--verbose`.
+func PFlagFromGoFlag(goflag *goflag.Flag) *Flag {
+	// Remember the default value as a string; it won't change.
+	flag := &Flag{
+		Name:  goflag.Name,
+		Usage: goflag.Usage,
+		Value: wrapFlagValue(goflag.Value),
+		// Looks like golang flags don't set DefValue correctly  :-(
+		//DefValue: goflag.DefValue,
+		DefValue: goflag.Value.String(),
+	}
+	// Ex: if the golang flag was -v, allow both -v and --v to work
+	if len(flag.Name) == 1 {
+		flag.Shorthand = flag.Name
+	}
+	if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() {
+		flag.NoOptDefVal = "true"
+	}
+	return flag
+}
+
+// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet
+func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) {
+	if f.Lookup(goflag.Name) != nil {
+		return
+	}
+	newflag := PFlagFromGoFlag(goflag)
+	f.AddFlag(newflag)
+}
+
+// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet
+func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) {
+	if newSet == nil {
+		return
+	}
+	newSet.VisitAll(func(goflag *goflag.Flag) {
+		f.AddGoFlag(goflag)
+	})
+	if f.addedGoFlagSets == nil {
+		f.addedGoFlagSets = make([]*goflag.FlagSet, 0)
+	}
+	f.addedGoFlagSets = append(f.addedGoFlagSets, newSet)
+}
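golangflag.go is the bridge to the standard library: AddGoFlagSet wraps each
flag.Value in a flagValueWrapper and registers it, and a later pflag Parse
also calls goFlagSet.Parse(nil) so the stdlib set is marked as parsed. A
sketch of the usual pattern (the log-level flag is invented for illustration):

	package main

	import (
		goflag "flag"
		"fmt"

		flag "github.com/spf13/pflag"
	)

	func main() {
		// A flag registered through the standard library...
		logLevel := goflag.Int("log-level", 0, "log verbosity")

		// ...becomes visible to pflag; long form only, since its name is
		// longer than one character.
		flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
		flag.Parse()
		fmt.Println("log-level:", *logLevel)
	}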
diff --git a/vendor/github.com/spf13/pflag/int.go b/vendor/github.com/spf13/pflag/int.go
new file mode 100644
index 000000000..1474b89df
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- int Value
+type intValue int
+
+func newIntValue(val int, p *int) *intValue {
+	*p = val
+	return (*intValue)(p)
+}
+
+func (i *intValue) Set(s string) error {
+	v, err := strconv.ParseInt(s, 0, 64)
+	*i = intValue(v)
+	return err
+}
+
+func (i *intValue) Type() string {
+	return "int"
+}
+
+func (i *intValue) String() string { return strconv.Itoa(int(*i)) }
+
+func intConv(sval string) (interface{}, error) {
+	return strconv.Atoi(sval)
+}
+
+// GetInt returns the int value of a flag with the given name
+func (f *FlagSet) GetInt(name string) (int, error) {
+	val, err := f.getFlagType(name, "int", intConv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(int), nil
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func (f *FlagSet) IntVar(p *int, name string, value int, usage string) {
+	f.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) {
+	f.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func IntVar(p *int, name string, value int, usage string) {
+	CommandLine.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func IntVarP(p *int, name, shorthand string, value int, usage string) {
+	CommandLine.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func (f *FlagSet) Int(name string, value int, usage string) *int {
+	p := new(int)
+	f.IntVarP(p, name, "", value, usage)
+	return p
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int {
+	p := new(int)
+	f.IntVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func Int(name string, value int, usage string) *int {
+	return CommandLine.IntP(name, "", value, usage)
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func IntP(name, shorthand string, value int, usage string) *int {
+	return CommandLine.IntP(name, shorthand, value, usage)
+}
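The per-type Get accessors (GetInt here, GetFloat32 and friends above) all
route through getFlagType in flag.go, which checks Value.Type() before
converting the stored string. A short sketch of reading a flag back by name,
useful when the definition site is far from the read site (names are
illustrative):

	package main

	import (
		"fmt"

		flag "github.com/spf13/pflag"
	)

	func main() {
		flag.Int("workers", 4, "worker pool size")
		flag.Parse()

		// GetInt returns a descriptive error if "workers" is undefined or
		// was registered with a different type.
		n, err := flag.CommandLine.GetInt("workers")
		if err != nil {
			fmt.Println("lookup failed:", err)
			return
		}
		fmt.Println("workers:", n)
	}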
diff --git a/vendor/github.com/spf13/pflag/int16.go b/vendor/github.com/spf13/pflag/int16.go
new file mode 100644
index 000000000..f1a01d05e
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int16.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int16 Value
+type int16Value int16
+
+func newInt16Value(val int16, p *int16) *int16Value {
+	*p = val
+	return (*int16Value)(p)
+}
+
+func (i *int16Value) Set(s string) error {
+	v, err := strconv.ParseInt(s, 0, 16)
+	*i = int16Value(v)
+	return err
+}
+
+func (i *int16Value) Type() string {
+	return "int16"
+}
+
+func (i *int16Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int16Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseInt(sval, 0, 16)
+	if err != nil {
+		return 0, err
+	}
+	return int16(v), nil
+}
+
+// GetInt16 returns the int16 value of a flag with the given name
+func (f *FlagSet) GetInt16(name string) (int16, error) {
+	val, err := f.getFlagType(name, "int16", int16Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(int16), nil
+}
+
+// Int16Var defines an int16 flag with specified name, default value, and usage string.
+// The argument p points to an int16 variable in which to store the value of the flag.
+func (f *FlagSet) Int16Var(p *int16, name string, value int16, usage string) {
+	f.VarP(newInt16Value(value, p), name, "", usage)
+}
+
+// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
+	f.VarP(newInt16Value(value, p), name, shorthand, usage)
+}
+
+// Int16Var defines an int16 flag with specified name, default value, and usage string.
+// The argument p points to an int16 variable in which to store the value of the flag.
+func Int16Var(p *int16, name string, value int16, usage string) {
+	CommandLine.VarP(newInt16Value(value, p), name, "", usage)
+}
+
+// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
+func Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
+	CommandLine.VarP(newInt16Value(value, p), name, shorthand, usage)
+}
+
+// Int16 defines an int16 flag with specified name, default value, and usage string.
+// The return value is the address of an int16 variable that stores the value of the flag.
+func (f *FlagSet) Int16(name string, value int16, usage string) *int16 {
+	p := new(int16)
+	f.Int16VarP(p, name, "", value, usage)
+	return p
+}
+
+// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int16P(name, shorthand string, value int16, usage string) *int16 {
+	p := new(int16)
+	f.Int16VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Int16 defines an int16 flag with specified name, default value, and usage string.
+// The return value is the address of an int16 variable that stores the value of the flag.
+func Int16(name string, value int16, usage string) *int16 {
+	return CommandLine.Int16P(name, "", value, usage)
+}
+
+// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
+func Int16P(name, shorthand string, value int16, usage string) *int16 {
+	return CommandLine.Int16P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int32.go b/vendor/github.com/spf13/pflag/int32.go
new file mode 100644
index 000000000..9b95944f0
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int32 Value
+type int32Value int32
+
+func newInt32Value(val int32, p *int32) *int32Value {
+	*p = val
+	return (*int32Value)(p)
+}
+
+func (i *int32Value) Set(s string) error {
+	v, err := strconv.ParseInt(s, 0, 32)
+	*i = int32Value(v)
+	return err
+}
+
+func (i *int32Value) Type() string {
+	return "int32"
+}
+
+func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int32Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseInt(sval, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return int32(v), nil
+}
+
+// GetInt32 returns the int32 value of a flag with the given name
+func (f *FlagSet) GetInt32(name string) (int32, error) {
+	val, err := f.getFlagType(name, "int32", int32Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(int32), nil
+}
+
+// Int32Var defines an int32 flag with specified name, default value, and usage string.
+// The argument p points to an int32 variable in which to store the value of the flag.
+func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) {
+	f.VarP(newInt32Value(value, p), name, "", usage)
+}
+
+// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
+	f.VarP(newInt32Value(value, p), name, shorthand, usage)
+}
+
+// Int32Var defines an int32 flag with specified name, default value, and usage string.
+// The argument p points to an int32 variable in which to store the value of the flag.
+func Int32Var(p *int32, name string, value int32, usage string) {
+	CommandLine.VarP(newInt32Value(value, p), name, "", usage)
+}
+
+// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
+func Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
+	CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage)
+}
+
+// Int32 defines an int32 flag with specified name, default value, and usage string.
+// The return value is the address of an int32 variable that stores the value of the flag.
+func (f *FlagSet) Int32(name string, value int32, usage string) *int32 {
+	p := new(int32)
+	f.Int32VarP(p, name, "", value, usage)
+	return p
+}
+
+// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 {
+	p := new(int32)
+	f.Int32VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Int32 defines an int32 flag with specified name, default value, and usage string.
+// The return value is the address of an int32 variable that stores the value of the flag.
+func Int32(name string, value int32, usage string) *int32 {
+	return CommandLine.Int32P(name, "", value, usage)
+}
+
+// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
+func Int32P(name, shorthand string, value int32, usage string) *int32 {
+	return CommandLine.Int32P(name, shorthand, value, usage)
+}
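+
+// Editorial note: Set and int32Conv call strconv.ParseInt with base 0, so
+// values may be given in hex or octal as well as decimal. A minimal sketch
+// (the flag name "offset" is hypothetical):
+//
+//	fs := NewFlagSet("example", ContinueOnError)
+//	v := fs.Int32("offset", 0, "byte offset")
+//	_ = fs.Parse([]string{"--offset=0x10"})
+//	// *v == 16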
diff --git a/vendor/github.com/spf13/pflag/int64.go b/vendor/github.com/spf13/pflag/int64.go
new file mode 100644
index 000000000..0026d781d
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int64.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- int64 Value
+type int64Value int64
+
+func newInt64Value(val int64, p *int64) *int64Value {
+	*p = val
+	return (*int64Value)(p)
+}
+
+func (i *int64Value) Set(s string) error {
+	v, err := strconv.ParseInt(s, 0, 64)
+	*i = int64Value(v)
+	return err
+}
+
+func (i *int64Value) Type() string {
+	return "int64"
+}
+
+func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int64Conv(sval string) (interface{}, error) {
+	return strconv.ParseInt(sval, 0, 64)
+}
+
+// GetInt64 returns the int64 value of a flag with the given name
+func (f *FlagSet) GetInt64(name string) (int64, error) {
+	val, err := f.getFlagType(name, "int64", int64Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(int64), nil
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) {
+	f.VarP(newInt64Value(value, p), name, "", usage)
+}
+
+// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
+	f.VarP(newInt64Value(value, p), name, shorthand, usage)
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func Int64Var(p *int64, name string, value int64, usage string) {
+	CommandLine.VarP(newInt64Value(value, p), name, "", usage)
+}
+
+// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
+func Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
+	CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage)
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func (f *FlagSet) Int64(name string, value int64, usage string) *int64 {
+	p := new(int64)
+	f.Int64VarP(p, name, "", value, usage)
+	return p
+}
+
+// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 {
+	p := new(int64)
+	f.Int64VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func Int64(name string, value int64, usage string) *int64 {
+	return CommandLine.Int64P(name, "", value, usage)
+}
+
+// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
+func Int64P(name, shorthand string, value int64, usage string) *int64 {
+	return CommandLine.Int64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int8.go b/vendor/github.com/spf13/pflag/int8.go
new file mode 100644
index 000000000..4da92228e
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int8.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int8 Value
+type int8Value int8
+
+func newInt8Value(val int8, p *int8) *int8Value {
+	*p = val
+	return (*int8Value)(p)
+}
+
+func (i *int8Value) Set(s string) error {
+	v, err := strconv.ParseInt(s, 0, 8)
+	*i = int8Value(v)
+	return err
+}
+
+func (i *int8Value) Type() string {
+	return "int8"
+}
+
+func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int8Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseInt(sval, 0, 8)
+	if err != nil {
+		return 0, err
+	}
+	return int8(v), nil
+}
+
+// GetInt8 returns the int8 value of a flag with the given name
+func (f *FlagSet) GetInt8(name string) (int8, error) {
+	val, err := f.getFlagType(name, "int8", int8Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(int8), nil
+}
+
+// Int8Var defines an int8 flag with specified name, default value, and usage string.
+// The argument p points to an int8 variable in which to store the value of the flag.
+func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) {
+	f.VarP(newInt8Value(value, p), name, "", usage)
+}
+
+// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
+	f.VarP(newInt8Value(value, p), name, shorthand, usage)
+}
+
+// Int8Var defines an int8 flag with specified name, default value, and usage string.
+// The argument p points to an int8 variable in which to store the value of the flag.
+func Int8Var(p *int8, name string, value int8, usage string) {
+	CommandLine.VarP(newInt8Value(value, p), name, "", usage)
+}
+
+// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
+func Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
+	CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage)
+}
+
+// Int8 defines an int8 flag with specified name, default value, and usage string.
+// The return value is the address of an int8 variable that stores the value of the flag.
+func (f *FlagSet) Int8(name string, value int8, usage string) *int8 {
+	p := new(int8)
+	f.Int8VarP(p, name, "", value, usage)
+	return p
+}
+
+// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 {
+	p := new(int8)
+	f.Int8VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Int8 defines an int8 flag with specified name, default value, and usage string.
+// The return value is the address of an int8 variable that stores the value of the flag.
+func Int8(name string, value int8, usage string) *int8 {
+	return CommandLine.Int8P(name, "", value, usage)
+}
+
+// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
+func Int8P(name, shorthand string, value int8, usage string) *int8 {
+	return CommandLine.Int8P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go
new file mode 100644
index 000000000..1e7c9edde
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int_slice.go
@@ -0,0 +1,128 @@
+package pflag
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// -- intSlice Value
+type intSliceValue struct {
+	value   *[]int
+	changed bool
+}
+
+func newIntSliceValue(val []int, p *[]int) *intSliceValue {
+	isv := new(intSliceValue)
+	isv.value = p
+	*isv.value = val
+	return isv
+}
+
+func (s *intSliceValue) Set(val string) error {
+	ss := strings.Split(val, ",")
+	out := make([]int, len(ss))
+	for i, d := range ss {
+		var err error
+		out[i], err = strconv.Atoi(d)
+		if err != nil {
+			return err
+		}
+
+	}
+	if !s.changed {
+		*s.value = out
+	} else {
+		*s.value = append(*s.value, out...)
+	}
+	s.changed = true
+	return nil
+}
+
+func (s *intSliceValue) Type() string {
+	return "intSlice"
+}
+
+func (s *intSliceValue) String() string {
+	out := make([]string, len(*s.value))
+	for i, d := range *s.value {
+		out[i] = fmt.Sprintf("%d", d)
+	}
+	return "[" + strings.Join(out, ",") + "]"
+}
+
+func intSliceConv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// Empty string would cause a slice with one (empty) entry
+	if len(val) == 0 {
+		return []int{}, nil
+	}
+	ss := strings.Split(val, ",")
+	out := make([]int, len(ss))
+	for i, d := range ss {
+		var err error
+		out[i], err = strconv.Atoi(d)
+		if err != nil {
+			return nil, err
+		}
+
+	}
+	return out, nil
+}
+
+// GetIntSlice returns the []int value of a flag with the given name
+func (f *FlagSet) GetIntSlice(name string) ([]int, error) {
+	val, err := f.getFlagType(name, "intSlice", intSliceConv)
+	if err != nil {
+		return []int{}, err
+	}
+	return val.([]int), nil
+}
+
+// IntSliceVar defines an intSlice flag with specified name, default value, and usage string.
+// The argument p points to a []int variable in which to store the value of the flag.
+func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) {
+	f.VarP(newIntSliceValue(value, p), name, "", usage)
+}
+
+// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
+	f.VarP(newIntSliceValue(value, p), name, shorthand, usage)
+}
+
+// IntSliceVar defines a []int flag with specified name, default value, and usage string.
+// The argument p points to a []int variable in which to store the value of the flag.
+func IntSliceVar(p *[]int, name string, value []int, usage string) {
+	CommandLine.VarP(newIntSliceValue(value, p), name, "", usage)
+}
+
+// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
+	CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage)
+}
+
+// IntSlice defines a []int flag with specified name, default value, and usage string.
+// The return value is the address of a []int variable that stores the value of the flag.
+func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int {
+	p := []int{}
+	f.IntSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int {
+	p := []int{}
+	f.IntSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// IntSlice defines a []int flag with specified name, default value, and usage string.
+// The return value is the address of a []int variable that stores the value of the flag.
+func IntSlice(name string, value []int, usage string) *[]int {
+	return CommandLine.IntSliceP(name, "", value, usage)
+}
+
+// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
+func IntSliceP(name, shorthand string, value []int, usage string) *[]int {
+	return CommandLine.IntSliceP(name, shorthand, value, usage)
+}
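+
+// Usage sketch (editorial addition; the flag name "ports" is hypothetical):
+// the first use of an intSlice flag replaces the default, and later uses
+// append, per Set above.
+//
+//	fs := NewFlagSet("example", ContinueOnError)
+//	ports := fs.IntSliceP("ports", "p", []int{80}, "ports to listen on")
+//	_ = fs.Parse([]string{"--ports=8080,8443", "-p", "9000"})
+//	// *ports == []int{8080, 8443, 9000}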
diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go
new file mode 100644
index 000000000..3d414ba69
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip.go
@@ -0,0 +1,94 @@
+package pflag
+
+import (
+	"fmt"
+	"net"
+	"strings"
+)
+
+// -- net.IP value
+type ipValue net.IP
+
+func newIPValue(val net.IP, p *net.IP) *ipValue {
+	*p = val
+	return (*ipValue)(p)
+}
+
+func (i *ipValue) String() string { return net.IP(*i).String() }
+func (i *ipValue) Set(s string) error {
+	ip := net.ParseIP(strings.TrimSpace(s))
+	if ip == nil {
+		return fmt.Errorf("failed to parse IP: %q", s)
+	}
+	*i = ipValue(ip)
+	return nil
+}
+
+func (i *ipValue) Type() string {
+	return "ip"
+}
+
+func ipConv(sval string) (interface{}, error) {
+	ip := net.ParseIP(sval)
+	if ip != nil {
+		return ip, nil
+	}
+	return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
+}
+
+// GetIP returns the net.IP value of a flag with the given name
+func (f *FlagSet) GetIP(name string) (net.IP, error) {
+	val, err := f.getFlagType(name, "ip", ipConv)
+	if err != nil {
+		return nil, err
+	}
+	return val.(net.IP), nil
+}
+
+// IPVar defines a net.IP flag with specified name, default value, and usage string.
+// The argument p points to a net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) {
+	f.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+	f.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IPVar defines a net.IP flag with specified name, default value, and usage string.
+// The argument p points to a net.IP variable in which to store the value of the flag.
+func IPVar(p *net.IP, name string, value net.IP, usage string) {
+	CommandLine.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+	CommandLine.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IP defines a net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a net.IP variable that stores the value of the flag.
+func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP {
+	p := new(net.IP)
+	f.IPVarP(p, name, "", value, usage)
+	return p
+}
+
+// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+	p := new(net.IP)
+	f.IPVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// IP defines a net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a net.IP variable that stores the value of the flag.
+func IP(name string, value net.IP, usage string) *net.IP {
+	return CommandLine.IPP(name, "", value, usage)
+}
+
+// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+	return CommandLine.IPP(name, shorthand, value, usage)
+}
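+
+// Usage sketch (editorial addition; the flag name "bind" is hypothetical):
+//
+//	fs := NewFlagSet("example", ContinueOnError)
+//	addr := fs.IP("bind", net.ParseIP("127.0.0.1"), "address to bind to")
+//	_ = fs.Parse([]string{"--bind=10.0.0.1"})
+//	// addr.String() == "10.0.0.1"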
diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go
new file mode 100644
index 000000000..7dd196fe3
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip_slice.go
@@ -0,0 +1,148 @@
+package pflag
+
+import (
+	"fmt"
+	"io"
+	"net"
+	"strings"
+)
+
+// -- ipSlice Value
+type ipSliceValue struct {
+	value   *[]net.IP
+	changed bool
+}
+
+func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue {
+	ipsv := new(ipSliceValue)
+	ipsv.value = p
+	*ipsv.value = val
+	return ipsv
+}
+
+// Set parses the comma-separated string of IP addresses and assigns the result as the []net.IP value of this flag.
+// If Set is called on a flag that already has a []net.IP assigned, the newly converted values will be appended.
+func (s *ipSliceValue) Set(val string) error {
+
+	// remove all quote characters
+	rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+	// read flag arguments with CSV parser
+	ipStrSlice, err := readAsCSV(rmQuote.Replace(val))
+	if err != nil && err != io.EOF {
+		return err
+	}
+
+	// parse ip values into slice
+	out := make([]net.IP, 0, len(ipStrSlice))
+	for _, ipStr := range ipStrSlice {
+		ip := net.ParseIP(strings.TrimSpace(ipStr))
+		if ip == nil {
+			return fmt.Errorf("invalid string being converted to IP address: %s", ipStr)
+		}
+		out = append(out, ip)
+	}
+
+	if !s.changed {
+		*s.value = out
+	} else {
+		*s.value = append(*s.value, out...)
+	}
+
+	s.changed = true
+
+	return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *ipSliceValue) Type() string {
+	return "ipSlice"
+}
+
+// String defines a "native" format for this net.IP slice flag value.
+func (s *ipSliceValue) String() string {
+
+	ipStrSlice := make([]string, len(*s.value))
+	for i, ip := range *s.value {
+		ipStrSlice[i] = ip.String()
+	}
+
+	out, _ := writeAsCSV(ipStrSlice)
+
+	return "[" + out + "]"
+}
+
+func ipSliceConv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// Empty string would cause a slice with one (empty) entry
+	if len(val) == 0 {
+		return []net.IP{}, nil
+	}
+	ss := strings.Split(val, ",")
+	out := make([]net.IP, len(ss))
+	for i, sval := range ss {
+		ip := net.ParseIP(strings.TrimSpace(sval))
+		if ip == nil {
+			return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
+		}
+		out[i] = ip
+	}
+	return out, nil
+}
+
+// GetIPSlice returns the []net.IP value of a flag with the given name
+func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) {
+	val, err := f.getFlagType(name, "ipSlice", ipSliceConv)
+	if err != nil {
+		return []net.IP{}, err
+	}
+	return val.([]net.IP), nil
+}
+
+// IPSliceVar defines an ipSlice flag with specified name, default value, and usage string.
+// The argument p points to a []net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
+	f.VarP(newIPSliceValue(value, p), name, "", usage)
+}
+
+// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
+	f.VarP(newIPSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string.
+// The argument p points to a []net.IP variable in which to store the value of the flag.
+func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
+	CommandLine.VarP(newIPSliceValue(value, p), name, "", usage)
+}
+
+// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
+	CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IP variable that stores the value of that flag.
+func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP {
+	p := []net.IP{}
+	f.IPSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
+	p := []net.IP{}
+	f.IPSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IP variable that stores the value of the flag.
+func IPSlice(name string, value []net.IP, usage string) *[]net.IP {
+	return CommandLine.IPSliceP(name, "", value, usage)
+}
+
+// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
+func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
+	return CommandLine.IPSliceP(name, shorthand, value, usage)
+}
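+
+// Usage sketch (editorial addition; the flag name "dns" is hypothetical).
+// Set strips quote characters before CSV parsing, so quoted comma-separated
+// addresses are accepted:
+//
+//	fs := NewFlagSet("example", ContinueOnError)
+//	dns := fs.IPSlice("dns", nil, "DNS servers")
+//	_ = fs.Parse([]string{`--dns="1.1.1.1","8.8.8.8"`})
+//	// len(*dns) == 2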
diff --git a/vendor/github.com/spf13/pflag/ipmask.go b/vendor/github.com/spf13/pflag/ipmask.go
new file mode 100644
index 000000000..5bd44bd21
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ipmask.go
@@ -0,0 +1,122 @@
+package pflag
+
+import (
+	"fmt"
+	"net"
+	"strconv"
+)
+
+// -- net.IPMask value
+type ipMaskValue net.IPMask
+
+func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue {
+	*p = val
+	return (*ipMaskValue)(p)
+}
+
+func (i *ipMaskValue) String() string { return net.IPMask(*i).String() }
+func (i *ipMaskValue) Set(s string) error {
+	ip := ParseIPv4Mask(s)
+	if ip == nil {
+		return fmt.Errorf("failed to parse IP mask: %q", s)
+	}
+	*i = ipMaskValue(ip)
+	return nil
+}
+
+func (i *ipMaskValue) Type() string {
+	return "ipMask"
+}
+
+// ParseIPv4Mask parses an IPv4 netmask written either in IP form (e.g. 255.255.255.0)
+// or in hex form (e.g. ffffff00). This function should really belong to the net package.
+func ParseIPv4Mask(s string) net.IPMask {
+	mask := net.ParseIP(s)
+	if mask == nil {
+		if len(s) != 8 {
+			return nil
+		}
+		// net.IPMask.String() actually outputs things like ffffff00
+		// so write a horrible parser for that as well  :-(
+		m := []int{}
+		for i := 0; i < 4; i++ {
+			b := "0x" + s[2*i:2*i+2]
+			d, err := strconv.ParseInt(b, 0, 0)
+			if err != nil {
+				return nil
+			}
+			m = append(m, int(d))
+		}
+		s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3])
+		mask = net.ParseIP(s)
+		if mask == nil {
+			return nil
+		}
+	}
+	return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15])
+}
+
+func parseIPv4Mask(sval string) (interface{}, error) {
+	mask := ParseIPv4Mask(sval)
+	if mask == nil {
+		return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval)
+	}
+	return mask, nil
+}
+
+// GetIPv4Mask returns the net.IPMask value of a flag with the given name
+func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) {
+	val, err := f.getFlagType(name, "ipMask", parseIPv4Mask)
+	if err != nil {
+		return nil, err
+	}
+	return val.(net.IPMask), nil
+}
+
+// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to a net.IPMask variable in which to store the value of the flag.
+func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+	f.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+	f.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to a net.IPMask variable in which to store the value of the flag.
+func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+	CommandLine.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+	CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPMask variable that stores the value of the flag.
+func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+	p := new(net.IPMask)
+	f.IPMaskVarP(p, name, "", value, usage)
+	return p
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+	p := new(net.IPMask)
+	f.IPMaskVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPMask variable that stores the value of the flag.
+func IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+	return CommandLine.IPMaskP(name, "", value, usage)
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+	return CommandLine.IPMaskP(name, shorthand, value, usage)
+}
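+
+// Editorial note: ParseIPv4Mask accepts both notations handled above, as in
+// this minimal sketch:
+//
+//	m1 := ParseIPv4Mask("255.255.255.0")
+//	m2 := ParseIPv4Mask("ffffff00")
+//	// m1 and m2 are both the /24 mask; malformed input yields nil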
diff --git a/vendor/github.com/spf13/pflag/ipnet.go b/vendor/github.com/spf13/pflag/ipnet.go
new file mode 100644
index 000000000..e2c1b8bcd
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ipnet.go
@@ -0,0 +1,98 @@
+package pflag
+
+import (
+	"fmt"
+	"net"
+	"strings"
+)
+
+// IPNet adapts net.IPNet for use as a flag.
+type ipNetValue net.IPNet
+
+func (ipnet ipNetValue) String() string {
+	n := net.IPNet(ipnet)
+	return n.String()
+}
+
+func (ipnet *ipNetValue) Set(value string) error {
+	_, n, err := net.ParseCIDR(strings.TrimSpace(value))
+	if err != nil {
+		return err
+	}
+	*ipnet = ipNetValue(*n)
+	return nil
+}
+
+func (*ipNetValue) Type() string {
+	return "ipNet"
+}
+
+func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue {
+	*p = val
+	return (*ipNetValue)(p)
+}
+
+func ipNetConv(sval string) (interface{}, error) {
+	_, n, err := net.ParseCIDR(strings.TrimSpace(sval))
+	if err == nil {
+		return *n, nil
+	}
+	return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval)
+}
+
+// GetIPNet returns the net.IPNet value of a flag with the given name
+func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) {
+	val, err := f.getFlagType(name, "ipNet", ipNetConv)
+	if err != nil {
+		return net.IPNet{}, err
+	}
+	return val.(net.IPNet), nil
+}
+
+// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a net.IPNet variable in which to store the value of the flag.
+func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+	f.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+	f.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a net.IPNet variable in which to store the value of the flag.
+func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+	CommandLine.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+	CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPNet variable that stores the value of the flag.
+func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+	p := new(net.IPNet)
+	f.IPNetVarP(p, name, "", value, usage)
+	return p
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+	p := new(net.IPNet)
+	f.IPNetVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPNet variable that stores the value of the flag.
+func IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+	return CommandLine.IPNetP(name, "", value, usage)
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+	return CommandLine.IPNetP(name, shorthand, value, usage)
+}
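+
+// Usage sketch (editorial addition; the flag name "cidr" is hypothetical):
+//
+//	_, def, _ := net.ParseCIDR("10.0.0.0/8")
+//	fs := NewFlagSet("example", ContinueOnError)
+//	n := fs.IPNet("cidr", *def, "allowed network")
+//	_ = fs.Parse([]string{"--cidr=192.168.0.0/16"})
+//	// n.String() == "192.168.0.0/16"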
diff --git a/vendor/github.com/spf13/pflag/string.go b/vendor/github.com/spf13/pflag/string.go
new file mode 100644
index 000000000..04e0a26ff
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string.go
@@ -0,0 +1,80 @@
+package pflag
+
+// -- string Value
+type stringValue string
+
+func newStringValue(val string, p *string) *stringValue {
+	*p = val
+	return (*stringValue)(p)
+}
+
+func (s *stringValue) Set(val string) error {
+	*s = stringValue(val)
+	return nil
+}
+func (s *stringValue) Type() string {
+	return "string"
+}
+
+func (s *stringValue) String() string { return string(*s) }
+
+func stringConv(sval string) (interface{}, error) {
+	return sval, nil
+}
+
+// GetString returns the string value of a flag with the given name
+func (f *FlagSet) GetString(name string) (string, error) {
+	val, err := f.getFlagType(name, "string", stringConv)
+	if err != nil {
+		return "", err
+	}
+	return val.(string), nil
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func (f *FlagSet) StringVar(p *string, name string, value string, usage string) {
+	f.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) {
+	f.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func StringVar(p *string, name string, value string, usage string) {
+	CommandLine.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func StringVarP(p *string, name, shorthand string, value string, usage string) {
+	CommandLine.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func (f *FlagSet) String(name string, value string, usage string) *string {
+	p := new(string)
+	f.StringVarP(p, name, "", value, usage)
+	return p
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string {
+	p := new(string)
+	f.StringVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func String(name string, value string, usage string) *string {
+	return CommandLine.StringP(name, "", value, usage)
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func StringP(name, shorthand string, value string, usage string) *string {
+	return CommandLine.StringP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go
new file mode 100644
index 000000000..fa7bc6018
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_array.go
@@ -0,0 +1,103 @@
+package pflag
+
+// -- stringArray Value
+type stringArrayValue struct {
+	value   *[]string
+	changed bool
+}
+
+func newStringArrayValue(val []string, p *[]string) *stringArrayValue {
+	ssv := new(stringArrayValue)
+	ssv.value = p
+	*ssv.value = val
+	return ssv
+}
+
+func (s *stringArrayValue) Set(val string) error {
+	if !s.changed {
+		*s.value = []string{val}
+		s.changed = true
+	} else {
+		*s.value = append(*s.value, val)
+	}
+	return nil
+}
+
+func (s *stringArrayValue) Type() string {
+	return "stringArray"
+}
+
+func (s *stringArrayValue) String() string {
+	str, _ := writeAsCSV(*s.value)
+	return "[" + str + "]"
+}
+
+func stringArrayConv(sval string) (interface{}, error) {
+	sval = sval[1 : len(sval)-1]
+	// An empty string would cause an array with one (empty) string
+	if len(sval) == 0 {
+		return []string{}, nil
+	}
+	return readAsCSV(sval)
+}
+
+// GetStringArray returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringArray(name string) ([]string, error) {
+	val, err := f.getFlagType(name, "stringArray", stringArrayConv)
+	if err != nil {
+		return []string{}, err
+	}
+	return val.([]string), nil
+}
+
+// StringArrayVar defines a []string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the values of the multiple flags.
+// The value of each argument is not split on commas; use a StringSlice for that.
+func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) {
+	f.VarP(newStringArrayValue(value, p), name, "", usage)
+}
+
+// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
+	f.VarP(newStringArrayValue(value, p), name, shorthand, usage)
+}
+
+// StringArrayVar defines a []string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// The value of each argument is not split on commas; use a StringSlice for that.
+func StringArrayVar(p *[]string, name string, value []string, usage string) {
+	CommandLine.VarP(newStringArrayValue(value, p), name, "", usage)
+}
+
+// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
+func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
+	CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage)
+}
+
+// StringArray defines a []string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// The value of each argument is not split on commas; use a StringSlice for that.
+func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string {
+	p := []string{}
+	f.StringArrayVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string {
+	p := []string{}
+	f.StringArrayVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// StringArray defines a []string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// The value of each argument is not split on commas; use a StringSlice for that.
+func StringArray(name string, value []string, usage string) *[]string {
+	return CommandLine.StringArrayP(name, "", value, usage)
+}
+
+// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
+func StringArrayP(name, shorthand string, value []string, usage string) *[]string {
+	return CommandLine.StringArrayP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go
new file mode 100644
index 000000000..0cd3ccc08
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_slice.go
@@ -0,0 +1,149 @@
+package pflag
+
+import (
+	"bytes"
+	"encoding/csv"
+	"strings"
+)
+
+// -- stringSlice Value
+type stringSliceValue struct {
+	value   *[]string
+	changed bool
+}
+
+func newStringSliceValue(val []string, p *[]string) *stringSliceValue {
+	ssv := new(stringSliceValue)
+	ssv.value = p
+	*ssv.value = val
+	return ssv
+}
+
+func readAsCSV(val string) ([]string, error) {
+	if val == "" {
+		return []string{}, nil
+	}
+	stringReader := strings.NewReader(val)
+	csvReader := csv.NewReader(stringReader)
+	return csvReader.Read()
+}
+
+func writeAsCSV(vals []string) (string, error) {
+	b := &bytes.Buffer{}
+	w := csv.NewWriter(b)
+	err := w.Write(vals)
+	if err != nil {
+		return "", err
+	}
+	w.Flush()
+	return strings.TrimSuffix(b.String(), "\n"), nil
+}
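+
+// Editorial note: readAsCSV and writeAsCSV round-trip values through
+// encoding/csv, so elements that themselves contain commas survive when
+// quoted:
+//
+//	s, _ := writeAsCSV([]string{"a,b", "c"}) // `"a,b",c`
+//	v, _ := readAsCSV(s)                     // []string{"a,b", "c"}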
+
+func (s *stringSliceValue) Set(val string) error {
+	v, err := readAsCSV(val)
+	if err != nil {
+		return err
+	}
+	if !s.changed {
+		*s.value = v
+	} else {
+		*s.value = append(*s.value, v...)
+	}
+	s.changed = true
+	return nil
+}
+
+func (s *stringSliceValue) Type() string {
+	return "stringSlice"
+}
+
+func (s *stringSliceValue) String() string {
+	str, _ := writeAsCSV(*s.value)
+	return "[" + str + "]"
+}
+
+func stringSliceConv(sval string) (interface{}, error) {
+	sval = sval[1 : len(sval)-1]
+	// An empty string would cause a slice with one (empty) string
+	if len(sval) == 0 {
+		return []string{}, nil
+	}
+	return readAsCSV(sval)
+}
+
+// GetStringSlice returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringSlice(name string) ([]string, error) {
+	val, err := f.getFlagType(name, "stringSlice", stringSliceConv)
+	if err != nil {
+		return []string{}, err
+	}
+	return val.([]string), nil
+}
+
+// StringSliceVar defines a []string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated values as arguments and split them accordingly.
+// For example:
+//   --ss="v1,v2" --ss="v3"
+// will result in
+//   []string{"v1", "v2", "v3"}
+func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) {
+	f.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+	f.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSliceVar defines a []string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated values as arguments and split them accordingly.
+// For example:
+//   --ss="v1,v2" --ss="v3"
+// will result in
+//   []string{"v1", "v2", "v3"}
+func StringSliceVar(p *[]string, name string, value []string, usage string) {
+	CommandLine.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+	CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSlice defines a []string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated values as arguments and split them accordingly.
+// For example:
+//   --ss="v1,v2" --ss="v3"
+// will result in
+//   []string{"v1", "v2", "v3"}
+func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string {
+	p := []string{}
+	f.StringSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+	p := []string{}
+	f.StringSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// StringSlice defines a []string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated values as arguments and split them accordingly.
+// For example:
+//   --ss="v1,v2" --ss="v3"
+// will result in
+//   []string{"v1", "v2", "v3"}
+func StringSlice(name string, value []string, usage string) *[]string {
+	return CommandLine.StringSliceP(name, "", value, usage)
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+	return CommandLine.StringSliceP(name, shorthand, value, usage)
+}
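+
+// Editorial sketch contrasting StringSlice with StringArray (flag names are
+// hypothetical): the former splits each argument on commas, the latter
+// stores it verbatim.
+//
+//	fs := NewFlagSet("example", ContinueOnError)
+//	sl := fs.StringSlice("ss", nil, "comma-split values")
+//	ar := fs.StringArray("sa", nil, "verbatim values")
+//	_ = fs.Parse([]string{"--ss=a,b", "--sa=a,b"})
+//	// *sl == []string{"a", "b"}; *ar == []string{"a,b"}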
diff --git a/vendor/github.com/spf13/pflag/uint.go b/vendor/github.com/spf13/pflag/uint.go
new file mode 100644
index 000000000..dcbc2b758
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint Value
+type uintValue uint
+
+func newUintValue(val uint, p *uint) *uintValue {
+	*p = val
+	return (*uintValue)(p)
+}
+
+func (i *uintValue) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 64)
+	*i = uintValue(v)
+	return err
+}
+
+func (i *uintValue) Type() string {
+	return "uint"
+}
+
+func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uintConv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 0)
+	if err != nil {
+		return 0, err
+	}
+	return uint(v), nil
+}
+
+// GetUint returns the uint value of a flag with the given name
+func (f *FlagSet) GetUint(name string) (uint, error) {
+	val, err := f.getFlagType(name, "uint", uintConv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint), nil
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) {
+	f.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+	f.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func UintVar(p *uint, name string, value uint, usage string) {
+	CommandLine.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+	CommandLine.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func (f *FlagSet) Uint(name string, value uint, usage string) *uint {
+	p := new(uint)
+	f.UintVarP(p, name, "", value, usage)
+	return p
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint {
+	p := new(uint)
+	f.UintVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func Uint(name string, value uint, usage string) *uint {
+	return CommandLine.UintP(name, "", value, usage)
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func UintP(name, shorthand string, value uint, usage string) *uint {
+	return CommandLine.UintP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint16.go b/vendor/github.com/spf13/pflag/uint16.go
new file mode 100644
index 000000000..7e9914edd
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint16.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint16 value
+type uint16Value uint16
+
+func newUint16Value(val uint16, p *uint16) *uint16Value {
+	*p = val
+	return (*uint16Value)(p)
+}
+
+func (i *uint16Value) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 16)
+	*i = uint16Value(v)
+	return err
+}
+
+func (i *uint16Value) Type() string {
+	return "uint16"
+}
+
+func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint16Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 16)
+	if err != nil {
+		return 0, err
+	}
+	return uint16(v), nil
+}
+
+// GetUint16 returns the uint16 value of a flag with the given name
+func (f *FlagSet) GetUint16(name string) (uint16, error) {
+	val, err := f.getFlagType(name, "uint16", uint16Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint16), nil
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) {
+	f.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+	f.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func Uint16Var(p *uint16, name string, value uint16, usage string) {
+	CommandLine.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+	CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 {
+	p := new(uint16)
+	f.Uint16VarP(p, name, "", value, usage)
+	return p
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+	p := new(uint16)
+	f.Uint16VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func Uint16(name string, value uint16, usage string) *uint16 {
+	return CommandLine.Uint16P(name, "", value, usage)
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+	return CommandLine.Uint16P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint32.go b/vendor/github.com/spf13/pflag/uint32.go
new file mode 100644
index 000000000..d8024539b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint32 value
+type uint32Value uint32
+
+func newUint32Value(val uint32, p *uint32) *uint32Value {
+	*p = val
+	return (*uint32Value)(p)
+}
+
+func (i *uint32Value) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 32)
+	*i = uint32Value(v)
+	return err
+}
+
+func (i *uint32Value) Type() string {
+	return "uint32"
+}
+
+func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint32Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return uint32(v), nil
+}
+
+// GetUint32 returns the uint32 value of a flag with the given name
+func (f *FlagSet) GetUint32(name string) (uint32, error) {
+	val, err := f.getFlagType(name, "uint32", uint32Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint32), nil
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) {
+	f.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+	f.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func Uint32Var(p *uint32, name string, value uint32, usage string) {
+	CommandLine.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+	CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 {
+	p := new(uint32)
+	f.Uint32VarP(p, name, "", value, usage)
+	return p
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+	p := new(uint32)
+	f.Uint32VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func Uint32(name string, value uint32, usage string) *uint32 {
+	return CommandLine.Uint32P(name, "", value, usage)
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+	return CommandLine.Uint32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint64.go b/vendor/github.com/spf13/pflag/uint64.go
new file mode 100644
index 000000000..f62240f2c
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint64.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint64 Value
+type uint64Value uint64
+
+func newUint64Value(val uint64, p *uint64) *uint64Value {
+	*p = val
+	return (*uint64Value)(p)
+}
+
+func (i *uint64Value) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 64)
+	*i = uint64Value(v)
+	return err
+}
+
+func (i *uint64Value) Type() string {
+	return "uint64"
+}
+
+func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint64Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 64)
+	if err != nil {
+		return 0, err
+	}
+	return uint64(v), nil
+}
+
+// GetUint64 returns the uint64 value of a flag with the given name
+func (f *FlagSet) GetUint64(name string) (uint64, error) {
+	val, err := f.getFlagType(name, "uint64", uint64Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint64), nil
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) {
+	f.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+	f.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func Uint64Var(p *uint64, name string, value uint64, usage string) {
+	CommandLine.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+	CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 {
+	p := new(uint64)
+	f.Uint64VarP(p, name, "", value, usage)
+	return p
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+	p := new(uint64)
+	f.Uint64VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func Uint64(name string, value uint64, usage string) *uint64 {
+	return CommandLine.Uint64P(name, "", value, usage)
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+	return CommandLine.Uint64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint8.go b/vendor/github.com/spf13/pflag/uint8.go
new file mode 100644
index 000000000..bb0e83c1f
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint8.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint8 Value
+type uint8Value uint8
+
+func newUint8Value(val uint8, p *uint8) *uint8Value {
+	*p = val
+	return (*uint8Value)(p)
+}
+
+func (i *uint8Value) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 8)
+	*i = uint8Value(v)
+	return err
+}
+
+func (i *uint8Value) Type() string {
+	return "uint8"
+}
+
+func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint8Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 8)
+	if err != nil {
+		return 0, err
+	}
+	return uint8(v), nil
+}
+
+// GetUint8 returns the uint8 value of a flag with the given name
+func (f *FlagSet) GetUint8(name string) (uint8, error) {
+	val, err := f.getFlagType(name, "uint8", uint8Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint8), nil
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) {
+	f.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+	f.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func Uint8Var(p *uint8, name string, value uint8, usage string) {
+	CommandLine.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+	CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 {
+	p := new(uint8)
+	f.Uint8VarP(p, name, "", value, usage)
+	return p
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+	p := new(uint8)
+	f.Uint8VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func Uint8(name string, value uint8, usage string) *uint8 {
+	return CommandLine.Uint8P(name, "", value, usage)
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+	return CommandLine.Uint8P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go
new file mode 100644
index 000000000..edd94c600
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint_slice.go
@@ -0,0 +1,126 @@
+package pflag
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// -- uintSlice Value
+type uintSliceValue struct {
+	value   *[]uint
+	changed bool
+}
+
+func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue {
+	uisv := new(uintSliceValue)
+	uisv.value = p
+	*uisv.value = val
+	return uisv
+}
+
+func (s *uintSliceValue) Set(val string) error {
+	ss := strings.Split(val, ",")
+	out := make([]uint, len(ss))
+	for i, d := range ss {
+		u, err := strconv.ParseUint(d, 10, 0)
+		if err != nil {
+			return err
+		}
+		out[i] = uint(u)
+	}
+	if !s.changed {
+		*s.value = out
+	} else {
+		*s.value = append(*s.value, out...)
+	}
+	s.changed = true
+	return nil
+}
+
+func (s *uintSliceValue) Type() string {
+	return "uintSlice"
+}
+
+func (s *uintSliceValue) String() string {
+	out := make([]string, len(*s.value))
+	for i, d := range *s.value {
+		out[i] = fmt.Sprintf("%d", d)
+	}
+	return "[" + strings.Join(out, ",") + "]"
+}
+
+func uintSliceConv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// Empty string would cause a slice with one (empty) entry
+	if len(val) == 0 {
+		return []uint{}, nil
+	}
+	ss := strings.Split(val, ",")
+	out := make([]uint, len(ss))
+	for i, d := range ss {
+		u, err := strconv.ParseUint(d, 10, 0)
+		if err != nil {
+			return nil, err
+		}
+		out[i] = uint(u)
+	}
+	return out, nil
+}
+
+// GetUintSlice returns the []uint value of a flag with the given name.
+func (f *FlagSet) GetUintSlice(name string) ([]uint, error) {
+	val, err := f.getFlagType(name, "uintSlice", uintSliceConv)
+	if err != nil {
+		return []uint{}, err
+	}
+	return val.([]uint), nil
+}
+
+// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+	f.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+	f.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSliceVar defines a []uint flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+	CommandLine.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+	CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint {
+	p := []uint{}
+	f.UintSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+	p := []uint{}
+	f.UintSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func UintSlice(name string, value []uint, usage string) *[]uint {
+	return CommandLine.UintSliceP(name, "", value, usage)
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+	return CommandLine.UintSliceP(name, shorthand, value, usage)
+}
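+
+// A hypothetical usage sketch: the first occurrence of the flag replaces the
+// default, and later occurrences append to the slice (see Set above).
+//
+//   fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
+//   ports := fs.UintSlice("ports", []uint{80}, "ports to listen on")
+//   _ = fs.Parse([]string{"--ports=8080,8443", "--ports=9090"})
+//   // *ports == []uint{8080, 8443, 9090}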
diff --git a/vendor/github.com/spf13/viper/LICENSE b/vendor/github.com/spf13/viper/LICENSE
new file mode 100644
index 000000000..4527efb9c
--- /dev/null
+++ b/vendor/github.com/spf13/viper/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Steve Francia
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/spf13/viper/flags.go b/vendor/github.com/spf13/viper/flags.go
new file mode 100644
index 000000000..dd32f4e1c
--- /dev/null
+++ b/vendor/github.com/spf13/viper/flags.go
@@ -0,0 +1,57 @@
+package viper
+
+import "github.com/spf13/pflag"
+
+// FlagValueSet is an interface that users can implement
+// to bind a set of flags to viper.
+type FlagValueSet interface {
+	VisitAll(fn func(FlagValue))
+}
+
+// FlagValue is an interface that users can implement
+// to bind different flags to viper.
+type FlagValue interface {
+	HasChanged() bool
+	Name() string
+	ValueString() string
+	ValueType() string
+}
+
+// pflagValueSet is a wrapper around *pflag.FlagSet
+// that implements FlagValueSet.
+type pflagValueSet struct {
+	flags *pflag.FlagSet
+}
+
+// VisitAll iterates over all *pflag.Flag inside the *pflag.FlagSet.
+func (p pflagValueSet) VisitAll(fn func(flag FlagValue)) {
+	p.flags.VisitAll(func(flag *pflag.Flag) {
+		fn(pflagValue{flag})
+	})
+}
+
+// pflagValue is a wrapper around *pflag.Flag
+// that implements FlagValue.
+type pflagValue struct {
+	flag *pflag.Flag
+}
+
+// HasChanged returns whether the flag has been changed.
+func (p pflagValue) HasChanged() bool {
+	return p.flag.Changed
+}
+
+// Name returns the name of the flag.
+func (p pflagValue) Name() string {
+	return p.flag.Name
+}
+
+// ValueString returns the value of the flag as a string.
+func (p pflagValue) ValueString() string {
+	return p.flag.Value.String()
+}
+
+// ValueType returns the type of the flag as a string.
+func (p pflagValue) ValueType() string {
+	return p.flag.Value.Type()
+}
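+
+// The FlagValue interface makes it possible to bind flag libraries other
+// than pflag. A minimal sketch (hypothetical; myFlag is not part of viper):
+//
+//   type myFlag struct {
+//   	name, value string
+//   	changed     bool
+//   }
+//
+//   func (f myFlag) HasChanged() bool    { return f.changed }
+//   func (f myFlag) Name() string        { return f.name }
+//   func (f myFlag) ValueString() string { return f.value }
+//   func (f myFlag) ValueType() string   { return "string" }
+//
+//   // viper.BindFlagValue("verbose", myFlag{name: "verbose", value: "true"})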
diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go
new file mode 100644
index 000000000..952cad44c
--- /dev/null
+++ b/vendor/github.com/spf13/viper/util.go
@@ -0,0 +1,221 @@
+// Copyright © 2014 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Viper is an application configuration system.
+// It believes that applications can be configured in a variety of ways
+// via flags, ENVIRONMENT variables, configuration files retrieved
+// from the file system, or a remote key/value store.
+
+package viper
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"unicode"
+
+	"github.com/spf13/afero"
+	"github.com/spf13/cast"
+	jww "github.com/spf13/jwalterweatherman"
+)
+
+// ConfigParseError denotes failing to parse configuration file.
+type ConfigParseError struct {
+	err error
+}
+
+// Error returns the formatted configuration error.
+func (pe ConfigParseError) Error() string {
+	return fmt.Sprintf("While parsing config: %s", pe.err.Error())
+}
+
+// toCaseInsensitiveValue checks if the value is a map;
+// if so, it creates a copy and recursively lower-cases the keys.
+func toCaseInsensitiveValue(value interface{}) interface{} {
+	switch v := value.(type) {
+	case map[interface{}]interface{}:
+		value = copyAndInsensitiviseMap(cast.ToStringMap(v))
+	case map[string]interface{}:
+		value = copyAndInsensitiviseMap(v)
+	}
+
+	return value
+}
+
+// copyAndInsensitiviseMap behaves like insensitiviseMap, but creates a copy of
+// any map it makes case insensitive.
+func copyAndInsensitiviseMap(m map[string]interface{}) map[string]interface{} {
+	nm := make(map[string]interface{})
+
+	for key, val := range m {
+		lkey := strings.ToLower(key)
+		switch v := val.(type) {
+		case map[interface{}]interface{}:
+			nm[lkey] = copyAndInsensitiviseMap(cast.ToStringMap(v))
+		case map[string]interface{}:
+			nm[lkey] = copyAndInsensitiviseMap(v)
+		default:
+			nm[lkey] = v
+		}
+	}
+
+	return nm
+}
+
+func insensitiviseMap(m map[string]interface{}) {
+	for key, val := range m {
+		switch val.(type) {
+		case map[interface{}]interface{}:
+			// nested map: cast and recursively insensitivise
+			val = cast.ToStringMap(val)
+			insensitiviseMap(val.(map[string]interface{}))
+		case map[string]interface{}:
+			// nested map: recursively insensitivise
+			insensitiviseMap(val.(map[string]interface{}))
+		}
+
+		lower := strings.ToLower(key)
+		if key != lower {
+			// remove old key (not lower-cased)
+			delete(m, key)
+		}
+		// update map
+		m[lower] = val
+	}
+}
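+
+// For example (hypothetical input): insensitiviseMap turns
+//
+//   map[string]interface{}{"Foo": map[string]interface{}{"BAR": 1}}
+//
+// into {"foo": {"bar": 1}}, lower-casing keys at every nesting level.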
+
+func absPathify(inPath string) string {
+	jww.INFO.Println("Trying to resolve absolute path to", inPath)
+
+	if strings.HasPrefix(inPath, "$HOME") {
+		inPath = userHomeDir() + inPath[5:]
+	}
+
+	if strings.HasPrefix(inPath, "$") {
+		end := strings.Index(inPath, string(os.PathSeparator))
+		inPath = os.Getenv(inPath[1:end]) + inPath[end:]
+	}
+
+	if filepath.IsAbs(inPath) {
+		return filepath.Clean(inPath)
+	}
+
+	p, err := filepath.Abs(inPath)
+	if err == nil {
+		return filepath.Clean(p)
+	}
+
+	jww.ERROR.Println("Couldn't discover absolute path")
+	jww.ERROR.Println(err)
+	return ""
+}
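+
+// For example (assuming HOME=/home/user): absPathify("$HOME/.config/app")
+// expands to "/home/user/.config/app", and a relative "cfg" becomes the
+// cleaned absolute path of ./cfg.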
+
+// exists checks whether the given file or directory exists.
+func exists(fs afero.Fs, path string) (bool, error) {
+	_, err := fs.Stat(path)
+	if err == nil {
+		return true, nil
+	}
+	if os.IsNotExist(err) {
+		return false, nil
+	}
+	return false, err
+}
+
+func stringInSlice(a string, list []string) bool {
+	for _, b := range list {
+		if b == a {
+			return true
+		}
+	}
+	return false
+}
+
+func userHomeDir() string {
+	if runtime.GOOS == "windows" {
+		home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
+		if home == "" {
+			home = os.Getenv("USERPROFILE")
+		}
+		return home
+	}
+	return os.Getenv("HOME")
+}
+
+func safeMul(a, b uint) uint {
+	c := a * b
+	if a > 1 && b > 1 && c/b != a {
+		return 0
+	}
+	return c
+}
+
+// parseSizeInBytes converts strings like 1GB or 12 mb into an unsigned integer number of bytes
+func parseSizeInBytes(sizeStr string) uint {
+	sizeStr = strings.TrimSpace(sizeStr)
+	lastChar := len(sizeStr) - 1
+	multiplier := uint(1)
+
+	if lastChar > 0 {
+		if sizeStr[lastChar] == 'b' || sizeStr[lastChar] == 'B' {
+			if lastChar > 1 {
+				switch unicode.ToLower(rune(sizeStr[lastChar-1])) {
+				case 'k':
+					multiplier = 1 << 10
+					sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
+				case 'm':
+					multiplier = 1 << 20
+					sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
+				case 'g':
+					multiplier = 1 << 30
+					sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
+				default:
+					multiplier = 1
+					sizeStr = strings.TrimSpace(sizeStr[:lastChar])
+				}
+			}
+		}
+	}
+
+	size := cast.ToInt(sizeStr)
+	if size < 0 {
+		size = 0
+	}
+
+	return safeMul(uint(size), multiplier)
+}
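+
+// For example: parseSizeInBytes("1kb") == 1024,
+// parseSizeInBytes("10 MB") == 10485760, and a bare "42" parses as 42.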
+
+// deepSearch scans deep maps, following the key indexes listed in the
+// sequence "path".
+// The last value is expected to be another map, and is returned.
+//
+// In case intermediate keys do not exist, or map to a non-map value,
+// a new map is created and inserted, and the search continues from there:
+// the initial map "m" may be modified!
+func deepSearch(m map[string]interface{}, path []string) map[string]interface{} {
+	for _, k := range path {
+		m2, ok := m[k]
+		if !ok {
+			// intermediate key does not exist
+			// => create it and continue from there
+			m3 := make(map[string]interface{})
+			m[k] = m3
+			m = m3
+			continue
+		}
+		m3, ok := m2.(map[string]interface{})
+		if !ok {
+			// intermediate key is a value
+			// => replace with a new map
+			m3 = make(map[string]interface{})
+			m[k] = m3
+		}
+		// continue search from here
+		m = m3
+	}
+	return m
+}
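+
+// For example (hypothetical): with m = {"a": {"b": 1}},
+// deepSearch(m, []string{"a", "c"}) inserts and returns a new empty map at
+// m["a"]["c"], while deepSearch(m, []string{"a", "b"}) replaces the scalar
+// at "b" with a new empty map.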
diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go
new file mode 100644
index 000000000..f657b201d
--- /dev/null
+++ b/vendor/github.com/spf13/viper/viper.go
@@ -0,0 +1,1802 @@
+// Copyright © 2014 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Viper is an application configuration system.
+// It believes that applications can be configured in a variety of ways
+// via flags, ENVIRONMENT variables, configuration files retrieved
+// from the file system, or a remote key/value store.
+
+// Each item takes precedence over the item below it:
+
+// overrides
+// flag
+// env
+// config
+// key/value store
+// default
+
+package viper
+
+import (
+	"bytes"
+	"encoding/csv"
+	"encoding/json"
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"path/filepath"
+	"reflect"
+	"strings"
+	"time"
+
+	yaml "gopkg.in/yaml.v2"
+
+	"github.com/fsnotify/fsnotify"
+	"github.com/hashicorp/hcl"
+	"github.com/hashicorp/hcl/hcl/printer"
+	"github.com/magiconair/properties"
+	"github.com/mitchellh/mapstructure"
+	toml "github.com/pelletier/go-toml"
+	"github.com/spf13/afero"
+	"github.com/spf13/cast"
+	jww "github.com/spf13/jwalterweatherman"
+	"github.com/spf13/pflag"
+)
+
+// ConfigMarshalError happens when failing to marshal the configuration.
+type ConfigMarshalError struct {
+	err error
+}
+
+// Error returns the formatted configuration error.
+func (e ConfigMarshalError) Error() string {
+	return fmt.Sprintf("While marshaling config: %s", e.err.Error())
+}
+
+var v *Viper
+
+// RemoteResponse is the response returned when watching a remote config source.
+type RemoteResponse struct {
+	Value []byte
+	Error error
+}
+
+func init() {
+	v = New()
+}
+
+type remoteConfigFactory interface {
+	Get(rp RemoteProvider) (io.Reader, error)
+	Watch(rp RemoteProvider) (io.Reader, error)
+	WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool)
+}
+
+// RemoteConfig is optional; see the remote package.
+var RemoteConfig remoteConfigFactory
+
+// UnsupportedConfigError denotes encountering an unsupported
+// configuration filetype.
+type UnsupportedConfigError string
+
+// Error returns the formatted configuration error.
+func (str UnsupportedConfigError) Error() string {
+	return fmt.Sprintf("Unsupported Config Type %q", string(str))
+}
+
+// UnsupportedRemoteProviderError denotes encountering an unsupported remote
+// provider. Currently only etcd and Consul are supported.
+type UnsupportedRemoteProviderError string
+
+// Error returns the formatted remote provider error.
+func (str UnsupportedRemoteProviderError) Error() string {
+	return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str))
+}
+
+// RemoteConfigError denotes encountering an error while trying to
+// pull the configuration from the remote provider.
+type RemoteConfigError string
+
+// Error returns the formatted remote provider error
+func (rce RemoteConfigError) Error() string {
+	return fmt.Sprintf("Remote Configurations Error: %s", string(rce))
+}
+
+// ConfigFileNotFoundError denotes failing to find configuration file.
+type ConfigFileNotFoundError struct {
+	name, locations string
+}
+
+// Error returns the formatted configuration error.
+func (fnfe ConfigFileNotFoundError) Error() string {
+	return fmt.Sprintf("Config File %q Not Found in %q", fnfe.name, fnfe.locations)
+}
+
+// A DecoderConfigOption can be passed to viper.Unmarshal to configure
+// mapstructure.DecoderConfig options
+type DecoderConfigOption func(*mapstructure.DecoderConfig)
+
+// DecodeHook returns a DecoderConfigOption which overrides the default
+// DecoderConfig.DecodeHook value. The default is:
+//
+//  mapstructure.ComposeDecodeHookFunc(
+//		mapstructure.StringToTimeDurationHookFunc(),
+//		mapstructure.StringToSliceHookFunc(","),
+//	)
+func DecodeHook(hook mapstructure.DecodeHookFunc) DecoderConfigOption {
+	return func(c *mapstructure.DecoderConfig) {
+		c.DecodeHook = hook
+	}
+}
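+
+// A hypothetical usage sketch, swapping in a custom hook chain
+// (cfg is any config struct of yours):
+//
+//   err := viper.Unmarshal(&cfg, viper.DecodeHook(
+//   	mapstructure.ComposeDecodeHookFunc(
+//   		mapstructure.StringToTimeDurationHookFunc(),
+//   		mapstructure.StringToTimeHookFunc(time.RFC3339),
+//   	),
+//   ))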
+
+// Viper is a prioritized configuration registry. It
+// maintains a set of configuration sources, fetches
+// values to populate those, and provides them according
+// to the source's priority.
+// The priority of the sources is the following:
+// 1. overrides
+// 2. flags
+// 3. env. variables
+// 4. config file
+// 5. key/value store
+// 6. defaults
+//
+// For example, if values from the following sources were loaded:
+//
+//  Defaults : {
+//  	"secret": "",
+//  	"user": "default",
+//  	"endpoint": "https://localhost"
+//  }
+//  Config : {
+//  	"user": "root"
+//  	"secret": "defaultsecret"
+//  }
+//  Env : {
+//  	"secret": "somesecretkey"
+//  }
+//
+// The resulting config will have the following values:
+//
+//	{
+//		"secret": "somesecretkey",
+//		"user": "root",
+//		"endpoint": "https://localhost"
+//	}
+type Viper struct {
+	// Delimiter that separates a list of keys
+	// used to access a nested value in one go
+	keyDelim string
+
+	// A set of paths to look for the config file in
+	configPaths []string
+
+	// The filesystem to read config from.
+	fs afero.Fs
+
+	// A set of remote providers to search for the configuration
+	remoteProviders []*defaultRemoteProvider
+
+	// Name of file to look for inside the path
+	configName string
+	configFile string
+	configType string
+	envPrefix  string
+
+	automaticEnvApplied bool
+	envKeyReplacer      *strings.Replacer
+
+	config         map[string]interface{}
+	override       map[string]interface{}
+	defaults       map[string]interface{}
+	kvstore        map[string]interface{}
+	pflags         map[string]FlagValue
+	env            map[string]string
+	aliases        map[string]string
+	typeByDefValue bool
+
+	// Store read properties on the object so that we can write back in order with comments.
+	// This will only be used if the configuration read is a properties file.
+	properties *properties.Properties
+
+	onConfigChange func(fsnotify.Event)
+}
+
+// New returns an initialized Viper instance.
+func New() *Viper {
+	v := new(Viper)
+	v.keyDelim = "."
+	v.configName = "config"
+	v.fs = afero.NewOsFs()
+	v.config = make(map[string]interface{})
+	v.override = make(map[string]interface{})
+	v.defaults = make(map[string]interface{})
+	v.kvstore = make(map[string]interface{})
+	v.pflags = make(map[string]FlagValue)
+	v.env = make(map[string]string)
+	v.aliases = make(map[string]string)
+	v.typeByDefValue = false
+
+	return v
+}
+
+// Reset is intended for testing; it resets all settings to their defaults.
+// It is in the public interface of the viper package so applications
+// can use it in their testing as well.
+func Reset() {
+	v = New()
+	SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl"}
+	SupportedRemoteProviders = []string{"etcd", "consul"}
+}
+
+type defaultRemoteProvider struct {
+	provider      string
+	endpoint      string
+	path          string
+	secretKeyring string
+}
+
+func (rp defaultRemoteProvider) Provider() string {
+	return rp.provider
+}
+
+func (rp defaultRemoteProvider) Endpoint() string {
+	return rp.endpoint
+}
+
+func (rp defaultRemoteProvider) Path() string {
+	return rp.path
+}
+
+func (rp defaultRemoteProvider) SecretKeyring() string {
+	return rp.secretKeyring
+}
+
+// RemoteProvider stores the configuration necessary
+// to connect to a remote key/value store.
+// Optional secretKeyring to unencrypt encrypted values
+// can be provided.
+type RemoteProvider interface {
+	Provider() string
+	Endpoint() string
+	Path() string
+	SecretKeyring() string
+}
+
+// SupportedExts are universally supported extensions.
+var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl"}
+
+// SupportedRemoteProviders are universally supported remote providers.
+var SupportedRemoteProviders = []string{"etcd", "consul"}
+
+// OnConfigChange registers a callback to be invoked when the config file changes.
+func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) }
+func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) {
+	v.onConfigChange = run
+}
+
+// WatchConfig watches the config file for changes, re-reading it and
+// invoking the OnConfigChange callback when it is written or created.
+func WatchConfig() { v.WatchConfig() }
+func (v *Viper) WatchConfig() {
+	go func() {
+		watcher, err := fsnotify.NewWatcher()
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer watcher.Close()
+
+		// we have to watch the entire directory to pick up renames/atomic saves in a cross-platform way
+		filename, err := v.getConfigFile()
+		if err != nil {
+			log.Println("error:", err)
+			return
+		}
+
+		configFile := filepath.Clean(filename)
+		configDir, _ := filepath.Split(configFile)
+
+		done := make(chan bool)
+		go func() {
+			for {
+				select {
+				case event := <-watcher.Events:
+					// we only care about the config file
+					if filepath.Clean(event.Name) == configFile {
+						if event.Op&fsnotify.Write == fsnotify.Write || event.Op&fsnotify.Create == fsnotify.Create {
+							err := v.ReadInConfig()
+							if err != nil {
+								log.Println("error:", err)
+							}
+							v.onConfigChange(event)
+						}
+					}
+				case err := <-watcher.Errors:
+					log.Println("error:", err)
+				}
+			}
+		}()
+
+		watcher.Add(configDir)
+		<-done
+	}()
+}
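+
+// Typical usage (a hypothetical sketch; the file name is made up). Note the
+// callback is registered before watching starts:
+//
+//   viper.SetConfigFile("/etc/myapp/config.yaml")
+//   _ = viper.ReadInConfig()
+//   viper.OnConfigChange(func(e fsnotify.Event) {
+//   	log.Println("config file changed:", e.Name)
+//   })
+//   viper.WatchConfig()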
+
+// SetConfigFile explicitly defines the path, name and extension of the config file.
+// Viper will use this and not check any of the config paths.
+func SetConfigFile(in string) { v.SetConfigFile(in) }
+func (v *Viper) SetConfigFile(in string) {
+	if in != "" {
+		v.configFile = in
+	}
+}
+
+// SetEnvPrefix defines a prefix that ENVIRONMENT variables will use.
+// E.g. if your prefix is "spf", the env registry will look for env
+// variables that start with "SPF_".
+func SetEnvPrefix(in string) { v.SetEnvPrefix(in) }
+func (v *Viper) SetEnvPrefix(in string) {
+	if in != "" {
+		v.envPrefix = in
+	}
+}
+
+func (v *Viper) mergeWithEnvPrefix(in string) string {
+	if v.envPrefix != "" {
+		return strings.ToUpper(v.envPrefix + "_" + in)
+	}
+
+	return strings.ToUpper(in)
+}
+
+// TODO: should the getEnv logic be moved into find()? Key rewriting could be
+// generalized to many things, e.g. Get("someKey") -> some_key
+// (camel case to snake case for JSON keys, perhaps).
+
+// getEnv is a wrapper around os.Getenv which replaces characters in the original
+// key. This allows env vars which have different keys than the config object
+// keys.
+func (v *Viper) getEnv(key string) string {
+	if v.envKeyReplacer != nil {
+		key = v.envKeyReplacer.Replace(key)
+	}
+	return os.Getenv(key)
+}
+
+// ConfigFileUsed returns the file used to populate the config registry.
+func ConfigFileUsed() string            { return v.ConfigFileUsed() }
+func (v *Viper) ConfigFileUsed() string { return v.configFile }
+
+// AddConfigPath adds a path for Viper to search for the config file in.
+// Can be called multiple times to define multiple search paths.
+func AddConfigPath(in string) { v.AddConfigPath(in) }
+func (v *Viper) AddConfigPath(in string) {
+	if in != "" {
+		absin := absPathify(in)
+		jww.INFO.Println("adding", absin, "to paths to search")
+		if !stringInSlice(absin, v.configPaths) {
+			v.configPaths = append(v.configPaths, absin)
+		}
+	}
+}
+
+// AddRemoteProvider adds a remote configuration source.
+// Remote Providers are searched in the order they are added.
+// provider is a string value; "etcd" and "consul" are currently supported.
+// endpoint is the url. etcd requires http://ip:port, consul requires ip:port.
+// path is the path in the k/v store to retrieve configuration from.
+// To retrieve a config file called myapp.json from /configs/myapp.json,
+// you should set path to "/configs" and set the config name (SetConfigName())
+// to "myapp".
+func AddRemoteProvider(provider, endpoint, path string) error {
+	return v.AddRemoteProvider(provider, endpoint, path)
+}
+func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error {
+	if !stringInSlice(provider, SupportedRemoteProviders) {
+		return UnsupportedRemoteProviderError(provider)
+	}
+	if provider != "" && endpoint != "" {
+		jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint)
+		rp := &defaultRemoteProvider{
+			endpoint: endpoint,
+			provider: provider,
+			path:     path,
+		}
+		if !v.providerPathExists(rp) {
+			v.remoteProviders = append(v.remoteProviders, rp)
+		}
+	}
+	return nil
+}
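+
+// A hypothetical sketch (the endpoint and path are made up; the optional
+// viper remote package must be imported for its side effects):
+//
+//   _ = viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001", "/configs/myapp.json")
+//   viper.SetConfigType("json")
+//   _ = viper.ReadRemoteConfig()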
+
+// AddSecureRemoteProvider adds a remote configuration source.
+// Secure Remote Providers are searched in the order they are added.
+// provider is a string value; "etcd" and "consul" are currently supported.
+// endpoint is the url. etcd requires http://ip:port, consul requires ip:port.
+// secretkeyring is the filepath to your openpgp secret keyring, e.g. /etc/secrets/myring.gpg.
+// path is the path in the k/v store to retrieve configuration from.
+// To retrieve a config file called myapp.json from /configs/myapp.json,
+// you should set path to "/configs" and set the config name (SetConfigName())
+// to "myapp".
+// Secure Remote Providers are implemented with github.com/xordataexchange/crypt.
+func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error {
+	return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring)
+}
+
+func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error {
+	if !stringInSlice(provider, SupportedRemoteProviders) {
+		return UnsupportedRemoteProviderError(provider)
+	}
+	if provider != "" && endpoint != "" {
+		jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint)
+		rp := &defaultRemoteProvider{
+			endpoint:      endpoint,
+			provider:      provider,
+			path:          path,
+			secretKeyring: secretkeyring,
+		}
+		if !v.providerPathExists(rp) {
+			v.remoteProviders = append(v.remoteProviders, rp)
+		}
+	}
+	return nil
+}
+
+func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool {
+	for _, y := range v.remoteProviders {
+		if reflect.DeepEqual(y, p) {
+			return true
+		}
+	}
+	return false
+}
+
+// searchMap recursively searches for a value for path in source map.
+// Returns nil if not found.
+// Note: This assumes that the path entries and map keys are lower cased.
+func (v *Viper) searchMap(source map[string]interface{}, path []string) interface{} {
+	if len(path) == 0 {
+		return source
+	}
+
+	next, ok := source[path[0]]
+	if ok {
+		// Fast path
+		if len(path) == 1 {
+			return next
+		}
+
+		// Nested case
+		switch next.(type) {
+		case map[interface{}]interface{}:
+			return v.searchMap(cast.ToStringMap(next), path[1:])
+		case map[string]interface{}:
+			// Type assertion is safe here since it is only reached
+			// if the type of `next` is the same as the type being asserted
+			return v.searchMap(next.(map[string]interface{}), path[1:])
+		default:
+			// got a value but nested key expected, return "nil" for not found
+			return nil
+		}
+	}
+	return nil
+}
+
+// searchMapWithPathPrefixes recursively searches for a value for path in source map.
+//
+// While searchMap() considers each path element as a single map key, this
+// function searches for, and prioritizes, merged path elements.
+// e.g., if in the source, "foo" is defined with a sub-key "bar", and "foo.bar"
+// is also defined, this latter value is returned for path ["foo", "bar"].
+//
+// This should be useful only at config level (other maps may not contain dots
+// in their keys).
+//
+// Note: This assumes that the path entries and map keys are lower cased.
+func (v *Viper) searchMapWithPathPrefixes(source map[string]interface{}, path []string) interface{} {
+	if len(path) == 0 {
+		return source
+	}
+
+	// search for path prefixes, starting from the longest one
+	for i := len(path); i > 0; i-- {
+		prefixKey := strings.ToLower(strings.Join(path[0:i], v.keyDelim))
+
+		next, ok := source[prefixKey]
+		if ok {
+			// Fast path
+			if i == len(path) {
+				return next
+			}
+
+			// Nested case
+			var val interface{}
+			switch next.(type) {
+			case map[interface{}]interface{}:
+				val = v.searchMapWithPathPrefixes(cast.ToStringMap(next), path[i:])
+			case map[string]interface{}:
+				// Type assertion is safe here since it is only reached
+				// if the type of `next` is the same as the type being asserted
+				val = v.searchMapWithPathPrefixes(next.(map[string]interface{}), path[i:])
+			default:
+				// got a value but nested key expected, do nothing and look for next prefix
+			}
+			if val != nil {
+				return val
+			}
+		}
+	}
+
+	// not found
+	return nil
+}
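+
+// For example (hypothetical source): with
+// source = {"foo.bar": 1, "foo": {"bar": 2}} and path ["foo", "bar"],
+// the merged key "foo.bar" is tried first, so 1 is returned.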
+
+// isPathShadowedInDeepMap makes sure the given path is not shadowed somewhere
+// on its path in the map.
+// e.g., if "foo.bar" has a value in the given map, it “shadows”
+//       "foo.bar.baz" in a lower-priority map
+func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) string {
+	var parentVal interface{}
+	for i := 1; i < len(path); i++ {
+		parentVal = v.searchMap(m, path[0:i])
+		if parentVal == nil {
+			// not found, no need to add more path elements
+			return ""
+		}
+		switch parentVal.(type) {
+		case map[interface{}]interface{}:
+			continue
+		case map[string]interface{}:
+			continue
+		default:
+			// parentVal is a regular value which shadows "path"
+			return strings.Join(path[0:i], v.keyDelim)
+		}
+	}
+	return ""
+}
+
+// isPathShadowedInFlatMap makes sure the given path is not shadowed somewhere
+// in a sub-path of the map.
+// e.g., if "foo.bar" has a value in the given map, it “shadows”
+//       "foo.bar.baz" in a lower-priority map
+func (v *Viper) isPathShadowedInFlatMap(path []string, mi interface{}) string {
+	// unify input map
+	var m map[string]interface{}
+	switch mi.(type) {
+	case map[string]string, map[string]FlagValue:
+		m = cast.ToStringMap(mi)
+	default:
+		return ""
+	}
+
+	// scan paths
+	var parentKey string
+	for i := 1; i < len(path); i++ {
+		parentKey = strings.Join(path[0:i], v.keyDelim)
+		if _, ok := m[parentKey]; ok {
+			return parentKey
+		}
+	}
+	return ""
+}
+
+// isPathShadowedInAutoEnv makes sure the given path is not shadowed somewhere
+// in the environment, when automatic env is on.
+// e.g., if "foo.bar" has a value in the environment, it “shadows”
+//       "foo.bar.baz" in a lower-priority map
+func (v *Viper) isPathShadowedInAutoEnv(path []string) string {
+	var parentKey string
+	var val string
+	for i := 1; i < len(path); i++ {
+		parentKey = strings.Join(path[0:i], v.keyDelim)
+		if val = v.getEnv(v.mergeWithEnvPrefix(parentKey)); val != "" {
+			return parentKey
+		}
+	}
+	return ""
+}
+
+// SetTypeByDefaultValue enables or disables the inference of a key value's
+// type when the Get function is used based upon a key's default value as
+// opposed to the value returned based on the normal fetch logic.
+//
+// For example, if a key has a default value of []string{} and the same key
+// is set via an environment variable to "a b c", a call to the Get function
+// would return a string slice for the key if the key's type is inferred by
+// the default value and the Get function would return:
+//
+//   []string {"a", "b", "c"}
+//
+// Otherwise the Get function would return:
+//
+//   "a b c"
+func SetTypeByDefaultValue(enable bool) { v.SetTypeByDefaultValue(enable) }
+func (v *Viper) SetTypeByDefaultValue(enable bool) {
+	v.typeByDefValue = enable
+}
+
+// GetViper gets the global Viper instance.
+func GetViper() *Viper {
+	return v
+}
+
+// Get can retrieve any value given the key to use.
+// Get is case-insensitive for a key.
+// Get has the behavior of returning the value associated with the first
+// place from where it is set. Viper will check in the following order:
+// override, flag, env, config file, key/value store, default
+//
+// Get returns an interface. For a specific value use one of the Get____ methods.
+func Get(key string) interface{} { return v.Get(key) }
+func (v *Viper) Get(key string) interface{} {
+	lcaseKey := strings.ToLower(key)
+	val := v.find(lcaseKey)
+	if val == nil {
+		return nil
+	}
+
+	if v.typeByDefValue {
+		// TODO(bep) this branch isn't covered by a single test.
+		valType := val
+		path := strings.Split(lcaseKey, v.keyDelim)
+		defVal := v.searchMap(v.defaults, path)
+		if defVal != nil {
+			valType = defVal
+		}
+
+		switch valType.(type) {
+		case bool:
+			return cast.ToBool(val)
+		case string:
+			return cast.ToString(val)
+		case int64, int32, int16, int8, int:
+			return cast.ToInt(val)
+		case float64, float32:
+			return cast.ToFloat64(val)
+		case time.Time:
+			return cast.ToTime(val)
+		case time.Duration:
+			return cast.ToDuration(val)
+		case []string:
+			return cast.ToStringSlice(val)
+		}
+	}
+
+	return val
+}
+
+// Sub returns a new Viper instance representing a sub-tree of this instance.
+// Sub is case-insensitive for a key.
+func Sub(key string) *Viper { return v.Sub(key) }
+func (v *Viper) Sub(key string) *Viper {
+	subv := New()
+	data := v.Get(key)
+	if data == nil {
+		return nil
+	}
+
+	if reflect.TypeOf(data).Kind() == reflect.Map {
+		subv.config = cast.ToStringMap(data)
+		return subv
+	}
+	return nil
+}
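+
+// For example (hypothetical config): given {"cache": {"max-items": 100}},
+// v.Sub("cache").GetInt("max-items") == 100. Sub returns nil when the key
+// is missing or does not hold a map, so check before using the result.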
+
+// GetString returns the value associated with the key as a string.
+func GetString(key string) string { return v.GetString(key) }
+func (v *Viper) GetString(key string) string {
+	return cast.ToString(v.Get(key))
+}
+
+// GetBool returns the value associated with the key as a boolean.
+func GetBool(key string) bool { return v.GetBool(key) }
+func (v *Viper) GetBool(key string) bool {
+	return cast.ToBool(v.Get(key))
+}
+
+// GetInt returns the value associated with the key as an integer.
+func GetInt(key string) int { return v.GetInt(key) }
+func (v *Viper) GetInt(key string) int {
+	return cast.ToInt(v.Get(key))
+}
+
+// GetInt32 returns the value associated with the key as an integer.
+func GetInt32(key string) int32 { return v.GetInt32(key) }
+func (v *Viper) GetInt32(key string) int32 {
+	return cast.ToInt32(v.Get(key))
+}
+
+// GetInt64 returns the value associated with the key as an integer.
+func GetInt64(key string) int64 { return v.GetInt64(key) }
+func (v *Viper) GetInt64(key string) int64 {
+	return cast.ToInt64(v.Get(key))
+}
+
+// GetFloat64 returns the value associated with the key as a float64.
+func GetFloat64(key string) float64 { return v.GetFloat64(key) }
+func (v *Viper) GetFloat64(key string) float64 {
+	return cast.ToFloat64(v.Get(key))
+}
+
+// GetTime returns the value associated with the key as time.
+func GetTime(key string) time.Time { return v.GetTime(key) }
+func (v *Viper) GetTime(key string) time.Time {
+	return cast.ToTime(v.Get(key))
+}
+
+// GetDuration returns the value associated with the key as a duration.
+func GetDuration(key string) time.Duration { return v.GetDuration(key) }
+func (v *Viper) GetDuration(key string) time.Duration {
+	return cast.ToDuration(v.Get(key))
+}
+
+// GetStringSlice returns the value associated with the key as a slice of strings.
+func GetStringSlice(key string) []string { return v.GetStringSlice(key) }
+func (v *Viper) GetStringSlice(key string) []string {
+	return cast.ToStringSlice(v.Get(key))
+}
+
+// GetStringMap returns the value associated with the key as a map of interfaces.
+func GetStringMap(key string) map[string]interface{} { return v.GetStringMap(key) }
+func (v *Viper) GetStringMap(key string) map[string]interface{} {
+	return cast.ToStringMap(v.Get(key))
+}
+
+// GetStringMapString returns the value associated with the key as a map of strings.
+func GetStringMapString(key string) map[string]string { return v.GetStringMapString(key) }
+func (v *Viper) GetStringMapString(key string) map[string]string {
+	return cast.ToStringMapString(v.Get(key))
+}
+
+// GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings.
+func GetStringMapStringSlice(key string) map[string][]string { return v.GetStringMapStringSlice(key) }
+func (v *Viper) GetStringMapStringSlice(key string) map[string][]string {
+	return cast.ToStringMapStringSlice(v.Get(key))
+}
+
+// GetSizeInBytes returns the size of the value associated with the given key
+// in bytes.
+func GetSizeInBytes(key string) uint { return v.GetSizeInBytes(key) }
+func (v *Viper) GetSizeInBytes(key string) uint {
+	sizeStr := cast.ToString(v.Get(key))
+	return parseSizeInBytes(sizeStr)
+}
+
+// UnmarshalKey takes a single key and unmarshals it into a struct.
+func UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error {
+	return v.UnmarshalKey(key, rawVal, opts...)
+}
+func (v *Viper) UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error {
+	err := decode(v.Get(key), defaultDecoderConfig(rawVal, opts...))
+
+	if err != nil {
+		return err
+	}
+
+	v.insensitiviseMaps()
+
+	return nil
+}
+
+// Unmarshal unmarshals the config into a struct. Make sure that the tags
+// on the fields of the structure are properly set.
+func Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error {
+	return v.Unmarshal(rawVal, opts...)
+}
+func (v *Viper) Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error {
+	err := decode(v.AllSettings(), defaultDecoderConfig(rawVal, opts...))
+
+	if err != nil {
+		return err
+	}
+
+	v.insensitiviseMaps()
+
+	return nil
+}
+
+// defaultDecoderConfig returns a default mapstructure.DecoderConfig with support
+// for time.Duration values & string slices.
+func defaultDecoderConfig(output interface{}, opts ...DecoderConfigOption) *mapstructure.DecoderConfig {
+	c := &mapstructure.DecoderConfig{
+		Metadata:         nil,
+		Result:           output,
+		WeaklyTypedInput: true,
+		DecodeHook: mapstructure.ComposeDecodeHookFunc(
+			mapstructure.StringToTimeDurationHookFunc(),
+			mapstructure.StringToSliceHookFunc(","),
+		),
+	}
+	for _, opt := range opts {
+		opt(c)
+	}
+	return c
+}
+
+// decode is a wrapper around mapstructure.Decode that mimics the WeakDecode functionality.
+func decode(input interface{}, config *mapstructure.DecoderConfig) error {
+	decoder, err := mapstructure.NewDecoder(config)
+	if err != nil {
+		return err
+	}
+	return decoder.Decode(input)
+}
+
+// UnmarshalExact unmarshals the config into a struct, erroring if a field is nonexistent
+// in the destination struct.
+func (v *Viper) UnmarshalExact(rawVal interface{}) error {
+	config := defaultDecoderConfig(rawVal)
+	config.ErrorUnused = true
+
+	err := decode(v.AllSettings(), config)
+
+	if err != nil {
+		return err
+	}
+
+	v.insensitiviseMaps()
+
+	return nil
+}
+
+// BindPFlags binds a full flag set to the configuration, using each flag's long
+// name as the config key.
+func BindPFlags(flags *pflag.FlagSet) error { return v.BindPFlags(flags) }
+func (v *Viper) BindPFlags(flags *pflag.FlagSet) error {
+	return v.BindFlagValues(pflagValueSet{flags})
+}
+
+// BindPFlag binds a specific key to a pflag (as used by cobra).
+// Example (where serverCmd is a Cobra instance):
+//
+//	 serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
+//	 Viper.BindPFlag("port", serverCmd.Flags().Lookup("port"))
+//
+func BindPFlag(key string, flag *pflag.Flag) error { return v.BindPFlag(key, flag) }
+func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error {
+	return v.BindFlagValue(key, pflagValue{flag})
+}
+
+// BindFlagValues binds a full FlagValue set to the configuration, using each flag's long
+// name as the config key.
+func BindFlagValues(flags FlagValueSet) error { return v.BindFlagValues(flags) }
+func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) {
+	flags.VisitAll(func(flag FlagValue) {
+		if err = v.BindFlagValue(flag.Name(), flag); err != nil {
+			return
+		}
+	})
+	return nil
+}
+
+// BindFlagValue binds a specific key to a FlagValue.
+// Example (where serverCmd is a Cobra instance):
+//
+//	 serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
+//	 Viper.BindFlagValue("port", serverCmd.Flags().Lookup("port"))
+//
+func BindFlagValue(key string, flag FlagValue) error { return v.BindFlagValue(key, flag) }
+func (v *Viper) BindFlagValue(key string, flag FlagValue) error {
+	if flag == nil {
+		return fmt.Errorf("flag for %q is nil", key)
+	}
+	v.pflags[strings.ToLower(key)] = flag
+	return nil
+}
+
+// BindEnv binds a Viper key to an ENV variable.
+// ENV variables are case sensitive.
+// If only a key is provided, it will use the env key matching the key, uppercased.
+// When set, EnvPrefix will be used if the env name is not provided.
+func BindEnv(input ...string) error { return v.BindEnv(input...) }
+func (v *Viper) BindEnv(input ...string) error {
+	var key, envkey string
+	if len(input) == 0 {
+		return fmt.Errorf("BindEnv missing key to bind to")
+	}
+
+	key = strings.ToLower(input[0])
+
+	if len(input) == 1 {
+		envkey = v.mergeWithEnvPrefix(key)
+	} else {
+		envkey = input[1]
+	}
+
+	v.env[key] = envkey
+
+	return nil
+}
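+
+// For example (hypothetical): with SetEnvPrefix("spf"), BindEnv("id") binds
+// the key "id" to the variable SPF_ID, while BindEnv("id", "MY_ID") binds
+// it to MY_ID exactly as given.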
+
+// Given a key, find the value.
+// Viper will check in the following order:
+// flag, env, config file, key/value store, default.
+// Viper will check to see if an alias exists first.
+// Note: this assumes a lower-cased key given.
+func (v *Viper) find(lcaseKey string) interface{} {
+
+	var (
+		val    interface{}
+		exists bool
+		path   = strings.Split(lcaseKey, v.keyDelim)
+		nested = len(path) > 1
+	)
+
+	// compute the path through the nested maps to the nested value
+	if nested && v.isPathShadowedInDeepMap(path, castMapStringToMapInterface(v.aliases)) != "" {
+		return nil
+	}
+
+	// if the requested key is an alias, then return the proper key
+	lcaseKey = v.realKey(lcaseKey)
+	path = strings.Split(lcaseKey, v.keyDelim)
+	nested = len(path) > 1
+
+	// Set() override first
+	val = v.searchMap(v.override, path)
+	if val != nil {
+		return val
+	}
+	if nested && v.isPathShadowedInDeepMap(path, v.override) != "" {
+		return nil
+	}
+
+	// PFlag override next
+	flag, exists := v.pflags[lcaseKey]
+	if exists && flag.HasChanged() {
+		switch flag.ValueType() {
+		case "int", "int8", "int16", "int32", "int64":
+			return cast.ToInt(flag.ValueString())
+		case "bool":
+			return cast.ToBool(flag.ValueString())
+		case "stringSlice":
+			s := strings.TrimPrefix(flag.ValueString(), "[")
+			s = strings.TrimSuffix(s, "]")
+			res, _ := readAsCSV(s)
+			return res
+		default:
+			return flag.ValueString()
+		}
+	}
+	if nested && v.isPathShadowedInFlatMap(path, v.pflags) != "" {
+		return nil
+	}
+
+	// Env override next
+	if v.automaticEnvApplied {
+		// even if it hasn't been registered, if automaticEnv is used,
+		// check any Get request
+		if val = v.getEnv(v.mergeWithEnvPrefix(lcaseKey)); val != "" {
+			return val
+		}
+		if nested && v.isPathShadowedInAutoEnv(path) != "" {
+			return nil
+		}
+	}
+	envkey, exists := v.env[lcaseKey]
+	if exists {
+		if val = v.getEnv(envkey); val != "" {
+			return val
+		}
+	}
+	if nested && v.isPathShadowedInFlatMap(path, v.env) != "" {
+		return nil
+	}
+
+	// Config file next
+	val = v.searchMapWithPathPrefixes(v.config, path)
+	if val != nil {
+		return val
+	}
+	if nested && v.isPathShadowedInDeepMap(path, v.config) != "" {
+		return nil
+	}
+
+	// K/V store next
+	val = v.searchMap(v.kvstore, path)
+	if val != nil {
+		return val
+	}
+	if nested && v.isPathShadowedInDeepMap(path, v.kvstore) != "" {
+		return nil
+	}
+
+	// Default next
+	val = v.searchMap(v.defaults, path)
+	if val != nil {
+		return val
+	}
+	if nested && v.isPathShadowedInDeepMap(path, v.defaults) != "" {
+		return nil
+	}
+
+	// last chance: if no other value is returned and a flag does exist for the value,
+	// get the flag's value even if the flag's value has not changed
+	if flag, exists := v.pflags[lcaseKey]; exists {
+		switch flag.ValueType() {
+		case "int", "int8", "int16", "int32", "int64":
+			return cast.ToInt(flag.ValueString())
+		case "bool":
+			return cast.ToBool(flag.ValueString())
+		case "stringSlice":
+			s := strings.TrimPrefix(flag.ValueString(), "[")
+			s = strings.TrimSuffix(s, "]")
+			res, _ := readAsCSV(s)
+			return res
+		default:
+			return flag.ValueString()
+		}
+	}
+	// last item, no need to check shadowing
+
+	return nil
+}
+
+func readAsCSV(val string) ([]string, error) {
+	if val == "" {
+		return []string{}, nil
+	}
+	stringReader := strings.NewReader(val)
+	csvReader := csv.NewReader(stringReader)
+	return csvReader.Read()
+}
+
+// IsSet checks to see if the key has been set in any of the data locations.
+// IsSet is case-insensitive for a key.
+func IsSet(key string) bool { return v.IsSet(key) }
+func (v *Viper) IsSet(key string) bool {
+	lcaseKey := strings.ToLower(key)
+	val := v.find(lcaseKey)
+	return val != nil
+}
+
+// AutomaticEnv has Viper check ENV variables for all
+// keys set in config, defaults & flags.
+func AutomaticEnv() { v.AutomaticEnv() }
+func (v *Viper) AutomaticEnv() {
+	v.automaticEnvApplied = true
+}
+
+// SetEnvKeyReplacer sets the strings.Replacer on the viper object.
+// Useful for mapping an environment variable to a key that does
+// not match it.
+func SetEnvKeyReplacer(r *strings.Replacer) { v.SetEnvKeyReplacer(r) }
+func (v *Viper) SetEnvKeyReplacer(r *strings.Replacer) {
+	v.envKeyReplacer = r
+}
+
+// RegisterAlias provides another accessor (alias) for the same key.
+// This enables one to change a name without breaking the application.
+func RegisterAlias(alias string, key string) { v.RegisterAlias(alias, key) }
+func (v *Viper) RegisterAlias(alias string, key string) {
+	v.registerAlias(alias, strings.ToLower(key))
+}
+
+func (v *Viper) registerAlias(alias string, key string) {
+	alias = strings.ToLower(alias)
+	if alias != key && alias != v.realKey(key) {
+		_, exists := v.aliases[alias]
+
+		if !exists {
+			// if we alias something that exists in one of the maps to another
+			// name, we'll never be able to get that value using the original
+			// name, so move the config value to the new realkey.
+			if val, ok := v.config[alias]; ok {
+				delete(v.config, alias)
+				v.config[key] = val
+			}
+			if val, ok := v.kvstore[alias]; ok {
+				delete(v.kvstore, alias)
+				v.kvstore[key] = val
+			}
+			if val, ok := v.defaults[alias]; ok {
+				delete(v.defaults, alias)
+				v.defaults[key] = val
+			}
+			if val, ok := v.override[alias]; ok {
+				delete(v.override, alias)
+				v.override[key] = val
+			}
+			v.aliases[alias] = key
+		}
+	} else {
+		jww.WARN.Println("Creating circular reference alias", alias, key, v.realKey(key))
+	}
+}
+
+func (v *Viper) realKey(key string) string {
+	newkey, exists := v.aliases[key]
+	if exists {
+		jww.DEBUG.Println("Alias", key, "to", newkey)
+		return v.realKey(newkey)
+	}
+	return key
+}
+
+// InConfig checks to see if the given key (or an alias) is in the config file.
+func InConfig(key string) bool { return v.InConfig(key) }
+func (v *Viper) InConfig(key string) bool {
+	// if the requested key is an alias, then return the proper key
+	key = v.realKey(key)
+
+	_, exists := v.config[key]
+	return exists
+}
+
+// SetDefault sets the default value for this key.
+// SetDefault is case-insensitive for a key.
+// The default is only used when no value is provided by the user via flag, config or ENV.
+func SetDefault(key string, value interface{}) { v.SetDefault(key, value) }
+func (v *Viper) SetDefault(key string, value interface{}) {
+	// If alias passed in, then set the proper default
+	key = v.realKey(strings.ToLower(key))
+	value = toCaseInsensitiveValue(value)
+
+	path := strings.Split(key, v.keyDelim)
+	lastKey := strings.ToLower(path[len(path)-1])
+	deepestMap := deepSearch(v.defaults, path[0:len(path)-1])
+
+	// set innermost value
+	deepestMap[lastKey] = value
+}
+
+// Set sets the value for the key in the override register.
+// Set is case-insensitive for a key.
+// Will be used instead of values obtained via
+// flags, config file, ENV, default, or key/value store.
+func Set(key string, value interface{}) { v.Set(key, value) }
+func (v *Viper) Set(key string, value interface{}) {
+	// If alias passed in, then set the proper override
+	key = v.realKey(strings.ToLower(key))
+	value = toCaseInsensitiveValue(value)
+
+	path := strings.Split(key, v.keyDelim)
+	lastKey := strings.ToLower(path[len(path)-1])
+	deepestMap := deepSearch(v.override, path[0:len(path)-1])
+
+	// set innermost value
+	deepestMap[lastKey] = value
+}
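+
+// For example: Set("server.port", 8080) creates the nested override
+// {"server": {"port": 8080}}, which then takes precedence over any value
+// for "server.port" from flags, env, config file, or defaults.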
+
+// ReadInConfig will discover and load the configuration file from disk,
+// searching in one of the defined paths.
+func ReadInConfig() error { return v.ReadInConfig() }
+func (v *Viper) ReadInConfig() error {
+	jww.INFO.Println("Attempting to read in config file")
+	filename, err := v.getConfigFile()
+	if err != nil {
+		return err
+	}
+
+	if !stringInSlice(v.getConfigType(), SupportedExts) {
+		return UnsupportedConfigError(v.getConfigType())
+	}
+
+	jww.DEBUG.Println("Reading file: ", filename)
+	file, err := afero.ReadFile(v.fs, filename)
+	if err != nil {
+		return err
+	}
+
+	config := make(map[string]interface{})
+
+	err = v.unmarshalReader(bytes.NewReader(file), config)
+	if err != nil {
+		return err
+	}
+
+	v.config = config
+	return nil
+}
+
+// MergeInConfig merges a new configuration with an existing config.
+func MergeInConfig() error { return v.MergeInConfig() }
+func (v *Viper) MergeInConfig() error {
+	jww.INFO.Println("Attempting to merge in config file")
+	filename, err := v.getConfigFile()
+	if err != nil {
+		return err
+	}
+
+	if !stringInSlice(v.getConfigType(), SupportedExts) {
+		return UnsupportedConfigError(v.getConfigType())
+	}
+
+	file, err := afero.ReadFile(v.fs, filename)
+	if err != nil {
+		return err
+	}
+
+	return v.MergeConfig(bytes.NewReader(file))
+}
+
+// ReadConfig reads a new configuration from the given io.Reader, replacing
+// the existing config: keys that do not exist in the new configuration are dropped.
+func ReadConfig(in io.Reader) error { return v.ReadConfig(in) }
+func (v *Viper) ReadConfig(in io.Reader) error {
+	v.config = make(map[string]interface{})
+	return v.unmarshalReader(in, v.config)
+}
+
+// MergeConfig merges a new configuration with an existing config.
+func MergeConfig(in io.Reader) error { return v.MergeConfig(in) }
+func (v *Viper) MergeConfig(in io.Reader) error {
+	if v.config == nil {
+		v.config = make(map[string]interface{})
+	}
+	cfg := make(map[string]interface{})
+	if err := v.unmarshalReader(in, cfg); err != nil {
+		return err
+	}
+	mergeMaps(cfg, v.config, nil)
+	return nil
+}
+
+// WriteConfig writes the current configuration to a file.
+func WriteConfig() error { return v.WriteConfig() }
+func (v *Viper) WriteConfig() error {
+	filename, err := v.getConfigFile()
+	if err != nil {
+		return err
+	}
+	return v.writeConfig(filename, true)
+}
+
+// SafeWriteConfig writes current configuration to file only if the file does not exist.
+func SafeWriteConfig() error { return v.SafeWriteConfig() }
+func (v *Viper) SafeWriteConfig() error {
+	filename, err := v.getConfigFile()
+	if err != nil {
+		return err
+	}
+	return v.writeConfig(filename, false)
+}
+
+// WriteConfigAs writes current configuration to a given filename.
+func WriteConfigAs(filename string) error { return v.WriteConfigAs(filename) }
+func (v *Viper) WriteConfigAs(filename string) error {
+	return v.writeConfig(filename, true)
+}
+
+// SafeWriteConfigAs writes current configuration to a given filename if it does not exist.
+func SafeWriteConfigAs(filename string) error { return v.SafeWriteConfigAs(filename) }
+func (v *Viper) SafeWriteConfigAs(filename string) error {
+	return v.writeConfig(filename, false)
+}
+
+func writeConfig(filename string, force bool) error { return v.writeConfig(filename, force) }
+func (v *Viper) writeConfig(filename string, force bool) error {
+	jww.INFO.Println("Attempting to write configuration to file.")
+	ext := filepath.Ext(filename)
+	if len(ext) <= 1 {
+		return fmt.Errorf("Filename: %s requires valid extension.", filename)
+	}
+	configType := ext[1:]
+	if !stringInSlice(configType, SupportedExts) {
+		return UnsupportedConfigError(configType)
+	}
+	if v.config == nil {
+		v.config = make(map[string]interface{})
+	}
+	var flags int
+	if force {
+		flags = os.O_CREATE | os.O_TRUNC | os.O_WRONLY
+	} else {
+		if _, err := os.Stat(filename); os.IsNotExist(err) {
+			flags = os.O_WRONLY
+		} else {
+			return fmt.Errorf("File: %s exists. Use WriteConfig to overwrite.", filename)
+		}
+	}
+	f, err := v.fs.OpenFile(filename, flags, os.FileMode(0644))
+	if err != nil {
+		return err
+	}
+	return v.marshalWriter(f, configType)
+}
+
+// Unmarshal a Reader into a map.
+// Should probably be an unexported function.
+func unmarshalReader(in io.Reader, c map[string]interface{}) error {
+	return v.unmarshalReader(in, c)
+}
+func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error {
+	buf := new(bytes.Buffer)
+	buf.ReadFrom(in)
+
+	switch strings.ToLower(v.getConfigType()) {
+	case "yaml", "yml":
+		if err := yaml.Unmarshal(buf.Bytes(), &c); err != nil {
+			return ConfigParseError{err}
+		}
+
+	case "json":
+		if err := json.Unmarshal(buf.Bytes(), &c); err != nil {
+			return ConfigParseError{err}
+		}
+
+	case "hcl":
+		obj, err := hcl.Parse(string(buf.Bytes()))
+		if err != nil {
+			return ConfigParseError{err}
+		}
+		if err = hcl.DecodeObject(&c, obj); err != nil {
+			return ConfigParseError{err}
+		}
+
+	case "toml":
+		tree, err := toml.LoadReader(buf)
+		if err != nil {
+			return ConfigParseError{err}
+		}
+		tmap := tree.ToMap()
+		for k, v := range tmap {
+			c[k] = v
+		}
+
+	case "properties", "props", "prop":
+		v.properties = properties.NewProperties()
+		var err error
+		if v.properties, err = properties.Load(buf.Bytes(), properties.UTF8); err != nil {
+			return ConfigParseError{err}
+		}
+		for _, key := range v.properties.Keys() {
+			value, _ := v.properties.Get(key)
+			// recursively build nested maps
+			path := strings.Split(key, ".")
+			lastKey := strings.ToLower(path[len(path)-1])
+			deepestMap := deepSearch(c, path[0:len(path)-1])
+			// set innermost value
+			deepestMap[lastKey] = value
+		}
+	}
+
+	insensitiviseMap(c)
+	return nil
+}
+
+// Marshal a map into a Writer.
+func marshalWriter(f afero.File, configType string) error {
+	return v.marshalWriter(f, configType)
+}
+func (v *Viper) marshalWriter(f afero.File, configType string) error {
+	c := v.AllSettings()
+	switch configType {
+	case "json":
+		b, err := json.MarshalIndent(c, "", "  ")
+		if err != nil {
+			return ConfigMarshalError{err}
+		}
+		_, err = f.WriteString(string(b))
+		if err != nil {
+			return ConfigMarshalError{err}
+		}
+
+	case "hcl":
+		b, err := json.Marshal(c)
+		if err != nil {
+			return ConfigMarshalError{err}
+		}
+		ast, err := hcl.Parse(string(b))
+		if err != nil {
+			return ConfigMarshalError{err}
+		}
+		err = printer.Fprint(f, ast.Node)
+		if err != nil {
+			return ConfigMarshalError{err}
+		}
+
+	case "prop", "props", "properties":
+		if v.properties == nil {
+			v.properties = properties.NewProperties()
+		}
+		p := v.properties
+		for _, key := range v.AllKeys() {
+			_, _, err := p.Set(key, v.GetString(key))
+			if err != nil {
+				return ConfigMarshalError{err}
+			}
+		}
+		_, err := p.WriteComment(f, "#", properties.UTF8)
+		if err != nil {
+			return ConfigMarshalError{err}
+		}
+
+	case "toml":
+		t, err := toml.TreeFromMap(c)
+		if err != nil {
+			return ConfigMarshalError{err}
+		}
+		s := t.String()
+		if _, err := f.WriteString(s); err != nil {
+			return ConfigMarshalError{err}
+		}
+
+	case "yaml", "yml":
+		b, err := yaml.Marshal(c)
+		if err != nil {
+			return ConfigMarshalError{err}
+		}
+		if _, err = f.WriteString(string(b)); err != nil {
+			return ConfigMarshalError{err}
+		}
+	}
+	return nil
+}
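Because writeConfig opens files through v.fs, the whole marshalling path can be exercised against afero's in-memory filesystem, which is handy in tests. A sketch (SetFs is defined further down in this file):

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
	"github.com/spf13/viper"
)

func main() {
	fs := afero.NewMemMapFs()
	viper.SetFs(fs)
	viper.Set("name", "lazygit")

	// The ".json" extension routes writeConfig into the "json" case,
	// which pretty-prints via MarshalIndent.
	if err := viper.WriteConfigAs("settings.json"); err != nil {
		panic(err)
	}

	b, _ := afero.ReadFile(fs, "settings.json")
	fmt.Println(string(b))
}
```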
+
+func keyExists(k string, m map[string]interface{}) string {
+	lk := strings.ToLower(k)
+	for mk := range m {
+		lmk := strings.ToLower(mk)
+		if lmk == lk {
+			return mk
+		}
+	}
+	return ""
+}
+
+func castToMapStringInterface(
+	src map[interface{}]interface{}) map[string]interface{} {
+	tgt := map[string]interface{}{}
+	for k, v := range src {
+		tgt[fmt.Sprintf("%v", k)] = v
+	}
+	return tgt
+}
+
+func castMapStringToMapInterface(src map[string]string) map[string]interface{} {
+	tgt := map[string]interface{}{}
+	for k, v := range src {
+		tgt[k] = v
+	}
+	return tgt
+}
+
+func castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} {
+	tgt := map[string]interface{}{}
+	for k, v := range src {
+		tgt[k] = v
+	}
+	return tgt
+}
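All three casts funnel differently-shaped maps into map[string]interface{}; only the first has to stringify its keys, because go-yaml can produce map[interface{}]interface{} with non-string keys. A standalone illustration of that key coercion:

```go
package main

import "fmt"

func main() {
	// go-yaml may hand back non-string keys, e.g. ints.
	src := map[interface{}]interface{}{1: "one", "two": 2}

	tgt := map[string]interface{}{}
	for k, v := range src {
		tgt[fmt.Sprintf("%v", k)] = v // "%v" stringifies any key type
	}
	fmt.Println(tgt) // map[1:one two:2]
}
```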
+
+// mergeMaps merges two maps. The `itgt` parameter is for handling go-yaml's
+// insistence on parsing nested structures as `map[interface{}]interface{}`
+// instead of using a `string` as the key for nested structures beyond one level
+// deep. Both map types are supported as there is a go-yaml fork that uses
+// `map[string]interface{}` instead.
+func mergeMaps(
+	src, tgt map[string]interface{}, itgt map[interface{}]interface{}) {
+	for sk, sv := range src {
+		tk := keyExists(sk, tgt)
+		if tk == "" {
+			jww.TRACE.Printf("tk=\"\", tgt[%s]=%v", sk, sv)
+			tgt[sk] = sv
+			if itgt != nil {
+				itgt[sk] = sv
+			}
+			continue
+		}
+
+		tv, ok := tgt[tk]
+		if !ok {
+			jww.TRACE.Printf("tgt[%s] != ok, tgt[%s]=%v", tk, sk, sv)
+			tgt[sk] = sv
+			if itgt != nil {
+				itgt[sk] = sv
+			}
+			continue
+		}
+
+		svType := reflect.TypeOf(sv)
+		tvType := reflect.TypeOf(tv)
+		if svType != tvType {
+			jww.ERROR.Printf(
+				"svType != tvType; key=%s, st=%v, tt=%v, sv=%v, tv=%v",
+				sk, svType, tvType, sv, tv)
+			continue
+		}
+
+		jww.TRACE.Printf("processing key=%s, st=%v, tt=%v, sv=%v, tv=%v",
+			sk, svType, tvType, sv, tv)
+
+		switch ttv := tv.(type) {
+		case map[interface{}]interface{}:
+			jww.TRACE.Printf("merging maps (must convert)")
+			tsv := sv.(map[interface{}]interface{})
+			ssv := castToMapStringInterface(tsv)
+			stv := castToMapStringInterface(ttv)
+			mergeMaps(ssv, stv, ttv)
+		case map[string]interface{}:
+			jww.TRACE.Printf("merging maps")
+			mergeMaps(sv.(map[string]interface{}), ttv, nil)
+		default:
+			jww.TRACE.Printf("setting value")
+			tgt[tk] = sv
+			if itgt != nil {
+				itgt[tk] = sv
+			}
+		}
+	}
+}
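A sketch of a test pinning down those semantics, written as if inside the package (mergeMaps is unexported): absent keys are copied over, nested maps merge recursively, and a type mismatch leaves the target value alone.

```go
package viper

import (
	"reflect"
	"testing"
)

func TestMergeMapsSketch(t *testing.T) {
	src := map[string]interface{}{
		"a":      1,
		"nested": map[string]interface{}{"x": "new"},
	}
	tgt := map[string]interface{}{
		"nested": map[string]interface{}{"x": "old", "y": 2},
	}
	mergeMaps(src, tgt, nil)

	want := map[string]interface{}{
		"a":      1,                                          // copied: absent from tgt
		"nested": map[string]interface{}{"x": "new", "y": 2}, // merged recursively
	}
	if !reflect.DeepEqual(tgt, want) {
		t.Fatalf("got %#v, want %#v", tgt, want)
	}
}
```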
+
+// ReadRemoteConfig attempts to get configuration from a remote source
+// and read it in the remote configuration registry.
+func ReadRemoteConfig() error { return v.ReadRemoteConfig() }
+func (v *Viper) ReadRemoteConfig() error {
+	return v.getKeyValueConfig()
+}
+
+// WatchRemoteConfig re-reads the remote configuration once from the first
+// provider that responds, refreshing the key/value store.
+func WatchRemoteConfig() error { return v.WatchRemoteConfig() }
+func (v *Viper) WatchRemoteConfig() error {
+	return v.watchKeyValueConfig()
+}
+
+// WatchRemoteConfigOnChannel launches a goroutine that keeps pulling updates
+// from the first responding provider's watch channel into the key/value store.
+func (v *Viper) WatchRemoteConfigOnChannel() error {
+	return v.watchKeyValueConfigOnChannel()
+}
+
+func (v *Viper) insensitiviseMaps() {
+	insensitiviseMap(v.config)
+	insensitiviseMap(v.defaults)
+	insensitiviseMap(v.override)
+	insensitiviseMap(v.kvstore)
+}
+
+// Retrieve the first found remote configuration.
+func (v *Viper) getKeyValueConfig() error {
+	if RemoteConfig == nil {
+		return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'")
+	}
+
+	for _, rp := range v.remoteProviders {
+		val, err := v.getRemoteConfig(rp)
+		if err != nil {
+			continue
+		}
+		v.kvstore = val
+		return nil
+	}
+	return RemoteConfigError("No Files Found")
+}
+
+func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) {
+	reader, err := RemoteConfig.Get(provider)
+	if err != nil {
+		return nil, err
+	}
+	err = v.unmarshalReader(reader, v.kvstore)
+	return v.kvstore, err
+}
+
+// Subscribe to updates from the first remote provider that yields a watch channel.
+func (v *Viper) watchKeyValueConfigOnChannel() error {
+	for _, rp := range v.remoteProviders {
+		respc, _ := RemoteConfig.WatchChannel(rp)
+		// TODO: add a quit channel so this goroutine can be stopped.
+		go func(rc <-chan *RemoteResponse) {
+			for {
+				b := <-rc
+				reader := bytes.NewReader(b.Value)
+				v.unmarshalReader(reader, v.kvstore)
+			}
+		}(respc)
+		return nil
+	}
+	return RemoteConfigError("No Files Found")
+}
+
+// Retrieve the first found remote configuration.
+func (v *Viper) watchKeyValueConfig() error {
+	for _, rp := range v.remoteProviders {
+		val, err := v.watchRemoteConfig(rp)
+		if err != nil {
+			continue
+		}
+		v.kvstore = val
+		return nil
+	}
+	return RemoteConfigError("No Files Found")
+}
+
+func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) {
+	reader, err := RemoteConfig.Watch(provider)
+	if err != nil {
+		return nil, err
+	}
+	err = v.unmarshalReader(reader, v.kvstore)
+	return v.kvstore, err
+}
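None of these remote paths work until a blank import installs the RemoteConfig implementation, as the error above spells out. A usage sketch; the etcd endpoint, path, and key are placeholders:

```go
package main

import (
	"fmt"

	"github.com/spf13/viper"
	_ "github.com/spf13/viper/remote" // installs RemoteConfig
)

func main() {
	// Endpoint and path are illustrative only.
	if err := viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001", "/config/app.json"); err != nil {
		panic(err)
	}
	viper.SetConfigType("json") // remote payloads carry no file extension

	if err := viper.ReadRemoteConfig(); err != nil {
		// "No Files Found" if every registered provider fails.
		fmt.Println("no remote config:", err)
		return
	}
	fmt.Println(viper.Get("somekey"))
}
```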
+
+// AllKeys returns all keys holding a value, regardless of where they are set.
+// Nested keys are returned with a v.keyDelim (= ".") separator
+func AllKeys() []string { return v.AllKeys() }
+func (v *Viper) AllKeys() []string {
+	m := map[string]bool{}
+	// add all paths, by order of descending priority to ensure correct shadowing
+	m = v.flattenAndMergeMap(m, castMapStringToMapInterface(v.aliases), "")
+	m = v.flattenAndMergeMap(m, v.override, "")
+	m = v.mergeFlatMap(m, castMapFlagToMapInterface(v.pflags))
+	m = v.mergeFlatMap(m, castMapStringToMapInterface(v.env))
+	m = v.flattenAndMergeMap(m, v.config, "")
+	m = v.flattenAndMergeMap(m, v.kvstore, "")
+	m = v.flattenAndMergeMap(m, v.defaults, "")
+
+	// convert set of paths to list
+	a := []string{}
+	for x := range m {
+		a = append(a, x)
+	}
+	return a
+}
+
+// flattenAndMergeMap recursively flattens the given map into a map[string]bool
+// of key paths (used as a set, easier to manipulate than a []string):
+// - each path is merged into a single key string, delimited with v.keyDelim (= ".")
+// - if a path is shadowed by an earlier value in the initial shadow map,
+//   it is skipped.
+// The resulting set of paths is merged to the given shadow set at the same time.
+func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interface{}, prefix string) map[string]bool {
+	if shadow != nil && prefix != "" && shadow[prefix] {
+		// prefix is shadowed => nothing more to flatten
+		return shadow
+	}
+	if shadow == nil {
+		shadow = make(map[string]bool)
+	}
+
+	var m2 map[string]interface{}
+	if prefix != "" {
+		prefix += v.keyDelim
+	}
+	for k, val := range m {
+		fullKey := prefix + k
+		switch val := val.(type) {
+		case map[string]interface{}:
+			m2 = val
+		case map[interface{}]interface{}:
+			m2 = cast.ToStringMap(val)
+		default:
+			// immediate value
+			shadow[strings.ToLower(fullKey)] = true
+			continue
+		}
+		// recursively merge to shadow map
+		shadow = v.flattenAndMergeMap(shadow, m2, fullKey)
+	}
+	return shadow
+}
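An in-package sketch of the flattening (the method is unexported; New is the package's constructor): nested maps collapse into lower-cased, dot-delimited leaf paths.

```go
package viper

import "testing"

func TestFlattenSketch(t *testing.T) {
	v := New()
	m := map[string]interface{}{
		"GUI":   map[string]interface{}{"Theme": "dark"},
		"debug": false,
	}
	shadow := v.flattenAndMergeMap(nil, m, "")

	for _, key := range []string{"gui.theme", "debug"} {
		if !shadow[key] {
			t.Fatalf("expected %q in shadow set, got %v", key, shadow)
		}
	}
}
```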
+
+// mergeFlatMap merges the given maps, excluding values of the second map
+// shadowed by values from the first map.
+func (v *Viper) mergeFlatMap(shadow map[string]bool, m map[string]interface{}) map[string]bool {
+	// scan keys
+outer:
+	for k := range m {
+		path := strings.Split(k, v.keyDelim)
+		// scan intermediate paths
+		var parentKey string
+		for i := 1; i < len(path); i++ {
+			parentKey = strings.Join(path[0:i], v.keyDelim)
+			if shadow[parentKey] {
+				// path is shadowed, continue
+				continue outer
+			}
+		}
+		// add key
+		shadow[strings.ToLower(k)] = true
+	}
+	return shadow
+}
+
+// AllSettings merges all settings and returns them as a map[string]interface{}.
+func AllSettings() map[string]interface{} { return v.AllSettings() }
+func (v *Viper) AllSettings() map[string]interface{} {
+	m := map[string]interface{}{}
+	// start from the list of keys, and construct the map one value at a time
+	for _, k := range v.AllKeys() {
+		value := v.Get(k)
+		if value == nil {
+			// should not happen, since AllKeys() returns only keys holding a value,
+			// check just in case anything changes
+			continue
+		}
+		path := strings.Split(k, v.keyDelim)
+		lastKey := strings.ToLower(path[len(path)-1])
+		deepestMap := deepSearch(m, path[0:len(path)-1])
+		// set innermost value
+		deepestMap[lastKey] = value
+	}
+	return m
+}
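Caller-side, the pair behaves as below: AllKeys returns flat dotted paths in no particular order, while AllSettings rebuilds the nesting.

```go
package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	viper.SetDefault("port", 8080)
	viper.Set("gui.theme", "dark")

	fmt.Println(viper.AllKeys())     // e.g. [port gui.theme] (unordered)
	fmt.Println(viper.AllSettings()) // map[gui:map[theme:dark] port:8080]
}
```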
+
+// SetFs sets the filesystem to use to read configuration.
+func SetFs(fs afero.Fs) { v.SetFs(fs) }
+func (v *Viper) SetFs(fs afero.Fs) {
+	v.fs = fs
+}
+
+// SetConfigName sets name for the config file.
+// Does not include extension.
+func SetConfigName(in string) { v.SetConfigName(in) }
+func (v *Viper) SetConfigName(in string) {
+	if in != "" {
+		v.configName = in
+		v.configFile = ""
+	}
+}
+
+// SetConfigType sets the type of the configuration, e.g. "json". It is
+// required whenever the source carries no file extension, such as a remote
+// provider or an io.Reader.
+func SetConfigType(in string) { v.SetConfigType(in) }
+func (v *Viper) SetConfigType(in string) {
+	if in != "" {
+		v.configType = in
+	}
+}
+
+func (v *Viper) getConfigType() string {
+	if v.configType != "" {
+		return v.configType
+	}
+
+	cf, err := v.getConfigFile()
+	if err != nil {
+		return ""
+	}
+
+	ext := filepath.Ext(cf)
+
+	if len(ext) > 1 {
+		return ext[1:]
+	}
+
+	return ""
+}
+
+func (v *Viper) getConfigFile() (string, error) {
+	if v.configFile == "" {
+		cf, err := v.findConfigFile()
+		if err != nil {
+			return "", err
+		}
+		v.configFile = cf
+	}
+	return v.configFile, nil
+}
+
+func (v *Viper) searchInPath(in string) (filename string) {
+	jww.DEBUG.Println("Searching for config in ", in)
+	for _, ext := range SupportedExts {
+		jww.DEBUG.Println("Checking for", filepath.Join(in, v.configName+"."+ext))
+		if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b {
+			jww.DEBUG.Println("Found: ", filepath.Join(in, v.configName+"."+ext))
+			return filepath.Join(in, v.configName+"."+ext)
+		}
+	}
+
+	return ""
+}
+
+// Search all configPaths for any config file.
+// Returns the first path that exists (and is a config file).
+func (v *Viper) findConfigFile() (string, error) {
+	jww.INFO.Println("Searching for config in ", v.configPaths)
+
+	for _, cp := range v.configPaths {
+		file := v.searchInPath(cp)
+		if file != "" {
+			return file, nil
+		}
+	}
+	return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)}
+}
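Putting the search pieces together from the caller's side; the application name and paths are illustrative:

```go
package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	viper.SetConfigName("config")     // matches config.<ext> for each supported ext
	viper.AddConfigPath("/etc/myapp") // paths are searched in the order added
	viper.AddConfigPath("$HOME/.myapp")
	viper.AddConfigPath(".")

	if err := viper.ReadInConfig(); err != nil {
		// ConfigFileNotFoundError when no path holds a supported file.
		fmt.Println("config not found:", err)
	}
}
```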
+
+// Debug prints all configuration registries for debugging
+// purposes.
+func Debug() { v.Debug() }
+func (v *Viper) Debug() {
+	fmt.Printf("Aliases:\n%#v\n", v.aliases)
+	fmt.Printf("Override:\n%#v\n", v.override)
+	fmt.Printf("PFlags:\n%#v\n", v.pflags)
+	fmt.Printf("Env:\n%#v\n", v.env)
+	fmt.Printf("Key/Value Store:\n%#v\n", v.kvstore)
+	fmt.Printf("Config:\n%#v\n", v.config)
+	fmt.Printf("Defaults:\n%#v\n", v.defaults)
+}
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE
new file mode 100644
index 000000000..8dada3eda
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
new file mode 100644
index 000000000..8da58fbf6
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
@@ -0,0 +1,31 @@
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original copyright and license:
+
+    apic.go
+    emitterc.go
+    parserc.go
+    readerc.go
+    scannerc.go
+    writerc.go
+    yamlh.go
+    yamlprivateh.go
+
+Copyright (c) 2006 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE
new file mode 100644
index 000000000..866d74a7a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/NOTICE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go
new file mode 100644
index 000000000..1f7e87e67
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/apic.go
@@ -0,0 +1,739 @@
+package yaml
+
+import (
+	"io"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+	//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+	// Check if we can move the queue at the beginning of the buffer.
+	if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+		if parser.tokens_head != len(parser.tokens) {
+			copy(parser.tokens, parser.tokens[parser.tokens_head:])
+		}
+		parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+		parser.tokens_head = 0
+	}
+	parser.tokens = append(parser.tokens, *token)
+	if pos < 0 {
+		return
+	}
+	copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+	parser.tokens[parser.tokens_head+pos] = *token
+}
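The compaction at the top is the usual trick for a slice-backed FIFO: once elements have been consumed from the front (tokens_head > 0) and the backing array is full, the live tail is copied down to index 0 before appending, so capacity stays bounded. The same idiom in isolation:

```go
package main

import "fmt"

// queue is a slice-backed FIFO that compacts instead of growing once
// the backing array fills up and the head has advanced.
type queue struct {
	items []int
	head  int
}

func (q *queue) push(v int) {
	if q.head > 0 && len(q.items) == cap(q.items) {
		copy(q.items, q.items[q.head:]) // slide live elements to the front
		q.items = q.items[:len(q.items)-q.head]
		q.head = 0
	}
	q.items = append(q.items, v)
}

func (q *queue) pop() int {
	v := q.items[q.head]
	q.head++
	return v
}

func main() {
	q := &queue{items: make([]int, 0, 4)}
	for i := 0; i < 10; i++ {
		q.push(i)
		fmt.Println(q.pop(), "cap:", cap(q.items)) // capacity never exceeds 4
	}
}
```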
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+	*parser = yaml_parser_t{
+		raw_buffer: make([]byte, 0, input_raw_buffer_size),
+		buffer:     make([]byte, 0, input_buffer_size),
+	}
+	return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+	*parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+	if parser.input_pos == len(parser.input) {
+		return 0, io.EOF
+	}
+	n = copy(buffer, parser.input[parser.input_pos:])
+	parser.input_pos += n
+	return n, nil
+}
+
+// Reader read handler.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+	return parser.input_reader.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+	if parser.read_handler != nil {
+		panic("must set the input source only once")
+	}
+	parser.read_handler = yaml_string_read_handler
+	parser.input = input
+	parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
+	if parser.read_handler != nil {
+		panic("must set the input source only once")
+	}
+	parser.read_handler = yaml_reader_read_handler
+	parser.input_reader = r
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+	if parser.encoding != yaml_ANY_ENCODING {
+		panic("must set the encoding only once")
+	}
+	parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
+	*emitter = yaml_emitter_t{
+		buffer:     make([]byte, output_buffer_size),
+		raw_buffer: make([]byte, 0, output_raw_buffer_size),
+		states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
+		events:     make([]yaml_event_t, 0, initial_queue_size),
+	}
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+	*emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+	*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+	return nil
+}
+
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+	_, err := emitter.output_writer.Write(buffer)
+	return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+	if emitter.write_handler != nil {
+		panic("must set the output target only once")
+	}
+	emitter.write_handler = yaml_string_write_handler
+	emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
+	if emitter.write_handler != nil {
+		panic("must set the output target only once")
+	}
+	emitter.write_handler = yaml_writer_write_handler
+	emitter.output_writer = w
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+	if emitter.encoding != yaml_ANY_ENCODING {
+		panic("must set the output encoding only once")
+	}
+	emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+	emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+	if indent < 2 || indent > 9 {
+		indent = 2
+	}
+	emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+	if width < 0 {
+		width = -1
+	}
+	emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+	emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+	emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+//    assert(token);  // Non-NULL token object expected.
+//
+//    switch (token.type)
+//    {
+//        case YAML_TAG_DIRECTIVE_TOKEN:
+//            yaml_free(token.data.tag_directive.handle);
+//            yaml_free(token.data.tag_directive.prefix);
+//            break;
+//
+//        case YAML_ALIAS_TOKEN:
+//            yaml_free(token.data.alias.value);
+//            break;
+//
+//        case YAML_ANCHOR_TOKEN:
+//            yaml_free(token.data.anchor.value);
+//            break;
+//
+//        case YAML_TAG_TOKEN:
+//            yaml_free(token.data.tag.handle);
+//            yaml_free(token.data.tag.suffix);
+//            break;
+//
+//        case YAML_SCALAR_TOKEN:
+//            yaml_free(token.data.scalar.value);
+//            break;
+//
+//        default:
+//            break;
+//    }
+//
+//    memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+//    yaml_char_t *end = start+length;
+//    yaml_char_t *pointer = start;
+//
+//    while (pointer < end) {
+//        unsigned char octet;
+//        unsigned int width;
+//        unsigned int value;
+//        size_t k;
+//
+//        octet = pointer[0];
+//        width = (octet & 0x80) == 0x00 ? 1 :
+//                (octet & 0xE0) == 0xC0 ? 2 :
+//                (octet & 0xF0) == 0xE0 ? 3 :
+//                (octet & 0xF8) == 0xF0 ? 4 : 0;
+//        value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+//                (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+//                (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+//                (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+//        if (!width) return 0;
+//        if (pointer+width > end) return 0;
+//        for (k = 1; k < width; k ++) {
+//            octet = pointer[k];
+//            if ((octet & 0xC0) != 0x80) return 0;
+//            value = (value << 6) + (octet & 0x3F);
+//        }
+//        if (!((width == 1) ||
+//            (width == 2 && value >= 0x80) ||
+//            (width == 3 && value >= 0x800) ||
+//            (width == 4 && value >= 0x10000))) return 0;
+//
+//        pointer += width;
+//    }
+//
+//    return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
+	*event = yaml_event_t{
+		typ:      yaml_STREAM_START_EVENT,
+		encoding: encoding,
+	}
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
+	*event = yaml_event_t{
+		typ: yaml_STREAM_END_EVENT,
+	}
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(
+	event *yaml_event_t,
+	version_directive *yaml_version_directive_t,
+	tag_directives []yaml_tag_directive_t,
+	implicit bool,
+) {
+	*event = yaml_event_t{
+		typ:               yaml_DOCUMENT_START_EVENT,
+		version_directive: version_directive,
+		tag_directives:    tag_directives,
+		implicit:          implicit,
+	}
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
+	*event = yaml_event_t{
+		typ:      yaml_DOCUMENT_END_EVENT,
+		implicit: implicit,
+	}
+}
+
+///*
+// * Create ALIAS.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
+//{
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    anchor_copy *yaml_char_t = NULL
+//
+//    assert(event) // Non-NULL event object is expected.
+//    assert(anchor) // Non-NULL anchor is expected.
+//
+//    if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
+//
+//    anchor_copy = yaml_strdup(anchor)
+//    if (!anchor_copy)
+//        return 0
+//
+//    ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
+//
+//    return 1
+//}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+	*event = yaml_event_t{
+		typ:             yaml_SCALAR_EVENT,
+		anchor:          anchor,
+		tag:             tag,
+		value:           value,
+		implicit:        plain_implicit,
+		quoted_implicit: quoted_implicit,
+		style:           yaml_style_t(style),
+	}
+	return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+	*event = yaml_event_t{
+		typ:      yaml_SEQUENCE_START_EVENT,
+		anchor:   anchor,
+		tag:      tag,
+		implicit: implicit,
+		style:    yaml_style_t(style),
+	}
+	return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+	*event = yaml_event_t{
+		typ: yaml_SEQUENCE_END_EVENT,
+	}
+	return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
+	*event = yaml_event_t{
+		typ:      yaml_MAPPING_START_EVENT,
+		anchor:   anchor,
+		tag:      tag,
+		implicit: implicit,
+		style:    yaml_style_t(style),
+	}
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
+	*event = yaml_event_t{
+		typ: yaml_MAPPING_END_EVENT,
+	}
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+	*event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+//        version_directive *yaml_version_directive_t,
+//        tag_directives_start *yaml_tag_directive_t,
+//        tag_directives_end *yaml_tag_directive_t,
+//        start_implicit int, end_implicit int)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    struct {
+//        start *yaml_node_t
+//        end *yaml_node_t
+//        top *yaml_node_t
+//    } nodes = { NULL, NULL, NULL }
+//    version_directive_copy *yaml_version_directive_t = NULL
+//    struct {
+//        start *yaml_tag_directive_t
+//        end *yaml_tag_directive_t
+//        top *yaml_tag_directive_t
+//    } tag_directives_copy = { NULL, NULL, NULL }
+//    value yaml_tag_directive_t = { NULL, NULL }
+//    mark yaml_mark_t = { 0, 0, 0 }
+//
+//    assert(document) // Non-NULL document object is expected.
+//    assert((tag_directives_start && tag_directives_end) ||
+//            (tag_directives_start == tag_directives_end))
+//                            // Valid tag directives are expected.
+//
+//    if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+//    if (version_directive) {
+//        version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+//        if (!version_directive_copy) goto error
+//        version_directive_copy.major = version_directive.major
+//        version_directive_copy.minor = version_directive.minor
+//    }
+//
+//    if (tag_directives_start != tag_directives_end) {
+//        tag_directive *yaml_tag_directive_t
+//        if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+//            goto error
+//        for (tag_directive = tag_directives_start
+//                tag_directive != tag_directives_end; tag_directive ++) {
+//            assert(tag_directive.handle)
+//            assert(tag_directive.prefix)
+//            if (!yaml_check_utf8(tag_directive.handle,
+//                        strlen((char *)tag_directive.handle)))
+//                goto error
+//            if (!yaml_check_utf8(tag_directive.prefix,
+//                        strlen((char *)tag_directive.prefix)))
+//                goto error
+//            value.handle = yaml_strdup(tag_directive.handle)
+//            value.prefix = yaml_strdup(tag_directive.prefix)
+//            if (!value.handle || !value.prefix) goto error
+//            if (!PUSH(&context, tag_directives_copy, value))
+//                goto error
+//            value.handle = NULL
+//            value.prefix = NULL
+//        }
+//    }
+//
+//    DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+//            tag_directives_copy.start, tag_directives_copy.top,
+//            start_implicit, end_implicit, mark, mark)
+//
+//    return 1
+//
+//error:
+//    STACK_DEL(&context, nodes)
+//    yaml_free(version_directive_copy)
+//    while (!STACK_EMPTY(&context, tag_directives_copy)) {
+//        value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+//        yaml_free(value.handle)
+//        yaml_free(value.prefix)
+//    }
+//    STACK_DEL(&context, tag_directives_copy)
+//    yaml_free(value.handle)
+//    yaml_free(value.prefix)
+//
+//    return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    tag_directive *yaml_tag_directive_t
+//
+//    context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+//    assert(document) // Non-NULL document object is expected.
+//
+//    while (!STACK_EMPTY(&context, document.nodes)) {
+//        node yaml_node_t = POP(&context, document.nodes)
+//        yaml_free(node.tag)
+//        switch (node.type) {
+//            case YAML_SCALAR_NODE:
+//                yaml_free(node.data.scalar.value)
+//                break
+//            case YAML_SEQUENCE_NODE:
+//                STACK_DEL(&context, node.data.sequence.items)
+//                break
+//            case YAML_MAPPING_NODE:
+//                STACK_DEL(&context, node.data.mapping.pairs)
+//                break
+//            default:
+//                assert(0) // Should not happen.
+//        }
+//    }
+//    STACK_DEL(&context, document.nodes)
+//
+//    yaml_free(document.version_directive)
+//    for (tag_directive = document.tag_directives.start
+//            tag_directive != document.tag_directives.end
+//            tag_directive++) {
+//        yaml_free(tag_directive.handle)
+//        yaml_free(tag_directive.prefix)
+//    }
+//    yaml_free(document.tag_directives.start)
+//
+//    memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+//        return document.nodes.start + index - 1
+//    }
+//    return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (document.nodes.top != document.nodes.start) {
+//        return document.nodes.start
+//    }
+//    return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+//        tag *yaml_char_t, value *yaml_char_t, length int,
+//        style yaml_scalar_style_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    tag_copy *yaml_char_t = NULL
+//    value_copy *yaml_char_t = NULL
+//    node yaml_node_t
+//
+//    assert(document) // Non-NULL document object is expected.
+//    assert(value) // Non-NULL value is expected.
+//
+//    if (!tag) {
+//        tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+//    }
+//
+//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+//    tag_copy = yaml_strdup(tag)
+//    if (!tag_copy) goto error
+//
+//    if (length < 0) {
+//        length = strlen((char *)value)
+//    }
+//
+//    if (!yaml_check_utf8(value, length)) goto error
+//    value_copy = yaml_malloc(length+1)
+//    if (!value_copy) goto error
+//    memcpy(value_copy, value, length)
+//    value_copy[length] = '\0'
+//
+//    SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+//    if (!PUSH(&context, document.nodes, node)) goto error
+//
+//    return document.nodes.top - document.nodes.start
+//
+//error:
+//    yaml_free(tag_copy)
+//    yaml_free(value_copy)
+//
+//    return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+//        tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    tag_copy *yaml_char_t = NULL
+//    struct {
+//        start *yaml_node_item_t
+//        end *yaml_node_item_t
+//        top *yaml_node_item_t
+//    } items = { NULL, NULL, NULL }
+//    node yaml_node_t
+//
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (!tag) {
+//        tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+//    }
+//
+//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+//    tag_copy = yaml_strdup(tag)
+//    if (!tag_copy) goto error
+//
+//    if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+//    SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+//            style, mark, mark)
+//    if (!PUSH(&context, document.nodes, node)) goto error
+//
+//    return document.nodes.top - document.nodes.start
+//
+//error:
+//    STACK_DEL(&context, items)
+//    yaml_free(tag_copy)
+//
+//    return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+//        tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    tag_copy *yaml_char_t = NULL
+//    struct {
+//        start *yaml_node_pair_t
+//        end *yaml_node_pair_t
+//        top *yaml_node_pair_t
+//    } pairs = { NULL, NULL, NULL }
+//    node yaml_node_t
+//
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (!tag) {
+//        tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+//    }
+//
+//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+//    tag_copy = yaml_strdup(tag)
+//    if (!tag_copy) goto error
+//
+//    if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+//    MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+//            style, mark, mark)
+//    if (!PUSH(&context, document.nodes, node)) goto error
+//
+//    return document.nodes.top - document.nodes.start
+//
+//error:
+//    STACK_DEL(&context, pairs)
+//    yaml_free(tag_copy)
+//
+//    return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+//        sequence int, item int)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//
+//    assert(document) // Non-NULL document is required.
+//    assert(sequence > 0
+//            && document.nodes.start + sequence <= document.nodes.top)
+//                            // Valid sequence id is required.
+//    assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+//                            // A sequence node is required.
+//    assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+//                            // Valid item id is required.
+//
+//    if (!PUSH(&context,
+//                document.nodes.start[sequence-1].data.sequence.items, item))
+//        return 0
+//
+//    return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+//        mapping int, key int, value int)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//
+//    pair yaml_node_pair_t
+//
+//    assert(document) // Non-NULL document is required.
+//    assert(mapping > 0
+//            && document.nodes.start + mapping <= document.nodes.top)
+//                            // Valid mapping id is required.
+//    assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+//                            // A mapping node is required.
+//    assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+//                            // Valid key id is required.
+//    assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+//                            // Valid value id is required.
+//
+//    pair.key = key
+//    pair.value = value
+//
+//    if (!PUSH(&context,
+//                document.nodes.start[mapping-1].data.mapping.pairs, pair))
+//        return 0
+//
+//    return 1
+//}
+//
+//
diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go
new file mode 100644
index 000000000..e4e56e28e
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/decode.go
@@ -0,0 +1,775 @@
+package yaml
+
+import (
+	"encoding"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"strconv"
+	"time"
+)
+
+const (
+	documentNode = 1 << iota
+	mappingNode
+	sequenceNode
+	scalarNode
+	aliasNode
+)
+
+type node struct {
+	kind         int
+	line, column int
+	tag          string
+	// For an alias node, alias holds the resolved alias.
+	alias    *node
+	value    string
+	implicit bool
+	children []*node
+	anchors  map[string]*node
+}
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+	parser   yaml_parser_t
+	event    yaml_event_t
+	doc      *node
+	doneInit bool
+}
+
+func newParser(b []byte) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML emitter")
+	}
+	if len(b) == 0 {
+		b = []byte{'\n'}
+	}
+	yaml_parser_set_input_string(&p.parser, b)
+	return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML emitter")
+	}
+	yaml_parser_set_input_reader(&p.parser, r)
+	return &p
+}
+
+func (p *parser) init() {
+	if p.doneInit {
+		return
+	}
+	p.expect(yaml_STREAM_START_EVENT)
+	p.doneInit = true
+}
+
+func (p *parser) destroy() {
+	if p.event.typ != yaml_NO_EVENT {
+		yaml_event_delete(&p.event)
+	}
+	yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+	if p.event.typ == yaml_NO_EVENT {
+		if !yaml_parser_parse(&p.parser, &p.event) {
+			p.fail()
+		}
+	}
+	if p.event.typ == yaml_STREAM_END_EVENT {
+		failf("attempted to go past the end of stream; corrupted value?")
+	}
+	if p.event.typ != e {
+		p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+		p.fail()
+	}
+	yaml_event_delete(&p.event)
+	p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+	if p.event.typ != yaml_NO_EVENT {
+		return p.event.typ
+	}
+	if !yaml_parser_parse(&p.parser, &p.event) {
+		p.fail()
+	}
+	return p.event.typ
+}
+
+func (p *parser) fail() {
+	var where string
+	var line int
+	if p.parser.problem_mark.line != 0 {
+		line = p.parser.problem_mark.line
+		// The scanner does not advance the line counter before returning an
+		// error, so compensate here.
+		if p.parser.error == yaml_SCANNER_ERROR {
+			line++
+		}
+	} else if p.parser.context_mark.line != 0 {
+		line = p.parser.context_mark.line
+	}
+	if line != 0 {
+		where = "line " + strconv.Itoa(line) + ": "
+	}
+	var msg string
+	if len(p.parser.problem) > 0 {
+		msg = p.parser.problem
+	} else {
+		msg = "unknown problem parsing YAML content"
+	}
+	failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+	if anchor != nil {
+		p.doc.anchors[string(anchor)] = n
+	}
+}
+
+func (p *parser) parse() *node {
+	p.init()
+	switch p.peek() {
+	case yaml_SCALAR_EVENT:
+		return p.scalar()
+	case yaml_ALIAS_EVENT:
+		return p.alias()
+	case yaml_MAPPING_START_EVENT:
+		return p.mapping()
+	case yaml_SEQUENCE_START_EVENT:
+		return p.sequence()
+	case yaml_DOCUMENT_START_EVENT:
+		return p.document()
+	case yaml_STREAM_END_EVENT:
+		// Happens when attempting to decode an empty buffer.
+		return nil
+	default:
+		panic("attempted to parse unknown event: " + p.event.typ.String())
+	}
+}
+
+func (p *parser) node(kind int) *node {
+	return &node{
+		kind:   kind,
+		line:   p.event.start_mark.line,
+		column: p.event.start_mark.column,
+	}
+}
+
+func (p *parser) document() *node {
+	n := p.node(documentNode)
+	n.anchors = make(map[string]*node)
+	p.doc = n
+	p.expect(yaml_DOCUMENT_START_EVENT)
+	n.children = append(n.children, p.parse())
+	p.expect(yaml_DOCUMENT_END_EVENT)
+	return n
+}
+
+func (p *parser) alias() *node {
+	n := p.node(aliasNode)
+	n.value = string(p.event.anchor)
+	n.alias = p.doc.anchors[n.value]
+	if n.alias == nil {
+		failf("unknown anchor '%s' referenced", n.value)
+	}
+	p.expect(yaml_ALIAS_EVENT)
+	return n
+}
+
+func (p *parser) scalar() *node {
+	n := p.node(scalarNode)
+	n.value = string(p.event.value)
+	n.tag = string(p.event.tag)
+	n.implicit = p.event.implicit
+	p.anchor(n, p.event.anchor)
+	p.expect(yaml_SCALAR_EVENT)
+	return n
+}
+
+func (p *parser) sequence() *node {
+	n := p.node(sequenceNode)
+	p.anchor(n, p.event.anchor)
+	p.expect(yaml_SEQUENCE_START_EVENT)
+	for p.peek() != yaml_SEQUENCE_END_EVENT {
+		n.children = append(n.children, p.parse())
+	}
+	p.expect(yaml_SEQUENCE_END_EVENT)
+	return n
+}
+
+func (p *parser) mapping() *node {
+	n := p.node(mappingNode)
+	p.anchor(n, p.event.anchor)
+	p.expect(yaml_MAPPING_START_EVENT)
+	for p.peek() != yaml_MAPPING_END_EVENT {
+		n.children = append(n.children, p.parse(), p.parse())
+	}
+	p.expect(yaml_MAPPING_END_EVENT)
+	return n
+}
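Together these methods turn the flat libyaml event stream into a node tree: a document node wrapping one child, mappings whose children alternate key/value, and sequences whose children are the items. An in-package sketch of walking the result:

```go
package yaml

import "fmt"

// parseSketch shows the node-tree shape for a small document.
func parseSketch() {
	p := newParser([]byte("name: steve\nlangs: [go, c]\n"))
	defer p.destroy()

	doc := p.parse()        // documentNode
	root := doc.children[0] // mappingNode
	for i := 0; i < len(root.children); i += 2 {
		k, v := root.children[i], root.children[i+1]
		fmt.Println(k.value, "is sequence:", v.kind == sequenceNode)
	}
	// Prints:
	//   name is sequence: false
	//   langs is sequence: true
}
```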
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+	doc     *node
+	aliases map[*node]bool
+	mapType reflect.Type
+	terrors []string
+	strict  bool
+}
+
+var (
+	mapItemType    = reflect.TypeOf(MapItem{})
+	durationType   = reflect.TypeOf(time.Duration(0))
+	defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
+	ifaceType      = defaultMapType.Elem()
+	timeType       = reflect.TypeOf(time.Time{})
+	ptrTimeType    = reflect.TypeOf(&time.Time{})
+)
+
+func newDecoder(strict bool) *decoder {
+	d := &decoder{mapType: defaultMapType, strict: strict}
+	d.aliases = make(map[*node]bool)
+	return d
+}
+
+func (d *decoder) terror(n *node, tag string, out reflect.Value) {
+	if n.tag != "" {
+		tag = n.tag
+	}
+	value := n.value
+	if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
+		if len(value) > 10 {
+			value = " `" + value[:7] + "...`"
+		} else {
+			value = " `" + value + "`"
+		}
+	}
+	d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
+	terrlen := len(d.terrors)
+	err := u.UnmarshalYAML(func(v interface{}) (err error) {
+		defer handleErr(&err)
+		d.unmarshal(n, reflect.ValueOf(v))
+		if len(d.terrors) > terrlen {
+			issues := d.terrors[terrlen:]
+			d.terrors = d.terrors[:terrlen]
+			return &TypeError{issues}
+		}
+		return nil
+	})
+	if e, ok := err.(*TypeError); ok {
+		d.terrors = append(d.terrors, e.Errors...)
+		return false
+	}
+	if err != nil {
+		fail(err)
+	}
+	return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// its types unmarshalled appropriately.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+	if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
+		return out, false, false
+	}
+	again := true
+	for again {
+		again = false
+		if out.Kind() == reflect.Ptr {
+			if out.IsNil() {
+				out.Set(reflect.New(out.Type().Elem()))
+			}
+			out = out.Elem()
+			again = true
+		}
+		if out.CanAddr() {
+			if u, ok := out.Addr().Interface().(Unmarshaler); ok {
+				good = d.callUnmarshaler(n, u)
+				return out, true, good
+			}
+		}
+	}
+	return out, false, false
+}
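prepare is the hook that lets user types intercept decoding: any addressable value implementing the package's Unmarshaler interface gets UnmarshalYAML called with a closure that defers the actual decode. A sketch using the public API (the Version type is illustrative):

```go
package main

import (
	"fmt"
	"strings"

	yaml "gopkg.in/yaml.v2"
)

// Version decodes itself from a plain scalar via the Unmarshaler hook.
type Version struct {
	Major, Minor string
}

func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var s string
	if err := unmarshal(&s); err != nil { // deferred decode into an intermediate
		return err
	}
	parts := strings.SplitN(s, ".", 2)
	v.Major = parts[0]
	if len(parts) > 1 {
		v.Minor = parts[1]
	}
	return nil
}

func main() {
	var cfg struct {
		Version Version `yaml:"version"`
	}
	if err := yaml.Unmarshal([]byte("version: 1.4\n"), &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg.Version) // {Major:1 Minor:4}
}
```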
+
+func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
+	switch n.kind {
+	case documentNode:
+		return d.document(n, out)
+	case aliasNode:
+		return d.alias(n, out)
+	}
+	out, unmarshaled, good := d.prepare(n, out)
+	if unmarshaled {
+		return good
+	}
+	switch n.kind {
+	case scalarNode:
+		good = d.scalar(n, out)
+	case mappingNode:
+		good = d.mapping(n, out)
+	case sequenceNode:
+		good = d.sequence(n, out)
+	default:
+		panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
+	}
+	return good
+}
+
+func (d *decoder) document(n *node, out reflect.Value) (good bool) {
+	if len(n.children) == 1 {
+		d.doc = n
+		d.unmarshal(n.children[0], out)
+		return true
+	}
+	return false
+}
+
+func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
+	if d.aliases[n] {
+		// TODO this could actually be allowed in some circumstances.
+		failf("anchor '%s' value contains itself", n.value)
+	}
+	d.aliases[n] = true
+	good = d.unmarshal(n.alias, out)
+	delete(d.aliases, n)
+	return good
+}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+	for _, k := range out.MapKeys() {
+		out.SetMapIndex(k, zeroValue)
+	}
+}
+
+func (d *decoder) scalar(n *node, out reflect.Value) bool {
+	var tag string
+	var resolved interface{}
+	if n.tag == "" && !n.implicit {
+		tag = yaml_STR_TAG
+		resolved = n.value
+	} else {
+		tag, resolved = resolve(n.tag, n.value)
+		if tag == yaml_BINARY_TAG {
+			data, err := base64.StdEncoding.DecodeString(resolved.(string))
+			if err != nil {
+				failf("!!binary value contains invalid base64 data")
+			}
+			resolved = string(data)
+		}
+	}
+	if resolved == nil {
+		if out.Kind() == reflect.Map && !out.CanAddr() {
+			resetMap(out)
+		} else {
+			out.Set(reflect.Zero(out.Type()))
+		}
+		return true
+	}
+	if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+		// We've resolved to exactly the type we want, so use that.
+		out.Set(resolvedv)
+		return true
+	}
+	// Perhaps we can use the value as a TextUnmarshaler to
+	// set its value.
+	if out.CanAddr() {
+		u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
+		if ok {
+			var text []byte
+			if tag == yaml_BINARY_TAG {
+				text = []byte(resolved.(string))
+			} else {
+				// We let any value be unmarshaled into TextUnmarshaler.
+				// That might be more lax than we'd like, but the
+				// TextUnmarshaler itself should bowl out any dubious values.
+				text = []byte(n.value)
+			}
+			err := u.UnmarshalText(text)
+			if err != nil {
+				fail(err)
+			}
+			return true
+		}
+	}
+	switch out.Kind() {
+	case reflect.String:
+		if tag == yaml_BINARY_TAG {
+			out.SetString(resolved.(string))
+			return true
+		}
+		if resolved != nil {
+			out.SetString(n.value)
+			return true
+		}
+	case reflect.Interface:
+		if resolved == nil {
+			out.Set(reflect.Zero(out.Type()))
+		} else if tag == yaml_TIMESTAMP_TAG {
+			// It looks like a timestamp but for backward compatibility
+			// reasons we set it as a string, so that code that unmarshals
+			// timestamp-like values into interface{} will continue to
+			// see a string and not a time.Time.
+			// TODO(v3) Drop this.
+			out.Set(reflect.ValueOf(n.value))
+		} else {
+			out.Set(reflect.ValueOf(resolved))
+		}
+		return true
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		switch resolved := resolved.(type) {
+		case int:
+			if !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				return true
+			}
+		case int64:
+			if !out.OverflowInt(resolved) {
+				out.SetInt(resolved)
+				return true
+			}
+		case uint64:
+			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				return true
+			}
+		case float64:
+			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				return true
+			}
+		case string:
+			if out.Type() == durationType {
+				d, err := time.ParseDuration(resolved)
+				if err == nil {
+					out.SetInt(int64(d))
+					return true
+				}
+			}
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		switch resolved := resolved.(type) {
+		case int:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		case int64:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		case uint64:
+			if !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		case float64:
+			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		}
+	case reflect.Bool:
+		switch resolved := resolved.(type) {
+		case bool:
+			out.SetBool(resolved)
+			return true
+		}
+	case reflect.Float32, reflect.Float64:
+		switch resolved := resolved.(type) {
+		case int:
+			out.SetFloat(float64(resolved))
+			return true
+		case int64:
+			out.SetFloat(float64(resolved))
+			return true
+		case uint64:
+			out.SetFloat(float64(resolved))
+			return true
+		case float64:
+			out.SetFloat(resolved)
+			return true
+		}
+	case reflect.Struct:
+		if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+			out.Set(resolvedv)
+			return true
+		}
+	case reflect.Ptr:
+		if out.Type().Elem() == reflect.TypeOf(resolved) {
+			// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
+			elem := reflect.New(out.Type().Elem())
+			elem.Elem().Set(reflect.ValueOf(resolved))
+			out.Set(elem)
+			return true
+		}
+	}
+	d.terror(n, tag, out)
+	return false
+}
+
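+// settableValueOf returns an addressable copy of i, so that the decoder can
+// write through it with reflection.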
+func settableValueOf(i interface{}) reflect.Value {
+	v := reflect.ValueOf(i)
+	sv := reflect.New(v.Type()).Elem()
+	sv.Set(v)
+	return sv
+}
+
+func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
+	l := len(n.children)
+
+	var iface reflect.Value
+	switch out.Kind() {
+	case reflect.Slice:
+		out.Set(reflect.MakeSlice(out.Type(), l, l))
+	case reflect.Array:
+		if l != out.Len() {
+			failf("invalid array: want %d elements but got %d", out.Len(), l)
+		}
+	case reflect.Interface:
+		// No type hints. Will have to use a generic sequence.
+		iface = out
+		out = settableValueOf(make([]interface{}, l))
+	default:
+		d.terror(n, yaml_SEQ_TAG, out)
+		return false
+	}
+	et := out.Type().Elem()
+
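+	// Decode each child into a fresh element; children that fail to decode
+	// are dropped, so the decoded sequence may end up shorter than l.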
+	j := 0
+	for i := 0; i < l; i++ {
+		e := reflect.New(et).Elem()
+		if ok := d.unmarshal(n.children[i], e); ok {
+			out.Index(j).Set(e)
+			j++
+		}
+	}
+	if out.Kind() != reflect.Array {
+		out.Set(out.Slice(0, j))
+	}
+	if iface.IsValid() {
+		iface.Set(out)
+	}
+	return true
+}
+
+func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
+	switch out.Kind() {
+	case reflect.Struct:
+		return d.mappingStruct(n, out)
+	case reflect.Slice:
+		return d.mappingSlice(n, out)
+	case reflect.Map:
+		// okay
+	case reflect.Interface:
+		if d.mapType.Kind() == reflect.Map {
+			iface := out
+			out = reflect.MakeMap(d.mapType)
+			iface.Set(out)
+		} else {
+			slicev := reflect.New(d.mapType).Elem()
+			if !d.mappingSlice(n, slicev) {
+				return false
+			}
+			out.Set(slicev)
+			return true
+		}
+	default:
+		d.terror(n, yaml_MAP_TAG, out)
+		return false
+	}
+	outt := out.Type()
+	kt := outt.Key()
+	et := outt.Elem()
+
+	mapType := d.mapType
+	if outt.Key() == ifaceType && outt.Elem() == ifaceType {
+		d.mapType = outt
+	}
+
+	if out.IsNil() {
+		out.Set(reflect.MakeMap(outt))
+	}
+	l := len(n.children)
+	for i := 0; i < l; i += 2 {
+		if isMerge(n.children[i]) {
+			d.merge(n.children[i+1], out)
+			continue
+		}
+		k := reflect.New(kt).Elem()
+		if d.unmarshal(n.children[i], k) {
+			kkind := k.Kind()
+			if kkind == reflect.Interface {
+				kkind = k.Elem().Kind()
+			}
+			if kkind == reflect.Map || kkind == reflect.Slice {
+				failf("invalid map key: %#v", k.Interface())
+			}
+			e := reflect.New(et).Elem()
+			if d.unmarshal(n.children[i+1], e) {
+				d.setMapIndex(n.children[i+1], out, k, e)
+			}
+		}
+	}
+	d.mapType = mapType
+	return true
+}
+
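+// setMapIndex stores v under k in out. In strict mode, a key that is already
+// present is recorded as a type error rather than silently overwritten.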
+func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
+	if d.strict && out.MapIndex(k) != zeroValue {
+		d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
+		return
+	}
+	out.SetMapIndex(k, v)
+}
+
+func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
+	outt := out.Type()
+	if outt.Elem() != mapItemType {
+		d.terror(n, yaml_MAP_TAG, out)
+		return false
+	}
+
+	mapType := d.mapType
+	d.mapType = outt
+
+	var slice []MapItem
+	var l = len(n.children)
+	for i := 0; i < l; i += 2 {
+		if isMerge(n.children[i]) {
+			d.merge(n.children[i+1], out)
+			continue
+		}
+		item := MapItem{}
+		k := reflect.ValueOf(&item.Key).Elem()
+		if d.unmarshal(n.children[i], k) {
+			v := reflect.ValueOf(&item.Value).Elem()
+			if d.unmarshal(n.children[i+1], v) {
+				slice = append(slice, item)
+			}
+		}
+	}
+	out.Set(reflect.ValueOf(slice))
+	d.mapType = mapType
+	return true
+}
+
+func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
+	sinfo, err := getStructInfo(out.Type())
+	if err != nil {
+		panic(err)
+	}
+	name := settableValueOf("")
+	l := len(n.children)
+
+	var inlineMap reflect.Value
+	var elemType reflect.Type
+	if sinfo.InlineMap != -1 {
+		inlineMap = out.Field(sinfo.InlineMap)
+		inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
+		elemType = inlineMap.Type().Elem()
+	}
+
+	var doneFields []bool
+	if d.strict {
+		doneFields = make([]bool, len(sinfo.FieldsList))
+	}
+	for i := 0; i < l; i += 2 {
+		ni := n.children[i]
+		if isMerge(ni) {
+			d.merge(n.children[i+1], out)
+			continue
+		}
+		if !d.unmarshal(ni, name) {
+			continue
+		}
+		if info, ok := sinfo.FieldsMap[name.String()]; ok {
+			if d.strict {
+				if doneFields[info.Id] {
+					d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
+					continue
+				}
+				doneFields[info.Id] = true
+			}
+			var field reflect.Value
+			if info.Inline == nil {
+				field = out.Field(info.Num)
+			} else {
+				field = out.FieldByIndex(info.Inline)
+			}
+			d.unmarshal(n.children[i+1], field)
+		} else if sinfo.InlineMap != -1 {
+			if inlineMap.IsNil() {
+				inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+			}
+			value := reflect.New(elemType).Elem()
+			d.unmarshal(n.children[i+1], value)
+			d.setMapIndex(n.children[i+1], inlineMap, name, value)
+		} else if d.strict {
+			d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
+		}
+	}
+	return true
+}
+
+func failWantMap() {
+	failf("map merge requires map or sequence of maps as the value")
+}
+
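+// merge applies YAML merge-key semantics: the value of a "<<" key must be a
+// mapping, an alias to one, or a sequence of those. For example
+// (illustrative):
+//
+//	defaults: &defaults
+//	  a: 1
+//	extended:
+//	  <<: *defaults
+//	  b: 2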
+func (d *decoder) merge(n *node, out reflect.Value) {
+	switch n.kind {
+	case mappingNode:
+		d.unmarshal(n, out)
+	case aliasNode:
+		an, ok := d.doc.anchors[n.value]
+		if ok && an.kind != mappingNode {
+			failWantMap()
+		}
+		d.unmarshal(n, out)
+	case sequenceNode:
+		// Step backwards as earlier nodes take precedence.
+		for i := len(n.children) - 1; i >= 0; i-- {
+			ni := n.children[i]
+			if ni.kind == aliasNode {
+				an, ok := d.doc.anchors[ni.value]
+				if ok && an.kind != mappingNode {
+					failWantMap()
+				}
+			} else if ni.kind != mappingNode {
+				failWantMap()
+			}
+			d.unmarshal(ni, out)
+		}
+	default:
+		failWantMap()
+	}
+}
+
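+// isMerge reports whether n is the YAML merge key "<<".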
+func isMerge(n *node) bool {
+	return n.kind == scalarNode && n.value == "<<" && (n.implicit || n.tag == yaml_MERGE_TAG)
+}
diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go
new file mode 100644
index 000000000..a1c2cc526
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/emitterc.go
@@ -0,0 +1,1685 @@
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
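+// The helpers below flush whenever the buffer_pos+5 check trips, keeping
+// enough headroom in the output buffer for the widest single write: a
+// 4-byte UTF-8 sequence (the 3-byte BOM and 2-byte CRLF fit as well).
+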
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) {
+		return yaml_emitter_flush(emitter)
+	}
+	return true
+}
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+		return false
+	}
+	emitter.buffer[emitter.buffer_pos] = value
+	emitter.buffer_pos++
+	emitter.column++
+	return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+		return false
+	}
+	switch emitter.line_break {
+	case yaml_CR_BREAK:
+		emitter.buffer[emitter.buffer_pos] = '\r'
+		emitter.buffer_pos += 1
+	case yaml_LN_BREAK:
+		emitter.buffer[emitter.buffer_pos] = '\n'
+		emitter.buffer_pos += 1
+	case yaml_CRLN_BREAK:
+		emitter.buffer[emitter.buffer_pos+0] = '\r'
+		emitter.buffer[emitter.buffer_pos+1] = '\n'
+		emitter.buffer_pos += 2
+	default:
+		panic("unknown line break setting")
+	}
+	emitter.column = 0
+	emitter.line++
+	return true
+}
+
+// Copy a character from a string into buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+		return false
+	}
+	p := emitter.buffer_pos
+	w := width(s[*i])
+	switch w {
+	case 4:
+		emitter.buffer[p+3] = s[*i+3]
+		fallthrough
+	case 3:
+		emitter.buffer[p+2] = s[*i+2]
+		fallthrough
+	case 2:
+		emitter.buffer[p+1] = s[*i+1]
+		fallthrough
+	case 1:
+		emitter.buffer[p+0] = s[*i+0]
+	default:
+		panic("unknown character width")
+	}
+	emitter.column++
+	emitter.buffer_pos += w
+	*i += w
+	return true
+}
+
+// Write a whole string into buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+	for i := 0; i < len(s); {
+		if !write(emitter, s, &i) {
+			return false
+		}
+	}
+	return true
+}
+
+// Copy a line break character from a string into buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+	if s[*i] == '\n' {
+		if !put_break(emitter) {
+			return false
+		}
+		*i++
+	} else {
+		if !write(emitter, s, i) {
+			return false
+		}
+		emitter.column = 0
+		emitter.line++
+	}
+	return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+	emitter.error = yaml_EMITTER_ERROR
+	emitter.problem = problem
+	return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	emitter.events = append(emitter.events, *event)
+	for !yaml_emitter_need_more_events(emitter) {
+		event := &emitter.events[emitter.events_head]
+		if !yaml_emitter_analyze_event(emitter, event) {
+			return false
+		}
+		if !yaml_emitter_state_machine(emitter, event) {
+			return false
+		}
+		yaml_event_delete(event)
+		emitter.events_head++
+	}
+	return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra events before emitting:
+//  - 1 event for DOCUMENT-START
+//  - 2 events for SEQUENCE-START
+//  - 3 events for MAPPING-START
+//
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+	if emitter.events_head == len(emitter.events) {
+		return true
+	}
+	var accumulate int
+	switch emitter.events[emitter.events_head].typ {
+	case yaml_DOCUMENT_START_EVENT:
+		accumulate = 1
+	case yaml_SEQUENCE_START_EVENT:
+		accumulate = 2
+	case yaml_MAPPING_START_EVENT:
+		accumulate = 3
+	default:
+		return false
+	}
+	if len(emitter.events)-emitter.events_head > accumulate {
+		return false
+	}
+	var level int
+	for i := emitter.events_head; i < len(emitter.events); i++ {
+		switch emitter.events[i].typ {
+		case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+			level++
+		case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+			level--
+		}
+		if level == 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+	for i := 0; i < len(emitter.tag_directives); i++ {
+		if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+			if allow_duplicates {
+				return true
+			}
+			return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+		}
+	}
+
+	// [Go] Do we actually need to copy this given garbage collection
+	// and the lack of deallocating destructors?
+	tag_copy := yaml_tag_directive_t{
+		handle: make([]byte, len(value.handle)),
+		prefix: make([]byte, len(value.prefix)),
+	}
+	copy(tag_copy.handle, value.handle)
+	copy(tag_copy.prefix, value.prefix)
+	emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+	return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+	emitter.indents = append(emitter.indents, emitter.indent)
+	if emitter.indent < 0 {
+		if flow {
+			emitter.indent = emitter.best_indent
+		} else {
+			emitter.indent = 0
+		}
+	} else if !indentless {
+		emitter.indent += emitter.best_indent
+	}
+	return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	switch emitter.state {
+	case yaml_EMIT_STREAM_START_STATE:
+		return yaml_emitter_emit_stream_start(emitter, event)
+
+	case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+		return yaml_emitter_emit_document_start(emitter, event, true)
+
+	case yaml_EMIT_DOCUMENT_START_STATE:
+		return yaml_emitter_emit_document_start(emitter, event, false)
+
+	case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+		return yaml_emitter_emit_document_content(emitter, event)
+
+	case yaml_EMIT_DOCUMENT_END_STATE:
+		return yaml_emitter_emit_document_end(emitter, event)
+
+	case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
+
+	case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
+
+	case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
+
+	case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
+
+	case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+		return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+	case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+		return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+		return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+		return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+		return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+		return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+		return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+	case yaml_EMIT_END_STATE:
+		return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+	}
+	panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if event.typ != yaml_STREAM_START_EVENT {
+		return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+	}
+	if emitter.encoding == yaml_ANY_ENCODING {
+		emitter.encoding = event.encoding
+		if emitter.encoding == yaml_ANY_ENCODING {
+			emitter.encoding = yaml_UTF8_ENCODING
+		}
+	}
+	if emitter.best_indent < 2 || emitter.best_indent > 9 {
+		emitter.best_indent = 2
+	}
+	if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+		emitter.best_width = 80
+	}
+	if emitter.best_width < 0 {
+		emitter.best_width = 1<<31 - 1
+	}
+	if emitter.line_break == yaml_ANY_BREAK {
+		emitter.line_break = yaml_LN_BREAK
+	}
+
+	emitter.indent = -1
+	emitter.line = 0
+	emitter.column = 0
+	emitter.whitespace = true
+	emitter.indention = true
+
+	if emitter.encoding != yaml_UTF8_ENCODING {
+		if !yaml_emitter_write_bom(emitter) {
+			return false
+		}
+	}
+	emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+	return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+	if event.typ == yaml_DOCUMENT_START_EVENT {
+
+		if event.version_directive != nil {
+			if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+				return false
+			}
+		}
+
+		for i := 0; i < len(event.tag_directives); i++ {
+			tag_directive := &event.tag_directives[i]
+			if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+				return false
+			}
+			if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+				return false
+			}
+		}
+
+		for i := 0; i < len(default_tag_directives); i++ {
+			tag_directive := &default_tag_directives[i]
+			if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+				return false
+			}
+		}
+
+		implicit := event.implicit
+		if !first || emitter.canonical {
+			implicit = false
+		}
+
+		if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+
+		if event.version_directive != nil {
+			implicit = false
+			if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+
+		if len(event.tag_directives) > 0 {
+			implicit = false
+			for i := 0; i < len(event.tag_directives); i++ {
+				tag_directive := &event.tag_directives[i]
+				if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+					return false
+				}
+				if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+					return false
+				}
+				if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+					return false
+				}
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+		}
+
+		if yaml_emitter_check_empty_document(emitter) {
+			implicit = false
+		}
+		if !implicit {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+			if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+				return false
+			}
+			if emitter.canonical {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+		}
+
+		emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+		return true
+	}
+
+	if event.typ == yaml_STREAM_END_EVENT {
+		if emitter.open_ended {
+			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_flush(emitter) {
+			return false
+		}
+		emitter.state = yaml_EMIT_END_STATE
+		return true
+	}
+
+	return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+	return yaml_emitter_emit_node(emitter, event, true, false, false, false)
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if event.typ != yaml_DOCUMENT_END_EVENT {
+		return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !event.implicit {
+		// [Go] Allocate the slice elsewhere.
+		if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+	if !yaml_emitter_flush(emitter) {
+		return false
+	}
+	emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+	emitter.tag_directives = emitter.tag_directives[:0]
+	return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+			return false
+		}
+		if !yaml_emitter_increase_indent(emitter, true, false) {
+			return false
+		}
+		emitter.flow_level++
+	}
+
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		emitter.flow_level--
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		if emitter.canonical && !first {
+			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+			return false
+		}
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+
+		return true
+	}
+
+	if !first {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+
+	if emitter.canonical || emitter.column > emitter.best_width {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+			return false
+		}
+		if !yaml_emitter_increase_indent(emitter, true, false) {
+			return false
+		}
+		emitter.flow_level++
+	}
+
+	if event.typ == yaml_MAPPING_END_EVENT {
+		emitter.flow_level--
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		if emitter.canonical && !first {
+			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+			return false
+		}
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+
+	if !first {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+	if emitter.canonical || emitter.column > emitter.best_width {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+
+	if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+	if simple {
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+			return false
+		}
+	} else {
+		if emitter.canonical || emitter.column > emitter.best_width {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+			return false
+		}
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
+			return false
+		}
+	}
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, false) {
+			return false
+		}
+	}
+	if event.typ == yaml_MAPPING_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+	if simple {
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+			return false
+		}
+	} else {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+			return false
+		}
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+	root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+	emitter.root_context = root
+	emitter.sequence_context = sequence
+	emitter.mapping_context = mapping
+	emitter.simple_key_context = simple_key
+
+	switch event.typ {
+	case yaml_ALIAS_EVENT:
+		return yaml_emitter_emit_alias(emitter, event)
+	case yaml_SCALAR_EVENT:
+		return yaml_emitter_emit_scalar(emitter, event)
+	case yaml_SEQUENCE_START_EVENT:
+		return yaml_emitter_emit_sequence_start(emitter, event)
+	case yaml_MAPPING_START_EVENT:
+		return yaml_emitter_emit_mapping_start(emitter, event)
+	default:
+		return yaml_emitter_set_emitter_error(emitter,
+			fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
+	}
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	emitter.state = emitter.states[len(emitter.states)-1]
+	emitter.states = emitter.states[:len(emitter.states)-1]
+	return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_select_scalar_style(emitter, event) {
+		return false
+	}
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if !yaml_emitter_increase_indent(emitter, true, false) {
+		return false
+	}
+	if !yaml_emitter_process_scalar(emitter) {
+		return false
+	}
+	emitter.indent = emitter.indents[len(emitter.indents)-1]
+	emitter.indents = emitter.indents[:len(emitter.indents)-1]
+	emitter.state = emitter.states[len(emitter.states)-1]
+	emitter.states = emitter.states[:len(emitter.states)-1]
+	return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+		yaml_emitter_check_empty_sequence(emitter) {
+		emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+	} else {
+		emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+	}
+	return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+		yaml_emitter_check_empty_mapping(emitter) {
+		emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+	} else {
+		emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+	}
+	return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+	return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+	if len(emitter.events)-emitter.events_head < 2 {
+		return false
+	}
+	return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+		emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+	if len(emitter.events)-emitter.events_head < 2 {
+		return false
+	}
+	return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+		emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+	length := 0
+	switch emitter.events[emitter.events_head].typ {
+	case yaml_ALIAS_EVENT:
+		length += len(emitter.anchor_data.anchor)
+	case yaml_SCALAR_EVENT:
+		if emitter.scalar_data.multiline {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix) +
+			len(emitter.scalar_data.value)
+	case yaml_SEQUENCE_START_EVENT:
+		if !yaml_emitter_check_empty_sequence(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	case yaml_MAPPING_START_EVENT:
+		if !yaml_emitter_check_empty_mapping(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	default:
+		return false
+	}
+	return length <= 128
+}
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+	if no_tag && !event.implicit && !event.quoted_implicit {
+		return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+	}
+
+	style := event.scalar_style()
+	if style == yaml_ANY_SCALAR_STYLE {
+		style = yaml_PLAIN_SCALAR_STYLE
+	}
+	if emitter.canonical {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	if emitter.simple_key_context && emitter.scalar_data.multiline {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+
+	if style == yaml_PLAIN_SCALAR_STYLE {
+		if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+			emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if no_tag && !event.implicit {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+		if !emitter.scalar_data.single_quoted_allowed {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+		if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+
+	if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+		emitter.tag_data.handle = []byte{'!'}
+	}
+	emitter.scalar_data.style = style
+	return true
+}
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+	if emitter.anchor_data.anchor == nil {
+		return true
+	}
+	c := []byte{'&'}
+	if emitter.anchor_data.alias {
+		c[0] = '*'
+	}
+	if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+		return false
+	}
+	return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+	if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+		return true
+	}
+	if len(emitter.tag_data.handle) > 0 {
+		if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+			return false
+		}
+		if len(emitter.tag_data.suffix) > 0 {
+			if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+				return false
+			}
+		}
+	} else {
+		// [Go] Allocate these slices elsewhere.
+		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+			return false
+		}
+	}
+	return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+	switch emitter.scalar_data.style {
+	case yaml_PLAIN_SCALAR_STYLE:
+		return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_LITERAL_SCALAR_STYLE:
+		return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+	case yaml_FOLDED_SCALAR_STYLE:
+		return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+	}
+	panic("unknown scalar style")
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+	if version_directive.major != 1 || version_directive.minor != 1 {
+		return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+	}
+	return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+	handle := tag_directive.handle
+	prefix := tag_directive.prefix
+	if len(handle) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+	}
+	if handle[0] != '!' {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+	}
+	if handle[len(handle)-1] != '!' {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+	}
+	for i := 1; i < len(handle)-1; i += width(handle[i]) {
+		if !is_alpha(handle, i) {
+			return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+		}
+	}
+	if len(prefix) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+	}
+	return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+	if len(anchor) == 0 {
+		problem := "anchor value must not be empty"
+		if alias {
+			problem = "alias value must not be empty"
+		}
+		return yaml_emitter_set_emitter_error(emitter, problem)
+	}
+	for i := 0; i < len(anchor); i += width(anchor[i]) {
+		if !is_alpha(anchor, i) {
+			problem := "anchor value must contain alphanumerical characters only"
+			if alias {
+				problem = "alias value must contain alphanumerical characters only"
+			}
+			return yaml_emitter_set_emitter_error(emitter, problem)
+		}
+	}
+	emitter.anchor_data.anchor = anchor
+	emitter.anchor_data.alias = alias
+	return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+	if len(tag) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+	}
+	for i := 0; i < len(emitter.tag_directives); i++ {
+		tag_directive := &emitter.tag_directives[i]
+		if bytes.HasPrefix(tag, tag_directive.prefix) {
+			emitter.tag_data.handle = tag_directive.handle
+			emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+			return true
+		}
+	}
+	emitter.tag_data.suffix = tag
+	return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+	var (
+		block_indicators   = false
+		flow_indicators    = false
+		line_breaks        = false
+		special_characters = false
+
+		leading_space  = false
+		leading_break  = false
+		trailing_space = false
+		trailing_break = false
+		break_space    = false
+		space_break    = false
+
+		preceded_by_whitespace = false
+		followed_by_whitespace = false
+		previous_space         = false
+		previous_break         = false
+	)
+
+	emitter.scalar_data.value = value
+
+	if len(value) == 0 {
+		emitter.scalar_data.multiline = false
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = true
+		emitter.scalar_data.single_quoted_allowed = true
+		emitter.scalar_data.block_allowed = false
+		return true
+	}
+
+	if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+		block_indicators = true
+		flow_indicators = true
+	}
+
+	preceded_by_whitespace = true
+	for i, w := 0, 0; i < len(value); i += w {
+		w = width(value[i])
+		followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+		if i == 0 {
+			switch value[i] {
+			case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+				flow_indicators = true
+				block_indicators = true
+			case '?', ':':
+				flow_indicators = true
+				if followed_by_whitespace {
+					block_indicators = true
+				}
+			case '-':
+				if followed_by_whitespace {
+					flow_indicators = true
+					block_indicators = true
+				}
+			}
+		} else {
+			switch value[i] {
+			case ',', '?', '[', ']', '{', '}':
+				flow_indicators = true
+			case ':':
+				flow_indicators = true
+				if followed_by_whitespace {
+					block_indicators = true
+				}
+			case '#':
+				if preceded_by_whitespace {
+					flow_indicators = true
+					block_indicators = true
+				}
+			}
+		}
+
+		if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+			special_characters = true
+		}
+		if is_space(value, i) {
+			if i == 0 {
+				leading_space = true
+			}
+			if i+width(value[i]) == len(value) {
+				trailing_space = true
+			}
+			if previous_break {
+				break_space = true
+			}
+			previous_space = true
+			previous_break = false
+		} else if is_break(value, i) {
+			line_breaks = true
+			if i == 0 {
+				leading_break = true
+			}
+			if i+width(value[i]) == len(value) {
+				trailing_break = true
+			}
+			if previous_space {
+				space_break = true
+			}
+			previous_space = false
+			previous_break = true
+		} else {
+			previous_space = false
+			previous_break = false
+		}
+
+		// [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
+		preceded_by_whitespace = is_blankz(value, i)
+	}
+
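+	// Start with every style allowed and rule styles out based on the
+	// features collected above (leading/trailing whitespace, break/space
+	// runs, special characters, indicator characters).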
+	emitter.scalar_data.multiline = line_breaks
+	emitter.scalar_data.flow_plain_allowed = true
+	emitter.scalar_data.block_plain_allowed = true
+	emitter.scalar_data.single_quoted_allowed = true
+	emitter.scalar_data.block_allowed = true
+
+	if leading_space || leading_break || trailing_space || trailing_break {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+	}
+	if trailing_space {
+		emitter.scalar_data.block_allowed = false
+	}
+	if break_space {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+		emitter.scalar_data.single_quoted_allowed = false
+	}
+	if space_break || special_characters {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+		emitter.scalar_data.single_quoted_allowed = false
+		emitter.scalar_data.block_allowed = false
+	}
+	if line_breaks {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+	}
+	if flow_indicators {
+		emitter.scalar_data.flow_plain_allowed = false
+	}
+	if block_indicators {
+		emitter.scalar_data.block_plain_allowed = false
+	}
+	return true
+}
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+	emitter.anchor_data.anchor = nil
+	emitter.tag_data.handle = nil
+	emitter.tag_data.suffix = nil
+	emitter.scalar_data.value = nil
+
+	switch event.typ {
+	case yaml_ALIAS_EVENT:
+		if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+			return false
+		}
+
+	case yaml_SCALAR_EVENT:
+		if len(event.anchor) > 0 {
+			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+				return false
+			}
+		}
+		if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+			if !yaml_emitter_analyze_tag(emitter, event.tag) {
+				return false
+			}
+		}
+		if !yaml_emitter_analyze_scalar(emitter, event.value) {
+			return false
+		}
+
+	case yaml_SEQUENCE_START_EVENT:
+		if len(event.anchor) > 0 {
+			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+				return false
+			}
+		}
+		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+			if !yaml_emitter_analyze_tag(emitter, event.tag) {
+				return false
+			}
+		}
+
+	case yaml_MAPPING_START_EVENT:
+		if len(event.anchor) > 0 {
+			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+				return false
+			}
+		}
+		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+			if !yaml_emitter_analyze_tag(emitter, event.tag) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+	if !flush(emitter) {
+		return false
+	}
+	pos := emitter.buffer_pos
+	emitter.buffer[pos+0] = '\xEF'
+	emitter.buffer[pos+1] = '\xBB'
+	emitter.buffer[pos+2] = '\xBF'
+	emitter.buffer_pos += 3
+	return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+	indent := emitter.indent
+	if indent < 0 {
+		indent = 0
+	}
+	if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+		if !put_break(emitter) {
+			return false
+		}
+	}
+	for emitter.column < indent {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	emitter.whitespace = true
+	emitter.indention = true
+	return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+	if need_whitespace && !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !write_all(emitter, indicator) {
+		return false
+	}
+	emitter.whitespace = is_whitespace
+	emitter.indention = (emitter.indention && is_indention)
+	emitter.open_ended = false
+	return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+	if !write_all(emitter, value) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !write_all(emitter, value) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+	if need_whitespace && !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
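+	// URI-safe characters are written verbatim; everything else is
+	// %-escaped byte by byte with uppercase hex, URI-style.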
+	for i := 0; i < len(value); {
+		var must_write bool
+		switch value[i] {
+		case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+			must_write = true
+		default:
+			must_write = is_alpha(value, i)
+		}
+		if must_write {
+			if !write(emitter, value, &i) {
+				return false
+			}
+		} else {
+			w := width(value[i])
+			for k := 0; k < w; k++ {
+				octet := value[i]
+				i++
+				if !put(emitter, '%') {
+					return false
+				}
+
+				c := octet >> 4
+				if c < 10 {
+					c += '0'
+				} else {
+					c += 'A' - 10
+				}
+				if !put(emitter, c) {
+					return false
+				}
+
+				c = octet & 0x0f
+				if c < 10 {
+					c += '0'
+				} else {
+					c += 'A' - 10
+				}
+				if !put(emitter, c) {
+					return false
+				}
+			}
+		}
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+
+	spaces := false
+	breaks := false
+	for i := 0; i < len(value); {
+		if is_space(value, i) {
+			if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				i += width(value[i])
+			} else {
+				if !write(emitter, value, &i) {
+					return false
+				}
+			}
+			spaces = true
+		} else if is_break(value, i) {
+			if !breaks && value[i] == '\n' {
+				if !put_break(emitter) {
+					return false
+				}
+			}
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+			if !write(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = false
+			spaces = false
+			breaks = false
+		}
+	}
+
+	emitter.whitespace = false
+	emitter.indention = false
+	if emitter.root_context {
+		emitter.open_ended = true
+	}
+
+	return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+		return false
+	}
+
+	spaces := false
+	breaks := false
+	for i := 0; i < len(value); {
+		if is_space(value, i) {
+			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				i += width(value[i])
+			} else {
+				if !write(emitter, value, &i) {
+					return false
+				}
+			}
+			spaces = true
+		} else if is_break(value, i) {
+			if !breaks && value[i] == '\n' {
+				if !put_break(emitter) {
+					return false
+				}
+			}
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+			if value[i] == '\'' {
+				if !put(emitter, '\'') {
+					return false
+				}
+			}
+			if !write(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = false
+			spaces = false
+			breaks = false
+		}
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+	spaces := false
+	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+		return false
+	}
+
+	for i := 0; i < len(value); {
+		if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+			is_bom(value, i) || is_break(value, i) ||
+			value[i] == '"' || value[i] == '\\' {
+
+			octet := value[i]
+
+			var w int
+			var v rune
+			switch {
+			case octet&0x80 == 0x00:
+				w, v = 1, rune(octet&0x7F)
+			case octet&0xE0 == 0xC0:
+				w, v = 2, rune(octet&0x1F)
+			case octet&0xF0 == 0xE0:
+				w, v = 3, rune(octet&0x0F)
+			case octet&0xF8 == 0xF0:
+				w, v = 4, rune(octet&0x07)
+			}
+			for k := 1; k < w; k++ {
+				octet = value[i+k]
+				v = (v << 6) + (rune(octet) & 0x3F)
+			}
+			i += w
+
+			if !put(emitter, '\\') {
+				return false
+			}
+
+			var ok bool
+			switch v {
+			case 0x00:
+				ok = put(emitter, '0')
+			case 0x07:
+				ok = put(emitter, 'a')
+			case 0x08:
+				ok = put(emitter, 'b')
+			case 0x09:
+				ok = put(emitter, 't')
+			case 0x0A:
+				ok = put(emitter, 'n')
+			case 0x0b:
+				ok = put(emitter, 'v')
+			case 0x0c:
+				ok = put(emitter, 'f')
+			case 0x0d:
+				ok = put(emitter, 'r')
+			case 0x1b:
+				ok = put(emitter, 'e')
+			case 0x22:
+				ok = put(emitter, '"')
+			case 0x5c:
+				ok = put(emitter, '\\')
+			case 0x85:
+				ok = put(emitter, 'N')
+			case 0xA0:
+				ok = put(emitter, '_')
+			case 0x2028:
+				ok = put(emitter, 'L')
+			case 0x2029:
+				ok = put(emitter, 'P')
+			default:
+				if v <= 0xFF {
+					ok = put(emitter, 'x')
+					w = 2
+				} else if v <= 0xFFFF {
+					ok = put(emitter, 'u')
+					w = 4
+				} else {
+					ok = put(emitter, 'U')
+					w = 8
+				}
+				for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+					digit := byte((v >> uint(k)) & 0x0F)
+					if digit < 10 {
+						ok = put(emitter, digit+'0')
+					} else {
+						ok = put(emitter, digit+'A'-10)
+					}
+				}
+			}
+			if !ok {
+				return false
+			}
+			spaces = false
+		} else if is_space(value, i) {
+			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				if is_space(value, i+1) {
+					if !put(emitter, '\\') {
+						return false
+					}
+				}
+				i += width(value[i])
+			} else if !write(emitter, value, &i) {
+				return false
+			}
+			spaces = true
+		} else {
+			if !write(emitter, value, &i) {
+				return false
+			}
+			spaces = false
+		}
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+	if is_space(value, 0) || is_break(value, 0) {
+		indent_hint := []byte{'0' + byte(emitter.best_indent)}
+		if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+			return false
+		}
+	}
+
+	emitter.open_ended = false
+
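+	// Chomping hint: '-' (strip) when the value has no trailing line break,
+	// '+' (keep) when it ends in more than one break or consists only of a
+	// break; no hint means the default "clip" behaviour.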
+	var chomp_hint [1]byte
+	if len(value) == 0 {
+		chomp_hint[0] = '-'
+	} else {
+		i := len(value) - 1
+		for value[i]&0xC0 == 0x80 {
+			i--
+		}
+		if !is_break(value, i) {
+			chomp_hint[0] = '-'
+		} else if i == 0 {
+			chomp_hint[0] = '+'
+			emitter.open_ended = true
+		} else {
+			i--
+			for value[i]&0xC0 == 0x80 {
+				i--
+			}
+			if is_break(value, i) {
+				chomp_hint[0] = '+'
+				emitter.open_ended = true
+			}
+		}
+	}
+	if chomp_hint[0] != 0 {
+		if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+			return false
+		}
+	}
+	return true
+}
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+	if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+		return false
+	}
+	if !put_break(emitter) {
+		return false
+	}
+	emitter.indention = true
+	emitter.whitespace = true
+	breaks := true
+	for i := 0; i < len(value); {
+		if is_break(value, i) {
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+			if !write(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = false
+			breaks = false
+		}
+	}
+
+	return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+	if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+		return false
+	}
+
+	if !put_break(emitter) {
+		return false
+	}
+	emitter.indention = true
+	emitter.whitespace = true
+
+	breaks := true
+	leading_spaces := true
+	for i := 0; i < len(value); {
+		if is_break(value, i) {
+			if !breaks && !leading_spaces && value[i] == '\n' {
+				k := 0
+				for is_break(value, k) {
+					k += width(value[k])
+				}
+				if !is_blankz(value, k) {
+					if !put_break(emitter) {
+						return false
+					}
+				}
+			}
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				leading_spaces = is_blank(value, i)
+			}
+			if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				i += width(value[i])
+			} else {
+				if !write(emitter, value, &i) {
+					return false
+				}
+			}
+			emitter.indention = false
+			breaks = false
+		}
+	}
+	return true
+}
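+
+// Illustrative note (not part of upstream): the emitter.column check above is
+// what folds long lines. With a small best_width, a value such as
+// "aaa bbb ccc ddd" (no trailing break, hence the '-' hint) comes out roughly
+// as
+//
+//	>-
+//	  aaa bbb ccc
+//	  ddd
+//
+// with each fold replacing the first space written past the column limit; the
+// exact fold points depend on the current column and best_width.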
diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go
new file mode 100644
index 000000000..a14435e82
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/encode.go
@@ -0,0 +1,362 @@
+package yaml
+
+import (
+	"encoding"
+	"fmt"
+	"io"
+	"reflect"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+)
+
+type encoder struct {
+	emitter yaml_emitter_t
+	event   yaml_event_t
+	out     []byte
+	flow    bool
+	// doneInit holds whether the initial stream_start_event has been
+	// emitted.
+	doneInit bool
+}
+
+func newEncoder() *encoder {
+	e := &encoder{}
+	yaml_emitter_initialize(&e.emitter)
+	yaml_emitter_set_output_string(&e.emitter, &e.out)
+	yaml_emitter_set_unicode(&e.emitter, true)
+	return e
+}
+
+func newEncoderWithWriter(w io.Writer) *encoder {
+	e := &encoder{}
+	yaml_emitter_initialize(&e.emitter)
+	yaml_emitter_set_output_writer(&e.emitter, w)
+	yaml_emitter_set_unicode(&e.emitter, true)
+	return e
+}
+
+func (e *encoder) init() {
+	if e.doneInit {
+		return
+	}
+	yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
+	e.emit()
+	e.doneInit = true
+}
+
+func (e *encoder) finish() {
+	e.emitter.open_ended = false
+	yaml_stream_end_event_initialize(&e.event)
+	e.emit()
+}
+
+func (e *encoder) destroy() {
+	yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+	// This will internally delete the e.event value.
+	e.must(yaml_emitter_emit(&e.emitter, &e.event))
+}
+
+func (e *encoder) must(ok bool) {
+	if !ok {
+		msg := e.emitter.problem
+		if msg == "" {
+			msg = "unknown problem generating YAML content"
+		}
+		failf("%s", msg)
+	}
+}
+
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
+	e.init()
+	yaml_document_start_event_initialize(&e.event, nil, nil, true)
+	e.emit()
+	e.marshal(tag, in)
+	yaml_document_end_event_initialize(&e.event, true)
+	e.emit()
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+	if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
+		e.nilv()
+		return
+	}
+	iface := in.Interface()
+	switch m := iface.(type) {
+	case time.Time, *time.Time:
+		// Although time.Time implements TextMarshaler,
+		// we don't want to treat it as a string for YAML
+		// purposes because YAML has special support for
+		// timestamps.
+	case Marshaler:
+		v, err := m.MarshalYAML()
+		if err != nil {
+			fail(err)
+		}
+		if v == nil {
+			e.nilv()
+			return
+		}
+		in = reflect.ValueOf(v)
+	case encoding.TextMarshaler:
+		text, err := m.MarshalText()
+		if err != nil {
+			fail(err)
+		}
+		in = reflect.ValueOf(string(text))
+	case nil:
+		e.nilv()
+		return
+	}
+	switch in.Kind() {
+	case reflect.Interface:
+		e.marshal(tag, in.Elem())
+	case reflect.Map:
+		e.mapv(tag, in)
+	case reflect.Ptr:
+		if in.Type() == ptrTimeType {
+			e.timev(tag, in.Elem())
+		} else {
+			e.marshal(tag, in.Elem())
+		}
+	case reflect.Struct:
+		if in.Type() == timeType {
+			e.timev(tag, in)
+		} else {
+			e.structv(tag, in)
+		}
+	case reflect.Slice, reflect.Array:
+		if in.Type().Elem() == mapItemType {
+			e.itemsv(tag, in)
+		} else {
+			e.slicev(tag, in)
+		}
+	case reflect.String:
+		e.stringv(tag, in)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		if in.Type() == durationType {
+			e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
+		} else {
+			e.intv(tag, in)
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		e.uintv(tag, in)
+	case reflect.Float32, reflect.Float64:
+		e.floatv(tag, in)
+	case reflect.Bool:
+		e.boolv(tag, in)
+	default:
+		panic("cannot marshal type: " + in.Type().String())
+	}
+}
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+	e.mappingv(tag, func() {
+		keys := keyList(in.MapKeys())
+		sort.Sort(keys)
+		for _, k := range keys {
+			e.marshal("", k)
+			e.marshal("", in.MapIndex(k))
+		}
+	})
+}
+
+func (e *encoder) itemsv(tag string, in reflect.Value) {
+	e.mappingv(tag, func() {
+		slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
+		for _, item := range slice {
+			e.marshal("", reflect.ValueOf(item.Key))
+			e.marshal("", reflect.ValueOf(item.Value))
+		}
+	})
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+	sinfo, err := getStructInfo(in.Type())
+	if err != nil {
+		panic(err)
+	}
+	e.mappingv(tag, func() {
+		for _, info := range sinfo.FieldsList {
+			var value reflect.Value
+			if info.Inline == nil {
+				value = in.Field(info.Num)
+			} else {
+				value = in.FieldByIndex(info.Inline)
+			}
+			if info.OmitEmpty && isZero(value) {
+				continue
+			}
+			e.marshal("", reflect.ValueOf(info.Key))
+			e.flow = info.Flow
+			e.marshal("", value)
+		}
+		if sinfo.InlineMap >= 0 {
+			m := in.Field(sinfo.InlineMap)
+			if m.Len() > 0 {
+				e.flow = false
+				keys := keyList(m.MapKeys())
+				sort.Sort(keys)
+				for _, k := range keys {
+					if _, found := sinfo.FieldsMap[k.String()]; found {
+						panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
+					}
+					e.marshal("", k)
+					e.flow = false
+					e.marshal("", m.MapIndex(k))
+				}
+			}
+		}
+	})
+}
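+
+// Illustrative note (not part of upstream): given the hypothetical type
+//
+//	type T struct {
+//		A int            `yaml:"a,omitempty"`
+//		M map[string]int `yaml:",inline"`
+//	}
+//
+// a zero A is skipped by the isZero check above, the entries of M are emitted
+// as if they were top-level keys, and an entry in M keyed "a" panics because
+// it collides with the struct field.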
+
+func (e *encoder) mappingv(tag string, f func()) {
+	implicit := tag == ""
+	style := yaml_BLOCK_MAPPING_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_MAPPING_STYLE
+	}
+	yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+	e.emit()
+	f()
+	yaml_mapping_end_event_initialize(&e.event)
+	e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+	implicit := tag == ""
+	style := yaml_BLOCK_SEQUENCE_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_SEQUENCE_STYLE
+	}
+	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+	e.emit()
+	n := in.Len()
+	for i := 0; i < n; i++ {
+		e.marshal("", in.Index(i))
+	}
+	e.must(yaml_sequence_end_event_initialize(&e.event))
+	e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but such values should still be marshalled
+// quoted for the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+	// Fast path.
+	if s == "" {
+		return false
+	}
+	c := s[0]
+	if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+		return false
+	}
+	// Do the full match.
+	return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
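+
+// For example (illustrative, not upstream): "1:30" and "190:20:30" match the
+// pattern and would be read back as sexagesimal numbers by a YAML 1.1 parser,
+// so stringv below emits them quoted; "1:60" does not match, since every
+// segment after the first is limited to 0-59.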
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+	var style yaml_scalar_style_t
+	s := in.String()
+	canUsePlain := true
+	switch {
+	case !utf8.ValidString(s):
+		if tag == yaml_BINARY_TAG {
+			failf("explicitly tagged !!binary data must be base64-encoded")
+		}
+		if tag != "" {
+			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+		}
+		// It can't be encoded directly as YAML, so use a binary tag
+		// and encode it as base64.
+		tag = yaml_BINARY_TAG
+		s = encodeBase64(s)
+	case tag == "":
+		// Check to see if it would resolve to a specific
+		// tag when encoded unquoted. If it doesn't,
+		// there's no need to quote it.
+		rtag, _ := resolve("", s)
+		canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
+	}
+	// Note: it's possible for user code to emit invalid YAML
+	// if they explicitly specify a tag and a string containing
+	// text that's incompatible with that tag.
+	switch {
+	case strings.Contains(s, "\n"):
+		style = yaml_LITERAL_SCALAR_STYLE
+	case canUsePlain:
+		style = yaml_PLAIN_SCALAR_STYLE
+	default:
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	e.emitScalar(s, "", tag, style)
+}
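+
+// Illustrative note (not part of upstream) on the style selection above:
+//
+//	"hello"        -> plain (resolves back to !!str, no quoting needed)
+//	"true"         -> double-quoted (would otherwise resolve to !!bool)
+//	"1:30"         -> double-quoted (base 60 float in YAML 1.1)
+//	"line1\nline2" -> literal (any string containing a newline)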
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+	var s string
+	if in.Bool() {
+		s = "true"
+	} else {
+		s = "false"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+	s := strconv.FormatInt(in.Int(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+	s := strconv.FormatUint(in.Uint(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+	t := in.Interface().(time.Time)
+	s := t.Format(time.RFC3339Nano)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+	// Issue #352: When formatting, use the precision of the underlying value
+	precision := 64
+	if in.Kind() == reflect.Float32 {
+		precision = 32
+	}
+
+	s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+	switch s {
+	case "+Inf":
+		s = ".inf"
+	case "-Inf":
+		s = "-.inf"
+	case "NaN":
+		s = ".nan"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
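+
+// For example (illustrative, not upstream): float32(0.1) is formatted with
+// 32-bit precision and yields "0.1" rather than the 64-bit rendering
+// "0.10000000149011612", and Go's spellings of the special values are mapped
+// to the YAML ones:
+//
+//	math.Inf(1)  -> .inf
+//	math.Inf(-1) -> -.inf
+//	math.NaN()   -> .nan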
+
+func (e *encoder) nilv() {
+	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
+	implicit := tag == ""
+	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+	e.emit()
+}
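+
+// A minimal sketch (illustrative, not part of upstream) of how this encoder
+// is driven; the package's Marshal entry point is assumed to do roughly:
+//
+//	e := newEncoder()
+//	defer e.destroy()
+//	e.marshalDoc("", reflect.ValueOf(map[string]int{"a": 1}))
+//	e.finish()
+//	out := e.out // "a: 1\n"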
diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go
new file mode 100644
index 000000000..81d05dfe5
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/parserc.go
@@ -0,0 +1,1095 @@
+package yaml
+
+import (
+	"bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream               ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document    ::= block_node DOCUMENT-END*
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence    ::=
+//                          ALIAS
+//                          | properties (block_content | indentless_block_sequence)?
+//                          | block_content
+//                          | indentless_block_sequence
+// block_node           ::= ALIAS
+//                          | properties block_content?
+//                          | block_content
+// flow_node            ::= ALIAS
+//                          | properties flow_content?
+//                          | flow_content
+// properties           ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content        ::= block_collection | flow_collection | SCALAR
+// flow_content         ::= flow_collection | SCALAR
+// block_collection     ::= block_sequence | block_mapping
+// flow_collection      ::= flow_sequence | flow_mapping
+// block_sequence       ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
+// block_mapping        ::= BLOCK-MAPPING_START
+//                          ((KEY block_node_or_indentless_sequence?)?
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//                          BLOCK-END
+// flow_sequence        ::= FLOW-SEQUENCE-START
+//                          (flow_sequence_entry FLOW-ENTRY)*
+//                          flow_sequence_entry?
+//                          FLOW-SEQUENCE-END
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping         ::= FLOW-MAPPING-START
+//                          (flow_mapping_entry FLOW-ENTRY)*
+//                          flow_mapping_entry?
+//                          FLOW-MAPPING-END
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
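+//
+// For a concrete illustration (not part of the original grammar comment):
+// parsing the document
+//
+//	a: [1, 2]
+//
+// produces the event sequence STREAM-START, DOCUMENT-START, MAPPING-START,
+// SCALAR("a"), SEQUENCE-START, SCALAR("1"), SCALAR("2"), SEQUENCE-END,
+// MAPPING-END, DOCUMENT-END, STREAM-END.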
+
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+	if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+		return &parser.tokens[parser.tokens_head]
+	}
+	return nil
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+	parser.token_available = false
+	parser.tokens_parsed++
+	parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+	parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+	// Erase the event object.
+	*event = yaml_event_t{}
+
+	// No events after the end of the stream or error.
+	if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+		return true
+	}
+
+	// Generate the next event.
+	return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+	parser.error = yaml_PARSER_ERROR
+	parser.problem = problem
+	parser.problem_mark = problem_mark
+	return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+	parser.error = yaml_PARSER_ERROR
+	parser.context = context
+	parser.context_mark = context_mark
+	parser.problem = problem
+	parser.problem_mark = problem_mark
+	return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+	//trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+	switch parser.state {
+	case yaml_PARSE_STREAM_START_STATE:
+		return yaml_parser_parse_stream_start(parser, event)
+
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return yaml_parser_parse_document_start(parser, event, true)
+
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return yaml_parser_parse_document_start(parser, event, false)
+
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return yaml_parser_parse_document_content(parser, event)
+
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return yaml_parser_parse_document_end(parser, event)
+
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return yaml_parser_parse_node(parser, event, true, false)
+
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return yaml_parser_parse_node(parser, event, true, true)
+
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return yaml_parser_parse_node(parser, event, false, false)
+
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_block_mapping_value(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+	default:
+		panic("invalid parser state")
+	}
+}
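+
+// For illustration (not part of upstream): parsing the single-scalar document
+// "foo" walks the states roughly as
+//
+//	STREAM_START            -> STREAM-START event
+//	IMPLICIT_DOCUMENT_START -> DOCUMENT-START event, pushes DOCUMENT_END
+//	BLOCK_NODE              -> SCALAR("foo") event, pops to DOCUMENT_END
+//	DOCUMENT_END            -> DOCUMENT-END event
+//	DOCUMENT_START          -> sees the stream end and emits STREAM-END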
+
+// Parse the production:
+// stream   ::= STREAM-START implicit_document? explicit_document* STREAM-END
+//              ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+		parser.state = yaml_PARSE_END_STATE
+		*event = yaml_event_t{
+			typ:        yaml_STREAM_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+	}
+
+	return true
+}
+
+// Parse the productions:
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                                                    ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+		token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+		token.typ == yaml_DOCUMENT_START_TOKEN ||
+		token.typ == yaml_DOCUMENT_END_TOKEN ||
+		token.typ == yaml_STREAM_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		return yaml_parser_process_empty_scalar(parser, event,
+			token.start_mark)
+	}
+	return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                                     *************
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	start_mark := token.start_mark
+	end_mark := token.start_mark
+
+	implicit := true
+	if token.typ == yaml_DOCUMENT_END_TOKEN {
+		end_mark = token.end_mark
+		skip_token(parser)
+		implicit = false
+	}
+
+	parser.tag_directives = parser.tag_directives[:0]
+
+	parser.state = yaml_PARSE_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_DOCUMENT_END_EVENT,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		implicit:   implicit,
+	}
+	return true
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence    ::=
+//                          ALIAS
+//                          *****
+//                          | properties (block_content | indentless_block_sequence)?
+//                            **********  *
+//                          | block_content | indentless_block_sequence
+//                            *
+// block_node           ::= ALIAS
+//                          *****
+//                          | properties block_content?
+//                            ********** *
+//                          | block_content
+//                            *
+// flow_node            ::= ALIAS
+//                          *****
+//                          | properties flow_content?
+//                            ********** *
+//                          | flow_content
+//                            *
+// properties           ::= TAG ANCHOR? | ANCHOR TAG?
+//                          *************************
+// block_content        ::= block_collection | flow_collection | SCALAR
+//                                                               ******
+// flow_content         ::= flow_collection | SCALAR
+//                                            ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+	//defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_ALIAS_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		*event = yaml_event_t{
+			typ:        yaml_ALIAS_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+			anchor:     token.value,
+		}
+		skip_token(parser)
+		return true
+	}
+
+	start_mark := token.start_mark
+	end_mark := token.start_mark
+
+	var tag_token bool
+	var tag_handle, tag_suffix, anchor []byte
+	var tag_mark yaml_mark_t
+	if token.typ == yaml_ANCHOR_TOKEN {
+		anchor = token.value
+		start_mark = token.start_mark
+		end_mark = token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ == yaml_TAG_TOKEN {
+			tag_token = true
+			tag_handle = token.value
+			tag_suffix = token.suffix
+			tag_mark = token.start_mark
+			end_mark = token.end_mark
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	} else if token.typ == yaml_TAG_TOKEN {
+		tag_token = true
+		tag_handle = token.value
+		tag_suffix = token.suffix
+		start_mark = token.start_mark
+		tag_mark = token.start_mark
+		end_mark = token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ == yaml_ANCHOR_TOKEN {
+			anchor = token.value
+			end_mark = token.end_mark
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	var tag []byte
+	if tag_token {
+		if len(tag_handle) == 0 {
+			tag = tag_suffix
+			tag_suffix = nil
+		} else {
+			for i := range parser.tag_directives {
+				if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+					tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+					tag = append(tag, tag_suffix...)
+					break
+				}
+			}
+			if len(tag) == 0 {
+				yaml_parser_set_parser_error_context(parser,
+					"while parsing a node", start_mark,
+					"found undefined tag handle", tag_mark)
+				return false
+			}
+		}
+	}
+
+	implicit := len(tag) == 0
+	if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+		}
+		return true
+	}
+	if token.typ == yaml_SCALAR_TOKEN {
+		var plain_implicit, quoted_implicit bool
+		end_mark = token.end_mark
+		if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+			plain_implicit = true
+		} else if len(tag) == 0 {
+			quoted_implicit = true
+		}
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+
+		*event = yaml_event_t{
+			typ:             yaml_SCALAR_EVENT,
+			start_mark:      start_mark,
+			end_mark:        end_mark,
+			anchor:          anchor,
+			tag:             tag,
+			value:           token.value,
+			implicit:        plain_implicit,
+			quoted_implicit: quoted_implicit,
+			style:           yaml_style_t(token.style),
+		}
+		skip_token(parser)
+		return true
+	}
+	if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+		// [Go] Some of the events below can be merged as they differ only on style.
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+		}
+		return true
+	}
+	if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+		}
+		return true
+	}
+	if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+		}
+		return true
+	}
+	if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+		}
+		return true
+	}
+	if len(anchor) > 0 || len(tag) > 0 {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+
+		*event = yaml_event_t{
+			typ:             yaml_SCALAR_EVENT,
+			start_mark:      start_mark,
+			end_mark:        end_mark,
+			anchor:          anchor,
+			tag:             tag,
+			implicit:        implicit,
+			quoted_implicit: false,
+			style:           yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+		}
+		return true
+	}
+
+	context := "while parsing a flow node"
+	if block {
+		context = "while parsing a block node"
+	}
+	yaml_parser_set_parser_error_context(parser, context, start_mark,
+		"did not find expected node content", token.start_mark)
+	return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+//                    ********************  *********** *             *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, true, false)
+		} else {
+			parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+			return yaml_parser_process_empty_scalar(parser, event, mark)
+		}
+	}
+	if token.typ == yaml_BLOCK_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		parser.marks = parser.marks[:len(parser.marks)-1]
+
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+		skip_token(parser)
+		return true
+	}
+
+	context_mark := parser.marks[len(parser.marks)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	return yaml_parser_set_parser_error_context(parser,
+		"while parsing a block collection", context_mark,
+		"did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
+//                           *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+			token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, true, false)
+		}
+		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, mark)
+	}
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+
+	*event = yaml_event_t{
+		typ:        yaml_SEQUENCE_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.start_mark, // [Go] Shouldn't this be token.end_mark?
+	}
+	return true
+}
+
+// Parse the productions:
+// block_mapping        ::= BLOCK-MAPPING_START
+//                          *******************
+//                          ((KEY block_node_or_indentless_sequence?)?
+//                            *** *
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//
+//                          BLOCK-END
+//                          *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_KEY_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+			return yaml_parser_parse_node(parser, event, true, true)
+		} else {
+			parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+			return yaml_parser_process_empty_scalar(parser, event, mark)
+		}
+	} else if token.typ == yaml_BLOCK_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		parser.marks = parser.marks[:len(parser.marks)-1]
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+		return true
+	}
+
+	context_mark := parser.marks[len(parser.marks)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	return yaml_parser_set_parser_error_context(parser,
+		"while parsing a block mapping", context_mark,
+		"did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping        ::= BLOCK-MAPPING_START
+//
+//                          ((KEY block_node_or_indentless_sequence?)?
+//
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//                           ***** *
+//                          BLOCK-END
+//
+//
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+			return yaml_parser_parse_node(parser, event, true, true)
+		}
+		parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, mark)
+	}
+	parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence        ::= FLOW-SEQUENCE-START
+//                          *******************
+//                          (flow_sequence_entry FLOW-ENTRY)*
+//                           *                   **********
+//                          flow_sequence_entry?
+//                          *
+//                          FLOW-SEQUENCE-END
+//                          *****************
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                          *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+		if !first {
+			if token.typ == yaml_FLOW_ENTRY_TOKEN {
+				skip_token(parser)
+				token = peek_token(parser)
+				if token == nil {
+					return false
+				}
+			} else {
+				context_mark := parser.marks[len(parser.marks)-1]
+				parser.marks = parser.marks[:len(parser.marks)-1]
+				return yaml_parser_set_parser_error_context(parser,
+					"while parsing a flow sequence", context_mark,
+					"did not find expected ',' or ']'", token.start_mark)
+			}
+		}
+
+		if token.typ == yaml_KEY_TOKEN {
+			parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+			*event = yaml_event_t{
+				typ:        yaml_MAPPING_START_EVENT,
+				start_mark: token.start_mark,
+				end_mark:   token.end_mark,
+				implicit:   true,
+				style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+			}
+			skip_token(parser)
+			return true
+		} else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+
+	*event = yaml_event_t{
+		typ:        yaml_SEQUENCE_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+	}
+
+	skip_token(parser)
+	return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                      *** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_VALUE_TOKEN &&
+		token.typ != yaml_FLOW_ENTRY_TOKEN &&
+		token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+		parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+		return yaml_parser_parse_node(parser, event, false, false)
+	}
+	mark := token.end_mark
+	skip_token(parser)
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+	return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                                      ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		skip_token(parser)
+		token := peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                                                      *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+	*event = yaml_event_t{
+		typ:        yaml_MAPPING_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.start_mark, // [Go] Shouldn't this be end_mark?
+	}
+	return true
+}
+
+// Parse the productions:
+// flow_mapping         ::= FLOW-MAPPING-START
+//                          ******************
+//                          (flow_mapping_entry FLOW-ENTRY)*
+//                           *                  **********
+//                          flow_mapping_entry?
+//                          ******************
+//                          FLOW-MAPPING-END
+//                          ****************
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                          *           *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+		if !first {
+			if token.typ == yaml_FLOW_ENTRY_TOKEN {
+				skip_token(parser)
+				token = peek_token(parser)
+				if token == nil {
+					return false
+				}
+			} else {
+				context_mark := parser.marks[len(parser.marks)-1]
+				parser.marks = parser.marks[:len(parser.marks)-1]
+				return yaml_parser_set_parser_error_context(parser,
+					"while parsing a flow mapping", context_mark,
+					"did not find expected ',' or '}'", token.start_mark)
+			}
+		}
+
+		if token.typ == yaml_KEY_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+			if token.typ != yaml_VALUE_TOKEN &&
+				token.typ != yaml_FLOW_ENTRY_TOKEN &&
+				token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+				parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+				return yaml_parser_parse_node(parser, event, false, false)
+			} else {
+				parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+				return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+			}
+		} else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	*event = yaml_event_t{
+		typ:        yaml_MAPPING_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                   *                  ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if empty {
+		parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+	parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+	*event = yaml_event_t{
+		typ:        yaml_SCALAR_EVENT,
+		start_mark: mark,
+		end_mark:   mark,
+		value:      nil, // Empty
+		implicit:   true,
+		style:      yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+	}
+	return true
+}
+
+var default_tag_directives = []yaml_tag_directive_t{
+	{[]byte("!"), []byte("!")},
+	{[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
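+
+// For example (illustrative): with only the defaults above in effect, a node
+// tagged !!str resolves through the "!!" handle to "tag:yaml.org,2002:str"
+// in yaml_parser_parse_node.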
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+	version_directive_ref **yaml_version_directive_t,
+	tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+	var version_directive *yaml_version_directive_t
+	var tag_directives []yaml_tag_directive_t
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+		if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+			if version_directive != nil {
+				yaml_parser_set_parser_error(parser,
+					"found duplicate %YAML directive", token.start_mark)
+				return false
+			}
+			if token.major != 1 || token.minor != 1 {
+				yaml_parser_set_parser_error(parser,
+					"found incompatible YAML document", token.start_mark)
+				return false
+			}
+			version_directive = &yaml_version_directive_t{
+				major: token.major,
+				minor: token.minor,
+			}
+		} else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+			value := yaml_tag_directive_t{
+				handle: token.value,
+				prefix: token.prefix,
+			}
+			if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+				return false
+			}
+			tag_directives = append(tag_directives, value)
+		}
+
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+	}
+
+	for i := range default_tag_directives {
+		if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+			return false
+		}
+	}
+
+	if version_directive_ref != nil {
+		*version_directive_ref = version_directive
+	}
+	if tag_directives_ref != nil {
+		*tag_directives_ref = tag_directives
+	}
+	return true
+}
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+	for i := range parser.tag_directives {
+		if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+			if allow_duplicates {
+				return true
+			}
+			return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+		}
+	}
+
+	// [Go] I suspect the copy is unnecessary. This was likely done
+	// because there was no way to track ownership of the data.
+	value_copy := yaml_tag_directive_t{
+		handle: make([]byte, len(value.handle)),
+		prefix: make([]byte, len(value.prefix)),
+	}
+	copy(value_copy.handle, value.handle)
+	copy(value_copy.prefix, value.prefix)
+	parser.tag_directives = append(parser.tag_directives, value_copy)
+	return true
+}
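+
+// For example (illustrative, not upstream): after the directive
+//
+//	%TAG !e! tag:example.com,2000:app/
+//
+// has been appended here, a node tagged !e!foo resolves to
+// "tag:example.com,2000:app/foo" via the prefix lookup in
+// yaml_parser_parse_node.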
diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go
new file mode 100644
index 000000000..7c1f5fac3
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/readerc.go
@@ -0,0 +1,412 @@
+package yaml
+
+import (
+	"io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+	parser.error = yaml_READER_ERROR
+	parser.problem = problem
+	parser.problem_offset = offset
+	parser.problem_value = value
+	return false
+}
+
+// Byte order marks.
+const (
+	bom_UTF8    = "\xef\xbb\xbf"
+	bom_UTF16LE = "\xff\xfe"
+	bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we had enough bytes in the raw buffer.
+	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+		if !yaml_parser_update_raw_buffer(parser) {
+			return false
+		}
+	}
+
+	// Determine the encoding.
+	buf := parser.raw_buffer
+	pos := parser.raw_buffer_pos
+	avail := len(buf) - pos
+	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+		parser.encoding = yaml_UTF16LE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+		parser.encoding = yaml_UTF16BE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+		parser.encoding = yaml_UTF8_ENCODING
+		parser.raw_buffer_pos += 3
+		parser.offset += 3
+	} else {
+		parser.encoding = yaml_UTF8_ENCODING
+	}
+	return true
+}
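+
+// For example (illustrative): input beginning with the bytes 0xFF 0xFE is
+// read as UTF-16LE and 0xFE 0xFF as UTF-16BE, 0xEF 0xBB 0xBF selects UTF-8
+// with the BOM skipped, and anything else is treated as BOM-less UTF-8 with
+// no bytes consumed.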
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+	size_read := 0
+
+	// Return if the raw buffer is full.
+	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+		return true
+	}
+
+	// Return on EOF.
+	if parser.eof {
+		return true
+	}
+
+	// Move the remaining bytes in the raw buffer to the beginning.
+	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+	}
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+	parser.raw_buffer_pos = 0
+
+	// Call the read handler to fill the buffer.
+	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+	if err == io.EOF {
+		parser.eof = true
+	} else if err != nil {
+		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+	}
+	return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+	if parser.read_handler == nil {
+		panic("read handler must be set")
+	}
+
+	// [Go] This function was changed to guarantee the requested length size at EOF.
+	// The fact we need to do this is pretty awful, but the description above
+	// implies that to be the case, and there are tests that rely on it.
+
+	// If the EOF flag is set and the raw buffer is empty, do nothing.
+	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+		// [Go] ACTUALLY! Read the documentation of this function above.
+		// This is just broken. To return true, we need to have the
+		// given length in the buffer. Not doing that means every single
+		// check that calls this function to make sure the buffer has a
+		// given length would end up, in Go, panicking or, in C, accessing
+		// invalid memory.
+		//return true
+	}
+
+	// Return if the buffer contains enough characters.
+	if parser.unread >= length {
+		return true
+	}
+
+	// Determine the input encoding if it is not known yet.
+	if parser.encoding == yaml_ANY_ENCODING {
+		if !yaml_parser_determine_encoding(parser) {
+			return false
+		}
+	}
+
+	// Move the unread characters to the beginning of the buffer.
+	buffer_len := len(parser.buffer)
+	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+		buffer_len -= parser.buffer_pos
+		parser.buffer_pos = 0
+	} else if parser.buffer_pos == buffer_len {
+		buffer_len = 0
+		parser.buffer_pos = 0
+	}
+
+	// Open the whole buffer for writing, and cut it before returning.
+	parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+	// Fill the buffer until it has enough characters.
+	first := true
+	for parser.unread < length {
+
+		// Fill the raw buffer if necessary.
+		if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+			if !yaml_parser_update_raw_buffer(parser) {
+				parser.buffer = parser.buffer[:buffer_len]
+				return false
+			}
+		}
+		first = false
+
+		// Decode the raw buffer.
+	inner:
+		for parser.raw_buffer_pos != len(parser.raw_buffer) {
+			var value rune
+			var width int
+
+			raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+			// Decode the next character.
+			switch parser.encoding {
+			case yaml_UTF8_ENCODING:
+				// Decode a UTF-8 character.  Check RFC 3629
+				// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+				//
+				// The following table (taken from the RFC) is used for
+				// decoding.
+				//
+				//    Char. number range |        UTF-8 octet sequence
+				//      (hexadecimal)    |              (binary)
+				//   --------------------+------------------------------------
+				//   0000 0000-0000 007F | 0xxxxxxx
+				//   0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+				//   0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+				//   0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				//
+				// Additionally, the characters in the range 0xD800-0xDFFF
+				// are prohibited as they are reserved for use with UTF-16
+				// surrogate pairs.
+
+				// Determine the length of the UTF-8 sequence.
+				octet := parser.raw_buffer[parser.raw_buffer_pos]
+				switch {
+				case octet&0x80 == 0x00:
+					width = 1
+				case octet&0xE0 == 0xC0:
+					width = 2
+				case octet&0xF0 == 0xE0:
+					width = 3
+				case octet&0xF8 == 0xF0:
+					width = 4
+				default:
+					// The leading octet is invalid.
+					return yaml_parser_set_reader_error(parser,
+						"invalid leading UTF-8 octet",
+						parser.offset, int(octet))
+				}
+
+				// Check if the raw buffer contains an incomplete character.
+				if width > raw_unread {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-8 octet sequence",
+							parser.offset, -1)
+					}
+					break inner
+				}
+
+				// Decode the leading octet.
+				switch {
+				case octet&0x80 == 0x00:
+					value = rune(octet & 0x7F)
+				case octet&0xE0 == 0xC0:
+					value = rune(octet & 0x1F)
+				case octet&0xF0 == 0xE0:
+					value = rune(octet & 0x0F)
+				case octet&0xF8 == 0xF0:
+					value = rune(octet & 0x07)
+				default:
+					value = 0
+				}
+
+				// Check and decode the trailing octets.
+				for k := 1; k < width; k++ {
+					octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+					// Check if the octet is valid.
+					if (octet & 0xC0) != 0x80 {
+						return yaml_parser_set_reader_error(parser,
+							"invalid trailing UTF-8 octet",
+							parser.offset+k, int(octet))
+					}
+
+					// Decode the octet.
+					value = (value << 6) + rune(octet&0x3F)
+				}
+
+				// Check the length of the sequence against the value.
+				switch {
+				case width == 1:
+				case width == 2 && value >= 0x80:
+				case width == 3 && value >= 0x800:
+				case width == 4 && value >= 0x10000:
+				default:
+					return yaml_parser_set_reader_error(parser,
+						"invalid length of a UTF-8 sequence",
+						parser.offset, -1)
+				}
+
+				// Check the range of the value.
+				if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+					return yaml_parser_set_reader_error(parser,
+						"invalid Unicode character",
+						parser.offset, int(value))
+				}
+
+			case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+				var low, high int
+				if parser.encoding == yaml_UTF16LE_ENCODING {
+					low, high = 0, 1
+				} else {
+					low, high = 1, 0
+				}
+
+				// The UTF-16 encoding is not as simple as one might
+				// naively think.  Check RFC 2781
+				// (http://www.ietf.org/rfc/rfc2781.txt).
+				//
+				// Normally, two subsequent bytes describe a Unicode
+				// character.  However a special technique (called a
+				// surrogate pair) is used for specifying character
+				// values larger than 0xFFFF.
+				//
+				// A surrogate pair consists of two pseudo-characters:
+				//      high surrogate area (0xD800-0xDBFF)
+				//      low surrogate area (0xDC00-0xDFFF)
+				//
+				// The following formulas are used for decoding
+				// and encoding characters using surrogate pairs:
+				//
+				//  U  = U' + 0x10000   (0x01 00 00 <= U <= 0x10 FF FF)
+				//  U' = yyyyyyyyyyxxxxxxxxxx   (0 <= U' <= 0x0F FF FF)
+				//  W1 = 110110yyyyyyyyyy
+				//  W2 = 110111xxxxxxxxxx
+				//
+				// where U is the character value, W1 is the high surrogate
+				// area, W2 is the low surrogate area.
+
+				// Check for incomplete UTF-16 character.
+				if raw_unread < 2 {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-16 character",
+							parser.offset, -1)
+					}
+					break inner
+				}
+
+				// Get the character.
+				value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+					(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+				// Check for unexpected low surrogate area.
+				if value&0xFC00 == 0xDC00 {
+					return yaml_parser_set_reader_error(parser,
+						"unexpected low surrogate area",
+						parser.offset, int(value))
+				}
+
+				// Check for a high surrogate area.
+				if value&0xFC00 == 0xD800 {
+					width = 4
+
+					// Check for incomplete surrogate pair.
+					if raw_unread < 4 {
+						if parser.eof {
+							return yaml_parser_set_reader_error(parser,
+								"incomplete UTF-16 surrogate pair",
+								parser.offset, -1)
+						}
+						break inner
+					}
+
+					// Get the next character.
+					value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+						(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+					// Check for a low surrogate area.
+					if value2&0xFC00 != 0xDC00 {
+						return yaml_parser_set_reader_error(parser,
+							"expected low surrogate area",
+							parser.offset+2, int(value2))
+					}
+
+					// Generate the value of the surrogate pair.
+					value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+				} else {
+					width = 2
+				}
+
+			default:
+				panic("impossible")
+			}
+
+			// Check if the character is in the allowed range:
+			//      #x9 | #xA | #xD | [#x20-#x7E]               (8 bit)
+			//      | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD]    (16 bit)
+			//      | [#x10000-#x10FFFF]                        (32 bit)
+			switch {
+			case value == 0x09:
+			case value == 0x0A:
+			case value == 0x0D:
+			case value >= 0x20 && value <= 0x7E:
+			case value == 0x85:
+			case value >= 0xA0 && value <= 0xD7FF:
+			case value >= 0xE000 && value <= 0xFFFD:
+			case value >= 0x10000 && value <= 0x10FFFF:
+			default:
+				return yaml_parser_set_reader_error(parser,
+					"control characters are not allowed",
+					parser.offset, int(value))
+			}
+
+			// Move the raw pointers.
+			parser.raw_buffer_pos += width
+			parser.offset += width
+
+			// Finally put the character into the buffer.
+			if value <= 0x7F {
+				// 0000 0000-0000 007F . 0xxxxxxx
+				parser.buffer[buffer_len+0] = byte(value)
+				buffer_len += 1
+			} else if value <= 0x7FF {
+				// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+				parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+				buffer_len += 2
+			} else if value <= 0xFFFF {
+				// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+				buffer_len += 3
+			} else {
+				// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+				buffer_len += 4
+			}
+
+			parser.unread++
+		}
+
+		// On EOF, put NUL into the buffer and return.
+		if parser.eof {
+			parser.buffer[buffer_len] = 0
+			buffer_len++
+			parser.unread++
+			break
+		}
+	}
+	// [Go] Read the documentation of this function above. To return true,
+	// we need to have the given length in the buffer. Not doing that means
+	// every single check that calls this function to make sure the buffer
+	// has a given length would either panic (in Go) or access invalid
+	// memory (in C). This can happen here because the EOF case above
+	// breaks out of the loop early.
+	for buffer_len < length {
+		parser.buffer[buffer_len] = 0
+		buffer_len++
+	}
+	parser.buffer = parser.buffer[:buffer_len]
+	return true
+}
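+
+// Editorial sketch (not part of the upstream sources): the branch above
+// re-encodes every decoded rune as UTF-8, so downstream code sees UTF-8
+// regardless of the input encoding.  For example, the UTF-16LE bytes
+//
+//	0xE9, 0x00 // "é", decoded value = 0x00E9
+//
+// are written to parser.buffer as the two UTF-8 bytes
+//
+//	0xC3, 0xA9 // UTF-8 encoding of U+00E9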
diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go
new file mode 100644
index 000000000..6c151db6f
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/resolve.go
@@ -0,0 +1,258 @@
+package yaml
+
+import (
+	"encoding/base64"
+	"math"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type resolveMapItem struct {
+	value interface{}
+	tag   string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+	t := resolveTable
+	t[int('+')] = 'S' // Sign
+	t[int('-')] = 'S'
+	for _, c := range "0123456789" {
+		t[int(c)] = 'D' // Digit
+	}
+	for _, c := range "yYnNtTfFoO~" {
+		t[int(c)] = 'M' // In map
+	}
+	t[int('.')] = '.' // Float (potentially in map)
+
+	var resolveMapList = []struct {
+		v   interface{}
+		tag string
+		l   []string
+	}{
+		{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
+		{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+		{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
+		{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
+		{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+		{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
+		{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+		{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
+		{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+		{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+		{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+		{"<<", yaml_MERGE_TAG, []string{"<<"}},
+	}
+
+	m := resolveMap
+	for _, item := range resolveMapList {
+		for _, s := range item.l {
+			m[s] = resolveMapItem{item.v, item.tag}
+		}
+	}
+}
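+
+// Editorial illustration (not from upstream): with the tables built above,
+// plain (untagged) scalars are looked up in resolveMap before any numeric
+// parsing is attempted, e.g.
+//
+//	resolve("", "yes")  // yaml_BOOL_TAG, true
+//	resolve("", "NULL") // yaml_NULL_TAG, nil
+//	resolve("", ".inf") // yaml_FLOAT_TAG, math.Inf(+1)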
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+	// TODO This can easily be made faster and produce less garbage.
+	if strings.HasPrefix(tag, longTagPrefix) {
+		return "!!" + tag[len(longTagPrefix):]
+	}
+	return tag
+}
+
+func longTag(tag string) string {
+	if strings.HasPrefix(tag, "!!") {
+		return longTagPrefix + tag[2:]
+	}
+	return tag
+}
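+
+// For illustration (editorial): shortTag and longTag are inverses for
+// well-known tags and leave anything else untouched:
+//
+//	shortTag("tag:yaml.org,2002:str") // "!!str"
+//	longTag("!!str")                  // "tag:yaml.org,2002:str"
+//	shortTag("!custom")               // "!custom"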
+
+func resolvableTag(tag string) bool {
+	switch tag {
+	case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
+		return true
+	}
+	return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+	if !resolvableTag(tag) {
+		return tag, in
+	}
+
+	defer func() {
+		switch tag {
+		case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+			return
+		case yaml_FLOAT_TAG:
+			if rtag == yaml_INT_TAG {
+				switch v := out.(type) {
+				case int64:
+					rtag = yaml_FLOAT_TAG
+					out = float64(v)
+					return
+				case int:
+					rtag = yaml_FLOAT_TAG
+					out = float64(v)
+					return
+				}
+			}
+		}
+		failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+	}()
+
+	// Any data is accepted as a !!str or !!binary.
+	// Otherwise, the prefix is enough of a hint about what it might be.
+	hint := byte('N')
+	if in != "" {
+		hint = resolveTable[in[0]]
+	}
+	if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+		// Handle things we can lookup in a map.
+		if item, ok := resolveMap[in]; ok {
+			return item.tag, item.value
+		}
+
+		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+		// are purposefully unsupported here. They're still quoted on
+		// the way out for compatibility with other parsers, though.
+
+		switch hint {
+		case 'M':
+			// We've already checked the map above.
+
+		case '.':
+			// Not in the map, so maybe a normal float.
+			floatv, err := strconv.ParseFloat(in, 64)
+			if err == nil {
+				return yaml_FLOAT_TAG, floatv
+			}
+
+		case 'D', 'S':
+			// Int, float, or timestamp.
+			// Only try values as a timestamp if the value is unquoted or there's an explicit
+			// !!timestamp tag.
+			if tag == "" || tag == yaml_TIMESTAMP_TAG {
+				t, ok := parseTimestamp(in)
+				if ok {
+					return yaml_TIMESTAMP_TAG, t
+				}
+			}
+
+			plain := strings.Replace(in, "_", "", -1)
+			intv, err := strconv.ParseInt(plain, 0, 64)
+			if err == nil {
+				if intv == int64(int(intv)) {
+					return yaml_INT_TAG, int(intv)
+				} else {
+					return yaml_INT_TAG, intv
+				}
+			}
+			uintv, err := strconv.ParseUint(plain, 0, 64)
+			if err == nil {
+				return yaml_INT_TAG, uintv
+			}
+			if yamlStyleFloat.MatchString(plain) {
+				floatv, err := strconv.ParseFloat(plain, 64)
+				if err == nil {
+					return yaml_FLOAT_TAG, floatv
+				}
+			}
+			if strings.HasPrefix(plain, "0b") {
+				intv, err := strconv.ParseInt(plain[2:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, int(intv)
+					} else {
+						return yaml_INT_TAG, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+				if err == nil {
+					return yaml_INT_TAG, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0b") {
+				intv, err := strconv.ParseInt("-" + plain[3:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, int(intv)
+					} else {
+						return yaml_INT_TAG, intv
+					}
+				}
+			}
+		default:
+			panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+		}
+	}
+	return yaml_STR_TAG, in
+}
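+
+// A few more resolution outcomes for illustration (editorial; assuming the
+// rules implemented above):
+//
+//	resolve("", "0x1F")       // yaml_INT_TAG, 31 (ParseInt with base 0)
+//	resolve("", "1_000")      // yaml_INT_TAG, 1000 (underscores stripped)
+//	resolve("", "6.8523e+5")  // yaml_FLOAT_TAG, 685230.0
+//	resolve("", "2001-12-15") // yaml_TIMESTAMP_TAG, time.Time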
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+	const lineLen = 70
+	encLen := base64.StdEncoding.EncodedLen(len(s))
+	lines := encLen/lineLen + 1
+	buf := make([]byte, encLen*2+lines)
+	in := buf[0:encLen]
+	out := buf[encLen:]
+	base64.StdEncoding.Encode(in, []byte(s))
+	k := 0
+	for i := 0; i < len(in); i += lineLen {
+		j := i + lineLen
+		if j > len(in) {
+			j = len(in)
+		}
+		k += copy(out[k:], in[i:j])
+		if lines > 1 {
+			out[k] = '\n'
+			k++
+		}
+	}
+	return string(out[:k])
+}
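+
+// Usage sketch (editorial): the output is wrapped at 70 columns, so a short
+// input stays on a single line while longer inputs gain '\n' breaks:
+//
+//	encodeBase64("hi") // "aGk=" (single line, no newline added)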
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+	"2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
+	"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+	"2006-1-2 15:4:5.999999999",       // space separated with no time zone
+	"2006-1-2",                        // date only
+	// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+	// from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+	// TODO write code to check all the formats supported by
+	// http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+	// Quick check: all date formats start with YYYY-.
+	i := 0
+	for ; i < len(s); i++ {
+		if c := s[i]; c < '0' || c > '9' {
+			break
+		}
+	}
+	if i != 4 || i == len(s) || s[i] != '-' {
+		return time.Time{}, false
+	}
+	for _, format := range allowedTimestampFormats {
+		if t, err := time.Parse(format, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
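+
+// Illustrative inputs (editorial):
+//
+//	parseTimestamp("2001-12-14")          // ok: date only
+//	parseTimestamp("2001-12-14 21:59:43") // ok: space separated, no zone
+//	parseTimestamp("14-12-2001")          // not ok: must start with YYYY-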
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 000000000..077fd1dd2
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2696 @@
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html).  We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, by contrast,
+// is a straightforward implementation of a recursive-descent parser (or
+// LL(1) parser, as it is usually called).
+//
+// Only two aspects of Scanning might be called "clever"; the rest is quite
+// straightforward.  They are "block collection start" and "simple keys",
+// and both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented.  We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !   !foo
+//      %TAG    !yaml!  tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+//      1. An implicit document:
+//
+//          'a scalar'
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          SCALAR("a scalar",single-quoted)
+//          STREAM-END
+//
+//      2. An explicit document:
+//
+//          ---
+//          'a scalar'
+//          ...
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          DOCUMENT-START
+//          SCALAR("a scalar",single-quoted)
+//          DOCUMENT-END
+//          STREAM-END
+//
+//      3. Several documents in a stream:
+//
+//          'a scalar'
+//          ---
+//          'another scalar'
+//          ---
+//          'yet another scalar'
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          SCALAR("a scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("another scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("yet another scalar",single-quoted)
+//          STREAM-END
+//
+// We have already introduced the SCALAR token above.  The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+//      ALIAS(anchor)
+//      ANCHOR(anchor)
+//      TAG(handle,suffix)
+//      SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+//      1. A recursive sequence:
+//
+//          &A [ *A ]
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          ANCHOR("A")
+//          FLOW-SEQUENCE-START
+//          ALIAS("A")
+//          FLOW-SEQUENCE-END
+//          STREAM-END
+//
+//      2. A tagged scalar:
+//
+//          !!float "3.14"  # A good approximation.
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          TAG("!!","float")
+//          SCALAR("3.14",double-quoted)
+//          STREAM-END
+//
+//      3. Various scalar styles:
+//
+//          --- # Implicit empty plain scalars do not produce tokens.
+//          --- a plain scalar
+//          --- 'a single-quoted scalar'
+//          --- "a double-quoted scalar"
+//          --- |-
+//            a literal scalar
+//          --- >-
+//            a folded
+//            scalar
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          DOCUMENT-START
+//          DOCUMENT-START
+//          SCALAR("a plain scalar",plain)
+//          DOCUMENT-START
+//          SCALAR("a single-quoted scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("a double-quoted scalar",double-quoted)
+//          DOCUMENT-START
+//          SCALAR("a literal scalar",literal)
+//          DOCUMENT-START
+//          SCALAR("a folded scalar",folded)
+//          STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+//      FLOW-SEQUENCE-START
+//      FLOW-SEQUENCE-END
+//      FLOW-MAPPING-START
+//      FLOW-MAPPING-END
+//      FLOW-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively.  FLOW-ENTRY represents the ',' indicator.  Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+//      1. A flow sequence:
+//
+//          [item 1, item 2, item 3]
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          FLOW-SEQUENCE-START
+//          SCALAR("item 1",plain)
+//          FLOW-ENTRY
+//          SCALAR("item 2",plain)
+//          FLOW-ENTRY
+//          SCALAR("item 3",plain)
+//          FLOW-SEQUENCE-END
+//          STREAM-END
+//
+//      2. A flow mapping:
+//
+//          {
+//              a simple key: a value,  # Note that the KEY token is produced.
+//              ? a complex key: another value,
+//          }
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          FLOW-MAPPING-START
+//          KEY
+//          SCALAR("a simple key",plain)
+//          VALUE
+//          SCALAR("a value",plain)
+//          FLOW-ENTRY
+//          KEY
+//          SCALAR("a complex key",plain)
+//          VALUE
+//          SCALAR("another value",plain)
+//          FLOW-ENTRY
+//          FLOW-MAPPING-END
+//          STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator.  Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+//      BLOCK-SEQUENCE-START
+//      BLOCK-MAPPING-START
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote an indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes an indentation decrease that ends a block
+// collection (cf. the DEDENT token in Python).  However, YAML has some syntax
+// peculiarities that make detecting these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' respectively.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+//      1. Block sequences:
+//
+//          - item 1
+//          - item 2
+//          -
+//            - item 3.1
+//            - item 3.2
+//          -
+//            key 1: value 1
+//            key 2: value 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-ENTRY
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 3.1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 3.2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+//      2. Block mappings:
+//
+//          a simple key: a value   # The KEY token is produced here.
+//          ? a complex key
+//          : another value
+//          a mapping:
+//            key 1: value 1
+//            key 2: value 2
+//          a sequence:
+//            - item 1
+//            - item 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("a simple key",plain)
+//          VALUE
+//          SCALAR("a value",plain)
+//          KEY
+//          SCALAR("a complex key",plain)
+//          VALUE
+//          SCALAR("another value",plain)
+//          KEY
+//          SCALAR("a mapping",plain)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          KEY
+//          SCALAR("a sequence",plain)
+//          VALUE
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line.  If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line.  The following examples
+// illustrate this case:
+//
+//      1. Collections in a sequence:
+//
+//          - - item 1
+//            - item 2
+//          - key 1: value 1
+//            key 2: value 2
+//          - ? complex key
+//            : complex value
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("complex key")
+//          VALUE
+//          SCALAR("complex value")
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+//      2. Collections in a mapping:
+//
+//          ? a sequence
+//          : - item 1
+//            - item 2
+//          ? a mapping
+//          : key 1: value 1
+//            key 2: value 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("a sequence",plain)
+//          VALUE
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          KEY
+//          SCALAR("a mapping",plain)
+//          VALUE
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping.  In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+//      key:
+//      - item 1    # BLOCK-SEQUENCE-START is NOT produced here.
+//      - item 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key",plain)
+//      VALUE
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+	// [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+	return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
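+
+// Editorial note: cache is typically used in its inlined form throughout
+// this file, guaranteeing lookahead before the buffer is indexed:
+//
+//	if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+//		return false
+//	}
+//	// parser.buffer[parser.buffer_pos] and [parser.buffer_pos+1] are safe now.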
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+	parser.mark.index++
+	parser.mark.column++
+	parser.unread--
+	parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+	if is_crlf(parser.buffer, parser.buffer_pos) {
+		parser.mark.index += 2
+		parser.mark.column = 0
+		parser.mark.line++
+		parser.unread -= 2
+		parser.buffer_pos += 2
+	} else if is_break(parser.buffer, parser.buffer_pos) {
+		parser.mark.index++
+		parser.mark.column = 0
+		parser.mark.line++
+		parser.unread--
+		parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+	}
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+	w := width(parser.buffer[parser.buffer_pos])
+	if w == 0 {
+		panic("invalid character sequence")
+	}
+	if len(s) == 0 {
+		s = make([]byte, 0, 32)
+	}
+	if w == 1 && len(s)+w <= cap(s) {
+		s = s[:len(s)+1]
+		s[len(s)-1] = parser.buffer[parser.buffer_pos]
+		parser.buffer_pos++
+	} else {
+		s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+		parser.buffer_pos += w
+	}
+	parser.mark.index++
+	parser.mark.column++
+	parser.unread--
+	return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+	buf := parser.buffer
+	pos := parser.buffer_pos
+	switch {
+	case buf[pos] == '\r' && buf[pos+1] == '\n':
+		// CR LF . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 2
+		parser.mark.index++
+		parser.unread--
+	case buf[pos] == '\r' || buf[pos] == '\n':
+		// CR|LF . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 1
+	case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+		// NEL . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 2
+	case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+		// LS|PS . LS|PS
+		s = append(s, buf[parser.buffer_pos:pos+3]...)
+		parser.buffer_pos += 3
+	default:
+		return s
+	}
+	parser.mark.index++
+	parser.mark.column = 0
+	parser.mark.line++
+	parser.unread--
+	return s
+}
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+	// Erase the token object.
+	*token = yaml_token_t{} // [Go] Is this necessary?
+
+	// No tokens after STREAM-END or error.
+	if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+		return true
+	}
+
+	// Ensure that the tokens queue contains enough tokens.
+	if !parser.token_available {
+		if !yaml_parser_fetch_more_tokens(parser) {
+			return false
+		}
+	}
+
+	// Fetch the next token from the queue.
+	*token = parser.tokens[parser.tokens_head]
+	parser.tokens_head++
+	parser.tokens_parsed++
+	parser.token_available = false
+
+	if token.typ == yaml_STREAM_END_TOKEN {
+		parser.stream_end_produced = true
+	}
+	return true
+}
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+	parser.error = yaml_SCANNER_ERROR
+	parser.context = context
+	parser.context_mark = context_mark
+	parser.problem = problem
+	parser.problem_mark = parser.mark
+	return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+	context := "while parsing a tag"
+	if directive {
+		context = "while parsing a %TAG directive"
+	}
+	return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+	pargs := append([]interface{}{"+++"}, args...)
+	fmt.Println(pargs...)
+	pargs = append([]interface{}{"---"}, args...)
+	return func() { fmt.Println(pargs...) }
+}
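+
+// trace is a debugging helper; a usage sketch (editorial):
+//
+//	defer trace("scan")() // prints "+++ scan" now, "--- scan" on return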
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+	// While we need more tokens to fetch, do it.
+	for {
+		// Check if we really need to fetch more tokens.
+		need_more_tokens := false
+
+		if parser.tokens_head == len(parser.tokens) {
+			// Queue is empty.
+			need_more_tokens = true
+		} else {
+			// Check if any potential simple key may occupy the head position.
+			if !yaml_parser_stale_simple_keys(parser) {
+				return false
+			}
+
+			for i := range parser.simple_keys {
+				simple_key := &parser.simple_keys[i]
+				if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
+					need_more_tokens = true
+					break
+				}
+			}
+		}
+
+		// We are finished.
+		if !need_more_tokens {
+			break
+		}
+		// Fetch the next token.
+		if !yaml_parser_fetch_next_token(parser) {
+			return false
+		}
+	}
+
+	parser.token_available = true
+	return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+	// Ensure that the buffer is initialized.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// Check if we just started scanning.  Fetch STREAM-START then.
+	if !parser.stream_start_produced {
+		return yaml_parser_fetch_stream_start(parser)
+	}
+
+	// Eat whitespaces and comments until we reach the next token.
+	if !yaml_parser_scan_to_next_token(parser) {
+		return false
+	}
+
+	// Remove obsolete potential simple keys.
+	if !yaml_parser_stale_simple_keys(parser) {
+		return false
+	}
+
+	// Check the indentation level against the current column.
+	if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+		return false
+	}
+
+	// Ensure that the buffer contains at least 4 characters.  4 is the length
+	// of the longest indicators ('--- ' and '... ').
+	if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+		return false
+	}
+
+	// Is it the end of the stream?
+	if is_z(parser.buffer, parser.buffer_pos) {
+		return yaml_parser_fetch_stream_end(parser)
+	}
+
+	// Is it a directive?
+	if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+		return yaml_parser_fetch_directive(parser)
+	}
+
+	buf := parser.buffer
+	pos := parser.buffer_pos
+
+	// Is it the document start indicator?
+	if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+	}
+
+	// Is it the document end indicator?
+	if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+	}
+
+	// Is it the flow sequence start indicator?
+	if buf[pos] == '[' {
+		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+	}
+
+	// Is it the flow mapping start indicator?
+	if parser.buffer[parser.buffer_pos] == '{' {
+		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+	}
+
+	// Is it the flow sequence end indicator?
+	if parser.buffer[parser.buffer_pos] == ']' {
+		return yaml_parser_fetch_flow_collection_end(parser,
+			yaml_FLOW_SEQUENCE_END_TOKEN)
+	}
+
+	// Is it the flow mapping end indicator?
+	if parser.buffer[parser.buffer_pos] == '}' {
+		return yaml_parser_fetch_flow_collection_end(parser,
+			yaml_FLOW_MAPPING_END_TOKEN)
+	}
+
+	// Is it the flow entry indicator?
+	if parser.buffer[parser.buffer_pos] == ',' {
+		return yaml_parser_fetch_flow_entry(parser)
+	}
+
+	// Is it the block entry indicator?
+	if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+		return yaml_parser_fetch_block_entry(parser)
+	}
+
+	// Is it the key indicator?
+	if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_key(parser)
+	}
+
+	// Is it the value indicator?
+	if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_value(parser)
+	}
+
+	// Is it an alias?
+	if parser.buffer[parser.buffer_pos] == '*' {
+		return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+	}
+
+	// Is it an anchor?
+	if parser.buffer[parser.buffer_pos] == '&' {
+		return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+	}
+
+	// Is it a tag?
+	if parser.buffer[parser.buffer_pos] == '!' {
+		return yaml_parser_fetch_tag(parser)
+	}
+
+	// Is it a literal scalar?
+	if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+		return yaml_parser_fetch_block_scalar(parser, true)
+	}
+
+	// Is it a folded scalar?
+	if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+		return yaml_parser_fetch_block_scalar(parser, false)
+	}
+
+	// Is it a single-quoted scalar?
+	if parser.buffer[parser.buffer_pos] == '\'' {
+		return yaml_parser_fetch_flow_scalar(parser, true)
+	}
+
+	// Is it a double-quoted scalar?
+	if parser.buffer[parser.buffer_pos] == '"' {
+		return yaml_parser_fetch_flow_scalar(parser, false)
+	}
+
+	// Is it a plain scalar?
+	//
+	// A plain scalar may start with any non-blank character except
+	//
+	//      '-', '?', ':', ',', '[', ']', '{', '}',
+	//      '#', '&', '*', '!', '|', '>', '\'', '\"',
+	//      '%', '@', '`'.
+	//
+	// In the block context (and, for the '-' indicator, in the flow context
+	// too), it may also start with the characters
+	//
+	//      '-', '?', ':'
+	//
+	// if it is followed by a non-space character.
+	//
+	// The last rule is more restrictive than the specification requires.
+	// [Go] Make this logic more reasonable.
+	//switch parser.buffer[parser.buffer_pos] {
+	//case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+	//}
+	if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+		parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+		parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+		parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+		parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+		parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+		parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+		(parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+		(parser.flow_level == 0 &&
+			(parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+			!is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_plain_scalar(parser)
+	}
+
+	// If we don't determine the token type so far, it is an error.
+	return yaml_parser_set_scanner_error(parser,
+		"while scanning for the next token", parser.mark,
+		"found character that cannot start any token")
+}
+
+// Check the list of potential simple keys and remove the positions that
+// cannot contain simple keys anymore.
+func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
+	// Check for a potential simple key for each flow level.
+	for i := range parser.simple_keys {
+		simple_key := &parser.simple_keys[i]
+
+		// The specification requires that a simple key
+		//
+		//  - is limited to a single line,
+		//  - is shorter than 1024 characters.
+		if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
+
+			// Check if the potential simple key to be removed is required.
+			if simple_key.required {
+				return yaml_parser_set_scanner_error(parser,
+					"while scanning a simple key", simple_key.mark,
+					"could not find expected ':'")
+			}
+			simple_key.possible = false
+		}
+	}
+	return true
+}
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+	// A simple key is required at the current position if the scanner is in
+	// the block context and the current column coincides with the indentation
+	// level.
+
+	required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	//
+	// If the current position may start a simple key, save it.
+	//
+	if parser.simple_key_allowed {
+		simple_key := yaml_simple_key_t{
+			possible:     true,
+			required:     required,
+			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		}
+		simple_key.mark = parser.mark
+
+		if !yaml_parser_remove_simple_key(parser) {
+			return false
+		}
+		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+	}
+	return true
+}
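+
+// Editorial example of the bookkeeping above: with 3 tokens already handed
+// to the parser and 2 still queued, a key saved now gets
+//
+//	token_number = parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head) // 3 + 2 = 5
+//
+// which yaml_parser_fetch_value later uses to insert the KEY token at queue
+// position token_number - tokens_parsed.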
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+	i := len(parser.simple_keys) - 1
+	if parser.simple_keys[i].possible {
+		// If the key is required, it is an error.
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+	}
+	// Remove the key from the stack.
+	parser.simple_keys[i].possible = false
+	return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// Increase the flow level.
+	parser.flow_level++
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+	}
+	return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level.  In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+
+		// Create a token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        typ,
+			start_mark: mark,
+			end_mark:   mark,
+		}
+		if number > -1 {
+			number -= parser.tokens_parsed
+		}
+		yaml_insert_token(parser, number, &token)
+	}
+	return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column.  For each indentation level, append
+// the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	// Loop through the indentation levels in the stack.
+	for parser.indent > column {
+		// Create a token and append it to the queue.
+		token := yaml_token_t{
+			typ:        yaml_BLOCK_END_TOKEN,
+			start_mark: parser.mark,
+			end_mark:   parser.mark,
+		}
+		yaml_insert_token(parser, -1, &token)
+
+		// Pop the indentation level.
+		parser.indent = parser.indents[len(parser.indents)-1]
+		parser.indents = parser.indents[:len(parser.indents)-1]
+	}
+	return true
+}
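+
+// Editorial sketch of the indent stack for the document
+//
+//	key:
+//	  - item
+//
+// roll_indent pushes the old levels (-1, then 0), raising parser.indent to
+// 0 for the mapping and 2 for the sequence; at stream end, unroll_indent
+// pops them back and emits one BLOCK-END token per level popped.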
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+	// Set the initial indentation.
+	parser.indent = -1
+
+	// Initialize the simple key stack.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// A simple key is allowed at the beginning of the stream.
+	parser.simple_key_allowed = true
+
+	// We have started.
+	parser.stream_start_produced = true
+
+	// Create the STREAM-START token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_START_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+		encoding:   parser.encoding,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+	// Force new line.
+	if parser.mark.column != 0 {
+		parser.mark.column = 0
+		parser.mark.line++
+	}
+
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the STREAM-END token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_END_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+	token := yaml_token_t{}
+	if !yaml_parser_scan_directive(parser, &token) {
+		return false
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+	start_mark := parser.mark
+
+	skip(parser)
+	skip(parser)
+	skip(parser)
+
+	end_mark := parser.mark
+
+	// Create the DOCUMENT-START or DOCUMENT-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// The indicators '[' and '{' may start a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// Increase the flow level.
+	if !yaml_parser_increase_flow_level(parser) {
+		return false
+	}
+
+	// A simple key may follow the indicators '[' and '{'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset any potential simple key on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Decrease the flow level.
+	if !yaml_parser_decrease_flow_level(parser) {
+		return false
+	}
+
+	// No simple keys after the indicators ']' and '}'.
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after ','.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+	// Check if the scanner is in the block context.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new entry.
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+		// Add the BLOCK-SEQUENCE-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report it, since the Parser is
+		// able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
+	if simple_key.possible {
+		// Create the KEY token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        yaml_KEY_TOKEN,
+			start_mark: simple_key.mark,
+			end_mark:   simple_key.mark,
+		}
+		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+		// In the block context, we may need to add the BLOCK-MAPPING-START token.
+		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+			simple_key.token_number,
+			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+			return false
+		}
+
+		// Remove the simple key.
+		simple_key.possible = false
+
+		// A simple key cannot follow another simple key.
+		parser.simple_key_allowed = false
+
+	} else {
+		// The ':' indicator follows a complex key.
+
+		// In the block context, extra checks are required.
+		if parser.flow_level == 0 {
+
+			// Check if we are allowed to start a complex value.
+			if !parser.simple_key_allowed {
+				return yaml_parser_set_scanner_error(parser, "", parser.mark,
+					"mapping values are not allowed in this context")
+			}
+
+			// Add the BLOCK-MAPPING-START token if needed.
+			if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+				return false
+			}
+		}
+
+		// Simple keys after ':' are allowed in the block context.
+		parser.simple_key_allowed = parser.flow_level == 0
+	}
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the VALUE token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_VALUE_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// An anchor or an alias could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow an anchor or an alias.
+	parser.simple_key_allowed = false
+
+	// Create the ALIAS or ANCHOR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_anchor(parser, &token, typ) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+	// A tag could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a tag.
+	parser.simple_key_allowed = false
+
+	// Create the TAG token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_tag(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+	// Remove any potential simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// A simple key may follow a block scalar.
+	parser.simple_key_allowed = true
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+	// A flow scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a flow scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+	// A plain scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a plain scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_plain_scalar(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+	// Loop until the next token is found.
+	for {
+		// Allow the BOM mark to start a line.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+		}
+
+		// Eat whitespaces.
+		// Tabs are allowed:
+		//  - in the flow context
+		//  - in the block context, but not at the beginning of the line or
+		//  after '-', '?', or ':' (complex value).
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Eat a comment until a line break.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			for !is_breakz(parser.buffer, parser.buffer_pos) {
+				skip(parser)
+				if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+					return false
+				}
+			}
+		}
+
+		// If it is a line break, eat it.
+		if is_break(parser.buffer, parser.buffer_pos) {
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+			skip_line(parser)
+
+			// In the block context, a new line may start a simple key.
+			if parser.flow_level == 0 {
+				parser.simple_key_allowed = true
+			}
+		} else {
+			break // We have found a token.
+		}
+	}
+
+	return true
+}
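+
+// Editorial walk-through of the loop above for the input
+//
+//	"  # note\n\tkey: v"
+//
+// the two spaces and the comment are eaten, the line break re-enables
+// simple keys (block context), and scanning then stops at the tab, which
+// is deliberately not skipped at the start of a line.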
+
+// Scan a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+//      %YAML    1.1    # a comment \n
+//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+	// Eat '%'.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Scan the directive name.
+	var name []byte
+	if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+		return false
+	}
+
+	// Is it a YAML directive?
+	if bytes.Equal(name, []byte("YAML")) {
+		// Scan the VERSION directive value.
+		var major, minor int8
+		if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a VERSION-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_VERSION_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			major:      major,
+			minor:      minor,
+		}
+
+		// Is it a TAG directive?
+	} else if bytes.Equal(name, []byte("TAG")) {
+		// Scan the TAG directive value.
+		var handle, prefix []byte
+		if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a TAG-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_TAG_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			value:      handle,
+			prefix:     prefix,
+		}
+
+		// Unknown directive.
+	} else {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unknown directive name")
+		return false
+	}
+
+	// Eat the rest of the line including any comments.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	if parser.buffer[parser.buffer_pos] == '#' {
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//       ^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//       ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+	// Consume the directive name.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	var s []byte
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the name is empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "could not find expected directive name")
+		return false
+	}
+
+	// Check for a blank character after the name.
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unexpected non-alphabetical character")
+		return false
+	}
+	*name = s
+	return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//           ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Consume the major version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+		return false
+	}
+
+	// Eat '.'.
+	if parser.buffer[parser.buffer_pos] != '.' {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected digit or '.' character")
+	}
+
+	skip(parser)
+
+	// Consume the minor version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+		return false
+	}
+	return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//              ^
+//      %YAML   1.1     # a comment \n
+//                ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+	// Repeat while the next character is digit.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	var value, length int8
+	for is_digit(parser.buffer, parser.buffer_pos) {
+		// Check if the number is too long.
+		length++
+		if length > max_number_length {
+			return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+				start_mark, "found extremely long version number")
+		}
+		value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the number was present.
+	if length == 0 {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected version number")
+	}
+	*number = value
+	return true
+}
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+	var handle_value, prefix_value []byte
+
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Scan a handle.
+	if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+		return false
+	}
+
+	// Expect a whitespace.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blank(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+			start_mark, "did not find expected whitespace")
+		return false
+	}
+
+	// Eat whitespaces.
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Scan a prefix.
+	if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+		return false
+	}
+
+	// Expect a whitespace or line break.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+			start_mark, "did not find expected whitespace or line break")
+		return false
+	}
+
+	*handle = handle_value
+	*prefix = prefix_value
+	return true
+}
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+	var s []byte
+
+	// Eat the indicator character.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the value.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	end_mark := parser.mark
+
+	/*
+	 * Check if length of the anchor is greater than 0 and it is followed by
+	 * a whitespace character or one of the indicators:
+	 *
+	 *      '?', ':', ',', ']', '}', '%', '@', '`'.
+	 */
+
+	if len(s) == 0 ||
+		!(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+			parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+			parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+			parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+			parser.buffer[parser.buffer_pos] == '`') {
+		context := "while scanning an alias"
+		if typ == yaml_ANCHOR_TOKEN {
+			context = "while scanning an anchor"
+		}
+		yaml_parser_set_scanner_error(parser, context, start_mark,
+			"did not find expected alphabetic or numeric character")
+		return false
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+	}
+
+	return true
+}
+
+/*
+ * Scan a TAG token.
+ */
+
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+	var handle, suffix []byte
+
+	start_mark := parser.mark
+
+	// Check if the tag is in the canonical form.
+	if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+		return false
+	}
+
+	if parser.buffer[parser.buffer_pos+1] == '<' {
+		// Keep the handle as ''
+
+		// Eat '!<'
+		skip(parser)
+		skip(parser)
+
+		// Consume the tag value.
+		if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+			return false
+		}
+
+		// Check for '>' and eat it.
+		if parser.buffer[parser.buffer_pos] != '>' {
+			yaml_parser_set_scanner_error(parser, "while scanning a tag",
+				start_mark, "did not find the expected '>'")
+			return false
+		}
+
+		skip(parser)
+	} else {
+		// The tag has either the '!suffix' or the '!handle!suffix' form.
+
+		// First, try to scan a handle.
+		if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+			return false
+		}
+
+		// Check if it is, indeed, a handle.
+		if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+			// Scan the suffix now.
+			if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+				return false
+			}
+		} else {
+			// It wasn't a handle after all.  Scan the rest of the tag.
+			if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+				return false
+			}
+
+			// Set the handle to '!'.
+			handle = []byte{'!'}
+
+			// A special case: the '!' tag.  Set the handle to '' and the
+			// suffix to '!'.
+			if len(suffix) == 0 {
+				handle, suffix = suffix, handle
+			}
+		}
+	}
+
+	// Check the character which ends the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a tag",
+			start_mark, "did not find expected whitespace or line break")
+		return false
+	}
+
+	end_mark := parser.mark
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_TAG_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      handle,
+		suffix:     suffix,
+	}
+	return true
+}
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+	// Check the initial '!' character.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if parser.buffer[parser.buffer_pos] != '!' {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected '!'")
+		return false
+	}
+
+	var s []byte
+
+	// Copy the '!' character.
+	s = read(parser, s)
+
+	// Copy all subsequent alphabetical and numerical characters.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the trailing character is '!' and copy it.
+	if parser.buffer[parser.buffer_pos] == '!' {
+		s = read(parser, s)
+	} else {
+		// It's either the '!' tag or not really a tag handle.  If it's a %TAG
+		// directive, it's an error.  If it's a tag token, it must be part of a URI.
+		if directive && string(s) != "!" {
+			yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find expected '!'")
+			return false
+		}
+	}
+
+	*handle = s
+	return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+	//size_t length = head ? strlen((char *)head) : 0
+	var s []byte
+	hasTag := len(head) > 0
+
+	// Copy the head if needed.
+	//
+	// Note that we don't copy the leading '!' character.
+	if len(head) > 1 {
+		s = append(s, head[1:]...)
+	}
+
+	// Scan the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// The set of characters that may appear in URI is as follows:
+	//
+	//      '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+	//      '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+	//      '%'.
+	// [Go] Convert this into more reasonable logic.
+	for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+		parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+		parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+		parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+		parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+		parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+		parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+		parser.buffer[parser.buffer_pos] == '%' {
+		// Check if it is a URI-escape sequence.
+		if parser.buffer[parser.buffer_pos] == '%' {
+			if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+				return false
+			}
+		} else {
+			s = read(parser, s)
+		}
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		hasTag = true
+	}
+
+	if !hasTag {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected tag URI")
+		return false
+	}
+	*uri = s
+	return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+	// Decode the required number of characters.
+	w := 1024
+	for w > 0 {
+		// Check for a URI-escaped octet.
+		if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+			return false
+		}
+
+		if !(parser.buffer[parser.buffer_pos] == '%' &&
+			is_hex(parser.buffer, parser.buffer_pos+1) &&
+			is_hex(parser.buffer, parser.buffer_pos+2)) {
+			return yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find URI escaped octet")
+		}
+
+		// Get the octet.
+		octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+		// If it is the leading octet, determine the length of the UTF-8 sequence.
+		if w == 1024 {
+			w = width(octet)
+			if w == 0 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect leading UTF-8 octet")
+			}
+		} else {
+			// Check if the trailing octet is correct.
+			if octet&0xC0 != 0x80 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect trailing UTF-8 octet")
+			}
+		}
+
+		// Copy the octet and move the pointers.
+		*s = append(*s, octet)
+		skip(parser)
+		skip(parser)
+		skip(parser)
+		w--
+	}
+	return true
+}
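+
+// What follows is an illustrative, self-contained sketch (not part of the
+// upstream source) of the same decoding rule: a run of '%XX' escapes is
+// turned into raw octets, the leading octet fixes the UTF-8 sequence width,
+// and every continuation octet must match 10xxxxxx.
+func yaml_uri_unescape_sketch(in string) ([]byte, bool) {
+	hexval := func(c byte) (int, bool) {
+		switch {
+		case c >= '0' && c <= '9':
+			return int(c - '0'), true
+		case c >= 'A' && c <= 'F':
+			return int(c-'A') + 10, true
+		case c >= 'a' && c <= 'f':
+			return int(c-'a') + 10, true
+		}
+		return 0, false
+	}
+	var out []byte
+	w := 0 // octets still expected in the current UTF-8 sequence
+	for i := 0; i+2 < len(in); i += 3 {
+		hi, ok1 := hexval(in[i+1])
+		lo, ok2 := hexval(in[i+2])
+		if in[i] != '%' || !ok1 || !ok2 {
+			return nil, false
+		}
+		octet := byte(hi<<4 + lo)
+		if w == 0 {
+			// Leading octet: derive the sequence width, as width() does.
+			switch {
+			case octet&0x80 == 0x00:
+				w = 1
+			case octet&0xE0 == 0xC0:
+				w = 2
+			case octet&0xF0 == 0xE0:
+				w = 3
+			case octet&0xF8 == 0xF0:
+				w = 4
+			default:
+				return nil, false // incorrect leading octet
+			}
+		} else if octet&0xC0 != 0x80 {
+			return nil, false // incorrect trailing octet
+		}
+		out = append(out, octet)
+		w--
+	}
+	// For example, "%C3%A9" decodes to the octets 0xC3 0xA9, i.e. UTF-8 'é'.
+	return out, w == 0
+}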
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+	// Eat the indicator '|' or '>'.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Scan the additional block scalar indicators.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// Check for a chomping indicator.
+	var chomping, increment int
+	if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+		// Set the chomping method and eat the indicator.
+		if parser.buffer[parser.buffer_pos] == '+' {
+			chomping = +1
+		} else {
+			chomping = -1
+		}
+		skip(parser)
+
+		// Check for an indentation indicator.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if is_digit(parser.buffer, parser.buffer_pos) {
+			// Check that the indentation is greater than 0.
+			if parser.buffer[parser.buffer_pos] == '0' {
+				yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+					start_mark, "found an indentation indicator equal to 0")
+				return false
+			}
+
+			// Get the indentation level and eat the indicator.
+			increment = as_digit(parser.buffer, parser.buffer_pos)
+			skip(parser)
+		}
+
+	} else if is_digit(parser.buffer, parser.buffer_pos) {
+		// Do the same as above, but in the opposite order.
+
+		if parser.buffer[parser.buffer_pos] == '0' {
+			yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found an indentation indicator equal to 0")
+			return false
+		}
+		increment = as_digit(parser.buffer, parser.buffer_pos)
+		skip(parser)
+
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+			if parser.buffer[parser.buffer_pos] == '+' {
+				chomping = +1
+			} else {
+				chomping = -1
+			}
+			skip(parser)
+		}
+	}
+
+	// Eat whitespaces and comments to the end of the line.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+	if parser.buffer[parser.buffer_pos] == '#' {
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	end_mark := parser.mark
+
+	// Set the indentation level if it was specified.
+	var indent int
+	if increment > 0 {
+		if parser.indent >= 0 {
+			indent = parser.indent + increment
+		} else {
+			indent = increment
+		}
+	}
+
+	// Scan the leading line breaks and determine the indentation level if needed.
+	var s, leading_break, trailing_breaks []byte
+	if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+		return false
+	}
+
+	// Scan the block scalar content.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	var leading_blank, trailing_blank bool
+	for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+		// We are at the beginning of a non-empty line.
+
+		// Is it a trailing whitespace?
+		trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+		// Check if we need to fold the leading line break.
+		if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+			// Do we need to join the lines by space?
+			if len(trailing_breaks) == 0 {
+				s = append(s, ' ')
+			}
+		} else {
+			s = append(s, leading_break...)
+		}
+		leading_break = leading_break[:0]
+
+		// Append the remaining line breaks.
+		s = append(s, trailing_breaks...)
+		trailing_breaks = trailing_breaks[:0]
+
+		// Is it a leading whitespace?
+		leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+		// Consume the current line.
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			s = read(parser, s)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+
+		leading_break = read_line(parser, leading_break)
+
+		// Eat the following indentation spaces and line breaks.
+		if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+			return false
+		}
+	}
+
+	// Chomp the tail.
+	if chomping != -1 {
+		s = append(s, leading_break...)
+	}
+	if chomping == 1 {
+		s = append(s, trailing_breaks...)
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_LITERAL_SCALAR_STYLE,
+	}
+	if !literal {
+		token.style = yaml_FOLDED_SCALAR_STYLE
+	}
+	return true
+}
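+
+// As a brief illustration of the indicators handled above (added for
+// clarity, not part of the upstream source), given the input
+//
+//     key: |2+
+//       text
+//
+// the scanner records increment = 2 (content is indented two columns past
+// the parent node) and chomping = +1 (keep all trailing line breaks);
+// '-' instead sets chomping = -1 (strip trailing breaks), the default of 0
+// clips to a single final break, and '>' in place of '|' selects the
+// folded rather than the literal style.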
+
+// Scan indentation spaces and line breaks for a block scalar.  Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+	*end_mark = parser.mark
+
+	// Eat the indentation spaces and line breaks.
+	max_indent := 0
+	for {
+		// Eat the indentation spaces.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+		if parser.mark.column > max_indent {
+			max_indent = parser.mark.column
+		}
+
+		// Check for a tab character messing the indentation.
+		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found a tab character where an indentation space is expected")
+		}
+
+		// Have we found a non-empty line?
+		if !is_break(parser.buffer, parser.buffer_pos) {
+			break
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		// [Go] Should really be returning breaks instead.
+		*breaks = read_line(parser, *breaks)
+		*end_mark = parser.mark
+	}
+
+	// Determine the indentation level if needed.
+	if *indent == 0 {
+		*indent = max_indent
+		if *indent < parser.indent+1 {
+			*indent = parser.indent + 1
+		}
+		if *indent < 1 {
+			*indent = 1
+		}
+	}
+	return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+	// Eat the left quote.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the content of the quoted scalar.
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	for {
+		// Check that there are no document indicators at the beginning of the line.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected document indicator")
+			return false
+		}
+
+		// Check for EOF.
+		if is_z(parser.buffer, parser.buffer_pos) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected end of stream")
+			return false
+		}
+
+		// Consume non-blank characters.
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
+					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "found invalid Unicode character escape code")
+						return false
+					}
+					if value <= 0x7F {
+						s = append(s, byte(value))
+					} else if value <= 0x7FF {
+						s = append(s, byte(0xC0+(value>>6)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else if value <= 0xFFFF {
+						s = append(s, byte(0xE0+(value>>12)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else {
+						s = append(s, byte(0xF0+(value>>18)))
+						s = append(s, byte(0x80+((value>>12)&0x3F)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					}
+
+					// Advance the pointer.
+					for k := 0; k < code_length; k++ {
+						skip(parser)
+					}
+				}
+			} else {
+				// It is a non-escaped non-blank character.
+				s = read(parser, s)
+			}
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		// Check if we are at the end of the scalar.
+		if single {
+			if parser.buffer[parser.buffer_pos] == '\'' {
+				break
+			}
+		} else {
+			if parser.buffer[parser.buffer_pos] == '"' {
+				break
+			}
+		}
+
+		// Consume blank characters.
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is the first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Join the whitespaces or fold line breaks.
+		if leading_blanks {
+			// Do we need to fold line breaks?
+			if len(leading_break) > 0 && leading_break[0] == '\n' {
+				if len(trailing_breaks) == 0 {
+					s = append(s, ' ')
+				} else {
+					s = append(s, trailing_breaks...)
+				}
+			} else {
+				s = append(s, leading_break...)
+				s = append(s, trailing_breaks...)
+			}
+			trailing_breaks = trailing_breaks[:0]
+			leading_break = leading_break[:0]
+		} else {
+			s = append(s, whitespaces...)
+			whitespaces = whitespaces[:0]
+		}
+	}
+
+	// Eat the right quote.
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_SINGLE_QUOTED_SCALAR_STYLE,
+	}
+	if !single {
+		token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	return true
+}
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	var leading_blanks bool
+	var indent = parser.indent + 1
+
+	start_mark := parser.mark
+	end_mark := parser.mark
+
+	// Consume the content of the plain scalar.
+	for {
+		// Check for a document indicator.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			break
+		}
+
+		// Check for a comment.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			break
+		}
+
+		// Consume non-blank characters.
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+			// Check for indicators that may end a plain scalar.
+			if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+				(parser.flow_level > 0 &&
+					(parser.buffer[parser.buffer_pos] == ',' ||
+						parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+						parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+						parser.buffer[parser.buffer_pos] == '}')) {
+				break
+			}
+
+			// Check if we need to join whitespaces and breaks.
+			if leading_blanks || len(whitespaces) > 0 {
+				if leading_blanks {
+					// Do we need to fold line breaks?
+					if leading_break[0] == '\n' {
+						if len(trailing_breaks) == 0 {
+							s = append(s, ' ')
+						} else {
+							s = append(s, trailing_breaks...)
+						}
+					} else {
+						s = append(s, leading_break...)
+						s = append(s, trailing_breaks...)
+					}
+					trailing_breaks = trailing_breaks[:0]
+					leading_break = leading_break[:0]
+					leading_blanks = false
+				} else {
+					s = append(s, whitespaces...)
+					whitespaces = whitespaces[:0]
+				}
+			}
+
+			// Copy the character.
+			s = read(parser, s)
+
+			end_mark = parser.mark
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Is it the end?
+		if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+			break
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+
+				// Check for tab characters that abuse indentation.
+				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+						start_mark, "found a tab character that violates indentation")
+					return false
+				}
+
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is the first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Check indentation level.
+		if parser.flow_level == 0 && parser.mark.column < indent {
+			break
+		}
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_PLAIN_SCALAR_STYLE,
+	}
+
+	// Note that we change the 'simple_key_allowed' flag.
+	if leading_blanks {
+		parser.simple_key_allowed = true
+	}
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go
new file mode 100644
index 000000000..4c45e660a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/sorter.go
@@ -0,0 +1,113 @@
+package yaml
+
+import (
+	"reflect"
+	"unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int      { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+	a := l[i]
+	b := l[j]
+	ak := a.Kind()
+	bk := b.Kind()
+	for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+		a = a.Elem()
+		ak = a.Kind()
+	}
+	for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+		b = b.Elem()
+		bk = b.Kind()
+	}
+	af, aok := keyFloat(a)
+	bf, bok := keyFloat(b)
+	if aok && bok {
+		if af != bf {
+			return af < bf
+		}
+		if ak != bk {
+			return ak < bk
+		}
+		return numLess(a, b)
+	}
+	if ak != reflect.String || bk != reflect.String {
+		return ak < bk
+	}
+	ar, br := []rune(a.String()), []rune(b.String())
+	for i := 0; i < len(ar) && i < len(br); i++ {
+		if ar[i] == br[i] {
+			continue
+		}
+		al := unicode.IsLetter(ar[i])
+		bl := unicode.IsLetter(br[i])
+		if al && bl {
+			return ar[i] < br[i]
+		}
+		if al || bl {
+			return bl
+		}
+		var ai, bi int
+		var an, bn int64
+		if ar[i] == '0' || br[i] == '0' {
+			for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+				if ar[j] != '0' {
+					an = 1
+					bn = 1
+					break
+				}
+			}
+		}
+		for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+			an = an*10 + int64(ar[ai]-'0')
+		}
+		for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+			bn = bn*10 + int64(br[bi]-'0')
+		}
+		if an != bn {
+			return an < bn
+		}
+		if ai != bi {
+			return ai < bi
+		}
+		return ar[i] < br[i]
+	}
+	return len(ar) < len(br)
+}
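+
+// For illustration (not part of the upstream source): under this ordering,
+// numeric keys sort before string keys, and digit runs inside strings are
+// compared by value rather than byte-wise, so 2, 10, "a2", "a10", "b"
+// already appear in sorted order ("a2" < "a10" because 2 < 10, whereas a
+// plain byte comparison would put "a10" first).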
+
+// keyFloat returns a float value for v if it is a number or bool,
+// and reports whether it is one.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+	switch v.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return float64(v.Int()), true
+	case reflect.Float32, reflect.Float64:
+		return v.Float(), true
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return float64(v.Uint()), true
+	case reflect.Bool:
+		if v.Bool() {
+			return 1, true
+		}
+		return 0, true
+	}
+	return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must necessarily have the same kind.
+func numLess(a, b reflect.Value) bool {
+	switch a.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return a.Int() < b.Int()
+	case reflect.Float32, reflect.Float64:
+		return a.Float() < b.Float()
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return a.Uint() < b.Uint()
+	case reflect.Bool:
+		return !a.Bool() && b.Bool()
+	}
+	panic("not a number")
+}
diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go
new file mode 100644
index 000000000..a2dde608c
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/writerc.go
@@ -0,0 +1,26 @@
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+	emitter.error = yaml_WRITER_ERROR
+	emitter.problem = problem
+	return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+	if emitter.write_handler == nil {
+		panic("write handler not set")
+	}
+
+	// Check if the buffer is empty.
+	if emitter.buffer_pos == 0 {
+		return true
+	}
+
+	if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+	}
+	emitter.buffer_pos = 0
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go
new file mode 100644
index 000000000..de85aa4cd
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yaml.go
@@ -0,0 +1,466 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+//   https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+	Key, Value interface{}
+}
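+
+// For example (an illustrative sketch, not part of the upstream docs):
+//
+//     s := yaml.MapSlice{
+//         {Key: "b", Value: 1},
+//         {Key: "a", Value: 2},
+//     }
+//     out, _ := yaml.Marshal(s)
+//     // out is "b: 1\na: 2\n"; item order is kept, keys are not sorted.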
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, false)
+}
+
+// UnmarshalStrict is like Unmarshal except that any fields that are found
+// in the data that do not have corresponding struct members, or mapping
+// keys that are duplicates, will result in
+// an error.
+func UnmarshalStrict(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, true)
+}
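+
+// For example (an illustrative sketch, not part of the upstream docs):
+//
+//     type T struct {
+//         A int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)       // the unknown key "b" is ignored
+//     yaml.UnmarshalStrict([]byte("a: 1\nb: 2"), &t) // fails: no field for "b"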
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	strict bool
+	parser *parser
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		parser: newParserFromReader(r),
+	}
+}
+
+// SetStrict sets whether strict decoding behaviour is enabled when
+// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
+func (dec *Decoder) SetStrict(strict bool) {
+	dec.strict = strict
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder(dec.strict)
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
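+
+// A typical streaming loop (an illustrative sketch, not part of the
+// upstream docs) decodes documents until Decode reports io.EOF:
+//
+//     dec := yaml.NewDecoder(r)
+//     for {
+//         var doc map[string]interface{}
+//         if err := dec.Decode(&doc); err == io.EOF {
+//             break // no more documents in the stream
+//         } else if err != nil {
+//             return err
+//         }
+//         // use doc
+//     }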
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+	defer handleErr(&err)
+	d := newDecoder(strict)
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty    Only include the field if it's not set to the zero
+//                  value for the type or to empty slices or maps.
+//                  Zero valued structs will be omitted if all their public
+//                  fields are zero, unless they implement an IsZero
+//                  method (see the IsZeroer interface type), in which
+//                  case the field will be included if that method returns true.
+//
+//     flow         Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+//     inline       Inline the field, which must be a struct or a map,
+//                  causing all of its fields or keys to be processed as if
+//                  they were part of the outer struct. For maps, keys must
+//                  not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
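+
+// For example (an illustrative sketch, not part of the upstream docs),
+// combining the flow and inline options described above:
+//
+//     type Point struct {
+//         X, Y int
+//     }
+//     type T struct {
+//         P Point          `yaml:"p,flow"`
+//         M map[string]int `yaml:",inline"`
+//     }
+//     yaml.Marshal(&T{Point{1, 2}, map[string]int{"k": 3}})
+//     // Returns roughly "p: {x: 1, y: 2}\nk: 3\n"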
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent document will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
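+
+// A minimal multi-document sketch (illustrative, not part of the upstream
+// docs):
+//
+//     var buf bytes.Buffer
+//     enc := yaml.NewEncoder(&buf)
+//     enc.Encode(map[string]int{"a": 1})
+//     enc.Encode(map[string]int{"b": 2})
+//     enc.Close()
+//     // buf now holds "a: 1\n---\nb: 2\n"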
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
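+
+// For example (an illustrative sketch, not part of the upstream docs):
+//
+//     type T struct {
+//         A int
+//         B string
+//     }
+//     var t T
+//     err := yaml.Unmarshal([]byte("a: oops\nb: ok"), &t)
+//     // err is a *yaml.TypeError naming the failed field "a",
+//     // while t.B has still been decoded to "ok".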
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	Flow      bool
+	// Id holds the unique field identifier, so we can cheaply
+	// check for field duplicates without maintaining an extra map.
+	Id int
+
+	// Inline holds the field index if the field is part of an inlined struct.
+	Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+	fieldMapMutex.RLock()
+	sinfo, found := structMap[st]
+	fieldMapMutex.RUnlock()
+	if found {
+		return sinfo, nil
+	}
+
+	n := st.NumField()
+	fieldsMap := make(map[string]fieldInfo)
+	fieldsList := make([]fieldInfo, 0, n)
+	inlineMap := -1
+	for i := 0; i != n; i++ {
+		field := st.Field(i)
+		if field.PkgPath != "" && !field.Anonymous {
+			continue // Private field
+		}
+
+		info := fieldInfo{Num: i}
+
+		tag := field.Tag.Get("yaml")
+		if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+			tag = string(field.Tag)
+		}
+		if tag == "-" {
+			continue
+		}
+
+		inline := false
+		fields := strings.Split(tag, ",")
+		if len(fields) > 1 {
+			for _, flag := range fields[1:] {
+				switch flag {
+				case "omitempty":
+					info.OmitEmpty = true
+				case "flow":
+					info.Flow = true
+				case "inline":
+					inline = true
+				default:
+					return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
+				}
+			}
+			tag = fields[0]
+		}
+
+		if inline {
+			switch field.Type.Kind() {
+			case reflect.Map:
+				if inlineMap >= 0 {
+					return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+				}
+				if field.Type.Key() != reflect.TypeOf("") {
+					return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+				}
+				inlineMap = info.Num
+			case reflect.Struct:
+				sinfo, err := getStructInfo(field.Type)
+				if err != nil {
+					return nil, err
+				}
+				for _, finfo := range sinfo.FieldsList {
+					if _, found := fieldsMap[finfo.Key]; found {
+						msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+						return nil, errors.New(msg)
+					}
+					if finfo.Inline == nil {
+						finfo.Inline = []int{i, finfo.Num}
+					} else {
+						finfo.Inline = append([]int{i}, finfo.Inline...)
+					}
+					finfo.Id = len(fieldsList)
+					fieldsMap[finfo.Key] = finfo
+					fieldsList = append(fieldsList, finfo)
+				}
+			default:
+				//return nil, errors.New("Option ,inline needs a struct value or map field")
+				return nil, errors.New("Option ,inline needs a struct value field")
+			}
+			continue
+		}
+
+		if tag != "" {
+			info.Key = tag
+		} else {
+			info.Key = strings.ToLower(field.Name)
+		}
+
+		if _, found = fieldsMap[info.Key]; found {
+			msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+			return nil, errors.New(msg)
+		}
+
+		info.Id = len(fieldsList)
+		fieldsList = append(fieldsList, info)
+		fieldsMap[info.Key] = info
+	}
+
+	sinfo = &structInfo{
+		FieldsMap:  fieldsMap,
+		FieldsList: fieldsList,
+		InlineMap:  inlineMap,
+	}
+
+	fieldMapMutex.Lock()
+	structMap[st] = sinfo
+	fieldMapMutex.Unlock()
+	return sinfo, nil
+}
+
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+	IsZero() bool
+}
+
+func isZero(v reflect.Value) bool {
+	kind := v.Kind()
+	if z, ok := v.Interface().(IsZeroer); ok {
+		if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+			return true
+		}
+		return z.IsZero()
+	}
+	switch kind {
+	case reflect.String:
+		return len(v.String()) == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	case reflect.Slice:
+		return v.Len() == 0
+	case reflect.Map:
+		return v.Len() == 0
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Struct:
+		vt := v.Type()
+		for i := v.NumField() - 1; i >= 0; i-- {
+			if vt.Field(i).PkgPath != "" {
+				continue // Private field
+			}
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
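+
+// For illustration (a sketch, not part of the upstream source), a type can
+// opt in to omitempty handling by implementing IsZeroer:
+//
+//     type Span struct{ Start, End int }
+//
+//     func (s Span) IsZero() bool { return s.Start == 0 && s.End == 0 }
+//
+//     type T struct {
+//         S Span `yaml:"s,omitempty"` // omitted whenever IsZero reports true
+//     }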
diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go
new file mode 100644
index 000000000..e25cee563
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlh.go
@@ -0,0 +1,738 @@
+package yaml
+
+import (
+	"fmt"
+	"io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+	major int8 // The major version number.
+	minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+	handle []byte // The tag handle.
+	prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+	// Let the parser choose the encoding.
+	yaml_ANY_ENCODING yaml_encoding_t = iota
+
+	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
+	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+	// Let the parser choose the break type.
+	yaml_ANY_BREAK yaml_break_t = iota
+
+	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
+	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
+	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+	// No error is produced.
+	yaml_NO_ERROR yaml_error_type_t = iota
+
+	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
+	yaml_READER_ERROR   // Cannot read or decode the input stream.
+	yaml_SCANNER_ERROR  // Cannot scan the input stream.
+	yaml_PARSER_ERROR   // Cannot parse the input stream.
+	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+	yaml_WRITER_ERROR   // Cannot write to the output stream.
+	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+	index  int // The position index.
+	line   int // The position line.
+	column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+	switch tt {
+	case yaml_NO_TOKEN:
+		return "yaml_NO_TOKEN"
+	case yaml_STREAM_START_TOKEN:
+		return "yaml_STREAM_START_TOKEN"
+	case yaml_STREAM_END_TOKEN:
+		return "yaml_STREAM_END_TOKEN"
+	case yaml_VERSION_DIRECTIVE_TOKEN:
+		return "yaml_VERSION_DIRECTIVE_TOKEN"
+	case yaml_TAG_DIRECTIVE_TOKEN:
+		return "yaml_TAG_DIRECTIVE_TOKEN"
+	case yaml_DOCUMENT_START_TOKEN:
+		return "yaml_DOCUMENT_START_TOKEN"
+	case yaml_DOCUMENT_END_TOKEN:
+		return "yaml_DOCUMENT_END_TOKEN"
+	case yaml_BLOCK_SEQUENCE_START_TOKEN:
+		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+	case yaml_BLOCK_MAPPING_START_TOKEN:
+		return "yaml_BLOCK_MAPPING_START_TOKEN"
+	case yaml_BLOCK_END_TOKEN:
+		return "yaml_BLOCK_END_TOKEN"
+	case yaml_FLOW_SEQUENCE_START_TOKEN:
+		return "yaml_FLOW_SEQUENCE_START_TOKEN"
+	case yaml_FLOW_SEQUENCE_END_TOKEN:
+		return "yaml_FLOW_SEQUENCE_END_TOKEN"
+	case yaml_FLOW_MAPPING_START_TOKEN:
+		return "yaml_FLOW_MAPPING_START_TOKEN"
+	case yaml_FLOW_MAPPING_END_TOKEN:
+		return "yaml_FLOW_MAPPING_END_TOKEN"
+	case yaml_BLOCK_ENTRY_TOKEN:
+		return "yaml_BLOCK_ENTRY_TOKEN"
+	case yaml_FLOW_ENTRY_TOKEN:
+		return "yaml_FLOW_ENTRY_TOKEN"
+	case yaml_KEY_TOKEN:
+		return "yaml_KEY_TOKEN"
+	case yaml_VALUE_TOKEN:
+		return "yaml_VALUE_TOKEN"
+	case yaml_ALIAS_TOKEN:
+		return "yaml_ALIAS_TOKEN"
+	case yaml_ANCHOR_TOKEN:
+		return "yaml_ANCHOR_TOKEN"
+	case yaml_TAG_TOKEN:
+		return "yaml_TAG_TOKEN"
+	case yaml_SCALAR_TOKEN:
+		return "yaml_SCALAR_TOKEN"
+	}
+	return ""
+}
+
+// The token structure.
+type yaml_token_t struct {
+	// The token type.
+	typ yaml_token_type_t
+
+	// The start/end of the token.
+	start_mark, end_mark yaml_mark_t
+
+	// The stream encoding (for yaml_STREAM_START_TOKEN).
+	encoding yaml_encoding_t
+
+	// The alias/anchor/scalar value or tag/tag directive handle
+	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+	value []byte
+
+	// The tag suffix (for yaml_TAG_TOKEN).
+	suffix []byte
+
+	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+	prefix []byte
+
+	// The scalar style (for yaml_SCALAR_TOKEN).
+	style yaml_scalar_style_t
+
+	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+	major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+	// An empty event.
+	yaml_NO_EVENT yaml_event_type_t = iota
+
+	yaml_STREAM_START_EVENT   // A STREAM-START event.
+	yaml_STREAM_END_EVENT     // A STREAM-END event.
+	yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+	yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
+	yaml_ALIAS_EVENT          // An ALIAS event.
+	yaml_SCALAR_EVENT         // A SCALAR event.
+	yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
+	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
+	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
+)
+
+var eventStrings = []string{
+	yaml_NO_EVENT:             "none",
+	yaml_STREAM_START_EVENT:   "stream start",
+	yaml_STREAM_END_EVENT:     "stream end",
+	yaml_DOCUMENT_START_EVENT: "document start",
+	yaml_DOCUMENT_END_EVENT:   "document end",
+	yaml_ALIAS_EVENT:          "alias",
+	yaml_SCALAR_EVENT:         "scalar",
+	yaml_SEQUENCE_START_EVENT: "sequence start",
+	yaml_SEQUENCE_END_EVENT:   "sequence end",
+	yaml_MAPPING_START_EVENT:  "mapping start",
+	yaml_MAPPING_END_EVENT:    "mapping end",
+}
+
+func (e yaml_event_type_t) String() string {
+	if e < 0 || int(e) >= len(eventStrings) {
+		return fmt.Sprintf("unknown event %d", e)
+	}
+	return eventStrings[e]
+}
+
+// The event structure.
+type yaml_event_t struct {
+
+	// The event type.
+	typ yaml_event_type_t
+
+	// The start and end of the event.
+	start_mark, end_mark yaml_mark_t
+
+	// The document encoding (for yaml_STREAM_START_EVENT).
+	encoding yaml_encoding_t
+
+	// The version directive (for yaml_DOCUMENT_START_EVENT).
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+	tag_directives []yaml_tag_directive_t
+
+	// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+	anchor []byte
+
+	// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+	tag []byte
+
+	// The scalar value (for yaml_SCALAR_EVENT).
+	value []byte
+
+	// Is the document start/end indicator implicit, or the tag optional?
+	// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+	implicit bool
+
+	// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+	quoted_implicit bool
+
+	// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+	style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t     { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t   { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+	yaml_NULL_TAG      = "tag:yaml.org,2002:null"      // The tag !!null with the only possible value: null.
+	yaml_BOOL_TAG      = "tag:yaml.org,2002:bool"      // The tag !!bool with the values: true and false.
+	yaml_STR_TAG       = "tag:yaml.org,2002:str"       // The tag !!str for string values.
+	yaml_INT_TAG       = "tag:yaml.org,2002:int"       // The tag !!int for integer values.
+	yaml_FLOAT_TAG     = "tag:yaml.org,2002:float"     // The tag !!float for float values.
+	yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+	yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+	yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mappings.
+
+	// Not in original libyaml.
+	yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+	yaml_MERGE_TAG  = "tag:yaml.org,2002:merge"
+
+	yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
+	yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+	yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+	// An empty node.
+	yaml_NO_NODE yaml_node_type_t = iota
+
+	yaml_SCALAR_NODE   // A scalar node.
+	yaml_SEQUENCE_NODE // A sequence node.
+	yaml_MAPPING_NODE  // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+	key   int // The id of the key node.
+	value int // The id of the value node.
+}
+
+// The node structure.
+type yaml_node_t struct {
+	typ yaml_node_type_t // The node type.
+	tag []byte           // The node tag.
+
+	// The node data.
+
+	// The scalar parameters (for yaml_SCALAR_NODE).
+	scalar struct {
+		value  []byte              // The scalar value.
+		length int                 // The length of the scalar value.
+		style  yaml_scalar_style_t // The scalar style.
+	}
+
+	// The sequence parameters (for yaml_SEQUENCE_NODE).
+	sequence struct {
+		items_data []yaml_node_item_t    // The stack of sequence items.
+		style      yaml_sequence_style_t // The sequence style.
+	}
+
+	// The mapping parameters (for yaml_MAPPING_NODE).
+	mapping struct {
+		pairs_data  []yaml_node_pair_t   // The stack of mapping pairs (key, value).
+		pairs_start *yaml_node_pair_t    // The beginning of the stack.
+		pairs_end   *yaml_node_pair_t    // The end of the stack.
+		pairs_top   *yaml_node_pair_t    // The top of the stack.
+		style       yaml_mapping_style_t // The mapping style.
+	}
+
+	start_mark yaml_mark_t // The beginning of the node.
+	end_mark   yaml_mark_t // The end of the node.
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+	// The document nodes.
+	nodes []yaml_node_t
+
+	// The version directive.
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives.
+	tag_directives_data  []yaml_tag_directive_t
+	tag_directives_start int // The beginning of the tag directives list.
+	tag_directives_end   int // The end of the tag directives list.
+
+	start_implicit int // Is the document start indicator implicit?
+	end_implicit   int // Is the document end indicator implicit?
+
+	// The start/end of the document.
+	start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. It should write no more than len(buffer) bytes into buffer and
+// return the number of bytes written.
+//
+// [in]   parser   The parser whose input is being read.
+// [out]  buffer   The buffer to fill with bytes from the source.
+//
+// On success, the handler returns the number of bytes read and a nil error.
+// At the end of the input it returns io.EOF; any other non-nil error aborts
+// parsing.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
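+
+// An illustrative sketch, not part of the upstream libyaml port: a minimal
+// read handler serving input from an in-memory byte slice, assuming the
+// parser's input/input_pos fields defined below. The handlers actually used
+// at runtime are installed when the parser's input is set.
+func sketch_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+	if parser.input_pos == len(parser.input) {
+		// The in-memory input is exhausted: report EOF.
+		return 0, io.EOF
+	}
+	// Copy as much pending input as fits into the buffer.
+	n = copy(buffer, parser.input[parser.input_pos:])
+	parser.input_pos += n
+	return n, nil
+}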
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+	possible     bool        // Is a simple key possible?
+	required     bool        // Is a simple key required?
+	token_number int         // The number of the token.
+	mark         yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota // Expect STREAM-START.
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE                               // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+	case yaml_PARSE_END_STATE:
+		return "yaml_PARSE_END_STATE"
+	}
+	return "<unknown parser state>"
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+	anchor []byte      // The anchor.
+	index  int         // The node id.
+	mark   yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+	// Error handling
+
+	error yaml_error_type_t // Error type.
+
+	problem string // Error description.
+
+	// The position at which the problem occurred.
+	problem_offset int
+	problem_value  int
+	problem_mark   yaml_mark_t
+
+	// The error context.
+	context      string
+	context_mark yaml_mark_t
+
+	// Reader stuff
+
+	read_handler yaml_read_handler_t // Read handler.
+
+	input_reader io.Reader // io.Reader input source, if any.
+	input        []byte    // In-memory input data, if any.
+	input_pos    int       // The current position in the in-memory input.
+
+	eof bool // EOF flag: set when the input is exhausted.
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position in the buffer.
+
+	unread int // The number of unread characters in the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position in the raw buffer.
+
+	encoding yaml_encoding_t // The input encoding.
+
+	offset int         // The offset of the current position (in bytes).
+	mark   yaml_mark_t // The mark of the current position.
+
+	// Scanner stuff
+
+	stream_start_produced bool // Have we started to scan the input stream?
+	stream_end_produced   bool // Have we reached the end of the input stream?
+
+	flow_level int // The number of unclosed '[' and '{' indicators.
+
+	tokens          []yaml_token_t // The tokens queue.
+	tokens_head     int            // The head of the tokens queue.
+	tokens_parsed   int            // The number of tokens fetched from the queue.
+	token_available bool           // Does the tokens queue contain a token ready for dequeueing?
+
+	indent  int   // The current indentation level.
+	indents []int // The indentation levels stack.
+
+	simple_key_allowed bool                // May a simple key occur at the current position?
+	simple_keys        []yaml_simple_key_t // The stack of simple keys.
+
+	// Parser stuff
+
+	state          yaml_parser_state_t    // The current parser state.
+	states         []yaml_parser_state_t  // The parser states stack.
+	marks          []yaml_mark_t          // The stack of marks.
+	tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+	// Dumper stuff
+
+	aliases []yaml_alias_data_t // The alias data.
+
+	document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. It should write all of buffer to the output.
+//
+// [in]   emitter   The emitter whose output is being written.
+// [in]   buffer    The bytes to be written.
+//
+// On success, the handler returns nil; a non-nil error aborts emitting.
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
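+
+// An illustrative sketch, not part of the upstream libyaml port: a minimal
+// write handler forwarding the flushed bytes to the emitter's io.Writer,
+// assuming the output_writer field defined below. The handlers actually used
+// at runtime are installed when the emitter's output is set.
+func sketch_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+	// io.Writer.Write reports a non-nil error on any short write, so the
+	// error alone is enough to signal failure to the emitter.
+	_, err := emitter.output_writer.Write(buffer)
+	return err
+}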
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+	// Expect STREAM-START.
+	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+	yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
+	yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
+	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
+	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
+	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
+	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
+	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
+	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
+	yaml_EMIT_END_STATE                        // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+	// Error handling
+
+	error   yaml_error_type_t // Error type.
+	problem string            // Error description.
+
+	// Writer stuff
+
+	write_handler yaml_write_handler_t // Write handler.
+
+	output_buffer *[]byte   // In-memory output destination, if any.
+	output_writer io.Writer // io.Writer output destination, if any.
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position in the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position in the raw buffer.
+
+	encoding yaml_encoding_t // The stream encoding.
+
+	// Emitter stuff
+
+	canonical   bool         // Is the output in the canonical style?
+	best_indent int          // The number of indentation spaces.
+	best_width  int          // The preferred width of the output lines.
+	unicode     bool         // Allow unescaped non-ASCII characters?
+	line_break  yaml_break_t // The preferred line break.
+
+	state  yaml_emitter_state_t   // The current emitter state.
+	states []yaml_emitter_state_t // The stack of states.
+
+	events      []yaml_event_t // The event queue.
+	events_head int            // The head of the event queue.
+
+	indents []int // The stack of indentation levels.
+
+	tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+	indent int // The current indentation level.
+
+	flow_level int // The current flow level.
+
+	root_context       bool // Is it the document root context?
+	sequence_context   bool // Is it a sequence context?
+	mapping_context    bool // Is it a mapping context?
+	simple_key_context bool // Is it a simple mapping key context?
+
+	line       int  // The current line.
+	column     int  // The current column.
+	whitespace bool // Was the last character whitespace?
+	indention  bool // Was the last character an indentation character (' ', '-', '?', ':')?
+	open_ended bool // Is an explicit document end indicator required?
+
+	// Anchor analysis.
+	anchor_data struct {
+		anchor []byte // The anchor value.
+		alias  bool   // Is it an alias?
+	}
+
+	// Tag analysis.
+	tag_data struct {
+		handle []byte // The tag handle.
+		suffix []byte // The tag suffix.
+	}
+
+	// Scalar analysis.
+	scalar_data struct {
+		value                 []byte              // The scalar value.
+		multiline             bool                // Does the scalar contain line breaks?
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
+		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
+		style                 yaml_scalar_style_t // The output style.
+	}
+
+	// Dumper stuff
+
+	opened bool // Has the stream been opened?
+	closed bool // Has the stream been closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // Has the node been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 000000000..8110ce3c3
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+	output_buffer_size = 128
+
+	// The size of the output raw buffer.
+	// It should be possible to encode the whole output buffer.
+	output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+	// The size of other stacks and queues.
+	initial_stack_size  = 16
+	initial_queue_size  = 16
+	initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+	return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+	bi := b[i]
+	if bi >= 'A' && bi <= 'F' {
+		return int(bi) - 'A' + 10
+	}
+	if bi >= 'a' && bi <= 'f' {
+		return int(bi) - 'a' + 10
+	}
+	return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+	return b[i] <= 0x7F
+}
+
+// Check if the character at the specified position can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+	return ((b[i] == 0x0A) || // . == #x0A
+		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+		(b[i] > 0xC2 && b[i] < 0xED) ||
+		(b[i] == 0xED && b[i+1] < 0xA0) ||
+		(b[i] == 0xEE) ||
+		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+	return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+	return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+	return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+	//return is_space(b, i) || is_tab(b, i)
+	return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+	return (b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+// Check if the character at the specified position is CR followed by LF.
+func is_crlf(b []byte, i int) bool {
+	return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+	//return is_break(b, i) || is_z(b, i)
+	return (        // is_break:
+	b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		// is_z:
+		b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+	//return is_space(b, i) || is_breakz(b, i)
+	return ( // is_space:
+	b[i] == ' ' ||
+		// is_breakz:
+		b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+	//return is_blank(b, i) || is_breakz(b, i)
+	return ( // is_blank:
+	b[i] == ' ' || b[i] == '\t' ||
+		// is_breakz:
+		b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+	// Don't replace these by a switch without first
+	// confirming that it is being inlined.
+	if b&0x80 == 0x00 {
+		return 1
+	}
+	if b&0xE0 == 0xC0 {
+		return 2
+	}
+	if b&0xF0 == 0xE0 {
+		return 3
+	}
+	if b&0xF8 == 0xF0 {
+		return 4
+	}
+	return 0
+}
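+
+// An illustrative sketch, not part of the upstream libyaml port: width is
+// how the scanner steps through a buffer one UTF-8 character at a time. A
+// returned width of 0 marks a byte that cannot start a character.
+func sketch_count_chars(b []byte) int {
+	count := 0
+	for i := 0; i < len(b); {
+		w := width(b[i])
+		if w == 0 {
+			// Invalid leading byte: stop counting.
+			break
+		}
+		i += w
+		count++
+	}
+	return count
+}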