
bump dependencies

Jesse Duffield 2018-08-09 14:41:58 +10:00
parent ace8544512
commit dbf65a422a
377 changed files with 98351 additions and 2 deletions

204
Gopkg.lock generated

@ -1,6 +1,21 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
digest = "1:de4a74b504df31145ffa8ca0c4edbffa2f3eb7f466753962184611b618fa5981"
name = "github.com/emirpasic/gods"
packages = [
"containers",
"lists",
"lists/arraylist",
"trees",
"trees/binaryheap",
"utils",
]
pruneopts = "NUT"
revision = "f6c17b524822278a87e3b3bd809fec33b51f5b46"
version = "v1.9.0"
[[projects]]
digest = "1:ade392a843b2035effb4b4a2efa2c3bab3eb29b992e98bacf9c898b0ecb54e45"
name = "github.com/fatih/color"
@ -17,6 +32,14 @@
pruneopts = "NUT"
revision = "604e922904d35e97f98a774db7881f049cd8d970"
[[projects]]
branch = "master"
digest = "1:62fe3a7ea2050ecbd753a71889026f83d73329337ada66325cbafd5dea5f713d"
name = "github.com/jbenet/go-context"
packages = ["io"]
pruneopts = "NUT"
revision = "d14ea06fba99483203c19d92cfcd13ebe73135f4"
[[projects]]
branch = "master"
digest = "1:82b3bbc50ba7b6065b0229ebf0fc990a76e47410acd510294e435aeb3523293e"
@ -25,6 +48,14 @@
pruneopts = "NUT"
revision = "3c923f53ac9952af649af04a067405843558d56f"
[[projects]]
digest = "1:8021af4dcbd531ae89433c8c3a6520e51064114aaf8eb1724c3cf911c497c9ba"
name = "github.com/kevinburke/ssh_config"
packages = ["."]
pruneopts = "NUT"
revision = "9fc7bb800b555d63157c65a904c86a2cc7b4e795"
version = "0.4"
[[projects]]
digest = "1:08c231ec84231a7e23d67e4b58f975e1423695a32467a362ee55a803f9de8061"
name = "github.com/mattn/go-colorable"
@ -49,6 +80,14 @@
revision = "9e777a8366cce605130a531d2cd6363d07ad7317"
version = "v0.0.2"
[[projects]]
branch = "master"
digest = "1:a4df73029d2c42fabcb6b41e327d2f87e685284ec03edf76921c267d9cfc9c23"
name = "github.com/mitchellh/go-homedir"
packages = ["."]
pruneopts = "NUT"
revision = "58046073cbffe2f25d425fe1331102f55cf719de"
[[projects]]
branch = "master"
digest = "1:34d9354c2c5d916c05864327553047df59fc10e86ff1f408e4136eba0a25a5ec"
@ -57,6 +96,35 @@
pruneopts = "NUT"
revision = "5c94acc5e6eb520f1bcd183974e01171cc4c23b3"
[[projects]]
digest = "1:cf254277d898b713195cc6b4a3fac8bf738b9f1121625df27843b52b267eec6c"
name = "github.com/pelletier/go-buffruneio"
packages = ["."]
pruneopts = "NUT"
revision = "c37440a7cf42ac63b919c752ca73a85067e05992"
version = "v0.2.0"
[[projects]]
digest = "1:d917313f309bda80d27274d53985bc65651f81a5b66b820749ac7f8ef061fd04"
name = "github.com/sergi/go-diff"
packages = ["diffmatchpatch"]
pruneopts = "NUT"
revision = "1744e2970ca51c86172c8190fadad617561ed6e7"
version = "v1.0.0"
[[projects]]
digest = "1:ccca1dcd18bc54e23b517a3c5babeff2e3924a7d8fc1932162225876cfe4bfb0"
name = "github.com/src-d/gcfg"
packages = [
".",
"scanner",
"token",
"types",
]
pruneopts = "NUT"
revision = "f187355171c936ac84a82793659ebb4936bc1c23"
version = "v1.3.0"
[[projects]]
digest = "1:cd5ffc5bda4e0296ab3e4de90dbb415259c78e45e7fab13694b14cde8ab74541"
name = "github.com/tcnksm/go-gitconfig"
@ -65,14 +133,144 @@
revision = "d154598bacbf4501c095a309753c5d4af66caa81"
version = "v0.1.2"
[[projects]]
digest = "1:3148cb3478c26a92b4c1a18abb9428234b281e278af6267840721a24b6cbc6a3"
name = "github.com/xanzy/ssh-agent"
packages = ["."]
pruneopts = "NUT"
revision = "640f0ab560aeb89d523bb6ac322b1244d5c3796c"
version = "v0.2.0"
[[projects]]
branch = "master"
digest = "1:4d8a79fbc6fa348fc94afa4235947c5196b7900ed71b94aa5fcbc7e273d150e1"
digest = "1:c76f8b24a4d9b99b502fb7b61ad769125075cb570efff9b9b73e6c428629532d"
name = "golang.org/x/crypto"
packages = [
"cast5",
"curve25519",
"ed25519",
"ed25519/internal/edwards25519",
"internal/chacha20",
"internal/subtle",
"openpgp",
"openpgp/armor",
"openpgp/elgamal",
"openpgp/errors",
"openpgp/packet",
"openpgp/s2k",
"poly1305",
"ssh",
"ssh/agent",
"ssh/knownhosts",
]
pruneopts = "NUT"
revision = "de0752318171da717af4ce24d0a2e8626afaeb11"
[[projects]]
branch = "master"
digest = "1:76ee51c3f468493aff39dbacc401e8831fbb765104cbf613b89bef01cf4bad70"
name = "golang.org/x/net"
packages = ["context"]
pruneopts = "NUT"
revision = "f9ce57c11b242f0f1599cf25c89d8cb02c45295a"
[[projects]]
branch = "master"
digest = "1:de22999d08e1604a7c3186ef64b5a18a90e399b99c4e8ff2a8d6c6abf42b7934"
name = "golang.org/x/sys"
packages = ["unix"]
packages = [
"unix",
"windows",
]
pruneopts = "NUT"
revision = "0ffbfd41fbef8ffcf9b62b0b0aa3a5873ed7a4fe"
[[projects]]
digest = "1:8029e9743749d4be5bc9f7d42ea1659471767860f0cdc34d37c3111bd308a295"
name = "golang.org/x/text"
packages = [
"internal/gen",
"internal/triegen",
"internal/ucd",
"transform",
"unicode/cldr",
"unicode/norm",
]
pruneopts = "NUT"
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
digest = "1:47a697b155f5214ff14e68e39ce9c2e8d93e1fb035ae5ba7e247d044e0ce64e3"
name = "gopkg.in/src-d/go-billy.v4"
packages = [
".",
"helper/chroot",
"helper/polyfill",
"osfs",
"util",
]
pruneopts = "NUT"
revision = "83cf655d40b15b427014d7875d10850f96edba14"
version = "v4.2.0"
[[projects]]
digest = "1:fa265385a5175b4aaed0cccec60059a3f363c3f209bba1b658e9a12baddbc5e0"
name = "gopkg.in/src-d/go-git.v4"
packages = [
".",
"config",
"internal/revision",
"plumbing",
"plumbing/cache",
"plumbing/filemode",
"plumbing/format/config",
"plumbing/format/diff",
"plumbing/format/gitignore",
"plumbing/format/idxfile",
"plumbing/format/index",
"plumbing/format/objfile",
"plumbing/format/packfile",
"plumbing/format/pktline",
"plumbing/object",
"plumbing/protocol/packp",
"plumbing/protocol/packp/capability",
"plumbing/protocol/packp/sideband",
"plumbing/revlist",
"plumbing/storer",
"plumbing/transport",
"plumbing/transport/client",
"plumbing/transport/file",
"plumbing/transport/git",
"plumbing/transport/http",
"plumbing/transport/internal/common",
"plumbing/transport/server",
"plumbing/transport/ssh",
"storage",
"storage/filesystem",
"storage/filesystem/dotgit",
"storage/memory",
"utils/binary",
"utils/diff",
"utils/ioutil",
"utils/merkletrie",
"utils/merkletrie/filesystem",
"utils/merkletrie/index",
"utils/merkletrie/internal/frame",
"utils/merkletrie/noder",
]
pruneopts = "NUT"
revision = "3bd5e82b2512d85becae9677fa06b5a973fd4cfb"
version = "v4.5.0"
[[projects]]
digest = "1:b233ad4ec87ac916e7bf5e678e98a2cb9e8b52f6de6ad3e11834fc7a71b8e3bf"
name = "gopkg.in/warnings.v0"
packages = ["."]
pruneopts = "NUT"
revision = "ec4a0fea49c7b46c2aeb0b51aac55779c607e52b"
version = "v0.1.2"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
@ -81,6 +279,8 @@
"github.com/golang-collections/collections/stack",
"github.com/jesseduffield/gocui",
"github.com/tcnksm/go-gitconfig",
"gopkg.in/src-d/go-git.v4",
"gopkg.in/src-d/go-git.v4/plumbing/object",
]
solver-name = "gps-cdcl"
solver-version = 1

41
vendor/github.com/emirpasic/gods/LICENSE generated vendored Normal file

@ -0,0 +1,41 @@
Copyright (c) 2015, Emir Pasic
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-------------------------------------------------------------------------------
AVL Tree:
Copyright (c) 2017 Benjamin Scher Purcell <benjapurcell@gmail.com>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.


@ -0,0 +1,35 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package containers provides core interfaces and functions for data structures.
//
// Container is the base interface for all data structures to implement.
//
// Iterators provide stateful iterators.
//
// Enumerable provides Ruby inspired (each, select, map, find, any?, etc.) container functions.
//
// Serialization provides serializers (marshalers) and deserializers (unmarshalers).
package containers
import "github.com/emirpasic/gods/utils"
// Container is the base interface that all data structures implement.
type Container interface {
Empty() bool
Size() int
Clear()
Values() []interface{}
}
// GetSortedValues returns the container's elements sorted with respect to the passed comparator.
// Does not affect the ordering of elements within the container.
func GetSortedValues(container Container, comparator utils.Comparator) []interface{} {
values := container.Values()
if len(values) < 2 {
return values
}
utils.Sort(values, comparator)
return values
}
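
As a quick orientation, a minimal sketch of how Container and GetSortedValues compose; it assumes the arraylist package vendored later in this commit (which satisfies Container), and the literal values are only illustrative.
package main
import (
    "fmt"
    "github.com/emirpasic/gods/containers"
    "github.com/emirpasic/gods/lists/arraylist"
    "github.com/emirpasic/gods/utils"
)
func main() {
    list := arraylist.New()
    list.Add(3, 1, 2) // arraylist.List implements containers.Container
    // GetSortedValues copies the values and sorts the copy,
    // leaving the container's own ordering untouched.
    sorted := containers.GetSortedValues(list, utils.IntComparator)
    fmt.Println(sorted) // [1 2 3]
}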


@ -0,0 +1,61 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package containers
// EnumerableWithIndex provides functions for ordered containers whose values can be fetched by an index.
type EnumerableWithIndex interface {
// Each calls the given function once for each element, passing that element's index and value.
Each(func(index int, value interface{}))
// Map invokes the given function once for each element and returns a
// container containing the values returned by the given function.
// TODO need help on how to enforce this in containers (don't want to type assert when chaining)
// Map(func(index int, value interface{}) interface{}) Container
// Select returns a new container containing all elements for which the given function returns a true value.
// TODO need help on how to enforce this in containers (don't want to type assert when chaining)
// Select(func(index int, value interface{}) bool) Container
// Any passes each element of the container to the given function and
// returns true if the function ever returns true for any element.
Any(func(index int, value interface{}) bool) bool
// All passes each element of the container to the given function and
// returns true if the function returns true for all elements.
All(func(index int, value interface{}) bool) bool
// Find passes each element of the container to the given function and returns
// the first (index,value) for which the function is true or -1,nil otherwise
// if no element matches the criteria.
Find(func(index int, value interface{}) bool) (int, interface{})
}
// EnumerableWithKey provides functions for ordered containers whose elements are key/value pairs.
type EnumerableWithKey interface {
// Each calls the given function once for each element, passing that element's key and value.
Each(func(key interface{}, value interface{}))
// Map invokes the given function once for each element and returns a container
// containing the values returned by the given function as key/value pairs.
// TODO need help on how to enforce this in containers (don't want to type assert when chaining)
// Map(func(key interface{}, value interface{}) (interface{}, interface{})) Container
// Select returns a new container containing all elements for which the given function returns a true value.
// TODO need help on how to enforce this in containers (don't want to type assert when chaining)
// Select(func(key interface{}, value interface{}) bool) Container
// Any passes each element of the container to the given function and
// returns true if the function ever returns true for any element.
Any(func(key interface{}, value interface{}) bool) bool
// All passes each element of the container to the given function and
// returns true if the function returns true for all elements.
All(func(key interface{}, value interface{}) bool) bool
// Find passes each element of the container to the given function and returns
// the first (key,value) for which the function is true or nil,nil otherwise if no element
// matches the criteria.
Find(func(key interface{}, value interface{}) bool) (interface{}, interface{})
}

109
vendor/github.com/emirpasic/gods/containers/iterator.go generated vendored Normal file

@ -0,0 +1,109 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package containers
// IteratorWithIndex is a stateful iterator for ordered containers whose values can be fetched by an index.
type IteratorWithIndex interface {
// Next moves the iterator to the next element and returns true if there was a next element in the container.
// If Next() returns true, then next element's index and value can be retrieved by Index() and Value().
// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
// Modifies the state of the iterator.
Next() bool
// Value returns the current element's value.
// Does not modify the state of the iterator.
Value() interface{}
// Index returns the current element's index.
// Does not modify the state of the iterator.
Index() int
// Begin resets the iterator to its initial state (one-before-first)
// Call Next() to fetch the first element if any.
Begin()
// First moves the iterator to the first element and returns true if there was a first element in the container.
// If First() returns true, then first element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
First() bool
}
// IteratorWithKey is a stateful iterator for ordered containers whose elements are key value pairs.
type IteratorWithKey interface {
// Next moves the iterator to the next element and returns true if there was a next element in the container.
// If Next() returns true, then next element's key and value can be retrieved by Key() and Value().
// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
// Modifies the state of the iterator.
Next() bool
// Value returns the current element's value.
// Does not modify the state of the iterator.
Value() interface{}
// Key returns the current element's key.
// Does not modify the state of the iterator.
Key() interface{}
// Begin resets the iterator to its initial state (one-before-first)
// Call Next() to fetch the first element if any.
Begin()
// First moves the iterator to the first element and returns true if there was a first element in the container.
// If First() returns true, then first element's key and value can be retrieved by Key() and Value().
// Modifies the state of the iterator.
First() bool
}
// ReverseIteratorWithIndex is a stateful iterator for ordered containers whose values can be fetched by an index.
//
// Essentially it is the same as IteratorWithIndex, but provides additional:
//
// Prev() function to enable traversal in reverse
//
// Last() function to move the iterator to the last element.
//
// End() function to move the iterator past the last element (one-past-the-end).
type ReverseIteratorWithIndex interface {
// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
Prev() bool
// End moves the iterator past the last element (one-past-the-end).
// Call Prev() to fetch the last element if any.
End()
// Last moves the iterator to the last element and returns true if there was a last element in the container.
// If Last() returns true, then last element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
Last() bool
IteratorWithIndex
}
// ReverseIteratorWithKey is a stateful iterator for ordered containers whose elements are key value pairs.
//
// Essentially it is the same as IteratorWithKey, but provides additional:
//
// Prev() function to enable traversal in reverse
//
// Last() function to move the iterator to the last element.
type ReverseIteratorWithKey interface {
// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
// If Prev() returns true, then previous element's key and value can be retrieved by Key() and Value().
// Modifies the state of the iterator.
Prev() bool
// End moves the iterator past the last element (one-past-the-end).
// Call Prev() to fetch the last element if any.
End()
// Last moves the iterator to the last element and returns true if there was a last element in the container.
// If Last() returns true, then last element's key and value can be retrieved by Key() and Value().
// Modifies the state of the iterator.
Last() bool
IteratorWithKey
}
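
A rough sketch of how these iterator contracts are used in practice, via the arraylist iterator vendored later in this commit (values are illustrative).
package main
import (
    "fmt"
    "github.com/emirpasic/gods/lists/arraylist"
)
func main() {
    list := arraylist.New()
    list.Add("a", "b", "c")
    // Forward traversal: the iterator starts one-before-first,
    // so Next() must be called before reading Index()/Value().
    it := list.Iterator()
    for it.Next() {
        fmt.Println(it.Index(), it.Value())
    }
    // Reverse traversal: End() parks the iterator one-past-the-end,
    // then Prev() walks back toward the first element.
    it.End()
    for it.Prev() {
        fmt.Println(it.Index(), it.Value())
    }
}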


@ -0,0 +1,17 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package containers
// JSONSerializer provides JSON serialization
type JSONSerializer interface {
// ToJSON outputs the JSON representation of the container's elements.
ToJSON() ([]byte, error)
}
// JSONDeserializer provides JSON deserialization
type JSONDeserializer interface {
// FromJSON populates the container's elements from the input JSON representation.
FromJSON([]byte) error
}


@ -0,0 +1,200 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package arraylist implements the array list.
//
// Structure is not thread safe.
//
// Reference: https://en.wikipedia.org/wiki/List_%28abstract_data_type%29
package arraylist
import (
"fmt"
"github.com/emirpasic/gods/lists"
"github.com/emirpasic/gods/utils"
"strings"
)
func assertListImplementation() {
var _ lists.List = (*List)(nil)
}
// List holds the elements in a slice
type List struct {
elements []interface{}
size int
}
const (
growthFactor = float32(2.0) // growth by 100%
shrinkFactor = float32(0.25) // shrink when size is 25% of capacity (0 means never shrink)
)
// New instantiates a new empty list
func New() *List {
return &List{}
}
// Add appends a value at the end of the list
func (list *List) Add(values ...interface{}) {
list.growBy(len(values))
for _, value := range values {
list.elements[list.size] = value
list.size++
}
}
// Get returns the element at index.
// Second return parameter is true if index is within bounds of the array and array is not empty, otherwise false.
func (list *List) Get(index int) (interface{}, bool) {
if !list.withinRange(index) {
return nil, false
}
return list.elements[index], true
}
// Remove removes the element at the given index from the list.
func (list *List) Remove(index int) {
if !list.withinRange(index) {
return
}
list.elements[index] = nil // cleanup reference
copy(list.elements[index:], list.elements[index+1:list.size]) // shift to the left by one (slow operation, need ways to optimize this)
list.size--
list.shrink()
}
// Contains checks whether the given values (one or more) are present in the list.
// All values have to be present in the list for the method to return true.
// Time complexity is O(n^2).
// Returns true if no arguments are passed at all, i.e. the list is always a superset of the empty set.
func (list *List) Contains(values ...interface{}) bool {
for _, searchValue := range values {
found := false
for _, element := range list.elements {
if element == searchValue {
found = true
break
}
}
if !found {
return false
}
}
return true
}
// Values returns all elements in the list.
func (list *List) Values() []interface{} {
newElements := make([]interface{}, list.size, list.size)
copy(newElements, list.elements[:list.size])
return newElements
}
// Empty returns true if list does not contain any elements.
func (list *List) Empty() bool {
return list.size == 0
}
// Size returns number of elements within the list.
func (list *List) Size() int {
return list.size
}
// Clear removes all elements from the list.
func (list *List) Clear() {
list.size = 0
list.elements = []interface{}{}
}
// Sort sorts the list's values (in-place) using the given comparator.
func (list *List) Sort(comparator utils.Comparator) {
if len(list.elements) < 2 {
return
}
utils.Sort(list.elements[:list.size], comparator)
}
// Swap swaps the two values at the specified positions.
func (list *List) Swap(i, j int) {
if list.withinRange(i) && list.withinRange(j) {
list.elements[i], list.elements[j] = list.elements[j], list.elements[i]
}
}
// Insert inserts values at the specified index position, shifting the value at that position (if any) and any subsequent elements to the right.
// Does nothing if the position is negative or bigger than the list's size.
// Note: a position equal to the list's size is valid, i.e. append.
func (list *List) Insert(index int, values ...interface{}) {
if !list.withinRange(index) {
// Append
if index == list.size {
list.Add(values...)
}
return
}
l := len(values)
list.growBy(l)
list.size += l
// Shift old to right
for i := list.size - 1; i >= index+l; i-- {
list.elements[i] = list.elements[i-l]
}
// Insert new
for i, value := range values {
list.elements[index+i] = value
}
}
// String returns a string representation of the container.
func (list *List) String() string {
str := "ArrayList\n"
values := []string{}
for _, value := range list.elements[:list.size] {
values = append(values, fmt.Sprintf("%v", value))
}
str += strings.Join(values, ", ")
return str
}
// Check that the index is within bounds of the list
func (list *List) withinRange(index int) bool {
return index >= 0 && index < list.size
}
func (list *List) resize(cap int) {
newElements := make([]interface{}, cap, cap)
copy(newElements, list.elements)
list.elements = newElements
}
// Expand the array if necessary, i.e. capacity will be reached if we add n elements
func (list *List) growBy(n int) {
// When capacity is reached, grow by a factor of growthFactor and add number of elements
currentCapacity := cap(list.elements)
if list.size+n >= currentCapacity {
newCapacity := int(growthFactor * float32(currentCapacity+n))
list.resize(newCapacity)
}
}
// Shrink the array if necessary, i.e. when size is shrinkFactor percent of current capacity
func (list *List) shrink() {
if shrinkFactor == 0.0 {
return
}
// Shrink when size is at shrinkFactor * capacity
currentCapacity := cap(list.elements)
if list.size <= int(float32(currentCapacity)*shrinkFactor) {
list.resize(list.size)
}
}
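
A short usage sketch of the list API above; the values and comparator choice are made up for illustration.
package main
import (
    "fmt"
    "github.com/emirpasic/gods/lists/arraylist"
    "github.com/emirpasic/gods/utils"
)
func main() {
    list := arraylist.New()
    list.Add("c", "a")                // ["c", "a"]
    list.Insert(1, "b")               // ["c", "b", "a"]
    list.Sort(utils.StringComparator) // ["a", "b", "c"]
    if value, ok := list.Get(0); ok {
        fmt.Println(value) // "a"
    }
    list.Remove(2)                       // ["a", "b"]
    fmt.Println(list.Contains("a", "b")) // true
    fmt.Println(list.Size())             // 2
}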


@ -0,0 +1,79 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package arraylist
import "github.com/emirpasic/gods/containers"
func assertEnumerableImplementation() {
var _ containers.EnumerableWithIndex = (*List)(nil)
}
// Each calls the given function once for each element, passing that element's index and value.
func (list *List) Each(f func(index int, value interface{})) {
iterator := list.Iterator()
for iterator.Next() {
f(iterator.Index(), iterator.Value())
}
}
// Map invokes the given function once for each element and returns a
// container containing the values returned by the given function.
func (list *List) Map(f func(index int, value interface{}) interface{}) *List {
newList := &List{}
iterator := list.Iterator()
for iterator.Next() {
newList.Add(f(iterator.Index(), iterator.Value()))
}
return newList
}
// Select returns a new container containing all elements for which the given function returns a true value.
func (list *List) Select(f func(index int, value interface{}) bool) *List {
newList := &List{}
iterator := list.Iterator()
for iterator.Next() {
if f(iterator.Index(), iterator.Value()) {
newList.Add(iterator.Value())
}
}
return newList
}
// Any passes each element of the collection to the given function and
// returns true if the function ever returns true for any element.
func (list *List) Any(f func(index int, value interface{}) bool) bool {
iterator := list.Iterator()
for iterator.Next() {
if f(iterator.Index(), iterator.Value()) {
return true
}
}
return false
}
// All passes each element of the collection to the given function and
// returns true if the function returns true for all elements.
func (list *List) All(f func(index int, value interface{}) bool) bool {
iterator := list.Iterator()
for iterator.Next() {
if !f(iterator.Index(), iterator.Value()) {
return false
}
}
return true
}
// Find passes each element of the container to the given function and returns
// the first (index,value) for which the function is true or -1,nil otherwise
// if no element matches the criteria.
func (list *List) Find(f func(index int, value interface{}) bool) (int, interface{}) {
iterator := list.Iterator()
for iterator.Next() {
if f(iterator.Index(), iterator.Value()) {
return iterator.Index(), iterator.Value()
}
}
return -1, nil
}
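
A brief sketch of the enumerable helpers above; the predicates and values are invented for the example.
package main
import (
    "fmt"
    "github.com/emirpasic/gods/lists/arraylist"
)
func main() {
    list := arraylist.New()
    list.Add(1, 2, 3, 4)
    // Select keeps the even values, Map then doubles them.
    evens := list.Select(func(index int, value interface{}) bool {
        return value.(int)%2 == 0
    })
    doubled := evens.Map(func(index int, value interface{}) interface{} {
        return value.(int) * 2
    })
    fmt.Println(doubled.Values()) // [4 8]
    // Find returns the first match, or (-1, nil) if nothing matches.
    index, value := list.Find(func(index int, value interface{}) bool {
        return value.(int) > 2
    })
    fmt.Println(index, value) // 2 3
}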


@ -0,0 +1,83 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package arraylist
import "github.com/emirpasic/gods/containers"
func assertIteratorImplementation() {
var _ containers.ReverseIteratorWithIndex = (*Iterator)(nil)
}
// Iterator holding the iterator's state
type Iterator struct {
list *List
index int
}
// Iterator returns a stateful iterator whose values can be fetched by an index.
func (list *List) Iterator() Iterator {
return Iterator{list: list, index: -1}
}
// Next moves the iterator to the next element and returns true if there was a next element in the container.
// If Next() returns true, then next element's index and value can be retrieved by Index() and Value().
// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
// Modifies the state of the iterator.
func (iterator *Iterator) Next() bool {
if iterator.index < iterator.list.size {
iterator.index++
}
return iterator.list.withinRange(iterator.index)
}
// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
func (iterator *Iterator) Prev() bool {
if iterator.index >= 0 {
iterator.index--
}
return iterator.list.withinRange(iterator.index)
}
// Value returns the current element's value.
// Does not modify the state of the iterator.
func (iterator *Iterator) Value() interface{} {
return iterator.list.elements[iterator.index]
}
// Index returns the current element's index.
// Does not modify the state of the iterator.
func (iterator *Iterator) Index() int {
return iterator.index
}
// Begin resets the iterator to its initial state (one-before-first)
// Call Next() to fetch the first element if any.
func (iterator *Iterator) Begin() {
iterator.index = -1
}
// End moves the iterator past the last element (one-past-the-end).
// Call Prev() to fetch the last element if any.
func (iterator *Iterator) End() {
iterator.index = iterator.list.size
}
// First moves the iterator to the first element and returns true if there was a first element in the container.
// If First() returns true, then first element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
func (iterator *Iterator) First() bool {
iterator.Begin()
return iterator.Next()
}
// Last moves the iterator to the last element and returns true if there was a last element in the container.
// If Last() returns true, then last element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
func (iterator *Iterator) Last() bool {
iterator.End()
return iterator.Prev()
}


@ -0,0 +1,29 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package arraylist
import (
"encoding/json"
"github.com/emirpasic/gods/containers"
)
func assertSerializationImplementation() {
var _ containers.JSONSerializer = (*List)(nil)
var _ containers.JSONDeserializer = (*List)(nil)
}
// ToJSON outputs the JSON representation of list's elements.
func (list *List) ToJSON() ([]byte, error) {
return json.Marshal(list.elements[:list.size])
}
// FromJSON populates list's elements from the input JSON representation.
func (list *List) FromJSON(data []byte) error {
err := json.Unmarshal(data, &list.elements)
if err == nil {
list.size = len(list.elements)
}
return err
}
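
A small round-trip sketch of the JSON serialization helpers above (values are illustrative).
package main
import (
    "fmt"
    "github.com/emirpasic/gods/lists/arraylist"
)
func main() {
    list := arraylist.New()
    list.Add("a", "b", "c")
    // Serialize only the live elements (elements[:size]) to JSON...
    data, err := list.ToJSON()
    if err != nil {
        panic(err)
    }
    fmt.Println(string(data)) // ["a","b","c"]
    // ...and populate a fresh list from that JSON.
    restored := arraylist.New()
    if err := restored.FromJSON(data); err != nil {
        panic(err)
    }
    fmt.Println(restored.Size()) // 3
}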

32
vendor/github.com/emirpasic/gods/lists/lists.go generated vendored Normal file

@ -0,0 +1,32 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package lists provides an abstract List interface.
//
// In computer science, a list or sequence is an abstract data type that represents an ordered sequence of values, where the same value may occur more than once. An instance of a list is a computer representation of the mathematical concept of a finite sequence; the (potentially) infinite analog of a list is a stream. Lists are a basic example of containers, as they contain other values. If the same value occurs multiple times, each occurrence is considered a distinct item.
//
// Reference: https://en.wikipedia.org/wiki/List_%28abstract_data_type%29
package lists
import (
"github.com/emirpasic/gods/containers"
"github.com/emirpasic/gods/utils"
)
// List interface that all lists implement
type List interface {
Get(index int) (interface{}, bool)
Remove(index int)
Add(values ...interface{})
Contains(values ...interface{}) bool
Sort(comparator utils.Comparator)
Swap(index1, index2 int)
Insert(index int, values ...interface{})
containers.Container
// Empty() bool
// Size() int
// Clear()
// Values() []interface{}
}
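
A hypothetical sketch of coding against the List interface rather than a concrete implementation; the lastValue helper is invented for illustration and uses only methods declared above.
package main
import (
    "fmt"
    "github.com/emirpasic/gods/lists"
    "github.com/emirpasic/gods/lists/arraylist"
)
// lastValue works with any lists.List implementation.
func lastValue(l lists.List) (interface{}, bool) {
    if l.Empty() {
        return nil, false
    }
    return l.Get(l.Size() - 1)
}
func main() {
    l := arraylist.New()
    l.Add("x", "y")
    v, ok := lastValue(l)
    fmt.Println(v, ok) // y true
}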


@ -0,0 +1,163 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package binaryheap implements a binary heap backed by array list.
//
// Comparator defines this heap as either min or max heap.
//
// Structure is not thread safe.
//
// References: http://en.wikipedia.org/wiki/Binary_heap
package binaryheap
import (
"fmt"
"github.com/emirpasic/gods/lists/arraylist"
"github.com/emirpasic/gods/trees"
"github.com/emirpasic/gods/utils"
"strings"
)
func assertTreeImplementation() {
var _ trees.Tree = (*Heap)(nil)
}
// Heap holds elements in an array-list
type Heap struct {
list *arraylist.List
Comparator utils.Comparator
}
// NewWith instantiates a new empty heap tree with the custom comparator.
func NewWith(comparator utils.Comparator) *Heap {
return &Heap{list: arraylist.New(), Comparator: comparator}
}
// NewWithIntComparator instantiates a new empty heap with the IntComparator, i.e. elements are of type int.
func NewWithIntComparator() *Heap {
return &Heap{list: arraylist.New(), Comparator: utils.IntComparator}
}
// NewWithStringComparator instantiates a new empty heap with the StringComparator, i.e. elements are of type string.
func NewWithStringComparator() *Heap {
return &Heap{list: arraylist.New(), Comparator: utils.StringComparator}
}
// Push adds a value onto the heap and bubbles it up accordingly.
func (heap *Heap) Push(values ...interface{}) {
if len(values) == 1 {
heap.list.Add(values[0])
heap.bubbleUp()
} else {
// Reference: https://en.wikipedia.org/wiki/Binary_heap#Building_a_heap
for _, value := range values {
heap.list.Add(value)
}
size := heap.list.Size()/2 + 1
for i := size; i >= 0; i-- {
heap.bubbleDownIndex(i)
}
}
}
// Pop removes top element on heap and returns it, or nil if heap is empty.
// Second return parameter is true, unless the heap was empty and there was nothing to pop.
func (heap *Heap) Pop() (value interface{}, ok bool) {
value, ok = heap.list.Get(0)
if !ok {
return
}
lastIndex := heap.list.Size() - 1
heap.list.Swap(0, lastIndex)
heap.list.Remove(lastIndex)
heap.bubbleDown()
return
}
// Peek returns top element on the heap without removing it, or nil if heap is empty.
// Second return parameter is true, unless the heap was empty and there was nothing to peek.
func (heap *Heap) Peek() (value interface{}, ok bool) {
return heap.list.Get(0)
}
// Empty returns true if heap does not contain any elements.
func (heap *Heap) Empty() bool {
return heap.list.Empty()
}
// Size returns number of elements within the heap.
func (heap *Heap) Size() int {
return heap.list.Size()
}
// Clear removes all elements from the heap.
func (heap *Heap) Clear() {
heap.list.Clear()
}
// Values returns all elements in the heap.
func (heap *Heap) Values() []interface{} {
return heap.list.Values()
}
// String returns a string representation of the container.
func (heap *Heap) String() string {
str := "BinaryHeap\n"
values := []string{}
for _, value := range heap.list.Values() {
values = append(values, fmt.Sprintf("%v", value))
}
str += strings.Join(values, ", ")
return str
}
// Performs the "bubble down" operation. This is to place the element that is at the root
// of the heap in its correct place so that the heap maintains the min/max-heap order property.
func (heap *Heap) bubbleDown() {
heap.bubbleDownIndex(0)
}
// Performs the "bubble down" operation. This is to place the element that is at the index
// of the heap in its correct place so that the heap maintains the min/max-heap order property.
func (heap *Heap) bubbleDownIndex(index int) {
size := heap.list.Size()
for leftIndex := index<<1 + 1; leftIndex < size; leftIndex = index<<1 + 1 {
rightIndex := index<<1 + 2
smallerIndex := leftIndex
leftValue, _ := heap.list.Get(leftIndex)
rightValue, _ := heap.list.Get(rightIndex)
if rightIndex < size && heap.Comparator(leftValue, rightValue) > 0 {
smallerIndex = rightIndex
}
indexValue, _ := heap.list.Get(index)
smallerValue, _ := heap.list.Get(smallerIndex)
if heap.Comparator(indexValue, smallerValue) > 0 {
heap.list.Swap(index, smallerIndex)
} else {
break
}
index = smallerIndex
}
}
// Performs the "bubble up" operation. This is to place a newly inserted
// element (i.e. last element in the list) in its correct place so that
// the heap maintains the min/max-heap order property.
func (heap *Heap) bubbleUp() {
index := heap.list.Size() - 1
for parentIndex := (index - 1) >> 1; index > 0; parentIndex = (index - 1) >> 1 {
indexValue, _ := heap.list.Get(index)
parentValue, _ := heap.list.Get(parentIndex)
if heap.Comparator(parentValue, indexValue) <= 0 {
break
}
heap.list.Swap(index, parentIndex)
index = parentIndex
}
}
// Check that the index is within bounds of the list
func (heap *Heap) withinRange(index int) bool {
return index >= 0 && index < heap.list.Size()
}
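
A quick usage sketch of the heap above; with IntComparator it behaves as a min-heap, and the pushed values are illustrative.
package main
import (
    "fmt"
    "github.com/emirpasic/gods/trees/binaryheap"
)
func main() {
    heap := binaryheap.NewWithIntComparator() // min-heap over ints
    heap.Push(3, 1, 2)                        // bulk push heapifies in one pass
    if top, ok := heap.Peek(); ok {
        fmt.Println(top) // 1
    }
    for !heap.Empty() {
        value, _ := heap.Pop()
        fmt.Println(value) // 1, then 2, then 3
    }
}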


@ -0,0 +1,84 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package binaryheap
import "github.com/emirpasic/gods/containers"
func assertIteratorImplementation() {
var _ containers.ReverseIteratorWithIndex = (*Iterator)(nil)
}
// Iterator holding the iterator's state
type Iterator struct {
heap *Heap
index int
}
// Iterator returns a stateful iterator whose values can be fetched by an index.
func (heap *Heap) Iterator() Iterator {
return Iterator{heap: heap, index: -1}
}
// Next moves the iterator to the next element and returns true if there was a next element in the container.
// If Next() returns true, then next element's index and value can be retrieved by Index() and Value().
// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
// Modifies the state of the iterator.
func (iterator *Iterator) Next() bool {
if iterator.index < iterator.heap.Size() {
iterator.index++
}
return iterator.heap.withinRange(iterator.index)
}
// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
func (iterator *Iterator) Prev() bool {
if iterator.index >= 0 {
iterator.index--
}
return iterator.heap.withinRange(iterator.index)
}
// Value returns the current element's value.
// Does not modify the state of the iterator.
func (iterator *Iterator) Value() interface{} {
value, _ := iterator.heap.list.Get(iterator.index)
return value
}
// Index returns the current element's index.
// Does not modify the state of the iterator.
func (iterator *Iterator) Index() int {
return iterator.index
}
// Begin resets the iterator to its initial state (one-before-first)
// Call Next() to fetch the first element if any.
func (iterator *Iterator) Begin() {
iterator.index = -1
}
// End moves the iterator past the last element (one-past-the-end).
// Call Prev() to fetch the last element if any.
func (iterator *Iterator) End() {
iterator.index = iterator.heap.Size()
}
// First moves the iterator to the first element and returns true if there was a first element in the container.
// If First() returns true, then first element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
func (iterator *Iterator) First() bool {
iterator.Begin()
return iterator.Next()
}
// Last moves the iterator to the last element and returns true if there was a last element in the container.
// If Last() returns true, then last element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
func (iterator *Iterator) Last() bool {
iterator.End()
return iterator.Prev()
}


@ -0,0 +1,22 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package binaryheap
import "github.com/emirpasic/gods/containers"
func assertSerializationImplementation() {
var _ containers.JSONSerializer = (*Heap)(nil)
var _ containers.JSONDeserializer = (*Heap)(nil)
}
// ToJSON outputs the JSON representation of the heap's elements.
func (heap *Heap) ToJSON() ([]byte, error) {
return heap.list.ToJSON()
}
// FromJSON populates the heap's elements from the input JSON representation.
func (heap *Heap) FromJSON(data []byte) error {
return heap.list.FromJSON(data)
}

21
vendor/github.com/emirpasic/gods/trees/trees.go generated vendored Normal file
View File

@ -0,0 +1,21 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package trees provides an abstract Tree interface.
//
// In computer science, a tree is a widely used abstract data type (ADT) or data structure implementing this ADT that simulates a hierarchical tree structure, with a root value and subtrees of children with a parent node, represented as a set of linked nodes.
//
// Reference: https://en.wikipedia.org/wiki/Tree_%28data_structure%29
package trees
import "github.com/emirpasic/gods/containers"
// Tree interface that all trees implement
type Tree interface {
containers.Container
// Empty() bool
// Size() int
// Clear()
// Values() []interface{}
}

251
vendor/github.com/emirpasic/gods/utils/comparator.go generated vendored Normal file

@ -0,0 +1,251 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package utils
import "time"
// Comparator makes a type assertion (see IntComparator for an example),
// which will panic if a or b are not of the asserted type.
//
// Should return a number:
// negative , if a < b
// zero , if a == b
// positive , if a > b
type Comparator func(a, b interface{}) int
// StringComparator provides a fast comparison on strings
func StringComparator(a, b interface{}) int {
s1 := a.(string)
s2 := b.(string)
min := len(s2)
if len(s1) < len(s2) {
min = len(s1)
}
diff := 0
for i := 0; i < min && diff == 0; i++ {
diff = int(s1[i]) - int(s2[i])
}
if diff == 0 {
diff = len(s1) - len(s2)
}
if diff < 0 {
return -1
}
if diff > 0 {
return 1
}
return 0
}
// IntComparator provides a basic comparison on int
func IntComparator(a, b interface{}) int {
aAsserted := a.(int)
bAsserted := b.(int)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// Int8Comparator provides a basic comparison on int8
func Int8Comparator(a, b interface{}) int {
aAsserted := a.(int8)
bAsserted := b.(int8)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// Int16Comparator provides a basic comparison on int16
func Int16Comparator(a, b interface{}) int {
aAsserted := a.(int16)
bAsserted := b.(int16)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// Int32Comparator provides a basic comparison on int32
func Int32Comparator(a, b interface{}) int {
aAsserted := a.(int32)
bAsserted := b.(int32)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// Int64Comparator provides a basic comparison on int64
func Int64Comparator(a, b interface{}) int {
aAsserted := a.(int64)
bAsserted := b.(int64)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// UIntComparator provides a basic comparison on uint
func UIntComparator(a, b interface{}) int {
aAsserted := a.(uint)
bAsserted := b.(uint)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// UInt8Comparator provides a basic comparison on uint8
func UInt8Comparator(a, b interface{}) int {
aAsserted := a.(uint8)
bAsserted := b.(uint8)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// UInt16Comparator provides a basic comparison on uint16
func UInt16Comparator(a, b interface{}) int {
aAsserted := a.(uint16)
bAsserted := b.(uint16)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// UInt32Comparator provides a basic comparison on uint32
func UInt32Comparator(a, b interface{}) int {
aAsserted := a.(uint32)
bAsserted := b.(uint32)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// UInt64Comparator provides a basic comparison on uint64
func UInt64Comparator(a, b interface{}) int {
aAsserted := a.(uint64)
bAsserted := b.(uint64)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// Float32Comparator provides a basic comparison on float32
func Float32Comparator(a, b interface{}) int {
aAsserted := a.(float32)
bAsserted := b.(float32)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// Float64Comparator provides a basic comparison on float64
func Float64Comparator(a, b interface{}) int {
aAsserted := a.(float64)
bAsserted := b.(float64)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// ByteComparator provides a basic comparison on byte
func ByteComparator(a, b interface{}) int {
aAsserted := a.(byte)
bAsserted := b.(byte)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// RuneComparator provides a basic comparison on rune
func RuneComparator(a, b interface{}) int {
aAsserted := a.(rune)
bAsserted := b.(rune)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// TimeComparator provides a basic comparison on time.Time
func TimeComparator(a, b interface{}) int {
aAsserted := a.(time.Time)
bAsserted := b.(time.Time)
switch {
case aAsserted.After(bAsserted):
return 1
case aAsserted.Before(bAsserted):
return -1
default:
return 0
}
}
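
Beyond the built-in comparators, a Comparator is just a function with this signature. A hedged sketch with an invented user type, ordering a heap by age:
package main
import (
    "fmt"
    "github.com/emirpasic/gods/trees/binaryheap"
)
// user and byAge are made up for this example.
type user struct {
    Name string
    Age  int
}
func byAge(a, b interface{}) int {
    ua := a.(*user)
    ub := b.(*user)
    switch {
    case ua.Age > ub.Age:
        return 1
    case ua.Age < ub.Age:
        return -1
    default:
        return 0
    }
}
func main() {
    heap := binaryheap.NewWith(byAge) // min-heap ordered by age
    heap.Push(&user{"alice", 30}, &user{"bob", 25})
    youngest, _ := heap.Pop()
    fmt.Println(youngest.(*user).Name) // bob
}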

29
vendor/github.com/emirpasic/gods/utils/sort.go generated vendored Normal file

@ -0,0 +1,29 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package utils
import "sort"
// Sort sorts values (in-place) with respect to the given comparator.
//
// Uses Go's sort (hybrid of quicksort for large and then insertion sort for smaller slices).
func Sort(values []interface{}, comparator Comparator) {
sort.Sort(sortable{values, comparator})
}
type sortable struct {
values []interface{}
comparator Comparator
}
func (s sortable) Len() int {
return len(s.values)
}
func (s sortable) Swap(i, j int) {
s.values[i], s.values[j] = s.values[j], s.values[i]
}
func (s sortable) Less(i, j int) bool {
return s.comparator(s.values[i], s.values[j]) < 0
}
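
For completeness, a minimal sketch of calling Sort directly on a plain slice (values are illustrative).
package main
import (
    "fmt"
    "github.com/emirpasic/gods/utils"
)
func main() {
    values := []interface{}{"pear", "apple", "banana"}
    utils.Sort(values, utils.StringComparator) // sorts in place
    fmt.Println(values)                        // [apple banana pear]
}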

47
vendor/github.com/emirpasic/gods/utils/utils.go generated vendored Normal file

@ -0,0 +1,47 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package utils provides common utility functions.
//
// Provided functionalities:
// - sorting
// - comparators
package utils
import (
"fmt"
"strconv"
)
// ToString converts a value to string.
func ToString(value interface{}) string {
switch value.(type) {
case string:
return value.(string)
case int8:
return strconv.FormatInt(int64(value.(int8)), 10)
case int16:
return strconv.FormatInt(int64(value.(int16)), 10)
case int32:
return strconv.FormatInt(int64(value.(int32)), 10)
case int64:
return strconv.FormatInt(int64(value.(int64)), 10)
case uint8:
return strconv.FormatUint(uint64(value.(uint8)), 10)
case uint16:
return strconv.FormatUint(uint64(value.(uint16)), 10)
case uint32:
return strconv.FormatUint(uint64(value.(uint32)), 10)
case uint64:
return strconv.FormatUint(uint64(value.(uint64)), 10)
case float32:
return strconv.FormatFloat(float64(value.(float32)), 'g', -1, 64)
case float64:
return strconv.FormatFloat(float64(value.(float64)), 'g', -1, 64)
case bool:
return strconv.FormatBool(value.(bool))
default:
return fmt.Sprintf("%+v", value)
}
}
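
A tiny illustrative sketch of ToString; note that types without an explicit case (plain int, slices, structs) fall through to the %+v default.
package main
import (
    "fmt"
    "github.com/emirpasic/gods/utils"
)
func main() {
    fmt.Println(utils.ToString(int64(42)))   // "42"
    fmt.Println(utils.ToString(3.14))        // "3.14" (float64 case)
    fmt.Println(utils.ToString(true))        // "true"
    fmt.Println(utils.ToString([]int{1, 2})) // no matching case, falls back to %+v: "[1 2]"
}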

21
vendor/github.com/jbenet/go-context/LICENSE generated vendored Normal file

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Juan Batiz-Benet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

120
vendor/github.com/jbenet/go-context/io/ctxio.go generated vendored Normal file

@ -0,0 +1,120 @@
// Package ctxio provides io.Reader and io.Writer wrappers that
// respect context.Contexts. Use these at the interface between
// your context code and your io.
//
// WARNING: read the code. see how writes and reads will continue
// until you cancel the io. Maybe this package should provide
// versions of io.ReadCloser and io.WriteCloser that automatically
// call .Close when the context expires. But for now -- since in my
// use cases I have long-lived connections with ephemeral io wrappers
// -- this has yet to be a need.
package ctxio
import (
"io"
context "golang.org/x/net/context"
)
type ioret struct {
n int
err error
}
type Writer interface {
io.Writer
}
type ctxWriter struct {
w io.Writer
ctx context.Context
}
// NewWriter wraps a writer to make it respect given Context.
// If there is a blocking write, the returned Writer will return
// whenever the context is cancelled (the return values are n=0
// and err=ctx.Err().)
//
// Note well: this wrapper DOES NOT ACTUALLY cancel the underlying
// write-- there is no way to do that with the standard go io
// interface. So the read and write _will_ happen or hang. So, use
// this sparingly, make sure to cancel the read or write as necessary
// (e.g. closing a connection whose context is up, etc.)
//
// Furthermore, in order to protect your memory from being read
// _after_ you've cancelled the context, this io.Writer will
// first make a **copy** of the buffer.
func NewWriter(ctx context.Context, w io.Writer) *ctxWriter {
if ctx == nil {
ctx = context.Background()
}
return &ctxWriter{ctx: ctx, w: w}
}
func (w *ctxWriter) Write(buf []byte) (int, error) {
buf2 := make([]byte, len(buf))
copy(buf2, buf)
c := make(chan ioret, 1)
go func() {
n, err := w.w.Write(buf2)
c <- ioret{n, err}
close(c)
}()
select {
case r := <-c:
return r.n, r.err
case <-w.ctx.Done():
return 0, w.ctx.Err()
}
}
type Reader interface {
io.Reader
}
type ctxReader struct {
r io.Reader
ctx context.Context
}
// NewReader wraps a reader to make it respect given Context.
// If there is a blocking read, the returned Reader will return
// whenever the context is cancelled (the return values are n=0
// and err=ctx.Err().)
//
// Note well: this wrapper DOES NOT ACTUALLY cancel the underlying
// write-- there is no way to do that with the standard go io
// interface. So the read and write _will_ happen or hang. So, use
// this sparingly, make sure to cancel the read or write as necessary
// (e.g. closing a connection whose context is up, etc.)
//
// Furthermore, in order to protect your memory from being read
// _before_ you've cancelled the context, this io.Reader will
// allocate a buffer of the same size, and **copy** into the client's
// if the read succeeds in time.
func NewReader(ctx context.Context, r io.Reader) *ctxReader {
return &ctxReader{ctx: ctx, r: r}
}
func (r *ctxReader) Read(buf []byte) (int, error) {
buf2 := make([]byte, len(buf))
c := make(chan ioret, 1)
go func() {
n, err := r.r.Read(buf2)
c <- ioret{n, err}
close(c)
}()
select {
case ret := <-c:
copy(buf, buf2)
return ret.n, ret.err
case <-r.ctx.Done():
return 0, r.ctx.Err()
}
}
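
A hedged usage sketch of the reader wrapper above: a blocked Read returns once the context deadline passes, though (as the comments warn) the underlying read itself is not cancelled. The pipe and timeout are illustrative.
package main
import (
    "fmt"
    "io"
    "time"
    ctxio "github.com/jbenet/go-context/io"
    "golang.org/x/net/context"
)
func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
    defer cancel()
    // A pipe with nothing written to it blocks forever on Read; the
    // wrapper turns that into ctx.Err() once the deadline passes.
    pr, _ := io.Pipe()
    r := ctxio.NewReader(ctx, pr)
    buf := make([]byte, 8)
    _, err := r.Read(buf)
    fmt.Println(err) // context deadline exceeded
}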

3
vendor/github.com/kevinburke/ssh_config/AUTHORS.txt generated vendored Normal file

@ -0,0 +1,3 @@
Eugene Terentev <eugene@terentev.net>
Kevin Burke <kev@inburke.com>
Sergey Lukjanov <me@slukjanov.name>

49
vendor/github.com/kevinburke/ssh_config/LICENSE generated vendored Normal file

@ -0,0 +1,49 @@
Copyright (c) 2017 Kevin Burke.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
===================
The lexer and parser borrow heavily from github.com/pelletier/go-toml. The
license for that project is copied below.
The MIT License (MIT)
Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

639
vendor/github.com/kevinburke/ssh_config/config.go generated vendored Normal file

@ -0,0 +1,639 @@
// Package ssh_config provides tools for manipulating SSH config files.
//
// Importantly, this parser attempts to preserve comments in a given file, so
// you can manipulate a `ssh_config` file from a program, if your heart desires.
//
// The Get() and GetStrict() functions will attempt to read values from
// $HOME/.ssh/config, falling back to /etc/ssh/ssh_config. The first argument is
// the host name to match on ("example.com"), and the second argument is the key
// you want to retrieve ("Port"). The keywords are case insensitive.
//
// port := ssh_config.Get("myhost", "Port")
//
// You can also manipulate an SSH config file and then print it or write it back
// to disk.
//
// f, _ := os.Open(filepath.Join(os.Getenv("HOME"), ".ssh", "config"))
// cfg, _ := ssh_config.Decode(f)
// for _, host := range cfg.Hosts {
// fmt.Println("patterns:", host.Patterns)
// for _, node := range host.Nodes {
// fmt.Println(node.String())
// }
// }
//
// // Write the cfg back to disk:
// fmt.Println(cfg.String())
//
// BUG: the Match directive is currently unsupported; parsing a config with
// a Match directive will trigger an error.
package ssh_config
import (
"bytes"
"errors"
"fmt"
"io"
"os"
osuser "os/user"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
)
const version = "0.4"
type configFinder func() string
// UserSettings checks ~/.ssh and /etc/ssh for configuration files. The config
// files are parsed and cached the first time Get() or GetStrict() is called.
type UserSettings struct {
IgnoreErrors bool
systemConfig *Config
systemConfigFinder configFinder
userConfig *Config
userConfigFinder configFinder
loadConfigs sync.Once
onceErr error
}
func homedir() string {
user, err := osuser.Current()
if err == nil {
return user.HomeDir
} else {
return os.Getenv("HOME")
}
}
func userConfigFinder() string {
return filepath.Join(homedir(), ".ssh", "config")
}
// DefaultUserSettings is the default UserSettings and is used by Get and
// GetStrict. It checks both $HOME/.ssh/config and /etc/ssh/ssh_config for keys,
// and it will return parse errors (if any) instead of swallowing them.
var DefaultUserSettings = &UserSettings{
IgnoreErrors: false,
systemConfigFinder: systemConfigFinder,
userConfigFinder: userConfigFinder,
}
func systemConfigFinder() string {
return filepath.Join("/", "etc", "ssh", "ssh_config")
}
func findVal(c *Config, alias, key string) (string, error) {
if c == nil {
return "", nil
}
val, err := c.Get(alias, key)
if err != nil || val == "" {
return "", err
}
if err := validate(key, val); err != nil {
return "", err
}
return val, nil
}
// Get finds the first value for key within a declaration that matches the
// alias. Get returns the empty string if no value was found, or if IgnoreErrors
// is false and we could not parse the configuration file. Use GetStrict to
// disambiguate the latter cases.
//
// The match for key is case insensitive.
//
// Get is a wrapper around DefaultUserSettings.Get.
func Get(alias, key string) string {
return DefaultUserSettings.Get(alias, key)
}
// GetStrict finds the first value for key within a declaration that matches the
// alias. If key has a default value and no matching configuration is found, the
// default will be returned. For more information on default values and the way
// patterns are matched, see the manpage for ssh_config.
//
// error will be non-nil if and only if a user's configuration file or the
// system configuration file could not be parsed, and u.IgnoreErrors is false.
//
// GetStrict is a wrapper around DefaultUserSettings.GetStrict.
func GetStrict(alias, key string) (string, error) {
return DefaultUserSettings.GetStrict(alias, key)
}
// Get finds the first value for key within a declaration that matches the
// alias. Get returns the empty string if no value was found, or if IgnoreErrors
// is false and we could not parse the configuration file. Use GetStrict to
// disambiguate the latter cases.
//
// The match for key is case insensitive.
func (u *UserSettings) Get(alias, key string) string {
val, err := u.GetStrict(alias, key)
if err != nil {
return ""
}
return val
}
// GetStrict finds the first value for key within a declaration that matches the
// alias. If key has a default value and no matching configuration is found, the
// default will be returned. For more information on default values and the way
// patterns are matched, see the manpage for ssh_config.
//
// error will be non-nil if and only if a user's configuration file or the
// system configuration file could not be parsed, and u.IgnoreErrors is false.
func (u *UserSettings) GetStrict(alias, key string) (string, error) {
u.loadConfigs.Do(func() {
// can't parse user file, that's ok.
var filename string
if u.userConfigFinder == nil {
filename = userConfigFinder()
} else {
filename = u.userConfigFinder()
}
var err error
u.userConfig, err = parseFile(filename)
if err != nil && os.IsNotExist(err) == false {
u.onceErr = err
return
}
if u.systemConfigFinder == nil {
filename = systemConfigFinder()
} else {
filename = u.systemConfigFinder()
}
u.systemConfig, err = parseFile(filename)
if err != nil && os.IsNotExist(err) == false {
u.onceErr = err
return
}
})
if u.onceErr != nil && u.IgnoreErrors == false {
return "", u.onceErr
}
val, err := findVal(u.userConfig, alias, key)
if err != nil || val != "" {
return val, err
}
val2, err2 := findVal(u.systemConfig, alias, key)
if err2 != nil || val2 != "" {
return val2, err2
}
return Default(key), nil
}
func parseFile(filename string) (*Config, error) {
return parseWithDepth(filename, 0)
}
func parseWithDepth(filename string, depth uint8) (*Config, error) {
f, err := os.Open(filename)
if err != nil {
return nil, err
}
defer f.Close()
return decode(f, isSystem(filename), depth)
}
func isSystem(filename string) bool {
// TODO i'm not sure this is the best way to detect a system repo
return strings.HasPrefix(filepath.Clean(filename), "/etc/ssh")
}
// Decode reads r into a Config, or returns an error if r could not be parsed as
// an SSH config file.
func Decode(r io.Reader) (*Config, error) {
return decode(r, false, 0)
}
func decode(r io.Reader, system bool, depth uint8) (c *Config, err error) {
defer func() {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
panic(r)
}
if e, ok := r.(error); ok && e == ErrDepthExceeded {
err = e
return
}
err = errors.New(r.(string))
}
}()
c = parseSSH(lexSSH(r), system, depth)
return c, err
}
// Config represents an SSH config file.
type Config struct {
// A list of hosts to match against. The file begins with an implicit
// "Host *" declaration matching all hosts.
Hosts []*Host
depth uint8
position Position
}
// Get finds the first value in the configuration that matches the alias and
// contains key. Get returns the empty string if no value was found, or if the
// Config contains an invalid conditional Include value.
//
// The match for key is case insensitive.
func (c *Config) Get(alias, key string) (string, error) {
lowerKey := strings.ToLower(key)
for _, host := range c.Hosts {
if !host.Matches(alias) {
continue
}
for _, node := range host.Nodes {
switch t := node.(type) {
case *Empty:
continue
case *KV:
// "keys are case insensitive" per the spec
lkey := strings.ToLower(t.Key)
if lkey == "match" {
panic("can't handle Match directives")
}
if lkey == lowerKey {
return t.Value, nil
}
case *Include:
val := t.Get(alias, key)
if val != "" {
return val, nil
}
default:
return "", fmt.Errorf("unknown Node type %v", t)
}
}
}
return "", nil
}
// String returns a string representation of the Config file.
func (c Config) String() string {
return marshal(c).String()
}
func (c Config) MarshalText() ([]byte, error) {
return marshal(c).Bytes(), nil
}
func marshal(c Config) *bytes.Buffer {
var buf bytes.Buffer
for i := range c.Hosts {
buf.WriteString(c.Hosts[i].String())
}
return &buf
}
// Pattern is a pattern in a Host declaration. Patterns are read-only values;
// create a new one with NewPattern().
type Pattern struct {
str string // Its appearance in the file, not the value that gets compiled.
regex *regexp.Regexp
not bool // True if this is a negated match
}
// String prints the string representation of the pattern.
func (p Pattern) String() string {
return p.str
}
// Copied from regexp.go with * and ? removed.
var specialBytes = []byte(`\.+()|[]{}^$`)
func special(b byte) bool {
return bytes.IndexByte(specialBytes, b) >= 0
}
// NewPattern creates a new Pattern for matching hosts. NewPattern("*") creates
// a Pattern that matches all hosts.
//
// From the manpage, a pattern consists of zero or more non-whitespace
// characters, `*' (a wildcard that matches zero or more characters), or `?' (a
// wildcard that matches exactly one character). For example, to specify a set
// of declarations for any host in the ".co.uk" set of domains, the following
// pattern could be used:
//
// Host *.co.uk
//
// The following pattern would match any host in the 192.168.0.[0-9] network range:
//
// Host 192.168.0.?
func NewPattern(s string) (*Pattern, error) {
if s == "" {
return nil, errors.New("ssh_config: empty pattern")
}
negated := false
if s[0] == '!' {
negated = true
s = s[1:]
}
var buf bytes.Buffer
buf.WriteByte('^')
for i := 0; i < len(s); i++ {
// A byte loop is correct because all metacharacters are ASCII.
switch b := s[i]; b {
case '*':
buf.WriteString(".*")
case '?':
buf.WriteString(".?")
default:
// borrowing from QuoteMeta here.
if special(b) {
buf.WriteByte('\\')
}
buf.WriteByte(b)
}
}
buf.WriteByte('$')
r, err := regexp.Compile(buf.String())
if err != nil {
return nil, err
}
return &Pattern{str: s, regex: r, not: negated}, nil
}
// Host describes a Host directive and the keywords that follow it.
type Host struct {
// A list of host patterns that should match this host.
Patterns []*Pattern
// A Node is either a key/value pair or a comment line.
Nodes []Node
// EOLComment is the comment (if any) terminating the Host line.
EOLComment string
hasEquals bool
leadingSpace uint16 // TODO: handle spaces vs tabs here.
// The file starts with an implicit "Host *" declaration.
implicit bool
}
// Matches returns true if the Host matches for the given alias. For
// a description of the rules that provide a match, see the manpage for
// ssh_config.
func (h *Host) Matches(alias string) bool {
found := false
for i := range h.Patterns {
if h.Patterns[i].regex.MatchString(alias) {
if h.Patterns[i].not == true {
// Negated match. "A pattern entry may be negated by prefixing
// it with an exclamation mark (`!'). If a negated entry is
// matched, then the Host entry is ignored, regardless of
// whether any other patterns on the line match. Negated matches
// are therefore useful to provide exceptions for wildcard
// matches."
return false
}
found = true
}
}
return found
}
// String prints h as it would appear in a config file. Minor tweaks may be
// present in the whitespace in the printed file.
func (h *Host) String() string {
var buf bytes.Buffer
if h.implicit == false {
buf.WriteString(strings.Repeat(" ", int(h.leadingSpace)))
buf.WriteString("Host")
if h.hasEquals {
buf.WriteString(" = ")
} else {
buf.WriteString(" ")
}
for i, pat := range h.Patterns {
buf.WriteString(pat.String())
if i < len(h.Patterns)-1 {
buf.WriteString(" ")
}
}
if h.EOLComment != "" {
buf.WriteString(" #")
buf.WriteString(h.EOLComment)
}
buf.WriteByte('\n')
}
for i := range h.Nodes {
buf.WriteString(h.Nodes[i].String())
buf.WriteByte('\n')
}
return buf.String()
}
// Node represents a line in a Config.
type Node interface {
Pos() Position
String() string
}
// KV is a line in the config file that contains a key, a value, and possibly
// a comment.
type KV struct {
Key string
Value string
Comment string
hasEquals bool
leadingSpace uint16 // Space before the key. TODO handle spaces vs tabs.
position Position
}
// Pos returns k's Position.
func (k *KV) Pos() Position {
return k.position
}
// String prints k as it was parsed in the config file. There may be slight
// changes to the whitespace between values.
func (k *KV) String() string {
if k == nil {
return ""
}
equals := " "
if k.hasEquals {
equals = " = "
}
line := fmt.Sprintf("%s%s%s%s", strings.Repeat(" ", int(k.leadingSpace)), k.Key, equals, k.Value)
if k.Comment != "" {
line += " #" + k.Comment
}
return line
}
// Empty is a line in the config file that contains only whitespace or comments.
type Empty struct {
Comment string
leadingSpace uint16 // TODO handle spaces vs tabs.
position Position
}
// Pos returns e's Position.
func (e *Empty) Pos() Position {
return e.position
}
// String prints e as it was parsed in the config file.
func (e *Empty) String() string {
if e == nil {
return ""
}
if e.Comment == "" {
return ""
}
return fmt.Sprintf("%s#%s", strings.Repeat(" ", int(e.leadingSpace)), e.Comment)
}
// Include holds the result of an Include directive, including the config files
// that have been parsed as part of that directive. At most 5 levels of Include
// statements will be parsed.
type Include struct {
// Comment is the contents of any comment at the end of the Include
// statement.
Comment string
parsed bool
// an include directive can include several different files, and wildcards
directives []string
mu sync.Mutex
// 1:1 mapping between matches and keys in files array; matches preserves
// ordering
matches []string
// actual filenames are listed here
files map[string]*Config
leadingSpace uint16
position Position
depth uint8
hasEquals bool
}
const maxRecurseDepth = 5
// ErrDepthExceeded is returned if too many Include directives are parsed.
// Usually this indicates a recursive loop (an Include directive pointing to the
// file it contains).
var ErrDepthExceeded = errors.New("ssh_config: max recurse depth exceeded")
func removeDups(arr []string) []string {
// Use map to record duplicates as we find them.
encountered := make(map[string]bool, len(arr))
result := make([]string, 0)
for v := range arr {
if encountered[arr[v]] == false {
encountered[arr[v]] = true
result = append(result, arr[v])
}
}
return result
}
// NewInclude creates a new Include with a list of file globs to include.
// Configuration files are parsed greedily (i.e. as soon as this function runs).
// Any error encountered while parsing nested configuration files will be
// returned.
func NewInclude(directives []string, hasEquals bool, pos Position, comment string, system bool, depth uint8) (*Include, error) {
if depth > maxRecurseDepth {
return nil, ErrDepthExceeded
}
inc := &Include{
Comment: comment,
directives: directives,
files: make(map[string]*Config),
position: pos,
leadingSpace: uint16(pos.Col) - 1,
depth: depth,
hasEquals: hasEquals,
}
// no need for inc.mu.Lock() since nothing else can access this inc
matches := make([]string, 0)
for i := range directives {
var path string
if filepath.IsAbs(directives[i]) {
path = directives[i]
} else if system {
path = filepath.Join("/etc/ssh", directives[i])
} else {
path = filepath.Join(homedir(), ".ssh", directives[i])
}
theseMatches, err := filepath.Glob(path)
if err != nil {
return nil, err
}
matches = append(matches, theseMatches...)
}
matches = removeDups(matches)
inc.matches = matches
for i := range matches {
config, err := parseWithDepth(matches[i], depth)
if err != nil {
return nil, err
}
inc.files[matches[i]] = config
}
return inc, nil
}
// Pos returns the position of the Include directive in the larger file.
func (i *Include) Pos() Position {
return i.position
}
// Get finds the first value in the Include statement matching the alias and the
// given key.
func (inc *Include) Get(alias, key string) string {
inc.mu.Lock()
defer inc.mu.Unlock()
// TODO: we search files in any order which is not correct
for i := range inc.matches {
cfg := inc.files[inc.matches[i]]
if cfg == nil {
panic("nil cfg")
}
val, err := cfg.Get(alias, key)
if err == nil && val != "" {
return val
}
}
return ""
}
// String prints out a string representation of this Include directive. Note
// included Config files are not printed as part of this representation.
func (inc *Include) String() string {
equals := " "
if inc.hasEquals {
equals = " = "
}
line := fmt.Sprintf("%sInclude%s%s", strings.Repeat(" ", int(inc.leadingSpace)), equals, strings.Join(inc.directives, " "))
if inc.Comment != "" {
line += " #" + inc.Comment
}
return line
}
var matchAll *Pattern
func init() {
var err error
matchAll, err = NewPattern("*")
if err != nil {
panic(err)
}
}
func newConfig() *Config {
return &Config{
Hosts: []*Host{
&Host{
implicit: true,
Patterns: []*Pattern{matchAll},
Nodes: make([]Node, 0),
},
},
depth: 0,
}
}
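Taken together, Get, GetStrict and the config finders above form this package's lookup path: user config, then system config, then the built-in defaults. A minimal usage sketch, not part of the vendored file; the host alias "myhost" is invented for illustration:

package main

import (
    "fmt"

    "github.com/kevinburke/ssh_config"
)

func main() {
    // Falls back to ~/.ssh/config, then /etc/ssh/ssh_config, then the
    // built-in default ("22" for Port) when no declaration matches.
    port := ssh_config.Get("myhost", "Port")
    fmt.Println("port:", port)

    // GetStrict surfaces parse errors instead of swallowing them.
    user, err := ssh_config.GetStrict("myhost", "User")
    if err != nil {
        fmt.Println("could not parse ssh config:", err)
        return
    }
    fmt.Println("user:", user)
}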

241
vendor/github.com/kevinburke/ssh_config/lexer.go generated vendored Normal file

@ -0,0 +1,241 @@
package ssh_config
import (
"io"
buffruneio "github.com/pelletier/go-buffruneio"
)
// Define state functions
type sshLexStateFn func() sshLexStateFn
type sshLexer struct {
input *buffruneio.Reader // Textual source
buffer []rune // Runes composing the current token
tokens chan token
line uint32
col uint16
endbufferLine uint32
endbufferCol uint16
}
func (s *sshLexer) lexComment(previousState sshLexStateFn) sshLexStateFn {
return func() sshLexStateFn {
growingString := ""
for next := s.peek(); next != '\n' && next != eof; next = s.peek() {
if next == '\r' && s.follow("\r\n") {
break
}
growingString += string(next)
s.next()
}
s.emitWithValue(tokenComment, growingString)
s.skip()
return previousState
}
}
// lex the space after an equals sign in a function
func (s *sshLexer) lexRspace() sshLexStateFn {
for {
next := s.peek()
if !isSpace(next) {
break
}
s.skip()
}
return s.lexRvalue
}
func (s *sshLexer) lexEquals() sshLexStateFn {
for {
next := s.peek()
if next == '=' {
s.emit(tokenEquals)
s.skip()
return s.lexRspace
}
// TODO error handling here; newline eof etc.
if !isSpace(next) {
break
}
s.skip()
}
return s.lexRvalue
}
func (s *sshLexer) lexKey() sshLexStateFn {
growingString := ""
for r := s.peek(); isKeyChar(r); r = s.peek() {
// simplified a lot here
if isSpace(r) || r == '=' {
s.emitWithValue(tokenKey, growingString)
s.skip()
return s.lexEquals
}
growingString += string(r)
s.next()
}
s.emitWithValue(tokenKey, growingString)
return s.lexEquals
}
func (s *sshLexer) lexRvalue() sshLexStateFn {
growingString := ""
for {
next := s.peek()
switch next {
case '\r':
if s.follow("\r\n") {
s.emitWithValue(tokenString, growingString)
s.skip()
return s.lexVoid
}
case '\n':
s.emitWithValue(tokenString, growingString)
s.skip()
return s.lexVoid
case '#':
s.emitWithValue(tokenString, growingString)
s.skip()
return s.lexComment(s.lexVoid)
case eof:
s.next()
}
if next == eof {
break
}
growingString += string(next)
s.next()
}
s.emit(tokenEOF)
return nil
}
func (s *sshLexer) read() rune {
r, _, err := s.input.ReadRune()
if err != nil {
panic(err)
}
if r == '\n' {
s.endbufferLine++
s.endbufferCol = 1
} else {
s.endbufferCol++
}
return r
}
func (s *sshLexer) next() rune {
r := s.read()
if r != eof {
s.buffer = append(s.buffer, r)
}
return r
}
func (s *sshLexer) lexVoid() sshLexStateFn {
for {
next := s.peek()
switch next {
case '#':
s.skip()
return s.lexComment(s.lexVoid)
case '\r':
fallthrough
case '\n':
s.emit(tokenEmptyLine)
s.skip()
continue
}
if isSpace(next) {
s.skip()
}
if isKeyStartChar(next) {
return s.lexKey
}
// removed IsKeyStartChar and lexKey. probably will need to readd
if next == eof {
s.next()
break
}
}
s.emit(tokenEOF)
return nil
}
func (s *sshLexer) ignore() {
s.buffer = make([]rune, 0)
s.line = s.endbufferLine
s.col = s.endbufferCol
}
func (s *sshLexer) skip() {
s.next()
s.ignore()
}
func (s *sshLexer) emit(t tokenType) {
s.emitWithValue(t, string(s.buffer))
}
func (s *sshLexer) emitWithValue(t tokenType, value string) {
tok := token{
Position: Position{s.line, s.col},
typ: t,
val: value,
}
s.tokens <- tok
s.ignore()
}
func (s *sshLexer) peek() rune {
r, _, err := s.input.ReadRune()
if err != nil {
panic(err)
}
s.input.UnreadRune()
return r
}
func (s *sshLexer) follow(next string) bool {
for _, expectedRune := range next {
r, _, err := s.input.ReadRune()
defer s.input.UnreadRune()
if err != nil {
panic(err)
}
if expectedRune != r {
return false
}
}
return true
}
func (s *sshLexer) run() {
for state := s.lexVoid; state != nil; {
state = state()
}
close(s.tokens)
}
func lexSSH(input io.Reader) chan token {
bufferedInput := buffruneio.NewReader(input)
l := &sshLexer{
input: bufferedInput,
tokens: make(chan token),
line: 1,
col: 1,
endbufferLine: 1,
endbufferCol: 1,
}
go l.run()
return l.tokens
}

182
vendor/github.com/kevinburke/ssh_config/parser.go generated vendored Normal file

@ -0,0 +1,182 @@
package ssh_config
import (
"fmt"
"strings"
)
type sshParser struct {
flow chan token
config *Config
tokensBuffer []token
currentTable []string
seenTableKeys []string
// /etc/ssh parser or local parser - used to find the default for relative
// filepaths in the Include directive
system bool
depth uint8
}
type sshParserStateFn func() sshParserStateFn
// Formats and panics an error message based on a token
func (p *sshParser) raiseErrorf(tok *token, msg string, args ...interface{}) {
// TODO this format is ugly
panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...))
}
func (p *sshParser) raiseError(tok *token, err error) {
if err == ErrDepthExceeded {
panic(err)
}
// TODO this format is ugly
panic(tok.Position.String() + ": " + err.Error())
}
func (p *sshParser) run() {
for state := p.parseStart; state != nil; {
state = state()
}
}
func (p *sshParser) peek() *token {
if len(p.tokensBuffer) != 0 {
return &(p.tokensBuffer[0])
}
tok, ok := <-p.flow
if !ok {
return nil
}
p.tokensBuffer = append(p.tokensBuffer, tok)
return &tok
}
func (p *sshParser) getToken() *token {
if len(p.tokensBuffer) != 0 {
tok := p.tokensBuffer[0]
p.tokensBuffer = p.tokensBuffer[1:]
return &tok
}
tok, ok := <-p.flow
if !ok {
return nil
}
return &tok
}
func (p *sshParser) parseStart() sshParserStateFn {
tok := p.peek()
// end of stream, parsing is finished
if tok == nil {
return nil
}
switch tok.typ {
case tokenComment, tokenEmptyLine:
return p.parseComment
case tokenKey:
return p.parseKV
case tokenEOF:
return nil
default:
p.raiseErrorf(tok, fmt.Sprintf("unexpected token %q\n", tok))
}
return nil
}
func (p *sshParser) parseKV() sshParserStateFn {
key := p.getToken()
hasEquals := false
val := p.getToken()
if val.typ == tokenEquals {
hasEquals = true
val = p.getToken()
}
comment := ""
tok := p.peek()
if tok.typ == tokenComment && tok.Position.Line == val.Position.Line {
tok = p.getToken()
comment = tok.val
}
if strings.ToLower(key.val) == "match" {
// https://github.com/kevinburke/ssh_config/issues/6
p.raiseErrorf(val, "ssh_config: Match directive parsing is unsupported")
return nil
}
if strings.ToLower(key.val) == "host" {
strPatterns := strings.Split(val.val, " ")
patterns := make([]*Pattern, 0)
for i := range strPatterns {
if strPatterns[i] == "" {
continue
}
pat, err := NewPattern(strPatterns[i])
if err != nil {
p.raiseErrorf(val, "Invalid host pattern: %v", err)
return nil
}
patterns = append(patterns, pat)
}
p.config.Hosts = append(p.config.Hosts, &Host{
Patterns: patterns,
Nodes: make([]Node, 0),
EOLComment: comment,
hasEquals: hasEquals,
})
return p.parseStart
}
lastHost := p.config.Hosts[len(p.config.Hosts)-1]
if strings.ToLower(key.val) == "include" {
inc, err := NewInclude(strings.Split(val.val, " "), hasEquals, key.Position, comment, p.system, p.depth+1)
if err == ErrDepthExceeded {
p.raiseError(val, err)
return nil
}
if err != nil {
p.raiseErrorf(val, "Error parsing Include directive: %v", err)
return nil
}
lastHost.Nodes = append(lastHost.Nodes, inc)
return p.parseStart
}
kv := &KV{
Key: key.val,
Value: val.val,
Comment: comment,
hasEquals: hasEquals,
leadingSpace: uint16(key.Position.Col) - 1,
position: key.Position,
}
lastHost.Nodes = append(lastHost.Nodes, kv)
return p.parseStart
}
func (p *sshParser) parseComment() sshParserStateFn {
comment := p.getToken()
lastHost := p.config.Hosts[len(p.config.Hosts)-1]
lastHost.Nodes = append(lastHost.Nodes, &Empty{
Comment: comment.val,
// account for the "#" as well
leadingSpace: comment.Position.Col - 2,
position: comment.Position,
})
return p.parseStart
}
func parseSSH(flow chan token, system bool, depth uint8) *Config {
result := newConfig()
result.position = Position{1, 1}
parser := &sshParser{
flow: flow,
config: result,
tokensBuffer: make([]token, 0),
currentTable: make([]string, 0),
seenTableKeys: make([]string, 0),
system: system,
depth: depth,
}
parser.run()
return result
}
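Decode (defined in config.go above) drives this lexer/parser pipeline and yields a Config that can be queried and re-serialized. A small round-trip sketch, illustrative only; the config literal is invented:

package main

import (
    "fmt"
    "strings"

    "github.com/kevinburke/ssh_config"
)

func main() {
    input := "Host myhost\n  Port 2222\n  User deploy\n"
    cfg, err := ssh_config.Decode(strings.NewReader(input))
    if err != nil {
        fmt.Println("decode error:", err)
        return
    }
    port, _ := cfg.Get("myhost", "Port")
    fmt.Println("port:", port) // "2222"
    fmt.Print(cfg.String())    // re-serialized config
}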

25
vendor/github.com/kevinburke/ssh_config/position.go generated vendored Normal file

@ -0,0 +1,25 @@
package ssh_config
import "fmt"
// Position of a document element within a SSH document.
//
// Line and Col are both 1-indexed positions for the element's line number and
// column number, respectively. Values of zero or less will cause Invalid()
// to return true.
type Position struct {
Line uint32 // line within the document
Col uint16 // column within the line
}
// String representation of the position.
// Displays 1-indexed line and column numbers.
func (p Position) String() string {
return fmt.Sprintf("(%d, %d)", p.Line, p.Col)
}
// Invalid reports whether the position is invalid, i.e. has a zero or
// negative line or column.
func (p Position) Invalid() bool {
return p.Line <= 0 || p.Col <= 0
}
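Position values are 1-indexed and feed the parser's error messages. A trivial sketch of the two methods above:

package main

import (
    "fmt"

    "github.com/kevinburke/ssh_config"
)

func main() {
    p := ssh_config.Position{Line: 3, Col: 1}
    fmt.Println(p.String(), p.Invalid()) // (3, 1) false

    var zero ssh_config.Position
    fmt.Println(zero.Invalid()) // true
}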

49
vendor/github.com/kevinburke/ssh_config/token.go generated vendored Normal file

@ -0,0 +1,49 @@
package ssh_config
import "fmt"
type token struct {
Position
typ tokenType
val string
}
func (t token) String() string {
switch t.typ {
case tokenEOF:
return "EOF"
}
return fmt.Sprintf("%q", t.val)
}
type tokenType int
const (
eof = -(iota + 1)
)
const (
tokenError tokenType = iota
tokenEOF
tokenEmptyLine
tokenComment
tokenKey
tokenEquals
tokenString
)
func isSpace(r rune) bool {
return r == ' ' || r == '\t'
}
func isKeyStartChar(r rune) bool {
return !(isSpace(r) || r == '\r' || r == '\n' || r == eof)
}
// I'm not sure that this is correct
func isKeyChar(r rune) bool {
// Keys start with the first character that isn't whitespace or [ and end
// with the last non-whitespace character before the equals sign. Keys
// cannot contain a # character.
return !(r == '\r' || r == '\n' || r == eof || r == '=')
}

162
vendor/github.com/kevinburke/ssh_config/validators.go generated vendored Normal file

@ -0,0 +1,162 @@
package ssh_config
import (
"fmt"
"strconv"
"strings"
)
// Default returns the default value for the given keyword, for example "22" if
// the keyword is "Port". Default returns the empty string if the keyword has no
// default, or if the keyword is unknown. Keyword matching is case-insensitive.
//
// Default values are provided by OpenSSH_7.4p1 on a Mac.
func Default(keyword string) string {
return defaults[strings.ToLower(keyword)]
}
// Arguments where the value must be "yes" or "no" and *only* yes or no.
var yesnos = map[string]bool{
strings.ToLower("BatchMode"): true,
strings.ToLower("CanonicalizeFallbackLocal"): true,
strings.ToLower("ChallengeResponseAuthentication"): true,
strings.ToLower("CheckHostIP"): true,
strings.ToLower("ClearAllForwardings"): true,
strings.ToLower("Compression"): true,
strings.ToLower("EnableSSHKeysign"): true,
strings.ToLower("ExitOnForwardFailure"): true,
strings.ToLower("ForwardAgent"): true,
strings.ToLower("ForwardX11"): true,
strings.ToLower("ForwardX11Trusted"): true,
strings.ToLower("GatewayPorts"): true,
strings.ToLower("GSSAPIAuthentication"): true,
strings.ToLower("GSSAPIDelegateCredentials"): true,
strings.ToLower("HostbasedAuthentication"): true,
strings.ToLower("IdentitiesOnly"): true,
strings.ToLower("KbdInteractiveAuthentication"): true,
strings.ToLower("NoHostAuthenticationForLocalhost"): true,
strings.ToLower("PasswordAuthentication"): true,
strings.ToLower("PermitLocalCommand"): true,
strings.ToLower("PubkeyAuthentication"): true,
strings.ToLower("RhostsRSAAuthentication"): true,
strings.ToLower("RSAAuthentication"): true,
strings.ToLower("StreamLocalBindUnlink"): true,
strings.ToLower("TCPKeepAlive"): true,
strings.ToLower("UseKeychain"): true,
strings.ToLower("UsePrivilegedPort"): true,
strings.ToLower("VisualHostKey"): true,
}
var uints = map[string]bool{
strings.ToLower("CanonicalizeMaxDots"): true,
strings.ToLower("CompressionLevel"): true, // 1 to 9
strings.ToLower("ConnectionAttempts"): true,
strings.ToLower("ConnectTimeout"): true,
strings.ToLower("NumberOfPasswordPrompts"): true,
strings.ToLower("Port"): true,
strings.ToLower("ServerAliveCountMax"): true,
strings.ToLower("ServerAliveInterval"): true,
}
func mustBeYesOrNo(lkey string) bool {
return yesnos[lkey]
}
func mustBeUint(lkey string) bool {
return uints[lkey]
}
func validate(key, val string) error {
lkey := strings.ToLower(key)
if mustBeYesOrNo(lkey) && (val != "yes" && val != "no") {
return fmt.Errorf("ssh_config: value for key %q must be 'yes' or 'no', got %q", key, val)
}
if mustBeUint(lkey) {
_, err := strconv.ParseUint(val, 10, 64)
if err != nil {
return fmt.Errorf("ssh_config: %v", err)
}
}
return nil
}
var defaults = map[string]string{
strings.ToLower("AddKeysToAgent"): "no",
strings.ToLower("AddressFamily"): "any",
strings.ToLower("BatchMode"): "no",
strings.ToLower("CanonicalizeFallbackLocal"): "yes",
strings.ToLower("CanonicalizeHostname"): "no",
strings.ToLower("CanonicalizeMaxDots"): "1",
strings.ToLower("ChallengeResponseAuthentication"): "yes",
strings.ToLower("CheckHostIP"): "yes",
// TODO is this still the correct cipher
strings.ToLower("Cipher"): "3des",
strings.ToLower("Ciphers"): "chacha20-poly1305@openssh.com,aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,aes128-cbc,aes192-cbc,aes256-cbc",
strings.ToLower("ClearAllForwardings"): "no",
strings.ToLower("Compression"): "no",
strings.ToLower("CompressionLevel"): "6",
strings.ToLower("ConnectionAttempts"): "1",
strings.ToLower("ControlMaster"): "no",
strings.ToLower("EnableSSHKeysign"): "no",
strings.ToLower("EscapeChar"): "~",
strings.ToLower("ExitOnForwardFailure"): "no",
strings.ToLower("FingerprintHash"): "sha256",
strings.ToLower("ForwardAgent"): "no",
strings.ToLower("ForwardX11"): "no",
strings.ToLower("ForwardX11Timeout"): "20m",
strings.ToLower("ForwardX11Trusted"): "no",
strings.ToLower("GatewayPorts"): "no",
strings.ToLower("GlobalKnownHostsFile"): "/etc/ssh/ssh_known_hosts /etc/ssh/ssh_known_hosts2",
strings.ToLower("GSSAPIAuthentication"): "no",
strings.ToLower("GSSAPIDelegateCredentials"): "no",
strings.ToLower("HashKnownHosts"): "no",
strings.ToLower("HostbasedAuthentication"): "no",
strings.ToLower("HostbasedKeyTypes"): "ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,ssh-rsa",
strings.ToLower("HostKeyAlgorithms"): "ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,ssh-rsa",
// HostName has a dynamic default (the value passed at the command line).
strings.ToLower("IdentitiesOnly"): "no",
strings.ToLower("IdentityFile"): "~/.ssh/identity",
// IPQoS has a dynamic default based on interactive or non-interactive
// sessions.
strings.ToLower("KbdInteractiveAuthentication"): "yes",
strings.ToLower("KexAlgorithms"): "curve25519-sha256,curve25519-sha256@libssh.org,ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group-exchange-sha1,diffie-hellman-group14-sha1",
strings.ToLower("LogLevel"): "INFO",
strings.ToLower("MACs"): "umac-64-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,hmac-sha1-etm@openssh.com,umac-64@openssh.com,umac-128@openssh.com,hmac-sha2-256,hmac-sha2-512,hmac-sha1",
strings.ToLower("NoHostAuthenticationForLocalhost"): "no",
strings.ToLower("NumberOfPasswordPrompts"): "3",
strings.ToLower("PasswordAuthentication"): "yes",
strings.ToLower("PermitLocalCommand"): "no",
strings.ToLower("Port"): "22",
strings.ToLower("PreferredAuthentications"): "gssapi-with-mic,hostbased,publickey,keyboard-interactive,password",
strings.ToLower("Protocol"): "2",
strings.ToLower("ProxyUseFdpass"): "no",
strings.ToLower("PubkeyAcceptedKeyTypes"): "ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,ssh-rsa",
strings.ToLower("PubkeyAuthentication"): "yes",
strings.ToLower("RekeyLimit"): "default none",
strings.ToLower("RhostsRSAAuthentication"): "no",
strings.ToLower("RSAAuthentication"): "yes",
strings.ToLower("ServerAliveCountMax"): "3",
strings.ToLower("ServerAliveInterval"): "0",
strings.ToLower("StreamLocalBindMask"): "0177",
strings.ToLower("StreamLocalBindUnlink"): "no",
strings.ToLower("StrictHostKeyChecking"): "ask",
strings.ToLower("TCPKeepAlive"): "yes",
strings.ToLower("Tunnel"): "no",
strings.ToLower("TunnelDevice"): "any:any",
strings.ToLower("UpdateHostKeys"): "no",
strings.ToLower("UseKeychain"): "no",
strings.ToLower("UsePrivilegedPort"): "no",
strings.ToLower("UserKnownHostsFile"): "~/.ssh/known_hosts ~/.ssh/known_hosts2",
strings.ToLower("VerifyHostKeyDNS"): "no",
strings.ToLower("VisualHostKey"): "no",
strings.ToLower("XAuthLocation"): "/usr/X11R6/bin/xauth",
}
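Default and validate back GetStrict's fallback behavior. A quick sketch of Default against the table above; the keyword choices are illustrative:

package main

import (
    "fmt"

    "github.com/kevinburke/ssh_config"
)

func main() {
    fmt.Println(ssh_config.Default("Port"))      // "22"
    fmt.Println(ssh_config.Default("LogLevel"))  // "INFO"
    fmt.Println(ssh_config.Default("NoSuchKey")) // "" (unknown keyword)
}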

21
vendor/github.com/mitchellh/go-homedir/LICENSE generated vendored Normal file

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2013 Mitchell Hashimoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

157
vendor/github.com/mitchellh/go-homedir/homedir.go generated vendored Normal file

@ -0,0 +1,157 @@
package homedir
import (
"bytes"
"errors"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
)
// DisableCache will disable caching of the home directory. Caching is enabled
// by default.
var DisableCache bool
var homedirCache string
var cacheLock sync.RWMutex
// Dir returns the home directory for the executing user.
//
// This uses an OS-specific method for discovering the home directory.
// An error is returned if a home directory cannot be detected.
func Dir() (string, error) {
if !DisableCache {
cacheLock.RLock()
cached := homedirCache
cacheLock.RUnlock()
if cached != "" {
return cached, nil
}
}
cacheLock.Lock()
defer cacheLock.Unlock()
var result string
var err error
if runtime.GOOS == "windows" {
result, err = dirWindows()
} else {
// Unix-like system, so just assume Unix
result, err = dirUnix()
}
if err != nil {
return "", err
}
homedirCache = result
return result, nil
}
// Expand expands the path to include the home directory if the path
// is prefixed with `~`. If it isn't prefixed with `~`, the path is
// returned as-is.
func Expand(path string) (string, error) {
if len(path) == 0 {
return path, nil
}
if path[0] != '~' {
return path, nil
}
if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
return "", errors.New("cannot expand user-specific home dir")
}
dir, err := Dir()
if err != nil {
return "", err
}
return filepath.Join(dir, path[1:]), nil
}
func dirUnix() (string, error) {
homeEnv := "HOME"
if runtime.GOOS == "plan9" {
// On plan9, env vars are lowercase.
homeEnv = "home"
}
// First prefer the HOME environmental variable
if home := os.Getenv(homeEnv); home != "" {
return home, nil
}
var stdout bytes.Buffer
// If that fails, try OS specific commands
if runtime.GOOS == "darwin" {
cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`)
cmd.Stdout = &stdout
if err := cmd.Run(); err == nil {
result := strings.TrimSpace(stdout.String())
if result != "" {
return result, nil
}
}
} else {
cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
// If the error is ErrNotFound, we ignore it. Otherwise, return it.
if err != exec.ErrNotFound {
return "", err
}
} else {
if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
// username:password:uid:gid:gecos:home:shell
passwdParts := strings.SplitN(passwd, ":", 7)
if len(passwdParts) > 5 {
return passwdParts[5], nil
}
}
}
}
// If all else fails, try the shell
stdout.Reset()
cmd := exec.Command("sh", "-c", "cd && pwd")
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
return "", err
}
result := strings.TrimSpace(stdout.String())
if result == "" {
return "", errors.New("blank output when reading home directory")
}
return result, nil
}
func dirWindows() (string, error) {
// First prefer the HOME environmental variable
if home := os.Getenv("HOME"); home != "" {
return home, nil
}
// Prefer standard environment variable USERPROFILE
if home := os.Getenv("USERPROFILE"); home != "" {
return home, nil
}
drive := os.Getenv("HOMEDRIVE")
path := os.Getenv("HOMEPATH")
home := drive + path
if drive == "" || path == "" {
return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank")
}
return home, nil
}
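Dir and Expand are the package's two entry points. A minimal usage sketch; the "~/.ssh/config" path is only an example:

package main

import (
    "fmt"

    homedir "github.com/mitchellh/go-homedir"
)

func main() {
    home, err := homedir.Dir()
    if err != nil {
        fmt.Println("cannot determine home directory:", err)
        return
    }
    fmt.Println("home:", home)

    // Expand replaces a leading "~" with the home directory.
    expanded, err := homedir.Expand("~/.ssh/config")
    if err != nil {
        fmt.Println("expand error:", err)
        return
    }
    fmt.Println(expanded)
}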

117
vendor/github.com/pelletier/go-buffruneio/buffruneio.go generated vendored Normal file

@ -0,0 +1,117 @@
// Package buffruneio is a wrapper around bufio to provide buffered runes access with unlimited unreads.
package buffruneio
import (
"bufio"
"container/list"
"errors"
"io"
)
// Rune to indicate end of file.
const (
EOF = -(iota + 1)
)
// ErrNoRuneToUnread is returned by UnreadRune() when the read index is already at the beginning of the buffer.
var ErrNoRuneToUnread = errors.New("no rune to unwind")
// Reader implements runes buffering for an io.Reader object.
type Reader struct {
buffer *list.List
current *list.Element
input *bufio.Reader
}
// NewReader returns a new Reader.
func NewReader(rd io.Reader) *Reader {
return &Reader{
buffer: list.New(),
input: bufio.NewReader(rd),
}
}
type runeWithSize struct {
r rune
size int
}
func (rd *Reader) feedBuffer() error {
r, size, err := rd.input.ReadRune()
if err != nil {
if err != io.EOF {
return err
}
r = EOF
}
newRuneWithSize := runeWithSize{r, size}
rd.buffer.PushBack(newRuneWithSize)
if rd.current == nil {
rd.current = rd.buffer.Back()
}
return nil
}
// ReadRune reads the next rune from buffer, or from the underlying reader if needed.
func (rd *Reader) ReadRune() (rune, int, error) {
if rd.current == rd.buffer.Back() || rd.current == nil {
err := rd.feedBuffer()
if err != nil {
return EOF, 0, err
}
}
runeWithSize := rd.current.Value.(runeWithSize)
rd.current = rd.current.Next()
return runeWithSize.r, runeWithSize.size, nil
}
// UnreadRune pushes back the previously read rune in the buffer, extending it if needed.
func (rd *Reader) UnreadRune() error {
if rd.current == rd.buffer.Front() {
return ErrNoRuneToUnread
}
if rd.current == nil {
rd.current = rd.buffer.Back()
} else {
rd.current = rd.current.Prev()
}
return nil
}
// Forget removes runes stored before the current stream position index.
func (rd *Reader) Forget() {
if rd.current == nil {
rd.current = rd.buffer.Back()
}
for ; rd.current != rd.buffer.Front(); rd.buffer.Remove(rd.current.Prev()) {
}
}
// PeekRunes returns at most the next n runes, reading from the underlying source if
// needed. Does not move the current index. It includes EOF if reached.
func (rd *Reader) PeekRunes(n int) []rune {
res := make([]rune, 0, n)
cursor := rd.current
for i := 0; i < n; i++ {
if cursor == nil {
err := rd.feedBuffer()
if err != nil {
return res
}
cursor = rd.buffer.Back()
}
if cursor != nil {
r := cursor.Value.(runeWithSize).r
res = append(res, r)
if r == EOF {
return res
}
cursor = cursor.Next()
}
}
return res
}
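Within this dependency set, the Reader backs the ssh_config lexer's peek/unread lookahead. A standalone sketch of the same calls; the input string is invented:

package main

import (
    "fmt"
    "strings"

    buffruneio "github.com/pelletier/go-buffruneio"
)

func main() {
    rd := buffruneio.NewReader(strings.NewReader("héllo"))

    r, size, _ := rd.ReadRune()
    fmt.Printf("read %q (%d bytes)\n", r, size)

    // Unread the rune, then peek ahead without moving the read position.
    _ = rd.UnreadRune()
    fmt.Println(rd.PeekRunes(3)) // [104 233 108] i.e. 'h', 'é', 'l'
}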

25
vendor/github.com/sergi/go-diff/AUTHORS generated vendored Normal file

@ -0,0 +1,25 @@
# This is the official list of go-diff authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
# Names should be added to this file as
# Name or Organization <email address>
# The email address is not required for organizations.
# Please keep the list sorted.
Danny Yoo <dannyyoo@google.com>
James Kolb <jkolb@google.com>
Jonathan Amsterdam <jba@google.com>
Markus Zimmermann <markus.zimmermann@nethead.at> <markus.zimmermann@symflower.com> <zimmski@gmail.com>
Matt Kovars <akaskik@gmail.com>
Örjan Persson <orjan@spotify.com>
Osman Masood <oamasood@gmail.com>
Robert Carlsen <rwcarlsen@gmail.com>
Rory Flynn <roryflynn@users.noreply.github.com>
Sergi Mansilla <sergi.mansilla@gmail.com>
Shatrugna Sadhu <ssadhu@apcera.com>
Shawn Smith <shawnpsmith@gmail.com>
Stas Maksimov <maksimov@gmail.com>
Tor Arvid Lund <torarvid@gmail.com>
Zac Bergquist <zbergquist99@gmail.com>

32
vendor/github.com/sergi/go-diff/CONTRIBUTORS generated vendored Normal file

@ -0,0 +1,32 @@
# This is the official list of people who can contribute
# (and typically have contributed) code to the go-diff
# repository.
#
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, ACME Inc. employees would be listed here
# but not in AUTHORS, because ACME Inc. would hold the copyright.
#
# When adding J Random Contributor's name to this file,
# either J's name or J's organization's name should be
# added to the AUTHORS file.
#
# Names should be added to this file like so:
# Name <email address>
#
# Please keep the list sorted.
Danny Yoo <dannyyoo@google.com>
James Kolb <jkolb@google.com>
Jonathan Amsterdam <jba@google.com>
Markus Zimmermann <markus.zimmermann@nethead.at> <markus.zimmermann@symflower.com> <zimmski@gmail.com>
Matt Kovars <akaskik@gmail.com>
Örjan Persson <orjan@spotify.com>
Osman Masood <oamasood@gmail.com>
Robert Carlsen <rwcarlsen@gmail.com>
Rory Flynn <roryflynn@users.noreply.github.com>
Sergi Mansilla <sergi.mansilla@gmail.com>
Shatrugna Sadhu <ssadhu@apcera.com>
Shawn Smith <shawnpsmith@gmail.com>
Stas Maksimov <maksimov@gmail.com>
Tor Arvid Lund <torarvid@gmail.com>
Zac Bergquist <zbergquist99@gmail.com>

20
vendor/github.com/sergi/go-diff/LICENSE generated vendored Normal file

@ -0,0 +1,20 @@
Copyright (c) 2012-2016 The go-diff Authors. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

1344
vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go generated vendored Normal file

File diff suppressed because it is too large

@ -0,0 +1,46 @@
// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
// https://github.com/sergi/go-diff
// See the included LICENSE file for license details.
//
// go-diff is a Go implementation of Google's Diff, Match, and Patch library
// Original library is Copyright (c) 2006 Google Inc.
// http://code.google.com/p/google-diff-match-patch/
// Package diffmatchpatch offers robust algorithms to perform the operations required for synchronizing plain text.
package diffmatchpatch
import (
"time"
)
// DiffMatchPatch holds the configuration for diff-match-patch operations.
type DiffMatchPatch struct {
// Number of seconds to map a diff before giving up (0 for infinity).
DiffTimeout time.Duration
// Cost of an empty edit operation in terms of edit characters.
DiffEditCost int
// How far to search for a match (0 = exact location, 1000+ = broad match). A match this many characters away from the expected location will add 1.0 to the score (0.0 is a perfect match).
MatchDistance int
// When deleting a large block of text (over ~64 characters), how close do the contents have to be to match the expected contents. (0.0 = perfection, 1.0 = very loose). Note that MatchThreshold controls how closely the end points of a delete need to match.
PatchDeleteThreshold float64
// Chunk size for context length.
PatchMargin int
// The number of bits in an int.
MatchMaxBits int
// At what point is no match declared (0.0 = perfection, 1.0 = very loose).
MatchThreshold float64
}
// New creates a new DiffMatchPatch object with default parameters.
func New() *DiffMatchPatch {
// Defaults.
return &DiffMatchPatch{
DiffTimeout: time.Second,
DiffEditCost: 4,
MatchThreshold: 0.5,
MatchDistance: 1000,
PatchDeleteThreshold: 0.5,
PatchMargin: 4,
MatchMaxBits: 32,
}
}
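New fills in the tuning knobs documented above. A short usage sketch; DiffMain is referenced elsewhere in this vendored package (e.g. in patch.go below) and lives in the suppressed diff.go, and the input strings here are invented:

package main

import (
    "fmt"

    "github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
    dmp := diffmatchpatch.New()
    diffs := dmp.DiffMain("lazygit is great", "lazygit is grand", false)
    for _, d := range diffs {
        fmt.Printf("%v %q\n", d.Type, d.Text)
    }
}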

160
vendor/github.com/sergi/go-diff/diffmatchpatch/match.go generated vendored Normal file

@ -0,0 +1,160 @@
// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
// https://github.com/sergi/go-diff
// See the included LICENSE file for license details.
//
// go-diff is a Go implementation of Google's Diff, Match, and Patch library
// Original library is Copyright (c) 2006 Google Inc.
// http://code.google.com/p/google-diff-match-patch/
package diffmatchpatch
import (
"math"
)
// MatchMain locates the best instance of 'pattern' in 'text' near 'loc'.
// Returns -1 if no match found.
func (dmp *DiffMatchPatch) MatchMain(text, pattern string, loc int) int {
// Check for null inputs not needed since null can't be passed in C#.
loc = int(math.Max(0, math.Min(float64(loc), float64(len(text)))))
if text == pattern {
// Shortcut (potentially not guaranteed by the algorithm)
return 0
} else if len(text) == 0 {
// Nothing to match.
return -1
} else if loc+len(pattern) <= len(text) && text[loc:loc+len(pattern)] == pattern {
// Perfect match at the perfect spot! (Includes case of null pattern)
return loc
}
// Do a fuzzy compare.
return dmp.MatchBitap(text, pattern, loc)
}
// MatchBitap locates the best instance of 'pattern' in 'text' near 'loc' using the Bitap algorithm.
// Returns -1 if no match was found.
func (dmp *DiffMatchPatch) MatchBitap(text, pattern string, loc int) int {
// Initialise the alphabet.
s := dmp.MatchAlphabet(pattern)
// Highest score beyond which we give up.
scoreThreshold := dmp.MatchThreshold
// Is there a nearby exact match? (speedup)
bestLoc := indexOf(text, pattern, loc)
if bestLoc != -1 {
scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc,
pattern), scoreThreshold)
// What about in the other direction? (speedup)
bestLoc = lastIndexOf(text, pattern, loc+len(pattern))
if bestLoc != -1 {
scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc,
pattern), scoreThreshold)
}
}
// Initialise the bit arrays.
matchmask := 1 << uint((len(pattern) - 1))
bestLoc = -1
var binMin, binMid int
binMax := len(pattern) + len(text)
lastRd := []int{}
for d := 0; d < len(pattern); d++ {
// Scan for the best match; each iteration allows for one more error. Run a binary search to determine how far from 'loc' we can stray at this error level.
binMin = 0
binMid = binMax
for binMin < binMid {
if dmp.matchBitapScore(d, loc+binMid, loc, pattern) <= scoreThreshold {
binMin = binMid
} else {
binMax = binMid
}
binMid = (binMax-binMin)/2 + binMin
}
// Use the result from this iteration as the maximum for the next.
binMax = binMid
start := int(math.Max(1, float64(loc-binMid+1)))
finish := int(math.Min(float64(loc+binMid), float64(len(text))) + float64(len(pattern)))
rd := make([]int, finish+2)
rd[finish+1] = (1 << uint(d)) - 1
for j := finish; j >= start; j-- {
var charMatch int
if len(text) <= j-1 {
// Out of range.
charMatch = 0
} else if _, ok := s[text[j-1]]; !ok {
charMatch = 0
} else {
charMatch = s[text[j-1]]
}
if d == 0 {
// First pass: exact match.
rd[j] = ((rd[j+1] << 1) | 1) & charMatch
} else {
// Subsequent passes: fuzzy match.
rd[j] = ((rd[j+1]<<1)|1)&charMatch | (((lastRd[j+1] | lastRd[j]) << 1) | 1) | lastRd[j+1]
}
if (rd[j] & matchmask) != 0 {
score := dmp.matchBitapScore(d, j-1, loc, pattern)
// This match will almost certainly be better than any existing match. But check anyway.
if score <= scoreThreshold {
// Told you so.
scoreThreshold = score
bestLoc = j - 1
if bestLoc > loc {
// When passing loc, don't exceed our current distance from loc.
start = int(math.Max(1, float64(2*loc-bestLoc)))
} else {
// Already passed loc, downhill from here on in.
break
}
}
}
}
if dmp.matchBitapScore(d+1, loc, loc, pattern) > scoreThreshold {
// No hope for a (better) match at greater error levels.
break
}
lastRd = rd
}
return bestLoc
}
// matchBitapScore computes and returns the score for a match with e errors and x location.
func (dmp *DiffMatchPatch) matchBitapScore(e, x, loc int, pattern string) float64 {
accuracy := float64(e) / float64(len(pattern))
proximity := math.Abs(float64(loc - x))
if dmp.MatchDistance == 0 {
// Dodge divide by zero error.
if proximity == 0 {
return accuracy
}
return 1.0
}
return accuracy + (proximity / float64(dmp.MatchDistance))
}
// MatchAlphabet initialises the alphabet for the Bitap algorithm.
func (dmp *DiffMatchPatch) MatchAlphabet(pattern string) map[byte]int {
s := map[byte]int{}
charPattern := []byte(pattern)
for _, c := range charPattern {
_, ok := s[c]
if !ok {
s[c] = 0
}
}
i := 0
for _, c := range charPattern {
value := s[c] | int(uint(1)<<uint((len(pattern)-i-1)))
s[c] = value
i++
}
return s
}
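MatchMain and MatchBitap locate a pattern near an expected offset with fuzzy matching. A small sketch; the text, pattern and offset are invented:

package main

import (
    "fmt"

    "github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
    dmp := diffmatchpatch.New()
    text := "The quick brown fox jumps over the lazy dog"
    // Look for a slightly misspelled pattern near offset 5.
    loc := dmp.MatchMain(text, "quikc", 5)
    // Index of the best match (likely the start of "quick"), or -1 if none.
    fmt.Println("best match at index:", loc)
}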


@ -0,0 +1,23 @@
// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
// https://github.com/sergi/go-diff
// See the included LICENSE file for license details.
//
// go-diff is a Go implementation of Google's Diff, Match, and Patch library
// Original library is Copyright (c) 2006 Google Inc.
// http://code.google.com/p/google-diff-match-patch/
package diffmatchpatch
func min(x, y int) int {
if x < y {
return x
}
return y
}
func max(x, y int) int {
if x > y {
return x
}
return y
}

556
vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go generated vendored Normal file

@ -0,0 +1,556 @@
// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
// https://github.com/sergi/go-diff
// See the included LICENSE file for license details.
//
// go-diff is a Go implementation of Google's Diff, Match, and Patch library
// Original library is Copyright (c) 2006 Google Inc.
// http://code.google.com/p/google-diff-match-patch/
package diffmatchpatch
import (
"bytes"
"errors"
"math"
"net/url"
"regexp"
"strconv"
"strings"
)
// Patch represents one patch operation.
type Patch struct {
diffs []Diff
Start1 int
Start2 int
Length1 int
Length2 int
}
// String emulates GNU diff's format.
// Header: @@ -382,8 +481,9 @@
// Indices are printed as 1-based, not 0-based.
func (p *Patch) String() string {
var coords1, coords2 string
if p.Length1 == 0 {
coords1 = strconv.Itoa(p.Start1) + ",0"
} else if p.Length1 == 1 {
coords1 = strconv.Itoa(p.Start1 + 1)
} else {
coords1 = strconv.Itoa(p.Start1+1) + "," + strconv.Itoa(p.Length1)
}
if p.Length2 == 0 {
coords2 = strconv.Itoa(p.Start2) + ",0"
} else if p.Length2 == 1 {
coords2 = strconv.Itoa(p.Start2 + 1)
} else {
coords2 = strconv.Itoa(p.Start2+1) + "," + strconv.Itoa(p.Length2)
}
var text bytes.Buffer
_, _ = text.WriteString("@@ -" + coords1 + " +" + coords2 + " @@\n")
// Escape the body of the patch with %xx notation.
for _, aDiff := range p.diffs {
switch aDiff.Type {
case DiffInsert:
_, _ = text.WriteString("+")
case DiffDelete:
_, _ = text.WriteString("-")
case DiffEqual:
_, _ = text.WriteString(" ")
}
_, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1))
_, _ = text.WriteString("\n")
}
return unescaper.Replace(text.String())
}
// PatchAddContext increases the context until it is unique, but doesn't let the pattern expand beyond MatchMaxBits.
func (dmp *DiffMatchPatch) PatchAddContext(patch Patch, text string) Patch {
if len(text) == 0 {
return patch
}
pattern := text[patch.Start2 : patch.Start2+patch.Length1]
padding := 0
// Look for the first and last matches of pattern in text. If two different matches are found, increase the pattern length.
for strings.Index(text, pattern) != strings.LastIndex(text, pattern) &&
len(pattern) < dmp.MatchMaxBits-2*dmp.PatchMargin {
padding += dmp.PatchMargin
maxStart := max(0, patch.Start2-padding)
minEnd := min(len(text), patch.Start2+patch.Length1+padding)
pattern = text[maxStart:minEnd]
}
// Add one chunk for good luck.
padding += dmp.PatchMargin
// Add the prefix.
prefix := text[max(0, patch.Start2-padding):patch.Start2]
if len(prefix) != 0 {
patch.diffs = append([]Diff{Diff{DiffEqual, prefix}}, patch.diffs...)
}
// Add the suffix.
suffix := text[patch.Start2+patch.Length1 : min(len(text), patch.Start2+patch.Length1+padding)]
if len(suffix) != 0 {
patch.diffs = append(patch.diffs, Diff{DiffEqual, suffix})
}
// Roll back the start points.
patch.Start1 -= len(prefix)
patch.Start2 -= len(prefix)
// Extend the lengths.
patch.Length1 += len(prefix) + len(suffix)
patch.Length2 += len(prefix) + len(suffix)
return patch
}
// PatchMake computes a list of patches.
func (dmp *DiffMatchPatch) PatchMake(opt ...interface{}) []Patch {
if len(opt) == 1 {
diffs, _ := opt[0].([]Diff)
text1 := dmp.DiffText1(diffs)
return dmp.PatchMake(text1, diffs)
} else if len(opt) == 2 {
text1 := opt[0].(string)
switch t := opt[1].(type) {
case string:
diffs := dmp.DiffMain(text1, t, true)
if len(diffs) > 2 {
diffs = dmp.DiffCleanupSemantic(diffs)
diffs = dmp.DiffCleanupEfficiency(diffs)
}
return dmp.PatchMake(text1, diffs)
case []Diff:
return dmp.patchMake2(text1, t)
}
} else if len(opt) == 3 {
return dmp.PatchMake(opt[0], opt[2])
}
return []Patch{}
}
// patchMake2 computes a list of patches to turn text1 into text2.
// text2 is not provided, diffs are the delta between text1 and text2.
func (dmp *DiffMatchPatch) patchMake2(text1 string, diffs []Diff) []Patch {
// Check for null inputs not needed since null can't be passed in C#.
patches := []Patch{}
if len(diffs) == 0 {
return patches // Get rid of the null case.
}
patch := Patch{}
charCount1 := 0 // Number of characters into the text1 string.
charCount2 := 0 // Number of characters into the text2 string.
// Start with text1 (prepatchText) and apply the diffs until we arrive at text2 (postpatchText). We recreate the patches one by one to determine context info.
prepatchText := text1
postpatchText := text1
for i, aDiff := range diffs {
if len(patch.diffs) == 0 && aDiff.Type != DiffEqual {
// A new patch starts here.
patch.Start1 = charCount1
patch.Start2 = charCount2
}
switch aDiff.Type {
case DiffInsert:
patch.diffs = append(patch.diffs, aDiff)
patch.Length2 += len(aDiff.Text)
postpatchText = postpatchText[:charCount2] +
aDiff.Text + postpatchText[charCount2:]
case DiffDelete:
patch.Length1 += len(aDiff.Text)
patch.diffs = append(patch.diffs, aDiff)
postpatchText = postpatchText[:charCount2] + postpatchText[charCount2+len(aDiff.Text):]
case DiffEqual:
if len(aDiff.Text) <= 2*dmp.PatchMargin &&
len(patch.diffs) != 0 && i != len(diffs)-1 {
// Small equality inside a patch.
patch.diffs = append(patch.diffs, aDiff)
patch.Length1 += len(aDiff.Text)
patch.Length2 += len(aDiff.Text)
}
if len(aDiff.Text) >= 2*dmp.PatchMargin {
// Time for a new patch.
if len(patch.diffs) != 0 {
patch = dmp.PatchAddContext(patch, prepatchText)
patches = append(patches, patch)
patch = Patch{}
// Unlike Unidiff, our patch lists have a rolling context. http://code.google.com/p/google-diff-match-patch/wiki/Unidiff Update prepatch text & pos to reflect the application of the just completed patch.
prepatchText = postpatchText
charCount1 = charCount2
}
}
}
// Update the current character count.
if aDiff.Type != DiffInsert {
charCount1 += len(aDiff.Text)
}
if aDiff.Type != DiffDelete {
charCount2 += len(aDiff.Text)
}
}
// Pick up the leftover patch if not empty.
if len(patch.diffs) != 0 {
patch = dmp.PatchAddContext(patch, prepatchText)
patches = append(patches, patch)
}
return patches
}
// PatchDeepCopy returns an array that is identical to a given array of patches.
func (dmp *DiffMatchPatch) PatchDeepCopy(patches []Patch) []Patch {
patchesCopy := []Patch{}
for _, aPatch := range patches {
patchCopy := Patch{}
for _, aDiff := range aPatch.diffs {
patchCopy.diffs = append(patchCopy.diffs, Diff{
aDiff.Type,
aDiff.Text,
})
}
patchCopy.Start1 = aPatch.Start1
patchCopy.Start2 = aPatch.Start2
patchCopy.Length1 = aPatch.Length1
patchCopy.Length2 = aPatch.Length2
patchesCopy = append(patchesCopy, patchCopy)
}
return patchesCopy
}
// PatchApply merges a set of patches onto the text. Returns a patched text, as well as an array of true/false values indicating which patches were applied.
func (dmp *DiffMatchPatch) PatchApply(patches []Patch, text string) (string, []bool) {
if len(patches) == 0 {
return text, []bool{}
}
// Deep copy the patches so that no changes are made to originals.
patches = dmp.PatchDeepCopy(patches)
nullPadding := dmp.PatchAddPadding(patches)
text = nullPadding + text + nullPadding
patches = dmp.PatchSplitMax(patches)
x := 0
// delta keeps track of the offset between the expected and actual location of the previous patch. If there are patches expected at positions 10 and 20, but the first patch was found at 12, delta is 2 and the second patch has an effective expected position of 22.
delta := 0
results := make([]bool, len(patches))
for _, aPatch := range patches {
expectedLoc := aPatch.Start2 + delta
text1 := dmp.DiffText1(aPatch.diffs)
var startLoc int
endLoc := -1
if len(text1) > dmp.MatchMaxBits {
// PatchSplitMax will only provide an oversized pattern in the case of a monster delete.
startLoc = dmp.MatchMain(text, text1[:dmp.MatchMaxBits], expectedLoc)
if startLoc != -1 {
endLoc = dmp.MatchMain(text,
text1[len(text1)-dmp.MatchMaxBits:], expectedLoc+len(text1)-dmp.MatchMaxBits)
if endLoc == -1 || startLoc >= endLoc {
// Can't find valid trailing context. Drop this patch.
startLoc = -1
}
}
} else {
startLoc = dmp.MatchMain(text, text1, expectedLoc)
}
if startLoc == -1 {
// No match found. :(
results[x] = false
// Subtract the delta for this failed patch from subsequent patches.
delta -= aPatch.Length2 - aPatch.Length1
} else {
// Found a match. :)
results[x] = true
delta = startLoc - expectedLoc
var text2 string
if endLoc == -1 {
text2 = text[startLoc:int(math.Min(float64(startLoc+len(text1)), float64(len(text))))]
} else {
text2 = text[startLoc:int(math.Min(float64(endLoc+dmp.MatchMaxBits), float64(len(text))))]
}
if text1 == text2 {
// Perfect match, just shove the Replacement text in.
text = text[:startLoc] + dmp.DiffText2(aPatch.diffs) + text[startLoc+len(text1):]
} else {
// Imperfect match. Run a diff to get a framework of equivalent indices.
diffs := dmp.DiffMain(text1, text2, false)
if len(text1) > dmp.MatchMaxBits && float64(dmp.DiffLevenshtein(diffs))/float64(len(text1)) > dmp.PatchDeleteThreshold {
// The end points match, but the content is unacceptably bad.
results[x] = false
} else {
diffs = dmp.DiffCleanupSemanticLossless(diffs)
index1 := 0
for _, aDiff := range aPatch.diffs {
if aDiff.Type != DiffEqual {
index2 := dmp.DiffXIndex(diffs, index1)
if aDiff.Type == DiffInsert {
// Insertion
text = text[:startLoc+index2] + aDiff.Text + text[startLoc+index2:]
} else if aDiff.Type == DiffDelete {
// Deletion
startIndex := startLoc + index2
text = text[:startIndex] +
text[startIndex+dmp.DiffXIndex(diffs, index1+len(aDiff.Text))-index2:]
}
}
if aDiff.Type != DiffDelete {
index1 += len(aDiff.Text)
}
}
}
}
}
x++
}
// Strip the padding off.
text = text[len(nullPadding) : len(nullPadding)+(len(text)-2*len(nullPadding))]
return text, results
}
// PatchAddPadding adds some padding on text start and end so that edges can match something.
// Intended to be called only from within patchApply.
func (dmp *DiffMatchPatch) PatchAddPadding(patches []Patch) string {
paddingLength := dmp.PatchMargin
nullPadding := ""
for x := 1; x <= paddingLength; x++ {
nullPadding += string(x)
}
// Bump all the patches forward.
for i := range patches {
patches[i].Start1 += paddingLength
patches[i].Start2 += paddingLength
}
// Add some padding on start of first diff.
if len(patches[0].diffs) == 0 || patches[0].diffs[0].Type != DiffEqual {
// Add nullPadding equality.
patches[0].diffs = append([]Diff{Diff{DiffEqual, nullPadding}}, patches[0].diffs...)
patches[0].Start1 -= paddingLength // Should be 0.
patches[0].Start2 -= paddingLength // Should be 0.
patches[0].Length1 += paddingLength
patches[0].Length2 += paddingLength
} else if paddingLength > len(patches[0].diffs[0].Text) {
// Grow first equality.
extraLength := paddingLength - len(patches[0].diffs[0].Text)
patches[0].diffs[0].Text = nullPadding[len(patches[0].diffs[0].Text):] + patches[0].diffs[0].Text
patches[0].Start1 -= extraLength
patches[0].Start2 -= extraLength
patches[0].Length1 += extraLength
patches[0].Length2 += extraLength
}
// Add some padding on end of last diff.
last := len(patches) - 1
if len(patches[last].diffs) == 0 || patches[last].diffs[len(patches[last].diffs)-1].Type != DiffEqual {
// Add nullPadding equality.
patches[last].diffs = append(patches[last].diffs, Diff{DiffEqual, nullPadding})
patches[last].Length1 += paddingLength
patches[last].Length2 += paddingLength
} else if paddingLength > len(patches[last].diffs[len(patches[last].diffs)-1].Text) {
// Grow last equality.
lastDiff := patches[last].diffs[len(patches[last].diffs)-1]
extraLength := paddingLength - len(lastDiff.Text)
patches[last].diffs[len(patches[last].diffs)-1].Text += nullPadding[:extraLength]
patches[last].Length1 += extraLength
patches[last].Length2 += extraLength
}
return nullPadding
}
// PatchSplitMax looks through the patches and breaks up any which are longer than the maximum limit of the match algorithm.
// Intended to be called only from within patchApply.
func (dmp *DiffMatchPatch) PatchSplitMax(patches []Patch) []Patch {
patchSize := dmp.MatchMaxBits
for x := 0; x < len(patches); x++ {
if patches[x].Length1 <= patchSize {
continue
}
bigpatch := patches[x]
// Remove the big old patch.
patches = append(patches[:x], patches[x+1:]...)
x--
Start1 := bigpatch.Start1
Start2 := bigpatch.Start2
precontext := ""
for len(bigpatch.diffs) != 0 {
// Create one of several smaller patches.
patch := Patch{}
empty := true
patch.Start1 = Start1 - len(precontext)
patch.Start2 = Start2 - len(precontext)
if len(precontext) != 0 {
patch.Length1 = len(precontext)
patch.Length2 = len(precontext)
patch.diffs = append(patch.diffs, Diff{DiffEqual, precontext})
}
for len(bigpatch.diffs) != 0 && patch.Length1 < patchSize-dmp.PatchMargin {
diffType := bigpatch.diffs[0].Type
diffText := bigpatch.diffs[0].Text
if diffType == DiffInsert {
// Insertions are harmless.
patch.Length2 += len(diffText)
Start2 += len(diffText)
patch.diffs = append(patch.diffs, bigpatch.diffs[0])
bigpatch.diffs = bigpatch.diffs[1:]
empty = false
} else if diffType == DiffDelete && len(patch.diffs) == 1 && patch.diffs[0].Type == DiffEqual && len(diffText) > 2*patchSize {
// This is a large deletion. Let it pass in one chunk.
patch.Length1 += len(diffText)
Start1 += len(diffText)
empty = false
patch.diffs = append(patch.diffs, Diff{diffType, diffText})
bigpatch.diffs = bigpatch.diffs[1:]
} else {
// Deletion or equality. Only take as much as we can stomach.
diffText = diffText[:min(len(diffText), patchSize-patch.Length1-dmp.PatchMargin)]
patch.Length1 += len(diffText)
Start1 += len(diffText)
if diffType == DiffEqual {
patch.Length2 += len(diffText)
Start2 += len(diffText)
} else {
empty = false
}
patch.diffs = append(patch.diffs, Diff{diffType, diffText})
if diffText == bigpatch.diffs[0].Text {
bigpatch.diffs = bigpatch.diffs[1:]
} else {
bigpatch.diffs[0].Text =
bigpatch.diffs[0].Text[len(diffText):]
}
}
}
// Compute the head context for the next patch.
precontext = dmp.DiffText2(patch.diffs)
precontext = precontext[max(0, len(precontext)-dmp.PatchMargin):]
postcontext := ""
// Append the end context for this patch.
if len(dmp.DiffText1(bigpatch.diffs)) > dmp.PatchMargin {
postcontext = dmp.DiffText1(bigpatch.diffs)[:dmp.PatchMargin]
} else {
postcontext = dmp.DiffText1(bigpatch.diffs)
}
if len(postcontext) != 0 {
patch.Length1 += len(postcontext)
patch.Length2 += len(postcontext)
if len(patch.diffs) != 0 && patch.diffs[len(patch.diffs)-1].Type == DiffEqual {
patch.diffs[len(patch.diffs)-1].Text += postcontext
} else {
patch.diffs = append(patch.diffs, Diff{DiffEqual, postcontext})
}
}
if !empty {
x++
patches = append(patches[:x], append([]Patch{patch}, patches[x:]...)...)
}
}
}
return patches
}
// PatchToText takes a list of patches and returns a textual representation.
func (dmp *DiffMatchPatch) PatchToText(patches []Patch) string {
var text bytes.Buffer
for _, aPatch := range patches {
_, _ = text.WriteString(aPatch.String())
}
return text.String()
}
// PatchFromText parses a textual representation of patches and returns a List of Patch objects.
func (dmp *DiffMatchPatch) PatchFromText(textline string) ([]Patch, error) {
patches := []Patch{}
if len(textline) == 0 {
return patches, nil
}
text := strings.Split(textline, "\n")
textPointer := 0
patchHeader := regexp.MustCompile("^@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@$")
var patch Patch
var sign uint8
var line string
for textPointer < len(text) {
if !patchHeader.MatchString(text[textPointer]) {
return patches, errors.New("Invalid patch string: " + text[textPointer])
}
patch = Patch{}
m := patchHeader.FindStringSubmatch(text[textPointer])
patch.Start1, _ = strconv.Atoi(m[1])
if len(m[2]) == 0 {
patch.Start1--
patch.Length1 = 1
} else if m[2] == "0" {
patch.Length1 = 0
} else {
patch.Start1--
patch.Length1, _ = strconv.Atoi(m[2])
}
patch.Start2, _ = strconv.Atoi(m[3])
if len(m[4]) == 0 {
patch.Start2--
patch.Length2 = 1
} else if m[4] == "0" {
patch.Length2 = 0
} else {
patch.Start2--
patch.Length2, _ = strconv.Atoi(m[4])
}
textPointer++
for textPointer < len(text) {
if len(text[textPointer]) > 0 {
sign = text[textPointer][0]
} else {
textPointer++
continue
}
line = text[textPointer][1:]
line = strings.Replace(line, "+", "%2b", -1)
line, _ = url.QueryUnescape(line)
if sign == '-' {
// Deletion.
patch.diffs = append(patch.diffs, Diff{DiffDelete, line})
} else if sign == '+' {
// Insertion.
patch.diffs = append(patch.diffs, Diff{DiffInsert, line})
} else if sign == ' ' {
// Minor equality.
patch.diffs = append(patch.diffs, Diff{DiffEqual, line})
} else if sign == '@' {
// Start of next patch.
break
} else {
// WTF?
return patches, errors.New("Invalid patch mode '" + string(sign) + "' in: " + string(line))
}
textPointer++
}
patches = append(patches, patch)
}
return patches, nil
}
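A usage sketch, not part of the vendored file above, showing how these exported patch functions compose: the PatchMake entry point (defined earlier in the same vendored file, not shown in this excerpt) builds patches, PatchToText and PatchFromText round-trip them as text, and PatchApply applies them. The import path matches the vendored github.com/sergi/go-diff/diffmatchpatch package; the sample texts are invented.

package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	text1 := "The quick brown fox"
	text2 := "The quick red fox jumps"

	// Build patches from the two texts, serialize them, parse them back, and apply.
	patches := dmp.PatchMake(text1, text2)
	serialized := dmp.PatchToText(patches)

	parsed, err := dmp.PatchFromText(serialized)
	if err != nil {
		panic(err)
	}

	result, applied := dmp.PatchApply(parsed, text1)
	fmt.Println(result)  // "The quick red fox jumps"
	fmt.Println(applied) // one bool per patch, e.g. [true]
}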


@@ -0,0 +1,88 @@
// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
// https://github.com/sergi/go-diff
// See the included LICENSE file for license details.
//
// go-diff is a Go implementation of Google's Diff, Match, and Patch library
// Original library is Copyright (c) 2006 Google Inc.
// http://code.google.com/p/google-diff-match-patch/
package diffmatchpatch
import (
"strings"
"unicode/utf8"
)
// unescaper unescapes selected chars for compatibility with JavaScript's encodeURI.
// In speed critical applications this could be dropped since the receiving application will certainly decode these fine.
// Note that this function is case-sensitive. Thus "%3F" would not be unescaped. But this is ok because it is only
// called with the output of HttpUtility.UrlEncode which returns lowercase hex.
//
// Example: "%3f" -> "?", "%24" -> "$", etc.
var unescaper = strings.NewReplacer(
"%21", "!", "%7E", "~", "%27", "'",
"%28", "(", "%29", ")", "%3B", ";",
"%2F", "/", "%3F", "?", "%3A", ":",
"%40", "@", "%26", "&", "%3D", "=",
"%2B", "+", "%24", "$", "%2C", ",", "%23", "#", "%2A", "*")
// indexOf returns the first index of pattern in str, starting at str[i].
func indexOf(str string, pattern string, i int) int {
if i > len(str)-1 {
return -1
}
if i <= 0 {
return strings.Index(str, pattern)
}
ind := strings.Index(str[i:], pattern)
if ind == -1 {
return -1
}
return ind + i
}
// lastIndexOf returns the last index of pattern in str, starting at str[i].
func lastIndexOf(str string, pattern string, i int) int {
if i < 0 {
return -1
}
if i >= len(str) {
return strings.LastIndex(str, pattern)
}
_, size := utf8.DecodeRuneInString(str[i:])
return strings.LastIndex(str[:i+size], pattern)
}
// runesIndexOf returns the index of pattern in target, starting at target[i].
func runesIndexOf(target, pattern []rune, i int) int {
if i > len(target)-1 {
return -1
}
if i <= 0 {
return runesIndex(target, pattern)
}
ind := runesIndex(target[i:], pattern)
if ind == -1 {
return -1
}
return ind + i
}
func runesEqual(r1, r2 []rune) bool {
if len(r1) != len(r2) {
return false
}
for i, c := range r1 {
if c != r2[i] {
return false
}
}
return true
}
// runesIndex is the equivalent of strings.Index for rune slices.
func runesIndex(r1, r2 []rune) int {
last := len(r1) - len(r2)
for i := 0; i <= last; i++ {
if runesEqual(r1[i:i+len(r2)], r2) {
return i
}
}
return -1
}

28
vendor/github.com/src-d/gcfg/LICENSE generated vendored Normal file

@@ -0,0 +1,28 @@
Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go
Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

145
vendor/github.com/src-d/gcfg/doc.go generated vendored Normal file

@@ -0,0 +1,145 @@
// Package gcfg reads "INI-style" text-based configuration files with
// "name=value" pairs grouped into sections (gcfg files).
//
// This package is still a work in progress; see the sections below for planned
// changes.
//
// Syntax
//
// The syntax is based on that used by git config:
// http://git-scm.com/docs/git-config#_syntax .
// There are some (planned) differences compared to the git config format:
// - improve data portability:
// - must be encoded in UTF-8 (for now) and must not contain the 0 byte
// - the include directive and the "path" type are not supported
// (path type may be implementable as a user-defined type)
// - internationalization
// - section and variable names can contain unicode letters, unicode digits
// (as defined in http://golang.org/ref/spec#Characters ) and hyphens
// (U+002D), starting with a unicode letter
// - disallow potentially ambiguous or misleading definitions:
// - `[sec.sub]` format is not allowed (deprecated in gitconfig)
// - `[sec ""]` is not allowed
// - use `[sec]` for section name "sec" and empty subsection name
// - (planned) within a single file, definitions must be contiguous for each:
// - section: '[secA]' -> '[secB]' -> '[secA]' is an error
// - subsection: '[sec "A"]' -> '[sec "B"]' -> '[sec "A"]' is an error
// - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error
//
// Data structure
//
// The functions in this package read values into a user-defined struct.
// Each section corresponds to a struct field in the config struct, and each
// variable in a section corresponds to a data field in the section struct.
// The mapping of each section or variable name to fields is done either based
// on the "gcfg" struct tag or by matching the name of the section or variable,
// ignoring case. In the latter case, hyphens '-' in section and variable names
// correspond to underscores '_' in field names.
// Fields must be exported; to use a section or variable name starting with a
// letter that is neither upper- or lower-case, prefix the field name with 'X'.
// (See https://code.google.com/p/go/issues/detail?id=5763#c4 .)
//
// For sections with subsections, the corresponding field in config must be a
// map, rather than a struct, with string keys and pointer-to-struct values.
// Values for subsection variables are stored in the map with the subsection
// name used as the map key.
// (Note that unlike section and variable names, subsection names are case
// sensitive.)
// When using a map, and there is a section with the same section name but
// without a subsection name, its values are stored with the empty string used
// as the key.
// It is possible to provide default values for subsections in the section
// "default-<sectionname>" (or by setting values in the corresponding struct
// field "Default_<sectionname>").
//
// The functions in this package panic if config is not a pointer to a struct,
// or when a field is not of a suitable type (either a struct or a map with
// string keys and pointer-to-struct values).
//
// Parsing of values
//
// The section structs in the config struct may contain single-valued or
// multi-valued variables. Variables of unnamed slice type (that is, a type
// starting with `[]`) are treated as multi-value; all others (including named
// slice types) are treated as single-valued variables.
//
// Single-valued variables are handled based on the type as follows.
// Unnamed pointer types (that is, types starting with `*`) are dereferenced,
// and if necessary, a new instance is allocated.
//
// For types implementing the encoding.TextUnmarshaler interface, the
// UnmarshalText method is used to set the value. Implementing this method is
// the recommended way for parsing user-defined types.
//
// For fields of string kind, the value string is assigned to the field, after
// unquoting and unescaping as needed.
// For fields of bool kind, the field is set to true if the value is "true",
// "yes", "on" or "1", and set to false if the value is "false", "no", "off" or
// "0", ignoring case. In addition, single-valued bool fields can be specified
// with a "blank" value (variable name without equals sign and value); in such
// case the value is set to true.
//
// Predefined integer types [u]int(|8|16|32|64) and big.Int are parsed as
// decimal or hexadecimal (if having '0x' prefix). (This is to prevent
// unintuitively handling zero-padded numbers as octal.) Other types having
// [u]int* as the underlying type, such as os.FileMode and uintptr allow
// decimal, hexadecimal, or octal values.
// Parsing mode for integer types can be overridden using the struct tag option
// ",int=mode" where mode is a combination of the 'd', 'h', and 'o' characters
// (each standing for decimal, hexadecimal, and octal, respectively.)
//
// All other types are parsed using fmt.Sscanf with the "%v" verb.
//
// For multi-valued variables, each individual value is parsed as above and
// appended to the slice. If the first value is specified as a "blank" value
// (variable name without equals sign and value), a new slice is allocated;
// that is, any values previously set in the slice will be ignored.
//
// The types subpackage provides helpers for parsing "enum-like" and integer
// types.
//
// Error handling
//
// There are 3 types of errors:
//
// - programmer errors / panics:
// - invalid configuration structure
// - data errors:
// - fatal errors:
// - invalid configuration syntax
// - warnings:
// - data that doesn't belong to any part of the config structure
//
// Programmer errors trigger panics. These should be fixed by the programmer
// before releasing code that uses gcfg.
//
// Data errors cause gcfg to return a non-nil error value. This includes the
// case when there are extra unknown key-value definitions in the configuration
// data (extra data).
// However, on some occasions it is desirable to be able to proceed in
// situations when the only data error is that of extra data.
// These errors are handled at a different (warning) priority and can be
// filtered out programmatically. To ignore extra data warnings, wrap the
// gcfg.Read*Into invocation into a call to gcfg.FatalOnly.
//
// TODO
//
// The following is a list of changes under consideration:
// - documentation
// - self-contained syntax documentation
// - more practical examples
// - move TODOs to issue tracker (eventually)
// - syntax
// - reconsider valid escape sequences
// (gitconfig doesn't support \r in value, \t in subsection name, etc.)
// - reading / parsing gcfg files
// - define internal representation structure
// - support multiple inputs (readers, strings, files)
// - support declaring encoding (?)
// - support varying fields sets for subsections (?)
// - writing gcfg files
// - error handling
// - make error context accessible programmatically?
// - limit input size?
//
package gcfg // import "github.com/src-d/gcfg"
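A hedged usage sketch of the struct mapping described above; it is not part of the vendored package, and the section, subsection and field names are invented for the example.

package main

import (
	"fmt"

	"github.com/src-d/gcfg"
)

type Config struct {
	Section struct {
		Name    string
		Enabled bool // a "blank" value (name with no '=') sets a bool field to true
	}
	// A section with subsections maps to a map with string keys and
	// pointer-to-struct values, keyed by the subsection name.
	Profile map[string]*struct {
		Path string
	}
}

func main() {
	src := `
[section]
name = example
enabled

[profile "work"]
path = /tmp/work
`
	var cfg Config
	if err := gcfg.ReadStringInto(&cfg, src); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Section.Name, cfg.Section.Enabled) // example true
	fmt.Println(cfg.Profile["work"].Path)              // /tmp/work
}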

41
vendor/github.com/src-d/gcfg/errors.go generated vendored Normal file

@@ -0,0 +1,41 @@
package gcfg
import (
"gopkg.in/warnings.v0"
)
// FatalOnly filters the results of a Read*Into invocation and returns only
// fatal errors. That is, errors (warnings) indicating data for unknown
// sections / variables is ignored. Example invocation:
//
// err := gcfg.FatalOnly(gcfg.ReadFileInto(&cfg, configFile))
// if err != nil {
// ...
//
func FatalOnly(err error) error {
return warnings.FatalOnly(err)
}
func isFatal(err error) bool {
_, ok := err.(extraData)
return !ok
}
type extraData struct {
section string
subsection *string
variable *string
}
func (e extraData) Error() string {
s := "can't store data at section \"" + e.section + "\""
if e.subsection != nil {
s += ", subsection \"" + *e.subsection + "\""
}
if e.variable != nil {
s += ", variable \"" + *e.variable + "\""
}
return s
}
var _ error = extraData{}

7
vendor/github.com/src-d/gcfg/go1_0.go generated vendored Normal file

@@ -0,0 +1,7 @@
// +build !go1.2
package gcfg
type textUnmarshaler interface {
UnmarshalText(text []byte) error
}

9
vendor/github.com/src-d/gcfg/go1_2.go generated vendored Normal file

@@ -0,0 +1,9 @@
// +build go1.2
package gcfg
import (
"encoding"
)
type textUnmarshaler encoding.TextUnmarshaler

273
vendor/github.com/src-d/gcfg/read.go generated vendored Normal file

@@ -0,0 +1,273 @@
package gcfg
import (
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"github.com/src-d/gcfg/scanner"
"github.com/src-d/gcfg/token"
"gopkg.in/warnings.v0"
)
var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t'}
// no error: invalid literals should be caught by scanner
func unquote(s string) string {
u, q, esc := make([]rune, 0, len(s)), false, false
for _, c := range s {
if esc {
uc, ok := unescape[c]
switch {
case ok:
u = append(u, uc)
fallthrough
case !q && c == '\n':
esc = false
continue
}
panic("invalid escape sequence")
}
switch c {
case '"':
q = !q
case '\\':
esc = true
default:
u = append(u, c)
}
}
if q {
panic("missing end quote")
}
if esc {
panic("invalid escape sequence")
}
return string(u)
}
func read(c *warnings.Collector, callback func(string, string, string, string, bool) error,
fset *token.FileSet, file *token.File, src []byte) error {
//
var s scanner.Scanner
var errs scanner.ErrorList
s.Init(file, src, func(p token.Position, m string) { errs.Add(p, m) }, 0)
sect, sectsub := "", ""
pos, tok, lit := s.Scan()
errfn := func(msg string) error {
return fmt.Errorf("%s: %s", fset.Position(pos), msg)
}
for {
if errs.Len() > 0 {
if err := c.Collect(errs.Err()); err != nil {
return err
}
}
switch tok {
case token.EOF:
return nil
case token.EOL, token.COMMENT:
pos, tok, lit = s.Scan()
case token.LBRACK:
pos, tok, lit = s.Scan()
if errs.Len() > 0 {
if err := c.Collect(errs.Err()); err != nil {
return err
}
}
if tok != token.IDENT {
if err := c.Collect(errfn("expected section name")); err != nil {
return err
}
}
sect, sectsub = lit, ""
pos, tok, lit = s.Scan()
if errs.Len() > 0 {
if err := c.Collect(errs.Err()); err != nil {
return err
}
}
if tok == token.STRING {
sectsub = unquote(lit)
if sectsub == "" {
if err := c.Collect(errfn("empty subsection name")); err != nil {
return err
}
}
pos, tok, lit = s.Scan()
if errs.Len() > 0 {
if err := c.Collect(errs.Err()); err != nil {
return err
}
}
}
if tok != token.RBRACK {
if sectsub == "" {
if err := c.Collect(errfn("expected subsection name or right bracket")); err != nil {
return err
}
}
if err := c.Collect(errfn("expected right bracket")); err != nil {
return err
}
}
pos, tok, lit = s.Scan()
if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil {
return err
}
}
// If a section/subsection header was found, ensure a
// container object is created, even if there are no
// variables further down.
err := c.Collect(callback(sect, sectsub, "", "", true))
if err != nil {
return err
}
case token.IDENT:
if sect == "" {
if err := c.Collect(errfn("expected section header")); err != nil {
return err
}
}
n := lit
pos, tok, lit = s.Scan()
if errs.Len() > 0 {
return errs.Err()
}
blank, v := tok == token.EOF || tok == token.EOL || tok == token.COMMENT, ""
if !blank {
if tok != token.ASSIGN {
if err := c.Collect(errfn("expected '='")); err != nil {
return err
}
}
pos, tok, lit = s.Scan()
if errs.Len() > 0 {
if err := c.Collect(errs.Err()); err != nil {
return err
}
}
if tok != token.STRING {
if err := c.Collect(errfn("expected value")); err != nil {
return err
}
}
v = unquote(lit)
pos, tok, lit = s.Scan()
if errs.Len() > 0 {
if err := c.Collect(errs.Err()); err != nil {
return err
}
}
if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil {
return err
}
}
}
err := c.Collect(callback(sect, sectsub, n, v, blank))
if err != nil {
return err
}
default:
if sect == "" {
if err := c.Collect(errfn("expected section header")); err != nil {
return err
}
}
if err := c.Collect(errfn("expected section header or variable declaration")); err != nil {
return err
}
}
}
panic("never reached")
}
func readInto(config interface{}, fset *token.FileSet, file *token.File,
src []byte) error {
//
c := warnings.NewCollector(isFatal)
firstPassCallback := func(s string, ss string, k string, v string, bv bool) error {
return set(c, config, s, ss, k, v, bv, false)
}
err := read(c, firstPassCallback, fset, file, src)
if err != nil {
return err
}
secondPassCallback := func(s string, ss string, k string, v string, bv bool) error {
return set(c, config, s, ss, k, v, bv, true)
}
err = read(c, secondPassCallback, fset, file, src)
if err != nil {
return err
}
return c.Done()
}
// ReadWithCallback reads gcfg formatted data from reader and calls
// callback with each section and option found.
//
// Callback is called with section, subsection, option key, option value
// and blank value flag as arguments.
//
// When a section is found, callback is called with nil subsection, option key
// and option value.
//
// When a subsection is found, callback is called with nil option key and
// option value.
//
// If blank value flag is true, it means that the value was not set for an option
// (as opposed to set to empty string).
//
// If callback returns an error, ReadWithCallback terminates with an error too.
func ReadWithCallback(reader io.Reader, callback func(string, string, string, string, bool) error) error {
src, err := ioutil.ReadAll(reader)
if err != nil {
return err
}
fset := token.NewFileSet()
file := fset.AddFile("", fset.Base(), len(src))
c := warnings.NewCollector(isFatal)
return read(c, callback, fset, file, src)
}
// ReadInto reads gcfg formatted data from reader and sets the values into the
// corresponding fields in config.
func ReadInto(config interface{}, reader io.Reader) error {
src, err := ioutil.ReadAll(reader)
if err != nil {
return err
}
fset := token.NewFileSet()
file := fset.AddFile("", fset.Base(), len(src))
return readInto(config, fset, file, src)
}
// ReadStringInto reads gcfg formatted data from str and sets the values into
// the corresponding fields in config.
func ReadStringInto(config interface{}, str string) error {
r := strings.NewReader(str)
return ReadInto(config, r)
}
// ReadFileInto reads gcfg formatted data from the file filename and sets the
// values into the corresponding fields in config.
func ReadFileInto(config interface{}, filename string) error {
f, err := os.Open(filename)
if err != nil {
return err
}
defer f.Close()
src, err := ioutil.ReadAll(f)
if err != nil {
return err
}
fset := token.NewFileSet()
file := fset.AddFile(filename, fset.Base(), len(src))
return readInto(config, fset, file, src)
}
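A small sketch of the ReadWithCallback API documented above; it is not part of the vendored file, and the configuration text is invented.

package main

import (
	"fmt"
	"strings"

	"github.com/src-d/gcfg"
)

func main() {
	src := strings.NewReader("[remote \"origin\"]\n\turl = https://example.com/repo.git\n")
	err := gcfg.ReadWithCallback(src, func(section, subsection, key, value string, blank bool) error {
		// Called once for the section header, then once per variable.
		fmt.Printf("section=%q subsection=%q key=%q value=%q blank=%v\n",
			section, subsection, key, value, blank)
		return nil
	})
	if err != nil {
		panic(err)
	}
}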

121
vendor/github.com/src-d/gcfg/scanner/errors.go generated vendored Normal file

@@ -0,0 +1,121 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package scanner
import (
"fmt"
"io"
"sort"
)
import (
"github.com/src-d/gcfg/token"
)
// In an ErrorList, an error is represented by an *Error.
// The position Pos, if valid, points to the beginning of
// the offending token, and the error condition is described
// by Msg.
//
type Error struct {
Pos token.Position
Msg string
}
// Error implements the error interface.
func (e Error) Error() string {
if e.Pos.Filename != "" || e.Pos.IsValid() {
// don't print "<unknown position>"
// TODO(gri) reconsider the semantics of Position.IsValid
return e.Pos.String() + ": " + e.Msg
}
return e.Msg
}
// ErrorList is a list of *Errors.
// The zero value for an ErrorList is an empty ErrorList ready to use.
//
type ErrorList []*Error
// Add adds an Error with given position and error message to an ErrorList.
func (p *ErrorList) Add(pos token.Position, msg string) {
*p = append(*p, &Error{pos, msg})
}
// Reset resets an ErrorList to no errors.
func (p *ErrorList) Reset() { *p = (*p)[0:0] }
// ErrorList implements the sort Interface.
func (p ErrorList) Len() int { return len(p) }
func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p ErrorList) Less(i, j int) bool {
e := &p[i].Pos
f := &p[j].Pos
if e.Filename < f.Filename {
return true
}
if e.Filename == f.Filename {
return e.Offset < f.Offset
}
return false
}
// Sort sorts an ErrorList. *Error entries are sorted by position,
// other errors are sorted by error message, and before any *Error
// entry.
//
func (p ErrorList) Sort() {
sort.Sort(p)
}
// RemoveMultiples sorts an ErrorList and removes all but the first error per line.
func (p *ErrorList) RemoveMultiples() {
sort.Sort(p)
var last token.Position // initial last.Line is != any legal error line
i := 0
for _, e := range *p {
if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line {
last = e.Pos
(*p)[i] = e
i++
}
}
(*p) = (*p)[0:i]
}
// An ErrorList implements the error interface.
func (p ErrorList) Error() string {
switch len(p) {
case 0:
return "no errors"
case 1:
return p[0].Error()
}
return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1)
}
// Err returns an error equivalent to this error list.
// If the list is empty, Err returns nil.
func (p ErrorList) Err() error {
if len(p) == 0 {
return nil
}
return p
}
// PrintError is a utility function that prints a list of errors to w,
// one error per line, if the err parameter is an ErrorList. Otherwise
// it prints the err string.
//
func PrintError(w io.Writer, err error) {
if list, ok := err.(ErrorList); ok {
for _, e := range list {
fmt.Fprintf(w, "%s\n", e)
}
} else if err != nil {
fmt.Fprintf(w, "%s\n", err)
}
}

342
vendor/github.com/src-d/gcfg/scanner/scanner.go generated vendored Normal file

@@ -0,0 +1,342 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package scanner implements a scanner for gcfg configuration text.
// It takes a []byte as source which can then be tokenized
// through repeated calls to the Scan method.
//
// Note that the API for the scanner package may change to accommodate new
// features or implementation changes in gcfg.
//
package scanner
import (
"fmt"
"path/filepath"
"unicode"
"unicode/utf8"
)
import (
"github.com/src-d/gcfg/token"
)
// An ErrorHandler may be provided to Scanner.Init. If a syntax error is
// encountered and a handler was installed, the handler is called with a
// position and an error message. The position points to the beginning of
// the offending token.
//
type ErrorHandler func(pos token.Position, msg string)
// A Scanner holds the scanner's internal state while processing
// a given text. It can be allocated as part of another data
// structure but must be initialized via Init before use.
//
type Scanner struct {
// immutable state
file *token.File // source file handle
dir string // directory portion of file.Name()
src []byte // source
err ErrorHandler // error reporting; or nil
mode Mode // scanning mode
// scanning state
ch rune // current character
offset int // character offset
rdOffset int // reading offset (position after current character)
lineOffset int // current line offset
nextVal bool // next token is expected to be a value
// public state - ok to modify
ErrorCount int // number of errors encountered
}
// Read the next Unicode char into s.ch.
// s.ch < 0 means end-of-file.
//
func (s *Scanner) next() {
if s.rdOffset < len(s.src) {
s.offset = s.rdOffset
if s.ch == '\n' {
s.lineOffset = s.offset
s.file.AddLine(s.offset)
}
r, w := rune(s.src[s.rdOffset]), 1
switch {
case r == 0:
s.error(s.offset, "illegal character NUL")
case r >= 0x80:
// not ASCII
r, w = utf8.DecodeRune(s.src[s.rdOffset:])
if r == utf8.RuneError && w == 1 {
s.error(s.offset, "illegal UTF-8 encoding")
}
}
s.rdOffset += w
s.ch = r
} else {
s.offset = len(s.src)
if s.ch == '\n' {
s.lineOffset = s.offset
s.file.AddLine(s.offset)
}
s.ch = -1 // eof
}
}
// A mode value is a set of flags (or 0).
// They control scanner behavior.
//
type Mode uint
const (
ScanComments Mode = 1 << iota // return comments as COMMENT tokens
)
// Init prepares the scanner s to tokenize the text src by setting the
// scanner at the beginning of src. The scanner uses the file set file
// for position information and it adds line information for each line.
// It is ok to re-use the same file when re-scanning the same file as
// line information which is already present is ignored. Init causes a
// panic if the file size does not match the src size.
//
// Calls to Scan will invoke the error handler err if they encounter a
// syntax error and err is not nil. Also, for each error encountered,
// the Scanner field ErrorCount is incremented by one. The mode parameter
// determines how comments are handled.
//
// Note that Init may call err if there is an error in the first character
// of the file.
//
func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) {
// Explicitly initialize all fields since a scanner may be reused.
if file.Size() != len(src) {
panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src)))
}
s.file = file
s.dir, _ = filepath.Split(file.Name())
s.src = src
s.err = err
s.mode = mode
s.ch = ' '
s.offset = 0
s.rdOffset = 0
s.lineOffset = 0
s.ErrorCount = 0
s.nextVal = false
s.next()
}
func (s *Scanner) error(offs int, msg string) {
if s.err != nil {
s.err(s.file.Position(s.file.Pos(offs)), msg)
}
s.ErrorCount++
}
func (s *Scanner) scanComment() string {
// initial [;#] already consumed
offs := s.offset - 1 // position of initial [;#]
for s.ch != '\n' && s.ch >= 0 {
s.next()
}
return string(s.src[offs:s.offset])
}
func isLetter(ch rune) bool {
return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= 0x80 && unicode.IsLetter(ch)
}
func isDigit(ch rune) bool {
return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
}
func (s *Scanner) scanIdentifier() string {
offs := s.offset
for isLetter(s.ch) || isDigit(s.ch) || s.ch == '-' {
s.next()
}
return string(s.src[offs:s.offset])
}
func (s *Scanner) scanEscape(val bool) {
offs := s.offset
ch := s.ch
s.next() // always make progress
switch ch {
case '\\', '"':
// ok
case 'n', 't':
if val {
break // ok
}
fallthrough
default:
s.error(offs, "unknown escape sequence")
}
}
func (s *Scanner) scanString() string {
// '"' opening already consumed
offs := s.offset - 1
for s.ch != '"' {
ch := s.ch
s.next()
if ch == '\n' || ch < 0 {
s.error(offs, "string not terminated")
break
}
if ch == '\\' {
s.scanEscape(false)
}
}
s.next()
return string(s.src[offs:s.offset])
}
func stripCR(b []byte) []byte {
c := make([]byte, len(b))
i := 0
for _, ch := range b {
if ch != '\r' {
c[i] = ch
i++
}
}
return c[:i]
}
func (s *Scanner) scanValString() string {
offs := s.offset
hasCR := false
end := offs
inQuote := false
loop:
for inQuote || s.ch >= 0 && s.ch != '\n' && s.ch != ';' && s.ch != '#' {
ch := s.ch
s.next()
switch {
case inQuote && ch == '\\':
s.scanEscape(true)
case !inQuote && ch == '\\':
if s.ch == '\r' {
hasCR = true
s.next()
}
if s.ch != '\n' {
s.error(offs, "unquoted '\\' must be followed by new line")
break loop
}
s.next()
case ch == '"':
inQuote = !inQuote
case ch == '\r':
hasCR = true
case ch < 0 || inQuote && ch == '\n':
s.error(offs, "string not terminated")
break loop
}
if inQuote || !isWhiteSpace(ch) {
end = s.offset
}
}
lit := s.src[offs:end]
if hasCR {
lit = stripCR(lit)
}
return string(lit)
}
func isWhiteSpace(ch rune) bool {
return ch == ' ' || ch == '\t' || ch == '\r'
}
func (s *Scanner) skipWhitespace() {
for isWhiteSpace(s.ch) {
s.next()
}
}
// Scan scans the next token and returns the token position, the token,
// and its literal string if applicable. The source end is indicated by
// token.EOF.
//
// If the returned token is a literal (token.IDENT, token.STRING) or
// token.COMMENT, the literal string has the corresponding value.
//
// If the returned token is token.ILLEGAL, the literal string is the
// offending character.
//
// In all other cases, Scan returns an empty literal string.
//
// For more tolerant parsing, Scan will return a valid token if
// possible even if a syntax error was encountered. Thus, even
// if the resulting token sequence contains no illegal tokens,
// a client may not assume that no error occurred. Instead it
// must check the scanner's ErrorCount or the number of calls
// of the error handler, if there was one installed.
//
// Scan adds line information to the file added to the file
// set with Init. Token positions are relative to that file
// and thus relative to the file set.
//
func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) {
scanAgain:
s.skipWhitespace()
// current token start
pos = s.file.Pos(s.offset)
// determine token value
switch ch := s.ch; {
case s.nextVal:
lit = s.scanValString()
tok = token.STRING
s.nextVal = false
case isLetter(ch):
lit = s.scanIdentifier()
tok = token.IDENT
default:
s.next() // always make progress
switch ch {
case -1:
tok = token.EOF
case '\n':
tok = token.EOL
case '"':
tok = token.STRING
lit = s.scanString()
case '[':
tok = token.LBRACK
case ']':
tok = token.RBRACK
case ';', '#':
// comment
lit = s.scanComment()
if s.mode&ScanComments == 0 {
// skip comment
goto scanAgain
}
tok = token.COMMENT
case '=':
tok = token.ASSIGN
s.nextVal = true
default:
s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch))
tok = token.ILLEGAL
lit = string(ch)
}
}
return
}
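A hedged sketch of driving the scanner directly through the Init/Scan API above; it is not part of the vendored file, and the input text is invented.

package main

import (
	"fmt"

	"github.com/src-d/gcfg/scanner"
	"github.com/src-d/gcfg/token"
)

func main() {
	src := []byte("[core]\n\teditor = vim ; trailing comment\n")
	fset := token.NewFileSet()
	file := fset.AddFile("example.gcfg", fset.Base(), len(src))

	var s scanner.Scanner
	s.Init(file, src, nil, scanner.ScanComments) // nil error handler; errors are only counted
	for {
		pos, tok, lit := s.Scan()
		if tok == token.EOF {
			break
		}
		fmt.Printf("%-14s %v %q\n", fset.Position(pos), tok, lit)
	}
	if s.ErrorCount > 0 {
		fmt.Println("syntax errors:", s.ErrorCount)
	}
}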

332
vendor/github.com/src-d/gcfg/set.go generated vendored Normal file

@@ -0,0 +1,332 @@
package gcfg
import (
"bytes"
"encoding/gob"
"fmt"
"math/big"
"reflect"
"strings"
"unicode"
"unicode/utf8"
"github.com/src-d/gcfg/types"
"gopkg.in/warnings.v0"
)
type tag struct {
ident string
intMode string
}
func newTag(ts string) tag {
t := tag{}
s := strings.Split(ts, ",")
t.ident = s[0]
for _, tse := range s[1:] {
if strings.HasPrefix(tse, "int=") {
t.intMode = tse[len("int="):]
}
}
return t
}
func fieldFold(v reflect.Value, name string) (reflect.Value, tag) {
var n string
r0, _ := utf8.DecodeRuneInString(name)
if unicode.IsLetter(r0) && !unicode.IsLower(r0) && !unicode.IsUpper(r0) {
n = "X"
}
n += strings.Replace(name, "-", "_", -1)
f, ok := v.Type().FieldByNameFunc(func(fieldName string) bool {
if !v.FieldByName(fieldName).CanSet() {
return false
}
f, _ := v.Type().FieldByName(fieldName)
t := newTag(f.Tag.Get("gcfg"))
if t.ident != "" {
return strings.EqualFold(t.ident, name)
}
return strings.EqualFold(n, fieldName)
})
if !ok {
return reflect.Value{}, tag{}
}
return v.FieldByName(f.Name), newTag(f.Tag.Get("gcfg"))
}
type setter func(destp interface{}, blank bool, val string, t tag) error
var errUnsupportedType = fmt.Errorf("unsupported type")
var errBlankUnsupported = fmt.Errorf("blank value not supported for type")
var setters = []setter{
typeSetter, textUnmarshalerSetter, kindSetter, scanSetter,
}
func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error {
dtu, ok := d.(textUnmarshaler)
if !ok {
return errUnsupportedType
}
if blank {
return errBlankUnsupported
}
return dtu.UnmarshalText([]byte(val))
}
func boolSetter(d interface{}, blank bool, val string, t tag) error {
if blank {
reflect.ValueOf(d).Elem().Set(reflect.ValueOf(true))
return nil
}
b, err := types.ParseBool(val)
if err == nil {
reflect.ValueOf(d).Elem().Set(reflect.ValueOf(b))
}
return err
}
func intMode(mode string) types.IntMode {
var m types.IntMode
if strings.ContainsAny(mode, "dD") {
m |= types.Dec
}
if strings.ContainsAny(mode, "hH") {
m |= types.Hex
}
if strings.ContainsAny(mode, "oO") {
m |= types.Oct
}
return m
}
var typeModes = map[reflect.Type]types.IntMode{
reflect.TypeOf(int(0)): types.Dec | types.Hex,
reflect.TypeOf(int8(0)): types.Dec | types.Hex,
reflect.TypeOf(int16(0)): types.Dec | types.Hex,
reflect.TypeOf(int32(0)): types.Dec | types.Hex,
reflect.TypeOf(int64(0)): types.Dec | types.Hex,
reflect.TypeOf(uint(0)): types.Dec | types.Hex,
reflect.TypeOf(uint8(0)): types.Dec | types.Hex,
reflect.TypeOf(uint16(0)): types.Dec | types.Hex,
reflect.TypeOf(uint32(0)): types.Dec | types.Hex,
reflect.TypeOf(uint64(0)): types.Dec | types.Hex,
// use default mode (allow dec/hex/oct) for uintptr type
reflect.TypeOf(big.Int{}): types.Dec | types.Hex,
}
func intModeDefault(t reflect.Type) types.IntMode {
m, ok := typeModes[t]
if !ok {
m = types.Dec | types.Hex | types.Oct
}
return m
}
func intSetter(d interface{}, blank bool, val string, t tag) error {
if blank {
return errBlankUnsupported
}
mode := intMode(t.intMode)
if mode == 0 {
mode = intModeDefault(reflect.TypeOf(d).Elem())
}
return types.ParseInt(d, val, mode)
}
func stringSetter(d interface{}, blank bool, val string, t tag) error {
if blank {
return errBlankUnsupported
}
dsp, ok := d.(*string)
if !ok {
return errUnsupportedType
}
*dsp = val
return nil
}
var kindSetters = map[reflect.Kind]setter{
reflect.String: stringSetter,
reflect.Bool: boolSetter,
reflect.Int: intSetter,
reflect.Int8: intSetter,
reflect.Int16: intSetter,
reflect.Int32: intSetter,
reflect.Int64: intSetter,
reflect.Uint: intSetter,
reflect.Uint8: intSetter,
reflect.Uint16: intSetter,
reflect.Uint32: intSetter,
reflect.Uint64: intSetter,
reflect.Uintptr: intSetter,
}
var typeSetters = map[reflect.Type]setter{
reflect.TypeOf(big.Int{}): intSetter,
}
func typeSetter(d interface{}, blank bool, val string, tt tag) error {
t := reflect.ValueOf(d).Type().Elem()
setter, ok := typeSetters[t]
if !ok {
return errUnsupportedType
}
return setter(d, blank, val, tt)
}
func kindSetter(d interface{}, blank bool, val string, tt tag) error {
k := reflect.ValueOf(d).Type().Elem().Kind()
setter, ok := kindSetters[k]
if !ok {
return errUnsupportedType
}
return setter(d, blank, val, tt)
}
func scanSetter(d interface{}, blank bool, val string, tt tag) error {
if blank {
return errBlankUnsupported
}
return types.ScanFully(d, val, 'v')
}
func newValue(c *warnings.Collector, sect string, vCfg reflect.Value,
vType reflect.Type) (reflect.Value, error) {
//
pv := reflect.New(vType)
dfltName := "default-" + sect
dfltField, _ := fieldFold(vCfg, dfltName)
var err error
if dfltField.IsValid() {
b := bytes.NewBuffer(nil)
ge := gob.NewEncoder(b)
if err = c.Collect(ge.EncodeValue(dfltField)); err != nil {
return pv, err
}
gd := gob.NewDecoder(bytes.NewReader(b.Bytes()))
if err = c.Collect(gd.DecodeValue(pv.Elem())); err != nil {
return pv, err
}
}
return pv, nil
}
func set(c *warnings.Collector, cfg interface{}, sect, sub, name string,
value string, blankValue bool, subsectPass bool) error {
//
vPCfg := reflect.ValueOf(cfg)
if vPCfg.Kind() != reflect.Ptr || vPCfg.Elem().Kind() != reflect.Struct {
panic(fmt.Errorf("config must be a pointer to a struct"))
}
vCfg := vPCfg.Elem()
vSect, _ := fieldFold(vCfg, sect)
if !vSect.IsValid() {
err := extraData{section: sect}
return c.Collect(err)
}
isSubsect := vSect.Kind() == reflect.Map
if subsectPass != isSubsect {
return nil
}
if isSubsect {
vst := vSect.Type()
if vst.Key().Kind() != reflect.String ||
vst.Elem().Kind() != reflect.Ptr ||
vst.Elem().Elem().Kind() != reflect.Struct {
panic(fmt.Errorf("map field for section must have string keys and "+
" pointer-to-struct values: section %q", sect))
}
if vSect.IsNil() {
vSect.Set(reflect.MakeMap(vst))
}
k := reflect.ValueOf(sub)
pv := vSect.MapIndex(k)
if !pv.IsValid() {
vType := vSect.Type().Elem().Elem()
var err error
if pv, err = newValue(c, sect, vCfg, vType); err != nil {
return err
}
vSect.SetMapIndex(k, pv)
}
vSect = pv.Elem()
} else if vSect.Kind() != reflect.Struct {
panic(fmt.Errorf("field for section must be a map or a struct: "+
"section %q", sect))
} else if sub != "" {
err := extraData{section: sect, subsection: &sub}
return c.Collect(err)
}
// Empty name is a special value, meaning that only the
// section/subsection object is to be created, with no values set.
if name == "" {
return nil
}
vVar, t := fieldFold(vSect, name)
if !vVar.IsValid() {
var err error
if isSubsect {
err = extraData{section: sect, subsection: &sub, variable: &name}
} else {
err = extraData{section: sect, variable: &name}
}
return c.Collect(err)
}
// vVal is either single-valued var, or newly allocated value within multi-valued var
var vVal reflect.Value
// multi-value if unnamed slice type
isMulti := vVar.Type().Name() == "" && vVar.Kind() == reflect.Slice ||
vVar.Type().Name() == "" && vVar.Kind() == reflect.Ptr && vVar.Type().Elem().Name() == "" && vVar.Type().Elem().Kind() == reflect.Slice
if isMulti && vVar.Kind() == reflect.Ptr {
if vVar.IsNil() {
vVar.Set(reflect.New(vVar.Type().Elem()))
}
vVar = vVar.Elem()
}
if isMulti && blankValue {
vVar.Set(reflect.Zero(vVar.Type()))
return nil
}
if isMulti {
vVal = reflect.New(vVar.Type().Elem()).Elem()
} else {
vVal = vVar
}
isDeref := vVal.Type().Name() == "" && vVal.Type().Kind() == reflect.Ptr
isNew := isDeref && vVal.IsNil()
// vAddr is address of value to set (dereferenced & allocated as needed)
var vAddr reflect.Value
switch {
case isNew:
vAddr = reflect.New(vVal.Type().Elem())
case isDeref && !isNew:
vAddr = vVal
default:
vAddr = vVal.Addr()
}
vAddrI := vAddr.Interface()
err, ok := error(nil), false
for _, s := range setters {
err = s(vAddrI, blankValue, value, t)
if err == nil {
ok = true
break
}
if err != errUnsupportedType {
return err
}
}
if !ok {
// in case all setters returned errUnsupportedType
return err
}
if isNew { // set reference if it was dereferenced and newly allocated
vVal.Set(vAddr)
}
if isMulti { // append if multi-valued
vVar.Set(reflect.Append(vVar, vVal))
}
return nil
}
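The user-facing side of the newTag/fieldFold machinery above, as a brief sketch; it is not part of the vendored file, the names and values are invented, and it assumes hexadecimal input is accepted when hex mode is enabled, as the package documentation states for '0x'-prefixed values.

package main

import (
	"fmt"

	"github.com/src-d/gcfg"
)

type Config struct {
	Limits struct {
		// The "gcfg" tag overrides name matching; ",int=dh" allows decimal
		// or hexadecimal input for this integer field.
		MaxSize int64 `gcfg:"max-size,int=dh"`
		// Without a tag, names are matched case-insensitively.
		Owner string
	}
}

func main() {
	src := "[limits]\nmax-size = 0x400\nowner = root\n"
	var cfg Config
	if err := gcfg.ReadStringInto(&cfg, src); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Limits.MaxSize, cfg.Limits.Owner) // 1024 root
}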

435
vendor/github.com/src-d/gcfg/token/position.go generated vendored Normal file

@@ -0,0 +1,435 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// TODO(gri) consider making this a separate package outside the go directory.
package token
import (
"fmt"
"sort"
"sync"
)
// -----------------------------------------------------------------------------
// Positions
// Position describes an arbitrary source position
// including the file, line, and column location.
// A Position is valid if the line number is > 0.
//
type Position struct {
Filename string // filename, if any
Offset int // offset, starting at 0
Line int // line number, starting at 1
Column int // column number, starting at 1 (character count)
}
// IsValid returns true if the position is valid.
func (pos *Position) IsValid() bool { return pos.Line > 0 }
// String returns a string in one of several forms:
//
// file:line:column valid position with file name
// line:column valid position without file name
// file invalid position with file name
// - invalid position without file name
//
func (pos Position) String() string {
s := pos.Filename
if pos.IsValid() {
if s != "" {
s += ":"
}
s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
}
if s == "" {
s = "-"
}
return s
}
// Pos is a compact encoding of a source position within a file set.
// It can be converted into a Position for a more convenient, but much
// larger, representation.
//
// The Pos value for a given file is a number in the range [base, base+size],
// where base and size are specified when adding the file to the file set via
// AddFile.
//
// To create the Pos value for a specific source offset, first add
// the respective file to the current file set (via FileSet.AddFile)
// and then call File.Pos(offset) for that file. Given a Pos value p
// for a specific file set fset, the corresponding Position value is
// obtained by calling fset.Position(p).
//
// Pos values can be compared directly with the usual comparison operators:
// If two Pos values p and q are in the same file, comparing p and q is
// equivalent to comparing the respective source file offsets. If p and q
// are in different files, p < q is true if the file implied by p was added
// to the respective file set before the file implied by q.
//
type Pos int
// The zero value for Pos is NoPos; there is no file and line information
// associated with it, and NoPos().IsValid() is false. NoPos is always
// smaller than any other Pos value. The corresponding Position value
// for NoPos is the zero value for Position.
//
const NoPos Pos = 0
// IsValid returns true if the position is valid.
func (p Pos) IsValid() bool {
return p != NoPos
}
// -----------------------------------------------------------------------------
// File
// A File is a handle for a file belonging to a FileSet.
// A File has a name, size, and line offset table.
//
type File struct {
set *FileSet
name string // file name as provided to AddFile
base int // Pos value range for this file is [base...base+size]
size int // file size as provided to AddFile
// lines and infos are protected by set.mutex
lines []int
infos []lineInfo
}
// Name returns the file name of file f as registered with AddFile.
func (f *File) Name() string {
return f.name
}
// Base returns the base offset of file f as registered with AddFile.
func (f *File) Base() int {
return f.base
}
// Size returns the size of file f as registered with AddFile.
func (f *File) Size() int {
return f.size
}
// LineCount returns the number of lines in file f.
func (f *File) LineCount() int {
f.set.mutex.RLock()
n := len(f.lines)
f.set.mutex.RUnlock()
return n
}
// AddLine adds the line offset for a new line.
// The line offset must be larger than the offset for the previous line
// and smaller than the file size; otherwise the line offset is ignored.
//
func (f *File) AddLine(offset int) {
f.set.mutex.Lock()
if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size {
f.lines = append(f.lines, offset)
}
f.set.mutex.Unlock()
}
// SetLines sets the line offsets for a file and returns true if successful.
// The line offsets are the offsets of the first character of each line;
// for instance for the content "ab\nc\n" the line offsets are {0, 3}.
// An empty file has an empty line offset table.
// Each line offset must be larger than the offset for the previous line
// and smaller than the file size; otherwise SetLines fails and returns
// false.
//
func (f *File) SetLines(lines []int) bool {
// verify validity of lines table
size := f.size
for i, offset := range lines {
if i > 0 && offset <= lines[i-1] || size <= offset {
return false
}
}
// set lines table
f.set.mutex.Lock()
f.lines = lines
f.set.mutex.Unlock()
return true
}
// SetLinesForContent sets the line offsets for the given file content.
func (f *File) SetLinesForContent(content []byte) {
var lines []int
line := 0
for offset, b := range content {
if line >= 0 {
lines = append(lines, line)
}
line = -1
if b == '\n' {
line = offset + 1
}
}
// set lines table
f.set.mutex.Lock()
f.lines = lines
f.set.mutex.Unlock()
}
// A lineInfo object describes alternative file and line number
// information (such as provided via a //line comment in a .go
// file) for a given file offset.
type lineInfo struct {
// fields are exported to make them accessible to gob
Offset int
Filename string
Line int
}
// AddLineInfo adds alternative file and line number information for
// a given file offset. The offset must be larger than the offset for
// the previously added alternative line info and smaller than the
// file size; otherwise the information is ignored.
//
// AddLineInfo is typically used to register alternative position
// information for //line filename:line comments in source files.
//
func (f *File) AddLineInfo(offset int, filename string, line int) {
f.set.mutex.Lock()
if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size {
f.infos = append(f.infos, lineInfo{offset, filename, line})
}
f.set.mutex.Unlock()
}
// Pos returns the Pos value for the given file offset;
// the offset must be <= f.Size().
// f.Pos(f.Offset(p)) == p.
//
func (f *File) Pos(offset int) Pos {
if offset > f.size {
panic("illegal file offset")
}
return Pos(f.base + offset)
}
// Offset returns the offset for the given file position p;
// p must be a valid Pos value in that file.
// f.Offset(f.Pos(offset)) == offset.
//
func (f *File) Offset(p Pos) int {
if int(p) < f.base || int(p) > f.base+f.size {
panic("illegal Pos value")
}
return int(p) - f.base
}
// Line returns the line number for the given file position p;
// p must be a Pos value in that file or NoPos.
//
func (f *File) Line(p Pos) int {
// TODO(gri) this can be implemented much more efficiently
return f.Position(p).Line
}
func searchLineInfos(a []lineInfo, x int) int {
return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1
}
// info returns the file name, line, and column number for a file offset.
func (f *File) info(offset int) (filename string, line, column int) {
filename = f.name
if i := searchInts(f.lines, offset); i >= 0 {
line, column = i+1, offset-f.lines[i]+1
}
if len(f.infos) > 0 {
// almost no files have extra line infos
if i := searchLineInfos(f.infos, offset); i >= 0 {
alt := &f.infos[i]
filename = alt.Filename
if i := searchInts(f.lines, alt.Offset); i >= 0 {
line += alt.Line - i - 1
}
}
}
return
}
func (f *File) position(p Pos) (pos Position) {
offset := int(p) - f.base
pos.Offset = offset
pos.Filename, pos.Line, pos.Column = f.info(offset)
return
}
// Position returns the Position value for the given file position p;
// p must be a Pos value in that file or NoPos.
//
func (f *File) Position(p Pos) (pos Position) {
if p != NoPos {
if int(p) < f.base || int(p) > f.base+f.size {
panic("illegal Pos value")
}
pos = f.position(p)
}
return
}
// -----------------------------------------------------------------------------
// FileSet
// A FileSet represents a set of source files.
// Methods of file sets are synchronized; multiple goroutines
// may invoke them concurrently.
//
type FileSet struct {
mutex sync.RWMutex // protects the file set
base int // base offset for the next file
files []*File // list of files in the order added to the set
last *File // cache of last file looked up
}
// NewFileSet creates a new file set.
func NewFileSet() *FileSet {
s := new(FileSet)
s.base = 1 // 0 == NoPos
return s
}
// Base returns the minimum base offset that must be provided to
// AddFile when adding the next file.
//
func (s *FileSet) Base() int {
s.mutex.RLock()
b := s.base
s.mutex.RUnlock()
return b
}
// AddFile adds a new file with a given filename, base offset, and file size
// to the file set s and returns the file. Multiple files may have the same
// name. The base offset must not be smaller than the FileSet's Base(), and
// size must not be negative.
//
// Adding the file will set the file set's Base() value to base + size + 1
// as the minimum base value for the next file. The following relationship
// exists between a Pos value p for a given file offset offs:
//
// int(p) = base + offs
//
// with offs in the range [0, size] and thus p in the range [base, base+size].
// For convenience, File.Pos may be used to create file-specific position
// values from a file offset.
//
func (s *FileSet) AddFile(filename string, base, size int) *File {
s.mutex.Lock()
defer s.mutex.Unlock()
if base < s.base || size < 0 {
panic("illegal base or size")
}
// base >= s.base && size >= 0
f := &File{s, filename, base, size, []int{0}, nil}
base += size + 1 // +1 because EOF also has a position
if base < 0 {
panic("token.Pos offset overflow (> 2G of source code in file set)")
}
// add the file to the file set
s.base = base
s.files = append(s.files, f)
s.last = f
return f
}
// Iterate calls f for the files in the file set in the order they were added
// until f returns false.
//
func (s *FileSet) Iterate(f func(*File) bool) {
for i := 0; ; i++ {
var file *File
s.mutex.RLock()
if i < len(s.files) {
file = s.files[i]
}
s.mutex.RUnlock()
if file == nil || !f(file) {
break
}
}
}
func searchFiles(a []*File, x int) int {
return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1
}
func (s *FileSet) file(p Pos) *File {
// common case: p is in last file
if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size {
return f
}
// p is not in last file - search all files
if i := searchFiles(s.files, int(p)); i >= 0 {
f := s.files[i]
// f.base <= int(p) by definition of searchFiles
if int(p) <= f.base+f.size {
s.last = f
return f
}
}
return nil
}
// File returns the file that contains the position p.
// If no such file is found (for instance for p == NoPos),
// the result is nil.
//
func (s *FileSet) File(p Pos) (f *File) {
if p != NoPos {
s.mutex.RLock()
f = s.file(p)
s.mutex.RUnlock()
}
return
}
// Position converts a Pos in the fileset into a general Position.
func (s *FileSet) Position(p Pos) (pos Position) {
if p != NoPos {
s.mutex.RLock()
if f := s.file(p); f != nil {
pos = f.position(p)
}
s.mutex.RUnlock()
}
return
}
// -----------------------------------------------------------------------------
// Helper functions
func searchInts(a []int, x int) int {
// This function body is a manually inlined version of:
//
// return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
//
// With better compiler optimizations, this may not be needed in the
// future, but at the moment this change improves the go/printer
// benchmark performance by ~30%. This has a direct impact on the
// speed of gofmt and thus seems worthwhile (2011-04-29).
// TODO(gri): Remove this when compilers have caught up.
i, j := 0, len(a)
for i < j {
h := i + (j-i)/2 // avoid overflow when computing h
// i ≤ h < j
if a[h] <= x {
i = h + 1
} else {
j = h
}
}
return i - 1
}
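As an illustrative sketch (not part of the vendored file), the documented relationship int(p) = base + offs is all that is needed to map an offset in an added file back to a human-readable Position; the import path below assumes this vendored copy of the gcfg token package.

package main

import (
	"fmt"

	"github.com/src-d/gcfg/token"
)

func main() {
	fset := token.NewFileSet()
	base := fset.Base()
	fset.AddFile("example.gcfg", base, 100) // register a 100-byte file

	// Per the AddFile documentation, int(p) = base + offs for offs in [0, size].
	p := token.Pos(base + 42)
	fmt.Println(fset.Position(p)) // example.gcfg:1:43 (offset 42 is still on line 1)
}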

56
vendor/github.com/src-d/gcfg/token/serialize.go generated vendored Normal file
View File

@ -0,0 +1,56 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package token
type serializedFile struct {
// fields correspond 1:1 to fields with same (lower-case) name in File
Name string
Base int
Size int
Lines []int
Infos []lineInfo
}
type serializedFileSet struct {
Base int
Files []serializedFile
}
// Read calls decode to deserialize a file set into s; s must not be nil.
func (s *FileSet) Read(decode func(interface{}) error) error {
var ss serializedFileSet
if err := decode(&ss); err != nil {
return err
}
s.mutex.Lock()
s.base = ss.Base
files := make([]*File, len(ss.Files))
for i := 0; i < len(ss.Files); i++ {
f := &ss.Files[i]
files[i] = &File{s, f.Name, f.Base, f.Size, f.Lines, f.Infos}
}
s.files = files
s.last = nil
s.mutex.Unlock()
return nil
}
// Write calls encode to serialize the file set s.
func (s *FileSet) Write(encode func(interface{}) error) error {
var ss serializedFileSet
s.mutex.Lock()
ss.Base = s.base
files := make([]serializedFile, len(s.files))
for i, f := range s.files {
files[i] = serializedFile{f.name, f.base, f.size, f.lines, f.infos}
}
ss.Files = files
s.mutex.Unlock()
return encode(ss)
}
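One way to exercise Read and Write (an illustrative sketch, not part of the vendored file) is to hand them the Encode/Decode methods of an encoding/gob encoder and decoder, since both match the func(interface{}) error callback signature:

package main

import (
	"bytes"
	"encoding/gob"
	"log"

	"github.com/src-d/gcfg/token"
)

func main() {
	fset := token.NewFileSet()
	fset.AddFile("a.gcfg", fset.Base(), 10)

	var buf bytes.Buffer
	if err := fset.Write(gob.NewEncoder(&buf).Encode); err != nil {
		log.Fatal(err)
	}

	restored := token.NewFileSet()
	if err := restored.Read(gob.NewDecoder(&buf).Decode); err != nil {
		log.Fatal(err)
	}
	// restored.Base() now equals fset.Base().
}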

83
vendor/github.com/src-d/gcfg/token/token.go generated vendored Normal file
View File

@ -0,0 +1,83 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package token defines constants representing the lexical tokens of the gcfg
// configuration syntax and basic operations on tokens (printing, predicates).
//
// Note that the API for the token package may change to accommodate new
// features or implementation changes in gcfg.
//
package token
import "strconv"
// Token is the set of lexical tokens of the gcfg configuration syntax.
type Token int
// The list of tokens.
const (
// Special tokens
ILLEGAL Token = iota
EOF
COMMENT
literal_beg
// Identifiers and basic type literals
// (these tokens stand for classes of literals)
IDENT // section-name, variable-name
STRING // "subsection-name", variable value
literal_end
operator_beg
// Operators and delimiters
ASSIGN // =
LBRACK // [
RBRACK // ]
EOL // \n
operator_end
)
var tokens = [...]string{
ILLEGAL: "ILLEGAL",
EOF: "EOF",
COMMENT: "COMMENT",
IDENT: "IDENT",
STRING: "STRING",
ASSIGN: "=",
LBRACK: "[",
RBRACK: "]",
EOL: "\n",
}
// String returns the string corresponding to the token tok.
// For operators and delimiters, the string is the actual token character
// sequence (e.g., for the token ASSIGN, the string is "="). For all other
// tokens the string corresponds to the token constant name (e.g. for the
// token IDENT, the string is "IDENT").
//
func (tok Token) String() string {
s := ""
if 0 <= tok && tok < Token(len(tokens)) {
s = tokens[tok]
}
if s == "" {
s = "token(" + strconv.Itoa(int(tok)) + ")"
}
return s
}
// Predicates
// IsLiteral returns true for tokens corresponding to identifiers
// and basic type literals; it returns false otherwise.
//
func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end }
// IsOperator returns true for tokens corresponding to operators and
// delimiters; it returns false otherwise.
//
func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end }
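A quick illustration (not part of the vendored file) of the String method and the two predicates above, assuming the vendored import path:

package main

import (
	"fmt"

	"github.com/src-d/gcfg/token"
)

func main() {
	fmt.Println(token.ASSIGN)              // "=" — operators print their token text
	fmt.Println(token.IDENT)               // "IDENT" — other tokens print their constant name
	fmt.Println(token.IDENT.IsLiteral())   // true
	fmt.Println(token.LBRACK.IsOperator()) // true
}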

23
vendor/github.com/src-d/gcfg/types/bool.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
package types
// BoolValues defines the name and value mappings for ParseBool.
var BoolValues = map[string]interface{}{
"true": true, "yes": true, "on": true, "1": true,
"false": false, "no": false, "off": false, "0": false,
}
var boolParser = func() *EnumParser {
ep := &EnumParser{}
ep.AddVals(BoolValues)
return ep
}()
// ParseBool parses bool values according to the definitions in BoolValues.
// Parsing is case-insensitive.
func ParseBool(s string) (bool, error) {
v, err := boolParser.Parse(s)
if err != nil {
return false, err
}
return v.(bool), nil
}
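A minimal usage sketch (not part of the vendored file), showing that matching is case-insensitive and limited to the names in BoolValues:

package main

import (
	"fmt"

	"github.com/src-d/gcfg/types"
)

func main() {
	v, err := types.ParseBool("Yes") // true <nil>
	fmt.Println(v, err)
	v, err = types.ParseBool("0") // false <nil>
	fmt.Println(v, err)
	_, err = types.ParseBool("maybe") // error: "maybe" is not in BoolValues
	fmt.Println(err)
}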

4
vendor/github.com/src-d/gcfg/types/doc.go generated vendored Normal file
View File

@ -0,0 +1,4 @@
// Package types defines helpers for type conversions.
//
// The API for this package is not finalized yet.
package types

44
vendor/github.com/src-d/gcfg/types/enum.go generated vendored Normal file
View File

@ -0,0 +1,44 @@
package types
import (
"fmt"
"reflect"
"strings"
)
// EnumParser parses "enum" values; i.e. a predefined set of strings to
// predefined values.
type EnumParser struct {
Type string // type name; if not set, use type of first value added
CaseMatch bool // if true, matching of strings is case-sensitive
// PrefixMatch bool
vals map[string]interface{}
}
// AddVals adds strings and values to an EnumParser.
func (ep *EnumParser) AddVals(vals map[string]interface{}) {
if ep.vals == nil {
ep.vals = make(map[string]interface{})
}
for k, v := range vals {
if ep.Type == "" {
ep.Type = reflect.TypeOf(v).Name()
}
if !ep.CaseMatch {
k = strings.ToLower(k)
}
ep.vals[k] = v
}
}
// Parse parses the string and returns the value or an error.
func (ep EnumParser) Parse(s string) (interface{}, error) {
if !ep.CaseMatch {
s = strings.ToLower(s)
}
v, ok := ep.vals[s]
if !ok {
return false, fmt.Errorf("failed to parse %s %#q", ep.Type, s)
}
return v, nil
}
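A minimal sketch (not part of the vendored file) of defining and using an EnumParser; the "color" mapping below is made up for illustration:

package main

import (
	"fmt"

	"github.com/src-d/gcfg/types"
)

func main() {
	ep := &types.EnumParser{Type: "color"}
	ep.AddVals(map[string]interface{}{"red": 1, "green": 2, "blue": 3})

	v, err := ep.Parse("GREEN") // matching is case-insensitive unless CaseMatch is set
	fmt.Println(v, err)         // 2 <nil>

	_, err = ep.Parse("purple")
	fmt.Println(err) // failed to parse color `purple`
}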

86
vendor/github.com/src-d/gcfg/types/int.go generated vendored Normal file
View File

@ -0,0 +1,86 @@
package types
import (
"fmt"
"strings"
)
// An IntMode is a mode for parsing integer values, representing a set of
// accepted bases.
type IntMode uint8
// IntMode values for ParseInt; can be combined using binary or.
const (
Dec IntMode = 1 << iota
Hex
Oct
)
// String returns a string representation of IntMode; e.g. `IntMode(Dec|Hex)`.
func (m IntMode) String() string {
var modes []string
if m&Dec != 0 {
modes = append(modes, "Dec")
}
if m&Hex != 0 {
modes = append(modes, "Hex")
}
if m&Oct != 0 {
modes = append(modes, "Oct")
}
return "IntMode(" + strings.Join(modes, "|") + ")"
}
var errIntAmbig = fmt.Errorf("ambiguous integer value; must include '0' prefix")
func prefix0(val string) bool {
return strings.HasPrefix(val, "0") || strings.HasPrefix(val, "-0")
}
func prefix0x(val string) bool {
return strings.HasPrefix(val, "0x") || strings.HasPrefix(val, "-0x")
}
// ParseInt parses val using mode into intptr, which must be a pointer to an
// integer kind type. Non-decimal values require a `0` or `0x` prefix in cases
// where the mode permits ambiguity of base; otherwise the prefix can be omitted.
func ParseInt(intptr interface{}, val string, mode IntMode) error {
val = strings.TrimSpace(val)
verb := byte(0)
switch mode {
case Dec:
verb = 'd'
case Dec + Hex:
if prefix0x(val) {
verb = 'v'
} else {
verb = 'd'
}
case Dec + Oct:
if prefix0(val) && !prefix0x(val) {
verb = 'v'
} else {
verb = 'd'
}
case Dec + Hex + Oct:
verb = 'v'
case Hex:
if prefix0x(val) {
verb = 'v'
} else {
verb = 'x'
}
case Oct:
verb = 'o'
case Hex + Oct:
if prefix0(val) {
verb = 'v'
} else {
return errIntAmbig
}
}
if verb == 0 {
panic("unsupported mode")
}
return ScanFully(intptr, val, verb)
}
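A short sketch (not part of the vendored file) of how the mode and the 0/0x prefixes interact:

package main

import (
	"fmt"

	"github.com/src-d/gcfg/types"
)

func main() {
	var n int

	// With Dec|Hex, the 0x prefix selects hexadecimal; otherwise decimal is used.
	_ = types.ParseInt(&n, "0x1f", types.Dec|types.Hex)
	fmt.Println(n) // 31
	_ = types.ParseInt(&n, "17", types.Dec|types.Hex)
	fmt.Println(n) // 17

	// Hex|Oct without a leading 0 is ambiguous and is rejected.
	err := types.ParseInt(&n, "17", types.Hex|types.Oct)
	fmt.Println(err) // ambiguous integer value; must include '0' prefix
}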

23
vendor/github.com/src-d/gcfg/types/scan.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
package types
import (
"fmt"
"io"
"reflect"
)
// ScanFully uses fmt.Sscanf with verb to fully scan val into ptr.
func ScanFully(ptr interface{}, val string, verb byte) error {
t := reflect.ValueOf(ptr).Elem().Type()
// attempt to read extra bytes to make sure the value is consumed
var b []byte
n, err := fmt.Sscanf(val, "%"+string(verb)+"%s", ptr, &b)
switch {
case n < 1 || n == 1 && err != io.EOF:
return fmt.Errorf("failed to parse %q as %v: %v", val, t, err)
case n > 1:
return fmt.Errorf("failed to parse %q as %v: extra characters %q", val, t, string(b))
}
// n == 1 && err == io.EOF
return nil
}
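ScanFully insists that the verb consumes the entire string; a small sketch (not part of the vendored file):

package main

import (
	"fmt"

	"github.com/src-d/gcfg/types"
)

func main() {
	var f float64
	err := types.ScanFully(&f, "3.25", 'g')
	fmt.Println(f, err) // 3.25 <nil>

	var n int
	err = types.ScanFully(&n, "42abc", 'd')
	fmt.Println(err) // failed to parse "42abc" as int: extra characters "abc"
}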

202
vendor/github.com/xanzy/ssh-agent/LICENSE generated vendored Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

146
vendor/github.com/xanzy/ssh-agent/pageant_windows.go generated vendored Normal file
View File

@ -0,0 +1,146 @@
//
// Copyright (c) 2014 David Mzareulyan
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software
// and associated documentation files (the "Software"), to deal in the Software without restriction,
// including without limitation the rights to use, copy, modify, merge, publish, distribute,
// sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial
// portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
// BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
// +build windows
package sshagent
// see https://github.com/Yasushi/putty/blob/master/windows/winpgntc.c#L155
// see https://github.com/paramiko/paramiko/blob/master/paramiko/win_pageant.py
import (
"encoding/binary"
"errors"
"fmt"
"sync"
"syscall"
"unsafe"
)
// MaxMessageLen is the maximum size of a message that can be sent to Pageant.
const MaxMessageLen = 8192
var (
ErrPageantNotFound = errors.New("pageant process not found")
ErrSendMessage = errors.New("error sending message")
ErrMessageTooLong = errors.New("message too long")
ErrInvalidMessageFormat = errors.New("invalid message format")
ErrResponseTooLong = errors.New("response too long")
)
const (
agentCopydataID = 0x804e50ba
wmCopydata = 74
)
type copyData struct {
dwData uintptr
cbData uint32
lpData unsafe.Pointer
}
var (
lock sync.Mutex
winFindWindow = winAPI("user32.dll", "FindWindowW")
winGetCurrentThreadID = winAPI("kernel32.dll", "GetCurrentThreadId")
winSendMessage = winAPI("user32.dll", "SendMessageW")
)
func winAPI(dllName, funcName string) func(...uintptr) (uintptr, uintptr, error) {
proc := syscall.MustLoadDLL(dllName).MustFindProc(funcName)
return func(a ...uintptr) (uintptr, uintptr, error) { return proc.Call(a...) }
}
// Available returns true if Pageant is running
func Available() bool { return pageantWindow() != 0 }
// query sends the message msg to Pageant and returns the response or an error.
// 'msg' is a raw agent request with a length prefix.
// The response is a raw agent response with a length prefix.
func query(msg []byte) ([]byte, error) {
if len(msg) > MaxMessageLen {
return nil, ErrMessageTooLong
}
msgLen := binary.BigEndian.Uint32(msg[:4])
if len(msg) != int(msgLen)+4 {
return nil, ErrInvalidMessageFormat
}
lock.Lock()
defer lock.Unlock()
paWin := pageantWindow()
if paWin == 0 {
return nil, ErrPageantNotFound
}
thID, _, _ := winGetCurrentThreadID()
mapName := fmt.Sprintf("PageantRequest%08x", thID)
pMapName, _ := syscall.UTF16PtrFromString(mapName)
mmap, err := syscall.CreateFileMapping(syscall.InvalidHandle, nil, syscall.PAGE_READWRITE, 0, MaxMessageLen+4, pMapName)
if err != nil {
return nil, err
}
defer syscall.CloseHandle(mmap)
ptr, err := syscall.MapViewOfFile(mmap, syscall.FILE_MAP_WRITE, 0, 0, 0)
if err != nil {
return nil, err
}
defer syscall.UnmapViewOfFile(ptr)
mmSlice := (*(*[MaxMessageLen]byte)(unsafe.Pointer(ptr)))[:]
copy(mmSlice, msg)
mapNameBytesZ := append([]byte(mapName), 0)
cds := copyData{
dwData: agentCopydataID,
cbData: uint32(len(mapNameBytesZ)),
lpData: unsafe.Pointer(&(mapNameBytesZ[0])),
}
resp, _, _ := winSendMessage(paWin, wmCopydata, 0, uintptr(unsafe.Pointer(&cds)))
if resp == 0 {
return nil, ErrSendMessage
}
respLen := binary.BigEndian.Uint32(mmSlice[:4])
if respLen > MaxMessageLen-4 {
return nil, ErrResponseTooLong
}
respData := make([]byte, respLen+4)
copy(respData, mmSlice)
return respData, nil
}
func pageantWindow() uintptr {
nameP, _ := syscall.UTF16PtrFromString("Pageant")
h, _, _ := winFindWindow(uintptr(unsafe.Pointer(nameP)), uintptr(unsafe.Pointer(nameP)))
return h
}
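For reference, an in-package sketch (not part of the vendored file) of the framing that query expects: a standard SSH agent request prefixed with its 4-byte big-endian length. The request code 11 (request identities) comes from the SSH agent protocol; this only works on Windows with Pageant running.

// inside package sshagent
payload := []byte{11} // SSH2_AGENTC_REQUEST_IDENTITIES
msg := make([]byte, 4+len(payload))
binary.BigEndian.PutUint32(msg, uint32(len(payload)))
copy(msg[4:], payload)

resp, err := query(msg)
if err != nil {
	// e.g. ErrPageantNotFound when no Pageant window exists
	return
}
_ = resp // length-prefixed SSH_AGENT_IDENTITIES_ANSWER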

49
vendor/github.com/xanzy/ssh-agent/sshagent.go generated vendored Normal file
View File

@ -0,0 +1,49 @@
//
// Copyright 2015, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// +build !windows
package sshagent
import (
"errors"
"fmt"
"net"
"os"
"golang.org/x/crypto/ssh/agent"
)
// New returns a new agent.Agent that uses a Unix socket.
func New() (agent.Agent, net.Conn, error) {
if !Available() {
return nil, nil, errors.New("SSH agent requested but SSH_AUTH_SOCK not-specified")
}
sshAuthSock := os.Getenv("SSH_AUTH_SOCK")
conn, err := net.Dial("unix", sshAuthSock)
if err != nil {
return nil, nil, fmt.Errorf("Error connecting to SSH_AUTH_SOCK: %v", err)
}
return agent.NewClient(conn), conn, nil
}
// Available returns true if an auth socket is defined.
func Available() bool {
return os.Getenv("SSH_AUTH_SOCK") != ""
}
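A minimal caller-side sketch (not part of the vendored file), listing the keys held by the agent; it assumes the package is imported as github.com/xanzy/ssh-agent and that an agent is actually running:

package main

import (
	"fmt"
	"log"

	sshagent "github.com/xanzy/ssh-agent"
)

func main() {
	ag, conn, err := sshagent.New()
	if err != nil {
		log.Fatal(err)
	}
	if conn != nil {
		defer conn.Close()
	}

	keys, err := ag.List()
	if err != nil {
		log.Fatal(err)
	}
	for _, k := range keys {
		fmt.Println(k.Comment)
	}
}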

80
vendor/github.com/xanzy/ssh-agent/sshagent_windows.go generated vendored Normal file
View File

@ -0,0 +1,80 @@
//
// Copyright (c) 2014 David Mzareulyan
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software
// and associated documentation files (the "Software"), to deal in the Software without restriction,
// including without limitation the rights to use, copy, modify, merge, publish, distribute,
// sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial
// portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
// BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
// +build windows
package sshagent
import (
"errors"
"io"
"net"
"sync"
"golang.org/x/crypto/ssh/agent"
)
// New returns a new agent.Agent and the (custom) connection it uses
// to communicate with a running pageant.exe instance (see README.md).
func New() (agent.Agent, net.Conn, error) {
if !Available() {
return nil, nil, errors.New("SSH agent requested but Pageant not running")
}
return agent.NewClient(&conn{}), nil, nil
}
type conn struct {
sync.Mutex
buf []byte
}
func (c *conn) Close() {
c.Lock()
defer c.Unlock()
c.buf = nil
}
func (c *conn) Write(p []byte) (int, error) {
c.Lock()
defer c.Unlock()
resp, err := query(p)
if err != nil {
return 0, err
}
c.buf = append(c.buf, resp...)
return len(p), nil
}
func (c *conn) Read(p []byte) (int, error) {
c.Lock()
defer c.Unlock()
if len(c.buf) == 0 {
return 0, io.EOF
}
n := copy(p, c.buf)
c.buf = c.buf[n:]
return n, nil
}

3
vendor/golang.org/x/crypto/AUTHORS generated vendored Normal file
View File

@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at https://tip.golang.org/AUTHORS.

3
vendor/golang.org/x/crypto/CONTRIBUTORS generated vendored Normal file
View File

@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at https://tip.golang.org/CONTRIBUTORS.

27
vendor/golang.org/x/crypto/LICENSE generated vendored Normal file
View File

@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22
vendor/golang.org/x/crypto/PATENTS generated vendored Normal file
View File

@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

526
vendor/golang.org/x/crypto/cast5/cast5.go generated vendored Normal file
View File

@ -0,0 +1,526 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package cast5 implements CAST5, as defined in RFC 2144. CAST5 is a common
// OpenPGP cipher.
package cast5 // import "golang.org/x/crypto/cast5"
import "errors"
const BlockSize = 8
const KeySize = 16
type Cipher struct {
masking [16]uint32
rotate [16]uint8
}
func NewCipher(key []byte) (c *Cipher, err error) {
if len(key) != KeySize {
return nil, errors.New("CAST5: keys must be 16 bytes")
}
c = new(Cipher)
c.keySchedule(key)
return
}
func (c *Cipher) BlockSize() int {
return BlockSize
}
func (c *Cipher) Encrypt(dst, src []byte) {
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
l, r = r, l^f1(r, c.masking[0], c.rotate[0])
l, r = r, l^f2(r, c.masking[1], c.rotate[1])
l, r = r, l^f3(r, c.masking[2], c.rotate[2])
l, r = r, l^f1(r, c.masking[3], c.rotate[3])
l, r = r, l^f2(r, c.masking[4], c.rotate[4])
l, r = r, l^f3(r, c.masking[5], c.rotate[5])
l, r = r, l^f1(r, c.masking[6], c.rotate[6])
l, r = r, l^f2(r, c.masking[7], c.rotate[7])
l, r = r, l^f3(r, c.masking[8], c.rotate[8])
l, r = r, l^f1(r, c.masking[9], c.rotate[9])
l, r = r, l^f2(r, c.masking[10], c.rotate[10])
l, r = r, l^f3(r, c.masking[11], c.rotate[11])
l, r = r, l^f1(r, c.masking[12], c.rotate[12])
l, r = r, l^f2(r, c.masking[13], c.rotate[13])
l, r = r, l^f3(r, c.masking[14], c.rotate[14])
l, r = r, l^f1(r, c.masking[15], c.rotate[15])
dst[0] = uint8(r >> 24)
dst[1] = uint8(r >> 16)
dst[2] = uint8(r >> 8)
dst[3] = uint8(r)
dst[4] = uint8(l >> 24)
dst[5] = uint8(l >> 16)
dst[6] = uint8(l >> 8)
dst[7] = uint8(l)
}
func (c *Cipher) Decrypt(dst, src []byte) {
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
l, r = r, l^f1(r, c.masking[15], c.rotate[15])
l, r = r, l^f3(r, c.masking[14], c.rotate[14])
l, r = r, l^f2(r, c.masking[13], c.rotate[13])
l, r = r, l^f1(r, c.masking[12], c.rotate[12])
l, r = r, l^f3(r, c.masking[11], c.rotate[11])
l, r = r, l^f2(r, c.masking[10], c.rotate[10])
l, r = r, l^f1(r, c.masking[9], c.rotate[9])
l, r = r, l^f3(r, c.masking[8], c.rotate[8])
l, r = r, l^f2(r, c.masking[7], c.rotate[7])
l, r = r, l^f1(r, c.masking[6], c.rotate[6])
l, r = r, l^f3(r, c.masking[5], c.rotate[5])
l, r = r, l^f2(r, c.masking[4], c.rotate[4])
l, r = r, l^f1(r, c.masking[3], c.rotate[3])
l, r = r, l^f3(r, c.masking[2], c.rotate[2])
l, r = r, l^f2(r, c.masking[1], c.rotate[1])
l, r = r, l^f1(r, c.masking[0], c.rotate[0])
dst[0] = uint8(r >> 24)
dst[1] = uint8(r >> 16)
dst[2] = uint8(r >> 8)
dst[3] = uint8(r)
dst[4] = uint8(l >> 24)
dst[5] = uint8(l >> 16)
dst[6] = uint8(l >> 8)
dst[7] = uint8(l)
}
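// Illustrative usage sketch (not part of the upstream cast5 source): CAST5
// works on independent 8-byte blocks with a 16-byte key, so a round trip
// looks like this (key and plaintext are made-up values):
//
//	key := []byte("0123456789abcdef") // KeySize (16) bytes
//	c, err := NewCipher(key)
//	if err != nil {
//		// handle error
//	}
//	src := []byte("8 bytes!") // BlockSize (8) bytes
//	dst := make([]byte, BlockSize)
//	c.Encrypt(dst, src)
//	back := make([]byte, BlockSize)
//	c.Decrypt(back, dst) // back now equals src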
type keyScheduleA [4][7]uint8
type keyScheduleB [4][5]uint8
// keyScheduleRound contains the magic values for a round of the key schedule.
// The keyScheduleA deals with the lines like:
// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8]
// Conceptually, both x and z are in the same array, x first. The first
// element describes which word of this array gets written to and the
// second, which word gets read. So, for the line above, it's "4, 0", because
// it's writing to the first word of z, which, being after x, is word 4, and
// reading from the first word of x: word 0.
//
// Next are the indexes into the S-boxes. Now the array is treated as bytes. So
// "xD" is 0xd. The first byte of z is written as "16 + 0", just to be clear
// that it's z that we're indexing.
//
// keyScheduleB deals with lines like:
// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2]
// "K1" is ignored because key words are always written in order. So the five
// elements are the S-box indexes. They use the same form as in keyScheduleA,
// above.
type keyScheduleRound struct{}
type keySchedule []keyScheduleRound
var schedule = []struct {
a keyScheduleA
b keyScheduleB
}{
{
keyScheduleA{
{4, 0, 0xd, 0xf, 0xc, 0xe, 0x8},
{5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa},
{6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9},
{7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb},
},
keyScheduleB{
{16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2},
{16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6},
{16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9},
{16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc},
},
},
{
keyScheduleA{
{0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0},
{1, 4, 0, 2, 1, 3, 16 + 2},
{2, 5, 7, 6, 5, 4, 16 + 1},
{3, 7, 0xa, 9, 0xb, 8, 16 + 3},
},
keyScheduleB{
{3, 2, 0xc, 0xd, 8},
{1, 0, 0xe, 0xf, 0xd},
{7, 6, 8, 9, 3},
{5, 4, 0xa, 0xb, 7},
},
},
{
keyScheduleA{
{4, 0, 0xd, 0xf, 0xc, 0xe, 8},
{5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa},
{6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9},
{7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb},
},
keyScheduleB{
{16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9},
{16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc},
{16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2},
{16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6},
},
},
{
keyScheduleA{
{0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0},
{1, 4, 0, 2, 1, 3, 16 + 2},
{2, 5, 7, 6, 5, 4, 16 + 1},
{3, 7, 0xa, 9, 0xb, 8, 16 + 3},
},
keyScheduleB{
{8, 9, 7, 6, 3},
{0xa, 0xb, 5, 4, 7},
{0xc, 0xd, 3, 2, 8},
{0xe, 0xf, 1, 0, 0xd},
},
},
}
func (c *Cipher) keySchedule(in []byte) {
var t [8]uint32
var k [32]uint32
for i := 0; i < 4; i++ {
j := i * 4
t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3])
}
x := []byte{6, 7, 4, 5}
ki := 0
for half := 0; half < 2; half++ {
for _, round := range schedule {
for j := 0; j < 4; j++ {
var a [7]uint8
copy(a[:], round.a[j][:])
w := t[a[1]]
w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff]
w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff]
w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff]
w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff]
w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff]
t[a[0]] = w
}
for j := 0; j < 4; j++ {
var b [5]uint8
copy(b[:], round.b[j][:])
w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff]
w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff]
w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff]
w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff]
w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff]
k[ki] = w
ki++
}
}
}
for i := 0; i < 16; i++ {
c.masking[i] = k[i]
c.rotate[i] = uint8(k[16+i] & 0x1f)
}
}
// These are the three 'f' functions. See RFC 2144, section 2.2.
func f1(d, m uint32, r uint8) uint32 {
t := m + d
I := (t << r) | (t >> (32 - r))
return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff]
}
func f2(d, m uint32, r uint8) uint32 {
t := m ^ d
I := (t << r) | (t >> (32 - r))
return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff]
}
func f3(d, m uint32, r uint8) uint32 {
t := m - d
I := (t << r) | (t >> (32 - r))
return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff]
}
var sBox = [8][256]uint32{
{
0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949,
0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e,
0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d,
0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0,
0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7,
0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935,
0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d,
0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50,
0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe,
0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3,
0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167,
0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291,
0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779,
0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2,
0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511,
0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d,
0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5,
0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324,
0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c,
0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc,
0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d,
0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96,
0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a,
0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d,
0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd,
0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6,
0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9,
0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872,
0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c,
0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e,
0xbd91e046, 0x9a56456e, 0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9,
0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf,
},
{
0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651,
0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3,
0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb,
0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806,
0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b,
0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359,
0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b,
0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c,
0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34,
0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb,
0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd,
0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860,
0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b,
0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304,
0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b,
0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf,
0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c,
0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13,
0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f,
0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6,
0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6,
0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58,
0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906,
0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d,
0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6,
0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4,
0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6,
0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f,
0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249,
0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa,
0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9,
0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1,
},
{
0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90,
0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5,
0x11107d9f, 0x07647db9, 0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e,
0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240,
0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5,
0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b,
0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71,
0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04,
0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82,
0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15,
0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2,
0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176,
0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148,
0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc,
0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341,
0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e,
0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51,
0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f,
0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a,
0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b,
0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b,
0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5,
0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45,
0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536,
0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc,
0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0,
0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69,
0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2,
0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49,
0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d,
0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a,
0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783,
},
{
0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1,
0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf,
0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15,
0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121,
0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25,
0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5,
0x2649abdf, 0xaea0c7f5, 0x36338cc1, 0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb,
0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5,
0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d,
0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6,
0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23,
0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003,
0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6,
0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119,
0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24,
0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a,
0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79,
0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df,
0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26,
0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab,
0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7,
0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417,
0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2,
0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2,
0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a,
0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919,
0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef,
0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876,
0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab,
0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04,
0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282,
0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2,
},
{
0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f,
0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a,
0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff,
0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02,
0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a,
0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7,
0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9,
0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981,
0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774,
0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655,
0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2,
0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910,
0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1,
0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da,
0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049,
0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f,
0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba,
0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be,
0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3,
0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840,
0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4,
0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2,
0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7,
0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5,
0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e,
0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e,
0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801,
0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad,
0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0,
0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20,
0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8,
0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4,
},
{
0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac,
0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138,
0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367,
0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98,
0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072,
0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3,
0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd,
0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8,
0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9,
0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54,
0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387,
0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc,
0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf,
0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf,
0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 0xa8dc8af0, 0x7345c106, 0xf41e232f,
0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289,
0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950,
0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f,
0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b,
0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be,
0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13,
0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976,
0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0,
0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891,
0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da,
0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc,
0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084,
0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25,
0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121,
0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5,
0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd,
0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f,
},
{
0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f,
0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de,
0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43,
0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19,
0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2,
0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516,
0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88,
0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816,
0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756,
0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a,
0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264,
0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688,
0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28,
0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3,
0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7,
0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06,
0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033,
0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a,
0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 0x488dcf25, 0x36c9d566,
0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509,
0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962,
0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e,
0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c,
0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c,
0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285,
0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301,
0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be,
0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767,
0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647,
0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914,
0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c,
0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3,
},
{
0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5,
0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc,
0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd,
0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d,
0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2,
0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862,
0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc,
0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c,
0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e,
0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039,
0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8,
0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42,
0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5,
0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472,
0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225,
0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c,
0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb,
0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054,
0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70,
0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc,
0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c,
0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3,
0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 0x2eda7fa4,
0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101,
0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f,
0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e,
0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a,
0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c,
0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384,
0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c,
0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82,
0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e,
},
}

8
vendor/golang.org/x/crypto/curve25519/const_amd64.h generated vendored Normal file
View File

@ -0,0 +1,8 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
#define REDMASK51 0x0007FFFFFFFFFFFF

20
vendor/golang.org/x/crypto/curve25519/const_amd64.s generated vendored Normal file
View File

@ -0,0 +1,20 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
// +build amd64,!gccgo,!appengine
// These constants cannot be encoded in non-MOVQ immediates.
// We access them directly from memory instead.
DATA ·_121666_213(SB)/8, $996687872
GLOBL ·_121666_213(SB), 8, $8
DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA
GLOBL ·_2P0(SB), 8, $8
DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE
GLOBL ·_2P1234(SB), 8, $8

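For readers of const_amd64.h and const_amd64.s above, a standalone sketch (not part of this commit) that recomputes the magic numbers; the interpretation is inferred from the symbol names and from the curve25519 field prime p = 2^255 - 19, so treat it as an assumption rather than documentation of the vendored code:

package main

import "fmt"

func main() {
	// REDMASK51 masks a single 51-bit limb of the radix-2^51 representation.
	fmt.Printf("%#x\n", uint64(1)<<51-1) // 0x7ffffffffffff == REDMASK51

	// ·_121666_213 appears to be 121666 shifted left by 13 bits.
	fmt.Println(uint64(121666) << 13) // 996687872

	// ·_2P0 and ·_2P1234 look like the limbs of 2p = 2^256 - 38 in radix 2^51,
	// added before limb-wise subtraction so intermediate values cannot go negative:
	// 2p = (2^52 - 38) + (2^52 - 2)*(2^51 + 2^102 + 2^153 + 2^204).
	fmt.Printf("%#x %#x\n", uint64(1)<<52-38, uint64(1)<<52-2) // 0xfffffffffffda 0xffffffffffffe
}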
65
vendor/golang.org/x/crypto/curve25519/cswap_amd64.s generated vendored Normal file
View File

@ -0,0 +1,65 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64,!gccgo,!appengine
// func cswap(inout *[4][5]uint64, v uint64)
TEXT ·cswap(SB),7,$0
MOVQ inout+0(FP),DI
MOVQ v+8(FP),SI
SUBQ $1, SI
NOTQ SI
MOVQ SI, X15
PSHUFD $0x44, X15, X15
MOVOU 0(DI), X0
MOVOU 16(DI), X2
MOVOU 32(DI), X4
MOVOU 48(DI), X6
MOVOU 64(DI), X8
MOVOU 80(DI), X1
MOVOU 96(DI), X3
MOVOU 112(DI), X5
MOVOU 128(DI), X7
MOVOU 144(DI), X9
MOVO X1, X10
MOVO X3, X11
MOVO X5, X12
MOVO X7, X13
MOVO X9, X14
PXOR X0, X10
PXOR X2, X11
PXOR X4, X12
PXOR X6, X13
PXOR X8, X14
PAND X15, X10
PAND X15, X11
PAND X15, X12
PAND X15, X13
PAND X15, X14
PXOR X10, X0
PXOR X10, X1
PXOR X11, X2
PXOR X11, X3
PXOR X12, X4
PXOR X12, X5
PXOR X13, X6
PXOR X13, X7
PXOR X14, X8
PXOR X14, X9
MOVOU X0, 0(DI)
MOVOU X2, 16(DI)
MOVOU X4, 32(DI)
MOVOU X6, 48(DI)
MOVOU X8, 64(DI)
MOVOU X1, 80(DI)
MOVOU X3, 96(DI)
MOVOU X5, 112(DI)
MOVOU X7, 128(DI)
MOVOU X9, 144(DI)
RET

834
vendor/golang.org/x/crypto/curve25519/curve25519.go generated vendored Normal file
View File

@ -0,0 +1,834 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// We have an implementation in amd64 assembly so this code is only run on
// non-amd64 platforms. The amd64 assembly does not support gccgo.
// +build !amd64 gccgo appengine
package curve25519
import (
"encoding/binary"
)
// This code is a port of the public domain, "ref10" implementation of
// curve25519 from SUPERCOP 20130419 by D. J. Bernstein.
// fieldElement represents an element of the field GF(2^255 - 19). An element
// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
// context.
type fieldElement [10]int32
func feZero(fe *fieldElement) {
for i := range fe {
fe[i] = 0
}
}
func feOne(fe *fieldElement) {
feZero(fe)
fe[0] = 1
}
func feAdd(dst, a, b *fieldElement) {
for i := range dst {
dst[i] = a[i] + b[i]
}
}
func feSub(dst, a, b *fieldElement) {
for i := range dst {
dst[i] = a[i] - b[i]
}
}
func feCopy(dst, src *fieldElement) {
for i := range dst {
dst[i] = src[i]
}
}
// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0.
//
// Preconditions: b in {0,1}.
func feCSwap(f, g *fieldElement, b int32) {
b = -b
for i := range f {
t := b & (f[i] ^ g[i])
f[i] ^= t
g[i] ^= t
}
}
// load3 reads a 24-bit, little-endian value from in.
func load3(in []byte) int64 {
var r int64
r = int64(in[0])
r |= int64(in[1]) << 8
r |= int64(in[2]) << 16
return r
}
// load4 reads a 32-bit, little-endian value from in.
func load4(in []byte) int64 {
return int64(binary.LittleEndian.Uint32(in))
}
func feFromBytes(dst *fieldElement, src *[32]byte) {
h0 := load4(src[:])
h1 := load3(src[4:]) << 6
h2 := load3(src[7:]) << 5
h3 := load3(src[10:]) << 3
h4 := load3(src[13:]) << 2
h5 := load4(src[16:])
h6 := load3(src[20:]) << 7
h7 := load3(src[23:]) << 5
h8 := load3(src[26:]) << 4
h9 := load3(src[29:]) << 2
var carry [10]int64
carry[9] = (h9 + 1<<24) >> 25
h0 += carry[9] * 19
h9 -= carry[9] << 25
carry[1] = (h1 + 1<<24) >> 25
h2 += carry[1]
h1 -= carry[1] << 25
carry[3] = (h3 + 1<<24) >> 25
h4 += carry[3]
h3 -= carry[3] << 25
carry[5] = (h5 + 1<<24) >> 25
h6 += carry[5]
h5 -= carry[5] << 25
carry[7] = (h7 + 1<<24) >> 25
h8 += carry[7]
h7 -= carry[7] << 25
carry[0] = (h0 + 1<<25) >> 26
h1 += carry[0]
h0 -= carry[0] << 26
carry[2] = (h2 + 1<<25) >> 26
h3 += carry[2]
h2 -= carry[2] << 26
carry[4] = (h4 + 1<<25) >> 26
h5 += carry[4]
h4 -= carry[4] << 26
carry[6] = (h6 + 1<<25) >> 26
h7 += carry[6]
h6 -= carry[6] << 26
carry[8] = (h8 + 1<<25) >> 26
h9 += carry[8]
h8 -= carry[8] << 26
dst[0] = int32(h0)
dst[1] = int32(h1)
dst[2] = int32(h2)
dst[3] = int32(h3)
dst[4] = int32(h4)
dst[5] = int32(h5)
dst[6] = int32(h6)
dst[7] = int32(h7)
dst[8] = int32(h8)
dst[9] = int32(h9)
}
// feToBytes marshals h to s.
// Preconditions:
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
//
// Write p=2^255-19; q=floor(h/p).
// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
//
// Proof:
// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
//
// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
// Then 0<y<1.
//
// Write r=h-pq.
// Have 0<=r<=p-1=2^255-20.
// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
//
// Write x=r+19(2^-255)r+y.
// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
//
// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
func feToBytes(s *[32]byte, h *fieldElement) {
var carry [10]int32
q := (19*h[9] + (1 << 24)) >> 25
q = (h[0] + q) >> 26
q = (h[1] + q) >> 25
q = (h[2] + q) >> 26
q = (h[3] + q) >> 25
q = (h[4] + q) >> 26
q = (h[5] + q) >> 25
q = (h[6] + q) >> 26
q = (h[7] + q) >> 25
q = (h[8] + q) >> 26
q = (h[9] + q) >> 25
// Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
h[0] += 19 * q
// Goal: Output h-2^255 q, which is between 0 and 2^255-20.
carry[0] = h[0] >> 26
h[1] += carry[0]
h[0] -= carry[0] << 26
carry[1] = h[1] >> 25
h[2] += carry[1]
h[1] -= carry[1] << 25
carry[2] = h[2] >> 26
h[3] += carry[2]
h[2] -= carry[2] << 26
carry[3] = h[3] >> 25
h[4] += carry[3]
h[3] -= carry[3] << 25
carry[4] = h[4] >> 26
h[5] += carry[4]
h[4] -= carry[4] << 26
carry[5] = h[5] >> 25
h[6] += carry[5]
h[5] -= carry[5] << 25
carry[6] = h[6] >> 26
h[7] += carry[6]
h[6] -= carry[6] << 26
carry[7] = h[7] >> 25
h[8] += carry[7]
h[7] -= carry[7] << 25
carry[8] = h[8] >> 26
h[9] += carry[8]
h[8] -= carry[8] << 26
carry[9] = h[9] >> 25
h[9] -= carry[9] << 25
// h10 = carry9
// Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
// Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
// evidently 2^255 h10-2^255 q = 0.
// Goal: Output h[0]+...+2^230 h[9].
s[0] = byte(h[0] >> 0)
s[1] = byte(h[0] >> 8)
s[2] = byte(h[0] >> 16)
s[3] = byte((h[0] >> 24) | (h[1] << 2))
s[4] = byte(h[1] >> 6)
s[5] = byte(h[1] >> 14)
s[6] = byte((h[1] >> 22) | (h[2] << 3))
s[7] = byte(h[2] >> 5)
s[8] = byte(h[2] >> 13)
s[9] = byte((h[2] >> 21) | (h[3] << 5))
s[10] = byte(h[3] >> 3)
s[11] = byte(h[3] >> 11)
s[12] = byte((h[3] >> 19) | (h[4] << 6))
s[13] = byte(h[4] >> 2)
s[14] = byte(h[4] >> 10)
s[15] = byte(h[4] >> 18)
s[16] = byte(h[5] >> 0)
s[17] = byte(h[5] >> 8)
s[18] = byte(h[5] >> 16)
s[19] = byte((h[5] >> 24) | (h[6] << 1))
s[20] = byte(h[6] >> 7)
s[21] = byte(h[6] >> 15)
s[22] = byte((h[6] >> 23) | (h[7] << 3))
s[23] = byte(h[7] >> 5)
s[24] = byte(h[7] >> 13)
s[25] = byte((h[7] >> 21) | (h[8] << 4))
s[26] = byte(h[8] >> 4)
s[27] = byte(h[8] >> 12)
s[28] = byte((h[8] >> 20) | (h[9] << 6))
s[29] = byte(h[9] >> 2)
s[30] = byte(h[9] >> 10)
s[31] = byte(h[9] >> 18)
}
// feMul calculates h = f * g
// Can overlap h with f or g.
//
// Preconditions:
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
//
// Postconditions:
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
//
// Notes on implementation strategy:
//
// Using schoolbook multiplication.
// Karatsuba would save a little in some cost models.
//
// Most multiplications by 2 and 19 are 32-bit precomputations;
// cheaper than 64-bit postcomputations.
//
// There is one remaining multiplication by 19 in the carry chain;
// one *19 precomputation can be merged into this,
// but the resulting data flow is considerably less clean.
//
// There are 12 carries below.
// 10 of them are 2-way parallelizable and vectorizable.
// Can get away with 11 carries, but then data flow is much deeper.
//
// With tighter constraints on inputs can squeeze carries into int32.
func feMul(h, f, g *fieldElement) {
f0 := f[0]
f1 := f[1]
f2 := f[2]
f3 := f[3]
f4 := f[4]
f5 := f[5]
f6 := f[6]
f7 := f[7]
f8 := f[8]
f9 := f[9]
g0 := g[0]
g1 := g[1]
g2 := g[2]
g3 := g[3]
g4 := g[4]
g5 := g[5]
g6 := g[6]
g7 := g[7]
g8 := g[8]
g9 := g[9]
g1_19 := 19 * g1 // 1.4*2^29
g2_19 := 19 * g2 // 1.4*2^30; still ok
g3_19 := 19 * g3
g4_19 := 19 * g4
g5_19 := 19 * g5
g6_19 := 19 * g6
g7_19 := 19 * g7
g8_19 := 19 * g8
g9_19 := 19 * g9
f1_2 := 2 * f1
f3_2 := 2 * f3
f5_2 := 2 * f5
f7_2 := 2 * f7
f9_2 := 2 * f9
f0g0 := int64(f0) * int64(g0)
f0g1 := int64(f0) * int64(g1)
f0g2 := int64(f0) * int64(g2)
f0g3 := int64(f0) * int64(g3)
f0g4 := int64(f0) * int64(g4)
f0g5 := int64(f0) * int64(g5)
f0g6 := int64(f0) * int64(g6)
f0g7 := int64(f0) * int64(g7)
f0g8 := int64(f0) * int64(g8)
f0g9 := int64(f0) * int64(g9)
f1g0 := int64(f1) * int64(g0)
f1g1_2 := int64(f1_2) * int64(g1)
f1g2 := int64(f1) * int64(g2)
f1g3_2 := int64(f1_2) * int64(g3)
f1g4 := int64(f1) * int64(g4)
f1g5_2 := int64(f1_2) * int64(g5)
f1g6 := int64(f1) * int64(g6)
f1g7_2 := int64(f1_2) * int64(g7)
f1g8 := int64(f1) * int64(g8)
f1g9_38 := int64(f1_2) * int64(g9_19)
f2g0 := int64(f2) * int64(g0)
f2g1 := int64(f2) * int64(g1)
f2g2 := int64(f2) * int64(g2)
f2g3 := int64(f2) * int64(g3)
f2g4 := int64(f2) * int64(g4)
f2g5 := int64(f2) * int64(g5)
f2g6 := int64(f2) * int64(g6)
f2g7 := int64(f2) * int64(g7)
f2g8_19 := int64(f2) * int64(g8_19)
f2g9_19 := int64(f2) * int64(g9_19)
f3g0 := int64(f3) * int64(g0)
f3g1_2 := int64(f3_2) * int64(g1)
f3g2 := int64(f3) * int64(g2)
f3g3_2 := int64(f3_2) * int64(g3)
f3g4 := int64(f3) * int64(g4)
f3g5_2 := int64(f3_2) * int64(g5)
f3g6 := int64(f3) * int64(g6)
f3g7_38 := int64(f3_2) * int64(g7_19)
f3g8_19 := int64(f3) * int64(g8_19)
f3g9_38 := int64(f3_2) * int64(g9_19)
f4g0 := int64(f4) * int64(g0)
f4g1 := int64(f4) * int64(g1)
f4g2 := int64(f4) * int64(g2)
f4g3 := int64(f4) * int64(g3)
f4g4 := int64(f4) * int64(g4)
f4g5 := int64(f4) * int64(g5)
f4g6_19 := int64(f4) * int64(g6_19)
f4g7_19 := int64(f4) * int64(g7_19)
f4g8_19 := int64(f4) * int64(g8_19)
f4g9_19 := int64(f4) * int64(g9_19)
f5g0 := int64(f5) * int64(g0)
f5g1_2 := int64(f5_2) * int64(g1)
f5g2 := int64(f5) * int64(g2)
f5g3_2 := int64(f5_2) * int64(g3)
f5g4 := int64(f5) * int64(g4)
f5g5_38 := int64(f5_2) * int64(g5_19)
f5g6_19 := int64(f5) * int64(g6_19)
f5g7_38 := int64(f5_2) * int64(g7_19)
f5g8_19 := int64(f5) * int64(g8_19)
f5g9_38 := int64(f5_2) * int64(g9_19)
f6g0 := int64(f6) * int64(g0)
f6g1 := int64(f6) * int64(g1)
f6g2 := int64(f6) * int64(g2)
f6g3 := int64(f6) * int64(g3)
f6g4_19 := int64(f6) * int64(g4_19)
f6g5_19 := int64(f6) * int64(g5_19)
f6g6_19 := int64(f6) * int64(g6_19)
f6g7_19 := int64(f6) * int64(g7_19)
f6g8_19 := int64(f6) * int64(g8_19)
f6g9_19 := int64(f6) * int64(g9_19)
f7g0 := int64(f7) * int64(g0)
f7g1_2 := int64(f7_2) * int64(g1)
f7g2 := int64(f7) * int64(g2)
f7g3_38 := int64(f7_2) * int64(g3_19)
f7g4_19 := int64(f7) * int64(g4_19)
f7g5_38 := int64(f7_2) * int64(g5_19)
f7g6_19 := int64(f7) * int64(g6_19)
f7g7_38 := int64(f7_2) * int64(g7_19)
f7g8_19 := int64(f7) * int64(g8_19)
f7g9_38 := int64(f7_2) * int64(g9_19)
f8g0 := int64(f8) * int64(g0)
f8g1 := int64(f8) * int64(g1)
f8g2_19 := int64(f8) * int64(g2_19)
f8g3_19 := int64(f8) * int64(g3_19)
f8g4_19 := int64(f8) * int64(g4_19)
f8g5_19 := int64(f8) * int64(g5_19)
f8g6_19 := int64(f8) * int64(g6_19)
f8g7_19 := int64(f8) * int64(g7_19)
f8g8_19 := int64(f8) * int64(g8_19)
f8g9_19 := int64(f8) * int64(g9_19)
f9g0 := int64(f9) * int64(g0)
f9g1_38 := int64(f9_2) * int64(g1_19)
f9g2_19 := int64(f9) * int64(g2_19)
f9g3_38 := int64(f9_2) * int64(g3_19)
f9g4_19 := int64(f9) * int64(g4_19)
f9g5_38 := int64(f9_2) * int64(g5_19)
f9g6_19 := int64(f9) * int64(g6_19)
f9g7_38 := int64(f9_2) * int64(g7_19)
f9g8_19 := int64(f9) * int64(g8_19)
f9g9_38 := int64(f9_2) * int64(g9_19)
h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38
h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19
h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38
h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19
h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38
h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19
h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38
h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19
h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38
h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0
var carry [10]int64
// |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
// i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
// |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
// i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9
carry[0] = (h0 + (1 << 25)) >> 26
h1 += carry[0]
h0 -= carry[0] << 26
carry[4] = (h4 + (1 << 25)) >> 26
h5 += carry[4]
h4 -= carry[4] << 26
// |h0| <= 2^25
// |h4| <= 2^25
// |h1| <= 1.51*2^58
// |h5| <= 1.51*2^58
carry[1] = (h1 + (1 << 24)) >> 25
h2 += carry[1]
h1 -= carry[1] << 25
carry[5] = (h5 + (1 << 24)) >> 25
h6 += carry[5]
h5 -= carry[5] << 25
// |h1| <= 2^24; from now on fits into int32
// |h5| <= 2^24; from now on fits into int32
// |h2| <= 1.21*2^59
// |h6| <= 1.21*2^59
carry[2] = (h2 + (1 << 25)) >> 26
h3 += carry[2]
h2 -= carry[2] << 26
carry[6] = (h6 + (1 << 25)) >> 26
h7 += carry[6]
h6 -= carry[6] << 26
// |h2| <= 2^25; from now on fits into int32 unchanged
// |h6| <= 2^25; from now on fits into int32 unchanged
// |h3| <= 1.51*2^58
// |h7| <= 1.51*2^58
carry[3] = (h3 + (1 << 24)) >> 25
h4 += carry[3]
h3 -= carry[3] << 25
carry[7] = (h7 + (1 << 24)) >> 25
h8 += carry[7]
h7 -= carry[7] << 25
// |h3| <= 2^24; from now on fits into int32 unchanged
// |h7| <= 2^24; from now on fits into int32 unchanged
// |h4| <= 1.52*2^33
// |h8| <= 1.52*2^33
carry[4] = (h4 + (1 << 25)) >> 26
h5 += carry[4]
h4 -= carry[4] << 26
carry[8] = (h8 + (1 << 25)) >> 26
h9 += carry[8]
h8 -= carry[8] << 26
// |h4| <= 2^25; from now on fits into int32 unchanged
// |h8| <= 2^25; from now on fits into int32 unchanged
// |h5| <= 1.01*2^24
// |h9| <= 1.51*2^58
carry[9] = (h9 + (1 << 24)) >> 25
h0 += carry[9] * 19
h9 -= carry[9] << 25
// |h9| <= 2^24; from now on fits into int32 unchanged
// |h0| <= 1.8*2^37
carry[0] = (h0 + (1 << 25)) >> 26
h1 += carry[0]
h0 -= carry[0] << 26
// |h0| <= 2^25; from now on fits into int32 unchanged
// |h1| <= 1.01*2^24
h[0] = int32(h0)
h[1] = int32(h1)
h[2] = int32(h2)
h[3] = int32(h3)
h[4] = int32(h4)
h[5] = int32(h5)
h[6] = int32(h6)
h[7] = int32(h7)
h[8] = int32(h8)
h[9] = int32(h9)
}
// feSquare calculates h = f*f. Can overlap h with f.
//
// Preconditions:
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
//
// Postconditions:
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
func feSquare(h, f *fieldElement) {
f0 := f[0]
f1 := f[1]
f2 := f[2]
f3 := f[3]
f4 := f[4]
f5 := f[5]
f6 := f[6]
f7 := f[7]
f8 := f[8]
f9 := f[9]
f0_2 := 2 * f0
f1_2 := 2 * f1
f2_2 := 2 * f2
f3_2 := 2 * f3
f4_2 := 2 * f4
f5_2 := 2 * f5
f6_2 := 2 * f6
f7_2 := 2 * f7
f5_38 := 38 * f5 // 1.31*2^30
f6_19 := 19 * f6 // 1.31*2^30
f7_38 := 38 * f7 // 1.31*2^30
f8_19 := 19 * f8 // 1.31*2^30
f9_38 := 38 * f9 // 1.31*2^30
f0f0 := int64(f0) * int64(f0)
f0f1_2 := int64(f0_2) * int64(f1)
f0f2_2 := int64(f0_2) * int64(f2)
f0f3_2 := int64(f0_2) * int64(f3)
f0f4_2 := int64(f0_2) * int64(f4)
f0f5_2 := int64(f0_2) * int64(f5)
f0f6_2 := int64(f0_2) * int64(f6)
f0f7_2 := int64(f0_2) * int64(f7)
f0f8_2 := int64(f0_2) * int64(f8)
f0f9_2 := int64(f0_2) * int64(f9)
f1f1_2 := int64(f1_2) * int64(f1)
f1f2_2 := int64(f1_2) * int64(f2)
f1f3_4 := int64(f1_2) * int64(f3_2)
f1f4_2 := int64(f1_2) * int64(f4)
f1f5_4 := int64(f1_2) * int64(f5_2)
f1f6_2 := int64(f1_2) * int64(f6)
f1f7_4 := int64(f1_2) * int64(f7_2)
f1f8_2 := int64(f1_2) * int64(f8)
f1f9_76 := int64(f1_2) * int64(f9_38)
f2f2 := int64(f2) * int64(f2)
f2f3_2 := int64(f2_2) * int64(f3)
f2f4_2 := int64(f2_2) * int64(f4)
f2f5_2 := int64(f2_2) * int64(f5)
f2f6_2 := int64(f2_2) * int64(f6)
f2f7_2 := int64(f2_2) * int64(f7)
f2f8_38 := int64(f2_2) * int64(f8_19)
f2f9_38 := int64(f2) * int64(f9_38)
f3f3_2 := int64(f3_2) * int64(f3)
f3f4_2 := int64(f3_2) * int64(f4)
f3f5_4 := int64(f3_2) * int64(f5_2)
f3f6_2 := int64(f3_2) * int64(f6)
f3f7_76 := int64(f3_2) * int64(f7_38)
f3f8_38 := int64(f3_2) * int64(f8_19)
f3f9_76 := int64(f3_2) * int64(f9_38)
f4f4 := int64(f4) * int64(f4)
f4f5_2 := int64(f4_2) * int64(f5)
f4f6_38 := int64(f4_2) * int64(f6_19)
f4f7_38 := int64(f4) * int64(f7_38)
f4f8_38 := int64(f4_2) * int64(f8_19)
f4f9_38 := int64(f4) * int64(f9_38)
f5f5_38 := int64(f5) * int64(f5_38)
f5f6_38 := int64(f5_2) * int64(f6_19)
f5f7_76 := int64(f5_2) * int64(f7_38)
f5f8_38 := int64(f5_2) * int64(f8_19)
f5f9_76 := int64(f5_2) * int64(f9_38)
f6f6_19 := int64(f6) * int64(f6_19)
f6f7_38 := int64(f6) * int64(f7_38)
f6f8_38 := int64(f6_2) * int64(f8_19)
f6f9_38 := int64(f6) * int64(f9_38)
f7f7_38 := int64(f7) * int64(f7_38)
f7f8_38 := int64(f7_2) * int64(f8_19)
f7f9_76 := int64(f7_2) * int64(f9_38)
f8f8_19 := int64(f8) * int64(f8_19)
f8f9_38 := int64(f8) * int64(f9_38)
f9f9_38 := int64(f9) * int64(f9_38)
h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38
h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38
h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19
h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38
h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38
h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38
h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19
h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38
h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38
h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2
var carry [10]int64
carry[0] = (h0 + (1 << 25)) >> 26
h1 += carry[0]
h0 -= carry[0] << 26
carry[4] = (h4 + (1 << 25)) >> 26
h5 += carry[4]
h4 -= carry[4] << 26
carry[1] = (h1 + (1 << 24)) >> 25
h2 += carry[1]
h1 -= carry[1] << 25
carry[5] = (h5 + (1 << 24)) >> 25
h6 += carry[5]
h5 -= carry[5] << 25
carry[2] = (h2 + (1 << 25)) >> 26
h3 += carry[2]
h2 -= carry[2] << 26
carry[6] = (h6 + (1 << 25)) >> 26
h7 += carry[6]
h6 -= carry[6] << 26
carry[3] = (h3 + (1 << 24)) >> 25
h4 += carry[3]
h3 -= carry[3] << 25
carry[7] = (h7 + (1 << 24)) >> 25
h8 += carry[7]
h7 -= carry[7] << 25
carry[4] = (h4 + (1 << 25)) >> 26
h5 += carry[4]
h4 -= carry[4] << 26
carry[8] = (h8 + (1 << 25)) >> 26
h9 += carry[8]
h8 -= carry[8] << 26
carry[9] = (h9 + (1 << 24)) >> 25
h0 += carry[9] * 19
h9 -= carry[9] << 25
carry[0] = (h0 + (1 << 25)) >> 26
h1 += carry[0]
h0 -= carry[0] << 26
h[0] = int32(h0)
h[1] = int32(h1)
h[2] = int32(h2)
h[3] = int32(h3)
h[4] = int32(h4)
h[5] = int32(h5)
h[6] = int32(h6)
h[7] = int32(h7)
h[8] = int32(h8)
h[9] = int32(h9)
}
// feMul121666 calculates h = f * 121666. Can overlap h with f.
//
// Preconditions:
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
//
// Postconditions:
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
func feMul121666(h, f *fieldElement) {
h0 := int64(f[0]) * 121666
h1 := int64(f[1]) * 121666
h2 := int64(f[2]) * 121666
h3 := int64(f[3]) * 121666
h4 := int64(f[4]) * 121666
h5 := int64(f[5]) * 121666
h6 := int64(f[6]) * 121666
h7 := int64(f[7]) * 121666
h8 := int64(f[8]) * 121666
h9 := int64(f[9]) * 121666
var carry [10]int64
carry[9] = (h9 + (1 << 24)) >> 25
h0 += carry[9] * 19
h9 -= carry[9] << 25
carry[1] = (h1 + (1 << 24)) >> 25
h2 += carry[1]
h1 -= carry[1] << 25
carry[3] = (h3 + (1 << 24)) >> 25
h4 += carry[3]
h3 -= carry[3] << 25
carry[5] = (h5 + (1 << 24)) >> 25
h6 += carry[5]
h5 -= carry[5] << 25
carry[7] = (h7 + (1 << 24)) >> 25
h8 += carry[7]
h7 -= carry[7] << 25
carry[0] = (h0 + (1 << 25)) >> 26
h1 += carry[0]
h0 -= carry[0] << 26
carry[2] = (h2 + (1 << 25)) >> 26
h3 += carry[2]
h2 -= carry[2] << 26
carry[4] = (h4 + (1 << 25)) >> 26
h5 += carry[4]
h4 -= carry[4] << 26
carry[6] = (h6 + (1 << 25)) >> 26
h7 += carry[6]
h6 -= carry[6] << 26
carry[8] = (h8 + (1 << 25)) >> 26
h9 += carry[8]
h8 -= carry[8] << 26
h[0] = int32(h0)
h[1] = int32(h1)
h[2] = int32(h2)
h[3] = int32(h3)
h[4] = int32(h4)
h[5] = int32(h5)
h[6] = int32(h6)
h[7] = int32(h7)
h[8] = int32(h8)
h[9] = int32(h9)
}
// feInvert sets out = z^-1.
func feInvert(out, z *fieldElement) {
var t0, t1, t2, t3 fieldElement
var i int
feSquare(&t0, z)
for i = 1; i < 1; i++ {
feSquare(&t0, &t0)
}
feSquare(&t1, &t0)
for i = 1; i < 2; i++ {
feSquare(&t1, &t1)
}
feMul(&t1, z, &t1)
feMul(&t0, &t0, &t1)
feSquare(&t2, &t0)
for i = 1; i < 1; i++ {
feSquare(&t2, &t2)
}
feMul(&t1, &t1, &t2)
feSquare(&t2, &t1)
for i = 1; i < 5; i++ {
feSquare(&t2, &t2)
}
feMul(&t1, &t2, &t1)
feSquare(&t2, &t1)
for i = 1; i < 10; i++ {
feSquare(&t2, &t2)
}
feMul(&t2, &t2, &t1)
feSquare(&t3, &t2)
for i = 1; i < 20; i++ {
feSquare(&t3, &t3)
}
feMul(&t2, &t3, &t2)
feSquare(&t2, &t2)
for i = 1; i < 10; i++ {
feSquare(&t2, &t2)
}
feMul(&t1, &t2, &t1)
feSquare(&t2, &t1)
for i = 1; i < 50; i++ {
feSquare(&t2, &t2)
}
feMul(&t2, &t2, &t1)
feSquare(&t3, &t2)
for i = 1; i < 100; i++ {
feSquare(&t3, &t3)
}
feMul(&t2, &t3, &t2)
feSquare(&t2, &t2)
for i = 1; i < 50; i++ {
feSquare(&t2, &t2)
}
feMul(&t1, &t2, &t1)
feSquare(&t1, &t1)
for i = 1; i < 5; i++ {
feSquare(&t1, &t1)
}
feMul(out, &t1, &t0)
}
func scalarMult(out, in, base *[32]byte) {
var e [32]byte
copy(e[:], in[:])
e[0] &= 248
e[31] &= 127
e[31] |= 64
var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement
feFromBytes(&x1, base)
feOne(&x2)
feCopy(&x3, &x1)
feOne(&z3)
swap := int32(0)
for pos := 254; pos >= 0; pos-- {
b := e[pos/8] >> uint(pos&7)
b &= 1
swap ^= int32(b)
feCSwap(&x2, &x3, swap)
feCSwap(&z2, &z3, swap)
swap = int32(b)
feSub(&tmp0, &x3, &z3)
feSub(&tmp1, &x2, &z2)
feAdd(&x2, &x2, &z2)
feAdd(&z2, &x3, &z3)
feMul(&z3, &tmp0, &x2)
feMul(&z2, &z2, &tmp1)
feSquare(&tmp0, &tmp1)
feSquare(&tmp1, &x2)
feAdd(&x3, &z3, &z2)
feSub(&z2, &z3, &z2)
feMul(&x2, &tmp1, &tmp0)
feSub(&tmp1, &tmp1, &tmp0)
feSquare(&z2, &z2)
feMul121666(&z3, &tmp1)
feSquare(&x3, &x3)
feAdd(&tmp0, &tmp0, &z3)
feMul(&z3, &x1, &z2)
feMul(&z2, &tmp1, &tmp0)
}
feCSwap(&x2, &x3, swap)
feCSwap(&z2, &z3, swap)
feInvert(&z2, &z2)
feMul(&x2, &x2, &z2)
feToBytes(out, &x2)
}

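feCSwap above swaps two field elements without branching on the secret bit, using the usual constant-time mask trick. A tiny standalone sketch of the same idea on a pair of int32 values (not part of the vendored files):

package main

import "fmt"

// cswap32 swaps x and y when bit == 1 and leaves them unchanged when bit == 0,
// without a data-dependent branch: -bit is either all zeros or all ones, so the
// masked XOR difference t is either 0 or x^y.
func cswap32(x, y, bit int32) (int32, int32) {
	mask := -bit
	t := mask & (x ^ y)
	return x ^ t, y ^ t
}

func main() {
	fmt.Println(cswap32(7, 9, 0)) // 7 9
	fmt.Println(cswap32(7, 9, 1)) // 9 7
}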
23
vendor/golang.org/x/crypto/curve25519/doc.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package curve25519 provides an implementation of scalar multiplication on
// the elliptic curve known as curve25519. See https://cr.yp.to/ecdh.html
package curve25519 // import "golang.org/x/crypto/curve25519"
// basePoint is the x coordinate of the generator of the curve.
var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
// ScalarMult sets dst to the product in*base where dst and base are the x
// coordinates of group points and all values are in little-endian form.
func ScalarMult(dst, in, base *[32]byte) {
scalarMult(dst, in, base)
}
// ScalarBaseMult sets dst to the product in*base where dst and base are the x
// coordinates of group points, base is the standard generator and all values
// are in little-endian form.
func ScalarBaseMult(dst, in *[32]byte) {
ScalarMult(dst, in, &basePoint)
}

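As context for reviewers, a minimal usage sketch (not part of the vendored files) showing how the two exported functions above are typically combined for a curve25519 key agreement; only ScalarBaseMult, ScalarMult and the standard library are assumed:

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	// Each party draws a random 32-byte scalar as its private key;
	// ScalarMult clamps the scalar internally before use.
	var alicePriv, bobPriv [32]byte
	if _, err := rand.Read(alicePriv[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(bobPriv[:]); err != nil {
		panic(err)
	}

	// Public keys are the scalars multiplied by the curve's base point.
	var alicePub, bobPub [32]byte
	curve25519.ScalarBaseMult(&alicePub, &alicePriv)
	curve25519.ScalarBaseMult(&bobPub, &bobPriv)

	// Each side multiplies its own scalar by the peer's public key;
	// both arrive at the same shared secret.
	var aliceShared, bobShared [32]byte
	curve25519.ScalarMult(&aliceShared, &alicePriv, &bobPub)
	curve25519.ScalarMult(&bobShared, &bobPriv, &alicePub)

	fmt.Println("shared secrets match:", bytes.Equal(aliceShared[:], bobShared[:]))
}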
73
vendor/golang.org/x/crypto/curve25519/freeze_amd64.s generated vendored Normal file
View File

@ -0,0 +1,73 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
// +build amd64,!gccgo,!appengine
#include "const_amd64.h"
// func freeze(inout *[5]uint64)
TEXT ·freeze(SB),7,$0-8
MOVQ inout+0(FP), DI
MOVQ 0(DI),SI
MOVQ 8(DI),DX
MOVQ 16(DI),CX
MOVQ 24(DI),R8
MOVQ 32(DI),R9
MOVQ $REDMASK51,AX
MOVQ AX,R10
SUBQ $18,R10
MOVQ $3,R11
REDUCELOOP:
MOVQ SI,R12
SHRQ $51,R12
ANDQ AX,SI
ADDQ R12,DX
MOVQ DX,R12
SHRQ $51,R12
ANDQ AX,DX
ADDQ R12,CX
MOVQ CX,R12
SHRQ $51,R12
ANDQ AX,CX
ADDQ R12,R8
MOVQ R8,R12
SHRQ $51,R12
ANDQ AX,R8
ADDQ R12,R9
MOVQ R9,R12
SHRQ $51,R12
ANDQ AX,R9
IMUL3Q $19,R12,R12
ADDQ R12,SI
SUBQ $1,R11
JA REDUCELOOP
MOVQ $1,R12
CMPQ R10,SI
CMOVQLT R11,R12
CMPQ AX,DX
CMOVQNE R11,R12
CMPQ AX,CX
CMOVQNE R11,R12
CMPQ AX,R8
CMOVQNE R11,R12
CMPQ AX,R9
CMOVQNE R11,R12
NEGQ R12
ANDQ R12,AX
ANDQ R12,R10
SUBQ R10,SI
SUBQ AX,DX
SUBQ AX,CX
SUBQ AX,R8
SUBQ AX,R9
MOVQ SI,0(DI)
MOVQ DX,8(DI)
MOVQ CX,16(DI)
MOVQ R8,24(DI)
MOVQ R9,32(DI)
RET

1377
vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s generated vendored Normal file

File diff suppressed because it is too large

View File

@ -0,0 +1,240 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64,!gccgo,!appengine
package curve25519
// These functions are implemented in the .s files. The names of the functions
// in the rest of the file are also taken from the SUPERCOP sources to help
// people following along.
//go:noescape
func cswap(inout *[5]uint64, v uint64)
//go:noescape
func ladderstep(inout *[5][5]uint64)
//go:noescape
func freeze(inout *[5]uint64)
//go:noescape
func mul(dest, a, b *[5]uint64)
//go:noescape
func square(out, in *[5]uint64)
// mladder uses a Montgomery ladder to calculate (xr/zr) *= s.
func mladder(xr, zr *[5]uint64, s *[32]byte) {
var work [5][5]uint64
work[0] = *xr
setint(&work[1], 1)
setint(&work[2], 0)
work[3] = *xr
setint(&work[4], 1)
j := uint(6)
var prevbit byte
for i := 31; i >= 0; i-- {
for j < 8 {
bit := ((*s)[i] >> j) & 1
swap := bit ^ prevbit
prevbit = bit
cswap(&work[1], uint64(swap))
ladderstep(&work)
j--
}
j = 7
}
*xr = work[1]
*zr = work[2]
}
func scalarMult(out, in, base *[32]byte) {
var e [32]byte
copy(e[:], (*in)[:])
e[0] &= 248
e[31] &= 127
e[31] |= 64
var t, z [5]uint64
unpack(&t, base)
mladder(&t, &z, &e)
invert(&z, &z)
mul(&t, &t, &z)
pack(out, &t)
}
func setint(r *[5]uint64, v uint64) {
r[0] = v
r[1] = 0
r[2] = 0
r[3] = 0
r[4] = 0
}
// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian
// order.
func unpack(r *[5]uint64, x *[32]byte) {
r[0] = uint64(x[0]) |
uint64(x[1])<<8 |
uint64(x[2])<<16 |
uint64(x[3])<<24 |
uint64(x[4])<<32 |
uint64(x[5])<<40 |
uint64(x[6]&7)<<48
r[1] = uint64(x[6])>>3 |
uint64(x[7])<<5 |
uint64(x[8])<<13 |
uint64(x[9])<<21 |
uint64(x[10])<<29 |
uint64(x[11])<<37 |
uint64(x[12]&63)<<45
r[2] = uint64(x[12])>>6 |
uint64(x[13])<<2 |
uint64(x[14])<<10 |
uint64(x[15])<<18 |
uint64(x[16])<<26 |
uint64(x[17])<<34 |
uint64(x[18])<<42 |
uint64(x[19]&1)<<50
r[3] = uint64(x[19])>>1 |
uint64(x[20])<<7 |
uint64(x[21])<<15 |
uint64(x[22])<<23 |
uint64(x[23])<<31 |
uint64(x[24])<<39 |
uint64(x[25]&15)<<47
r[4] = uint64(x[25])>>4 |
uint64(x[26])<<4 |
uint64(x[27])<<12 |
uint64(x[28])<<20 |
uint64(x[29])<<28 |
uint64(x[30])<<36 |
uint64(x[31]&127)<<44
}
// pack sets out = x where out is the usual, little-endian form of the 5,
// 51-bit limbs in x.
func pack(out *[32]byte, x *[5]uint64) {
t := *x
freeze(&t)
out[0] = byte(t[0])
out[1] = byte(t[0] >> 8)
out[2] = byte(t[0] >> 16)
out[3] = byte(t[0] >> 24)
out[4] = byte(t[0] >> 32)
out[5] = byte(t[0] >> 40)
out[6] = byte(t[0] >> 48)
out[6] ^= byte(t[1]<<3) & 0xf8
out[7] = byte(t[1] >> 5)
out[8] = byte(t[1] >> 13)
out[9] = byte(t[1] >> 21)
out[10] = byte(t[1] >> 29)
out[11] = byte(t[1] >> 37)
out[12] = byte(t[1] >> 45)
out[12] ^= byte(t[2]<<6) & 0xc0
out[13] = byte(t[2] >> 2)
out[14] = byte(t[2] >> 10)
out[15] = byte(t[2] >> 18)
out[16] = byte(t[2] >> 26)
out[17] = byte(t[2] >> 34)
out[18] = byte(t[2] >> 42)
out[19] = byte(t[2] >> 50)
out[19] ^= byte(t[3]<<1) & 0xfe
out[20] = byte(t[3] >> 7)
out[21] = byte(t[3] >> 15)
out[22] = byte(t[3] >> 23)
out[23] = byte(t[3] >> 31)
out[24] = byte(t[3] >> 39)
out[25] = byte(t[3] >> 47)
out[25] ^= byte(t[4]<<4) & 0xf0
out[26] = byte(t[4] >> 4)
out[27] = byte(t[4] >> 12)
out[28] = byte(t[4] >> 20)
out[29] = byte(t[4] >> 28)
out[30] = byte(t[4] >> 36)
out[31] = byte(t[4] >> 44)
}
// invert calculates r = x^-1 mod p using Fermat's little theorem.
func invert(r *[5]uint64, x *[5]uint64) {
var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64
square(&z2, x) /* 2 */
square(&t, &z2) /* 4 */
square(&t, &t) /* 8 */
mul(&z9, &t, x) /* 9 */
mul(&z11, &z9, &z2) /* 11 */
square(&t, &z11) /* 22 */
mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */
square(&t, &z2_5_0) /* 2^6 - 2^1 */
for i := 1; i < 5; i++ { /* 2^20 - 2^10 */
square(&t, &t)
}
mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */
square(&t, &z2_10_0) /* 2^11 - 2^1 */
for i := 1; i < 10; i++ { /* 2^20 - 2^10 */
square(&t, &t)
}
mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */
square(&t, &z2_20_0) /* 2^21 - 2^1 */
for i := 1; i < 20; i++ { /* 2^40 - 2^20 */
square(&t, &t)
}
mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */
square(&t, &t) /* 2^41 - 2^1 */
for i := 1; i < 10; i++ { /* 2^50 - 2^10 */
square(&t, &t)
}
mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */
square(&t, &z2_50_0) /* 2^51 - 2^1 */
for i := 1; i < 50; i++ { /* 2^100 - 2^50 */
square(&t, &t)
}
mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */
square(&t, &z2_100_0) /* 2^101 - 2^1 */
for i := 1; i < 100; i++ { /* 2^200 - 2^100 */
square(&t, &t)
}
mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */
square(&t, &t) /* 2^201 - 2^1 */
for i := 1; i < 50; i++ { /* 2^250 - 2^50 */
square(&t, &t)
}
mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */
square(&t, &t) /* 2^251 - 2^1 */
square(&t, &t) /* 2^252 - 2^2 */
square(&t, &t) /* 2^253 - 2^3 */
square(&t, &t) /* 2^254 - 2^4 */
square(&t, &t) /* 2^255 - 2^5 */
mul(r, &t, &z11) /* 2^255 - 21 */
}

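invert above raises x to 2^255 - 21 = p - 2 through a fixed chain of squarings and multiplications; by Fermat's little theorem that power is the multiplicative inverse of x modulo p. A standalone cross-check of the identity with math/big (a sketch for illustration, not part of the vendored files):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^255 - 19, the curve25519 field prime.
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))

	// By Fermat's little theorem, x^(p-2) mod p is the inverse of x mod p.
	x := big.NewInt(123456789)
	inv := new(big.Int).Exp(x, new(big.Int).Sub(p, big.NewInt(2)), p)

	// x * x^-1 mod p should be 1.
	check := new(big.Int).Mod(new(big.Int).Mul(x, inv), p)
	fmt.Println(check) // 1
}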
169
vendor/golang.org/x/crypto/curve25519/mul_amd64.s generated vendored Normal file
View File

@ -0,0 +1,169 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
// +build amd64,!gccgo,!appengine
#include "const_amd64.h"
// func mul(dest, a, b *[5]uint64)
TEXT ·mul(SB),0,$16-24
MOVQ dest+0(FP), DI
MOVQ a+8(FP), SI
MOVQ b+16(FP), DX
MOVQ DX,CX
MOVQ 24(SI),DX
IMUL3Q $19,DX,AX
MOVQ AX,0(SP)
MULQ 16(CX)
MOVQ AX,R8
MOVQ DX,R9
MOVQ 32(SI),DX
IMUL3Q $19,DX,AX
MOVQ AX,8(SP)
MULQ 8(CX)
ADDQ AX,R8
ADCQ DX,R9
MOVQ 0(SI),AX
MULQ 0(CX)
ADDQ AX,R8
ADCQ DX,R9
MOVQ 0(SI),AX
MULQ 8(CX)
MOVQ AX,R10
MOVQ DX,R11
MOVQ 0(SI),AX
MULQ 16(CX)
MOVQ AX,R12
MOVQ DX,R13
MOVQ 0(SI),AX
MULQ 24(CX)
MOVQ AX,R14
MOVQ DX,R15
MOVQ 0(SI),AX
MULQ 32(CX)
MOVQ AX,BX
MOVQ DX,BP
MOVQ 8(SI),AX
MULQ 0(CX)
ADDQ AX,R10
ADCQ DX,R11
MOVQ 8(SI),AX
MULQ 8(CX)
ADDQ AX,R12
ADCQ DX,R13
MOVQ 8(SI),AX
MULQ 16(CX)
ADDQ AX,R14
ADCQ DX,R15
MOVQ 8(SI),AX
MULQ 24(CX)
ADDQ AX,BX
ADCQ DX,BP
MOVQ 8(SI),DX
IMUL3Q $19,DX,AX
MULQ 32(CX)
ADDQ AX,R8
ADCQ DX,R9
MOVQ 16(SI),AX
MULQ 0(CX)
ADDQ AX,R12
ADCQ DX,R13
MOVQ 16(SI),AX
MULQ 8(CX)
ADDQ AX,R14
ADCQ DX,R15
MOVQ 16(SI),AX
MULQ 16(CX)
ADDQ AX,BX
ADCQ DX,BP
MOVQ 16(SI),DX
IMUL3Q $19,DX,AX
MULQ 24(CX)
ADDQ AX,R8
ADCQ DX,R9
MOVQ 16(SI),DX
IMUL3Q $19,DX,AX
MULQ 32(CX)
ADDQ AX,R10
ADCQ DX,R11
MOVQ 24(SI),AX
MULQ 0(CX)
ADDQ AX,R14
ADCQ DX,R15
MOVQ 24(SI),AX
MULQ 8(CX)
ADDQ AX,BX
ADCQ DX,BP
MOVQ 0(SP),AX
MULQ 24(CX)
ADDQ AX,R10
ADCQ DX,R11
MOVQ 0(SP),AX
MULQ 32(CX)
ADDQ AX,R12
ADCQ DX,R13
MOVQ 32(SI),AX
MULQ 0(CX)
ADDQ AX,BX
ADCQ DX,BP
MOVQ 8(SP),AX
MULQ 16(CX)
ADDQ AX,R10
ADCQ DX,R11
MOVQ 8(SP),AX
MULQ 24(CX)
ADDQ AX,R12
ADCQ DX,R13
MOVQ 8(SP),AX
MULQ 32(CX)
ADDQ AX,R14
ADCQ DX,R15
MOVQ $REDMASK51,SI
SHLQ $13,R9:R8
ANDQ SI,R8
SHLQ $13,R11:R10
ANDQ SI,R10
ADDQ R9,R10
SHLQ $13,R13:R12
ANDQ SI,R12
ADDQ R11,R12
SHLQ $13,R15:R14
ANDQ SI,R14
ADDQ R13,R14
SHLQ $13,BP:BX
ANDQ SI,BX
ADDQ R15,BX
IMUL3Q $19,BP,DX
ADDQ DX,R8
MOVQ R8,DX
SHRQ $51,DX
ADDQ R10,DX
MOVQ DX,CX
SHRQ $51,DX
ANDQ SI,R8
ADDQ R12,DX
MOVQ DX,R9
SHRQ $51,DX
ANDQ SI,CX
ADDQ R14,DX
MOVQ DX,AX
SHRQ $51,DX
ANDQ SI,R9
ADDQ BX,DX
MOVQ DX,R10
SHRQ $51,DX
ANDQ SI,AX
IMUL3Q $19,DX,DX
ADDQ DX,R8
ANDQ SI,R10
MOVQ R8,0(DI)
MOVQ CX,8(DI)
MOVQ R9,16(DI)
MOVQ AX,24(DI)
MOVQ R10,32(DI)
RET

132
vendor/golang.org/x/crypto/curve25519/square_amd64.s generated vendored Normal file
View File

@ -0,0 +1,132 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
// +build amd64,!gccgo,!appengine
#include "const_amd64.h"
// func square(out, in *[5]uint64)
TEXT ·square(SB),7,$0-16
MOVQ out+0(FP), DI
MOVQ in+8(FP), SI
MOVQ 0(SI),AX
MULQ 0(SI)
MOVQ AX,CX
MOVQ DX,R8
MOVQ 0(SI),AX
SHLQ $1,AX
MULQ 8(SI)
MOVQ AX,R9
MOVQ DX,R10
MOVQ 0(SI),AX
SHLQ $1,AX
MULQ 16(SI)
MOVQ AX,R11
MOVQ DX,R12
MOVQ 0(SI),AX
SHLQ $1,AX
MULQ 24(SI)
MOVQ AX,R13
MOVQ DX,R14
MOVQ 0(SI),AX
SHLQ $1,AX
MULQ 32(SI)
MOVQ AX,R15
MOVQ DX,BX
MOVQ 8(SI),AX
MULQ 8(SI)
ADDQ AX,R11
ADCQ DX,R12
MOVQ 8(SI),AX
SHLQ $1,AX
MULQ 16(SI)
ADDQ AX,R13
ADCQ DX,R14
MOVQ 8(SI),AX
SHLQ $1,AX
MULQ 24(SI)
ADDQ AX,R15
ADCQ DX,BX
MOVQ 8(SI),DX
IMUL3Q $38,DX,AX
MULQ 32(SI)
ADDQ AX,CX
ADCQ DX,R8
MOVQ 16(SI),AX
MULQ 16(SI)
ADDQ AX,R15
ADCQ DX,BX
MOVQ 16(SI),DX
IMUL3Q $38,DX,AX
MULQ 24(SI)
ADDQ AX,CX
ADCQ DX,R8
MOVQ 16(SI),DX
IMUL3Q $38,DX,AX
MULQ 32(SI)
ADDQ AX,R9
ADCQ DX,R10
MOVQ 24(SI),DX
IMUL3Q $19,DX,AX
MULQ 24(SI)
ADDQ AX,R9
ADCQ DX,R10
MOVQ 24(SI),DX
IMUL3Q $38,DX,AX
MULQ 32(SI)
ADDQ AX,R11
ADCQ DX,R12
MOVQ 32(SI),DX
IMUL3Q $19,DX,AX
MULQ 32(SI)
ADDQ AX,R13
ADCQ DX,R14
MOVQ $REDMASK51,SI
SHLQ $13,R8:CX
ANDQ SI,CX
SHLQ $13,R10:R9
ANDQ SI,R9
ADDQ R8,R9
SHLQ $13,R12:R11
ANDQ SI,R11
ADDQ R10,R11
SHLQ $13,R14:R13
ANDQ SI,R13
ADDQ R12,R13
SHLQ $13,BX:R15
ANDQ SI,R15
ADDQ R14,R15
IMUL3Q $19,BX,DX
ADDQ DX,CX
MOVQ CX,DX
SHRQ $51,DX
ADDQ R9,DX
ANDQ SI,CX
MOVQ DX,R8
SHRQ $51,DX
ADDQ R11,DX
ANDQ SI,R8
MOVQ DX,R9
SHRQ $51,DX
ADDQ R13,DX
ANDQ SI,R9
MOVQ DX,AX
SHRQ $51,DX
ADDQ R15,DX
ANDQ SI,AX
MOVQ DX,R10
SHRQ $51,DX
IMUL3Q $19,DX,DX
ADDQ DX,CX
ANDQ SI,R10
MOVQ CX,0(DI)
MOVQ R8,8(DI)
MOVQ R9,16(DI)
MOVQ AX,24(DI)
MOVQ R10,32(DI)
RET

217
vendor/golang.org/x/crypto/ed25519/ed25519.go generated vendored Normal file
View File

@ -0,0 +1,217 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ed25519 implements the Ed25519 signature algorithm. See
// https://ed25519.cr.yp.to/.
//
// These functions are also compatible with the “Ed25519” function defined in
// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
// representation includes a public key suffix to make multiple signing
// operations with the same key more efficient. This package refers to the RFC
// 8032 private key as the “seed”.
package ed25519
// This code is a port of the public domain, “ref10” implementation of ed25519
// from SUPERCOP.
import (
"bytes"
"crypto"
cryptorand "crypto/rand"
"crypto/sha512"
"errors"
"io"
"strconv"
"golang.org/x/crypto/ed25519/internal/edwards25519"
)
const (
// PublicKeySize is the size, in bytes, of public keys as used in this package.
PublicKeySize = 32
// PrivateKeySize is the size, in bytes, of private keys as used in this package.
PrivateKeySize = 64
// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
SignatureSize = 64
// SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
SeedSize = 32
)
// PublicKey is the type of Ed25519 public keys.
type PublicKey []byte
// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
type PrivateKey []byte
// Public returns the PublicKey corresponding to priv.
func (priv PrivateKey) Public() crypto.PublicKey {
publicKey := make([]byte, PublicKeySize)
copy(publicKey, priv[32:])
return PublicKey(publicKey)
}
// Seed returns the private key seed corresponding to priv. It is provided for
// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
// in this package.
func (priv PrivateKey) Seed() []byte {
seed := make([]byte, SeedSize)
copy(seed, priv[:32])
return seed
}
// Sign signs the given message with priv.
// Ed25519 performs two passes over messages to be signed and therefore cannot
// handle pre-hashed messages. Thus opts.HashFunc() must return zero to
// indicate the message hasn't been hashed. This can be achieved by passing
// crypto.Hash(0) as the value for opts.
func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {
if opts.HashFunc() != crypto.Hash(0) {
return nil, errors.New("ed25519: cannot sign hashed message")
}
return Sign(priv, message), nil
}
// GenerateKey generates a public/private key pair using entropy from rand.
// If rand is nil, crypto/rand.Reader will be used.
func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
if rand == nil {
rand = cryptorand.Reader
}
seed := make([]byte, SeedSize)
if _, err := io.ReadFull(rand, seed); err != nil {
return nil, nil, err
}
privateKey := NewKeyFromSeed(seed)
publicKey := make([]byte, PublicKeySize)
copy(publicKey, privateKey[32:])
return publicKey, privateKey, nil
}
// NewKeyFromSeed calculates a private key from a seed. It will panic if
// len(seed) is not SeedSize. This function is provided for interoperability
// with RFC 8032. RFC 8032's private keys correspond to seeds in this
// package.
func NewKeyFromSeed(seed []byte) PrivateKey {
if l := len(seed); l != SeedSize {
panic("ed25519: bad seed length: " + strconv.Itoa(l))
}
digest := sha512.Sum512(seed)
digest[0] &= 248
digest[31] &= 127
digest[31] |= 64
var A edwards25519.ExtendedGroupElement
var hBytes [32]byte
copy(hBytes[:], digest[:])
edwards25519.GeScalarMultBase(&A, &hBytes)
var publicKeyBytes [32]byte
A.ToBytes(&publicKeyBytes)
privateKey := make([]byte, PrivateKeySize)
copy(privateKey, seed)
copy(privateKey[32:], publicKeyBytes[:])
return privateKey
}
// Sign signs the message with privateKey and returns a signature. It will
// panic if len(privateKey) is not PrivateKeySize.
func Sign(privateKey PrivateKey, message []byte) []byte {
if l := len(privateKey); l != PrivateKeySize {
panic("ed25519: bad private key length: " + strconv.Itoa(l))
}
h := sha512.New()
h.Write(privateKey[:32])
var digest1, messageDigest, hramDigest [64]byte
var expandedSecretKey [32]byte
h.Sum(digest1[:0])
copy(expandedSecretKey[:], digest1[:])
expandedSecretKey[0] &= 248
expandedSecretKey[31] &= 63
expandedSecretKey[31] |= 64
h.Reset()
h.Write(digest1[32:])
h.Write(message)
h.Sum(messageDigest[:0])
var messageDigestReduced [32]byte
edwards25519.ScReduce(&messageDigestReduced, &messageDigest)
var R edwards25519.ExtendedGroupElement
edwards25519.GeScalarMultBase(&R, &messageDigestReduced)
var encodedR [32]byte
R.ToBytes(&encodedR)
h.Reset()
h.Write(encodedR[:])
h.Write(privateKey[32:])
h.Write(message)
h.Sum(hramDigest[:0])
var hramDigestReduced [32]byte
edwards25519.ScReduce(&hramDigestReduced, &hramDigest)
var s [32]byte
edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)
signature := make([]byte, SignatureSize)
copy(signature[:], encodedR[:])
copy(signature[32:], s[:])
return signature
}
// Verify reports whether sig is a valid signature of message by publicKey. It
// will panic if len(publicKey) is not PublicKeySize.
func Verify(publicKey PublicKey, message, sig []byte) bool {
if l := len(publicKey); l != PublicKeySize {
panic("ed25519: bad public key length: " + strconv.Itoa(l))
}
if len(sig) != SignatureSize || sig[63]&224 != 0 {
return false
}
var A edwards25519.ExtendedGroupElement
var publicKeyBytes [32]byte
copy(publicKeyBytes[:], publicKey)
if !A.FromBytes(&publicKeyBytes) {
return false
}
edwards25519.FeNeg(&A.X, &A.X)
edwards25519.FeNeg(&A.T, &A.T)
h := sha512.New()
h.Write(sig[:32])
h.Write(publicKey[:])
h.Write(message)
var digest [64]byte
h.Sum(digest[:0])
var hReduced [32]byte
edwards25519.ScReduce(&hReduced, &digest)
var R edwards25519.ProjectiveGroupElement
var s [32]byte
copy(s[:], sig[32:])
// https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in
// the range [0, order) in order to prevent signature malleability.
if !edwards25519.ScMinimal(&s) {
return false
}
edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s)
var checkR [32]byte
R.ToBytes(&checkR)
return bytes.Equal(sig[:32], checkR[:])
}

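A minimal sketch of the exported API above (not part of the vendored files; only GenerateKey, Sign and Verify as defined in this file, plus the standard library, are assumed):

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	// GenerateKey reads a 32-byte seed from rand.Reader and expands it into a
	// 64-byte private key whose second half is the public key.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	msg := []byte("bump dependencies")
	sig := ed25519.Sign(priv, msg)

	// Verify checks the signature against the public key and message.
	fmt.Println("signature valid:", ed25519.Verify(pub, msg, sig))
}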
File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,264 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ChaCha20 implements the core ChaCha20 function as specified
// in https://tools.ietf.org/html/rfc7539#section-2.3.
package chacha20
import (
"crypto/cipher"
"encoding/binary"
"golang.org/x/crypto/internal/subtle"
)
// assert that *Cipher implements cipher.Stream
var _ cipher.Stream = (*Cipher)(nil)
// Cipher is a stateful instance of ChaCha20 using a particular key
// and nonce. A *Cipher implements the cipher.Stream interface.
type Cipher struct {
key [8]uint32
counter uint32 // incremented after each block
nonce [3]uint32
buf [bufSize]byte // buffer for unused keystream bytes
len int // number of unused keystream bytes at end of buf
}
// New creates a new ChaCha20 stream cipher with the given key and nonce.
// The initial counter value is set to 0.
func New(key [8]uint32, nonce [3]uint32) *Cipher {
return &Cipher{key: key, nonce: nonce}
}
// ChaCha20 constants spelling "expand 32-byte k"
const (
j0 uint32 = 0x61707865
j1 uint32 = 0x3320646e
j2 uint32 = 0x79622d32
j3 uint32 = 0x6b206574
)
func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
a += b
d ^= a
d = (d << 16) | (d >> 16)
c += d
b ^= c
b = (b << 12) | (b >> 20)
a += b
d ^= a
d = (d << 8) | (d >> 24)
c += d
b ^= c
b = (b << 7) | (b >> 25)
return a, b, c, d
}
// XORKeyStream XORs each byte in the given slice with a byte from the
// cipher's key stream. Dst and src must overlap entirely or not at all.
//
// If len(dst) < len(src), XORKeyStream will panic. It is acceptable
// to pass a dst bigger than src, and in that case, XORKeyStream will
// only update dst[:len(src)] and will not touch the rest of dst.
//
// Multiple calls to XORKeyStream behave as if the concatenation of
// the src buffers was passed in a single run. That is, Cipher
// maintains state and does not reset at each XORKeyStream call.
func (s *Cipher) XORKeyStream(dst, src []byte) {
if len(dst) < len(src) {
panic("chacha20: output smaller than input")
}
if subtle.InexactOverlap(dst[:len(src)], src) {
panic("chacha20: invalid buffer overlap")
}
// xor src with buffered keystream first
if s.len != 0 {
buf := s.buf[len(s.buf)-s.len:]
if len(src) < len(buf) {
buf = buf[:len(src)]
}
td, ts := dst[:len(buf)], src[:len(buf)] // BCE hint
for i, b := range buf {
td[i] = ts[i] ^ b
}
s.len -= len(buf)
if s.len != 0 {
return
}
s.buf = [len(s.buf)]byte{} // zero the empty buffer
src = src[len(buf):]
dst = dst[len(buf):]
}
if len(src) == 0 {
return
}
if haveAsm {
if uint64(len(src))+uint64(s.counter)*64 > (1<<38)-64 {
panic("chacha20: counter overflow")
}
s.xorKeyStreamAsm(dst, src)
return
}
// set up a 64-byte buffer to pad out the final block if needed
// (hoisted out of the main loop to avoid spills)
rem := len(src) % 64 // length of final block
fin := len(src) - rem // index of final block
if rem > 0 {
copy(s.buf[len(s.buf)-64:], src[fin:])
}
// pre-calculate most of the first round
s1, s5, s9, s13 := quarterRound(j1, s.key[1], s.key[5], s.nonce[0])
s2, s6, s10, s14 := quarterRound(j2, s.key[2], s.key[6], s.nonce[1])
s3, s7, s11, s15 := quarterRound(j3, s.key[3], s.key[7], s.nonce[2])
n := len(src)
src, dst = src[:n:n], dst[:n:n] // BCE hint
for i := 0; i < n; i += 64 {
// calculate the remainder of the first round
s0, s4, s8, s12 := quarterRound(j0, s.key[0], s.key[4], s.counter)
// execute the second round
x0, x5, x10, x15 := quarterRound(s0, s5, s10, s15)
x1, x6, x11, x12 := quarterRound(s1, s6, s11, s12)
x2, x7, x8, x13 := quarterRound(s2, s7, s8, s13)
x3, x4, x9, x14 := quarterRound(s3, s4, s9, s14)
// execute the remaining 18 rounds
for i := 0; i < 9; i++ {
x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12)
x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13)
x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14)
x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15)
x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15)
x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12)
x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13)
x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
}
x0 += j0
x1 += j1
x2 += j2
x3 += j3
x4 += s.key[0]
x5 += s.key[1]
x6 += s.key[2]
x7 += s.key[3]
x8 += s.key[4]
x9 += s.key[5]
x10 += s.key[6]
x11 += s.key[7]
x12 += s.counter
x13 += s.nonce[0]
x14 += s.nonce[1]
x15 += s.nonce[2]
// increment the counter
s.counter += 1
if s.counter == 0 {
panic("chacha20: counter overflow")
}
// pad to 64 bytes if needed
in, out := src[i:], dst[i:]
if i == fin {
// src[fin:] has already been copied into s.buf before
// the main loop
in, out = s.buf[len(s.buf)-64:], s.buf[len(s.buf)-64:]
}
in, out = in[:64], out[:64] // BCE hint
// XOR the key stream with the source and write out the result
xor(out[0:], in[0:], x0)
xor(out[4:], in[4:], x1)
xor(out[8:], in[8:], x2)
xor(out[12:], in[12:], x3)
xor(out[16:], in[16:], x4)
xor(out[20:], in[20:], x5)
xor(out[24:], in[24:], x6)
xor(out[28:], in[28:], x7)
xor(out[32:], in[32:], x8)
xor(out[36:], in[36:], x9)
xor(out[40:], in[40:], x10)
xor(out[44:], in[44:], x11)
xor(out[48:], in[48:], x12)
xor(out[52:], in[52:], x13)
xor(out[56:], in[56:], x14)
xor(out[60:], in[60:], x15)
}
// copy any trailing bytes out of the buffer and into dst
if rem != 0 {
s.len = 64 - rem
copy(dst[fin:], s.buf[len(s.buf)-64:])
}
}
// Advance discards bytes in the key stream until the next 64 byte block
// boundary is reached and updates the counter accordingly. If the key
// stream is already at a block boundary no bytes will be discarded and
// the counter will be unchanged.
func (s *Cipher) Advance() {
s.len -= s.len % 64
if s.len == 0 {
s.buf = [len(s.buf)]byte{}
}
}
// XORKeyStream crypts bytes from in to out using the given key and counters.
// In and out must overlap entirely or not at all. Counter contains the raw
// ChaCha20 counter bytes (i.e. block counter followed by nonce).
func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
s := Cipher{
key: [8]uint32{
binary.LittleEndian.Uint32(key[0:4]),
binary.LittleEndian.Uint32(key[4:8]),
binary.LittleEndian.Uint32(key[8:12]),
binary.LittleEndian.Uint32(key[12:16]),
binary.LittleEndian.Uint32(key[16:20]),
binary.LittleEndian.Uint32(key[20:24]),
binary.LittleEndian.Uint32(key[24:28]),
binary.LittleEndian.Uint32(key[28:32]),
},
nonce: [3]uint32{
binary.LittleEndian.Uint32(counter[4:8]),
binary.LittleEndian.Uint32(counter[8:12]),
binary.LittleEndian.Uint32(counter[12:16]),
},
counter: binary.LittleEndian.Uint32(counter[0:4]),
}
s.XORKeyStream(out, in)
}
// HChaCha20 uses the ChaCha20 core to generate a derived key from a key and a
// nonce. It should only be used as part of the XChaCha20 construction.
func HChaCha20(key *[8]uint32, nonce *[4]uint32) [8]uint32 {
x0, x1, x2, x3 := j0, j1, j2, j3
x4, x5, x6, x7 := key[0], key[1], key[2], key[3]
x8, x9, x10, x11 := key[4], key[5], key[6], key[7]
x12, x13, x14, x15 := nonce[0], nonce[1], nonce[2], nonce[3]
for i := 0; i < 10; i++ {
x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12)
x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13)
x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14)
x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15)
x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15)
x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12)
x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13)
x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
}
var out [8]uint32
out[0], out[1], out[2], out[3] = x0, x1, x2, x3
out[4], out[5], out[6], out[7] = x12, x13, x14, x15
return out
}

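Because this package sits under internal/, it cannot be imported by code outside golang.org/x/crypto. As illustration only, here is a standalone copy of the quarter-round step above, checked against the test vector in RFC 7539 section 2.1.1; the expected output words are quoted from that RFC, not taken from this commit:

package main

import "fmt"

// quarterRound mirrors the function of the same name in the file above:
// four add/xor/rotate passes over the 32-bit state words a, b, c, d.
func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
	a += b
	d ^= a
	d = (d << 16) | (d >> 16)
	c += d
	b ^= c
	b = (b << 12) | (b >> 20)
	a += b
	d ^= a
	d = (d << 8) | (d >> 24)
	c += d
	b ^= c
	b = (b << 7) | (b >> 25)
	return a, b, c, d
}

func main() {
	// Input words from RFC 7539, section 2.1.1.
	a, b, c, d := quarterRound(0x11111111, 0x01020304, 0x9b8d6f43, 0x01234567)
	fmt.Printf("%08x %08x %08x %08x\n", a, b, c, d)
	// Expected per the RFC: ea2a92f4 cb1cf8ce 4581472e 5881c4bb
}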
View File

@ -0,0 +1,16 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !s390x gccgo appengine
package chacha20
const (
bufSize = 64
haveAsm = false
)
func (*Cipher) xorKeyStreamAsm(dst, src []byte) {
panic("not implemented")
}

View File

@ -0,0 +1,30 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build s390x,!gccgo,!appengine
package chacha20
var haveAsm = hasVectorFacility()
const bufSize = 256
// hasVectorFacility reports whether the machine supports the vector
// facility (vx).
// Implementation in asm_s390x.s.
func hasVectorFacility() bool
// xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only
// be called when the vector facility is available.
// Implementation in asm_s390x.s.
//go:noescape
func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32, buf *[256]byte, len *int)
func (c *Cipher) xorKeyStreamAsm(dst, src []byte) {
xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter, &c.buf, &c.len)
}
// EXRL targets, DO NOT CALL!
func mvcSrcToBuf()
func mvcBufToDst()

View File

@ -0,0 +1,283 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build s390x,!gccgo,!appengine
#include "go_asm.h"
#include "textflag.h"
// This is an implementation of the ChaCha20 encryption algorithm as
// specified in RFC 7539. It uses vector instructions to compute
// 4 keystream blocks in parallel (256 bytes) which are then XORed
// with the bytes in the input slice.
GLOBL ·constants<>(SB), RODATA|NOPTR, $32
// BSWAP: swap bytes in each 4-byte element
DATA ·constants<>+0x00(SB)/4, $0x03020100
DATA ·constants<>+0x04(SB)/4, $0x07060504
DATA ·constants<>+0x08(SB)/4, $0x0b0a0908
DATA ·constants<>+0x0c(SB)/4, $0x0f0e0d0c
// J0: [j0, j1, j2, j3]
DATA ·constants<>+0x10(SB)/4, $0x61707865
DATA ·constants<>+0x14(SB)/4, $0x3320646e
DATA ·constants<>+0x18(SB)/4, $0x79622d32
DATA ·constants<>+0x1c(SB)/4, $0x6b206574
// EXRL targets:
TEXT ·mvcSrcToBuf(SB), NOFRAME|NOSPLIT, $0
MVC $1, (R1), (R8)
RET
TEXT ·mvcBufToDst(SB), NOFRAME|NOSPLIT, $0
MVC $1, (R8), (R9)
RET
#define BSWAP V5
#define J0 V6
#define KEY0 V7
#define KEY1 V8
#define NONCE V9
#define CTR V10
#define M0 V11
#define M1 V12
#define M2 V13
#define M3 V14
#define INC V15
#define X0 V16
#define X1 V17
#define X2 V18
#define X3 V19
#define X4 V20
#define X5 V21
#define X6 V22
#define X7 V23
#define X8 V24
#define X9 V25
#define X10 V26
#define X11 V27
#define X12 V28
#define X13 V29
#define X14 V30
#define X15 V31
#define NUM_ROUNDS 20
#define ROUND4(a0, a1, a2, a3, b0, b1, b2, b3, c0, c1, c2, c3, d0, d1, d2, d3) \
VAF a1, a0, a0 \
VAF b1, b0, b0 \
VAF c1, c0, c0 \
VAF d1, d0, d0 \
VX a0, a2, a2 \
VX b0, b2, b2 \
VX c0, c2, c2 \
VX d0, d2, d2 \
VERLLF $16, a2, a2 \
VERLLF $16, b2, b2 \
VERLLF $16, c2, c2 \
VERLLF $16, d2, d2 \
VAF a2, a3, a3 \
VAF b2, b3, b3 \
VAF c2, c3, c3 \
VAF d2, d3, d3 \
VX a3, a1, a1 \
VX b3, b1, b1 \
VX c3, c1, c1 \
VX d3, d1, d1 \
VERLLF $12, a1, a1 \
VERLLF $12, b1, b1 \
VERLLF $12, c1, c1 \
VERLLF $12, d1, d1 \
VAF a1, a0, a0 \
VAF b1, b0, b0 \
VAF c1, c0, c0 \
VAF d1, d0, d0 \
VX a0, a2, a2 \
VX b0, b2, b2 \
VX c0, c2, c2 \
VX d0, d2, d2 \
VERLLF $8, a2, a2 \
VERLLF $8, b2, b2 \
VERLLF $8, c2, c2 \
VERLLF $8, d2, d2 \
VAF a2, a3, a3 \
VAF b2, b3, b3 \
VAF c2, c3, c3 \
VAF d2, d3, d3 \
VX a3, a1, a1 \
VX b3, b1, b1 \
VX c3, c1, c1 \
VX d3, d1, d1 \
VERLLF $7, a1, a1 \
VERLLF $7, b1, b1 \
VERLLF $7, c1, c1 \
VERLLF $7, d1, d1
#define PERMUTE(mask, v0, v1, v2, v3) \
VPERM v0, v0, mask, v0 \
VPERM v1, v1, mask, v1 \
VPERM v2, v2, mask, v2 \
VPERM v3, v3, mask, v3
#define ADDV(x, v0, v1, v2, v3) \
VAF x, v0, v0 \
VAF x, v1, v1 \
VAF x, v2, v2 \
VAF x, v3, v3
#define XORV(off, dst, src, v0, v1, v2, v3) \
VLM off(src), M0, M3 \
PERMUTE(BSWAP, v0, v1, v2, v3) \
VX v0, M0, M0 \
VX v1, M1, M1 \
VX v2, M2, M2 \
VX v3, M3, M3 \
VSTM M0, M3, off(dst)
#define SHUFFLE(a, b, c, d, t, u, v, w) \
VMRHF a, c, t \ // t = {a[0], c[0], a[1], c[1]}
VMRHF b, d, u \ // u = {b[0], d[0], b[1], d[1]}
VMRLF a, c, v \ // v = {a[2], c[2], a[3], c[3]}
VMRLF b, d, w \ // w = {b[2], d[2], b[3], d[3]}
VMRHF t, u, a \ // a = {a[0], b[0], c[0], d[0]}
VMRLF t, u, b \ // b = {a[1], b[1], c[1], d[1]}
VMRHF v, w, c \ // c = {a[2], b[2], c[2], d[2]}
VMRLF v, w, d // d = {a[3], b[3], c[3], d[3]}
// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32, buf *[256]byte, len *int)
TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0
MOVD $·constants<>(SB), R1
MOVD dst+0(FP), R2 // R2=&dst[0]
LMG src+24(FP), R3, R4 // R3=&src[0] R4=len(src)
MOVD key+48(FP), R5 // R5=key
MOVD nonce+56(FP), R6 // R6=nonce
MOVD counter+64(FP), R7 // R7=counter
MOVD buf+72(FP), R8 // R8=buf
MOVD len+80(FP), R9 // R9=len
// load BSWAP and J0
VLM (R1), BSWAP, J0
// set up tail buffer
ADD $-1, R4, R12
MOVBZ R12, R12
CMPUBEQ R12, $255, aligned
MOVD R4, R1
AND $~255, R1
MOVD $(R3)(R1*1), R1
EXRL $·mvcSrcToBuf(SB), R12
MOVD $255, R0
SUB R12, R0
MOVD R0, (R9) // update len
aligned:
// setup
MOVD $95, R0
VLM (R5), KEY0, KEY1
VLL R0, (R6), NONCE
VZERO M0
VLEIB $7, $32, M0
VSRLB M0, NONCE, NONCE
// initialize counter values
VLREPF (R7), CTR
VZERO INC
VLEIF $1, $1, INC
VLEIF $2, $2, INC
VLEIF $3, $3, INC
VAF INC, CTR, CTR
VREPIF $4, INC
chacha:
VREPF $0, J0, X0
VREPF $1, J0, X1
VREPF $2, J0, X2
VREPF $3, J0, X3
VREPF $0, KEY0, X4
VREPF $1, KEY0, X5
VREPF $2, KEY0, X6
VREPF $3, KEY0, X7
VREPF $0, KEY1, X8
VREPF $1, KEY1, X9
VREPF $2, KEY1, X10
VREPF $3, KEY1, X11
VLR CTR, X12
VREPF $1, NONCE, X13
VREPF $2, NONCE, X14
VREPF $3, NONCE, X15
MOVD $(NUM_ROUNDS/2), R1
loop:
ROUND4(X0, X4, X12, X8, X1, X5, X13, X9, X2, X6, X14, X10, X3, X7, X15, X11)
ROUND4(X0, X5, X15, X10, X1, X6, X12, X11, X2, X7, X13, X8, X3, X4, X14, X9)
ADD $-1, R1
BNE loop
// decrement length
ADD $-256, R4
BLT tail
continue:
// rearrange vectors
SHUFFLE(X0, X1, X2, X3, M0, M1, M2, M3)
ADDV(J0, X0, X1, X2, X3)
SHUFFLE(X4, X5, X6, X7, M0, M1, M2, M3)
ADDV(KEY0, X4, X5, X6, X7)
SHUFFLE(X8, X9, X10, X11, M0, M1, M2, M3)
ADDV(KEY1, X8, X9, X10, X11)
VAF CTR, X12, X12
SHUFFLE(X12, X13, X14, X15, M0, M1, M2, M3)
ADDV(NONCE, X12, X13, X14, X15)
// increment counters
VAF INC, CTR, CTR
// xor keystream with plaintext
XORV(0*64, R2, R3, X0, X4, X8, X12)
XORV(1*64, R2, R3, X1, X5, X9, X13)
XORV(2*64, R2, R3, X2, X6, X10, X14)
XORV(3*64, R2, R3, X3, X7, X11, X15)
// increment pointers
MOVD $256(R2), R2
MOVD $256(R3), R3
CMPBNE R4, $0, chacha
CMPUBEQ R12, $255, return
EXRL $·mvcBufToDst(SB), R12 // len was updated during setup
return:
VSTEF $0, CTR, (R7)
RET
tail:
MOVD R2, R9
MOVD R8, R2
MOVD R8, R3
MOVD $0, R4
JMP continue
// func hasVectorFacility() bool
TEXT ·hasVectorFacility(SB), NOSPLIT, $24-1
MOVD $x-24(SP), R1
XC $24, 0(R1), 0(R1) // clear the storage
MOVD $2, R0 // R0 is the number of double words stored -1
WORD $0xB2B01000 // STFLE 0(R1)
XOR R0, R0 // reset the value of R0
MOVBZ z-8(SP), R1
AND $0x40, R1
BEQ novector
vectorinstalled:
// check if the vector instruction has been enabled
VLEIB $0, $0xF, V16
VLGVB $0, V16, R1
CMPBNE R1, $0xF, novector
MOVB $1, ret+0(FP) // have vx
RET
novector:
MOVB $0, ret+0(FP) // no vx
RET

43
vendor/golang.org/x/crypto/internal/chacha20/xor.go generated vendored Normal file
View File

@ -0,0 +1,43 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package chacha20
import (
"runtime"
)
// Platforms that have fast unaligned 32-bit little endian accesses.
const unaligned = runtime.GOARCH == "386" ||
runtime.GOARCH == "amd64" ||
runtime.GOARCH == "arm64" ||
runtime.GOARCH == "ppc64le" ||
runtime.GOARCH == "s390x"
// xor reads a little endian uint32 from src, XORs it with u and
// places the result in little endian byte order in dst.
func xor(dst, src []byte, u uint32) {
_, _ = src[3], dst[3] // eliminate bounds checks
if unaligned {
// The compiler should optimize this code into
// 32-bit unaligned little endian loads and stores.
// TODO: delete once the compiler does a reliably
// good job with the generic code below.
// See issue #25111 for more details.
v := uint32(src[0])
v |= uint32(src[1]) << 8
v |= uint32(src[2]) << 16
v |= uint32(src[3]) << 24
v ^= u
dst[0] = byte(v)
dst[1] = byte(v >> 8)
dst[2] = byte(v >> 16)
dst[3] = byte(v >> 24)
} else {
dst[0] = src[0] ^ byte(u)
dst[1] = src[1] ^ byte(u>>8)
dst[2] = src[2] ^ byte(u>>16)
dst[3] = src[3] ^ byte(u>>24)
}
}
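
A tiny worked example (not in the vendored file) of what xor computes. The helper is unexported, so this fragment assumes it runs inside package chacha20:

src := []byte{0x01, 0x00, 0x00, 0x00} // little-endian encoding of 1
dst := make([]byte, 4)
xor(dst, src, 0x00000001) // 1 XOR 1 == 0, so dst is {0x00, 0x00, 0x00, 0x00}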

32
vendor/golang.org/x/crypto/internal/subtle/aliasing.go generated vendored Normal file
View File

@ -0,0 +1,32 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// Package subtle implements functions that are often useful in cryptographic
// code but require careful thought to use correctly.
package subtle // import "golang.org/x/crypto/internal/subtle"
import "unsafe"
// AnyOverlap reports whether x and y share memory at any (not necessarily
// corresponding) index. The memory beyond the slice length is ignored.
func AnyOverlap(x, y []byte) bool {
return len(x) > 0 && len(y) > 0 &&
uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) &&
uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1]))
}
// InexactOverlap reports whether x and y share memory at any non-corresponding
// index. The memory beyond the slice length is ignored. Note that x and y can
// have different lengths and still not have any inexact overlap.
//
// InexactOverlap can be used to implement the requirements of the crypto/cipher
// AEAD, Block, BlockMode and Stream interfaces.
func InexactOverlap(x, y []byte) bool {
if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
return false
}
return AnyOverlap(x, y)
}
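
A small sketch (not part of the vendored file) of how these checks behave. The import path is internal to x/crypto, so this fragment is illustrative only:

buf := make([]byte, 16)
a := buf[:8]
b := buf[4:12] // shifted view over the same backing array

_ = subtle.AnyOverlap(a, b)     // true: the slices share memory
_ = subtle.InexactOverlap(a, b) // true: the shared memory is at non-corresponding indexes
_ = subtle.InexactOverlap(a, a) // false: exact overlap is permitted by the cipher interfaces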

View File

@ -0,0 +1,35 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build appengine
// Package subtle implements functions that are often useful in cryptographic
// code but require careful thought to use correctly.
package subtle // import "golang.org/x/crypto/internal/subtle"
// This is the Google App Engine standard variant based on reflect
// because the unsafe package and cgo are disallowed.
import "reflect"
// AnyOverlap reports whether x and y share memory at any (not necessarily
// corresponding) index. The memory beyond the slice length is ignored.
func AnyOverlap(x, y []byte) bool {
return len(x) > 0 && len(y) > 0 &&
reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() &&
reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer()
}
// InexactOverlap reports whether x and y share memory at any non-corresponding
// index. The memory beyond the slice length is ignored. Note that x and y can
// have different lengths and still not have any inexact overlap.
//
// InexactOverlap can be used to implement the requirements of the crypto/cipher
// AEAD, Block, BlockMode and Stream interfaces.
func InexactOverlap(x, y []byte) bool {
if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
return false
}
return AnyOverlap(x, y)
}

219
vendor/golang.org/x/crypto/openpgp/armor/armor.go generated vendored Normal file
View File

@ -0,0 +1,219 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is
// very similar to PEM except that it has an additional CRC checksum.
package armor // import "golang.org/x/crypto/openpgp/armor"
import (
"bufio"
"bytes"
"encoding/base64"
"golang.org/x/crypto/openpgp/errors"
"io"
)
// A Block represents an OpenPGP armored structure.
//
// The encoded form is:
// -----BEGIN Type-----
// Headers
//
// base64-encoded Bytes
// '=' base64 encoded checksum
// -----END Type-----
// where Headers is a possibly empty sequence of Key: Value lines.
//
// Since the armored data can be very large, this package presents a streaming
// interface.
type Block struct {
Type string // The type, taken from the preamble (e.g. "PGP SIGNATURE").
Header map[string]string // Optional headers.
Body io.Reader // A Reader from which the contents can be read
lReader lineReader
oReader openpgpReader
}
var ArmorCorrupt error = errors.StructuralError("armor invalid")
const crc24Init = 0xb704ce
const crc24Poly = 0x1864cfb
const crc24Mask = 0xffffff
// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1
func crc24(crc uint32, d []byte) uint32 {
for _, b := range d {
crc ^= uint32(b) << 16
for i := 0; i < 8; i++ {
crc <<= 1
if crc&0x1000000 != 0 {
crc ^= crc24Poly
}
}
}
return crc
}
var armorStart = []byte("-----BEGIN ")
var armorEnd = []byte("-----END ")
var armorEndOfLine = []byte("-----")
// lineReader wraps a line based reader. It watches for the end of an armor
// block and records the expected CRC value.
type lineReader struct {
in *bufio.Reader
buf []byte
eof bool
crc uint32
}
func (l *lineReader) Read(p []byte) (n int, err error) {
if l.eof {
return 0, io.EOF
}
if len(l.buf) > 0 {
n = copy(p, l.buf)
l.buf = l.buf[n:]
return
}
line, isPrefix, err := l.in.ReadLine()
if err != nil {
return
}
if isPrefix {
return 0, ArmorCorrupt
}
if len(line) == 5 && line[0] == '=' {
// This is the checksum line
var expectedBytes [3]byte
var m int
m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:])
if m != 3 || err != nil {
return
}
l.crc = uint32(expectedBytes[0])<<16 |
uint32(expectedBytes[1])<<8 |
uint32(expectedBytes[2])
line, _, err = l.in.ReadLine()
if err != nil && err != io.EOF {
return
}
if !bytes.HasPrefix(line, armorEnd) {
return 0, ArmorCorrupt
}
l.eof = true
return 0, io.EOF
}
if len(line) > 96 {
return 0, ArmorCorrupt
}
n = copy(p, line)
bytesToSave := len(line) - n
if bytesToSave > 0 {
if cap(l.buf) < bytesToSave {
l.buf = make([]byte, 0, bytesToSave)
}
l.buf = l.buf[0:bytesToSave]
copy(l.buf, line[n:])
}
return
}
// openpgpReader passes Read calls to the underlying base64 decoder, but keeps
// a running CRC of the resulting data and checks the CRC against the value
// found by the lineReader at EOF.
type openpgpReader struct {
lReader *lineReader
b64Reader io.Reader
currentCRC uint32
}
func (r *openpgpReader) Read(p []byte) (n int, err error) {
n, err = r.b64Reader.Read(p)
r.currentCRC = crc24(r.currentCRC, p[:n])
if err == io.EOF {
if r.lReader.crc != uint32(r.currentCRC&crc24Mask) {
return 0, ArmorCorrupt
}
}
return
}
// Decode reads a PGP armored block from the given Reader. It will ignore
// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The
// given Reader is not usable after calling this function: an arbitrary amount
// of data may have been read past the end of the block.
func Decode(in io.Reader) (p *Block, err error) {
r := bufio.NewReaderSize(in, 100)
var line []byte
ignoreNext := false
TryNextBlock:
p = nil
// Skip leading garbage
for {
ignoreThis := ignoreNext
line, ignoreNext, err = r.ReadLine()
if err != nil {
return
}
if ignoreNext || ignoreThis {
continue
}
line = bytes.TrimSpace(line)
if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) {
break
}
}
p = new(Block)
p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)])
p.Header = make(map[string]string)
nextIsContinuation := false
var lastKey string
// Read headers
for {
isContinuation := nextIsContinuation
line, nextIsContinuation, err = r.ReadLine()
if err != nil {
p = nil
return
}
if isContinuation {
p.Header[lastKey] += string(line)
continue
}
line = bytes.TrimSpace(line)
if len(line) == 0 {
break
}
i := bytes.Index(line, []byte(": "))
if i == -1 {
goto TryNextBlock
}
lastKey = string(line[:i])
p.Header[lastKey] = string(line[i+2:])
}
p.lReader.in = r
p.oReader.currentCRC = crc24Init
p.oReader.lReader = &p.lReader
p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader)
p.Body = &p.oReader
return
}
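
A short sketch (not in the vendored file) of how the checksum above is used from inside the package: the running CRC starts at crc24Init, and its low 24 bits are what ends up base64-encoded after the '=' on the checksum line.

crc := uint32(crc24Init)
crc = crc24(crc, []byte("armored body bytes"))
sum := crc & crc24Mask // 3-byte value compared against lineReader.crc at EOF
_ = sum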

160
vendor/golang.org/x/crypto/openpgp/armor/encode.go generated vendored Normal file
View File

@ -0,0 +1,160 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package armor
import (
"encoding/base64"
"io"
)
var armorHeaderSep = []byte(": ")
var blockEnd = []byte("\n=")
var newline = []byte("\n")
var armorEndOfLineOut = []byte("-----\n")
// writeSlices writes its arguments to the given Writer.
func writeSlices(out io.Writer, slices ...[]byte) (err error) {
for _, s := range slices {
_, err = out.Write(s)
if err != nil {
return err
}
}
return
}
// lineBreaker breaks data across several lines, all of the same byte length
// (except possibly the last). Lines are broken with a single '\n'.
type lineBreaker struct {
lineLength int
line []byte
used int
out io.Writer
haveWritten bool
}
func newLineBreaker(out io.Writer, lineLength int) *lineBreaker {
return &lineBreaker{
lineLength: lineLength,
line: make([]byte, lineLength),
used: 0,
out: out,
}
}
func (l *lineBreaker) Write(b []byte) (n int, err error) {
n = len(b)
if n == 0 {
return
}
if l.used == 0 && l.haveWritten {
_, err = l.out.Write([]byte{'\n'})
if err != nil {
return
}
}
if l.used+len(b) < l.lineLength {
l.used += copy(l.line[l.used:], b)
return
}
l.haveWritten = true
_, err = l.out.Write(l.line[0:l.used])
if err != nil {
return
}
excess := l.lineLength - l.used
l.used = 0
_, err = l.out.Write(b[0:excess])
if err != nil {
return
}
_, err = l.Write(b[excess:])
return
}
func (l *lineBreaker) Close() (err error) {
if l.used > 0 {
_, err = l.out.Write(l.line[0:l.used])
if err != nil {
return
}
}
return
}
// encoding keeps track of a running CRC24 over the data which has been written
// to it and outputs an OpenPGP checksum when closed, followed by an armor
// trailer.
//
// It's built into a stack of io.Writers:
// encoding -> base64 encoder -> lineBreaker -> out
type encoding struct {
out io.Writer
breaker *lineBreaker
b64 io.WriteCloser
crc uint32
blockType []byte
}
func (e *encoding) Write(data []byte) (n int, err error) {
e.crc = crc24(e.crc, data)
return e.b64.Write(data)
}
func (e *encoding) Close() (err error) {
err = e.b64.Close()
if err != nil {
return
}
e.breaker.Close()
var checksumBytes [3]byte
checksumBytes[0] = byte(e.crc >> 16)
checksumBytes[1] = byte(e.crc >> 8)
checksumBytes[2] = byte(e.crc)
var b64ChecksumBytes [4]byte
base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:])
return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine)
}
// Encode returns a WriteCloser which will encode the data written to it in
// OpenPGP armor.
func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) {
bType := []byte(blockType)
err = writeSlices(out, armorStart, bType, armorEndOfLineOut)
if err != nil {
return
}
for k, v := range headers {
err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline)
if err != nil {
return
}
}
_, err = out.Write(newline)
if err != nil {
return
}
e := &encoding{
out: out,
breaker: newLineBreaker(out, 64),
crc: crc24Init,
blockType: bType,
}
e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker)
return e, nil
}
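
A hedged round-trip sketch (not part of the vendored files) tying Encode and Decode together; the block type and header below are arbitrary example values:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/crypto/openpgp/armor"
)

func main() {
	var buf bytes.Buffer
	w, err := armor.Encode(&buf, "PGP MESSAGE", map[string]string{"Comment": "example"})
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte("hello, armor")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil { // flushes the base64 data, the '=' checksum line and the END trailer
		log.Fatal(err)
	}

	block, err := armor.Decode(&buf)
	if err != nil {
		log.Fatal(err)
	}
	body, err := ioutil.ReadAll(block.Body) // the CRC is verified when the body reaches EOF
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(block.Type, string(body)) // PGP MESSAGE hello, armor
}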

59
vendor/golang.org/x/crypto/openpgp/canonical_text.go generated vendored Normal file
View File

@ -0,0 +1,59 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package openpgp
import "hash"
// NewCanonicalTextHash reformats text written to it into the canonical
// form and then applies the hash h. See RFC 4880, section 5.2.1.
func NewCanonicalTextHash(h hash.Hash) hash.Hash {
return &canonicalTextHash{h, 0}
}
type canonicalTextHash struct {
h hash.Hash
s int
}
var newline = []byte{'\r', '\n'}
func (cth *canonicalTextHash) Write(buf []byte) (int, error) {
start := 0
for i, c := range buf {
switch cth.s {
case 0:
if c == '\r' {
cth.s = 1
} else if c == '\n' {
cth.h.Write(buf[start:i])
cth.h.Write(newline)
start = i + 1
}
case 1:
cth.s = 0
}
}
cth.h.Write(buf[start:])
return len(buf), nil
}
func (cth *canonicalTextHash) Sum(in []byte) []byte {
return cth.h.Sum(in)
}
func (cth *canonicalTextHash) Reset() {
cth.h.Reset()
cth.s = 0
}
func (cth *canonicalTextHash) Size() int {
return cth.h.Size()
}
func (cth *canonicalTextHash) BlockSize() int {
return cth.h.BlockSize()
}
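
A brief sketch (not in the vendored file): the canonical-text wrapper makes "\n" and "\r\n" line endings hash identically.

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"

	"golang.org/x/crypto/openpgp"
)

func main() {
	h1 := openpgp.NewCanonicalTextHash(sha256.New())
	h1.Write([]byte("line one\nline two"))

	h2 := openpgp.NewCanonicalTextHash(sha256.New())
	h2.Write([]byte("line one\r\nline two"))

	fmt.Println(bytes.Equal(h1.Sum(nil), h2.Sum(nil))) // true
}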

122
vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go generated vendored Normal file
View File

@ -0,0 +1,122 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package elgamal implements ElGamal encryption, suitable for OpenPGP,
// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on
// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31,
// n. 4, 1985, pp. 469-472.
//
// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it
// unsuitable for other protocols. RSA should be used in preference in any
// case.
package elgamal // import "golang.org/x/crypto/openpgp/elgamal"
import (
"crypto/rand"
"crypto/subtle"
"errors"
"io"
"math/big"
)
// PublicKey represents an ElGamal public key.
type PublicKey struct {
G, P, Y *big.Int
}
// PrivateKey represents an ElGamal private key.
type PrivateKey struct {
PublicKey
X *big.Int
}
// Encrypt encrypts the given message to the given public key. The result is a
// pair of integers. Errors can result from reading from the random source, or because msg is
// too large to be encrypted to the public key.
func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) {
pLen := (pub.P.BitLen() + 7) / 8
if len(msg) > pLen-11 {
err = errors.New("elgamal: message too long")
return
}
// EM = 0x02 || PS || 0x00 || M
em := make([]byte, pLen-1)
em[0] = 2
ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):]
err = nonZeroRandomBytes(ps, random)
if err != nil {
return
}
em[len(em)-len(msg)-1] = 0
copy(mm, msg)
m := new(big.Int).SetBytes(em)
k, err := rand.Int(random, pub.P)
if err != nil {
return
}
c1 = new(big.Int).Exp(pub.G, k, pub.P)
s := new(big.Int).Exp(pub.Y, k, pub.P)
c2 = s.Mul(s, m)
c2.Mod(c2, pub.P)
return
}
// Decrypt takes two integers, resulting from an ElGamal encryption, and
// returns the plaintext of the message. An error can result only if the
// ciphertext is invalid. Users should keep in mind that this is a padding
// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can
// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks
// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel
// Bleichenbacher, Advances in Cryptology (Crypto '98).
func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) {
s := new(big.Int).Exp(c1, priv.X, priv.P)
s.ModInverse(s, priv.P)
s.Mul(s, c2)
s.Mod(s, priv.P)
em := s.Bytes()
firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2)
// The remainder of the plaintext must be a string of non-zero random
// octets, followed by a 0, followed by the message.
// lookingForIndex: 1 iff we are still looking for the zero.
// index: the offset of the first zero byte.
var lookingForIndex, index int
lookingForIndex = 1
for i := 1; i < len(em); i++ {
equals0 := subtle.ConstantTimeByteEq(em[i], 0)
index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index)
lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex)
}
if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 {
return nil, errors.New("elgamal: decryption error")
}
return em[index+1:], nil
}
// nonZeroRandomBytes fills the given slice with non-zero random octets.
func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) {
_, err = io.ReadFull(rand, s)
if err != nil {
return
}
for i := 0; i < len(s); i++ {
for s[i] == 0 {
_, err = io.ReadFull(rand, s[i:i+1])
if err != nil {
return
}
}
}
return
}
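
A hedged Encrypt/Decrypt round trip (not part of the vendored file). The 256-bit prime and G=2 are toy parameters chosen only to make the example self-contained and fast; real OpenPGP keys use much larger, carefully chosen groups.

package main

import (
	"crypto/rand"
	"fmt"
	"log"
	"math/big"

	"golang.org/x/crypto/openpgp/elgamal"
)

func main() {
	p, err := rand.Prime(rand.Reader, 256) // toy modulus, for illustration only
	if err != nil {
		log.Fatal(err)
	}
	g := big.NewInt(2)
	x, err := rand.Int(rand.Reader, p) // toy private exponent
	if err != nil {
		log.Fatal(err)
	}
	priv := &elgamal.PrivateKey{
		PublicKey: elgamal.PublicKey{G: g, P: p, Y: new(big.Int).Exp(g, x, p)},
		X:         x,
	}

	c1, c2, err := elgamal.Encrypt(rand.Reader, &priv.PublicKey, []byte("hi"))
	if err != nil {
		log.Fatal(err)
	}
	msg, err := elgamal.Decrypt(priv, c1, c2)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", msg) // hi
}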

72
vendor/golang.org/x/crypto/openpgp/errors/errors.go generated vendored Normal file
View File

@ -0,0 +1,72 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package errors contains common error types for the OpenPGP packages.
package errors // import "golang.org/x/crypto/openpgp/errors"
import (
"strconv"
)
// A StructuralError is returned when OpenPGP data is found to be syntactically
// invalid.
type StructuralError string
func (s StructuralError) Error() string {
return "openpgp: invalid data: " + string(s)
}
// UnsupportedError indicates that, although the OpenPGP data is valid, it
// makes use of currently unimplemented features.
type UnsupportedError string
func (s UnsupportedError) Error() string {
return "openpgp: unsupported feature: " + string(s)
}
// InvalidArgumentError indicates that the caller is in error and passed an
// incorrect value.
type InvalidArgumentError string
func (i InvalidArgumentError) Error() string {
return "openpgp: invalid argument: " + string(i)
}
// SignatureError indicates that a syntactically valid signature failed to
// validate.
type SignatureError string
func (b SignatureError) Error() string {
return "openpgp: invalid signature: " + string(b)
}
type keyIncorrectError int
func (ki keyIncorrectError) Error() string {
return "openpgp: incorrect key"
}
var ErrKeyIncorrect error = keyIncorrectError(0)
type unknownIssuerError int
func (unknownIssuerError) Error() string {
return "openpgp: signature made by unknown entity"
}
var ErrUnknownIssuer error = unknownIssuerError(0)
type keyRevokedError int
func (keyRevokedError) Error() string {
return "openpgp: signature made by revoked key"
}
var ErrKeyRevoked error = keyRevokedError(0)
type UnknownPacketTypeError uint8
func (upte UnknownPacketTypeError) Error() string {
return "openpgp: unknown packet type: " + strconv.Itoa(int(upte))
}

652
vendor/golang.org/x/crypto/openpgp/keys.go generated vendored Normal file
View File

@ -0,0 +1,652 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package openpgp
import (
"crypto/rsa"
"io"
"time"
"golang.org/x/crypto/openpgp/armor"
"golang.org/x/crypto/openpgp/errors"
"golang.org/x/crypto/openpgp/packet"
)
// PublicKeyType is the armor type for a PGP public key.
var PublicKeyType = "PGP PUBLIC KEY BLOCK"
// PrivateKeyType is the armor type for a PGP private key.
var PrivateKeyType = "PGP PRIVATE KEY BLOCK"
// An Entity represents the components of an OpenPGP key: a primary public key
// (which must be a signing key), one or more identities claimed by that key,
// and zero or more subkeys, which may be encryption keys.
type Entity struct {
PrimaryKey *packet.PublicKey
PrivateKey *packet.PrivateKey
Identities map[string]*Identity // indexed by Identity.Name
Revocations []*packet.Signature
Subkeys []Subkey
}
// An Identity represents an identity claimed by an Entity and zero or more
// assertions by other entities about that claim.
type Identity struct {
Name string // by convention, has the form "Full Name (comment) <email@example.com>"
UserId *packet.UserId
SelfSignature *packet.Signature
Signatures []*packet.Signature
}
// A Subkey is an additional public key in an Entity. Subkeys can be used for
// encryption.
type Subkey struct {
PublicKey *packet.PublicKey
PrivateKey *packet.PrivateKey
Sig *packet.Signature
}
// A Key identifies a specific public key in an Entity. This is either the
// Entity's primary key or a subkey.
type Key struct {
Entity *Entity
PublicKey *packet.PublicKey
PrivateKey *packet.PrivateKey
SelfSignature *packet.Signature
}
// A KeyRing provides access to public and private keys.
type KeyRing interface {
// KeysById returns the set of keys that have the given key id.
KeysById(id uint64) []Key
// KeysByIdUsage returns the set of keys with the given id
// that also meet the key usage given by requiredUsage.
// The requiredUsage is expressed as the bitwise-OR of
// packet.KeyFlag* values.
KeysByIdUsage(id uint64, requiredUsage byte) []Key
// DecryptionKeys returns all private keys that are valid for
// decryption.
DecryptionKeys() []Key
}
// primaryIdentity returns the Identity marked as primary or the first identity
// if none are so marked.
func (e *Entity) primaryIdentity() *Identity {
var firstIdentity *Identity
for _, ident := range e.Identities {
if firstIdentity == nil {
firstIdentity = ident
}
if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
return ident
}
}
return firstIdentity
}
// encryptionKey returns the best candidate Key for encrypting a message to the
// given Entity.
func (e *Entity) encryptionKey(now time.Time) (Key, bool) {
candidateSubkey := -1
// Iterate the keys to find the newest key
var maxTime time.Time
for i, subkey := range e.Subkeys {
if subkey.Sig.FlagsValid &&
subkey.Sig.FlagEncryptCommunications &&
subkey.PublicKey.PubKeyAlgo.CanEncrypt() &&
!subkey.Sig.KeyExpired(now) &&
(maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) {
candidateSubkey = i
maxTime = subkey.Sig.CreationTime
}
}
if candidateSubkey != -1 {
subkey := e.Subkeys[candidateSubkey]
return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
}
// If we don't have any candidate subkeys for encryption and
// the primary key doesn't have any usage metadata then we
// assume that the primary key is ok. Or, if the primary key is
// marked as ok to encrypt to, then we can obviously use it.
i := e.primaryIdentity()
if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications &&
e.PrimaryKey.PubKeyAlgo.CanEncrypt() &&
!i.SelfSignature.KeyExpired(now) {
return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
}
// This Entity appears to be signing only.
return Key{}, false
}
// signingKey return the best candidate Key for signing a message with this
// Entity.
func (e *Entity) signingKey(now time.Time) (Key, bool) {
candidateSubkey := -1
for i, subkey := range e.Subkeys {
if subkey.Sig.FlagsValid &&
subkey.Sig.FlagSign &&
subkey.PublicKey.PubKeyAlgo.CanSign() &&
!subkey.Sig.KeyExpired(now) {
candidateSubkey = i
break
}
}
if candidateSubkey != -1 {
subkey := e.Subkeys[candidateSubkey]
return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
}
// If we have no candidate subkey then we assume that it's ok to sign
// with the primary key.
i := e.primaryIdentity()
if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign &&
!i.SelfSignature.KeyExpired(now) {
return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
}
return Key{}, false
}
// An EntityList contains one or more Entities.
type EntityList []*Entity
// KeysById returns the set of keys that have the given key id.
func (el EntityList) KeysById(id uint64) (keys []Key) {
for _, e := range el {
if e.PrimaryKey.KeyId == id {
var selfSig *packet.Signature
for _, ident := range e.Identities {
if selfSig == nil {
selfSig = ident.SelfSignature
} else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
selfSig = ident.SelfSignature
break
}
}
keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig})
}
for _, subKey := range e.Subkeys {
if subKey.PublicKey.KeyId == id {
keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig})
}
}
}
return
}
// KeysByIdUsage returns the set of keys with the given id that also meet
// the key usage given by requiredUsage. The requiredUsage is expressed as
// the bitwise-OR of packet.KeyFlag* values.
func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) {
for _, key := range el.KeysById(id) {
if len(key.Entity.Revocations) > 0 {
continue
}
if key.SelfSignature.RevocationReason != nil {
continue
}
if key.SelfSignature.FlagsValid && requiredUsage != 0 {
var usage byte
if key.SelfSignature.FlagCertify {
usage |= packet.KeyFlagCertify
}
if key.SelfSignature.FlagSign {
usage |= packet.KeyFlagSign
}
if key.SelfSignature.FlagEncryptCommunications {
usage |= packet.KeyFlagEncryptCommunications
}
if key.SelfSignature.FlagEncryptStorage {
usage |= packet.KeyFlagEncryptStorage
}
if usage&requiredUsage != requiredUsage {
continue
}
}
keys = append(keys, key)
}
return
}
// DecryptionKeys returns all private keys that are valid for decryption.
func (el EntityList) DecryptionKeys() (keys []Key) {
for _, e := range el {
for _, subKey := range e.Subkeys {
if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) {
keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig})
}
}
}
return
}
// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file.
func ReadArmoredKeyRing(r io.Reader) (EntityList, error) {
block, err := armor.Decode(r)
if err == io.EOF {
return nil, errors.InvalidArgumentError("no armored data found")
}
if err != nil {
return nil, err
}
if block.Type != PublicKeyType && block.Type != PrivateKeyType {
return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type)
}
return ReadKeyRing(block.Body)
}
// ReadKeyRing reads one or more public/private keys. Unsupported keys are
// ignored as long as at least a single valid key is found.
func ReadKeyRing(r io.Reader) (el EntityList, err error) {
packets := packet.NewReader(r)
var lastUnsupportedError error
for {
var e *Entity
e, err = ReadEntity(packets)
if err != nil {
// TODO: warn about skipped unsupported/unreadable keys
if _, ok := err.(errors.UnsupportedError); ok {
lastUnsupportedError = err
err = readToNextPublicKey(packets)
} else if _, ok := err.(errors.StructuralError); ok {
// Skip unreadable, badly-formatted keys
lastUnsupportedError = err
err = readToNextPublicKey(packets)
}
if err == io.EOF {
err = nil
break
}
if err != nil {
el = nil
break
}
} else {
el = append(el, e)
}
}
if len(el) == 0 && err == nil {
err = lastUnsupportedError
}
return
}
// readToNextPublicKey reads packets until the start of the entity and leaves
// the first packet of the new entity in the Reader.
func readToNextPublicKey(packets *packet.Reader) (err error) {
var p packet.Packet
for {
p, err = packets.Next()
if err == io.EOF {
return
} else if err != nil {
if _, ok := err.(errors.UnsupportedError); ok {
err = nil
continue
}
return
}
if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey {
packets.Unread(p)
return
}
}
}
// ReadEntity reads an entity (public key, identities, subkeys etc) from the
// given Reader.
func ReadEntity(packets *packet.Reader) (*Entity, error) {
e := new(Entity)
e.Identities = make(map[string]*Identity)
p, err := packets.Next()
if err != nil {
return nil, err
}
var ok bool
if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok {
if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok {
packets.Unread(p)
return nil, errors.StructuralError("first packet was not a public/private key")
}
e.PrimaryKey = &e.PrivateKey.PublicKey
}
if !e.PrimaryKey.PubKeyAlgo.CanSign() {
return nil, errors.StructuralError("primary key cannot be used for signatures")
}
var current *Identity
var revocations []*packet.Signature
EachPacket:
for {
p, err := packets.Next()
if err == io.EOF {
break
} else if err != nil {
return nil, err
}
switch pkt := p.(type) {
case *packet.UserId:
// Make a new Identity object that we might wind up throwing away.
// We'll only add it if we get a valid self-signature over this
// userID.
current = new(Identity)
current.Name = pkt.Id
current.UserId = pkt
for {
p, err = packets.Next()
if err == io.EOF {
break EachPacket
} else if err != nil {
return nil, err
}
sig, ok := p.(*packet.Signature)
if !ok {
packets.Unread(p)
continue EachPacket
}
if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId {
if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil {
return nil, errors.StructuralError("user ID self-signature invalid: " + err.Error())
}
current.SelfSignature = sig
e.Identities[pkt.Id] = current
} else {
current.Signatures = append(current.Signatures, sig)
}
}
case *packet.Signature:
if pkt.SigType == packet.SigTypeKeyRevocation {
revocations = append(revocations, pkt)
} else if pkt.SigType == packet.SigTypeDirectSignature {
// TODO: RFC4880 5.2.1 permits signatures
// directly on keys (eg. to bind additional
// revocation keys).
} else if current == nil {
return nil, errors.StructuralError("signature packet found before user id packet")
} else {
current.Signatures = append(current.Signatures, pkt)
}
case *packet.PrivateKey:
if pkt.IsSubkey == false {
packets.Unread(p)
break EachPacket
}
err = addSubkey(e, packets, &pkt.PublicKey, pkt)
if err != nil {
return nil, err
}
case *packet.PublicKey:
if pkt.IsSubkey == false {
packets.Unread(p)
break EachPacket
}
err = addSubkey(e, packets, pkt, nil)
if err != nil {
return nil, err
}
default:
// we ignore unknown packets
}
}
if len(e.Identities) == 0 {
return nil, errors.StructuralError("entity without any identities")
}
for _, revocation := range revocations {
err = e.PrimaryKey.VerifyRevocationSignature(revocation)
if err == nil {
e.Revocations = append(e.Revocations, revocation)
} else {
// TODO: RFC 4880 5.2.3.15 defines revocation keys.
return nil, errors.StructuralError("revocation signature signed by alternate key")
}
}
return e, nil
}
func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error {
var subKey Subkey
subKey.PublicKey = pub
subKey.PrivateKey = priv
p, err := packets.Next()
if err == io.EOF {
return io.ErrUnexpectedEOF
}
if err != nil {
return errors.StructuralError("subkey signature invalid: " + err.Error())
}
var ok bool
subKey.Sig, ok = p.(*packet.Signature)
if !ok {
return errors.StructuralError("subkey packet not followed by signature")
}
if subKey.Sig.SigType != packet.SigTypeSubkeyBinding && subKey.Sig.SigType != packet.SigTypeSubkeyRevocation {
return errors.StructuralError("subkey signature with wrong type")
}
err = e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, subKey.Sig)
if err != nil {
return errors.StructuralError("subkey signature invalid: " + err.Error())
}
e.Subkeys = append(e.Subkeys, subKey)
return nil
}
const defaultRSAKeyBits = 2048
// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a
// single identity composed of the given full name, comment and email, any of
// which may be empty but must not contain any of "()<>\x00".
// If config is nil, sensible defaults will be used.
func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) {
currentTime := config.Now()
bits := defaultRSAKeyBits
if config != nil && config.RSABits != 0 {
bits = config.RSABits
}
uid := packet.NewUserId(name, comment, email)
if uid == nil {
return nil, errors.InvalidArgumentError("user id field contained invalid characters")
}
signingPriv, err := rsa.GenerateKey(config.Random(), bits)
if err != nil {
return nil, err
}
encryptingPriv, err := rsa.GenerateKey(config.Random(), bits)
if err != nil {
return nil, err
}
e := &Entity{
PrimaryKey: packet.NewRSAPublicKey(currentTime, &signingPriv.PublicKey),
PrivateKey: packet.NewRSAPrivateKey(currentTime, signingPriv),
Identities: make(map[string]*Identity),
}
isPrimaryId := true
e.Identities[uid.Id] = &Identity{
Name: uid.Id,
UserId: uid,
SelfSignature: &packet.Signature{
CreationTime: currentTime,
SigType: packet.SigTypePositiveCert,
PubKeyAlgo: packet.PubKeyAlgoRSA,
Hash: config.Hash(),
IsPrimaryId: &isPrimaryId,
FlagsValid: true,
FlagSign: true,
FlagCertify: true,
IssuerKeyId: &e.PrimaryKey.KeyId,
},
}
err = e.Identities[uid.Id].SelfSignature.SignUserId(uid.Id, e.PrimaryKey, e.PrivateKey, config)
if err != nil {
return nil, err
}
// If the user passes in a DefaultHash via packet.Config,
// set the PreferredHash for the SelfSignature.
if config != nil && config.DefaultHash != 0 {
e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)}
}
// Likewise for DefaultCipher.
if config != nil && config.DefaultCipher != 0 {
e.Identities[uid.Id].SelfSignature.PreferredSymmetric = []uint8{uint8(config.DefaultCipher)}
}
e.Subkeys = make([]Subkey, 1)
e.Subkeys[0] = Subkey{
PublicKey: packet.NewRSAPublicKey(currentTime, &encryptingPriv.PublicKey),
PrivateKey: packet.NewRSAPrivateKey(currentTime, encryptingPriv),
Sig: &packet.Signature{
CreationTime: currentTime,
SigType: packet.SigTypeSubkeyBinding,
PubKeyAlgo: packet.PubKeyAlgoRSA,
Hash: config.Hash(),
FlagsValid: true,
FlagEncryptStorage: true,
FlagEncryptCommunications: true,
IssuerKeyId: &e.PrimaryKey.KeyId,
},
}
e.Subkeys[0].PublicKey.IsSubkey = true
e.Subkeys[0].PrivateKey.IsSubkey = true
err = e.Subkeys[0].Sig.SignKey(e.Subkeys[0].PublicKey, e.PrivateKey, config)
if err != nil {
return nil, err
}
return e, nil
}
// SerializePrivate serializes an Entity, including private key material, but
// excluding signatures from other entities, to the given Writer.
// Identities and subkeys are re-signed in case they changed since NewEntity.
// If config is nil, sensible defaults will be used.
func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) {
err = e.PrivateKey.Serialize(w)
if err != nil {
return
}
for _, ident := range e.Identities {
err = ident.UserId.Serialize(w)
if err != nil {
return
}
err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config)
if err != nil {
return
}
err = ident.SelfSignature.Serialize(w)
if err != nil {
return
}
}
for _, subkey := range e.Subkeys {
err = subkey.PrivateKey.Serialize(w)
if err != nil {
return
}
err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config)
if err != nil {
return
}
err = subkey.Sig.Serialize(w)
if err != nil {
return
}
}
return nil
}
// Serialize writes the public part of the given Entity to w, including
// signatures from other entities. No private key material will be output.
func (e *Entity) Serialize(w io.Writer) error {
err := e.PrimaryKey.Serialize(w)
if err != nil {
return err
}
for _, ident := range e.Identities {
err = ident.UserId.Serialize(w)
if err != nil {
return err
}
err = ident.SelfSignature.Serialize(w)
if err != nil {
return err
}
for _, sig := range ident.Signatures {
err = sig.Serialize(w)
if err != nil {
return err
}
}
}
for _, subkey := range e.Subkeys {
err = subkey.PublicKey.Serialize(w)
if err != nil {
return err
}
err = subkey.Sig.Serialize(w)
if err != nil {
return err
}
}
return nil
}
// SignIdentity adds a signature to e, from signer, attesting that identity is
// associated with e. The provided identity must already be an element of
// e.Identities and the private key of signer must have been decrypted if
// necessary.
// If config is nil, sensible defaults will be used.
func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error {
if signer.PrivateKey == nil {
return errors.InvalidArgumentError("signing Entity must have a private key")
}
if signer.PrivateKey.Encrypted {
return errors.InvalidArgumentError("signing Entity's private key must be decrypted")
}
ident, ok := e.Identities[identity]
if !ok {
return errors.InvalidArgumentError("given identity string not found in Entity")
}
sig := &packet.Signature{
SigType: packet.SigTypeGenericCert,
PubKeyAlgo: signer.PrivateKey.PubKeyAlgo,
Hash: config.Hash(),
CreationTime: config.Now(),
IssuerKeyId: &signer.PrivateKey.KeyId,
}
if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil {
return err
}
ident.Signatures = append(ident.Signatures, sig)
return nil
}
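
A hedged sketch (not part of the vendored file): creating a fresh entity and writing its public half as an armored key block. A nil *packet.Config means the defaults described above (2048-bit RSA, SHA-256); the name and email are placeholder values.

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/armor"
)

func main() {
	entity, err := openpgp.NewEntity("Example User", "demo", "user@example.com", nil)
	if err != nil {
		log.Fatal(err)
	}

	var buf bytes.Buffer
	w, err := armor.Encode(&buf, openpgp.PublicKeyType, nil)
	if err != nil {
		log.Fatal(err)
	}
	if err := entity.Serialize(w); err != nil { // public packets only, no private key material
		log.Fatal(err)
	}
	w.Close()
	fmt.Println(buf.String()) // -----BEGIN PGP PUBLIC KEY BLOCK----- ...
}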

123
vendor/golang.org/x/crypto/openpgp/packet/compressed.go generated vendored Normal file
View File

@ -0,0 +1,123 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"compress/bzip2"
"compress/flate"
"compress/zlib"
"golang.org/x/crypto/openpgp/errors"
"io"
"strconv"
)
// Compressed represents a compressed OpenPGP packet. The decompressed contents
// will contain more OpenPGP packets. See RFC 4880, section 5.6.
type Compressed struct {
Body io.Reader
}
const (
NoCompression = flate.NoCompression
BestSpeed = flate.BestSpeed
BestCompression = flate.BestCompression
DefaultCompression = flate.DefaultCompression
)
// CompressionConfig contains compressor configuration settings.
type CompressionConfig struct {
// Level is the compression level to use. It must be set to
// between -1 and 9, with -1 causing the compressor to use the
// default compression level, 0 causing the compressor to use
// no compression and 1 to 9 representing increasing (better,
// slower) compression levels. If Level is less than -1 or
// more than 9, a non-nil error will be returned during
// encryption. See the constants above for convenient common
// settings for Level.
Level int
}
func (c *Compressed) parse(r io.Reader) error {
var buf [1]byte
_, err := readFull(r, buf[:])
if err != nil {
return err
}
switch buf[0] {
case 1:
c.Body = flate.NewReader(r)
case 2:
c.Body, err = zlib.NewReader(r)
case 3:
c.Body = bzip2.NewReader(r)
default:
err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0])))
}
return err
}
// compressedWriteCloser represents the serialized compression stream
// header and the compressor. Its Close() method ensures that both the
// compressor and serialized stream header are closed. Its Write()
// method writes to the compressor.
type compressedWriteCloser struct {
sh io.Closer // Stream Header
c io.WriteCloser // Compressor
}
func (cwc compressedWriteCloser) Write(p []byte) (int, error) {
return cwc.c.Write(p)
}
func (cwc compressedWriteCloser) Close() (err error) {
err = cwc.c.Close()
if err != nil {
return err
}
return cwc.sh.Close()
}
// SerializeCompressed serializes a compressed data packet to w and
// returns a WriteCloser to which the literal data packets themselves
// can be written and which MUST be closed on completion. If cc is
// nil, sensible defaults will be used to configure the compression
// algorithm.
func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) {
compressed, err := serializeStreamHeader(w, packetTypeCompressed)
if err != nil {
return
}
_, err = compressed.Write([]byte{uint8(algo)})
if err != nil {
return
}
level := DefaultCompression
if cc != nil {
level = cc.Level
}
var compressor io.WriteCloser
switch algo {
case CompressionZIP:
compressor, err = flate.NewWriter(compressed, level)
case CompressionZLIB:
compressor, err = zlib.NewWriterLevel(compressed, level)
default:
s := strconv.Itoa(int(algo))
err = errors.UnsupportedError("Unsupported compression algorithm: " + s)
}
if err != nil {
return
}
literaldata = compressedWriteCloser{compressed, compressor}
return
}
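
A hedged sketch (not in the vendored file) of calling SerializeCompressed directly. The nopCloser wrapper is an assumption made only because the function takes an io.WriteCloser; in the library it is normally driven by the higher-level openpgp write path rather than called by hand.

package main

import (
	"bytes"
	"io"
	"log"

	"golang.org/x/crypto/openpgp/packet"
)

// nopCloser adapts a plain writer to the io.WriteCloser the API expects.
type nopCloser struct{ io.Writer }

func (nopCloser) Close() error { return nil }

func main() {
	var buf bytes.Buffer
	literal, err := packet.SerializeCompressed(nopCloser{&buf}, packet.CompressionZLIB, nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := literal.Write([]byte("inner OpenPGP packets would be written here")); err != nil {
		log.Fatal(err)
	}
	if err := literal.Close(); err != nil { // closes the compressor, then the stream header
		log.Fatal(err)
	}
	log.Printf("wrote %d compressed bytes", buf.Len())
}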

91
vendor/golang.org/x/crypto/openpgp/packet/config.go generated vendored Normal file
View File

@ -0,0 +1,91 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"crypto"
"crypto/rand"
"io"
"time"
)
// Config collects a number of parameters along with sensible defaults.
// A nil *Config is valid and results in all default values.
type Config struct {
// Rand provides the source of entropy.
// If nil, the crypto/rand Reader is used.
Rand io.Reader
// DefaultHash is the default hash function to be used.
// If zero, SHA-256 is used.
DefaultHash crypto.Hash
// DefaultCipher is the cipher to be used.
// If zero, AES-128 is used.
DefaultCipher CipherFunction
// Time returns the current time as the number of seconds since the
// epoch. If Time is nil, time.Now is used.
Time func() time.Time
// DefaultCompressionAlgo is the compression algorithm to be
// applied to the plaintext before encryption. If zero, no
// compression is done.
DefaultCompressionAlgo CompressionAlgo
// CompressionConfig configures the compression settings.
CompressionConfig *CompressionConfig
// S2KCount is only used for symmetric encryption. It
// determines the strength of the passphrase stretching when
// the said passphrase is hashed to produce a key. S2KCount
// should be between 1024 and 65011712, inclusive. If Config
// is nil or S2KCount is 0, the value 65536 is used. Not all
// values in the above range can be represented. S2KCount will
// be rounded up to the next representable value if it cannot
// be encoded exactly. When set, it is strongly encouraged to
// use a value that is at least 65536. See RFC 4880 Section
// 3.7.1.3.
S2KCount int
// RSABits is the number of bits in new RSA keys made with NewEntity.
// If zero, then 2048 bit keys are created.
RSABits int
}
func (c *Config) Random() io.Reader {
if c == nil || c.Rand == nil {
return rand.Reader
}
return c.Rand
}
func (c *Config) Hash() crypto.Hash {
if c == nil || uint(c.DefaultHash) == 0 {
return crypto.SHA256
}
return c.DefaultHash
}
func (c *Config) Cipher() CipherFunction {
if c == nil || uint8(c.DefaultCipher) == 0 {
return CipherAES128
}
return c.DefaultCipher
}
func (c *Config) Now() time.Time {
if c == nil || c.Time == nil {
return time.Now()
}
return c.Time()
}
func (c *Config) Compression() CompressionAlgo {
if c == nil {
return CompressionNone
}
return c.DefaultCompressionAlgo
}
func (c *Config) PasswordHashIterations() int {
if c == nil || c.S2KCount == 0 {
return 0
}
return c.S2KCount
}
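
A short sketch (not in the vendored file) of the nil-Config behaviour described above: every accessor falls back to a sensible default, and a populated Config overrides only the fields that are set.

package main

import (
	"crypto"
	"fmt"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	var cfg *packet.Config // nil is valid
	fmt.Println(cfg.Hash() == crypto.SHA256, cfg.Cipher() == packet.CipherAES128) // true true

	cfg = &packet.Config{DefaultHash: crypto.SHA512, RSABits: 4096}
	fmt.Println(cfg.Hash() == crypto.SHA512) // true
}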

View File

@ -0,0 +1,206 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"crypto/rsa"
"encoding/binary"
"io"
"math/big"
"strconv"
"golang.org/x/crypto/openpgp/elgamal"
"golang.org/x/crypto/openpgp/errors"
)
const encryptedKeyVersion = 3
// EncryptedKey represents a public-key encrypted session key. See RFC 4880,
// section 5.1.
type EncryptedKey struct {
KeyId uint64
Algo PublicKeyAlgorithm
CipherFunc CipherFunction // only valid after a successful Decrypt
Key []byte // only valid after a successful Decrypt
encryptedMPI1, encryptedMPI2 parsedMPI
}
func (e *EncryptedKey) parse(r io.Reader) (err error) {
var buf [10]byte
_, err = readFull(r, buf[:])
if err != nil {
return
}
if buf[0] != encryptedKeyVersion {
return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0])))
}
e.KeyId = binary.BigEndian.Uint64(buf[1:9])
e.Algo = PublicKeyAlgorithm(buf[9])
switch e.Algo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
if err != nil {
return
}
case PubKeyAlgoElGamal:
e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
if err != nil {
return
}
e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r)
if err != nil {
return
}
}
_, err = consumeAll(r)
return
}
func checksumKeyMaterial(key []byte) uint16 {
var checksum uint16
for _, v := range key {
checksum += uint16(v)
}
return checksum
}
// Decrypt decrypts an encrypted session key with the given private key. The
// private key must have been decrypted first.
// If config is nil, sensible defaults will be used.
func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
var err error
var b []byte
// TODO(agl): use session key decryption routines here to avoid
// padding oracle attacks.
switch priv.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
k := priv.PrivateKey.(*rsa.PrivateKey)
b, err = rsa.DecryptPKCS1v15(config.Random(), k, padToKeySize(&k.PublicKey, e.encryptedMPI1.bytes))
case PubKeyAlgoElGamal:
c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes)
c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes)
b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2)
default:
err = errors.InvalidArgumentError("cannot decrypted encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
}
if err != nil {
return err
}
e.CipherFunc = CipherFunction(b[0])
e.Key = b[1 : len(b)-2]
expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1])
checksum := checksumKeyMaterial(e.Key)
if checksum != expectedChecksum {
return errors.StructuralError("EncryptedKey checksum incorrect")
}
return nil
}
// Serialize writes the encrypted key packet, e, to w.
func (e *EncryptedKey) Serialize(w io.Writer) error {
var mpiLen int
switch e.Algo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
mpiLen = 2 + len(e.encryptedMPI1.bytes)
case PubKeyAlgoElGamal:
mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes)
default:
return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo)))
}
serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen)
w.Write([]byte{encryptedKeyVersion})
binary.Write(w, binary.BigEndian, e.KeyId)
w.Write([]byte{byte(e.Algo)})
switch e.Algo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
writeMPIs(w, e.encryptedMPI1)
case PubKeyAlgoElGamal:
writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2)
default:
panic("internal error")
}
return nil
}
// SerializeEncryptedKey serializes an encrypted key packet to w that contains
// key, encrypted to pub.
// If config is nil, sensible defaults will be used.
func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error {
var buf [10]byte
buf[0] = encryptedKeyVersion
binary.BigEndian.PutUint64(buf[1:9], pub.KeyId)
buf[9] = byte(pub.PubKeyAlgo)
keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */)
keyBlock[0] = byte(cipherFunc)
copy(keyBlock[1:], key)
checksum := checksumKeyMaterial(key)
keyBlock[1+len(key)] = byte(checksum >> 8)
keyBlock[1+len(key)+1] = byte(checksum)
switch pub.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock)
case PubKeyAlgoElGamal:
return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock)
case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly:
return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
}
return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
}
func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error {
cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock)
if err != nil {
return errors.InvalidArgumentError("RSA encryption failed: " + err.Error())
}
packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText)
err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
if err != nil {
return err
}
_, err = w.Write(header[:])
if err != nil {
return err
}
return writeMPI(w, 8*uint16(len(cipherText)), cipherText)
}
func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error {
c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock)
if err != nil {
return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error())
}
packetLen := 10 /* header length */
packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8
packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8
err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
if err != nil {
return err
}
_, err = w.Write(header[:])
if err != nil {
return err
}
err = writeBig(w, c1)
if err != nil {
return err
}
return writeBig(w, c2)
}
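
A hedged sketch (not part of the vendored file): serializing a session key encrypted to a freshly generated RSA key. The all-zero session key is a placeholder for illustration; in real use it comes from the symmetric-encryption layer.

package main

import (
	"bytes"
	"crypto/rand"
	"crypto/rsa"
	"log"
	"time"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	pub := packet.NewRSAPublicKey(time.Now(), &rsaKey.PublicKey)

	sessionKey := make([]byte, 16) // placeholder AES-128 session key
	var buf bytes.Buffer
	if err := packet.SerializeEncryptedKey(&buf, pub, packet.CipherAES128, sessionKey, nil); err != nil {
		log.Fatal(err)
	}
	log.Printf("encrypted-key packet: %d bytes", buf.Len())
}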

89
vendor/golang.org/x/crypto/openpgp/packet/literal.go generated vendored Normal file
View File

@ -0,0 +1,89 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"encoding/binary"
"io"
)
// LiteralData represents an encrypted file. See RFC 4880, section 5.9.
type LiteralData struct {
IsBinary bool
FileName string
Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined.
Body io.Reader
}
// ForEyesOnly returns whether the contents of the LiteralData have been marked
// as especially sensitive.
func (l *LiteralData) ForEyesOnly() bool {
return l.FileName == "_CONSOLE"
}
func (l *LiteralData) parse(r io.Reader) (err error) {
var buf [256]byte
_, err = readFull(r, buf[:2])
if err != nil {
return
}
l.IsBinary = buf[0] == 'b'
fileNameLen := int(buf[1])
_, err = readFull(r, buf[:fileNameLen])
if err != nil {
return
}
l.FileName = string(buf[:fileNameLen])
_, err = readFull(r, buf[:4])
if err != nil {
return
}
l.Time = binary.BigEndian.Uint32(buf[:4])
l.Body = r
return
}
// SerializeLiteral serializes a literal data packet to w and returns a
// WriteCloser to which the data itself can be written and which MUST be closed
// on completion. The fileName is truncated to 255 bytes.
func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) {
var buf [4]byte
buf[0] = 't'
if isBinary {
buf[0] = 'b'
}
if len(fileName) > 255 {
fileName = fileName[:255]
}
buf[1] = byte(len(fileName))
inner, err := serializeStreamHeader(w, packetTypeLiteralData)
if err != nil {
return
}
_, err = inner.Write(buf[:2])
if err != nil {
return
}
_, err = inner.Write([]byte(fileName))
if err != nil {
return
}
binary.BigEndian.PutUint32(buf[:], time)
_, err = inner.Write(buf[:])
if err != nil {
return
}
plaintext = inner
return
}
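
A hedged sketch (not in the vendored file) of writing a literal data packet by hand; the nopCloser wrapper is an assumption made only because SerializeLiteral takes an io.WriteCloser.

package main

import (
	"bytes"
	"io"
	"log"
	"time"

	"golang.org/x/crypto/openpgp/packet"
)

type nopCloser struct{ io.Writer }

func (nopCloser) Close() error { return nil }

func main() {
	var buf bytes.Buffer
	w, err := packet.SerializeLiteral(nopCloser{&buf}, true, "hello.txt", uint32(time.Now().Unix()))
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte("file contents")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("literal data packet: %d bytes", buf.Len())
}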

143
vendor/golang.org/x/crypto/openpgp/packet/ocfb.go generated vendored Normal file
View File

@ -0,0 +1,143 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9
package packet
import (
"crypto/cipher"
)
type ocfbEncrypter struct {
b cipher.Block
fre []byte
outUsed int
}
// An OCFBResyncOption determines if the "resynchronization step" of OCFB is
// performed.
type OCFBResyncOption bool
const (
OCFBResync OCFBResyncOption = true
OCFBNoResync OCFBResyncOption = false
)
// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's
// cipher feedback mode using the given cipher.Block, and an initial amount of
// ciphertext. randData must be random bytes and be the same length as the
// cipher.Block's block size. Resync determines if the "resynchronization step"
// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on
// this point.
func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) {
blockSize := block.BlockSize()
if len(randData) != blockSize {
return nil, nil
}
x := &ocfbEncrypter{
b: block,
fre: make([]byte, blockSize),
outUsed: 0,
}
prefix := make([]byte, blockSize+2)
block.Encrypt(x.fre, x.fre)
for i := 0; i < blockSize; i++ {
prefix[i] = randData[i] ^ x.fre[i]
}
block.Encrypt(x.fre, prefix[:blockSize])
prefix[blockSize] = x.fre[0] ^ randData[blockSize-2]
prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1]
if resync {
block.Encrypt(x.fre, prefix[2:])
} else {
x.fre[0] = prefix[blockSize]
x.fre[1] = prefix[blockSize+1]
x.outUsed = 2
}
return x, prefix
}
func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) {
for i := 0; i < len(src); i++ {
if x.outUsed == len(x.fre) {
x.b.Encrypt(x.fre, x.fre)
x.outUsed = 0
}
x.fre[x.outUsed] ^= src[i]
dst[i] = x.fre[x.outUsed]
x.outUsed++
}
}
type ocfbDecrypter struct {
b cipher.Block
fre []byte
outUsed int
}
// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's
// cipher feedback mode using the given cipher.Block. Prefix must be the first
// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's
// block size. If an incorrect key is detected then nil is returned. On
// successful exit, blockSize+2 bytes of decrypted data are written into
// prefix. Resync determines if the "resynchronization step" from RFC 4880,
// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point.
func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream {
blockSize := block.BlockSize()
if len(prefix) != blockSize+2 {
return nil
}
x := &ocfbDecrypter{
b: block,
fre: make([]byte, blockSize),
outUsed: 0,
}
prefixCopy := make([]byte, len(prefix))
copy(prefixCopy, prefix)
block.Encrypt(x.fre, x.fre)
for i := 0; i < blockSize; i++ {
prefixCopy[i] ^= x.fre[i]
}
block.Encrypt(x.fre, prefix[:blockSize])
prefixCopy[blockSize] ^= x.fre[0]
prefixCopy[blockSize+1] ^= x.fre[1]
if prefixCopy[blockSize-2] != prefixCopy[blockSize] ||
prefixCopy[blockSize-1] != prefixCopy[blockSize+1] {
return nil
}
if resync {
block.Encrypt(x.fre, prefix[2:])
} else {
x.fre[0] = prefix[blockSize]
x.fre[1] = prefix[blockSize+1]
x.outUsed = 2
}
copy(prefix, prefixCopy)
return x
}
func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) {
for i := 0; i < len(src); i++ {
if x.outUsed == len(x.fre) {
x.b.Encrypt(x.fre, x.fre)
x.outUsed = 0
}
c := src[i]
dst[i] = x.fre[x.outUsed] ^ src[i]
x.fre[x.outUsed] = c
x.outUsed++
}
}
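
A small sketch, under the assumption of an AES-128 block cipher and the resync variant, showing how the exported OCFB encrypter and decrypter pair up; the all-zero demo key and the message text are placeholders.

package main

import (
	"bytes"
	"crypto/aes"
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	key := make([]byte, 16) // demo key (all zeros); real code would use proper key material
	block, err := aes.NewCipher(key)
	if err != nil {
		log.Fatal(err)
	}

	// randData must be exactly one block of random bytes.
	randData := make([]byte, block.BlockSize())
	if _, err := rand.Read(randData); err != nil {
		log.Fatal(err)
	}

	enc, prefix := packet.NewOCFBEncrypter(block, randData, packet.OCFBResync)
	plaintext := []byte("attack at dawn")
	ciphertext := make([]byte, len(plaintext))
	enc.XORKeyStream(ciphertext, plaintext)

	// The decrypter checks the repeated bytes in the prefix; with a wrong
	// key it would return nil.
	dec := packet.NewOCFBDecrypter(block, prefix, packet.OCFBResync)
	if dec == nil {
		log.Fatal("prefix check failed")
	}
	recovered := make([]byte, len(ciphertext))
	dec.XORKeyStream(recovered, ciphertext)
	fmt.Println(bytes.Equal(recovered, plaintext)) // true
}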

73
vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go generated vendored Normal file
View File

@ -0,0 +1,73 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"crypto"
"encoding/binary"
"golang.org/x/crypto/openpgp/errors"
"golang.org/x/crypto/openpgp/s2k"
"io"
"strconv"
)
// OnePassSignature represents a one-pass signature packet. See RFC 4880,
// section 5.4.
type OnePassSignature struct {
SigType SignatureType
Hash crypto.Hash
PubKeyAlgo PublicKeyAlgorithm
KeyId uint64
IsLast bool
}
const onePassSignatureVersion = 3
func (ops *OnePassSignature) parse(r io.Reader) (err error) {
var buf [13]byte
_, err = readFull(r, buf[:])
if err != nil {
return
}
if buf[0] != onePassSignatureVersion {
err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0])))
}
var ok bool
ops.Hash, ok = s2k.HashIdToHash(buf[2])
if !ok {
return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2])))
}
ops.SigType = SignatureType(buf[1])
ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3])
ops.KeyId = binary.BigEndian.Uint64(buf[4:12])
ops.IsLast = buf[12] != 0
return
}
// Serialize marshals the given OnePassSignature to w.
func (ops *OnePassSignature) Serialize(w io.Writer) error {
var buf [13]byte
buf[0] = onePassSignatureVersion
buf[1] = uint8(ops.SigType)
var ok bool
buf[2], ok = s2k.HashToHashId(ops.Hash)
if !ok {
return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash)))
}
buf[3] = uint8(ops.PubKeyAlgo)
binary.BigEndian.PutUint64(buf[4:12], ops.KeyId)
if ops.IsLast {
buf[12] = 1
}
if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil {
return err
}
_, err := w.Write(buf[:])
return err
}
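
An illustrative sketch of filling in and serializing a OnePassSignature and reading it back with packet.Read; the SHA-256 hash choice and the key ID are arbitrary example values.

package main

import (
	"bytes"
	"crypto"
	_ "crypto/sha256" // registers the SHA-256 implementation
	"fmt"
	"log"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	ops := &packet.OnePassSignature{
		SigType:    packet.SigTypeBinary,
		Hash:       crypto.SHA256,
		PubKeyAlgo: packet.PubKeyAlgoRSA,
		KeyId:      0x1122334455667788, // example key ID
		IsLast:     true,
	}

	var buf bytes.Buffer
	if err := ops.Serialize(&buf); err != nil {
		log.Fatal(err)
	}
	// 2-byte new-format header + 13-byte body.
	fmt.Println(buf.Len()) // 15

	// Reading it back yields an equivalent *packet.OnePassSignature.
	p, err := packet.Read(&buf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%x\n", p.(*packet.OnePassSignature).KeyId) // 1122334455667788
}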

162
vendor/golang.org/x/crypto/openpgp/packet/opaque.go generated vendored Normal file
View File

@ -0,0 +1,162 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"bytes"
"io"
"io/ioutil"
"golang.org/x/crypto/openpgp/errors"
)
// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is
// useful for splitting and storing the original packet contents separately,
// handling unsupported packet types or accessing parts of the packet not yet
// implemented by this package.
type OpaquePacket struct {
// Packet type
Tag uint8
// Reason why the packet was parsed opaquely
Reason error
// Binary contents of the packet data
Contents []byte
}
func (op *OpaquePacket) parse(r io.Reader) (err error) {
op.Contents, err = ioutil.ReadAll(r)
return
}
// Serialize marshals the packet to a writer in its original form, including
// the packet header.
func (op *OpaquePacket) Serialize(w io.Writer) (err error) {
err = serializeHeader(w, packetType(op.Tag), len(op.Contents))
if err == nil {
_, err = w.Write(op.Contents)
}
return
}
// Parse attempts to parse the opaque contents into a structure supported by
// this package. If the packet is not known then the result will be another
// OpaquePacket.
func (op *OpaquePacket) Parse() (p Packet, err error) {
hdr := bytes.NewBuffer(nil)
err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents))
if err != nil {
op.Reason = err
return op, err
}
p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents)))
if err != nil {
op.Reason = err
p = op
}
return
}
// OpaqueReader reads OpaquePackets from an io.Reader.
type OpaqueReader struct {
r io.Reader
}
func NewOpaqueReader(r io.Reader) *OpaqueReader {
return &OpaqueReader{r: r}
}
// Read the next OpaquePacket.
func (or *OpaqueReader) Next() (op *OpaquePacket, err error) {
tag, _, contents, err := readHeader(or.r)
if err != nil {
return
}
op = &OpaquePacket{Tag: uint8(tag), Reason: err}
err = op.parse(contents)
if err != nil {
consumeAll(contents)
}
return
}
// OpaqueSubpacket represents an unparsed OpenPGP subpacket,
// as found in signature and user attribute packets.
type OpaqueSubpacket struct {
SubType uint8
Contents []byte
}
// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from
// their byte representation.
func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) {
var (
subHeaderLen int
subPacket *OpaqueSubpacket
)
for len(contents) > 0 {
subHeaderLen, subPacket, err = nextSubpacket(contents)
if err != nil {
break
}
result = append(result, subPacket)
contents = contents[subHeaderLen+len(subPacket.Contents):]
}
return
}
func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) {
// RFC 4880, section 5.2.3.1
var subLen uint32
if len(contents) < 1 {
goto Truncated
}
subPacket = &OpaqueSubpacket{}
switch {
case contents[0] < 192:
subHeaderLen = 2 // 1 length byte, 1 subtype byte
if len(contents) < subHeaderLen {
goto Truncated
}
subLen = uint32(contents[0])
contents = contents[1:]
case contents[0] < 255:
subHeaderLen = 3 // 2 length bytes, 1 subtype
if len(contents) < subHeaderLen {
goto Truncated
}
subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192
contents = contents[2:]
default:
subHeaderLen = 6 // 5 length bytes, 1 subtype
if len(contents) < subHeaderLen {
goto Truncated
}
subLen = uint32(contents[1])<<24 |
uint32(contents[2])<<16 |
uint32(contents[3])<<8 |
uint32(contents[4])
contents = contents[5:]
}
if subLen > uint32(len(contents)) || subLen == 0 {
goto Truncated
}
subPacket.SubType = contents[0]
subPacket.Contents = contents[1:subLen]
return
Truncated:
err = errors.StructuralError("subpacket truncated")
return
}
func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) {
buf := make([]byte, 6)
n := serializeSubpacketLength(buf, len(osp.Contents)+1)
buf[n] = osp.SubType
if _, err = w.Write(buf[:n+1]); err != nil {
return
}
_, err = w.Write(osp.Contents)
return
}
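
A hedged sketch of the opaque API: a raw packet is built by hand as an OpaquePacket, read back with OpaqueReader, and then upgraded to a typed packet via Parse. The tag value 13 (user ID) and the ID string are example inputs.

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	// Build a raw packet: tag 13 is a user ID packet, contents are the ID bytes.
	raw := &packet.OpaquePacket{
		Tag:      13,
		Contents: []byte("Alice <alice@example.com>"),
	}
	var buf bytes.Buffer
	if err := raw.Serialize(&buf); err != nil {
		log.Fatal(err)
	}

	// Read it back without interpreting the contents.
	rdr := packet.NewOpaqueReader(&buf)
	op, err := rdr.Next()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(op.Tag, len(op.Contents)) // 13 25

	// Parse attempts to turn the opaque bytes into a typed packet.
	p, err := op.Parse()
	if err != nil {
		log.Fatal(err)
	}
	if uid, ok := p.(*packet.UserId); ok {
		fmt.Println(uid.Id) // Alice <alice@example.com>
	}
}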

549
vendor/golang.org/x/crypto/openpgp/packet/packet.go generated vendored Normal file
View File

@ -0,0 +1,549 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package packet implements parsing and serialization of OpenPGP packets, as
// specified in RFC 4880.
package packet // import "golang.org/x/crypto/openpgp/packet"
import (
"bufio"
"crypto/aes"
"crypto/cipher"
"crypto/des"
"crypto/rsa"
"io"
"math/big"
"golang.org/x/crypto/cast5"
"golang.org/x/crypto/openpgp/errors"
)
// readFull is the same as io.ReadFull except that reading zero bytes returns
// ErrUnexpectedEOF rather than EOF.
func readFull(r io.Reader, buf []byte) (n int, err error) {
n, err = io.ReadFull(r, buf)
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return
}
// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2.
func readLength(r io.Reader) (length int64, isPartial bool, err error) {
var buf [4]byte
_, err = readFull(r, buf[:1])
if err != nil {
return
}
switch {
case buf[0] < 192:
length = int64(buf[0])
case buf[0] < 224:
length = int64(buf[0]-192) << 8
_, err = readFull(r, buf[0:1])
if err != nil {
return
}
length += int64(buf[0]) + 192
case buf[0] < 255:
length = int64(1) << (buf[0] & 0x1f)
isPartial = true
default:
_, err = readFull(r, buf[0:4])
if err != nil {
return
}
length = int64(buf[0])<<24 |
int64(buf[1])<<16 |
int64(buf[2])<<8 |
int64(buf[3])
}
return
}
// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths.
// The continuation lengths are parsed and removed from the stream and EOF is
// returned at the end of the packet. See RFC 4880, section 4.2.2.4.
type partialLengthReader struct {
r io.Reader
remaining int64
isPartial bool
}
func (r *partialLengthReader) Read(p []byte) (n int, err error) {
for r.remaining == 0 {
if !r.isPartial {
return 0, io.EOF
}
r.remaining, r.isPartial, err = readLength(r.r)
if err != nil {
return 0, err
}
}
toRead := int64(len(p))
if toRead > r.remaining {
toRead = r.remaining
}
n, err = r.r.Read(p[:int(toRead)])
r.remaining -= int64(n)
if n < int(toRead) && err == io.EOF {
err = io.ErrUnexpectedEOF
}
return
}
// partialLengthWriter writes a stream of data using OpenPGP partial lengths.
// See RFC 4880, section 4.2.2.4.
type partialLengthWriter struct {
w io.WriteCloser
lengthByte [1]byte
}
func (w *partialLengthWriter) Write(p []byte) (n int, err error) {
for len(p) > 0 {
for power := uint(14); power < 32; power-- {
l := 1 << power
if len(p) >= l {
w.lengthByte[0] = 224 + uint8(power)
_, err = w.w.Write(w.lengthByte[:])
if err != nil {
return
}
var m int
m, err = w.w.Write(p[:l])
n += m
if err != nil {
return
}
p = p[l:]
break
}
}
}
return
}
func (w *partialLengthWriter) Close() error {
w.lengthByte[0] = 0
_, err := w.w.Write(w.lengthByte[:])
if err != nil {
return err
}
return w.w.Close()
}
// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the
// underlying Reader returns EOF before the limit has been reached.
type spanReader struct {
r io.Reader
n int64
}
func (l *spanReader) Read(p []byte) (n int, err error) {
if l.n <= 0 {
return 0, io.EOF
}
if int64(len(p)) > l.n {
p = p[0:l.n]
}
n, err = l.r.Read(p)
l.n -= int64(n)
if l.n > 0 && err == io.EOF {
err = io.ErrUnexpectedEOF
}
return
}
// readHeader parses a packet header and returns an io.Reader which will return
// the contents of the packet. See RFC 4880, section 4.2.
func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) {
var buf [4]byte
_, err = io.ReadFull(r, buf[:1])
if err != nil {
return
}
if buf[0]&0x80 == 0 {
err = errors.StructuralError("tag byte does not have MSB set")
return
}
if buf[0]&0x40 == 0 {
// Old format packet
tag = packetType((buf[0] & 0x3f) >> 2)
lengthType := buf[0] & 3
if lengthType == 3 {
length = -1
contents = r
return
}
lengthBytes := 1 << lengthType
_, err = readFull(r, buf[0:lengthBytes])
if err != nil {
return
}
for i := 0; i < lengthBytes; i++ {
length <<= 8
length |= int64(buf[i])
}
contents = &spanReader{r, length}
return
}
// New format packet
tag = packetType(buf[0] & 0x3f)
length, isPartial, err := readLength(r)
if err != nil {
return
}
if isPartial {
contents = &partialLengthReader{
remaining: length,
isPartial: true,
r: r,
}
length = -1
} else {
contents = &spanReader{r, length}
}
return
}
// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section
// 4.2.
func serializeHeader(w io.Writer, ptype packetType, length int) (err error) {
var buf [6]byte
var n int
buf[0] = 0x80 | 0x40 | byte(ptype)
if length < 192 {
buf[1] = byte(length)
n = 2
} else if length < 8384 {
length -= 192
buf[1] = 192 + byte(length>>8)
buf[2] = byte(length)
n = 3
} else {
buf[1] = 255
buf[2] = byte(length >> 24)
buf[3] = byte(length >> 16)
buf[4] = byte(length >> 8)
buf[5] = byte(length)
n = 6
}
_, err = w.Write(buf[:n])
return
}
// serializeStreamHeader writes an OpenPGP packet header to w where the
// length of the packet is unknown. It returns an io.WriteCloser which can be
// used to write the contents of the packet. See RFC 4880, section 4.2.
func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) {
var buf [1]byte
buf[0] = 0x80 | 0x40 | byte(ptype)
_, err = w.Write(buf[:])
if err != nil {
return
}
out = &partialLengthWriter{w: w}
return
}
// Packet represents an OpenPGP packet. Users are expected to try casting
// instances of this interface to specific packet types.
type Packet interface {
parse(io.Reader) error
}
// consumeAll reads from the given Reader until error, returning the number of
// bytes read.
func consumeAll(r io.Reader) (n int64, err error) {
var m int
var buf [1024]byte
for {
m, err = r.Read(buf[:])
n += int64(m)
if err == io.EOF {
err = nil
return
}
if err != nil {
return
}
}
}
// packetType represents the numeric ids of the different OpenPGP packet types. See
// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2
type packetType uint8
const (
packetTypeEncryptedKey packetType = 1
packetTypeSignature packetType = 2
packetTypeSymmetricKeyEncrypted packetType = 3
packetTypeOnePassSignature packetType = 4
packetTypePrivateKey packetType = 5
packetTypePublicKey packetType = 6
packetTypePrivateSubkey packetType = 7
packetTypeCompressed packetType = 8
packetTypeSymmetricallyEncrypted packetType = 9
packetTypeLiteralData packetType = 11
packetTypeUserId packetType = 13
packetTypePublicSubkey packetType = 14
packetTypeUserAttribute packetType = 17
packetTypeSymmetricallyEncryptedMDC packetType = 18
)
// peekVersion detects the version of a public key packet about to
// be read. A bufio.Reader at the original position of the io.Reader
// is returned.
func peekVersion(r io.Reader) (bufr *bufio.Reader, ver byte, err error) {
bufr = bufio.NewReader(r)
var verBuf []byte
if verBuf, err = bufr.Peek(1); err != nil {
return
}
ver = verBuf[0]
return
}
// Read reads a single OpenPGP packet from the given io.Reader. If there is an
// error parsing a packet, the whole packet is consumed from the input.
func Read(r io.Reader) (p Packet, err error) {
tag, _, contents, err := readHeader(r)
if err != nil {
return
}
switch tag {
case packetTypeEncryptedKey:
p = new(EncryptedKey)
case packetTypeSignature:
var version byte
// Detect signature version
if contents, version, err = peekVersion(contents); err != nil {
return
}
if version < 4 {
p = new(SignatureV3)
} else {
p = new(Signature)
}
case packetTypeSymmetricKeyEncrypted:
p = new(SymmetricKeyEncrypted)
case packetTypeOnePassSignature:
p = new(OnePassSignature)
case packetTypePrivateKey, packetTypePrivateSubkey:
pk := new(PrivateKey)
if tag == packetTypePrivateSubkey {
pk.IsSubkey = true
}
p = pk
case packetTypePublicKey, packetTypePublicSubkey:
var version byte
if contents, version, err = peekVersion(contents); err != nil {
return
}
isSubkey := tag == packetTypePublicSubkey
if version < 4 {
p = &PublicKeyV3{IsSubkey: isSubkey}
} else {
p = &PublicKey{IsSubkey: isSubkey}
}
case packetTypeCompressed:
p = new(Compressed)
case packetTypeSymmetricallyEncrypted:
p = new(SymmetricallyEncrypted)
case packetTypeLiteralData:
p = new(LiteralData)
case packetTypeUserId:
p = new(UserId)
case packetTypeUserAttribute:
p = new(UserAttribute)
case packetTypeSymmetricallyEncryptedMDC:
se := new(SymmetricallyEncrypted)
se.MDC = true
p = se
default:
err = errors.UnknownPacketTypeError(tag)
}
if p != nil {
err = p.parse(contents)
}
if err != nil {
consumeAll(contents)
}
return
}
// SignatureType represents the different semantic meanings of an OpenPGP
// signature. See RFC 4880, section 5.2.1.
type SignatureType uint8
const (
SigTypeBinary SignatureType = 0
SigTypeText = 1
SigTypeGenericCert = 0x10
SigTypePersonaCert = 0x11
SigTypeCasualCert = 0x12
SigTypePositiveCert = 0x13
SigTypeSubkeyBinding = 0x18
SigTypePrimaryKeyBinding = 0x19
SigTypeDirectSignature = 0x1F
SigTypeKeyRevocation = 0x20
SigTypeSubkeyRevocation = 0x28
)
// PublicKeyAlgorithm represents the different public key systems specified for
// OpenPGP. See
// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12
type PublicKeyAlgorithm uint8
const (
PubKeyAlgoRSA PublicKeyAlgorithm = 1
PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2
PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3
PubKeyAlgoElGamal PublicKeyAlgorithm = 16
PubKeyAlgoDSA PublicKeyAlgorithm = 17
// RFC 6637, Section 5.
PubKeyAlgoECDH PublicKeyAlgorithm = 18
PubKeyAlgoECDSA PublicKeyAlgorithm = 19
)
// CanEncrypt returns true if it's possible to encrypt a message to a public
// key of the given type.
func (pka PublicKeyAlgorithm) CanEncrypt() bool {
switch pka {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal:
return true
}
return false
}
// CanSign returns true if it's possible for a public key of the given type to
// sign a message.
func (pka PublicKeyAlgorithm) CanSign() bool {
switch pka {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA:
return true
}
return false
}
// CipherFunction represents the different block ciphers specified for OpenPGP. See
// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13
type CipherFunction uint8
const (
Cipher3DES CipherFunction = 2
CipherCAST5 CipherFunction = 3
CipherAES128 CipherFunction = 7
CipherAES192 CipherFunction = 8
CipherAES256 CipherFunction = 9
)
// KeySize returns the key size, in bytes, of cipher.
func (cipher CipherFunction) KeySize() int {
switch cipher {
case Cipher3DES:
return 24
case CipherCAST5:
return cast5.KeySize
case CipherAES128:
return 16
case CipherAES192:
return 24
case CipherAES256:
return 32
}
return 0
}
// blockSize returns the block size, in bytes, of cipher.
func (cipher CipherFunction) blockSize() int {
switch cipher {
case Cipher3DES:
return des.BlockSize
case CipherCAST5:
return 8
case CipherAES128, CipherAES192, CipherAES256:
return 16
}
return 0
}
// new returns a fresh instance of the given cipher.
func (cipher CipherFunction) new(key []byte) (block cipher.Block) {
switch cipher {
case Cipher3DES:
block, _ = des.NewTripleDESCipher(key)
case CipherCAST5:
block, _ = cast5.NewCipher(key)
case CipherAES128, CipherAES192, CipherAES256:
block, _ = aes.NewCipher(key)
}
return
}
// readMPI reads a big integer from r. The bit length returned is the bit
// length that was specified in r. This is preserved so that the integer can be
// reserialized exactly.
func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) {
var buf [2]byte
_, err = readFull(r, buf[0:])
if err != nil {
return
}
bitLength = uint16(buf[0])<<8 | uint16(buf[1])
numBytes := (int(bitLength) + 7) / 8
mpi = make([]byte, numBytes)
_, err = readFull(r, mpi)
// According to RFC 4880 3.2. we should check that the MPI has no leading
// zeroes (at least when not an encrypted MPI?), but this implementation
// does generate leading zeroes, so we keep accepting them.
return
}
// writeMPI serializes a big integer to w.
func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) {
// Note that we can produce leading zeroes, in violation of RFC 4880 3.2.
// Implementations seem to be tolerant of them, and stripping them would
// make it complex to guarantee matching re-serialization.
_, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)})
if err == nil {
_, err = w.Write(mpiBytes)
}
return
}
// writeBig serializes a *big.Int to w.
func writeBig(w io.Writer, i *big.Int) error {
return writeMPI(w, uint16(i.BitLen()), i.Bytes())
}
// padToKeySize left-pads an MPI with zeroes to match the length of the
// specified RSA public key's modulus.
func padToKeySize(pub *rsa.PublicKey, b []byte) []byte {
k := (pub.N.BitLen() + 7) / 8
if len(b) >= k {
return b
}
bb := make([]byte, k)
copy(bb[len(bb)-len(b):], b)
return bb
}
// CompressionAlgo represents the different compression algorithms
// supported by OpenPGP (except for BZIP2, which is not currently
// supported). See Section 9.3 of RFC 4880.
type CompressionAlgo uint8
const (
CompressionNone CompressionAlgo = 0
CompressionZIP CompressionAlgo = 1
CompressionZLIB CompressionAlgo = 2
)
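
To make the new-format length encoding used by readLength and serializeHeader concrete, here is a standalone sketch (not part of the vendored package) that mirrors the three encodings from RFC 4880, section 4.2.2:

package main

import "fmt"

// encodeBodyLength mirrors serializeHeader's new-format body length
// encoding: one octet below 192, two octets up to 8383, five octets above.
func encodeBodyLength(length int) []byte {
	switch {
	case length < 192:
		return []byte{byte(length)}
	case length < 8384:
		length -= 192
		return []byte{192 + byte(length>>8), byte(length)}
	default:
		return []byte{255,
			byte(length >> 24), byte(length >> 16),
			byte(length >> 8), byte(length)}
	}
}

func main() {
	for _, n := range []int{100, 1723, 100000} {
		fmt.Printf("%d -> %x\n", n, encodeBodyLength(n))
	}
	// Output:
	// 100 -> 64
	// 1723 -> c5fb        (decode: (0xc5-192)<<8 + 0xfb + 192 = 1723)
	// 100000 -> ff000186a0
}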

Some files were not shown because too many files have changed in this diff.