Initial commit
3
vendor/golang.org/x/crypto/AUTHORS
generated
vendored
Normal file
@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at https://tip.golang.org/AUTHORS.
3
vendor/golang.org/x/crypto/CONTRIBUTORS
generated
vendored
Normal file
@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at https://tip.golang.org/CONTRIBUTORS.
27
vendor/golang.org/x/crypto/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22
vendor/golang.org/x/crypto/PATENTS
generated
vendored
Normal file
@@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)

"This implementation" means the copyrightable works distributed by
Google as part of the Go project.

Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
35
vendor/golang.org/x/crypto/bcrypt/base64.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bcrypt

import "encoding/base64"

const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"

var bcEncoding = base64.NewEncoding(alphabet)

func base64Encode(src []byte) []byte {
	n := bcEncoding.EncodedLen(len(src))
	dst := make([]byte, n)
	bcEncoding.Encode(dst, src)
	for dst[n-1] == '=' {
		n--
	}
	return dst[:n]
}

func base64Decode(src []byte) ([]byte, error) {
	numOfEquals := 4 - (len(src) % 4)
	for i := 0; i < numOfEquals; i++ {
		src = append(src, '=')
	}

	dst := make([]byte, bcEncoding.DecodedLen(len(src)))
	n, err := bcEncoding.Decode(dst, src)
	if err != nil {
		return nil, err
	}
	return dst[:n], nil
}
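The helpers above use bcrypt's non-standard alphabet ("./", then A-Z, a-z, 0-9) and strip the trailing "=" padding. For reference, the same encoding can be reproduced from application code with the standard library alone; this is a minimal sketch, and bcBase64 is an illustrative name, not anything exported by the vendored package:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

// bcBase64 mirrors the bcrypt alphabet with padding disabled, which matches
// what base64Encode above produces after stripping trailing '='.
// (Illustrative only; the vendored package keeps its own unexported encoder.)
var bcBase64 = base64.NewEncoding(
	"./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
).WithPadding(base64.NoPadding)

func main() {
	enc := bcBase64.EncodeToString([]byte{0x4f, 0x72, 0x70, 0x68})
	dec, err := bcBase64.DecodeString(enc)
	fmt.Println(enc, dec, err)
}
```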
295
vendor/golang.org/x/crypto/bcrypt/bcrypt.go
generated
vendored
Normal file
@@ -0,0 +1,295 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing
|
||||
// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf
|
||||
package bcrypt // import "golang.org/x/crypto/bcrypt"
|
||||
|
||||
// The code is a port of Provos and Mazières's C implementation.
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/subtle"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/crypto/blowfish"
|
||||
)
|
||||
|
||||
const (
|
||||
MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword
|
||||
MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword
|
||||
DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword
|
||||
)
|
||||
|
||||
// The error returned from CompareHashAndPassword when a password and hash do
|
||||
// not match.
|
||||
var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password")
|
||||
|
||||
// The error returned from CompareHashAndPassword when a hash is too short to
|
||||
// be a bcrypt hash.
|
||||
var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password")
|
||||
|
||||
// The error returned from CompareHashAndPassword when a hash was created with
|
||||
// a bcrypt algorithm newer than this implementation.
|
||||
type HashVersionTooNewError byte
|
||||
|
||||
func (hv HashVersionTooNewError) Error() string {
|
||||
return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion)
|
||||
}
|
||||
|
||||
// The error returned from CompareHashAndPassword when a hash starts with something other than '$'
|
||||
type InvalidHashPrefixError byte
|
||||
|
||||
func (ih InvalidHashPrefixError) Error() string {
|
||||
return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih))
|
||||
}
|
||||
|
||||
type InvalidCostError int
|
||||
|
||||
func (ic InvalidCostError) Error() string {
|
||||
return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost))
|
||||
}
|
||||
|
||||
const (
|
||||
majorVersion = '2'
|
||||
minorVersion = 'a'
|
||||
maxSaltSize = 16
|
||||
maxCryptedHashSize = 23
|
||||
encodedSaltSize = 22
|
||||
encodedHashSize = 31
|
||||
minHashSize = 59
|
||||
)
|
||||
|
||||
// magicCipherData is an IV for the 64 Blowfish encryption calls in
|
||||
// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes.
|
||||
var magicCipherData = []byte{
|
||||
0x4f, 0x72, 0x70, 0x68,
|
||||
0x65, 0x61, 0x6e, 0x42,
|
||||
0x65, 0x68, 0x6f, 0x6c,
|
||||
0x64, 0x65, 0x72, 0x53,
|
||||
0x63, 0x72, 0x79, 0x44,
|
||||
0x6f, 0x75, 0x62, 0x74,
|
||||
}
|
||||
|
||||
type hashed struct {
|
||||
hash []byte
|
||||
salt []byte
|
||||
cost int // allowed range is MinCost to MaxCost
|
||||
major byte
|
||||
minor byte
|
||||
}
|
||||
|
||||
// GenerateFromPassword returns the bcrypt hash of the password at the given
|
||||
// cost. If the cost given is less than MinCost, the cost will be set to
|
||||
// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package,
|
||||
// to compare the returned hashed password with its cleartext version.
|
||||
func GenerateFromPassword(password []byte, cost int) ([]byte, error) {
|
||||
p, err := newFromPassword(password, cost)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p.Hash(), nil
|
||||
}
|
||||
|
||||
// CompareHashAndPassword compares a bcrypt hashed password with its possible
|
||||
// plaintext equivalent. Returns nil on success, or an error on failure.
|
||||
func CompareHashAndPassword(hashedPassword, password []byte) error {
|
||||
p, err := newFromHash(hashedPassword)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
otherHash, err := bcrypt(password, p.cost, p.salt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor}
|
||||
if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return ErrMismatchedHashAndPassword
|
||||
}
|
||||
|
||||
// Cost returns the hashing cost used to create the given hashed
|
||||
// password. When, in the future, the hashing cost of a password system needs
|
||||
// to be increased in order to adjust for greater computational power, this
|
||||
// function allows one to establish which passwords need to be updated.
|
||||
func Cost(hashedPassword []byte) (int, error) {
|
||||
p, err := newFromHash(hashedPassword)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return p.cost, nil
|
||||
}
|
||||
|
||||
func newFromPassword(password []byte, cost int) (*hashed, error) {
|
||||
if cost < MinCost {
|
||||
cost = DefaultCost
|
||||
}
|
||||
p := new(hashed)
|
||||
p.major = majorVersion
|
||||
p.minor = minorVersion
|
||||
|
||||
err := checkCost(cost)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.cost = cost
|
||||
|
||||
unencodedSalt := make([]byte, maxSaltSize)
|
||||
_, err = io.ReadFull(rand.Reader, unencodedSalt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
p.salt = base64Encode(unencodedSalt)
|
||||
hash, err := bcrypt(password, p.cost, p.salt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.hash = hash
|
||||
return p, err
|
||||
}
|
||||
|
||||
func newFromHash(hashedSecret []byte) (*hashed, error) {
|
||||
if len(hashedSecret) < minHashSize {
|
||||
return nil, ErrHashTooShort
|
||||
}
|
||||
p := new(hashed)
|
||||
n, err := p.decodeVersion(hashedSecret)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hashedSecret = hashedSecret[n:]
|
||||
n, err = p.decodeCost(hashedSecret)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hashedSecret = hashedSecret[n:]
|
||||
|
||||
// The "+2" is here because we'll have to append at most 2 '=' to the salt
|
||||
// when base64 decoding it in expensiveBlowfishSetup().
|
||||
p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2)
|
||||
copy(p.salt, hashedSecret[:encodedSaltSize])
|
||||
|
||||
hashedSecret = hashedSecret[encodedSaltSize:]
|
||||
p.hash = make([]byte, len(hashedSecret))
|
||||
copy(p.hash, hashedSecret)
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) {
|
||||
cipherData := make([]byte, len(magicCipherData))
|
||||
copy(cipherData, magicCipherData)
|
||||
|
||||
c, err := expensiveBlowfishSetup(password, uint32(cost), salt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i := 0; i < 24; i += 8 {
|
||||
for j := 0; j < 64; j++ {
|
||||
c.Encrypt(cipherData[i:i+8], cipherData[i:i+8])
|
||||
}
|
||||
}
|
||||
|
||||
// Bug compatibility with C bcrypt implementations. We only encode 23 of
|
||||
// the 24 bytes encrypted.
|
||||
hsh := base64Encode(cipherData[:maxCryptedHashSize])
|
||||
return hsh, nil
|
||||
}
|
||||
|
||||
func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) {
|
||||
csalt, err := base64Decode(salt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Bug compatibility with C bcrypt implementations. They use the trailing
|
||||
// NULL in the key string during expansion.
|
||||
// We copy the key to prevent changing the underlying array.
|
||||
ckey := append(key[:len(key):len(key)], 0)
|
||||
|
||||
c, err := blowfish.NewSaltedCipher(ckey, csalt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var i, rounds uint64
|
||||
rounds = 1 << cost
|
||||
for i = 0; i < rounds; i++ {
|
||||
blowfish.ExpandKey(ckey, c)
|
||||
blowfish.ExpandKey(csalt, c)
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (p *hashed) Hash() []byte {
|
||||
arr := make([]byte, 60)
|
||||
arr[0] = '$'
|
||||
arr[1] = p.major
|
||||
n := 2
|
||||
if p.minor != 0 {
|
||||
arr[2] = p.minor
|
||||
n = 3
|
||||
}
|
||||
arr[n] = '$'
|
||||
n++
|
||||
copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost)))
|
||||
n += 2
|
||||
arr[n] = '$'
|
||||
n++
|
||||
copy(arr[n:], p.salt)
|
||||
n += encodedSaltSize
|
||||
copy(arr[n:], p.hash)
|
||||
n += encodedHashSize
|
||||
return arr[:n]
|
||||
}
|
||||
|
||||
func (p *hashed) decodeVersion(sbytes []byte) (int, error) {
|
||||
if sbytes[0] != '$' {
|
||||
return -1, InvalidHashPrefixError(sbytes[0])
|
||||
}
|
||||
if sbytes[1] > majorVersion {
|
||||
return -1, HashVersionTooNewError(sbytes[1])
|
||||
}
|
||||
p.major = sbytes[1]
|
||||
n := 3
|
||||
if sbytes[2] != '$' {
|
||||
p.minor = sbytes[2]
|
||||
n++
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// sbytes should begin where decodeVersion left off.
|
||||
func (p *hashed) decodeCost(sbytes []byte) (int, error) {
|
||||
cost, err := strconv.Atoi(string(sbytes[0:2]))
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
err = checkCost(cost)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
p.cost = cost
|
||||
return 3, nil
|
||||
}
|
||||
|
||||
func (p *hashed) String() string {
|
||||
return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor)
|
||||
}
|
||||
|
||||
func checkCost(cost int) error {
|
||||
if cost < MinCost || cost > MaxCost {
|
||||
return InvalidCostError(cost)
|
||||
}
|
||||
return nil
|
||||
}
|
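Since bcrypt is the password-hashing primitive this base project vendors, here is a minimal sketch of how application code would typically call the exported API shown above; the password literal and the print statements are illustrative, not part of this commit:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// Hash a password at the default cost (10). Per Hash() above, the result
	// has the form $2a$10$<22-char salt><31-char hash>, 60 bytes in total.
	hash, err := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}

	// CompareHashAndPassword returns nil on a match and
	// bcrypt.ErrMismatchedHashAndPassword otherwise.
	if err := bcrypt.CompareHashAndPassword(hash, []byte("s3cret")); err != nil {
		fmt.Println("mismatch:", err)
		return
	}
	fmt.Println("password ok:", string(hash))
}
```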
159
vendor/golang.org/x/crypto/blowfish/block.go
generated
vendored
Normal file
@@ -0,0 +1,159 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package blowfish
|
||||
|
||||
// getNextWord returns the next big-endian uint32 value from the byte slice
|
||||
// at the given position in a circular manner, updating the position.
|
||||
func getNextWord(b []byte, pos *int) uint32 {
|
||||
var w uint32
|
||||
j := *pos
|
||||
for i := 0; i < 4; i++ {
|
||||
w = w<<8 | uint32(b[j])
|
||||
j++
|
||||
if j >= len(b) {
|
||||
j = 0
|
||||
}
|
||||
}
|
||||
*pos = j
|
||||
return w
|
||||
}
|
||||
|
||||
// ExpandKey performs a key expansion on the given *Cipher. Specifically, it
|
||||
// performs the Blowfish algorithm's key schedule which sets up the *Cipher's
|
||||
// pi and substitution tables for calls to Encrypt. This is used, primarily,
|
||||
// by the bcrypt package to reuse the Blowfish key schedule during its
|
||||
// set up. It's unlikely that you need to use this directly.
|
||||
func ExpandKey(key []byte, c *Cipher) {
|
||||
j := 0
|
||||
for i := 0; i < 18; i++ {
|
||||
// Using inlined getNextWord for performance.
|
||||
var d uint32
|
||||
for k := 0; k < 4; k++ {
|
||||
d = d<<8 | uint32(key[j])
|
||||
j++
|
||||
if j >= len(key) {
|
||||
j = 0
|
||||
}
|
||||
}
|
||||
c.p[i] ^= d
|
||||
}
|
||||
|
||||
var l, r uint32
|
||||
for i := 0; i < 18; i += 2 {
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.p[i], c.p[i+1] = l, r
|
||||
}
|
||||
|
||||
for i := 0; i < 256; i += 2 {
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.s0[i], c.s0[i+1] = l, r
|
||||
}
|
||||
for i := 0; i < 256; i += 2 {
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.s1[i], c.s1[i+1] = l, r
|
||||
}
|
||||
for i := 0; i < 256; i += 2 {
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.s2[i], c.s2[i+1] = l, r
|
||||
}
|
||||
for i := 0; i < 256; i += 2 {
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.s3[i], c.s3[i+1] = l, r
|
||||
}
|
||||
}
|
||||
|
||||
// This is similar to ExpandKey, but folds the salt during the key
|
||||
// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero
|
||||
// salt passed in, reusing ExpandKey turns out to be a place of inefficiency
|
||||
// and specializing it here is useful.
|
||||
func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) {
|
||||
j := 0
|
||||
for i := 0; i < 18; i++ {
|
||||
c.p[i] ^= getNextWord(key, &j)
|
||||
}
|
||||
|
||||
j = 0
|
||||
var l, r uint32
|
||||
for i := 0; i < 18; i += 2 {
|
||||
l ^= getNextWord(salt, &j)
|
||||
r ^= getNextWord(salt, &j)
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.p[i], c.p[i+1] = l, r
|
||||
}
|
||||
|
||||
for i := 0; i < 256; i += 2 {
|
||||
l ^= getNextWord(salt, &j)
|
||||
r ^= getNextWord(salt, &j)
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.s0[i], c.s0[i+1] = l, r
|
||||
}
|
||||
|
||||
for i := 0; i < 256; i += 2 {
|
||||
l ^= getNextWord(salt, &j)
|
||||
r ^= getNextWord(salt, &j)
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.s1[i], c.s1[i+1] = l, r
|
||||
}
|
||||
|
||||
for i := 0; i < 256; i += 2 {
|
||||
l ^= getNextWord(salt, &j)
|
||||
r ^= getNextWord(salt, &j)
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.s2[i], c.s2[i+1] = l, r
|
||||
}
|
||||
|
||||
for i := 0; i < 256; i += 2 {
|
||||
l ^= getNextWord(salt, &j)
|
||||
r ^= getNextWord(salt, &j)
|
||||
l, r = encryptBlock(l, r, c)
|
||||
c.s3[i], c.s3[i+1] = l, r
|
||||
}
|
||||
}
|
||||
|
||||
func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
|
||||
xl, xr := l, r
|
||||
xl ^= c.p[0]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16]
|
||||
xr ^= c.p[17]
|
||||
return xr, xl
|
||||
}
|
||||
|
||||
func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
|
||||
xl, xr := l, r
|
||||
xl ^= c.p[17]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3]
|
||||
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2]
|
||||
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1]
|
||||
xr ^= c.p[0]
|
||||
return xr, xl
|
||||
}
|
99
vendor/golang.org/x/crypto/blowfish/cipher.go
generated
vendored
Normal file
99
vendor/golang.org/x/crypto/blowfish/cipher.go
generated
vendored
Normal file
@ -0,0 +1,99 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm.
|
||||
//
|
||||
// Blowfish is a legacy cipher and its short block size makes it vulnerable to
|
||||
// birthday bound attacks (see https://sweet32.info). It should only be used
|
||||
// where compatibility with legacy systems, not security, is the goal.
|
||||
//
|
||||
// Deprecated: any new system should use AES (from crypto/aes, if necessary in
|
||||
// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from
|
||||
// golang.org/x/crypto/chacha20poly1305).
|
||||
package blowfish // import "golang.org/x/crypto/blowfish"
|
||||
|
||||
// The code is a port of Bruce Schneier's C implementation.
|
||||
// See https://www.schneier.com/blowfish.html.
|
||||
|
||||
import "strconv"
|
||||
|
||||
// The Blowfish block size in bytes.
|
||||
const BlockSize = 8
|
||||
|
||||
// A Cipher is an instance of Blowfish encryption using a particular key.
|
||||
type Cipher struct {
|
||||
p [18]uint32
|
||||
s0, s1, s2, s3 [256]uint32
|
||||
}
|
||||
|
||||
type KeySizeError int
|
||||
|
||||
func (k KeySizeError) Error() string {
|
||||
return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k))
|
||||
}
|
||||
|
||||
// NewCipher creates and returns a Cipher.
|
||||
// The key argument should be the Blowfish key, from 1 to 56 bytes.
|
||||
func NewCipher(key []byte) (*Cipher, error) {
|
||||
var result Cipher
|
||||
if k := len(key); k < 1 || k > 56 {
|
||||
return nil, KeySizeError(k)
|
||||
}
|
||||
initCipher(&result)
|
||||
ExpandKey(key, &result)
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// NewSaltedCipher creates a returns a Cipher that folds a salt into its key
|
||||
// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
|
||||
// sufficient and desirable. For bcrypt compatibility, the key can be over 56
|
||||
// bytes.
|
||||
func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
|
||||
if len(salt) == 0 {
|
||||
return NewCipher(key)
|
||||
}
|
||||
var result Cipher
|
||||
if k := len(key); k < 1 {
|
||||
return nil, KeySizeError(k)
|
||||
}
|
||||
initCipher(&result)
|
||||
expandKeyWithSalt(key, salt, &result)
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// BlockSize returns the Blowfish block size, 8 bytes.
|
||||
// It is necessary to satisfy the Block interface in the
|
||||
// package "crypto/cipher".
|
||||
func (c *Cipher) BlockSize() int { return BlockSize }
|
||||
|
||||
// Encrypt encrypts the 8-byte buffer src using the key k
|
||||
// and stores the result in dst.
|
||||
// Note that for amounts of data larger than a block,
|
||||
// it is not safe to just call Encrypt on successive blocks;
|
||||
// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
|
||||
func (c *Cipher) Encrypt(dst, src []byte) {
|
||||
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
|
||||
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
|
||||
l, r = encryptBlock(l, r, c)
|
||||
dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
|
||||
dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
|
||||
}
|
||||
|
||||
// Decrypt decrypts the 8-byte buffer src using the key k
|
||||
// and stores the result in dst.
|
||||
func (c *Cipher) Decrypt(dst, src []byte) {
|
||||
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
|
||||
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
|
||||
l, r = decryptBlock(l, r, c)
|
||||
dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
|
||||
dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
|
||||
}
|
||||
|
||||
func initCipher(c *Cipher) {
|
||||
copy(c.p[0:], p[0:])
|
||||
copy(c.s0[0:], s0[0:])
|
||||
copy(c.s1[0:], s1[0:])
|
||||
copy(c.s2[0:], s2[0:])
|
||||
copy(c.s3[0:], s3[0:])
|
||||
}
|
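The package's own doc comment marks Blowfish as a legacy cipher, vendored here only because bcrypt builds on it. For completeness, a minimal sketch of the exported API above; the key and plaintext literals are illustrative:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/blowfish"
)

func main() {
	// NewCipher accepts a key of 1 to 56 bytes and runs ExpandKey once.
	c, err := blowfish.NewCipher([]byte("an example key"))
	if err != nil {
		panic(err)
	}

	// Encrypt and Decrypt operate on exactly one 8-byte block (BlockSize).
	// For real data, wrap the cipher in a mode from crypto/cipher instead of
	// calling Encrypt block by block.
	src := []byte("8 bytes!")
	dst := make([]byte, blowfish.BlockSize)
	c.Encrypt(dst, src)

	plain := make([]byte, blowfish.BlockSize)
	c.Decrypt(plain, dst)
	fmt.Printf("%x -> %q\n", dst, plain)
}
```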
199
vendor/golang.org/x/crypto/blowfish/const.go
generated
vendored
Normal file
@@ -0,0 +1,199 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// The startup permutation array and substitution boxes.
|
||||
// They are the hexadecimal digits of PI; see:
|
||||
// https://www.schneier.com/code/constants.txt.
|
||||
|
||||
package blowfish
|
||||
|
||||
var s0 = [256]uint32{
|
||||
0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96,
|
||||
0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16,
|
||||
0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658,
|
||||
0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013,
|
||||
0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e,
|
||||
0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60,
|
||||
0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6,
|
||||
0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a,
|
||||
0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c,
|
||||
0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193,
|
||||
0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1,
|
||||
0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239,
|
||||
0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a,
|
||||
0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3,
|
||||
0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176,
|
||||
0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe,
|
||||
0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706,
|
||||
0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b,
|
||||
0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b,
|
||||
0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463,
|
||||
0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c,
|
||||
0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
|
||||
0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a,
|
||||
0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8,
|
||||
0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760,
|
||||
0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db,
|
||||
0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8,
|
||||
0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b,
|
||||
0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33,
|
||||
0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4,
|
||||
0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0,
|
||||
0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c,
|
||||
0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777,
|
||||
0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299,
|
||||
0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705,
|
||||
0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf,
|
||||
0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e,
|
||||
0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa,
|
||||
0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9,
|
||||
0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915,
|
||||
0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f,
|
||||
0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664,
|
||||
0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a,
|
||||
}
|
||||
|
||||
var s1 = [256]uint32{
|
||||
0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d,
|
||||
0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1,
|
||||
0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65,
|
||||
0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1,
|
||||
0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9,
|
||||
0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737,
|
||||
0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d,
|
||||
0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd,
|
||||
0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc,
|
||||
0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41,
|
||||
0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908,
|
||||
0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af,
|
||||
0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124,
|
||||
0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c,
|
||||
0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908,
|
||||
0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd,
|
||||
0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b,
|
||||
0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e,
|
||||
0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa,
|
||||
0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a,
|
||||
0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d,
|
||||
0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
|
||||
0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5,
|
||||
0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84,
|
||||
0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96,
|
||||
0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14,
|
||||
0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca,
|
||||
0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7,
|
||||
0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77,
|
||||
0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99,
|
||||
0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054,
|
||||
0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73,
|
||||
0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea,
|
||||
0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105,
|
||||
0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646,
|
||||
0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285,
|
||||
0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea,
|
||||
0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb,
|
||||
0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e,
|
||||
0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc,
|
||||
0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd,
|
||||
0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20,
|
||||
0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7,
|
||||
}
|
||||
|
||||
var s2 = [256]uint32{
|
||||
0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7,
|
||||
0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af,
|
||||
0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af,
|
||||
0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504,
|
||||
0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4,
|
||||
0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee,
|
||||
0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec,
|
||||
0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b,
|
||||
0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332,
|
||||
0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527,
|
||||
0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58,
|
||||
0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c,
|
||||
0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22,
|
||||
0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17,
|
||||
0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60,
|
||||
0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115,
|
||||
0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99,
|
||||
0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0,
|
||||
0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74,
|
||||
0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d,
|
||||
0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3,
|
||||
0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
|
||||
0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979,
|
||||
0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c,
|
||||
0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa,
|
||||
0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a,
|
||||
0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086,
|
||||
0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc,
|
||||
0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24,
|
||||
0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2,
|
||||
0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84,
|
||||
0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c,
|
||||
0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09,
|
||||
0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10,
|
||||
0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe,
|
||||
0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027,
|
||||
0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0,
|
||||
0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634,
|
||||
0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188,
|
||||
0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc,
|
||||
0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8,
|
||||
0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837,
|
||||
0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0,
|
||||
}
|
||||
|
||||
var s3 = [256]uint32{
|
||||
0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742,
|
||||
0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b,
|
||||
0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79,
|
||||
0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6,
|
||||
0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a,
|
||||
0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4,
|
||||
0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1,
|
||||
0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59,
|
||||
0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797,
|
||||
0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28,
|
||||
0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6,
|
||||
0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28,
|
||||
0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba,
|
||||
0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a,
|
||||
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5,
|
||||
0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f,
|
||||
0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce,
|
||||
0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680,
|
||||
0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd,
|
||||
0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb,
|
||||
0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb,
|
||||
0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
|
||||
0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc,
|
||||
0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048,
|
||||
0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc,
|
||||
0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9,
|
||||
0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a,
|
||||
0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f,
|
||||
0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a,
|
||||
0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1,
|
||||
0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b,
|
||||
0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e,
|
||||
0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e,
|
||||
0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f,
|
||||
0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623,
|
||||
0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc,
|
||||
0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a,
|
||||
0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6,
|
||||
0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3,
|
||||
0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060,
|
||||
0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c,
|
||||
0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
|
||||
0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
|
||||
}
|
||||
|
||||
var p = [18]uint32{
|
||||
0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0,
|
||||
0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
|
||||
0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b,
|
||||
}
|
77
vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
generated
vendored
Normal file
@@ -0,0 +1,77 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/*
Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC
2898 / PKCS #5 v2.0.

A key derivation function is useful when encrypting data based on a password
or any other not-fully-random data. It uses a pseudorandom function to derive
a secure encryption key based on the password.

While v2.0 of the standard defines only one pseudorandom function to use,
HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved
Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To
choose, you can pass the `New` functions from the different SHA packages to
pbkdf2.Key.
*/
package pbkdf2 // import "golang.org/x/crypto/pbkdf2"

import (
	"crypto/hmac"
	"hash"
)

// Key derives a key from the password, salt and iteration count, returning a
// []byte of length keylen that can be used as cryptographic key. The key is
// derived based on the method described as PBKDF2 with the HMAC variant using
// the supplied hash function.
//
// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you
// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by
// doing:
//
//	dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New)
//
// Remember to get a good random salt. At least 8 bytes is recommended by the
// RFC.
//
// Using a higher iteration count will increase the cost of an exhaustive
// search but will also make derivation proportionally slower.
func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte {
	prf := hmac.New(h, password)
	hashLen := prf.Size()
	numBlocks := (keyLen + hashLen - 1) / hashLen

	var buf [4]byte
	dk := make([]byte, 0, numBlocks*hashLen)
	U := make([]byte, hashLen)
	for block := 1; block <= numBlocks; block++ {
		// N.B.: || means concatenation, ^ means XOR
		// for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter
		// U_1 = PRF(password, salt || uint(i))
		prf.Reset()
		prf.Write(salt)
		buf[0] = byte(block >> 24)
		buf[1] = byte(block >> 16)
		buf[2] = byte(block >> 8)
		buf[3] = byte(block)
		prf.Write(buf[:4])
		dk = prf.Sum(dk)
		T := dk[len(dk)-hashLen:]
		copy(U, T)

		// U_n = PRF(password, U_(n-1))
		for n := 2; n <= iter; n++ {
			prf.Reset()
			prf.Write(U)
			U = U[:0]
			U = prf.Sum(U)
			for x := range U {
				T[x] ^= U[x]
			}
		}
	}
	return dk[:keyLen]
}
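Following the package doc above (a random salt of at least 8 bytes, and any of the SHA `New` constructors as the PRF), a minimal usage sketch might look like this; the iteration count and key length are illustrative choices, not values mandated by the package:

```go
package main

import (
	"crypto/rand"
	"crypto/sha256"
	"fmt"

	"golang.org/x/crypto/pbkdf2"
)

func main() {
	// Random 16-byte salt, comfortably above the 8-byte minimum in the doc.
	salt := make([]byte, 16)
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}

	// Derive a 32-byte key (e.g. for AES-256) using HMAC-SHA-256 as the PRF.
	key := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha256.New)
	fmt.Printf("%x\n", key)
}
```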
66
vendor/golang.org/x/crypto/sha3/doc.go
generated
vendored
Normal file
@@ -0,0 +1,66 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package sha3 implements the SHA-3 fixed-output-length hash functions and
// the SHAKE variable-output-length hash functions defined by FIPS-202.
//
// Both types of hash function use the "sponge" construction and the Keccak
// permutation. For a detailed specification see http://keccak.noekeon.org/
//
//
// Guidance
//
// If you aren't sure what function you need, use SHAKE256 with at least 64
// bytes of output. The SHAKE instances are faster than the SHA3 instances;
// the latter have to allocate memory to conform to the hash.Hash interface.
//
// If you need a secret-key MAC (message authentication code), prepend the
// secret key to the input, hash with SHAKE256 and read at least 32 bytes of
// output.
//
//
// Security strengths
//
// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security
// strength against preimage attacks of x bits. Since they only produce "x"
// bits of output, their collision-resistance is only "x/2" bits.
//
// The SHAKE-256 and -128 functions have a generic security strength of 256 and
// 128 bits against all attacks, provided that at least 2x bits of their output
// is used. Requesting more than 64 or 32 bytes of output, respectively, does
// not increase the collision-resistance of the SHAKE functions.
//
//
// The sponge construction
//
// A sponge builds a pseudo-random function from a public pseudo-random
// permutation, by applying the permutation to a state of "rate + capacity"
// bytes, but hiding "capacity" of the bytes.
//
// A sponge starts out with a zero state. To hash an input using a sponge, up
// to "rate" bytes of the input are XORed into the sponge's state. The sponge
// is then "full" and the permutation is applied to "empty" it. This process is
// repeated until all the input has been "absorbed". The input is then padded.
// The digest is "squeezed" from the sponge in the same way, except that output
// is copied out instead of input being XORed in.
//
// A sponge is parameterized by its generic security strength, which is equal
// to half its capacity; capacity + rate is equal to the permutation's width.
// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means
// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2.
//
//
// Recommendations
//
// The SHAKE functions are recommended for most new uses. They can produce
// output of arbitrary length. SHAKE256, with an output length of at least
// 64 bytes, provides 256-bit security against all attacks. The Keccak team
// recommends it for most applications upgrading from SHA2-512. (NIST chose a
// much stronger, but much slower, sponge instance for SHA3-512.)
//
// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions.
// They produce output of the same length, with the same security strengths
// against all attacks. This means, in particular, that SHA3-256 only has
// 128-bit collision resistance, because its output length is 32 bytes.
package sha3 // import "golang.org/x/crypto/sha3"
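The sponge parameters described above can be checked against the concrete `rate` values used in hashes.go below: KeccakF-1600 has a 200-byte state, SHA3-x uses a capacity of 2*x bits, so rate = 200 - 2*(x/8) bytes and the generic strength (1600 - bitrate)/2 comes out to x bits. A small sketch that just verifies this arithmetic:

```go
package main

import "fmt"

func main() {
	// For SHA3-x, capacity = 2*x bits, so rate = 200 - 2*(x/8) bytes,
	// matching the rate fields in hashes.go:
	// SHA3-224 -> 144, SHA3-256 -> 136, SHA3-384 -> 104, SHA3-512 -> 72.
	for _, outBytes := range []int{28, 32, 48, 64} {
		rate := 200 - 2*outBytes
		strength := (1600 - rate*8) / 2 // bits; equals 8*outBytes
		fmt.Printf("SHA3-%d: rate=%d bytes, generic strength=%d bits\n",
			outBytes*8, rate, strength)
	}
}
```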
97
vendor/golang.org/x/crypto/sha3/hashes.go
generated
vendored
Normal file
@@ -0,0 +1,97 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sha3
|
||||
|
||||
// This file provides functions for creating instances of the SHA-3
|
||||
// and SHAKE hash functions, as well as utility functions for hashing
|
||||
// bytes.
|
||||
|
||||
import (
|
||||
"hash"
|
||||
)
|
||||
|
||||
// New224 creates a new SHA3-224 hash.
|
||||
// Its generic security strength is 224 bits against preimage attacks,
|
||||
// and 112 bits against collision attacks.
|
||||
func New224() hash.Hash {
|
||||
if h := new224Asm(); h != nil {
|
||||
return h
|
||||
}
|
||||
return &state{rate: 144, outputLen: 28, dsbyte: 0x06}
|
||||
}
|
||||
|
||||
// New256 creates a new SHA3-256 hash.
|
||||
// Its generic security strength is 256 bits against preimage attacks,
|
||||
// and 128 bits against collision attacks.
|
||||
func New256() hash.Hash {
|
||||
if h := new256Asm(); h != nil {
|
||||
return h
|
||||
}
|
||||
return &state{rate: 136, outputLen: 32, dsbyte: 0x06}
|
||||
}
|
||||
|
||||
// New384 creates a new SHA3-384 hash.
|
||||
// Its generic security strength is 384 bits against preimage attacks,
|
||||
// and 192 bits against collision attacks.
|
||||
func New384() hash.Hash {
|
||||
if h := new384Asm(); h != nil {
|
||||
return h
|
||||
}
|
||||
return &state{rate: 104, outputLen: 48, dsbyte: 0x06}
|
||||
}
|
||||
|
||||
// New512 creates a new SHA3-512 hash.
|
||||
// Its generic security strength is 512 bits against preimage attacks,
|
||||
// and 256 bits against collision attacks.
|
||||
func New512() hash.Hash {
|
||||
if h := new512Asm(); h != nil {
|
||||
return h
|
||||
}
|
||||
return &state{rate: 72, outputLen: 64, dsbyte: 0x06}
|
||||
}
|
||||
|
||||
// NewLegacyKeccak256 creates a new Keccak-256 hash.
|
||||
//
|
||||
// Only use this function if you require compatibility with an existing cryptosystem
|
||||
// that uses non-standard padding. All other users should use New256 instead.
|
||||
func NewLegacyKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} }
|
||||
|
||||
// NewLegacyKeccak512 creates a new Keccak-512 hash.
|
||||
//
|
||||
// Only use this function if you require compatibility with an existing cryptosystem
|
||||
// that uses non-standard padding. All other users should use New512 instead.
|
||||
func NewLegacyKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} }
|
||||
|
||||
// Sum224 returns the SHA3-224 digest of the data.
|
||||
func Sum224(data []byte) (digest [28]byte) {
|
||||
h := New224()
|
||||
h.Write(data)
|
||||
h.Sum(digest[:0])
|
||||
return
|
||||
}
|
||||
|
||||
// Sum256 returns the SHA3-256 digest of the data.
|
||||
func Sum256(data []byte) (digest [32]byte) {
|
||||
h := New256()
|
||||
h.Write(data)
|
||||
h.Sum(digest[:0])
|
||||
return
|
||||
}
|
||||
|
||||
// Sum384 returns the SHA3-384 digest of the data.
|
||||
func Sum384(data []byte) (digest [48]byte) {
|
||||
h := New384()
|
||||
h.Write(data)
|
||||
h.Sum(digest[:0])
|
||||
return
|
||||
}
|
||||
|
||||
// Sum512 returns the SHA3-512 digest of the data.
|
||||
func Sum512(data []byte) (digest [64]byte) {
|
||||
h := New512()
|
||||
h.Write(data)
|
||||
h.Sum(digest[:0])
|
||||
return
|
||||
}
|
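A minimal sketch of the two ways to use the constructors above: the one-shot Sum helpers and the streaming hash.Hash returned by New256. The input string is illustrative; both calls produce the same digest:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	data := []byte("hello world")

	// One-shot helper: returns a [32]byte digest.
	sum := sha3.Sum256(data)
	fmt.Printf("%x\n", sum)

	// Streaming via the hash.Hash interface returned by New256.
	h := sha3.New256()
	h.Write(data)
	fmt.Printf("%x\n", h.Sum(nil))
}
```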
28
vendor/golang.org/x/crypto/sha3/hashes_generic.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !gc || purego || !s390x
// +build !gc purego !s390x

package sha3

import (
	"hash"
)

// new224Asm returns an assembly implementation of SHA3-224 if available,
// otherwise it returns nil.
func new224Asm() hash.Hash { return nil }

// new256Asm returns an assembly implementation of SHA3-256 if available,
// otherwise it returns nil.
func new256Asm() hash.Hash { return nil }

// new384Asm returns an assembly implementation of SHA3-384 if available,
// otherwise it returns nil.
func new384Asm() hash.Hash { return nil }

// new512Asm returns an assembly implementation of SHA3-512 if available,
// otherwise it returns nil.
func new512Asm() hash.Hash { return nil }
413
vendor/golang.org/x/crypto/sha3/keccakf.go
generated
vendored
Normal file
@@ -0,0 +1,413 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !amd64 || purego || !gc
|
||||
// +build !amd64 purego !gc
|
||||
|
||||
package sha3
|
||||
|
||||
// rc stores the round constants for use in the ι step.
|
||||
var rc = [24]uint64{
|
||||
0x0000000000000001,
|
||||
0x0000000000008082,
|
||||
0x800000000000808A,
|
||||
0x8000000080008000,
|
||||
0x000000000000808B,
|
||||
0x0000000080000001,
|
||||
0x8000000080008081,
|
||||
0x8000000000008009,
|
||||
0x000000000000008A,
|
||||
0x0000000000000088,
|
||||
0x0000000080008009,
|
||||
0x000000008000000A,
|
||||
0x000000008000808B,
|
||||
0x800000000000008B,
|
||||
0x8000000000008089,
|
||||
0x8000000000008003,
|
||||
0x8000000000008002,
|
||||
0x8000000000000080,
|
||||
0x000000000000800A,
|
||||
0x800000008000000A,
|
||||
0x8000000080008081,
|
||||
0x8000000000008080,
|
||||
0x0000000080000001,
|
||||
0x8000000080008008,
|
||||
}
|
||||
|
||||
// keccakF1600 applies the Keccak permutation to a 1600b-wide
|
||||
// state represented as a slice of 25 uint64s.
|
||||
func keccakF1600(a *[25]uint64) {
|
||||
// Implementation translated from Keccak-inplace.c
|
||||
// in the keccak reference code.
|
||||
var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64
|
||||
|
||||
for i := 0; i < 24; i += 4 {
|
||||
// Combines the 5 steps in each round into 2 steps.
|
||||
// Unrolls 4 rounds per loop and spreads some steps across rounds.
|
||||
|
||||
// Round 1
|
||||
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
|
||||
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
|
||||
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
|
||||
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
|
||||
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
|
||||
d0 = bc4 ^ (bc1<<1 | bc1>>63)
|
||||
d1 = bc0 ^ (bc2<<1 | bc2>>63)
|
||||
d2 = bc1 ^ (bc3<<1 | bc3>>63)
|
||||
d3 = bc2 ^ (bc4<<1 | bc4>>63)
|
||||
d4 = bc3 ^ (bc0<<1 | bc0>>63)
|
||||
|
||||
bc0 = a[0] ^ d0
|
||||
t = a[6] ^ d1
|
||||
bc1 = t<<44 | t>>(64-44)
|
||||
t = a[12] ^ d2
|
||||
bc2 = t<<43 | t>>(64-43)
|
||||
t = a[18] ^ d3
|
||||
bc3 = t<<21 | t>>(64-21)
|
||||
t = a[24] ^ d4
|
||||
bc4 = t<<14 | t>>(64-14)
|
||||
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i]
|
||||
a[6] = bc1 ^ (bc3 &^ bc2)
|
||||
a[12] = bc2 ^ (bc4 &^ bc3)
|
||||
a[18] = bc3 ^ (bc0 &^ bc4)
|
||||
a[24] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[10] ^ d0
|
||||
bc2 = t<<3 | t>>(64-3)
|
||||
t = a[16] ^ d1
|
||||
bc3 = t<<45 | t>>(64-45)
|
||||
t = a[22] ^ d2
|
||||
bc4 = t<<61 | t>>(64-61)
|
||||
t = a[3] ^ d3
|
||||
bc0 = t<<28 | t>>(64-28)
|
||||
t = a[9] ^ d4
|
||||
bc1 = t<<20 | t>>(64-20)
|
||||
a[10] = bc0 ^ (bc2 &^ bc1)
|
||||
a[16] = bc1 ^ (bc3 &^ bc2)
|
||||
a[22] = bc2 ^ (bc4 &^ bc3)
|
||||
a[3] = bc3 ^ (bc0 &^ bc4)
|
||||
a[9] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[20] ^ d0
|
||||
bc4 = t<<18 | t>>(64-18)
|
||||
t = a[1] ^ d1
|
||||
bc0 = t<<1 | t>>(64-1)
|
||||
t = a[7] ^ d2
|
||||
bc1 = t<<6 | t>>(64-6)
|
||||
t = a[13] ^ d3
|
||||
bc2 = t<<25 | t>>(64-25)
|
||||
t = a[19] ^ d4
|
||||
bc3 = t<<8 | t>>(64-8)
|
||||
a[20] = bc0 ^ (bc2 &^ bc1)
|
||||
a[1] = bc1 ^ (bc3 &^ bc2)
|
||||
a[7] = bc2 ^ (bc4 &^ bc3)
|
||||
a[13] = bc3 ^ (bc0 &^ bc4)
|
||||
a[19] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[5] ^ d0
|
||||
bc1 = t<<36 | t>>(64-36)
|
||||
t = a[11] ^ d1
|
||||
bc2 = t<<10 | t>>(64-10)
|
||||
t = a[17] ^ d2
|
||||
bc3 = t<<15 | t>>(64-15)
|
||||
t = a[23] ^ d3
|
||||
bc4 = t<<56 | t>>(64-56)
|
||||
t = a[4] ^ d4
|
||||
bc0 = t<<27 | t>>(64-27)
|
||||
a[5] = bc0 ^ (bc2 &^ bc1)
|
||||
a[11] = bc1 ^ (bc3 &^ bc2)
|
||||
a[17] = bc2 ^ (bc4 &^ bc3)
|
||||
a[23] = bc3 ^ (bc0 &^ bc4)
|
||||
a[4] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[15] ^ d0
|
||||
bc3 = t<<41 | t>>(64-41)
|
||||
t = a[21] ^ d1
|
||||
bc4 = t<<2 | t>>(64-2)
|
||||
t = a[2] ^ d2
|
||||
bc0 = t<<62 | t>>(64-62)
|
||||
t = a[8] ^ d3
|
||||
bc1 = t<<55 | t>>(64-55)
|
||||
t = a[14] ^ d4
|
||||
bc2 = t<<39 | t>>(64-39)
|
||||
a[15] = bc0 ^ (bc2 &^ bc1)
|
||||
a[21] = bc1 ^ (bc3 &^ bc2)
|
||||
a[2] = bc2 ^ (bc4 &^ bc3)
|
||||
a[8] = bc3 ^ (bc0 &^ bc4)
|
||||
a[14] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
// Round 2
|
||||
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
|
||||
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
|
||||
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
|
||||
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
|
||||
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
|
||||
d0 = bc4 ^ (bc1<<1 | bc1>>63)
|
||||
d1 = bc0 ^ (bc2<<1 | bc2>>63)
|
||||
d2 = bc1 ^ (bc3<<1 | bc3>>63)
|
||||
d3 = bc2 ^ (bc4<<1 | bc4>>63)
|
||||
d4 = bc3 ^ (bc0<<1 | bc0>>63)
|
||||
|
||||
bc0 = a[0] ^ d0
|
||||
t = a[16] ^ d1
|
||||
bc1 = t<<44 | t>>(64-44)
|
||||
t = a[7] ^ d2
|
||||
bc2 = t<<43 | t>>(64-43)
|
||||
t = a[23] ^ d3
|
||||
bc3 = t<<21 | t>>(64-21)
|
||||
t = a[14] ^ d4
|
||||
bc4 = t<<14 | t>>(64-14)
|
||||
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1]
|
||||
a[16] = bc1 ^ (bc3 &^ bc2)
|
||||
a[7] = bc2 ^ (bc4 &^ bc3)
|
||||
a[23] = bc3 ^ (bc0 &^ bc4)
|
||||
a[14] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[20] ^ d0
|
||||
bc2 = t<<3 | t>>(64-3)
|
||||
t = a[11] ^ d1
|
||||
bc3 = t<<45 | t>>(64-45)
|
||||
t = a[2] ^ d2
|
||||
bc4 = t<<61 | t>>(64-61)
|
||||
t = a[18] ^ d3
|
||||
bc0 = t<<28 | t>>(64-28)
|
||||
t = a[9] ^ d4
|
||||
bc1 = t<<20 | t>>(64-20)
|
||||
a[20] = bc0 ^ (bc2 &^ bc1)
|
||||
a[11] = bc1 ^ (bc3 &^ bc2)
|
||||
a[2] = bc2 ^ (bc4 &^ bc3)
|
||||
a[18] = bc3 ^ (bc0 &^ bc4)
|
||||
a[9] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[15] ^ d0
|
||||
bc4 = t<<18 | t>>(64-18)
|
||||
t = a[6] ^ d1
|
||||
bc0 = t<<1 | t>>(64-1)
|
||||
t = a[22] ^ d2
|
||||
bc1 = t<<6 | t>>(64-6)
|
||||
t = a[13] ^ d3
|
||||
bc2 = t<<25 | t>>(64-25)
|
||||
t = a[4] ^ d4
|
||||
bc3 = t<<8 | t>>(64-8)
|
||||
a[15] = bc0 ^ (bc2 &^ bc1)
|
||||
a[6] = bc1 ^ (bc3 &^ bc2)
|
||||
a[22] = bc2 ^ (bc4 &^ bc3)
|
||||
a[13] = bc3 ^ (bc0 &^ bc4)
|
||||
a[4] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[10] ^ d0
|
||||
bc1 = t<<36 | t>>(64-36)
|
||||
t = a[1] ^ d1
|
||||
bc2 = t<<10 | t>>(64-10)
|
||||
t = a[17] ^ d2
|
||||
bc3 = t<<15 | t>>(64-15)
|
||||
t = a[8] ^ d3
|
||||
bc4 = t<<56 | t>>(64-56)
|
||||
t = a[24] ^ d4
|
||||
bc0 = t<<27 | t>>(64-27)
|
||||
a[10] = bc0 ^ (bc2 &^ bc1)
|
||||
a[1] = bc1 ^ (bc3 &^ bc2)
|
||||
a[17] = bc2 ^ (bc4 &^ bc3)
|
||||
a[8] = bc3 ^ (bc0 &^ bc4)
|
||||
a[24] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[5] ^ d0
|
||||
bc3 = t<<41 | t>>(64-41)
|
||||
t = a[21] ^ d1
|
||||
bc4 = t<<2 | t>>(64-2)
|
||||
t = a[12] ^ d2
|
||||
bc0 = t<<62 | t>>(64-62)
|
||||
t = a[3] ^ d3
|
||||
bc1 = t<<55 | t>>(64-55)
|
||||
t = a[19] ^ d4
|
||||
bc2 = t<<39 | t>>(64-39)
|
||||
a[5] = bc0 ^ (bc2 &^ bc1)
|
||||
a[21] = bc1 ^ (bc3 &^ bc2)
|
||||
a[12] = bc2 ^ (bc4 &^ bc3)
|
||||
a[3] = bc3 ^ (bc0 &^ bc4)
|
||||
a[19] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
// Round 3
|
||||
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
|
||||
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
|
||||
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
|
||||
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
|
||||
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
|
||||
d0 = bc4 ^ (bc1<<1 | bc1>>63)
|
||||
d1 = bc0 ^ (bc2<<1 | bc2>>63)
|
||||
d2 = bc1 ^ (bc3<<1 | bc3>>63)
|
||||
d3 = bc2 ^ (bc4<<1 | bc4>>63)
|
||||
d4 = bc3 ^ (bc0<<1 | bc0>>63)
|
||||
|
||||
bc0 = a[0] ^ d0
|
||||
t = a[11] ^ d1
|
||||
bc1 = t<<44 | t>>(64-44)
|
||||
t = a[22] ^ d2
|
||||
bc2 = t<<43 | t>>(64-43)
|
||||
t = a[8] ^ d3
|
||||
bc3 = t<<21 | t>>(64-21)
|
||||
t = a[19] ^ d4
|
||||
bc4 = t<<14 | t>>(64-14)
|
||||
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2]
|
||||
a[11] = bc1 ^ (bc3 &^ bc2)
|
||||
a[22] = bc2 ^ (bc4 &^ bc3)
|
||||
a[8] = bc3 ^ (bc0 &^ bc4)
|
||||
a[19] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[15] ^ d0
|
||||
bc2 = t<<3 | t>>(64-3)
|
||||
t = a[1] ^ d1
|
||||
bc3 = t<<45 | t>>(64-45)
|
||||
t = a[12] ^ d2
|
||||
bc4 = t<<61 | t>>(64-61)
|
||||
t = a[23] ^ d3
|
||||
bc0 = t<<28 | t>>(64-28)
|
||||
t = a[9] ^ d4
|
||||
bc1 = t<<20 | t>>(64-20)
|
||||
a[15] = bc0 ^ (bc2 &^ bc1)
|
||||
a[1] = bc1 ^ (bc3 &^ bc2)
|
||||
a[12] = bc2 ^ (bc4 &^ bc3)
|
||||
a[23] = bc3 ^ (bc0 &^ bc4)
|
||||
a[9] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[5] ^ d0
|
||||
bc4 = t<<18 | t>>(64-18)
|
||||
t = a[16] ^ d1
|
||||
bc0 = t<<1 | t>>(64-1)
|
||||
t = a[2] ^ d2
|
||||
bc1 = t<<6 | t>>(64-6)
|
||||
t = a[13] ^ d3
|
||||
bc2 = t<<25 | t>>(64-25)
|
||||
t = a[24] ^ d4
|
||||
bc3 = t<<8 | t>>(64-8)
|
||||
a[5] = bc0 ^ (bc2 &^ bc1)
|
||||
a[16] = bc1 ^ (bc3 &^ bc2)
|
||||
a[2] = bc2 ^ (bc4 &^ bc3)
|
||||
a[13] = bc3 ^ (bc0 &^ bc4)
|
||||
a[24] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[20] ^ d0
|
||||
bc1 = t<<36 | t>>(64-36)
|
||||
t = a[6] ^ d1
|
||||
bc2 = t<<10 | t>>(64-10)
|
||||
t = a[17] ^ d2
|
||||
bc3 = t<<15 | t>>(64-15)
|
||||
t = a[3] ^ d3
|
||||
bc4 = t<<56 | t>>(64-56)
|
||||
t = a[14] ^ d4
|
||||
bc0 = t<<27 | t>>(64-27)
|
||||
a[20] = bc0 ^ (bc2 &^ bc1)
|
||||
a[6] = bc1 ^ (bc3 &^ bc2)
|
||||
a[17] = bc2 ^ (bc4 &^ bc3)
|
||||
a[3] = bc3 ^ (bc0 &^ bc4)
|
||||
a[14] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[10] ^ d0
|
||||
bc3 = t<<41 | t>>(64-41)
|
||||
t = a[21] ^ d1
|
||||
bc4 = t<<2 | t>>(64-2)
|
||||
t = a[7] ^ d2
|
||||
bc0 = t<<62 | t>>(64-62)
|
||||
t = a[18] ^ d3
|
||||
bc1 = t<<55 | t>>(64-55)
|
||||
t = a[4] ^ d4
|
||||
bc2 = t<<39 | t>>(64-39)
|
||||
a[10] = bc0 ^ (bc2 &^ bc1)
|
||||
a[21] = bc1 ^ (bc3 &^ bc2)
|
||||
a[7] = bc2 ^ (bc4 &^ bc3)
|
||||
a[18] = bc3 ^ (bc0 &^ bc4)
|
||||
a[4] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
// Round 4
|
||||
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
|
||||
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
|
||||
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
|
||||
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
|
||||
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
|
||||
d0 = bc4 ^ (bc1<<1 | bc1>>63)
|
||||
d1 = bc0 ^ (bc2<<1 | bc2>>63)
|
||||
d2 = bc1 ^ (bc3<<1 | bc3>>63)
|
||||
d3 = bc2 ^ (bc4<<1 | bc4>>63)
|
||||
d4 = bc3 ^ (bc0<<1 | bc0>>63)
|
||||
|
||||
bc0 = a[0] ^ d0
|
||||
t = a[1] ^ d1
|
||||
bc1 = t<<44 | t>>(64-44)
|
||||
t = a[2] ^ d2
|
||||
bc2 = t<<43 | t>>(64-43)
|
||||
t = a[3] ^ d3
|
||||
bc3 = t<<21 | t>>(64-21)
|
||||
t = a[4] ^ d4
|
||||
bc4 = t<<14 | t>>(64-14)
|
||||
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3]
|
||||
a[1] = bc1 ^ (bc3 &^ bc2)
|
||||
a[2] = bc2 ^ (bc4 &^ bc3)
|
||||
a[3] = bc3 ^ (bc0 &^ bc4)
|
||||
a[4] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[5] ^ d0
|
||||
bc2 = t<<3 | t>>(64-3)
|
||||
t = a[6] ^ d1
|
||||
bc3 = t<<45 | t>>(64-45)
|
||||
t = a[7] ^ d2
|
||||
bc4 = t<<61 | t>>(64-61)
|
||||
t = a[8] ^ d3
|
||||
bc0 = t<<28 | t>>(64-28)
|
||||
t = a[9] ^ d4
|
||||
bc1 = t<<20 | t>>(64-20)
|
||||
a[5] = bc0 ^ (bc2 &^ bc1)
|
||||
a[6] = bc1 ^ (bc3 &^ bc2)
|
||||
a[7] = bc2 ^ (bc4 &^ bc3)
|
||||
a[8] = bc3 ^ (bc0 &^ bc4)
|
||||
a[9] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[10] ^ d0
|
||||
bc4 = t<<18 | t>>(64-18)
|
||||
t = a[11] ^ d1
|
||||
bc0 = t<<1 | t>>(64-1)
|
||||
t = a[12] ^ d2
|
||||
bc1 = t<<6 | t>>(64-6)
|
||||
t = a[13] ^ d3
|
||||
bc2 = t<<25 | t>>(64-25)
|
||||
t = a[14] ^ d4
|
||||
bc3 = t<<8 | t>>(64-8)
|
||||
a[10] = bc0 ^ (bc2 &^ bc1)
|
||||
a[11] = bc1 ^ (bc3 &^ bc2)
|
||||
a[12] = bc2 ^ (bc4 &^ bc3)
|
||||
a[13] = bc3 ^ (bc0 &^ bc4)
|
||||
a[14] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[15] ^ d0
|
||||
bc1 = t<<36 | t>>(64-36)
|
||||
t = a[16] ^ d1
|
||||
bc2 = t<<10 | t>>(64-10)
|
||||
t = a[17] ^ d2
|
||||
bc3 = t<<15 | t>>(64-15)
|
||||
t = a[18] ^ d3
|
||||
bc4 = t<<56 | t>>(64-56)
|
||||
t = a[19] ^ d4
|
||||
bc0 = t<<27 | t>>(64-27)
|
||||
a[15] = bc0 ^ (bc2 &^ bc1)
|
||||
a[16] = bc1 ^ (bc3 &^ bc2)
|
||||
a[17] = bc2 ^ (bc4 &^ bc3)
|
||||
a[18] = bc3 ^ (bc0 &^ bc4)
|
||||
a[19] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[20] ^ d0
|
||||
bc3 = t<<41 | t>>(64-41)
|
||||
t = a[21] ^ d1
|
||||
bc4 = t<<2 | t>>(64-2)
|
||||
t = a[22] ^ d2
|
||||
bc0 = t<<62 | t>>(64-62)
|
||||
t = a[23] ^ d3
|
||||
bc1 = t<<55 | t>>(64-55)
|
||||
t = a[24] ^ d4
|
||||
bc2 = t<<39 | t>>(64-39)
|
||||
a[20] = bc0 ^ (bc2 &^ bc1)
|
||||
a[21] = bc1 ^ (bc3 &^ bc2)
|
||||
a[22] = bc2 ^ (bc4 &^ bc3)
|
||||
a[23] = bc3 ^ (bc0 &^ bc4)
|
||||
a[24] = bc4 ^ (bc1 &^ bc0)
|
||||
}
|
||||
}
|
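For orientation, an editorial sketch (not part of the vendored file): the unrolled loop above fuses the five Keccak step mappings into two passes per round. Written against the same [25]uint64 layout, the θ step alone, which the bc/d computations above implement, looks like this.

package main

import (
	"fmt"
	"math/bits"
)

// theta applies only the θ step of Keccak-f[1600]: every lane is XORed with
// the parities of two neighbouring columns. The vendored code computes the
// same bc/d values but fuses θ with the ρ, π, χ and ι steps for speed.
func theta(a *[25]uint64) {
	var c [5]uint64
	for x := 0; x < 5; x++ {
		c[x] = a[x] ^ a[x+5] ^ a[x+10] ^ a[x+15] ^ a[x+20]
	}
	for x := 0; x < 5; x++ {
		d := c[(x+4)%5] ^ bits.RotateLeft64(c[(x+1)%5], 1)
		for y := 0; y < 25; y += 5 {
			a[y+x] ^= d
		}
	}
}

func main() {
	var st [25]uint64
	st[0] = 1
	theta(&st)
	fmt.Printf("%#016x %#016x\n", st[0], st[6])
}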
14
vendor/golang.org/x/crypto/sha3/keccakf_amd64.go
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build amd64 && !purego && gc
|
||||
// +build amd64,!purego,gc
|
||||
|
||||
package sha3
|
||||
|
||||
// This function is implemented in keccakf_amd64.s.
|
||||
|
||||
//go:noescape
|
||||
|
||||
func keccakF1600(a *[25]uint64)
|
391
vendor/golang.org/x/crypto/sha3/keccakf_amd64.s
generated
vendored
Normal file
@ -0,0 +1,391 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build amd64 && !purego && gc
|
||||
// +build amd64,!purego,gc
|
||||
|
||||
// This code was translated into a form compatible with 6a from the public
|
||||
// domain sources at https://github.com/gvanas/KeccakCodePackage
|
||||
|
||||
// Offsets in state
|
||||
#define _ba (0*8)
|
||||
#define _be (1*8)
|
||||
#define _bi (2*8)
|
||||
#define _bo (3*8)
|
||||
#define _bu (4*8)
|
||||
#define _ga (5*8)
|
||||
#define _ge (6*8)
|
||||
#define _gi (7*8)
|
||||
#define _go (8*8)
|
||||
#define _gu (9*8)
|
||||
#define _ka (10*8)
|
||||
#define _ke (11*8)
|
||||
#define _ki (12*8)
|
||||
#define _ko (13*8)
|
||||
#define _ku (14*8)
|
||||
#define _ma (15*8)
|
||||
#define _me (16*8)
|
||||
#define _mi (17*8)
|
||||
#define _mo (18*8)
|
||||
#define _mu (19*8)
|
||||
#define _sa (20*8)
|
||||
#define _se (21*8)
|
||||
#define _si (22*8)
|
||||
#define _so (23*8)
|
||||
#define _su (24*8)
|
||||
|
||||
// Temporary registers
|
||||
#define rT1 AX
|
||||
|
||||
// Round vars
|
||||
#define rpState DI
|
||||
#define rpStack SP
|
||||
|
||||
#define rDa BX
|
||||
#define rDe CX
|
||||
#define rDi DX
|
||||
#define rDo R8
|
||||
#define rDu R9
|
||||
|
||||
#define rBa R10
|
||||
#define rBe R11
|
||||
#define rBi R12
|
||||
#define rBo R13
|
||||
#define rBu R14
|
||||
|
||||
#define rCa SI
|
||||
#define rCe BP
|
||||
#define rCi rBi
|
||||
#define rCo rBo
|
||||
#define rCu R15
|
||||
|
||||
#define MOVQ_RBI_RCE MOVQ rBi, rCe
|
||||
#define XORQ_RT1_RCA XORQ rT1, rCa
|
||||
#define XORQ_RT1_RCE XORQ rT1, rCe
|
||||
#define XORQ_RBA_RCU XORQ rBa, rCu
|
||||
#define XORQ_RBE_RCU XORQ rBe, rCu
|
||||
#define XORQ_RDU_RCU XORQ rDu, rCu
|
||||
#define XORQ_RDA_RCA XORQ rDa, rCa
|
||||
#define XORQ_RDE_RCE XORQ rDe, rCe
|
||||
|
||||
#define mKeccakRound(iState, oState, rc, B_RBI_RCE, G_RT1_RCA, G_RT1_RCE, G_RBA_RCU, K_RT1_RCA, K_RT1_RCE, K_RBA_RCU, M_RT1_RCA, M_RT1_RCE, M_RBE_RCU, S_RDU_RCU, S_RDA_RCA, S_RDE_RCE) \
|
||||
/* Prepare round */ \
|
||||
MOVQ rCe, rDa; \
|
||||
ROLQ $1, rDa; \
|
||||
\
|
||||
MOVQ _bi(iState), rCi; \
|
||||
XORQ _gi(iState), rDi; \
|
||||
XORQ rCu, rDa; \
|
||||
XORQ _ki(iState), rCi; \
|
||||
XORQ _mi(iState), rDi; \
|
||||
XORQ rDi, rCi; \
|
||||
\
|
||||
MOVQ rCi, rDe; \
|
||||
ROLQ $1, rDe; \
|
||||
\
|
||||
MOVQ _bo(iState), rCo; \
|
||||
XORQ _go(iState), rDo; \
|
||||
XORQ rCa, rDe; \
|
||||
XORQ _ko(iState), rCo; \
|
||||
XORQ _mo(iState), rDo; \
|
||||
XORQ rDo, rCo; \
|
||||
\
|
||||
MOVQ rCo, rDi; \
|
||||
ROLQ $1, rDi; \
|
||||
\
|
||||
MOVQ rCu, rDo; \
|
||||
XORQ rCe, rDi; \
|
||||
ROLQ $1, rDo; \
|
||||
\
|
||||
MOVQ rCa, rDu; \
|
||||
XORQ rCi, rDo; \
|
||||
ROLQ $1, rDu; \
|
||||
\
|
||||
/* Result b */ \
|
||||
MOVQ _ba(iState), rBa; \
|
||||
MOVQ _ge(iState), rBe; \
|
||||
XORQ rCo, rDu; \
|
||||
MOVQ _ki(iState), rBi; \
|
||||
MOVQ _mo(iState), rBo; \
|
||||
MOVQ _su(iState), rBu; \
|
||||
XORQ rDe, rBe; \
|
||||
ROLQ $44, rBe; \
|
||||
XORQ rDi, rBi; \
|
||||
XORQ rDa, rBa; \
|
||||
ROLQ $43, rBi; \
|
||||
\
|
||||
MOVQ rBe, rCa; \
|
||||
MOVQ rc, rT1; \
|
||||
ORQ rBi, rCa; \
|
||||
XORQ rBa, rT1; \
|
||||
XORQ rT1, rCa; \
|
||||
MOVQ rCa, _ba(oState); \
|
||||
\
|
||||
XORQ rDu, rBu; \
|
||||
ROLQ $14, rBu; \
|
||||
MOVQ rBa, rCu; \
|
||||
ANDQ rBe, rCu; \
|
||||
XORQ rBu, rCu; \
|
||||
MOVQ rCu, _bu(oState); \
|
||||
\
|
||||
XORQ rDo, rBo; \
|
||||
ROLQ $21, rBo; \
|
||||
MOVQ rBo, rT1; \
|
||||
ANDQ rBu, rT1; \
|
||||
XORQ rBi, rT1; \
|
||||
MOVQ rT1, _bi(oState); \
|
||||
\
|
||||
NOTQ rBi; \
|
||||
ORQ rBa, rBu; \
|
||||
ORQ rBo, rBi; \
|
||||
XORQ rBo, rBu; \
|
||||
XORQ rBe, rBi; \
|
||||
MOVQ rBu, _bo(oState); \
|
||||
MOVQ rBi, _be(oState); \
|
||||
B_RBI_RCE; \
|
||||
\
|
||||
/* Result g */ \
|
||||
MOVQ _gu(iState), rBe; \
|
||||
XORQ rDu, rBe; \
|
||||
MOVQ _ka(iState), rBi; \
|
||||
ROLQ $20, rBe; \
|
||||
XORQ rDa, rBi; \
|
||||
ROLQ $3, rBi; \
|
||||
MOVQ _bo(iState), rBa; \
|
||||
MOVQ rBe, rT1; \
|
||||
ORQ rBi, rT1; \
|
||||
XORQ rDo, rBa; \
|
||||
MOVQ _me(iState), rBo; \
|
||||
MOVQ _si(iState), rBu; \
|
||||
ROLQ $28, rBa; \
|
||||
XORQ rBa, rT1; \
|
||||
MOVQ rT1, _ga(oState); \
|
||||
G_RT1_RCA; \
|
||||
\
|
||||
XORQ rDe, rBo; \
|
||||
ROLQ $45, rBo; \
|
||||
MOVQ rBi, rT1; \
|
||||
ANDQ rBo, rT1; \
|
||||
XORQ rBe, rT1; \
|
||||
MOVQ rT1, _ge(oState); \
|
||||
G_RT1_RCE; \
|
||||
\
|
||||
XORQ rDi, rBu; \
|
||||
ROLQ $61, rBu; \
|
||||
MOVQ rBu, rT1; \
|
||||
ORQ rBa, rT1; \
|
||||
XORQ rBo, rT1; \
|
||||
MOVQ rT1, _go(oState); \
|
||||
\
|
||||
ANDQ rBe, rBa; \
|
||||
XORQ rBu, rBa; \
|
||||
MOVQ rBa, _gu(oState); \
|
||||
NOTQ rBu; \
|
||||
G_RBA_RCU; \
|
||||
\
|
||||
ORQ rBu, rBo; \
|
||||
XORQ rBi, rBo; \
|
||||
MOVQ rBo, _gi(oState); \
|
||||
\
|
||||
/* Result k */ \
|
||||
MOVQ _be(iState), rBa; \
|
||||
MOVQ _gi(iState), rBe; \
|
||||
MOVQ _ko(iState), rBi; \
|
||||
MOVQ _mu(iState), rBo; \
|
||||
MOVQ _sa(iState), rBu; \
|
||||
XORQ rDi, rBe; \
|
||||
ROLQ $6, rBe; \
|
||||
XORQ rDo, rBi; \
|
||||
ROLQ $25, rBi; \
|
||||
MOVQ rBe, rT1; \
|
||||
ORQ rBi, rT1; \
|
||||
XORQ rDe, rBa; \
|
||||
ROLQ $1, rBa; \
|
||||
XORQ rBa, rT1; \
|
||||
MOVQ rT1, _ka(oState); \
|
||||
K_RT1_RCA; \
|
||||
\
|
||||
XORQ rDu, rBo; \
|
||||
ROLQ $8, rBo; \
|
||||
MOVQ rBi, rT1; \
|
||||
ANDQ rBo, rT1; \
|
||||
XORQ rBe, rT1; \
|
||||
MOVQ rT1, _ke(oState); \
|
||||
K_RT1_RCE; \
|
||||
\
|
||||
XORQ rDa, rBu; \
|
||||
ROLQ $18, rBu; \
|
||||
NOTQ rBo; \
|
||||
MOVQ rBo, rT1; \
|
||||
ANDQ rBu, rT1; \
|
||||
XORQ rBi, rT1; \
|
||||
MOVQ rT1, _ki(oState); \
|
||||
\
|
||||
MOVQ rBu, rT1; \
|
||||
ORQ rBa, rT1; \
|
||||
XORQ rBo, rT1; \
|
||||
MOVQ rT1, _ko(oState); \
|
||||
\
|
||||
ANDQ rBe, rBa; \
|
||||
XORQ rBu, rBa; \
|
||||
MOVQ rBa, _ku(oState); \
|
||||
K_RBA_RCU; \
|
||||
\
|
||||
/* Result m */ \
|
||||
MOVQ _ga(iState), rBe; \
|
||||
XORQ rDa, rBe; \
|
||||
MOVQ _ke(iState), rBi; \
|
||||
ROLQ $36, rBe; \
|
||||
XORQ rDe, rBi; \
|
||||
MOVQ _bu(iState), rBa; \
|
||||
ROLQ $10, rBi; \
|
||||
MOVQ rBe, rT1; \
|
||||
MOVQ _mi(iState), rBo; \
|
||||
ANDQ rBi, rT1; \
|
||||
XORQ rDu, rBa; \
|
||||
MOVQ _so(iState), rBu; \
|
||||
ROLQ $27, rBa; \
|
||||
XORQ rBa, rT1; \
|
||||
MOVQ rT1, _ma(oState); \
|
||||
M_RT1_RCA; \
|
||||
\
|
||||
XORQ rDi, rBo; \
|
||||
ROLQ $15, rBo; \
|
||||
MOVQ rBi, rT1; \
|
||||
ORQ rBo, rT1; \
|
||||
XORQ rBe, rT1; \
|
||||
MOVQ rT1, _me(oState); \
|
||||
M_RT1_RCE; \
|
||||
\
|
||||
XORQ rDo, rBu; \
|
||||
ROLQ $56, rBu; \
|
||||
NOTQ rBo; \
|
||||
MOVQ rBo, rT1; \
|
||||
ORQ rBu, rT1; \
|
||||
XORQ rBi, rT1; \
|
||||
MOVQ rT1, _mi(oState); \
|
||||
\
|
||||
ORQ rBa, rBe; \
|
||||
XORQ rBu, rBe; \
|
||||
MOVQ rBe, _mu(oState); \
|
||||
\
|
||||
ANDQ rBa, rBu; \
|
||||
XORQ rBo, rBu; \
|
||||
MOVQ rBu, _mo(oState); \
|
||||
M_RBE_RCU; \
|
||||
\
|
||||
/* Result s */ \
|
||||
MOVQ _bi(iState), rBa; \
|
||||
MOVQ _go(iState), rBe; \
|
||||
MOVQ _ku(iState), rBi; \
|
||||
XORQ rDi, rBa; \
|
||||
MOVQ _ma(iState), rBo; \
|
||||
ROLQ $62, rBa; \
|
||||
XORQ rDo, rBe; \
|
||||
MOVQ _se(iState), rBu; \
|
||||
ROLQ $55, rBe; \
|
||||
\
|
||||
XORQ rDu, rBi; \
|
||||
MOVQ rBa, rDu; \
|
||||
XORQ rDe, rBu; \
|
||||
ROLQ $2, rBu; \
|
||||
ANDQ rBe, rDu; \
|
||||
XORQ rBu, rDu; \
|
||||
MOVQ rDu, _su(oState); \
|
||||
\
|
||||
ROLQ $39, rBi; \
|
||||
S_RDU_RCU; \
|
||||
NOTQ rBe; \
|
||||
XORQ rDa, rBo; \
|
||||
MOVQ rBe, rDa; \
|
||||
ANDQ rBi, rDa; \
|
||||
XORQ rBa, rDa; \
|
||||
MOVQ rDa, _sa(oState); \
|
||||
S_RDA_RCA; \
|
||||
\
|
||||
ROLQ $41, rBo; \
|
||||
MOVQ rBi, rDe; \
|
||||
ORQ rBo, rDe; \
|
||||
XORQ rBe, rDe; \
|
||||
MOVQ rDe, _se(oState); \
|
||||
S_RDE_RCE; \
|
||||
\
|
||||
MOVQ rBo, rDi; \
|
||||
MOVQ rBu, rDo; \
|
||||
ANDQ rBu, rDi; \
|
||||
ORQ rBa, rDo; \
|
||||
XORQ rBi, rDi; \
|
||||
XORQ rBo, rDo; \
|
||||
MOVQ rDi, _si(oState); \
|
||||
MOVQ rDo, _so(oState) \
|
||||
|
||||
// func keccakF1600(state *[25]uint64)
|
||||
TEXT ·keccakF1600(SB), 0, $200-8
|
||||
MOVQ state+0(FP), rpState
|
||||
|
||||
// Convert the user state into an internal state
|
||||
NOTQ _be(rpState)
|
||||
NOTQ _bi(rpState)
|
||||
NOTQ _go(rpState)
|
||||
NOTQ _ki(rpState)
|
||||
NOTQ _mi(rpState)
|
||||
NOTQ _sa(rpState)
|
||||
|
||||
// Execute the KeccakF permutation
|
||||
MOVQ _ba(rpState), rCa
|
||||
MOVQ _be(rpState), rCe
|
||||
MOVQ _bu(rpState), rCu
|
||||
|
||||
XORQ _ga(rpState), rCa
|
||||
XORQ _ge(rpState), rCe
|
||||
XORQ _gu(rpState), rCu
|
||||
|
||||
XORQ _ka(rpState), rCa
|
||||
XORQ _ke(rpState), rCe
|
||||
XORQ _ku(rpState), rCu
|
||||
|
||||
XORQ _ma(rpState), rCa
|
||||
XORQ _me(rpState), rCe
|
||||
XORQ _mu(rpState), rCu
|
||||
|
||||
XORQ _sa(rpState), rCa
|
||||
XORQ _se(rpState), rCe
|
||||
MOVQ _si(rpState), rDi
|
||||
MOVQ _so(rpState), rDo
|
||||
XORQ _su(rpState), rCu
|
||||
|
||||
mKeccakRound(rpState, rpStack, $0x0000000000000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
|
||||
mKeccakRound(rpStack, rpState, $0x0000000000008082, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
|
||||
mKeccakRound(rpState, rpStack, $0x800000000000808a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
|
||||
mKeccakRound(rpStack, rpState, $0x8000000080008000, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
|
||||
mKeccakRound(rpState, rpStack, $0x000000000000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
|
||||
mKeccakRound(rpStack, rpState, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
|
||||
mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
|
||||
mKeccakRound(rpStack, rpState, $0x8000000000008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
|
||||
mKeccakRound(rpState, rpStack, $0x000000000000008a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
|
||||
mKeccakRound(rpStack, rpState, $0x0000000000000088, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
|
||||
mKeccakRound(rpState, rpStack, $0x0000000080008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
|
||||
mKeccakRound(rpStack, rpState, $0x000000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
|
||||
mKeccakRound(rpState, rpStack, $0x000000008000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
|
||||
mKeccakRound(rpStack, rpState, $0x800000000000008b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
|
||||
mKeccakRound(rpState, rpStack, $0x8000000000008089, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
|
||||
mKeccakRound(rpStack, rpState, $0x8000000000008003, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
|
||||
mKeccakRound(rpState, rpStack, $0x8000000000008002, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
|
||||
mKeccakRound(rpStack, rpState, $0x8000000000000080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
|
||||
mKeccakRound(rpState, rpStack, $0x000000000000800a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
|
||||
mKeccakRound(rpStack, rpState, $0x800000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
|
||||
mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
|
||||
mKeccakRound(rpStack, rpState, $0x8000000000008080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
|
||||
mKeccakRound(rpState, rpStack, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
|
||||
mKeccakRound(rpStack, rpState, $0x8000000080008008, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP)
|
||||
|
||||
// Revert the internal state to the user state
|
||||
NOTQ _be(rpState)
|
||||
NOTQ _bi(rpState)
|
||||
NOTQ _go(rpState)
|
||||
NOTQ _ki(rpState)
|
||||
NOTQ _mi(rpState)
|
||||
NOTQ _sa(rpState)
|
||||
|
||||
RET
|
19
vendor/golang.org/x/crypto/sha3/register.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.4
|
||||
// +build go1.4
|
||||
|
||||
package sha3
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
)
|
||||
|
||||
func init() {
|
||||
crypto.RegisterHash(crypto.SHA3_224, New224)
|
||||
crypto.RegisterHash(crypto.SHA3_256, New256)
|
||||
crypto.RegisterHash(crypto.SHA3_384, New384)
|
||||
crypto.RegisterHash(crypto.SHA3_512, New512)
|
||||
}
|
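A minimal usage sketch (editorial; it assumes the canonical import path golang.org/x/crypto/sha3 for this vendored package): importing the package for its side effects makes the registration performed by the init above visible through the standard crypto registry.

package main

import (
	"crypto"
	"fmt"

	_ "golang.org/x/crypto/sha3" // blank import runs the init above
)

func main() {
	h := crypto.SHA3_256.New() // resolved via crypto.RegisterHash
	h.Write([]byte("hello"))
	fmt.Printf("%x\n", h.Sum(nil))
}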
193
vendor/golang.org/x/crypto/sha3/sha3.go
generated
vendored
Normal file
@ -0,0 +1,193 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sha3
|
||||
|
||||
// spongeDirection indicates the direction bytes are flowing through the sponge.
|
||||
type spongeDirection int
|
||||
|
||||
const (
|
||||
// spongeAbsorbing indicates that the sponge is absorbing input.
|
||||
spongeAbsorbing spongeDirection = iota
|
||||
// spongeSqueezing indicates that the sponge is being squeezed.
|
||||
spongeSqueezing
|
||||
)
|
||||
|
||||
const (
|
||||
// maxRate is the maximum size of the internal buffer. SHAKE-128
|
||||
// currently needs the largest buffer (a rate of 168 bytes).
|
||||
maxRate = 168
|
||||
)
|
||||
|
||||
type state struct {
|
||||
// Generic sponge components.
|
||||
a [25]uint64 // main state of the hash
|
||||
buf []byte // points into storage
|
||||
rate int // the number of bytes of state to use
|
||||
|
||||
// dsbyte contains the "domain separation" bits and the first bit of
|
||||
// the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the
|
||||
// SHA-3 and SHAKE functions by appending bitstrings to the message.
|
||||
// Using a little-endian bit-ordering convention, these are "01" for SHA-3
|
||||
// and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the
|
||||
// padding rule from section 5.1 is applied to pad the message to a multiple
|
||||
// of the rate, which involves adding a "1" bit, zero or more "0" bits, and
|
||||
// a final "1" bit. We merge the first "1" bit from the padding into dsbyte,
|
||||
// giving 00000110b (0x06) and 00011111b (0x1f).
|
||||
// [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf
|
||||
// "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and
|
||||
// Extendable-Output Functions (May 2014)"
|
||||
dsbyte byte
|
||||
|
||||
storage storageBuf
|
||||
|
||||
// Specific to SHA-3 and SHAKE.
|
||||
outputLen int // the default output size in bytes
|
||||
state spongeDirection // whether the sponge is absorbing or squeezing
|
||||
}
|
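To make the derivation above concrete: SHA-3's "01" suffix is appended LSB-first, filling bits 0 and 1 of dsbyte as 0b00000010 (0x02); the first padding "1" bit then lands in bit 2, giving 0b00000110 = 0x06. For SHAKE, "1111" fills bits 0 through 3 (0x0f) and the padding bit lands in bit 4, giving 0b00011111 = 0x1f.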
||||
|
||||
// BlockSize returns the rate of the sponge underlying this hash function.
|
||||
func (d *state) BlockSize() int { return d.rate }
|
||||
|
||||
// Size returns the output size of the hash function in bytes.
|
||||
func (d *state) Size() int { return d.outputLen }
|
||||
|
||||
// Reset clears the internal state by zeroing the sponge state and
|
||||
// the byte buffer, and setting the sponge direction to absorbing.
|
||||
func (d *state) Reset() {
|
||||
// Zero the permutation's state.
|
||||
for i := range d.a {
|
||||
d.a[i] = 0
|
||||
}
|
||||
d.state = spongeAbsorbing
|
||||
d.buf = d.storage.asBytes()[:0]
|
||||
}
|
||||
|
||||
func (d *state) clone() *state {
|
||||
ret := *d
|
||||
if ret.state == spongeAbsorbing {
|
||||
ret.buf = ret.storage.asBytes()[:len(ret.buf)]
|
||||
} else {
|
||||
ret.buf = ret.storage.asBytes()[d.rate-cap(d.buf) : d.rate]
|
||||
}
|
||||
|
||||
return &ret
|
||||
}
|
||||
|
||||
// permute applies the KeccakF-1600 permutation. It handles
|
||||
// any input-output buffering.
|
||||
func (d *state) permute() {
|
||||
switch d.state {
|
||||
case spongeAbsorbing:
|
||||
// If we're absorbing, we need to xor the input into the state
|
||||
// before applying the permutation.
|
||||
xorIn(d, d.buf)
|
||||
d.buf = d.storage.asBytes()[:0]
|
||||
keccakF1600(&d.a)
|
||||
case spongeSqueezing:
|
||||
// If we're squeezing, we need to apply the permutation before
|
||||
// copying more output.
|
||||
keccakF1600(&d.a)
|
||||
d.buf = d.storage.asBytes()[:d.rate]
|
||||
copyOut(d, d.buf)
|
||||
}
|
||||
}
|
||||
|
||||
// padAndPermute appends the domain separation bits in dsbyte, applies
|
||||
// the multi-bitrate 10..1 padding rule, and permutes the state.
|
||||
func (d *state) padAndPermute(dsbyte byte) {
|
||||
if d.buf == nil {
|
||||
d.buf = d.storage.asBytes()[:0]
|
||||
}
|
||||
// Pad with this instance's domain-separator bits. We know that there's
|
||||
// at least one byte of space in d.buf because, if it were full,
|
||||
// permute would have been called to empty it. dsbyte also contains the
|
||||
// first one bit for the padding. See the comment in the state struct.
|
||||
d.buf = append(d.buf, dsbyte)
|
||||
zerosStart := len(d.buf)
|
||||
d.buf = d.storage.asBytes()[:d.rate]
|
||||
for i := zerosStart; i < d.rate; i++ {
|
||||
d.buf[i] = 0
|
||||
}
|
||||
// This adds the final one bit for the padding. Because of the way that
|
||||
// bits are numbered from the LSB upwards, the final bit is the MSB of
|
||||
// the last byte.
|
||||
d.buf[d.rate-1] ^= 0x80
|
||||
// Apply the permutation
|
||||
d.permute()
|
||||
d.state = spongeSqueezing
|
||||
d.buf = d.storage.asBytes()[:d.rate]
|
||||
copyOut(d, d.buf)
|
||||
}
|
||||
|
||||
// Write absorbs more data into the hash's state. It panics if more data
|
||||
// is written after output has been read from the sponge.
|
||||
func (d *state) Write(p []byte) (written int, err error) {
|
||||
if d.state != spongeAbsorbing {
|
||||
panic("sha3: write to sponge after read")
|
||||
}
|
||||
if d.buf == nil {
|
||||
d.buf = d.storage.asBytes()[:0]
|
||||
}
|
||||
written = len(p)
|
||||
|
||||
for len(p) > 0 {
|
||||
if len(d.buf) == 0 && len(p) >= d.rate {
|
||||
// The fast path: absorb a full rate's worth of input and apply the permutation.
|
||||
xorIn(d, p[:d.rate])
|
||||
p = p[d.rate:]
|
||||
keccakF1600(&d.a)
|
||||
} else {
|
||||
// The slow path; buffer the input until we can fill the sponge, and then xor it in.
|
||||
todo := d.rate - len(d.buf)
|
||||
if todo > len(p) {
|
||||
todo = len(p)
|
||||
}
|
||||
d.buf = append(d.buf, p[:todo]...)
|
||||
p = p[todo:]
|
||||
|
||||
// If the sponge is full, apply the permutation.
|
||||
if len(d.buf) == d.rate {
|
||||
d.permute()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Read squeezes an arbitrary number of bytes from the sponge.
|
||||
func (d *state) Read(out []byte) (n int, err error) {
|
||||
// If we're still absorbing, pad and apply the permutation.
|
||||
if d.state == spongeAbsorbing {
|
||||
d.padAndPermute(d.dsbyte)
|
||||
}
|
||||
|
||||
n = len(out)
|
||||
|
||||
// Now, do the squeezing.
|
||||
for len(out) > 0 {
|
||||
n := copy(out, d.buf)
|
||||
d.buf = d.buf[n:]
|
||||
out = out[n:]
|
||||
|
||||
// Apply the permutation if we've squeezed the sponge dry.
|
||||
if len(d.buf) == 0 {
|
||||
d.permute()
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Sum applies padding to the hash state and then squeezes out the desired
|
||||
// number of output bytes.
|
||||
func (d *state) Sum(in []byte) []byte {
|
||||
// Make a copy of the original hash so that the caller can keep writing
|
||||
// and summing.
|
||||
dup := d.clone()
|
||||
hash := make([]byte, dup.outputLen)
|
||||
dup.Read(hash)
|
||||
return append(in, hash...)
|
||||
}
|
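A short sketch of the clone-on-Sum behaviour above (editorial; it uses the package's New256 constructor, which is referenced by register.go but defined elsewhere in this vendor tree, and assumes the canonical golang.org/x/crypto/sha3 import path).

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	d := sha3.New256()
	d.Write([]byte("abc"))
	first := d.Sum(nil) // Sum works on a clone, so the sponge keeps absorbing
	d.Write([]byte("def"))
	second := d.Sum(nil) // digest of "abcdef", not of "def"
	fmt.Printf("%x\n%x\n", first, second)
}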
285
vendor/golang.org/x/crypto/sha3/sha3_s390x.go
generated
vendored
Normal file
@ -0,0 +1,285 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gc && !purego
|
||||
// +build gc,!purego
|
||||
|
||||
package sha3
|
||||
|
||||
// This file contains code for using the 'compute intermediate
|
||||
// message digest' (KIMD) and 'compute last message digest' (KLMD)
|
||||
// instructions to compute SHA-3 and SHAKE hashes on IBM Z.
|
||||
|
||||
import (
|
||||
"hash"
|
||||
|
||||
"golang.org/x/sys/cpu"
|
||||
)
|
||||
|
||||
// codes represent 7-bit KIMD/KLMD function codes as defined in
|
||||
// the Principles of Operation.
|
||||
type code uint64
|
||||
|
||||
const (
|
||||
// function codes for KIMD/KLMD
|
||||
sha3_224 code = 32
|
||||
sha3_256 = 33
|
||||
sha3_384 = 34
|
||||
sha3_512 = 35
|
||||
shake_128 = 36
|
||||
shake_256 = 37
|
||||
nopad = 0x100
|
||||
)
|
||||
|
||||
// kimd is a wrapper for the 'compute intermediate message digest' instruction.
|
||||
// src must be a multiple of the rate for the given function code.
|
||||
//go:noescape
|
||||
func kimd(function code, chain *[200]byte, src []byte)
|
||||
|
||||
// klmd is a wrapper for the 'compute last message digest' instruction.
|
||||
// src padding is handled by the instruction.
|
||||
//go:noescape
|
||||
func klmd(function code, chain *[200]byte, dst, src []byte)
|
||||
|
||||
type asmState struct {
|
||||
a [200]byte // 1600 bit state
|
||||
buf []byte // care must be taken to ensure cap(buf) is a multiple of rate
|
||||
rate int // equivalent to block size
|
||||
storage [3072]byte // underlying storage for buf
|
||||
outputLen int // output length if fixed, 0 if not
|
||||
function code // KIMD/KLMD function code
|
||||
state spongeDirection // whether the sponge is absorbing or squeezing
|
||||
}
|
||||
|
||||
func newAsmState(function code) *asmState {
|
||||
var s asmState
|
||||
s.function = function
|
||||
switch function {
|
||||
case sha3_224:
|
||||
s.rate = 144
|
||||
s.outputLen = 28
|
||||
case sha3_256:
|
||||
s.rate = 136
|
||||
s.outputLen = 32
|
||||
case sha3_384:
|
||||
s.rate = 104
|
||||
s.outputLen = 48
|
||||
case sha3_512:
|
||||
s.rate = 72
|
||||
s.outputLen = 64
|
||||
case shake_128:
|
||||
s.rate = 168
|
||||
case shake_256:
|
||||
s.rate = 136
|
||||
default:
|
||||
panic("sha3: unrecognized function code")
|
||||
}
|
||||
|
||||
// limit s.buf size to a multiple of s.rate
|
||||
s.resetBuf()
|
||||
return &s
|
||||
}
|
||||
|
||||
func (s *asmState) clone() *asmState {
|
||||
c := *s
|
||||
c.buf = c.storage[:len(s.buf):cap(s.buf)]
|
||||
return &c
|
||||
}
|
||||
|
||||
// copyIntoBuf copies b into buf. It will panic if there is not enough space to
|
||||
// store all of b.
|
||||
func (s *asmState) copyIntoBuf(b []byte) {
|
||||
bufLen := len(s.buf)
|
||||
s.buf = s.buf[:len(s.buf)+len(b)]
|
||||
copy(s.buf[bufLen:], b)
|
||||
}
|
||||
|
||||
// resetBuf points buf at storage, sets the length to 0 and sets cap to be a
|
||||
// multiple of the rate.
|
||||
func (s *asmState) resetBuf() {
|
||||
max := (cap(s.storage) / s.rate) * s.rate
|
||||
s.buf = s.storage[:0:max]
|
||||
}
|
||||
|
||||
// Write (via the embedded io.Writer interface) adds more data to the running hash.
|
||||
// It never returns an error.
|
||||
func (s *asmState) Write(b []byte) (int, error) {
|
||||
if s.state != spongeAbsorbing {
|
||||
panic("sha3: write to sponge after read")
|
||||
}
|
||||
length := len(b)
|
||||
for len(b) > 0 {
|
||||
if len(s.buf) == 0 && len(b) >= cap(s.buf) {
|
||||
// Hash the data directly and push any remaining bytes
|
||||
// into the buffer.
|
||||
remainder := len(b) % s.rate
|
||||
kimd(s.function, &s.a, b[:len(b)-remainder])
|
||||
if remainder != 0 {
|
||||
s.copyIntoBuf(b[len(b)-remainder:])
|
||||
}
|
||||
return length, nil
|
||||
}
|
||||
|
||||
if len(s.buf) == cap(s.buf) {
|
||||
// flush the buffer
|
||||
kimd(s.function, &s.a, s.buf)
|
||||
s.buf = s.buf[:0]
|
||||
}
|
||||
|
||||
// copy as much as we can into the buffer
|
||||
n := len(b)
|
||||
if len(b) > cap(s.buf)-len(s.buf) {
|
||||
n = cap(s.buf) - len(s.buf)
|
||||
}
|
||||
s.copyIntoBuf(b[:n])
|
||||
b = b[n:]
|
||||
}
|
||||
return length, nil
|
||||
}
|
||||
|
||||
// Read squeezes an arbitrary number of bytes from the sponge.
|
||||
func (s *asmState) Read(out []byte) (n int, err error) {
|
||||
n = len(out)
|
||||
|
||||
// need to pad if we were absorbing
|
||||
if s.state == spongeAbsorbing {
|
||||
s.state = spongeSqueezing
|
||||
|
||||
// write hash directly into out if possible
|
||||
if len(out)%s.rate == 0 {
|
||||
klmd(s.function, &s.a, out, s.buf) // len(out) may be 0
|
||||
s.buf = s.buf[:0]
|
||||
return
|
||||
}
|
||||
|
||||
// write hash into buffer
|
||||
max := cap(s.buf)
|
||||
if max > len(out) {
|
||||
max = (len(out)/s.rate)*s.rate + s.rate
|
||||
}
|
||||
klmd(s.function, &s.a, s.buf[:max], s.buf)
|
||||
s.buf = s.buf[:max]
|
||||
}
|
||||
|
||||
for len(out) > 0 {
|
||||
// flush the buffer
|
||||
if len(s.buf) != 0 {
|
||||
c := copy(out, s.buf)
|
||||
out = out[c:]
|
||||
s.buf = s.buf[c:]
|
||||
continue
|
||||
}
|
||||
|
||||
// write hash directly into out if possible
|
||||
if len(out)%s.rate == 0 {
|
||||
klmd(s.function|nopad, &s.a, out, nil)
|
||||
return
|
||||
}
|
||||
|
||||
// write hash into buffer
|
||||
s.resetBuf()
|
||||
if cap(s.buf) > len(out) {
|
||||
s.buf = s.buf[:(len(out)/s.rate)*s.rate+s.rate]
|
||||
}
|
||||
klmd(s.function|nopad, &s.a, s.buf, nil)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Sum appends the current hash to b and returns the resulting slice.
|
||||
// It does not change the underlying hash state.
|
||||
func (s *asmState) Sum(b []byte) []byte {
|
||||
if s.outputLen == 0 {
|
||||
panic("sha3: cannot call Sum on SHAKE functions")
|
||||
}
|
||||
|
||||
// Copy the state to preserve the original.
|
||||
a := s.a
|
||||
|
||||
// Hash the buffer. Note that we don't clear it because we
|
||||
// aren't updating the state.
|
||||
klmd(s.function, &a, nil, s.buf)
|
||||
return append(b, a[:s.outputLen]...)
|
||||
}
|
||||
|
||||
// Reset resets the Hash to its initial state.
|
||||
func (s *asmState) Reset() {
|
||||
for i := range s.a {
|
||||
s.a[i] = 0
|
||||
}
|
||||
s.resetBuf()
|
||||
s.state = spongeAbsorbing
|
||||
}
|
||||
|
||||
// Size returns the number of bytes Sum will return.
|
||||
func (s *asmState) Size() int {
|
||||
return s.outputLen
|
||||
}
|
||||
|
||||
// BlockSize returns the hash's underlying block size.
|
||||
// The Write method must be able to accept any amount
|
||||
// of data, but it may operate more efficiently if all writes
|
||||
// are a multiple of the block size.
|
||||
func (s *asmState) BlockSize() int {
|
||||
return s.rate
|
||||
}
|
||||
|
||||
// Clone returns a copy of the ShakeHash in its current state.
|
||||
func (s *asmState) Clone() ShakeHash {
|
||||
return s.clone()
|
||||
}
|
||||
|
||||
// new224Asm returns an assembly implementation of SHA3-224 if available,
|
||||
// otherwise it returns nil.
|
||||
func new224Asm() hash.Hash {
|
||||
if cpu.S390X.HasSHA3 {
|
||||
return newAsmState(sha3_224)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// new256Asm returns an assembly implementation of SHA3-256 if available,
|
||||
// otherwise it returns nil.
|
||||
func new256Asm() hash.Hash {
|
||||
if cpu.S390X.HasSHA3 {
|
||||
return newAsmState(sha3_256)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// new384Asm returns an assembly implementation of SHA3-384 if available,
|
||||
// otherwise it returns nil.
|
||||
func new384Asm() hash.Hash {
|
||||
if cpu.S390X.HasSHA3 {
|
||||
return newAsmState(sha3_384)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// new512Asm returns an assembly implementation of SHA3-512 if available,
|
||||
// otherwise it returns nil.
|
||||
func new512Asm() hash.Hash {
|
||||
if cpu.S390X.HasSHA3 {
|
||||
return newAsmState(sha3_512)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// newShake128Asm returns an assembly implementation of SHAKE-128 if available,
|
||||
// otherwise it returns nil.
|
||||
func newShake128Asm() ShakeHash {
|
||||
if cpu.S390X.HasSHA3 {
|
||||
return newAsmState(shake_128)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// newShake256Asm returns an assembly implementation of SHAKE-256 if available,
|
||||
// otherwise it returns nil.
|
||||
func newShake256Asm() ShakeHash {
|
||||
if cpu.S390X.HasSHA3 {
|
||||
return newAsmState(shake_256)
|
||||
}
|
||||
return nil
|
||||
}
|
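A tiny editorial sketch of the feature check these constructors rely on, using the same golang.org/x/sys/cpu flag imported above; on machines without the SHA-3 facility they all return nil and callers fall back to the generic sponge.

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// Mirrors the guard used by new224Asm and friends: the KIMD/KLMD path
	// is only taken when the s390x SHA-3 facility is present.
	fmt.Println("s390x SHA-3 facility available:", cpu.S390X.HasSHA3)
}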
34
vendor/golang.org/x/crypto/sha3/sha3_s390x.s
generated
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gc && !purego
|
||||
// +build gc,!purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// func kimd(function code, chain *[200]byte, src []byte)
|
||||
TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40
|
||||
MOVD function+0(FP), R0
|
||||
MOVD chain+8(FP), R1
|
||||
LMG src+16(FP), R2, R3 // R2=base, R3=len
|
||||
|
||||
continue:
|
||||
WORD $0xB93E0002 // KIMD --, R2
|
||||
BVS continue // continue if interrupted
|
||||
MOVD $0, R0 // reset R0 for pre-go1.8 compilers
|
||||
RET
|
||||
|
||||
// func klmd(function code, chain *[200]byte, dst, src []byte)
|
||||
TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64
|
||||
// TODO: SHAKE support
|
||||
MOVD function+0(FP), R0
|
||||
MOVD chain+8(FP), R1
|
||||
LMG dst+16(FP), R2, R3 // R2=base, R3=len
|
||||
LMG src+40(FP), R4, R5 // R4=base, R5=len
|
||||
|
||||
continue:
|
||||
WORD $0xB93F0024 // KLMD R2, R4
|
||||
BVS continue // continue if interrupted
|
||||
MOVD $0, R0 // reset R0 for pre-go1.8 compilers
|
||||
RET
|
173
vendor/golang.org/x/crypto/sha3/shake.go
generated
vendored
Normal file
@ -0,0 +1,173 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sha3
|
||||
|
||||
// This file defines the ShakeHash interface, and provides
|
||||
// functions for creating SHAKE and cSHAKE instances, as well as utility
|
||||
// functions for hashing bytes to arbitrary-length output.
|
||||
//
|
||||
//
|
||||
// The SHAKE implementation is based on FIPS PUB 202 [1].
|
||||
// The cSHAKE implementation is based on NIST SP 800-185 [2].
|
||||
//
|
||||
// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
|
||||
// [2] https://doi.org/10.6028/NIST.SP.800-185
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
)
|
||||
|
||||
// ShakeHash defines the interface to hash functions that
|
||||
// support arbitrary-length output.
|
||||
type ShakeHash interface {
|
||||
// Write absorbs more data into the hash's state. It panics if input is
|
||||
// written to it after output has been read from it.
|
||||
io.Writer
|
||||
|
||||
// Read reads more output from the hash; reading affects the hash's
|
||||
// state. (ShakeHash.Read is thus very different from Hash.Sum)
|
||||
// It never returns an error.
|
||||
io.Reader
|
||||
|
||||
// Clone returns a copy of the ShakeHash in its current state.
|
||||
Clone() ShakeHash
|
||||
|
||||
// Reset resets the ShakeHash to its initial state.
|
||||
Reset()
|
||||
}
|
||||
|
||||
// cSHAKE specific context
|
||||
type cshakeState struct {
|
||||
*state // SHA-3 state context and Read/Write operations
|
||||
|
||||
// initBlock is the cSHAKE specific initialization set of bytes. It is initialized
|
||||
// by newCShake function and stores concatenation of N followed by S, encoded
|
||||
// by the method specified in 3.3 of [1].
|
||||
// It is stored here in order for Reset() to be able to put context into
|
||||
// initial state.
|
||||
initBlock []byte
|
||||
}
|
||||
|
||||
// Consts for configuring initial SHA-3 state
|
||||
const (
|
||||
dsbyteShake = 0x1f
|
||||
dsbyteCShake = 0x04
|
||||
rate128 = 168
|
||||
rate256 = 136
|
||||
)
|
||||
|
||||
func bytepad(input []byte, w int) []byte {
|
||||
// leftEncode always returns max 9 bytes
|
||||
buf := make([]byte, 0, 9+len(input)+w)
|
||||
buf = append(buf, leftEncode(uint64(w))...)
|
||||
buf = append(buf, input...)
|
||||
padlen := w - (len(buf) % w)
|
||||
return append(buf, make([]byte, padlen)...)
|
||||
}
|
||||
|
||||
func leftEncode(value uint64) []byte {
|
||||
var b [9]byte
|
||||
binary.BigEndian.PutUint64(b[1:], value)
|
||||
// Trim all but last leading zero bytes
|
||||
i := byte(1)
|
||||
for i < 8 && b[i] == 0 {
|
||||
i++
|
||||
}
|
||||
// Prepend number of encoded bytes
|
||||
b[i-1] = 9 - i
|
||||
return b[i-1:]
|
||||
}
|
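As a worked example of leftEncode above: leftEncode(168), the SHAKE128 rate that bytepad receives from newCShake, writes 168 big-endian into b[1:], leaving a single non-zero byte 0xa8 at b[8]; the loop stops with i = 8, b[7] is set to 9 - 8 = 1, and the returned slice is the two bytes 0x01 0xa8, that is, the byte count followed by the value.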
||||
|
||||
func newCShake(N, S []byte, rate int, dsbyte byte) ShakeHash {
|
||||
c := cshakeState{state: &state{rate: rate, dsbyte: dsbyte}}
|
||||
|
||||
// leftEncode returns max 9 bytes
|
||||
c.initBlock = make([]byte, 0, 9*2+len(N)+len(S))
|
||||
c.initBlock = append(c.initBlock, leftEncode(uint64(len(N)*8))...)
|
||||
c.initBlock = append(c.initBlock, N...)
|
||||
c.initBlock = append(c.initBlock, leftEncode(uint64(len(S)*8))...)
|
||||
c.initBlock = append(c.initBlock, S...)
|
||||
c.Write(bytepad(c.initBlock, c.rate))
|
||||
return &c
|
||||
}
|
||||
|
||||
// Reset resets the hash to initial state.
|
||||
func (c *cshakeState) Reset() {
|
||||
c.state.Reset()
|
||||
c.Write(bytepad(c.initBlock, c.rate))
|
||||
}
|
||||
|
||||
// Clone returns a copy of the cSHAKE context in its current state.
|
||||
func (c *cshakeState) Clone() ShakeHash {
|
||||
b := make([]byte, len(c.initBlock))
|
||||
copy(b, c.initBlock)
|
||||
return &cshakeState{state: c.clone(), initBlock: b}
|
||||
}
|
||||
|
||||
// Clone returns a copy of the SHAKE context in its current state.
|
||||
func (c *state) Clone() ShakeHash {
|
||||
return c.clone()
|
||||
}
|
||||
|
||||
// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
|
||||
// Its generic security strength is 128 bits against all attacks if at
|
||||
// least 32 bytes of its output are used.
|
||||
func NewShake128() ShakeHash {
|
||||
if h := newShake128Asm(); h != nil {
|
||||
return h
|
||||
}
|
||||
return &state{rate: rate128, dsbyte: dsbyteShake}
|
||||
}
|
||||
|
||||
// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash.
|
||||
// Its generic security strength is 256 bits against all attacks if
|
||||
// at least 64 bytes of its output are used.
|
||||
func NewShake256() ShakeHash {
|
||||
if h := newShake256Asm(); h != nil {
|
||||
return h
|
||||
}
|
||||
return &state{rate: rate256, dsbyte: dsbyteShake}
|
||||
}
|
||||
|
||||
// NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash,
|
||||
// a customizable variant of SHAKE128.
|
||||
// N is used to define functions based on cSHAKE; it can be empty when plain cSHAKE is
|
||||
// desired. S is a customization byte string used for domain separation - two cSHAKE
|
||||
// computations on the same input with different S yield unrelated outputs.
|
||||
// When N and S are both empty, this is equivalent to NewShake128.
|
||||
func NewCShake128(N, S []byte) ShakeHash {
|
||||
if len(N) == 0 && len(S) == 0 {
|
||||
return NewShake128()
|
||||
}
|
||||
return newCShake(N, S, rate128, dsbyteCShake)
|
||||
}
|
||||
|
||||
// NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash,
|
||||
// a customizable variant of SHAKE256.
|
||||
// N is used to define functions based on cSHAKE; it can be empty when plain cSHAKE is
|
||||
// desired. S is a customization byte string used for domain separation - two cSHAKE
|
||||
// computations on the same input with different S yield unrelated outputs.
|
||||
// When N and S are both empty, this is equivalent to NewShake256.
|
||||
func NewCShake256(N, S []byte) ShakeHash {
|
||||
if len(N) == 0 && len(S) == 0 {
|
||||
return NewShake256()
|
||||
}
|
||||
return newCShake(N, S, rate256, dsbyteCShake)
|
||||
}
|
||||
|
||||
// ShakeSum128 writes an arbitrary-length digest of data into hash.
|
||||
func ShakeSum128(hash, data []byte) {
|
||||
h := NewShake128()
|
||||
h.Write(data)
|
||||
h.Read(hash)
|
||||
}
|
||||
|
||||
// ShakeSum256 writes an arbitrary-length digest of data into hash.
|
||||
func ShakeSum256(hash, data []byte) {
|
||||
h := NewShake256()
|
||||
h.Write(data)
|
||||
h.Read(hash)
|
||||
}
|
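A brief usage sketch of the API above (editorial; it assumes the canonical golang.org/x/crypto/sha3 import path for this vendored package).

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	// One-shot helper: fill a 64-byte buffer from SHAKE256.
	out := make([]byte, 64)
	sha3.ShakeSum256(out, []byte("payload"))

	// cSHAKE with a customization string for domain separation.
	h := sha3.NewCShake256(nil, []byte("example key derivation"))
	h.Write([]byte("payload"))
	kdf := make([]byte, 32)
	h.Read(kdf) // writing after this Read would panic, per the ShakeHash docs

	fmt.Printf("%x\n%x\n", out, kdf)
}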
20
vendor/golang.org/x/crypto/sha3/shake_generic.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !gc || purego || !s390x
|
||||
// +build !gc purego !s390x
|
||||
|
||||
package sha3
|
||||
|
||||
// newShake128Asm returns an assembly implementation of SHAKE-128 if available,
|
||||
// otherwise it returns nil.
|
||||
func newShake128Asm() ShakeHash {
|
||||
return nil
|
||||
}
|
||||
|
||||
// newShake256Asm returns an assembly implementation of SHAKE-256 if available,
|
||||
// otherwise it returns nil.
|
||||
func newShake256Asm() ShakeHash {
|
||||
return nil
|
||||
}
|
24
vendor/golang.org/x/crypto/sha3/xor.go
generated
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build (!amd64 && !386 && !ppc64le) || purego
|
||||
// +build !amd64,!386,!ppc64le purego
|
||||
|
||||
package sha3
|
||||
|
||||
// A storageBuf is an aligned array of maxRate bytes.
|
||||
type storageBuf [maxRate]byte
|
||||
|
||||
func (b *storageBuf) asBytes() *[maxRate]byte {
|
||||
return (*[maxRate]byte)(b)
|
||||
}
|
||||
|
||||
var (
|
||||
xorIn = xorInGeneric
|
||||
copyOut = copyOutGeneric
|
||||
xorInUnaligned = xorInGeneric
|
||||
copyOutUnaligned = copyOutGeneric
|
||||
)
|
||||
|
||||
const xorImplementationUnaligned = "generic"
|
28
vendor/golang.org/x/crypto/sha3/xor_generic.go
generated
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sha3
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
// xorInGeneric xors the bytes in buf into the state; it
|
||||
// makes no non-portable assumptions about memory layout
|
||||
// or alignment.
|
||||
func xorInGeneric(d *state, buf []byte) {
|
||||
n := len(buf) / 8
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
a := binary.LittleEndian.Uint64(buf)
|
||||
d.a[i] ^= a
|
||||
buf = buf[8:]
|
||||
}
|
||||
}
|
||||
|
||||
// copyOutGeneric copies uint64s to a byte buffer.
|
||||
func copyOutGeneric(d *state, b []byte) {
|
||||
for i := 0; len(b) >= 8; i++ {
|
||||
binary.LittleEndian.PutUint64(b, d.a[i])
|
||||
b = b[8:]
|
||||
}
|
||||
}
|
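An editorial sketch of the lane mapping xorInGeneric and copyOutGeneric implement: every 8 bytes of rate input become one little-endian uint64 lane of the state.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := []byte{1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0}
	var a [25]uint64
	for i := 0; i < len(buf)/8; i++ {
		a[i] ^= binary.LittleEndian.Uint64(buf[8*i:])
	}
	fmt.Println(a[0], a[1]) // 1 2: each 8-byte group lands in one lane
}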
68
vendor/golang.org/x/crypto/sha3/xor_unaligned.go
generated
vendored
Normal file
@ -0,0 +1,68 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build (amd64 || 386 || ppc64le) && !purego
|
||||
// +build amd64 386 ppc64le
|
||||
// +build !purego
|
||||
|
||||
package sha3
|
||||
|
||||
import "unsafe"
|
||||
|
||||
// A storageBuf is an aligned array of maxRate bytes.
|
||||
type storageBuf [maxRate / 8]uint64
|
||||
|
||||
func (b *storageBuf) asBytes() *[maxRate]byte {
|
||||
return (*[maxRate]byte)(unsafe.Pointer(b))
|
||||
}
|
||||
|
||||
// xorInUnaligned uses unaligned reads and writes to update d.a to contain d.a
|
||||
// XOR buf.
|
||||
func xorInUnaligned(d *state, buf []byte) {
|
||||
n := len(buf)
|
||||
bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))[: n/8 : n/8]
|
||||
if n >= 72 {
|
||||
d.a[0] ^= bw[0]
|
||||
d.a[1] ^= bw[1]
|
||||
d.a[2] ^= bw[2]
|
||||
d.a[3] ^= bw[3]
|
||||
d.a[4] ^= bw[4]
|
||||
d.a[5] ^= bw[5]
|
||||
d.a[6] ^= bw[6]
|
||||
d.a[7] ^= bw[7]
|
||||
d.a[8] ^= bw[8]
|
||||
}
|
||||
if n >= 104 {
|
||||
d.a[9] ^= bw[9]
|
||||
d.a[10] ^= bw[10]
|
||||
d.a[11] ^= bw[11]
|
||||
d.a[12] ^= bw[12]
|
||||
}
|
||||
if n >= 136 {
|
||||
d.a[13] ^= bw[13]
|
||||
d.a[14] ^= bw[14]
|
||||
d.a[15] ^= bw[15]
|
||||
d.a[16] ^= bw[16]
|
||||
}
|
||||
if n >= 144 {
|
||||
d.a[17] ^= bw[17]
|
||||
}
|
||||
if n >= 168 {
|
||||
d.a[18] ^= bw[18]
|
||||
d.a[19] ^= bw[19]
|
||||
d.a[20] ^= bw[20]
|
||||
}
|
||||
}
|
||||
|
||||
func copyOutUnaligned(d *state, buf []byte) {
|
||||
ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0]))
|
||||
copy(buf, ab[:])
|
||||
}
|
||||
|
||||
var (
|
||||
xorIn = xorInUnaligned
|
||||
copyOut = copyOutUnaligned
|
||||
)
|
||||
|
||||
const xorImplementationUnaligned = "unaligned"
|
3
vendor/golang.org/x/net/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
# This source code refers to The Go Authors for copyright purposes.
|
||||
# The master list of authors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/AUTHORS.
|
3
vendor/golang.org/x/net/CONTRIBUTORS
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
# This source code was written by the Go contributors.
|
||||
# The master list of contributors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/CONTRIBUTORS.
|
27
vendor/golang.org/x/net/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
22
vendor/golang.org/x/net/PATENTS
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
Additional IP Rights Grant (Patents)
|
||||
|
||||
"This implementation" means the copyrightable works distributed by
|
||||
Google as part of the Go project.
|
||||
|
||||
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||
patent license to make, have made, use, offer to sell, sell, import,
|
||||
transfer and otherwise run, modify and propagate the contents of this
|
||||
implementation of Go, where such license applies only to those patent
|
||||
claims, both currently owned or controlled by Google and acquired in
|
||||
the future, licensable by Google that are necessarily infringed by this
|
||||
implementation of Go. This grant does not include claims that would be
|
||||
infringed only as a consequence of further modification of this
|
||||
implementation. If you or your agent or exclusive licensee institute or
|
||||
order or agree to the institution of patent litigation against any
|
||||
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||
that this implementation of Go or any code incorporated within this
|
||||
implementation of Go constitutes direct or contributory patent
|
||||
infringement, or inducement of patent infringement, then any patent
|
||||
rights granted to you under this License for this implementation of Go
|
||||
shall terminate as of the date such litigation is filed.
|
770
vendor/golang.org/x/net/idna/idna10.0.0.go
generated
vendored
Normal file
@ -0,0 +1,770 @@
|
||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.10
|
||||
// +build go1.10
|
||||
|
||||
// Package idna implements IDNA2008 using the compatibility processing
|
||||
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
|
||||
// deal with the transition from IDNA2003.
|
||||
//
|
||||
// IDNA2008 (Internationalized Domain Names for Applications) is defined in RFC
|
||||
// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
|
||||
// UTS #46 is defined in https://www.unicode.org/reports/tr46.
|
||||
// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the
|
||||
// differences between these two standards.
|
||||
package idna // import "golang.org/x/net/idna"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/text/secure/bidirule"
|
||||
"golang.org/x/text/unicode/bidi"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
|
||||
// NOTE: Unlike common practice in Go APIs, the functions will return a
|
||||
// sanitized domain name in case of errors. Browsers sometimes use a partially
|
||||
// evaluated string as lookup.
|
||||
// TODO: the current error handling is, in my opinion, the least opinionated.
|
||||
// Other strategies are also viable, though:
|
||||
// Option 1) Return an empty string in case of error, but allow the user to
|
||||
// specify explicitly which errors to ignore.
|
||||
// Option 2) Return the partially evaluated string if it is itself a valid
|
||||
// string, otherwise return the empty string in case of error.
|
||||
// Option 3) Option 1 and 2.
|
||||
// Option 4) Always return an empty string for now and implement Option 1 as
|
||||
// needed, and document that the return string may not be empty in case of
|
||||
// error in the future.
|
||||
// I think Option 1 is best, but it is quite opinionated.
|
||||
|
||||
// ToASCII is a wrapper for Punycode.ToASCII.
|
||||
func ToASCII(s string) (string, error) {
|
||||
return Punycode.process(s, true)
|
||||
}
|
||||
|
||||
// ToUnicode is a wrapper for Punycode.ToUnicode.
|
||||
func ToUnicode(s string) (string, error) {
|
||||
return Punycode.process(s, false)
|
||||
}
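A minimal usage sketch (added for illustration; not part of the vendored file) showing how these package-level wrappers are typically called. The example domain mirrors the one used in the doc comments further down.

package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	// Unicode form to ASCII (Punycode) form.
	a, err := idna.ToASCII("bücher.example.com")
	fmt.Println(a, err) // xn--bcher-kva.example.com <nil>

	// And back again.
	u, err := idna.ToUnicode("xn--bcher-kva.example.com")
	fmt.Println(u, err) // bücher.example.com <nil>
}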
|
||||
|
||||
// An Option configures a Profile at creation time.
|
||||
type Option func(*options)
|
||||
|
||||
// Transitional sets a Profile to use the Transitional mapping as defined in UTS
|
||||
// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
|
||||
// transitional mapping provides a compromise between IDNA2003 and IDNA2008
|
||||
// compatibility. It is used by most browsers when resolving domain names. This
|
||||
// option is only meaningful if combined with MapForLookup.
|
||||
func Transitional(transitional bool) Option {
|
||||
return func(o *options) { o.transitional = transitional }
|
||||
}
|
||||
|
||||
// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
|
||||
// are longer than allowed by the RFC.
|
||||
//
|
||||
// This option corresponds to the VerifyDnsLength flag in UTS #46.
|
||||
func VerifyDNSLength(verify bool) Option {
|
||||
return func(o *options) { o.verifyDNSLength = verify }
|
||||
}
|
||||
|
||||
// RemoveLeadingDots removes leading label separators. Leading runes that map to
|
||||
// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
|
||||
func RemoveLeadingDots(remove bool) Option {
|
||||
return func(o *options) { o.removeLeadingDots = remove }
|
||||
}
|
||||
|
||||
// ValidateLabels sets whether to check the mandatory label validation criteria
|
||||
// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
|
||||
// of hyphens ('-'), normalization, validity of runes, and the context rules.
|
||||
// In particular, ValidateLabels also sets the CheckHyphens and CheckJoiners flags
|
||||
// in UTS #46.
|
||||
func ValidateLabels(enable bool) Option {
|
||||
return func(o *options) {
|
||||
// Don't override existing mappings, but set one that at least checks
|
||||
// normalization if it is not set.
|
||||
if o.mapping == nil && enable {
|
||||
o.mapping = normalize
|
||||
}
|
||||
o.trie = trie
|
||||
o.checkJoiners = enable
|
||||
o.checkHyphens = enable
|
||||
if enable {
|
||||
o.fromPuny = validateFromPunycode
|
||||
} else {
|
||||
o.fromPuny = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// CheckHyphens sets whether to check for correct use of hyphens ('-') in
|
||||
// labels. Most web browsers do not have this option set, since labels such as
|
||||
// "r3---sn-apo3qvuoxuxbt-j5pe" are in common use.
|
||||
//
|
||||
// This option corresponds to the CheckHyphens flag in UTS #46.
|
||||
func CheckHyphens(enable bool) Option {
|
||||
return func(o *options) { o.checkHyphens = enable }
|
||||
}
|
||||
|
||||
// CheckJoiners sets whether to check the ContextJ rules as defined in Appendix
|
||||
// A of RFC 5892, concerning the use of joiner runes.
|
||||
//
|
||||
// This option corresponds to the CheckJoiners flag in UTS #46.
|
||||
func CheckJoiners(enable bool) Option {
|
||||
return func(o *options) {
|
||||
o.trie = trie
|
||||
o.checkJoiners = enable
|
||||
}
|
||||
}
|
||||
|
||||
// StrictDomainName limits the set of permissible ASCII characters to those
|
||||
// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
|
||||
// hyphen). This is set by default for MapForLookup and ValidateForRegistration,
|
||||
// but is only useful if ValidateLabels is set.
|
||||
//
|
||||
// This option is useful, for instance, for browsers that allow characters
|
||||
// outside this range, for example a '_' (U+005F LOW LINE). See
|
||||
// http://www.rfc-editor.org/std/std3.txt for more details.
|
||||
//
|
||||
// This option corresponds to the UseSTD3ASCIIRules flag in UTS #46.
|
||||
func StrictDomainName(use bool) Option {
|
||||
return func(o *options) { o.useSTD3Rules = use }
|
||||
}
|
||||
|
||||
// NOTE: the following options pull in tables. The tables should not be linked
|
||||
// in as long as the options are not used.
|
||||
|
||||
// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
|
||||
// that relies on proper validation of labels should include this rule.
|
||||
//
|
||||
// This option corresponds to the CheckBidi flag in UTS #46.
|
||||
func BidiRule() Option {
|
||||
return func(o *options) { o.bidirule = bidirule.ValidString }
|
||||
}
|
||||
|
||||
// ValidateForRegistration sets validation options to verify that a given IDN is
|
||||
// properly formatted for registration as defined by Section 4 of RFC 5891.
|
||||
func ValidateForRegistration() Option {
|
||||
return func(o *options) {
|
||||
o.mapping = validateRegistration
|
||||
StrictDomainName(true)(o)
|
||||
ValidateLabels(true)(o)
|
||||
VerifyDNSLength(true)(o)
|
||||
BidiRule()(o)
|
||||
}
|
||||
}
|
||||
|
||||
// MapForLookup sets validation and mapping options such that a given IDN is
|
||||
// transformed for domain name lookup according to the requirements set out in
|
||||
// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
|
||||
// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
|
||||
// to add this check.
|
||||
//
|
||||
// The mappings include normalization and mapping case, width and other
|
||||
// compatibility mappings.
|
||||
func MapForLookup() Option {
|
||||
return func(o *options) {
|
||||
o.mapping = validateAndMap
|
||||
StrictDomainName(true)(o)
|
||||
ValidateLabels(true)(o)
|
||||
}
|
||||
}
|
||||
|
||||
type options struct {
|
||||
transitional bool
|
||||
useSTD3Rules bool
|
||||
checkHyphens bool
|
||||
checkJoiners bool
|
||||
verifyDNSLength bool
|
||||
removeLeadingDots bool
|
||||
|
||||
trie *idnaTrie
|
||||
|
||||
// fromPuny calls validation rules when converting A-labels to U-labels.
|
||||
fromPuny func(p *Profile, s string) error
|
||||
|
||||
// mapping implements a validation and mapping step as defined in RFC 5895
|
||||
// or UTS 46, tailored to, for example, domain registration or lookup.
|
||||
mapping func(p *Profile, s string) (mapped string, isBidi bool, err error)
|
||||
|
||||
// bidirule, if specified, checks whether s conforms to the Bidi Rule
|
||||
// defined in RFC 5893.
|
||||
bidirule func(s string) bool
|
||||
}
|
||||
|
||||
// A Profile defines the configuration of an IDNA mapper.
|
||||
type Profile struct {
|
||||
options
|
||||
}
|
||||
|
||||
func apply(o *options, opts []Option) {
|
||||
for _, f := range opts {
|
||||
f(o)
|
||||
}
|
||||
}
|
||||
|
||||
// New creates a new Profile.
|
||||
//
|
||||
// With no options, the returned Profile is the most permissive and equals the
|
||||
// Punycode Profile. Options can be passed to further restrict the Profile. The
|
||||
// MapForLookup and ValidateForRegistration options set a collection of options,
|
||||
// for lookup and registration purposes respectively, which can be tailored by
|
||||
// adding more fine-grained options, where later options override earlier
|
||||
// options.
|
||||
func New(o ...Option) *Profile {
|
||||
p := &Profile{}
|
||||
apply(&p.options, o)
|
||||
return p
|
||||
}
|
||||
|
||||
// ToASCII converts a domain or domain label to its ASCII form. For example,
|
||||
// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
|
||||
// ToASCII("golang") is "golang". If an error is encountered it will return
|
||||
// an error and a (partially) processed result.
|
||||
func (p *Profile) ToASCII(s string) (string, error) {
|
||||
return p.process(s, true)
|
||||
}
|
||||
|
||||
// ToUnicode converts a domain or domain label to its Unicode form. For example,
|
||||
// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
|
||||
// ToUnicode("golang") is "golang". If an error is encountered it will return
|
||||
// an error and a (partially) processed result.
|
||||
func (p *Profile) ToUnicode(s string) (string, error) {
|
||||
pp := *p
|
||||
pp.transitional = false
|
||||
return pp.process(s, false)
|
||||
}
|
||||
|
||||
// String reports a string with a description of the profile for debugging
|
||||
// purposes. The string format may change with different versions.
|
||||
func (p *Profile) String() string {
|
||||
s := ""
|
||||
if p.transitional {
|
||||
s = "Transitional"
|
||||
} else {
|
||||
s = "NonTransitional"
|
||||
}
|
||||
if p.useSTD3Rules {
|
||||
s += ":UseSTD3Rules"
|
||||
}
|
||||
if p.checkHyphens {
|
||||
s += ":CheckHyphens"
|
||||
}
|
||||
if p.checkJoiners {
|
||||
s += ":CheckJoiners"
|
||||
}
|
||||
if p.verifyDNSLength {
|
||||
s += ":VerifyDNSLength"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
var (
|
||||
// Punycode is a Profile that does raw punycode processing with a minimum
|
||||
// of validation.
|
||||
Punycode *Profile = punycode
|
||||
|
||||
// Lookup is the recommended profile for looking up domain names, according
|
||||
// to Section 5 of RFC 5891. The exact configuration of this profile may
|
||||
// change over time.
|
||||
Lookup *Profile = lookup
|
||||
|
||||
// Display is the recommended profile for displaying domain names.
|
||||
// The configuration of this profile may change over time.
|
||||
Display *Profile = display
|
||||
|
||||
// Registration is the recommended profile for checking whether a given
|
||||
// IDN is valid for registration, according to Section 4 of RFC 5891.
|
||||
Registration *Profile = registration
|
||||
|
||||
punycode = &Profile{}
|
||||
lookup = &Profile{options{
|
||||
transitional: true,
|
||||
useSTD3Rules: true,
|
||||
checkHyphens: true,
|
||||
checkJoiners: true,
|
||||
trie: trie,
|
||||
fromPuny: validateFromPunycode,
|
||||
mapping: validateAndMap,
|
||||
bidirule: bidirule.ValidString,
|
||||
}}
|
||||
display = &Profile{options{
|
||||
useSTD3Rules: true,
|
||||
checkHyphens: true,
|
||||
checkJoiners: true,
|
||||
trie: trie,
|
||||
fromPuny: validateFromPunycode,
|
||||
mapping: validateAndMap,
|
||||
bidirule: bidirule.ValidString,
|
||||
}}
|
||||
registration = &Profile{options{
|
||||
useSTD3Rules: true,
|
||||
verifyDNSLength: true,
|
||||
checkHyphens: true,
|
||||
checkJoiners: true,
|
||||
trie: trie,
|
||||
fromPuny: validateFromPunycode,
|
||||
mapping: validateRegistration,
|
||||
bidirule: bidirule.ValidString,
|
||||
}}
|
||||
|
||||
// TODO: profiles
|
||||
// Register: recommended for approving domain names: don't do any mappings
|
||||
// but rather reject on invalid input. Bundle or block deviation characters.
|
||||
)
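A short sketch (added for illustration; not part of the vendored file) of how the predefined profiles and the option constructors above can be combined. The behaviour noted in the comments follows from the option settings shown in this file.

package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	// Lookup applies STD3 rules, so '_' is disallowed; a (partially)
	// processed result is still returned alongside the error.
	a, err := idna.Lookup.ToASCII("exa_mple.com")
	fmt.Println(a, err)

	// A custom profile: lookup mapping plus the Bidi Rule and length checks.
	p := idna.New(idna.MapForLookup(), idna.BidiRule(), idna.VerifyDNSLength(true))
	fmt.Println(p.ToASCII("bücher.example.com"))
}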
|
||||
|
||||
type labelError struct{ label, code_ string }
|
||||
|
||||
func (e labelError) code() string { return e.code_ }
|
||||
func (e labelError) Error() string {
|
||||
return fmt.Sprintf("idna: invalid label %q", e.label)
|
||||
}
|
||||
|
||||
type runeError rune
|
||||
|
||||
func (e runeError) code() string { return "P1" }
|
||||
func (e runeError) Error() string {
|
||||
return fmt.Sprintf("idna: disallowed rune %U", e)
|
||||
}
|
||||
|
||||
// process implements the algorithm described in section 4 of UTS #46,
|
||||
// see https://www.unicode.org/reports/tr46.
|
||||
func (p *Profile) process(s string, toASCII bool) (string, error) {
|
||||
var err error
|
||||
var isBidi bool
|
||||
if p.mapping != nil {
|
||||
s, isBidi, err = p.mapping(p, s)
|
||||
}
|
||||
// Remove leading empty labels.
|
||||
if p.removeLeadingDots {
|
||||
for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
|
||||
}
|
||||
}
|
||||
// TODO: allow for a quick check of the tables data.
|
||||
// It seems like we should only create this error on ToASCII, but the
|
||||
// UTS 46 conformance tests suggest we should always check this.
|
||||
if err == nil && p.verifyDNSLength && s == "" {
|
||||
err = &labelError{s, "A4"}
|
||||
}
|
||||
labels := labelIter{orig: s}
|
||||
for ; !labels.done(); labels.next() {
|
||||
label := labels.label()
|
||||
if label == "" {
|
||||
// Empty labels are not okay. The label iterator skips the last
|
||||
// label if it is empty.
|
||||
if err == nil && p.verifyDNSLength {
|
||||
err = &labelError{s, "A4"}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(label, acePrefix) {
|
||||
u, err2 := decode(label[len(acePrefix):])
|
||||
if err2 != nil {
|
||||
if err == nil {
|
||||
err = err2
|
||||
}
|
||||
// Spec says keep the old label.
|
||||
continue
|
||||
}
|
||||
isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight
|
||||
labels.set(u)
|
||||
if err == nil && p.fromPuny != nil {
|
||||
err = p.fromPuny(p, u)
|
||||
}
|
||||
if err == nil {
|
||||
// This should be called on NonTransitional, according to the
|
||||
// spec, but that currently does not have any effect. Use the
|
||||
// original profile to preserve options.
|
||||
err = p.validateLabel(u)
|
||||
}
|
||||
} else if err == nil {
|
||||
err = p.validateLabel(label)
|
||||
}
|
||||
}
|
||||
if isBidi && p.bidirule != nil && err == nil {
|
||||
for labels.reset(); !labels.done(); labels.next() {
|
||||
if !p.bidirule(labels.label()) {
|
||||
err = &labelError{s, "B"}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if toASCII {
|
||||
for labels.reset(); !labels.done(); labels.next() {
|
||||
label := labels.label()
|
||||
if !ascii(label) {
|
||||
a, err2 := encode(acePrefix, label)
|
||||
if err == nil {
|
||||
err = err2
|
||||
}
|
||||
label = a
|
||||
labels.set(a)
|
||||
}
|
||||
n := len(label)
|
||||
if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
|
||||
err = &labelError{label, "A4"}
|
||||
}
|
||||
}
|
||||
}
|
||||
s = labels.result()
|
||||
if toASCII && p.verifyDNSLength && err == nil {
|
||||
// Compute the length of the domain name minus the root label and its dot.
|
||||
n := len(s)
|
||||
if n > 0 && s[n-1] == '.' {
|
||||
n--
|
||||
}
|
||||
if len(s) < 1 || n > 253 {
|
||||
err = &labelError{s, "A4"}
|
||||
}
|
||||
}
|
||||
return s, err
|
||||
}
|
||||
|
||||
func normalize(p *Profile, s string) (mapped string, isBidi bool, err error) {
|
||||
// TODO: consider first doing a quick check to see if any of these checks
|
||||
// need to be done. This will make it slower in the general case, but
|
||||
// faster in the common case.
|
||||
mapped = norm.NFC.String(s)
|
||||
isBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft
|
||||
return mapped, isBidi, nil
|
||||
}
|
||||
|
||||
func validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) {
|
||||
// TODO: filter need for normalization in loop below.
|
||||
if !norm.NFC.IsNormalString(s) {
|
||||
return s, false, &labelError{s, "V1"}
|
||||
}
|
||||
for i := 0; i < len(s); {
|
||||
v, sz := trie.lookupString(s[i:])
|
||||
if sz == 0 {
|
||||
return s, bidi, runeError(utf8.RuneError)
|
||||
}
|
||||
bidi = bidi || info(v).isBidi(s[i:])
|
||||
// Copy bytes not copied so far.
|
||||
switch p.simplify(info(v).category()) {
|
||||
// TODO: handle the NV8 defined in the Unicode idna data set to allow
|
||||
// for strict conformance to IDNA2008.
|
||||
case valid, deviation:
|
||||
case disallowed, mapped, unknown, ignored:
|
||||
r, _ := utf8.DecodeRuneInString(s[i:])
|
||||
return s, bidi, runeError(r)
|
||||
}
|
||||
i += sz
|
||||
}
|
||||
return s, bidi, nil
|
||||
}
|
||||
|
||||
func (c info) isBidi(s string) bool {
|
||||
if !c.isMapped() {
|
||||
return c&attributesMask == rtl
|
||||
}
|
||||
// TODO: also store bidi info for mapped data. This is possible, but a bit
|
||||
// cumbersome and not for the common case.
|
||||
p, _ := bidi.LookupString(s)
|
||||
switch p.Class() {
|
||||
case bidi.R, bidi.AL, bidi.AN:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) {
|
||||
var (
|
||||
b []byte
|
||||
k int
|
||||
)
|
||||
// combinedInfoBits contains the or-ed bits of all runes. We use this
|
||||
// to derive the mayNeedNorm bit later. This may trigger normalization
|
||||
// overeagerly, but it will not do so in the common case. The end result
|
||||
// is another 10% saving on BenchmarkProfile for the common case.
|
||||
var combinedInfoBits info
|
||||
for i := 0; i < len(s); {
|
||||
v, sz := trie.lookupString(s[i:])
|
||||
if sz == 0 {
|
||||
b = append(b, s[k:i]...)
|
||||
b = append(b, "\ufffd"...)
|
||||
k = len(s)
|
||||
if err == nil {
|
||||
err = runeError(utf8.RuneError)
|
||||
}
|
||||
break
|
||||
}
|
||||
combinedInfoBits |= info(v)
|
||||
bidi = bidi || info(v).isBidi(s[i:])
|
||||
start := i
|
||||
i += sz
|
||||
// Copy bytes not copied so far.
|
||||
switch p.simplify(info(v).category()) {
|
||||
case valid:
|
||||
continue
|
||||
case disallowed:
|
||||
if err == nil {
|
||||
r, _ := utf8.DecodeRuneInString(s[start:])
|
||||
err = runeError(r)
|
||||
}
|
||||
continue
|
||||
case mapped, deviation:
|
||||
b = append(b, s[k:start]...)
|
||||
b = info(v).appendMapping(b, s[start:i])
|
||||
case ignored:
|
||||
b = append(b, s[k:start]...)
|
||||
// drop the rune
|
||||
case unknown:
|
||||
b = append(b, s[k:start]...)
|
||||
b = append(b, "\ufffd"...)
|
||||
}
|
||||
k = i
|
||||
}
|
||||
if k == 0 {
|
||||
// No changes so far.
|
||||
if combinedInfoBits&mayNeedNorm != 0 {
|
||||
s = norm.NFC.String(s)
|
||||
}
|
||||
} else {
|
||||
b = append(b, s[k:]...)
|
||||
if norm.NFC.QuickSpan(b) != len(b) {
|
||||
b = norm.NFC.Bytes(b)
|
||||
}
|
||||
// TODO: the punycode converters require strings as input.
|
||||
s = string(b)
|
||||
}
|
||||
return s, bidi, err
|
||||
}
|
||||
|
||||
// A labelIter allows iterating over domain name labels.
|
||||
type labelIter struct {
|
||||
orig string
|
||||
slice []string
|
||||
curStart int
|
||||
curEnd int
|
||||
i int
|
||||
}
|
||||
|
||||
func (l *labelIter) reset() {
|
||||
l.curStart = 0
|
||||
l.curEnd = 0
|
||||
l.i = 0
|
||||
}
|
||||
|
||||
func (l *labelIter) done() bool {
|
||||
return l.curStart >= len(l.orig)
|
||||
}
|
||||
|
||||
func (l *labelIter) result() string {
|
||||
if l.slice != nil {
|
||||
return strings.Join(l.slice, ".")
|
||||
}
|
||||
return l.orig
|
||||
}
|
||||
|
||||
func (l *labelIter) label() string {
|
||||
if l.slice != nil {
|
||||
return l.slice[l.i]
|
||||
}
|
||||
p := strings.IndexByte(l.orig[l.curStart:], '.')
|
||||
l.curEnd = l.curStart + p
|
||||
if p == -1 {
|
||||
l.curEnd = len(l.orig)
|
||||
}
|
||||
return l.orig[l.curStart:l.curEnd]
|
||||
}
|
||||
|
||||
// next sets the value to the next label. It skips the last label if it is empty.
|
||||
func (l *labelIter) next() {
|
||||
l.i++
|
||||
if l.slice != nil {
|
||||
if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
|
||||
l.curStart = len(l.orig)
|
||||
}
|
||||
} else {
|
||||
l.curStart = l.curEnd + 1
|
||||
if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
|
||||
l.curStart = len(l.orig)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *labelIter) set(s string) {
|
||||
if l.slice == nil {
|
||||
l.slice = strings.Split(l.orig, ".")
|
||||
}
|
||||
l.slice[l.i] = s
|
||||
}
|
||||
|
||||
// acePrefix is the ASCII Compatible Encoding prefix.
|
||||
const acePrefix = "xn--"
|
||||
|
||||
func (p *Profile) simplify(cat category) category {
|
||||
switch cat {
|
||||
case disallowedSTD3Mapped:
|
||||
if p.useSTD3Rules {
|
||||
cat = disallowed
|
||||
} else {
|
||||
cat = mapped
|
||||
}
|
||||
case disallowedSTD3Valid:
|
||||
if p.useSTD3Rules {
|
||||
cat = disallowed
|
||||
} else {
|
||||
cat = valid
|
||||
}
|
||||
case deviation:
|
||||
if !p.transitional {
|
||||
cat = valid
|
||||
}
|
||||
case validNV8, validXV8:
|
||||
// TODO: handle V2008
|
||||
cat = valid
|
||||
}
|
||||
return cat
|
||||
}
|
||||
|
||||
func validateFromPunycode(p *Profile, s string) error {
|
||||
if !norm.NFC.IsNormalString(s) {
|
||||
return &labelError{s, "V1"}
|
||||
}
|
||||
// TODO: detect whether string may have to be normalized in the following
|
||||
// loop.
|
||||
for i := 0; i < len(s); {
|
||||
v, sz := trie.lookupString(s[i:])
|
||||
if sz == 0 {
|
||||
return runeError(utf8.RuneError)
|
||||
}
|
||||
if c := p.simplify(info(v).category()); c != valid && c != deviation {
|
||||
return &labelError{s, "V6"}
|
||||
}
|
||||
i += sz
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
zwnj = "\u200c"
|
||||
zwj = "\u200d"
|
||||
)
|
||||
|
||||
type joinState int8
|
||||
|
||||
const (
|
||||
stateStart joinState = iota
|
||||
stateVirama
|
||||
stateBefore
|
||||
stateBeforeVirama
|
||||
stateAfter
|
||||
stateFAIL
|
||||
)
|
||||
|
||||
var joinStates = [][numJoinTypes]joinState{
|
||||
stateStart: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
joinZWNJ: stateFAIL,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateVirama,
|
||||
},
|
||||
stateVirama: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
},
|
||||
stateBefore: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
joiningT: stateBefore,
|
||||
joinZWNJ: stateAfter,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateBeforeVirama,
|
||||
},
|
||||
stateBeforeVirama: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
joiningT: stateBefore,
|
||||
},
|
||||
stateAfter: {
|
||||
joiningL: stateFAIL,
|
||||
joiningD: stateBefore,
|
||||
joiningT: stateAfter,
|
||||
joiningR: stateStart,
|
||||
joinZWNJ: stateFAIL,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateAfter, // no-op as we can't accept joiners here
|
||||
},
|
||||
stateFAIL: {
|
||||
0: stateFAIL,
|
||||
joiningL: stateFAIL,
|
||||
joiningD: stateFAIL,
|
||||
joiningT: stateFAIL,
|
||||
joiningR: stateFAIL,
|
||||
joinZWNJ: stateFAIL,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateFAIL,
|
||||
},
|
||||
}
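// Illustrative trace (a reading aid, not generated code): for a label of the
// form <L-joining rune> ZWNJ <R-joining rune>, the table above walks
//
//	stateStart --joiningL--> stateBefore --joinZWNJ--> stateAfter --joiningR--> stateStart
//
// which validateLabel below accepts. If the label ends while still in
// stateAfter (ZWNJ not followed by an R- or D-joining rune), it is rejected
// with error code "C".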
|
||||
|
||||
// validateLabel validates the criteria from Section 4.1. Items 1, 4, and 6 are
|
||||
// already implicitly satisfied by the overall implementation.
|
||||
func (p *Profile) validateLabel(s string) (err error) {
|
||||
if s == "" {
|
||||
if p.verifyDNSLength {
|
||||
return &labelError{s, "A4"}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if p.checkHyphens {
|
||||
if len(s) > 4 && s[2] == '-' && s[3] == '-' {
|
||||
return &labelError{s, "V2"}
|
||||
}
|
||||
if s[0] == '-' || s[len(s)-1] == '-' {
|
||||
return &labelError{s, "V3"}
|
||||
}
|
||||
}
|
||||
if !p.checkJoiners {
|
||||
return nil
|
||||
}
|
||||
trie := p.trie // p.checkJoiners is only set if trie is set.
|
||||
// TODO: merge the use of this in the trie.
|
||||
v, sz := trie.lookupString(s)
|
||||
x := info(v)
|
||||
if x.isModifier() {
|
||||
return &labelError{s, "V5"}
|
||||
}
|
||||
// Quickly return in the absence of zero-width (non) joiners.
|
||||
if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 {
|
||||
return nil
|
||||
}
|
||||
st := stateStart
|
||||
for i := 0; ; {
|
||||
jt := x.joinType()
|
||||
if s[i:i+sz] == zwj {
|
||||
jt = joinZWJ
|
||||
} else if s[i:i+sz] == zwnj {
|
||||
jt = joinZWNJ
|
||||
}
|
||||
st = joinStates[st][jt]
|
||||
if x.isViramaModifier() {
|
||||
st = joinStates[st][joinVirama]
|
||||
}
|
||||
if i += sz; i == len(s) {
|
||||
break
|
||||
}
|
||||
v, sz = trie.lookupString(s[i:])
|
||||
x = info(v)
|
||||
}
|
||||
if st == stateFAIL || st == stateAfter {
|
||||
return &labelError{s, "C"}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ascii(s string) bool {
|
||||
for i := 0; i < len(s); i++ {
|
||||
if s[i] >= utf8.RuneSelf {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
718
vendor/golang.org/x/net/idna/idna9.0.0.go
generated
vendored
Normal file
@ -0,0 +1,718 @@
|
||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.10
|
||||
// +build !go1.10
|
||||
|
||||
// Package idna implements IDNA2008 using the compatibility processing
|
||||
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
|
||||
// deal with the transition from IDNA2003.
|
||||
//
|
||||
// IDNA2008 (Internationalized Domain Names for Applications) is defined in RFC
|
||||
// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
|
||||
// UTS #46 is defined in https://www.unicode.org/reports/tr46.
|
||||
// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the
|
||||
// differences between these two standards.
|
||||
package idna // import "golang.org/x/net/idna"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/text/secure/bidirule"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
|
||||
// NOTE: Unlike common practice in Go APIs, the functions will return a
|
||||
// sanitized domain name in case of errors. Browsers sometimes use a partially
|
||||
// evaluated string as lookup.
|
||||
// TODO: the current error handling is, in my opinion, the least opinionated.
|
||||
// Other strategies are also viable, though:
|
||||
// Option 1) Return an empty string in case of error, but allow the user to
|
||||
// specify explicitly which errors to ignore.
|
||||
// Option 2) Return the partially evaluated string if it is itself a valid
|
||||
// string, otherwise return the empty string in case of error.
|
||||
// Option 3) Option 1 and 2.
|
||||
// Option 4) Always return an empty string for now and implement Option 1 as
|
||||
// needed, and document that the return string may not be empty in case of
|
||||
// error in the future.
|
||||
// I think Option 1 is best, but it is quite opinionated.
|
||||
|
||||
// ToASCII is a wrapper for Punycode.ToASCII.
|
||||
func ToASCII(s string) (string, error) {
|
||||
return Punycode.process(s, true)
|
||||
}
|
||||
|
||||
// ToUnicode is a wrapper for Punycode.ToUnicode.
|
||||
func ToUnicode(s string) (string, error) {
|
||||
return Punycode.process(s, false)
|
||||
}
|
||||
|
||||
// An Option configures a Profile at creation time.
|
||||
type Option func(*options)
|
||||
|
||||
// Transitional sets a Profile to use the Transitional mapping as defined in UTS
|
||||
// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
|
||||
// transitional mapping provides a compromise between IDNA2003 and IDNA2008
|
||||
// compatibility. It is used by most browsers when resolving domain names. This
|
||||
// option is only meaningful if combined with MapForLookup.
|
||||
func Transitional(transitional bool) Option {
|
||||
return func(o *options) { o.transitional = transitional }
|
||||
}
|
||||
|
||||
// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
|
||||
// are longer than allowed by the RFC.
|
||||
//
|
||||
// This option corresponds to the VerifyDnsLength flag in UTS #46.
|
||||
func VerifyDNSLength(verify bool) Option {
|
||||
return func(o *options) { o.verifyDNSLength = verify }
|
||||
}
|
||||
|
||||
// RemoveLeadingDots removes leading label separators. Leading runes that map to
|
||||
// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
|
||||
func RemoveLeadingDots(remove bool) Option {
|
||||
return func(o *options) { o.removeLeadingDots = remove }
|
||||
}
|
||||
|
||||
// ValidateLabels sets whether to check the mandatory label validation criteria
|
||||
// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
|
||||
// of hyphens ('-'), normalization, validity of runes, and the context rules.
|
||||
// In particular, ValidateLabels also sets the CheckHyphens and CheckJoiners flags
|
||||
// in UTS #46.
|
||||
func ValidateLabels(enable bool) Option {
|
||||
return func(o *options) {
|
||||
// Don't override existing mappings, but set one that at least checks
|
||||
// normalization if it is not set.
|
||||
if o.mapping == nil && enable {
|
||||
o.mapping = normalize
|
||||
}
|
||||
o.trie = trie
|
||||
o.checkJoiners = enable
|
||||
o.checkHyphens = enable
|
||||
if enable {
|
||||
o.fromPuny = validateFromPunycode
|
||||
} else {
|
||||
o.fromPuny = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// CheckHyphens sets whether to check for correct use of hyphens ('-') in
|
||||
// labels. Most web browsers do not have this option set, since labels such as
|
||||
// "r3---sn-apo3qvuoxuxbt-j5pe" are in common use.
|
||||
//
|
||||
// This option corresponds to the CheckHyphens flag in UTS #46.
|
||||
func CheckHyphens(enable bool) Option {
|
||||
return func(o *options) { o.checkHyphens = enable }
|
||||
}
|
||||
|
||||
// CheckJoiners sets whether to check the ContextJ rules as defined in Appendix
|
||||
// A of RFC 5892, concerning the use of joiner runes.
|
||||
//
|
||||
// This option corresponds to the CheckJoiners flag in UTS #46.
|
||||
func CheckJoiners(enable bool) Option {
|
||||
return func(o *options) {
|
||||
o.trie = trie
|
||||
o.checkJoiners = enable
|
||||
}
|
||||
}
|
||||
|
||||
// StrictDomainName limits the set of permissible ASCII characters to those
|
||||
// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
|
||||
// hyphen). This is set by default for MapForLookup and ValidateForRegistration,
|
||||
// but is only useful if ValidateLabels is set.
|
||||
//
|
||||
// This option is useful, for instance, for browsers that allow characters
|
||||
// outside this range, for example a '_' (U+005F LOW LINE). See
|
||||
// http://www.rfc-editor.org/std/std3.txt for more details.
|
||||
//
|
||||
// This option corresponds to the UseSTD3ASCIIRules flag in UTS #46.
|
||||
func StrictDomainName(use bool) Option {
|
||||
return func(o *options) { o.useSTD3Rules = use }
|
||||
}
|
||||
|
||||
// NOTE: the following options pull in tables. The tables should not be linked
|
||||
// in as long as the options are not used.
|
||||
|
||||
// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
|
||||
// that relies on proper validation of labels should include this rule.
|
||||
//
|
||||
// This option corresponds to the CheckBidi flag in UTS #46.
|
||||
func BidiRule() Option {
|
||||
return func(o *options) { o.bidirule = bidirule.ValidString }
|
||||
}
|
||||
|
||||
// ValidateForRegistration sets validation options to verify that a given IDN is
|
||||
// properly formatted for registration as defined by Section 4 of RFC 5891.
|
||||
func ValidateForRegistration() Option {
|
||||
return func(o *options) {
|
||||
o.mapping = validateRegistration
|
||||
StrictDomainName(true)(o)
|
||||
ValidateLabels(true)(o)
|
||||
VerifyDNSLength(true)(o)
|
||||
BidiRule()(o)
|
||||
}
|
||||
}
|
||||
|
||||
// MapForLookup sets validation and mapping options such that a given IDN is
|
||||
// transformed for domain name lookup according to the requirements set out in
|
||||
// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
|
||||
// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
|
||||
// to add this check.
|
||||
//
|
||||
// The mappings include normalization and mapping case, width and other
|
||||
// compatibility mappings.
|
||||
func MapForLookup() Option {
|
||||
return func(o *options) {
|
||||
o.mapping = validateAndMap
|
||||
StrictDomainName(true)(o)
|
||||
ValidateLabels(true)(o)
|
||||
RemoveLeadingDots(true)(o)
|
||||
}
|
||||
}
|
||||
|
||||
type options struct {
|
||||
transitional bool
|
||||
useSTD3Rules bool
|
||||
checkHyphens bool
|
||||
checkJoiners bool
|
||||
verifyDNSLength bool
|
||||
removeLeadingDots bool
|
||||
|
||||
trie *idnaTrie
|
||||
|
||||
// fromPuny calls validation rules when converting A-labels to U-labels.
|
||||
fromPuny func(p *Profile, s string) error
|
||||
|
||||
// mapping implements a validation and mapping step as defined in RFC 5895
|
||||
// or UTS 46, tailored to, for example, domain registration or lookup.
|
||||
mapping func(p *Profile, s string) (string, error)
|
||||
|
||||
// bidirule, if specified, checks whether s conforms to the Bidi Rule
|
||||
// defined in RFC 5893.
|
||||
bidirule func(s string) bool
|
||||
}
|
||||
|
||||
// A Profile defines the configuration of an IDNA mapper.
|
||||
type Profile struct {
|
||||
options
|
||||
}
|
||||
|
||||
func apply(o *options, opts []Option) {
|
||||
for _, f := range opts {
|
||||
f(o)
|
||||
}
|
||||
}
|
||||
|
||||
// New creates a new Profile.
|
||||
//
|
||||
// With no options, the returned Profile is the most permissive and equals the
|
||||
// Punycode Profile. Options can be passed to further restrict the Profile. The
|
||||
// MapForLookup and ValidateForRegistration options set a collection of options,
|
||||
// for lookup and registration purposes respectively, which can be tailored by
|
||||
// adding more fine-grained options, where later options override earlier
|
||||
// options.
|
||||
func New(o ...Option) *Profile {
|
||||
p := &Profile{}
|
||||
apply(&p.options, o)
|
||||
return p
|
||||
}
|
||||
|
||||
// ToASCII converts a domain or domain label to its ASCII form. For example,
|
||||
// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
|
||||
// ToASCII("golang") is "golang". If an error is encountered it will return
|
||||
// an error and a (partially) processed result.
|
||||
func (p *Profile) ToASCII(s string) (string, error) {
|
||||
return p.process(s, true)
|
||||
}
|
||||
|
||||
// ToUnicode converts a domain or domain label to its Unicode form. For example,
|
||||
// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
|
||||
// ToUnicode("golang") is "golang". If an error is encountered it will return
|
||||
// an error and a (partially) processed result.
|
||||
func (p *Profile) ToUnicode(s string) (string, error) {
|
||||
pp := *p
|
||||
pp.transitional = false
|
||||
return pp.process(s, false)
|
||||
}
|
||||
|
||||
// String reports a string with a description of the profile for debugging
|
||||
// purposes. The string format may change with different versions.
|
||||
func (p *Profile) String() string {
|
||||
s := ""
|
||||
if p.transitional {
|
||||
s = "Transitional"
|
||||
} else {
|
||||
s = "NonTransitional"
|
||||
}
|
||||
if p.useSTD3Rules {
|
||||
s += ":UseSTD3Rules"
|
||||
}
|
||||
if p.checkHyphens {
|
||||
s += ":CheckHyphens"
|
||||
}
|
||||
if p.checkJoiners {
|
||||
s += ":CheckJoiners"
|
||||
}
|
||||
if p.verifyDNSLength {
|
||||
s += ":VerifyDNSLength"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
var (
|
||||
// Punycode is a Profile that does raw punycode processing with a minimum
|
||||
// of validation.
|
||||
Punycode *Profile = punycode
|
||||
|
||||
// Lookup is the recommended profile for looking up domain names, according
|
||||
// to Section 5 of RFC 5891. The exact configuration of this profile may
|
||||
// change over time.
|
||||
Lookup *Profile = lookup
|
||||
|
||||
// Display is the recommended profile for displaying domain names.
|
||||
// The configuration of this profile may change over time.
|
||||
Display *Profile = display
|
||||
|
||||
// Registration is the recommended profile for checking whether a given
|
||||
// IDN is valid for registration, according to Section 4 of RFC 5891.
|
||||
Registration *Profile = registration
|
||||
|
||||
punycode = &Profile{}
|
||||
lookup = &Profile{options{
|
||||
transitional: true,
|
||||
removeLeadingDots: true,
|
||||
useSTD3Rules: true,
|
||||
checkHyphens: true,
|
||||
checkJoiners: true,
|
||||
trie: trie,
|
||||
fromPuny: validateFromPunycode,
|
||||
mapping: validateAndMap,
|
||||
bidirule: bidirule.ValidString,
|
||||
}}
|
||||
display = &Profile{options{
|
||||
useSTD3Rules: true,
|
||||
removeLeadingDots: true,
|
||||
checkHyphens: true,
|
||||
checkJoiners: true,
|
||||
trie: trie,
|
||||
fromPuny: validateFromPunycode,
|
||||
mapping: validateAndMap,
|
||||
bidirule: bidirule.ValidString,
|
||||
}}
|
||||
registration = &Profile{options{
|
||||
useSTD3Rules: true,
|
||||
verifyDNSLength: true,
|
||||
checkHyphens: true,
|
||||
checkJoiners: true,
|
||||
trie: trie,
|
||||
fromPuny: validateFromPunycode,
|
||||
mapping: validateRegistration,
|
||||
bidirule: bidirule.ValidString,
|
||||
}}
|
||||
|
||||
// TODO: profiles
|
||||
// Register: recommended for approving domain names: don't do any mappings
|
||||
// but rather reject on invalid input. Bundle or block deviation characters.
|
||||
)
|
||||
|
||||
type labelError struct{ label, code_ string }
|
||||
|
||||
func (e labelError) code() string { return e.code_ }
|
||||
func (e labelError) Error() string {
|
||||
return fmt.Sprintf("idna: invalid label %q", e.label)
|
||||
}
|
||||
|
||||
type runeError rune
|
||||
|
||||
func (e runeError) code() string { return "P1" }
|
||||
func (e runeError) Error() string {
|
||||
return fmt.Sprintf("idna: disallowed rune %U", e)
|
||||
}
|
||||
|
||||
// process implements the algorithm described in section 4 of UTS #46,
|
||||
// see https://www.unicode.org/reports/tr46.
|
||||
func (p *Profile) process(s string, toASCII bool) (string, error) {
|
||||
var err error
|
||||
if p.mapping != nil {
|
||||
s, err = p.mapping(p, s)
|
||||
}
|
||||
// Remove leading empty labels.
|
||||
if p.removeLeadingDots {
|
||||
for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
|
||||
}
|
||||
}
|
||||
// It seems like we should only create this error on ToASCII, but the
|
||||
// UTS 46 conformance tests suggest we should always check this.
|
||||
if err == nil && p.verifyDNSLength && s == "" {
|
||||
err = &labelError{s, "A4"}
|
||||
}
|
||||
labels := labelIter{orig: s}
|
||||
for ; !labels.done(); labels.next() {
|
||||
label := labels.label()
|
||||
if label == "" {
|
||||
// Empty labels are not okay. The label iterator skips the last
|
||||
// label if it is empty.
|
||||
if err == nil && p.verifyDNSLength {
|
||||
err = &labelError{s, "A4"}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(label, acePrefix) {
|
||||
u, err2 := decode(label[len(acePrefix):])
|
||||
if err2 != nil {
|
||||
if err == nil {
|
||||
err = err2
|
||||
}
|
||||
// Spec says keep the old label.
|
||||
continue
|
||||
}
|
||||
labels.set(u)
|
||||
if err == nil && p.fromPuny != nil {
|
||||
err = p.fromPuny(p, u)
|
||||
}
|
||||
if err == nil {
|
||||
// This should be called on NonTransitional, according to the
|
||||
// spec, but that currently does not have any effect. Use the
|
||||
// original profile to preserve options.
|
||||
err = p.validateLabel(u)
|
||||
}
|
||||
} else if err == nil {
|
||||
err = p.validateLabel(label)
|
||||
}
|
||||
}
|
||||
if toASCII {
|
||||
for labels.reset(); !labels.done(); labels.next() {
|
||||
label := labels.label()
|
||||
if !ascii(label) {
|
||||
a, err2 := encode(acePrefix, label)
|
||||
if err == nil {
|
||||
err = err2
|
||||
}
|
||||
label = a
|
||||
labels.set(a)
|
||||
}
|
||||
n := len(label)
|
||||
if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
|
||||
err = &labelError{label, "A4"}
|
||||
}
|
||||
}
|
||||
}
|
||||
s = labels.result()
|
||||
if toASCII && p.verifyDNSLength && err == nil {
|
||||
// Compute the length of the domain name minus the root label and its dot.
|
||||
n := len(s)
|
||||
if n > 0 && s[n-1] == '.' {
|
||||
n--
|
||||
}
|
||||
if len(s) < 1 || n > 253 {
|
||||
err = &labelError{s, "A4"}
|
||||
}
|
||||
}
|
||||
return s, err
|
||||
}
|
||||
|
||||
func normalize(p *Profile, s string) (string, error) {
|
||||
return norm.NFC.String(s), nil
|
||||
}
|
||||
|
||||
func validateRegistration(p *Profile, s string) (string, error) {
|
||||
if !norm.NFC.IsNormalString(s) {
|
||||
return s, &labelError{s, "V1"}
|
||||
}
|
||||
for i := 0; i < len(s); {
|
||||
v, sz := trie.lookupString(s[i:])
|
||||
// Copy bytes not copied so far.
|
||||
switch p.simplify(info(v).category()) {
|
||||
// TODO: handle the NV8 defined in the Unicode idna data set to allow
|
||||
// for strict conformance to IDNA2008.
|
||||
case valid, deviation:
|
||||
case disallowed, mapped, unknown, ignored:
|
||||
r, _ := utf8.DecodeRuneInString(s[i:])
|
||||
return s, runeError(r)
|
||||
}
|
||||
i += sz
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func validateAndMap(p *Profile, s string) (string, error) {
|
||||
var (
|
||||
err error
|
||||
b []byte
|
||||
k int
|
||||
)
|
||||
for i := 0; i < len(s); {
|
||||
v, sz := trie.lookupString(s[i:])
|
||||
start := i
|
||||
i += sz
|
||||
// Copy bytes not copied so far.
|
||||
switch p.simplify(info(v).category()) {
|
||||
case valid:
|
||||
continue
|
||||
case disallowed:
|
||||
if err == nil {
|
||||
r, _ := utf8.DecodeRuneInString(s[start:])
|
||||
err = runeError(r)
|
||||
}
|
||||
continue
|
||||
case mapped, deviation:
|
||||
b = append(b, s[k:start]...)
|
||||
b = info(v).appendMapping(b, s[start:i])
|
||||
case ignored:
|
||||
b = append(b, s[k:start]...)
|
||||
// drop the rune
|
||||
case unknown:
|
||||
b = append(b, s[k:start]...)
|
||||
b = append(b, "\ufffd"...)
|
||||
}
|
||||
k = i
|
||||
}
|
||||
if k == 0 {
|
||||
// No changes so far.
|
||||
s = norm.NFC.String(s)
|
||||
} else {
|
||||
b = append(b, s[k:]...)
|
||||
if norm.NFC.QuickSpan(b) != len(b) {
|
||||
b = norm.NFC.Bytes(b)
|
||||
}
|
||||
// TODO: the punycode converters require strings as input.
|
||||
s = string(b)
|
||||
}
|
||||
return s, err
|
||||
}
|
||||
|
||||
// A labelIter allows iterating over domain name labels.
|
||||
type labelIter struct {
|
||||
orig string
|
||||
slice []string
|
||||
curStart int
|
||||
curEnd int
|
||||
i int
|
||||
}
|
||||
|
||||
func (l *labelIter) reset() {
|
||||
l.curStart = 0
|
||||
l.curEnd = 0
|
||||
l.i = 0
|
||||
}
|
||||
|
||||
func (l *labelIter) done() bool {
|
||||
return l.curStart >= len(l.orig)
|
||||
}
|
||||
|
||||
func (l *labelIter) result() string {
|
||||
if l.slice != nil {
|
||||
return strings.Join(l.slice, ".")
|
||||
}
|
||||
return l.orig
|
||||
}
|
||||
|
||||
func (l *labelIter) label() string {
|
||||
if l.slice != nil {
|
||||
return l.slice[l.i]
|
||||
}
|
||||
p := strings.IndexByte(l.orig[l.curStart:], '.')
|
||||
l.curEnd = l.curStart + p
|
||||
if p == -1 {
|
||||
l.curEnd = len(l.orig)
|
||||
}
|
||||
return l.orig[l.curStart:l.curEnd]
|
||||
}
|
||||
|
||||
// next sets the value to the next label. It skips the last label if it is empty.
|
||||
func (l *labelIter) next() {
|
||||
l.i++
|
||||
if l.slice != nil {
|
||||
if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
|
||||
l.curStart = len(l.orig)
|
||||
}
|
||||
} else {
|
||||
l.curStart = l.curEnd + 1
|
||||
if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
|
||||
l.curStart = len(l.orig)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *labelIter) set(s string) {
|
||||
if l.slice == nil {
|
||||
l.slice = strings.Split(l.orig, ".")
|
||||
}
|
||||
l.slice[l.i] = s
|
||||
}
|
||||
|
||||
// acePrefix is the ASCII Compatible Encoding prefix.
|
||||
const acePrefix = "xn--"
|
||||
|
||||
func (p *Profile) simplify(cat category) category {
|
||||
switch cat {
|
||||
case disallowedSTD3Mapped:
|
||||
if p.useSTD3Rules {
|
||||
cat = disallowed
|
||||
} else {
|
||||
cat = mapped
|
||||
}
|
||||
case disallowedSTD3Valid:
|
||||
if p.useSTD3Rules {
|
||||
cat = disallowed
|
||||
} else {
|
||||
cat = valid
|
||||
}
|
||||
case deviation:
|
||||
if !p.transitional {
|
||||
cat = valid
|
||||
}
|
||||
case validNV8, validXV8:
|
||||
// TODO: handle V2008
|
||||
cat = valid
|
||||
}
|
||||
return cat
|
||||
}
|
||||
|
||||
func validateFromPunycode(p *Profile, s string) error {
|
||||
if !norm.NFC.IsNormalString(s) {
|
||||
return &labelError{s, "V1"}
|
||||
}
|
||||
for i := 0; i < len(s); {
|
||||
v, sz := trie.lookupString(s[i:])
|
||||
if c := p.simplify(info(v).category()); c != valid && c != deviation {
|
||||
return &labelError{s, "V6"}
|
||||
}
|
||||
i += sz
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
zwnj = "\u200c"
|
||||
zwj = "\u200d"
|
||||
)
|
||||
|
||||
type joinState int8
|
||||
|
||||
const (
|
||||
stateStart joinState = iota
|
||||
stateVirama
|
||||
stateBefore
|
||||
stateBeforeVirama
|
||||
stateAfter
|
||||
stateFAIL
|
||||
)
|
||||
|
||||
var joinStates = [][numJoinTypes]joinState{
|
||||
stateStart: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
joinZWNJ: stateFAIL,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateVirama,
|
||||
},
|
||||
stateVirama: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
},
|
||||
stateBefore: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
joiningT: stateBefore,
|
||||
joinZWNJ: stateAfter,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateBeforeVirama,
|
||||
},
|
||||
stateBeforeVirama: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
joiningT: stateBefore,
|
||||
},
|
||||
stateAfter: {
|
||||
joiningL: stateFAIL,
|
||||
joiningD: stateBefore,
|
||||
joiningT: stateAfter,
|
||||
joiningR: stateStart,
|
||||
joinZWNJ: stateFAIL,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateAfter, // no-op as we can't accept joiners here
|
||||
},
|
||||
stateFAIL: {
|
||||
0: stateFAIL,
|
||||
joiningL: stateFAIL,
|
||||
joiningD: stateFAIL,
|
||||
joiningT: stateFAIL,
|
||||
joiningR: stateFAIL,
|
||||
joinZWNJ: stateFAIL,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateFAIL,
|
||||
},
|
||||
}
|
||||
|
||||
// validateLabel validates the criteria from Section 4.1. Items 1, 4, and 6 are
|
||||
// already implicitly satisfied by the overall implementation.
|
||||
func (p *Profile) validateLabel(s string) error {
|
||||
if s == "" {
|
||||
if p.verifyDNSLength {
|
||||
return &labelError{s, "A4"}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if p.bidirule != nil && !p.bidirule(s) {
|
||||
return &labelError{s, "B"}
|
||||
}
|
||||
if p.checkHyphens {
|
||||
if len(s) > 4 && s[2] == '-' && s[3] == '-' {
|
||||
return &labelError{s, "V2"}
|
||||
}
|
||||
if s[0] == '-' || s[len(s)-1] == '-' {
|
||||
return &labelError{s, "V3"}
|
||||
}
|
||||
}
|
||||
if !p.checkJoiners {
|
||||
return nil
|
||||
}
|
||||
trie := p.trie // p.checkJoiners is only set if trie is set.
|
||||
// TODO: merge the use of this in the trie.
|
||||
v, sz := trie.lookupString(s)
|
||||
x := info(v)
|
||||
if x.isModifier() {
|
||||
return &labelError{s, "V5"}
|
||||
}
|
||||
// Quickly return in the absence of zero-width (non) joiners.
|
||||
if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 {
|
||||
return nil
|
||||
}
|
||||
st := stateStart
|
||||
for i := 0; ; {
|
||||
jt := x.joinType()
|
||||
if s[i:i+sz] == zwj {
|
||||
jt = joinZWJ
|
||||
} else if s[i:i+sz] == zwnj {
|
||||
jt = joinZWNJ
|
||||
}
|
||||
st = joinStates[st][jt]
|
||||
if x.isViramaModifier() {
|
||||
st = joinStates[st][joinVirama]
|
||||
}
|
||||
if i += sz; i == len(s) {
|
||||
break
|
||||
}
|
||||
v, sz = trie.lookupString(s[i:])
|
||||
x = info(v)
|
||||
}
|
||||
if st == stateFAIL || st == stateAfter {
|
||||
return &labelError{s, "C"}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ascii(s string) bool {
|
||||
for i := 0; i < len(s); i++ {
|
||||
if s[i] >= utf8.RuneSelf {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
203
vendor/golang.org/x/net/idna/punycode.go
generated
vendored
Normal file
@ -0,0 +1,203 @@
|
||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package idna
|
||||
|
||||
// This file implements the Punycode algorithm from RFC 3492.
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// These parameter values are specified in section 5.
|
||||
//
|
||||
// All computation is done with int32s, so that overflow behavior is identical
|
||||
// regardless of whether int is 32-bit or 64-bit.
|
||||
const (
|
||||
base int32 = 36
|
||||
damp int32 = 700
|
||||
initialBias int32 = 72
|
||||
initialN int32 = 128
|
||||
skew int32 = 38
|
||||
tmax int32 = 26
|
||||
tmin int32 = 1
|
||||
)
|
||||
|
||||
func punyError(s string) error { return &labelError{s, "A3"} }
|
||||
|
||||
// decode decodes a string as specified in section 6.2.
|
||||
func decode(encoded string) (string, error) {
|
||||
if encoded == "" {
|
||||
return "", nil
|
||||
}
|
||||
pos := 1 + strings.LastIndex(encoded, "-")
|
||||
if pos == 1 {
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
if pos == len(encoded) {
|
||||
return encoded[:len(encoded)-1], nil
|
||||
}
|
||||
output := make([]rune, 0, len(encoded))
|
||||
if pos != 0 {
|
||||
for _, r := range encoded[:pos-1] {
|
||||
output = append(output, r)
|
||||
}
|
||||
}
|
||||
i, n, bias := int32(0), initialN, initialBias
|
||||
for pos < len(encoded) {
|
||||
oldI, w := i, int32(1)
|
||||
for k := base; ; k += base {
|
||||
if pos == len(encoded) {
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
digit, ok := decodeDigit(encoded[pos])
|
||||
if !ok {
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
pos++
|
||||
i += digit * w
|
||||
if i < 0 {
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
t := k - bias
|
||||
if t < tmin {
|
||||
t = tmin
|
||||
} else if t > tmax {
|
||||
t = tmax
|
||||
}
|
||||
if digit < t {
|
||||
break
|
||||
}
|
||||
w *= base - t
|
||||
if w >= math.MaxInt32/base {
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
}
|
||||
x := int32(len(output) + 1)
|
||||
bias = adapt(i-oldI, x, oldI == 0)
|
||||
n += i / x
|
||||
i %= x
|
||||
if n > utf8.MaxRune || len(output) >= 1024 {
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
output = append(output, 0)
|
||||
copy(output[i+1:], output[i:])
|
||||
output[i] = n
|
||||
i++
|
||||
}
|
||||
return string(output), nil
|
||||
}
|
||||
|
||||
// encode encodes a string as specified in section 6.3 and prepends prefix to
|
||||
// the result.
|
||||
//
|
||||
// The "while h < length(input)" line in the specification becomes "for
|
||||
// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes.
|
||||
func encode(prefix, s string) (string, error) {
|
||||
output := make([]byte, len(prefix), len(prefix)+1+2*len(s))
|
||||
copy(output, prefix)
|
||||
delta, n, bias := int32(0), initialN, initialBias
|
||||
b, remaining := int32(0), int32(0)
|
||||
for _, r := range s {
|
||||
if r < 0x80 {
|
||||
b++
|
||||
output = append(output, byte(r))
|
||||
} else {
|
||||
remaining++
|
||||
}
|
||||
}
|
||||
h := b
|
||||
if b > 0 {
|
||||
output = append(output, '-')
|
||||
}
|
||||
for remaining != 0 {
|
||||
m := int32(0x7fffffff)
|
||||
for _, r := range s {
|
||||
if m > r && r >= n {
|
||||
m = r
|
||||
}
|
||||
}
|
||||
delta += (m - n) * (h + 1)
|
||||
if delta < 0 {
|
||||
return "", punyError(s)
|
||||
}
|
||||
n = m
|
||||
for _, r := range s {
|
||||
if r < n {
|
||||
delta++
|
||||
if delta < 0 {
|
||||
return "", punyError(s)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if r > n {
|
||||
continue
|
||||
}
|
||||
q := delta
|
||||
for k := base; ; k += base {
|
||||
t := k - bias
|
||||
if t < tmin {
|
||||
t = tmin
|
||||
} else if t > tmax {
|
||||
t = tmax
|
||||
}
|
||||
if q < t {
|
||||
break
|
||||
}
|
||||
output = append(output, encodeDigit(t+(q-t)%(base-t)))
|
||||
q = (q - t) / (base - t)
|
||||
}
|
||||
output = append(output, encodeDigit(q))
|
||||
bias = adapt(delta, h+1, h == b)
|
||||
delta = 0
|
||||
h++
|
||||
remaining--
|
||||
}
|
||||
delta++
|
||||
n++
|
||||
}
|
||||
return string(output), nil
|
||||
}
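A minimal in-package test sketch (added for illustration; not part of the generated file) exercising the unexported encode and decode functions above. The expected values follow the xn--bcher-kva example from the package documentation.

package idna

import "testing"

// TestPunycodeRoundTrip checks that "bücher" round-trips through the RFC 3492
// encoder and decoder defined in this file.
func TestPunycodeRoundTrip(t *testing.T) {
	got, err := encode("", "bücher")
	if err != nil || got != "bcher-kva" {
		t.Fatalf(`encode("", "bücher") = %q, %v; want "bcher-kva"`, got, err)
	}
	back, err := decode("bcher-kva")
	if err != nil || back != "bücher" {
		t.Fatalf(`decode("bcher-kva") = %q, %v; want "bücher"`, back, err)
	}
}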
|
||||
|
||||
func decodeDigit(x byte) (digit int32, ok bool) {
|
||||
switch {
|
||||
case '0' <= x && x <= '9':
|
||||
return int32(x - ('0' - 26)), true
|
||||
case 'A' <= x && x <= 'Z':
|
||||
return int32(x - 'A'), true
|
||||
case 'a' <= x && x <= 'z':
|
||||
return int32(x - 'a'), true
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
func encodeDigit(digit int32) byte {
|
||||
switch {
|
||||
case 0 <= digit && digit < 26:
|
||||
return byte(digit + 'a')
|
||||
case 26 <= digit && digit < 36:
|
||||
return byte(digit + ('0' - 26))
|
||||
}
|
||||
panic("idna: internal error in punycode encoding")
|
||||
}
|
||||
|
||||
// adapt is the bias adaptation function specified in section 6.1.
|
||||
func adapt(delta, numPoints int32, firstTime bool) int32 {
|
||||
if firstTime {
|
||||
delta /= damp
|
||||
} else {
|
||||
delta /= 2
|
||||
}
|
||||
delta += delta / numPoints
|
||||
k := int32(0)
|
||||
for delta > ((base-tmin)*tmax)/2 {
|
||||
delta /= base - tmin
|
||||
k += base
|
||||
}
|
||||
return k + (base-tmin+1)*delta/(delta+skew)
|
||||
}
|
4560
vendor/golang.org/x/net/idna/tables10.0.0.go
generated
vendored
Normal file
File diff suppressed because it is too large
4654
vendor/golang.org/x/net/idna/tables11.0.0.go
generated
vendored
Normal file
File diff suppressed because it is too large
4734
vendor/golang.org/x/net/idna/tables12.0.0.go
generated
vendored
Normal file
File diff suppressed because it is too large
4840
vendor/golang.org/x/net/idna/tables13.0.0.go
generated
vendored
Normal file
File diff suppressed because it is too large
4487
vendor/golang.org/x/net/idna/tables9.0.0.go
generated
vendored
Normal file
File diff suppressed because it is too large
72
vendor/golang.org/x/net/idna/trie.go
generated
vendored
Normal file
72
vendor/golang.org/x/net/idna/trie.go
generated
vendored
Normal file
@ -0,0 +1,72 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package idna

// appendMapping appends the mapping for the respective rune. isMapped must be
// true. A mapping is a categorization of a rune as defined in UTS #46.
func (c info) appendMapping(b []byte, s string) []byte {
	index := int(c >> indexShift)
	if c&xorBit == 0 {
		s := mappings[index:]
		return append(b, s[1:s[0]+1]...)
	}
	b = append(b, s...)
	if c&inlineXOR == inlineXOR {
		// TODO: support and handle two-byte inline masks
		b[len(b)-1] ^= byte(index)
	} else {
		for p := len(b) - int(xorData[index]); p < len(b); p++ {
			index++
			b[p] ^= xorData[index]
		}
	}
	return b
}

// Sparse block handling code.

type valueRange struct {
	value  uint16 // header: value:stride
	lo, hi byte   // header: lo:n
}

type sparseBlocks struct {
	values []valueRange
	offset []uint16
}

var idnaSparse = sparseBlocks{
	values: idnaSparseValues[:],
	offset: idnaSparseOffset[:],
}

// Don't use newIdnaTrie to avoid unconditional linking in of the table.
var trie = &idnaTrie{}

// lookup determines the type of block n and looks up the value for b.
// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block
// is a list of ranges with an accompanying value. Given a matching range r,
// the value for b is given by r.value + (b - r.lo) * stride.
func (t *sparseBlocks) lookup(n uint32, b byte) uint16 {
	offset := t.offset[n]
	header := t.values[offset]
	lo := offset + 1
	hi := lo + uint16(header.lo)
	for lo < hi {
		m := lo + (hi-lo)/2
		r := t.values[m]
		if r.lo <= b && b <= r.hi {
			return r.value + uint16(b-r.lo)*header.value
		}
		if b < r.lo {
			hi = m
		} else {
			lo = m + 1
		}
	}
	return 0
}
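A minimal, self-contained sketch (not part of the vendored file) of the same range-based binary search that lookup performs above. The valueRange layout and the stride arithmetic mirror the definitions in this file; the sample data is invented for illustration.

package main

import "fmt"

type valueRange struct {
	value  uint16 // value for lo, then increasing by stride per byte
	lo, hi byte
}

// lookupSparse finds the range containing b and applies the stride, returning
// 0 when b falls outside every range, just like sparseBlocks.lookup.
func lookupSparse(header valueRange, ranges []valueRange, b byte) uint16 {
	lo, hi := 0, len(ranges)
	for lo < hi {
		m := lo + (hi-lo)/2
		r := ranges[m]
		if r.lo <= b && b <= r.hi {
			return r.value + uint16(b-r.lo)*header.value // header.value is the stride
		}
		if b < r.lo {
			hi = m
		} else {
			lo = m + 1
		}
	}
	return 0
}

func main() {
	header := valueRange{value: 2} // stride of 2 per byte
	ranges := []valueRange{{value: 10, lo: 0x80, hi: 0x8f}, {value: 50, lo: 0xa0, hi: 0xaf}}
	fmt.Println(lookupSparse(header, ranges, 0x83)) // 10 + 3*2 = 16
	fmt.Println(lookupSparse(header, ranges, 0x90)) // not covered by any range: 0
}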
119
vendor/golang.org/x/net/idna/trieval.go
generated
vendored
Normal file
@ -0,0 +1,119 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.

package idna

// This file contains definitions for interpreting the trie value of the idna
// trie generated by "go run gen*.go". It is shared by both the generator
// program and the resultant package. Sharing is achieved by the generator
// copying gen_trieval.go to trieval.go and changing what's above this comment.

// info holds information from the IDNA mapping table for a single rune. It is
// the value returned by a trie lookup. In most cases, all information fits in
// a 16-bit value. For mappings, this value may contain an index into a slice
// with the mapped string. Such mappings can consist of the actual mapped value
// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the
// input rune. This technique is used by the cases packages and reduces the
// table size significantly.
//
// The per-rune values have the following format:
//
//   if mapped {
//     if inlinedXOR {
//       15..13 inline XOR marker
//       12..11 unused
//       10..3  inline XOR mask
//     } else {
//       15..3  index into xor or mapping table
//     }
//   } else {
//       15..14 unused
//       13     mayNeedNorm
//       12..11 attributes
//       10..8  joining type
//       7..3   category type
//   }
//      2  use xor pattern
//   1..0  mapped category
//
// See the definitions below for a more detailed description of the various
// bits.
type info uint16

const (
	catSmallMask = 0x3
	catBigMask   = 0xF8
	indexShift   = 3
	xorBit       = 0x4    // interpret the index as an xor pattern
	inlineXOR    = 0xE000 // These bits are set if the XOR pattern is inlined.

	joinShift = 8
	joinMask  = 0x07

	// Attributes
	attributesMask = 0x1800
	viramaModifier = 0x1800
	modifier       = 0x1000
	rtl            = 0x0800

	mayNeedNorm = 0x2000
)

// A category corresponds to a category defined in the IDNA mapping table.
type category uint16

const (
	unknown              category = 0 // not currently defined in unicode.
	mapped               category = 1
	disallowedSTD3Mapped category = 2
	deviation            category = 3
)

const (
	valid               category = 0x08
	validNV8            category = 0x18
	validXV8            category = 0x28
	disallowed          category = 0x40
	disallowedSTD3Valid category = 0x80
	ignored             category = 0xC0
)

// join types and additional rune information
const (
	joiningL = (iota + 1)
	joiningD
	joiningT
	joiningR

	// the following types are derived during processing
	joinZWJ
	joinZWNJ
	joinVirama
	numJoinTypes
)

func (c info) isMapped() bool {
	return c&0x3 != 0
}

func (c info) category() category {
	small := c & catSmallMask
	if small != 0 {
		return category(small)
	}
	return category(c & catBigMask)
}

func (c info) joinType() info {
	if c.isMapped() {
		return 0
	}
	return (c >> joinShift) & joinMask
}

func (c info) isModifier() bool {
	return c&(modifier|catSmallMask) == modifier
}

func (c info) isViramaModifier() bool {
	return c&(attributesMask|catSmallMask) == viramaModifier
}
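A small illustrative sketch (not part of the vendored file) of how the masks above carve up an info value for an unmapped rune. The needed constants are repeated locally so the snippet compiles on its own; the sample value is invented.

package main

import "fmt"

const (
	catSmallMask = 0x3
	catBigMask   = 0xF8
	joinShift    = 8
	joinMask     = 0x07
	valid        = 0x08 // category "valid"
	joiningD     = 2    // dual-joining type
)

func main() {
	// An unmapped rune: bits 1..0 are zero, the category lives in bits 7..3,
	// and the joining type in bits 10..8.
	v := uint16(valid) | uint16(joiningD)<<joinShift

	isMapped := v&catSmallMask != 0
	category := v & catBigMask
	join := (v >> joinShift) & joinMask

	fmt.Println(isMapped)         // false
	fmt.Printf("%#x\n", category) // 0x8 (valid)
	fmt.Println(join)             // 2 (joiningD)
}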
802
vendor/golang.org/x/net/webdav/file.go
generated
vendored
Normal file
@ -0,0 +1,802 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package webdav
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// slashClean is equivalent to but slightly more efficient than
|
||||
// path.Clean("/" + name).
|
||||
func slashClean(name string) string {
|
||||
if name == "" || name[0] != '/' {
|
||||
name = "/" + name
|
||||
}
|
||||
return path.Clean(name)
|
||||
}
|
||||
|
||||
// A FileSystem implements access to a collection of named files. The elements
|
||||
// in a file path are separated by slash ('/', U+002F) characters, regardless
|
||||
// of host operating system convention.
|
||||
//
|
||||
// Each method has the same semantics as the os package's function of the same
|
||||
// name.
|
||||
//
|
||||
// Note that the os.Rename documentation says that "OS-specific restrictions
|
||||
// might apply". In particular, whether or not renaming a file or directory
|
||||
// overwriting another existing file or directory is an error is OS-dependent.
|
||||
type FileSystem interface {
|
||||
Mkdir(ctx context.Context, name string, perm os.FileMode) error
|
||||
OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error)
|
||||
RemoveAll(ctx context.Context, name string) error
|
||||
Rename(ctx context.Context, oldName, newName string) error
|
||||
Stat(ctx context.Context, name string) (os.FileInfo, error)
|
||||
}
|
||||
|
||||
// A File is returned by a FileSystem's OpenFile method and can be served by a
|
||||
// Handler.
|
||||
//
|
||||
// A File may optionally implement the DeadPropsHolder interface, if it can
|
||||
// load and save dead properties.
|
||||
type File interface {
|
||||
http.File
|
||||
io.Writer
|
||||
}
|
||||
|
||||
// A Dir implements FileSystem using the native file system restricted to a
|
||||
// specific directory tree.
|
||||
//
|
||||
// While the FileSystem.OpenFile method takes '/'-separated paths, a Dir's
|
||||
// string value is a filename on the native file system, not a URL, so it is
|
||||
// separated by filepath.Separator, which isn't necessarily '/'.
|
||||
//
|
||||
// An empty Dir is treated as ".".
|
||||
type Dir string
|
||||
|
||||
func (d Dir) resolve(name string) string {
|
||||
// This implementation is based on Dir.Open's code in the standard net/http package.
|
||||
if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
|
||||
strings.Contains(name, "\x00") {
|
||||
return ""
|
||||
}
|
||||
dir := string(d)
|
||||
if dir == "" {
|
||||
dir = "."
|
||||
}
|
||||
return filepath.Join(dir, filepath.FromSlash(slashClean(name)))
|
||||
}
|
||||
|
||||
func (d Dir) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
|
||||
if name = d.resolve(name); name == "" {
|
||||
return os.ErrNotExist
|
||||
}
|
||||
return os.Mkdir(name, perm)
|
||||
}
|
||||
|
||||
func (d Dir) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) {
|
||||
if name = d.resolve(name); name == "" {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
f, err := os.OpenFile(name, flag, perm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (d Dir) RemoveAll(ctx context.Context, name string) error {
|
||||
if name = d.resolve(name); name == "" {
|
||||
return os.ErrNotExist
|
||||
}
|
||||
if name == filepath.Clean(string(d)) {
|
||||
// Prohibit removing the virtual root directory.
|
||||
return os.ErrInvalid
|
||||
}
|
||||
return os.RemoveAll(name)
|
||||
}
|
||||
|
||||
func (d Dir) Rename(ctx context.Context, oldName, newName string) error {
|
||||
if oldName = d.resolve(oldName); oldName == "" {
|
||||
return os.ErrNotExist
|
||||
}
|
||||
if newName = d.resolve(newName); newName == "" {
|
||||
return os.ErrNotExist
|
||||
}
|
||||
if root := filepath.Clean(string(d)); root == oldName || root == newName {
|
||||
// Prohibit renaming from or to the virtual root directory.
|
||||
return os.ErrInvalid
|
||||
}
|
||||
return os.Rename(oldName, newName)
|
||||
}
|
||||
|
||||
func (d Dir) Stat(ctx context.Context, name string) (os.FileInfo, error) {
|
||||
if name = d.resolve(name); name == "" {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
return os.Stat(name)
|
||||
}
|
||||
|
||||
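A minimal usage sketch (not part of the vendored file) showing how the Dir FileSystem above is typically wired into this package's Handler. webdav.Handler, webdav.Dir and webdav.NewMemLS are part of the package's public API; the prefix, directory path and port are placeholders.

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/webdav"
)

func main() {
	h := &webdav.Handler{
		Prefix:     "/dav/",
		FileSystem: webdav.Dir("/srv/davroot"), // any local directory tree
		LockSystem: webdav.NewMemLS(),
		Logger: func(r *http.Request, err error) {
			if err != nil {
				log.Printf("webdav %s %s: %v", r.Method, r.URL.Path, err)
			}
		},
	}
	http.Handle("/dav/", h)
	log.Fatal(http.ListenAndServe(":8080", nil))
}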
// NewMemFS returns a new in-memory FileSystem implementation.
|
||||
func NewMemFS() FileSystem {
|
||||
return &memFS{
|
||||
root: memFSNode{
|
||||
children: make(map[string]*memFSNode),
|
||||
mode: 0660 | os.ModeDir,
|
||||
modTime: time.Now(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// A memFS implements FileSystem, storing all metadata and actual file data
|
||||
// in-memory. No limits on filesystem size are used, so it is not recommended
|
||||
// this be used where the clients are untrusted.
|
||||
//
|
||||
// Concurrent access is permitted. The tree structure is protected by a mutex,
|
||||
// and each node's contents and metadata are protected by a per-node mutex.
|
||||
//
|
||||
// TODO: Enforce file permissions.
|
||||
type memFS struct {
|
||||
mu sync.Mutex
|
||||
root memFSNode
|
||||
}
|
||||
|
||||
// TODO: clean up and rationalize the walk/find code.
|
||||
|
||||
// walk walks the directory tree for the fullname, calling f at each step. If f
|
||||
// returns an error, the walk will be aborted and return that same error.
|
||||
//
|
||||
// dir is the directory at that step, frag is the name fragment, and final is
|
||||
// whether it is the final step. For example, walking "/foo/bar/x" will result
|
||||
// in 3 calls to f:
|
||||
// - "/", "foo", false
|
||||
// - "/foo/", "bar", false
|
||||
// - "/foo/bar/", "x", true
|
||||
// The frag argument will be empty only if dir is the root node and the walk
|
||||
// ends at that root node.
|
||||
func (fs *memFS) walk(op, fullname string, f func(dir *memFSNode, frag string, final bool) error) error {
|
||||
original := fullname
|
||||
fullname = slashClean(fullname)
|
||||
|
||||
// Strip any leading "/"s to make fullname a relative path, as the walk
|
||||
// starts at fs.root.
|
||||
if fullname[0] == '/' {
|
||||
fullname = fullname[1:]
|
||||
}
|
||||
dir := &fs.root
|
||||
|
||||
for {
|
||||
frag, remaining := fullname, ""
|
||||
i := strings.IndexRune(fullname, '/')
|
||||
final := i < 0
|
||||
if !final {
|
||||
frag, remaining = fullname[:i], fullname[i+1:]
|
||||
}
|
||||
if frag == "" && dir != &fs.root {
|
||||
panic("webdav: empty path fragment for a clean path")
|
||||
}
|
||||
if err := f(dir, frag, final); err != nil {
|
||||
return &os.PathError{
|
||||
Op: op,
|
||||
Path: original,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
if final {
|
||||
break
|
||||
}
|
||||
child := dir.children[frag]
|
||||
if child == nil {
|
||||
return &os.PathError{
|
||||
Op: op,
|
||||
Path: original,
|
||||
Err: os.ErrNotExist,
|
||||
}
|
||||
}
|
||||
if !child.mode.IsDir() {
|
||||
return &os.PathError{
|
||||
Op: op,
|
||||
Path: original,
|
||||
Err: os.ErrInvalid,
|
||||
}
|
||||
}
|
||||
dir, fullname = child, remaining
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
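An illustrative, self-contained sketch (not part of the vendored file) of the fragment-splitting loop that walk performs, printing the (dir, frag, final) triples described in its comment. It only mimics the path handling; no tree is consulted.

package main

import (
	"fmt"
	"strings"
)

func main() {
	fullname := "foo/bar/x" // "/foo/bar/x" after the leading '/' is stripped
	dir := "/"
	for {
		frag, remaining := fullname, ""
		i := strings.IndexRune(fullname, '/')
		final := i < 0
		if !final {
			frag, remaining = fullname[:i], fullname[i+1:]
		}
		fmt.Printf("%q, %q, %v\n", dir, frag, final)
		if final {
			break
		}
		dir, fullname = dir+frag+"/", remaining
	}
	// Output:
	// "/", "foo", false
	// "/foo/", "bar", false
	// "/foo/bar/", "x", true
}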
// find returns the parent of the named node and the relative name fragment
|
||||
// from the parent to the child. For example, if finding "/foo/bar/baz" then
|
||||
// parent will be the node for "/foo/bar" and frag will be "baz".
|
||||
//
|
||||
// If the fullname names the root node, then parent, frag and err will be zero.
|
||||
//
|
||||
// find returns an error if the parent does not already exist or the parent
|
||||
// isn't a directory, but it will not return an error per se if the child does
|
||||
// not already exist. The error returned is either nil or an *os.PathError
|
||||
// whose Op is op.
|
||||
func (fs *memFS) find(op, fullname string) (parent *memFSNode, frag string, err error) {
|
||||
err = fs.walk(op, fullname, func(parent0 *memFSNode, frag0 string, final bool) error {
|
||||
if !final {
|
||||
return nil
|
||||
}
|
||||
if frag0 != "" {
|
||||
parent, frag = parent0, frag0
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return parent, frag, err
|
||||
}
|
||||
|
||||
func (fs *memFS) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
|
||||
dir, frag, err := fs.find("mkdir", name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if dir == nil {
|
||||
// We can't create the root.
|
||||
return os.ErrInvalid
|
||||
}
|
||||
if _, ok := dir.children[frag]; ok {
|
||||
return os.ErrExist
|
||||
}
|
||||
dir.children[frag] = &memFSNode{
|
||||
children: make(map[string]*memFSNode),
|
||||
mode: perm.Perm() | os.ModeDir,
|
||||
modTime: time.Now(),
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *memFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) {
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
|
||||
dir, frag, err := fs.find("open", name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var n *memFSNode
|
||||
if dir == nil {
|
||||
// We're opening the root.
|
||||
if runtime.GOOS == "zos" {
|
||||
if flag&os.O_WRONLY != 0 {
|
||||
return nil, os.ErrPermission
|
||||
}
|
||||
} else {
|
||||
if flag&(os.O_WRONLY|os.O_RDWR) != 0 {
|
||||
return nil, os.ErrPermission
|
||||
}
|
||||
}
|
||||
n, frag = &fs.root, "/"
|
||||
|
||||
} else {
|
||||
n = dir.children[frag]
|
||||
if flag&(os.O_SYNC|os.O_APPEND) != 0 {
|
||||
// memFile doesn't support these flags yet.
|
||||
return nil, os.ErrInvalid
|
||||
}
|
||||
if flag&os.O_CREATE != 0 {
|
||||
if flag&os.O_EXCL != 0 && n != nil {
|
||||
return nil, os.ErrExist
|
||||
}
|
||||
if n == nil {
|
||||
n = &memFSNode{
|
||||
mode: perm.Perm(),
|
||||
}
|
||||
dir.children[frag] = n
|
||||
}
|
||||
}
|
||||
if n == nil {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
if flag&(os.O_WRONLY|os.O_RDWR) != 0 && flag&os.O_TRUNC != 0 {
|
||||
n.mu.Lock()
|
||||
n.data = nil
|
||||
n.mu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
children := make([]os.FileInfo, 0, len(n.children))
|
||||
for cName, c := range n.children {
|
||||
children = append(children, c.stat(cName))
|
||||
}
|
||||
return &memFile{
|
||||
n: n,
|
||||
nameSnapshot: frag,
|
||||
childrenSnapshot: children,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (fs *memFS) RemoveAll(ctx context.Context, name string) error {
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
|
||||
dir, frag, err := fs.find("remove", name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if dir == nil {
|
||||
// We can't remove the root.
|
||||
return os.ErrInvalid
|
||||
}
|
||||
delete(dir.children, frag)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *memFS) Rename(ctx context.Context, oldName, newName string) error {
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
|
||||
oldName = slashClean(oldName)
|
||||
newName = slashClean(newName)
|
||||
if oldName == newName {
|
||||
return nil
|
||||
}
|
||||
if strings.HasPrefix(newName, oldName+"/") {
|
||||
// We can't rename oldName to be a sub-directory of itself.
|
||||
return os.ErrInvalid
|
||||
}
|
||||
|
||||
oDir, oFrag, err := fs.find("rename", oldName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if oDir == nil {
|
||||
// We can't rename from the root.
|
||||
return os.ErrInvalid
|
||||
}
|
||||
|
||||
nDir, nFrag, err := fs.find("rename", newName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if nDir == nil {
|
||||
// We can't rename to the root.
|
||||
return os.ErrInvalid
|
||||
}
|
||||
|
||||
oNode, ok := oDir.children[oFrag]
|
||||
if !ok {
|
||||
return os.ErrNotExist
|
||||
}
|
||||
if oNode.children != nil {
|
||||
if nNode, ok := nDir.children[nFrag]; ok {
|
||||
if nNode.children == nil {
|
||||
return errNotADirectory
|
||||
}
|
||||
if len(nNode.children) != 0 {
|
||||
return errDirectoryNotEmpty
|
||||
}
|
||||
}
|
||||
}
|
||||
delete(oDir.children, oFrag)
|
||||
nDir.children[nFrag] = oNode
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *memFS) Stat(ctx context.Context, name string) (os.FileInfo, error) {
|
||||
fs.mu.Lock()
|
||||
defer fs.mu.Unlock()
|
||||
|
||||
dir, frag, err := fs.find("stat", name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if dir == nil {
|
||||
// We're stat'ting the root.
|
||||
return fs.root.stat("/"), nil
|
||||
}
|
||||
if n, ok := dir.children[frag]; ok {
|
||||
return n.stat(path.Base(name)), nil
|
||||
}
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
|
||||
// A memFSNode represents a single entry in the in-memory filesystem and also
|
||||
// implements os.FileInfo.
|
||||
type memFSNode struct {
|
||||
// children is protected by memFS.mu.
|
||||
children map[string]*memFSNode
|
||||
|
||||
mu sync.Mutex
|
||||
data []byte
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
deadProps map[xml.Name]Property
|
||||
}
|
||||
|
||||
func (n *memFSNode) stat(name string) *memFileInfo {
|
||||
n.mu.Lock()
|
||||
defer n.mu.Unlock()
|
||||
return &memFileInfo{
|
||||
name: name,
|
||||
size: int64(len(n.data)),
|
||||
mode: n.mode,
|
||||
modTime: n.modTime,
|
||||
}
|
||||
}
|
||||
|
||||
func (n *memFSNode) DeadProps() (map[xml.Name]Property, error) {
|
||||
n.mu.Lock()
|
||||
defer n.mu.Unlock()
|
||||
if len(n.deadProps) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
ret := make(map[xml.Name]Property, len(n.deadProps))
|
||||
for k, v := range n.deadProps {
|
||||
ret[k] = v
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (n *memFSNode) Patch(patches []Proppatch) ([]Propstat, error) {
|
||||
n.mu.Lock()
|
||||
defer n.mu.Unlock()
|
||||
pstat := Propstat{Status: http.StatusOK}
|
||||
for _, patch := range patches {
|
||||
for _, p := range patch.Props {
|
||||
pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
|
||||
if patch.Remove {
|
||||
delete(n.deadProps, p.XMLName)
|
||||
continue
|
||||
}
|
||||
if n.deadProps == nil {
|
||||
n.deadProps = map[xml.Name]Property{}
|
||||
}
|
||||
n.deadProps[p.XMLName] = p
|
||||
}
|
||||
}
|
||||
return []Propstat{pstat}, nil
|
||||
}
|
||||
|
||||
type memFileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
}
|
||||
|
||||
func (f *memFileInfo) Name() string { return f.name }
|
||||
func (f *memFileInfo) Size() int64 { return f.size }
|
||||
func (f *memFileInfo) Mode() os.FileMode { return f.mode }
|
||||
func (f *memFileInfo) ModTime() time.Time { return f.modTime }
|
||||
func (f *memFileInfo) IsDir() bool { return f.mode.IsDir() }
|
||||
func (f *memFileInfo) Sys() interface{} { return nil }
|
||||
|
||||
// A memFile is a File implementation for a memFSNode. It is a per-file (not
|
||||
// per-node) read/write position, and a snapshot of the memFS' tree structure
|
||||
// (a node's name and children) for that node.
|
||||
type memFile struct {
|
||||
n *memFSNode
|
||||
nameSnapshot string
|
||||
childrenSnapshot []os.FileInfo
|
||||
// pos is protected by n.mu.
|
||||
pos int
|
||||
}
|
||||
|
||||
// A *memFile implements the optional DeadPropsHolder interface.
|
||||
var _ DeadPropsHolder = (*memFile)(nil)
|
||||
|
||||
func (f *memFile) DeadProps() (map[xml.Name]Property, error) { return f.n.DeadProps() }
|
||||
func (f *memFile) Patch(patches []Proppatch) ([]Propstat, error) { return f.n.Patch(patches) }
|
||||
|
||||
func (f *memFile) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *memFile) Read(p []byte) (int, error) {
|
||||
f.n.mu.Lock()
|
||||
defer f.n.mu.Unlock()
|
||||
if f.n.mode.IsDir() {
|
||||
return 0, os.ErrInvalid
|
||||
}
|
||||
if f.pos >= len(f.n.data) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
n := copy(p, f.n.data[f.pos:])
|
||||
f.pos += n
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (f *memFile) Readdir(count int) ([]os.FileInfo, error) {
|
||||
f.n.mu.Lock()
|
||||
defer f.n.mu.Unlock()
|
||||
if !f.n.mode.IsDir() {
|
||||
return nil, os.ErrInvalid
|
||||
}
|
||||
old := f.pos
|
||||
if old >= len(f.childrenSnapshot) {
|
||||
// The os.File Readdir docs say that at the end of a directory,
|
||||
// the error is io.EOF if count > 0 and nil if count <= 0.
|
||||
if count > 0 {
|
||||
return nil, io.EOF
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
if count > 0 {
|
||||
f.pos += count
|
||||
if f.pos > len(f.childrenSnapshot) {
|
||||
f.pos = len(f.childrenSnapshot)
|
||||
}
|
||||
} else {
|
||||
f.pos = len(f.childrenSnapshot)
|
||||
old = 0
|
||||
}
|
||||
return f.childrenSnapshot[old:f.pos], nil
|
||||
}
|
||||
|
||||
func (f *memFile) Seek(offset int64, whence int) (int64, error) {
|
||||
f.n.mu.Lock()
|
||||
defer f.n.mu.Unlock()
|
||||
npos := f.pos
|
||||
// TODO: How to handle offsets greater than the size of system int?
|
||||
switch whence {
|
||||
case os.SEEK_SET:
|
||||
npos = int(offset)
|
||||
case os.SEEK_CUR:
|
||||
npos += int(offset)
|
||||
case os.SEEK_END:
|
||||
npos = len(f.n.data) + int(offset)
|
||||
default:
|
||||
npos = -1
|
||||
}
|
||||
if npos < 0 {
|
||||
return 0, os.ErrInvalid
|
||||
}
|
||||
f.pos = npos
|
||||
return int64(f.pos), nil
|
||||
}
|
||||
|
||||
func (f *memFile) Stat() (os.FileInfo, error) {
|
||||
return f.n.stat(f.nameSnapshot), nil
|
||||
}
|
||||
|
||||
func (f *memFile) Write(p []byte) (int, error) {
|
||||
lenp := len(p)
|
||||
f.n.mu.Lock()
|
||||
defer f.n.mu.Unlock()
|
||||
|
||||
if f.n.mode.IsDir() {
|
||||
return 0, os.ErrInvalid
|
||||
}
|
||||
if f.pos < len(f.n.data) {
|
||||
n := copy(f.n.data[f.pos:], p)
|
||||
f.pos += n
|
||||
p = p[n:]
|
||||
} else if f.pos > len(f.n.data) {
|
||||
// Write permits the creation of holes, if we've seek'ed past the
|
||||
// existing end of file.
|
||||
if f.pos <= cap(f.n.data) {
|
||||
oldLen := len(f.n.data)
|
||||
f.n.data = f.n.data[:f.pos]
|
||||
hole := f.n.data[oldLen:]
|
||||
for i := range hole {
|
||||
hole[i] = 0
|
||||
}
|
||||
} else {
|
||||
d := make([]byte, f.pos, f.pos+len(p))
|
||||
copy(d, f.n.data)
|
||||
f.n.data = d
|
||||
}
|
||||
}
|
||||
|
||||
if len(p) > 0 {
|
||||
// We should only get here if f.pos == len(f.n.data).
|
||||
f.n.data = append(f.n.data, p...)
|
||||
f.pos = len(f.n.data)
|
||||
}
|
||||
f.n.modTime = time.Now()
|
||||
return lenp, nil
|
||||
}
|
||||
|
||||
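A standalone sketch (not part of the vendored file) of the hole-filling behaviour Write documents above, using only this package's public API (NewMemFS, OpenFile, Seek, Write, Stat); the file name is invented. Seeking past the end and then writing leaves zero bytes in the gap.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"golang.org/x/net/webdav"
)

func main() {
	ctx := context.Background()
	fs := webdav.NewMemFS()

	f, err := fs.OpenFile(ctx, "/hole.bin", os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		log.Fatal(err)
	}
	// Seek past the current end (os.SEEK_SET matches the constants used above),
	// then write: the four-byte gap is filled with zeros.
	if _, err := f.Seek(4, os.SEEK_SET); err != nil {
		log.Fatal(err)
	}
	if _, err := f.Write([]byte("data")); err != nil {
		log.Fatal(err)
	}
	f.Close()

	fi, err := fs.Stat(ctx, "/hole.bin")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(fi.Size()) // 8: four zero bytes followed by "data"
}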
// moveFiles moves files and/or directories from src to dst.
|
||||
//
|
||||
// See section 9.9.4 for when various HTTP status codes apply.
|
||||
func moveFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool) (status int, err error) {
|
||||
created := false
|
||||
if _, err := fs.Stat(ctx, dst); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return http.StatusForbidden, err
|
||||
}
|
||||
created = true
|
||||
} else if overwrite {
|
||||
// Section 9.9.3 says that "If a resource exists at the destination
|
||||
// and the Overwrite header is "T", then prior to performing the move,
|
||||
// the server must perform a DELETE with "Depth: infinity" on the
|
||||
// destination resource.
|
||||
if err := fs.RemoveAll(ctx, dst); err != nil {
|
||||
return http.StatusForbidden, err
|
||||
}
|
||||
} else {
|
||||
return http.StatusPreconditionFailed, os.ErrExist
|
||||
}
|
||||
if err := fs.Rename(ctx, src, dst); err != nil {
|
||||
return http.StatusForbidden, err
|
||||
}
|
||||
if created {
|
||||
return http.StatusCreated, nil
|
||||
}
|
||||
return http.StatusNoContent, nil
|
||||
}
|
||||
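A hedged, in-package test sketch (not part of the vendored file) exercising the status mapping moveFiles documents above: moving onto a non-existent destination should report 201 Created. It assumes it lives in a _test.go file of package webdav with "context", "net/http", "os" and "testing" imported; the file names are invented.

func TestMoveFilesCreatedSketch(t *testing.T) {
	ctx := context.Background()
	fs := NewMemFS()

	// Create the source file in the in-memory filesystem.
	f, err := fs.OpenFile(ctx, "/src.txt", os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		t.Fatal(err)
	}
	f.Close()

	// The destination does not exist and overwrite is false, so the move
	// should succeed and report http.StatusCreated (RFC 4918, section 9.9.4).
	status, err := moveFiles(ctx, fs, "/src.txt", "/dst.txt", false)
	if err != nil || status != http.StatusCreated {
		t.Fatalf("moveFiles = %d, %v; want %d, nil", status, err, http.StatusCreated)
	}
}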
|
||||
func copyProps(dst, src File) error {
|
||||
d, ok := dst.(DeadPropsHolder)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
s, ok := src.(DeadPropsHolder)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
m, err := s.DeadProps()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
props := make([]Property, 0, len(m))
|
||||
for _, prop := range m {
|
||||
props = append(props, prop)
|
||||
}
|
||||
_, err = d.Patch([]Proppatch{{Props: props}})
|
||||
return err
|
||||
}
|
||||
|
||||
// copyFiles copies files and/or directories from src to dst.
|
||||
//
|
||||
// See section 9.8.5 for when various HTTP status codes apply.
|
||||
func copyFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool, depth int, recursion int) (status int, err error) {
|
||||
if recursion == 1000 {
|
||||
return http.StatusInternalServerError, errRecursionTooDeep
|
||||
}
|
||||
recursion++
|
||||
|
||||
// TODO: section 9.8.3 says that "Note that an infinite-depth COPY of /A/
|
||||
// into /A/B/ could lead to infinite recursion if not handled correctly."
|
||||
|
||||
srcFile, err := fs.OpenFile(ctx, src, os.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return http.StatusNotFound, err
|
||||
}
|
||||
return http.StatusInternalServerError, err
|
||||
}
|
||||
defer srcFile.Close()
|
||||
srcStat, err := srcFile.Stat()
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return http.StatusNotFound, err
|
||||
}
|
||||
return http.StatusInternalServerError, err
|
||||
}
|
||||
srcPerm := srcStat.Mode() & os.ModePerm
|
||||
|
||||
created := false
|
||||
if _, err := fs.Stat(ctx, dst); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
created = true
|
||||
} else {
|
||||
return http.StatusForbidden, err
|
||||
}
|
||||
} else {
|
||||
if !overwrite {
|
||||
return http.StatusPreconditionFailed, os.ErrExist
|
||||
}
|
||||
if err := fs.RemoveAll(ctx, dst); err != nil && !os.IsNotExist(err) {
|
||||
return http.StatusForbidden, err
|
||||
}
|
||||
}
|
||||
|
||||
if srcStat.IsDir() {
|
||||
if err := fs.Mkdir(ctx, dst, srcPerm); err != nil {
|
||||
return http.StatusForbidden, err
|
||||
}
|
||||
if depth == infiniteDepth {
|
||||
children, err := srcFile.Readdir(-1)
|
||||
if err != nil {
|
||||
return http.StatusForbidden, err
|
||||
}
|
||||
for _, c := range children {
|
||||
name := c.Name()
|
||||
s := path.Join(src, name)
|
||||
d := path.Join(dst, name)
|
||||
cStatus, cErr := copyFiles(ctx, fs, s, d, overwrite, depth, recursion)
|
||||
if cErr != nil {
|
||||
// TODO: MultiStatus.
|
||||
return cStatus, cErr
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
dstFile, err := fs.OpenFile(ctx, dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, srcPerm)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return http.StatusConflict, err
|
||||
}
|
||||
return http.StatusForbidden, err
|
||||
|
||||
}
|
||||
_, copyErr := io.Copy(dstFile, srcFile)
|
||||
propsErr := copyProps(dstFile, srcFile)
|
||||
closeErr := dstFile.Close()
|
||||
if copyErr != nil {
|
||||
return http.StatusInternalServerError, copyErr
|
||||
}
|
||||
if propsErr != nil {
|
||||
return http.StatusInternalServerError, propsErr
|
||||
}
|
||||
if closeErr != nil {
|
||||
return http.StatusInternalServerError, closeErr
|
||||
}
|
||||
}
|
||||
|
||||
if created {
|
||||
return http.StatusCreated, nil
|
||||
}
|
||||
return http.StatusNoContent, nil
|
||||
}
|
||||
|
||||
// walkFS traverses filesystem fs starting at name up to depth levels.
|
||||
//
|
||||
// Allowed values for depth are 0, 1 or infiniteDepth. For each visited node,
|
||||
// walkFS calls walkFn. If a visited file system node is a directory and
|
||||
// walkFn returns filepath.SkipDir, walkFS will skip traversal of this node.
|
||||
func walkFS(ctx context.Context, fs FileSystem, depth int, name string, info os.FileInfo, walkFn filepath.WalkFunc) error {
|
||||
// This implementation is based on Walk's code in the standard path/filepath package.
|
||||
err := walkFn(name, info, nil)
|
||||
if err != nil {
|
||||
if info.IsDir() && err == filepath.SkipDir {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
if !info.IsDir() || depth == 0 {
|
||||
return nil
|
||||
}
|
||||
if depth == 1 {
|
||||
depth = 0
|
||||
}
|
||||
|
||||
// Read directory names.
|
||||
f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
return walkFn(name, info, err)
|
||||
}
|
||||
fileInfos, err := f.Readdir(0)
|
||||
f.Close()
|
||||
if err != nil {
|
||||
return walkFn(name, info, err)
|
||||
}
|
||||
|
||||
for _, fileInfo := range fileInfos {
|
||||
filename := path.Join(name, fileInfo.Name())
|
||||
fileInfo, err := fs.Stat(ctx, filename)
|
||||
if err != nil {
|
||||
if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
err = walkFS(ctx, fs, depth, filename, fileInfo, walkFn)
|
||||
if err != nil {
|
||||
if !fileInfo.IsDir() || err != filepath.SkipDir {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
173
vendor/golang.org/x/net/webdav/if.go
generated
vendored
Normal file
@ -0,0 +1,173 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package webdav

// The If header is covered by Section 10.4.
// http://www.webdav.org/specs/rfc4918.html#HEADER_If

import (
	"strings"
)

// ifHeader is a disjunction (OR) of ifLists.
type ifHeader struct {
	lists []ifList
}

// ifList is a conjunction (AND) of Conditions, and an optional resource tag.
type ifList struct {
	resourceTag string
	conditions  []Condition
}

// parseIfHeader parses the "If: foo bar" HTTP header. The httpHeader string
// should omit the "If:" prefix and have any "\r\n"s collapsed to a " ", as is
// returned by req.Header.Get("If") for a http.Request req.
func parseIfHeader(httpHeader string) (h ifHeader, ok bool) {
	s := strings.TrimSpace(httpHeader)
	switch tokenType, _, _ := lex(s); tokenType {
	case '(':
		return parseNoTagLists(s)
	case angleTokenType:
		return parseTaggedLists(s)
	default:
		return ifHeader{}, false
	}
}

func parseNoTagLists(s string) (h ifHeader, ok bool) {
	for {
		l, remaining, ok := parseList(s)
		if !ok {
			return ifHeader{}, false
		}
		h.lists = append(h.lists, l)
		if remaining == "" {
			return h, true
		}
		s = remaining
	}
}

func parseTaggedLists(s string) (h ifHeader, ok bool) {
	resourceTag, n := "", 0
	for first := true; ; first = false {
		tokenType, tokenStr, remaining := lex(s)
		switch tokenType {
		case angleTokenType:
			if !first && n == 0 {
				return ifHeader{}, false
			}
			resourceTag, n = tokenStr, 0
			s = remaining
		case '(':
			n++
			l, remaining, ok := parseList(s)
			if !ok {
				return ifHeader{}, false
			}
			l.resourceTag = resourceTag
			h.lists = append(h.lists, l)
			if remaining == "" {
				return h, true
			}
			s = remaining
		default:
			return ifHeader{}, false
		}
	}
}

func parseList(s string) (l ifList, remaining string, ok bool) {
	tokenType, _, s := lex(s)
	if tokenType != '(' {
		return ifList{}, "", false
	}
	for {
		tokenType, _, remaining = lex(s)
		if tokenType == ')' {
			if len(l.conditions) == 0 {
				return ifList{}, "", false
			}
			return l, remaining, true
		}
		c, remaining, ok := parseCondition(s)
		if !ok {
			return ifList{}, "", false
		}
		l.conditions = append(l.conditions, c)
		s = remaining
	}
}

func parseCondition(s string) (c Condition, remaining string, ok bool) {
	tokenType, tokenStr, s := lex(s)
	if tokenType == notTokenType {
		c.Not = true
		tokenType, tokenStr, s = lex(s)
	}
	switch tokenType {
	case strTokenType, angleTokenType:
		c.Token = tokenStr
	case squareTokenType:
		c.ETag = tokenStr
	default:
		return Condition{}, "", false
	}
	return c, s, true
}

// Single-rune tokens like '(' or ')' have a token type equal to their rune.
// All other tokens have a negative token type.
const (
	errTokenType    = rune(-1)
	eofTokenType    = rune(-2)
	strTokenType    = rune(-3)
	notTokenType    = rune(-4)
	angleTokenType  = rune(-5)
	squareTokenType = rune(-6)
)

func lex(s string) (tokenType rune, tokenStr string, remaining string) {
	// The net/textproto Reader that parses the HTTP header will collapse
	// Linear White Space that spans multiple "\r\n" lines to a single " ",
	// so we don't need to look for '\r' or '\n'.
	for len(s) > 0 && (s[0] == '\t' || s[0] == ' ') {
		s = s[1:]
	}
	if len(s) == 0 {
		return eofTokenType, "", ""
	}
	i := 0
loop:
	for ; i < len(s); i++ {
		switch s[i] {
		case '\t', ' ', '(', ')', '<', '>', '[', ']':
			break loop
		}
	}

	if i != 0 {
		tokenStr, remaining = s[:i], s[i:]
		if tokenStr == "Not" {
			return notTokenType, "", remaining
		}
		return strTokenType, tokenStr, remaining
	}

	j := 0
	switch s[0] {
	case '<':
		j, tokenType = strings.IndexByte(s, '>'), angleTokenType
	case '[':
		j, tokenType = strings.IndexByte(s, ']'), squareTokenType
	default:
		return rune(s[0]), "", s[1:]
	}
	if j < 0 {
		return errTokenType, "", ""
	}
	return tokenType, s[1:j], s[j+1:]
}
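A sketch (not part of the vendored file) of what the parser above produces. Inside this package, e.g. in a test, a tagged If header such as

  If: <http://example.com/locked/file> (<urn:example:locktoken> ["xyzzy"])

would be expected to parse (via parseIfHeader on everything after "If:") into the following structure; the URL, lock token and entity tag are invented for illustration.

var wantHeader = ifHeader{
	lists: []ifList{{
		resourceTag: "http://example.com/locked/file",
		conditions: []Condition{
			{Token: "urn:example:locktoken"},
			{ETag: `"xyzzy"`},
		},
	}},
}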
11
vendor/golang.org/x/net/webdav/internal/xml/README
generated
vendored
Normal file
@ -0,0 +1,11 @@
This is a fork of the encoding/xml package at ca1d6c4, the last commit before
https://go.googlesource.com/go/+/c0d6d33 "encoding/xml: restore Go 1.4 name
space behavior" made late in the lead-up to the Go 1.5 release.

The list of encoding/xml changes is at
https://go.googlesource.com/go/+log/master/src/encoding/xml

This fork is temporary, and I (nigeltao) expect to revert it after Go 1.6 is
released.

See http://golang.org/issue/11841
1223
vendor/golang.org/x/net/webdav/internal/xml/marshal.go
generated
vendored
Normal file
File diff suppressed because it is too large
692
vendor/golang.org/x/net/webdav/internal/xml/read.go
generated
vendored
Normal file
@ -0,0 +1,692 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package xml
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// BUG(rsc): Mapping between XML elements and data structures is inherently flawed:
|
||||
// an XML element is an order-dependent collection of anonymous
|
||||
// values, while a data structure is an order-independent collection
|
||||
// of named values.
|
||||
// See package json for a textual representation more suitable
|
||||
// to data structures.
|
||||
|
||||
// Unmarshal parses the XML-encoded data and stores the result in
|
||||
// the value pointed to by v, which must be an arbitrary struct,
|
||||
// slice, or string. Well-formed data that does not fit into v is
|
||||
// discarded.
|
||||
//
|
||||
// Because Unmarshal uses the reflect package, it can only assign
|
||||
// to exported (upper case) fields. Unmarshal uses a case-sensitive
|
||||
// comparison to match XML element names to tag values and struct
|
||||
// field names.
|
||||
//
|
||||
// Unmarshal maps an XML element to a struct using the following rules.
|
||||
// In the rules, the tag of a field refers to the value associated with the
|
||||
// key 'xml' in the struct field's tag (see the example above).
|
||||
//
|
||||
// * If the struct has a field of type []byte or string with tag
|
||||
// ",innerxml", Unmarshal accumulates the raw XML nested inside the
|
||||
// element in that field. The rest of the rules still apply.
|
||||
//
|
||||
// * If the struct has a field named XMLName of type xml.Name,
|
||||
// Unmarshal records the element name in that field.
|
||||
//
|
||||
// * If the XMLName field has an associated tag of the form
|
||||
// "name" or "namespace-URL name", the XML element must have
|
||||
// the given name (and, optionally, name space) or else Unmarshal
|
||||
// returns an error.
|
||||
//
|
||||
// * If the XML element has an attribute whose name matches a
|
||||
// struct field name with an associated tag containing ",attr" or
|
||||
// the explicit name in a struct field tag of the form "name,attr",
|
||||
// Unmarshal records the attribute value in that field.
|
||||
//
|
||||
// * If the XML element contains character data, that data is
|
||||
// accumulated in the first struct field that has tag ",chardata".
|
||||
// The struct field may have type []byte or string.
|
||||
// If there is no such field, the character data is discarded.
|
||||
//
|
||||
// * If the XML element contains comments, they are accumulated in
|
||||
// the first struct field that has tag ",comment". The struct
|
||||
// field may have type []byte or string. If there is no such
|
||||
// field, the comments are discarded.
|
||||
//
|
||||
// * If the XML element contains a sub-element whose name matches
|
||||
// the prefix of a tag formatted as "a" or "a>b>c", unmarshal
|
||||
// will descend into the XML structure looking for elements with the
|
||||
// given names, and will map the innermost elements to that struct
|
||||
// field. A tag starting with ">" is equivalent to one starting
|
||||
// with the field name followed by ">".
|
||||
//
|
||||
// * If the XML element contains a sub-element whose name matches
|
||||
// a struct field's XMLName tag and the struct field has no
|
||||
// explicit name tag as per the previous rule, unmarshal maps
|
||||
// the sub-element to that struct field.
|
||||
//
|
||||
// * If the XML element contains a sub-element whose name matches a
|
||||
// field without any mode flags (",attr", ",chardata", etc), Unmarshal
|
||||
// maps the sub-element to that struct field.
|
||||
//
|
||||
// * If the XML element contains a sub-element that hasn't matched any
|
||||
// of the above rules and the struct has a field with tag ",any",
|
||||
// unmarshal maps the sub-element to that struct field.
|
||||
//
|
||||
// * An anonymous struct field is handled as if the fields of its
|
||||
// value were part of the outer struct.
|
||||
//
|
||||
// * A struct field with tag "-" is never unmarshalled into.
|
||||
//
|
||||
// Unmarshal maps an XML element to a string or []byte by saving the
|
||||
// concatenation of that element's character data in the string or
|
||||
// []byte. The saved []byte is never nil.
|
||||
//
|
||||
// Unmarshal maps an attribute value to a string or []byte by saving
|
||||
// the value in the string or slice.
|
||||
//
|
||||
// Unmarshal maps an XML element to a slice by extending the length of
|
||||
// the slice and mapping the element to the newly created value.
|
||||
//
|
||||
// Unmarshal maps an XML element or attribute value to a bool by
|
||||
// setting it to the boolean value represented by the string.
|
||||
//
|
||||
// Unmarshal maps an XML element or attribute value to an integer or
|
||||
// floating-point field by setting the field to the result of
|
||||
// interpreting the string value in decimal. There is no check for
|
||||
// overflow.
|
||||
//
|
||||
// Unmarshal maps an XML element to an xml.Name by recording the
|
||||
// element name.
|
||||
//
|
||||
// Unmarshal maps an XML element to a pointer by setting the pointer
|
||||
// to a freshly allocated value and then mapping the element to that value.
|
||||
//
|
||||
func Unmarshal(data []byte, v interface{}) error {
|
||||
return NewDecoder(bytes.NewReader(data)).Decode(v)
|
||||
}
|
||||
|
||||
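A standalone sketch (not part of the vendored file) of the tag rules described above, written against the standard library's encoding/xml, which this internal fork mirrors for the features shown; the Person type and input are invented.

package main

import (
	"encoding/xml"
	"fmt"
	"log"
)

type Person struct {
	XMLName xml.Name `xml:"person"`   // element name must be <person>
	ID      int      `xml:"id,attr"`  // taken from the id="..." attribute
	Name    string   `xml:"name"`     // taken from the <name> sub-element
	Comment string   `xml:",comment"` // accumulates XML comments
}

func main() {
	data := []byte(`<person id="7"><name>Ada</name><!--pioneer--></person>`)
	var p Person
	if err := xml.Unmarshal(data, &p); err != nil {
		log.Fatal(err)
	}
	fmt.Println(p.ID, p.Name, p.Comment) // 7 Ada pioneer
}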
// Decode works like xml.Unmarshal, except it reads the decoder
|
||||
// stream to find the start element.
|
||||
func (d *Decoder) Decode(v interface{}) error {
|
||||
return d.DecodeElement(v, nil)
|
||||
}
|
||||
|
||||
// DecodeElement works like xml.Unmarshal except that it takes
|
||||
// a pointer to the start XML element to decode into v.
|
||||
// It is useful when a client reads some raw XML tokens itself
|
||||
// but also wants to defer to Unmarshal for some elements.
|
||||
func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error {
|
||||
val := reflect.ValueOf(v)
|
||||
if val.Kind() != reflect.Ptr {
|
||||
return errors.New("non-pointer passed to Unmarshal")
|
||||
}
|
||||
return d.unmarshal(val.Elem(), start)
|
||||
}
|
||||
|
||||
// An UnmarshalError represents an error in the unmarshalling process.
|
||||
type UnmarshalError string
|
||||
|
||||
func (e UnmarshalError) Error() string { return string(e) }
|
||||
|
||||
// Unmarshaler is the interface implemented by objects that can unmarshal
|
||||
// an XML element description of themselves.
|
||||
//
|
||||
// UnmarshalXML decodes a single XML element
|
||||
// beginning with the given start element.
|
||||
// If it returns an error, the outer call to Unmarshal stops and
|
||||
// returns that error.
|
||||
// UnmarshalXML must consume exactly one XML element.
|
||||
// One common implementation strategy is to unmarshal into
|
||||
// a separate value with a layout matching the expected XML
|
||||
// using d.DecodeElement, and then to copy the data from
|
||||
// that value into the receiver.
|
||||
// Another common strategy is to use d.Token to process the
|
||||
// XML object one token at a time.
|
||||
// UnmarshalXML may not use d.RawToken.
|
||||
type Unmarshaler interface {
|
||||
UnmarshalXML(d *Decoder, start StartElement) error
|
||||
}
|
||||
|
||||
// UnmarshalerAttr is the interface implemented by objects that can unmarshal
|
||||
// an XML attribute description of themselves.
|
||||
//
|
||||
// UnmarshalXMLAttr decodes a single XML attribute.
|
||||
// If it returns an error, the outer call to Unmarshal stops and
|
||||
// returns that error.
|
||||
// UnmarshalXMLAttr is used only for struct fields with the
|
||||
// "attr" option in the field tag.
|
||||
type UnmarshalerAttr interface {
|
||||
UnmarshalXMLAttr(attr Attr) error
|
||||
}
|
||||
|
||||
// receiverType returns the receiver type to use in an expression like "%s.MethodName".
|
||||
func receiverType(val interface{}) string {
|
||||
t := reflect.TypeOf(val)
|
||||
if t.Name() != "" {
|
||||
return t.String()
|
||||
}
|
||||
return "(" + t.String() + ")"
|
||||
}
|
||||
|
||||
// unmarshalInterface unmarshals a single XML element into val.
|
||||
// start is the opening tag of the element.
|
||||
func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error {
|
||||
// Record that decoder must stop at end tag corresponding to start.
|
||||
p.pushEOF()
|
||||
|
||||
p.unmarshalDepth++
|
||||
err := val.UnmarshalXML(p, *start)
|
||||
p.unmarshalDepth--
|
||||
if err != nil {
|
||||
p.popEOF()
|
||||
return err
|
||||
}
|
||||
|
||||
if !p.popEOF() {
|
||||
return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// unmarshalTextInterface unmarshals a single XML element into val.
|
||||
// The chardata contained in the element (but not its children)
|
||||
// is passed to the text unmarshaler.
|
||||
func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error {
|
||||
var buf []byte
|
||||
depth := 1
|
||||
for depth > 0 {
|
||||
t, err := p.Token()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch t := t.(type) {
|
||||
case CharData:
|
||||
if depth == 1 {
|
||||
buf = append(buf, t...)
|
||||
}
|
||||
case StartElement:
|
||||
depth++
|
||||
case EndElement:
|
||||
depth--
|
||||
}
|
||||
}
|
||||
return val.UnmarshalText(buf)
|
||||
}
|
||||
|
||||
// unmarshalAttr unmarshals a single XML attribute into val.
|
||||
func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error {
|
||||
if val.Kind() == reflect.Ptr {
|
||||
if val.IsNil() {
|
||||
val.Set(reflect.New(val.Type().Elem()))
|
||||
}
|
||||
val = val.Elem()
|
||||
}
|
||||
|
||||
if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) {
|
||||
// This is an unmarshaler with a non-pointer receiver,
|
||||
// so it's likely to be incorrect, but we do what we're told.
|
||||
return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr)
|
||||
}
|
||||
if val.CanAddr() {
|
||||
pv := val.Addr()
|
||||
if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) {
|
||||
return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr)
|
||||
}
|
||||
}
|
||||
|
||||
// Not an UnmarshalerAttr; try encoding.TextUnmarshaler.
|
||||
if val.CanInterface() && val.Type().Implements(textUnmarshalerType) {
|
||||
// This is an unmarshaler with a non-pointer receiver,
|
||||
// so it's likely to be incorrect, but we do what we're told.
|
||||
return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value))
|
||||
}
|
||||
if val.CanAddr() {
|
||||
pv := val.Addr()
|
||||
if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
|
||||
return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value))
|
||||
}
|
||||
}
|
||||
|
||||
copyValue(val, []byte(attr.Value))
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
|
||||
unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem()
|
||||
textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
|
||||
)
|
||||
|
||||
// Unmarshal a single XML element into val.
|
||||
func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error {
|
||||
// Find start element if we need it.
|
||||
if start == nil {
|
||||
for {
|
||||
tok, err := p.Token()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if t, ok := tok.(StartElement); ok {
|
||||
start = &t
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Load value from interface, but only if the result will be
|
||||
// usefully addressable.
|
||||
if val.Kind() == reflect.Interface && !val.IsNil() {
|
||||
e := val.Elem()
|
||||
if e.Kind() == reflect.Ptr && !e.IsNil() {
|
||||
val = e
|
||||
}
|
||||
}
|
||||
|
||||
if val.Kind() == reflect.Ptr {
|
||||
if val.IsNil() {
|
||||
val.Set(reflect.New(val.Type().Elem()))
|
||||
}
|
||||
val = val.Elem()
|
||||
}
|
||||
|
||||
if val.CanInterface() && val.Type().Implements(unmarshalerType) {
|
||||
// This is an unmarshaler with a non-pointer receiver,
|
||||
// so it's likely to be incorrect, but we do what we're told.
|
||||
return p.unmarshalInterface(val.Interface().(Unmarshaler), start)
|
||||
}
|
||||
|
||||
if val.CanAddr() {
|
||||
pv := val.Addr()
|
||||
if pv.CanInterface() && pv.Type().Implements(unmarshalerType) {
|
||||
return p.unmarshalInterface(pv.Interface().(Unmarshaler), start)
|
||||
}
|
||||
}
|
||||
|
||||
if val.CanInterface() && val.Type().Implements(textUnmarshalerType) {
|
||||
return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start)
|
||||
}
|
||||
|
||||
if val.CanAddr() {
|
||||
pv := val.Addr()
|
||||
if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
|
||||
return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
data []byte
|
||||
saveData reflect.Value
|
||||
comment []byte
|
||||
saveComment reflect.Value
|
||||
saveXML reflect.Value
|
||||
saveXMLIndex int
|
||||
saveXMLData []byte
|
||||
saveAny reflect.Value
|
||||
sv reflect.Value
|
||||
tinfo *typeInfo
|
||||
err error
|
||||
)
|
||||
|
||||
switch v := val; v.Kind() {
|
||||
default:
|
||||
return errors.New("unknown type " + v.Type().String())
|
||||
|
||||
case reflect.Interface:
|
||||
// TODO: For now, simply ignore the field. In the near
|
||||
// future we may choose to unmarshal the start
|
||||
// element on it, if not nil.
|
||||
return p.Skip()
|
||||
|
||||
case reflect.Slice:
|
||||
typ := v.Type()
|
||||
if typ.Elem().Kind() == reflect.Uint8 {
|
||||
// []byte
|
||||
saveData = v
|
||||
break
|
||||
}
|
||||
|
||||
// Slice of element values.
|
||||
// Grow slice.
|
||||
n := v.Len()
|
||||
if n >= v.Cap() {
|
||||
ncap := 2 * n
|
||||
if ncap < 4 {
|
||||
ncap = 4
|
||||
}
|
||||
new := reflect.MakeSlice(typ, n, ncap)
|
||||
reflect.Copy(new, v)
|
||||
v.Set(new)
|
||||
}
|
||||
v.SetLen(n + 1)
|
||||
|
||||
// Recur to read element into slice.
|
||||
if err := p.unmarshal(v.Index(n), start); err != nil {
|
||||
v.SetLen(n)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String:
|
||||
saveData = v
|
||||
|
||||
case reflect.Struct:
|
||||
typ := v.Type()
|
||||
if typ == nameType {
|
||||
v.Set(reflect.ValueOf(start.Name))
|
||||
break
|
||||
}
|
||||
|
||||
sv = v
|
||||
tinfo, err = getTypeInfo(typ)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Validate and assign element name.
|
||||
if tinfo.xmlname != nil {
|
||||
finfo := tinfo.xmlname
|
||||
if finfo.name != "" && finfo.name != start.Name.Local {
|
||||
return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">")
|
||||
}
|
||||
if finfo.xmlns != "" && finfo.xmlns != start.Name.Space {
|
||||
e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have "
|
||||
if start.Name.Space == "" {
|
||||
e += "no name space"
|
||||
} else {
|
||||
e += start.Name.Space
|
||||
}
|
||||
return UnmarshalError(e)
|
||||
}
|
||||
fv := finfo.value(sv)
|
||||
if _, ok := fv.Interface().(Name); ok {
|
||||
fv.Set(reflect.ValueOf(start.Name))
|
||||
}
|
||||
}
|
||||
|
||||
// Assign attributes.
|
||||
// Also, determine whether we need to save character data or comments.
|
||||
for i := range tinfo.fields {
|
||||
finfo := &tinfo.fields[i]
|
||||
switch finfo.flags & fMode {
|
||||
case fAttr:
|
||||
strv := finfo.value(sv)
|
||||
// Look for attribute.
|
||||
for _, a := range start.Attr {
|
||||
if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) {
|
||||
if err := p.unmarshalAttr(strv, a); err != nil {
|
||||
return err
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
case fCharData:
|
||||
if !saveData.IsValid() {
|
||||
saveData = finfo.value(sv)
|
||||
}
|
||||
|
||||
case fComment:
|
||||
if !saveComment.IsValid() {
|
||||
saveComment = finfo.value(sv)
|
||||
}
|
||||
|
||||
case fAny, fAny | fElement:
|
||||
if !saveAny.IsValid() {
|
||||
saveAny = finfo.value(sv)
|
||||
}
|
||||
|
||||
case fInnerXml:
|
||||
if !saveXML.IsValid() {
|
||||
saveXML = finfo.value(sv)
|
||||
if p.saved == nil {
|
||||
saveXMLIndex = 0
|
||||
p.saved = new(bytes.Buffer)
|
||||
} else {
|
||||
saveXMLIndex = p.savedOffset()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Find end element.
|
||||
// Process sub-elements along the way.
|
||||
Loop:
|
||||
for {
|
||||
var savedOffset int
|
||||
if saveXML.IsValid() {
|
||||
savedOffset = p.savedOffset()
|
||||
}
|
||||
tok, err := p.Token()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch t := tok.(type) {
|
||||
case StartElement:
|
||||
consumed := false
|
||||
if sv.IsValid() {
|
||||
consumed, err = p.unmarshalPath(tinfo, sv, nil, &t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !consumed && saveAny.IsValid() {
|
||||
consumed = true
|
||||
if err := p.unmarshal(saveAny, &t); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if !consumed {
|
||||
if err := p.Skip(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
case EndElement:
|
||||
if saveXML.IsValid() {
|
||||
saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset]
|
||||
if saveXMLIndex == 0 {
|
||||
p.saved = nil
|
||||
}
|
||||
}
|
||||
break Loop
|
||||
|
||||
case CharData:
|
||||
if saveData.IsValid() {
|
||||
data = append(data, t...)
|
||||
}
|
||||
|
||||
case Comment:
|
||||
if saveComment.IsValid() {
|
||||
comment = append(comment, t...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) {
|
||||
if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {
|
||||
return err
|
||||
}
|
||||
saveData = reflect.Value{}
|
||||
}
|
||||
|
||||
if saveData.IsValid() && saveData.CanAddr() {
|
||||
pv := saveData.Addr()
|
||||
if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
|
||||
if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {
|
||||
return err
|
||||
}
|
||||
saveData = reflect.Value{}
|
||||
}
|
||||
}
|
||||
|
||||
if err := copyValue(saveData, data); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch t := saveComment; t.Kind() {
|
||||
case reflect.String:
|
||||
t.SetString(string(comment))
|
||||
case reflect.Slice:
|
||||
t.Set(reflect.ValueOf(comment))
|
||||
}
|
||||
|
||||
switch t := saveXML; t.Kind() {
|
||||
case reflect.String:
|
||||
t.SetString(string(saveXMLData))
|
||||
case reflect.Slice:
|
||||
t.Set(reflect.ValueOf(saveXMLData))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyValue(dst reflect.Value, src []byte) (err error) {
|
||||
dst0 := dst
|
||||
|
||||
if dst.Kind() == reflect.Ptr {
|
||||
if dst.IsNil() {
|
||||
dst.Set(reflect.New(dst.Type().Elem()))
|
||||
}
|
||||
dst = dst.Elem()
|
||||
}
|
||||
|
||||
// Save accumulated data.
|
||||
switch dst.Kind() {
|
||||
case reflect.Invalid:
|
||||
// Probably a comment.
|
||||
default:
|
||||
return errors.New("cannot unmarshal into " + dst0.Type().String())
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dst.SetInt(itmp)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
utmp, err := strconv.ParseUint(string(src), 10, dst.Type().Bits())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dst.SetUint(utmp)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dst.SetFloat(ftmp)
|
||||
case reflect.Bool:
|
||||
value, err := strconv.ParseBool(strings.TrimSpace(string(src)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dst.SetBool(value)
|
||||
case reflect.String:
|
||||
dst.SetString(string(src))
|
||||
case reflect.Slice:
|
||||
if len(src) == 0 {
|
||||
// non-nil to flag presence
|
||||
src = []byte{}
|
||||
}
|
||||
dst.SetBytes(src)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// unmarshalPath walks down an XML structure looking for wanted
|
||||
// paths, and calls unmarshal on them.
|
||||
// The consumed result tells whether XML elements have been consumed
|
||||
// from the Decoder until start's matching end element, or if it's
|
||||
// still untouched because start is uninteresting for sv's fields.
|
||||
func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) {
|
||||
recurse := false
|
||||
Loop:
|
||||
for i := range tinfo.fields {
|
||||
finfo := &tinfo.fields[i]
|
||||
if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space {
|
||||
continue
|
||||
}
|
||||
for j := range parents {
|
||||
if parents[j] != finfo.parents[j] {
|
||||
continue Loop
|
||||
}
|
||||
}
|
||||
if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local {
|
||||
// It's a perfect match, unmarshal the field.
|
||||
return true, p.unmarshal(finfo.value(sv), start)
|
||||
}
|
||||
if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local {
|
||||
// It's a prefix for the field. Break and recurse
|
||||
// since it's not ok for one field path to be itself
|
||||
// the prefix for another field path.
|
||||
recurse = true
|
||||
|
||||
// We can reuse the same slice as long as we
|
||||
// don't try to append to it.
|
||||
parents = finfo.parents[:len(parents)+1]
|
||||
break
|
||||
}
|
||||
}
|
||||
if !recurse {
|
||||
// We have no business with this element.
|
||||
return false, nil
|
||||
}
|
||||
// The element is not a perfect match for any field, but one
|
||||
// or more fields have the path to this element as a parent
|
||||
// prefix. Recurse and attempt to match these.
|
||||
for {
|
||||
var tok Token
|
||||
tok, err = p.Token()
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
switch t := tok.(type) {
|
||||
case StartElement:
|
||||
consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
if !consumed2 {
|
||||
if err := p.Skip(); err != nil {
|
||||
return true, err
|
||||
}
|
||||
}
|
||||
case EndElement:
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Skip reads tokens until it has consumed the end element
// matching the most recent start element already consumed.
// It recurs if it encounters a start element, so it can be used to
// skip nested structures.
// It returns nil if it finds an end element matching the start
// element; otherwise it returns an error describing the problem.
func (d *Decoder) Skip() error {
	for {
		tok, err := d.Token()
		if err != nil {
			return err
		}
		switch tok.(type) {
		case StartElement:
			if err := d.Skip(); err != nil {
				return err
			}
		case EndElement:
			return nil
		}
	}
}
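The decode loop above is driven by the struct-tag flags parsed in typeinfo.go: attributes, character data, comments, inner XML and nested element paths. As an illustration only, the sketch below exercises the same tag vocabulary through the standard library's encoding/xml, which this internal fork mirrors; the Book type and the input document are invented for the example.

package main

import (
	"encoding/xml"
	"fmt"
)

// Book uses the tag flags handled by the decoder above: an attribute, a
// plain element, a nested a>b element path, an innerxml capture and a comment.
type Book struct {
	ID      string `xml:"id,attr"`
	Title   string `xml:"title"`
	Author  string `xml:"meta>author"`
	Raw     string `xml:",innerxml"`
	Comment string `xml:",comment"`
}

func main() {
	const doc = `<book id="42"><title>WebDAV</title><meta><author>Gopher</author></meta><!--draft--></book>`
	var b Book
	if err := xml.Unmarshal([]byte(doc), &b); err != nil {
		panic(err)
	}
	fmt.Println(b.ID, b.Title, b.Author, b.Comment) // 42 WebDAV Gopher draft
}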
|
371
vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go
generated
vendored
Normal file
@ -0,0 +1,371 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package xml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// typeInfo holds details for the xml representation of a type.
|
||||
type typeInfo struct {
|
||||
xmlname *fieldInfo
|
||||
fields []fieldInfo
|
||||
}
|
||||
|
||||
// fieldInfo holds details for the xml representation of a single field.
|
||||
type fieldInfo struct {
|
||||
idx []int
|
||||
name string
|
||||
xmlns string
|
||||
flags fieldFlags
|
||||
parents []string
|
||||
}
|
||||
|
||||
type fieldFlags int
|
||||
|
||||
const (
|
||||
fElement fieldFlags = 1 << iota
|
||||
fAttr
|
||||
fCharData
|
||||
fInnerXml
|
||||
fComment
|
||||
fAny
|
||||
|
||||
fOmitEmpty
|
||||
|
||||
fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny
|
||||
)
|
||||
|
||||
var tinfoMap = make(map[reflect.Type]*typeInfo)
|
||||
var tinfoLock sync.RWMutex
|
||||
|
||||
var nameType = reflect.TypeOf(Name{})
|
||||
|
||||
// getTypeInfo returns the typeInfo structure with details necessary
|
||||
// for marshalling and unmarshalling typ.
|
||||
func getTypeInfo(typ reflect.Type) (*typeInfo, error) {
|
||||
tinfoLock.RLock()
|
||||
tinfo, ok := tinfoMap[typ]
|
||||
tinfoLock.RUnlock()
|
||||
if ok {
|
||||
return tinfo, nil
|
||||
}
|
||||
tinfo = &typeInfo{}
|
||||
if typ.Kind() == reflect.Struct && typ != nameType {
|
||||
n := typ.NumField()
|
||||
for i := 0; i < n; i++ {
|
||||
f := typ.Field(i)
|
||||
if f.PkgPath != "" || f.Tag.Get("xml") == "-" {
|
||||
continue // Private field
|
||||
}
|
||||
|
||||
// For embedded structs, embed its fields.
|
||||
if f.Anonymous {
|
||||
t := f.Type
|
||||
if t.Kind() == reflect.Ptr {
|
||||
t = t.Elem()
|
||||
}
|
||||
if t.Kind() == reflect.Struct {
|
||||
inner, err := getTypeInfo(t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tinfo.xmlname == nil {
|
||||
tinfo.xmlname = inner.xmlname
|
||||
}
|
||||
for _, finfo := range inner.fields {
|
||||
finfo.idx = append([]int{i}, finfo.idx...)
|
||||
if err := addFieldInfo(typ, tinfo, &finfo); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
finfo, err := structFieldInfo(typ, &f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if f.Name == "XMLName" {
|
||||
tinfo.xmlname = finfo
|
||||
continue
|
||||
}
|
||||
|
||||
// Add the field if it doesn't conflict with other fields.
|
||||
if err := addFieldInfo(typ, tinfo, finfo); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
tinfoLock.Lock()
|
||||
tinfoMap[typ] = tinfo
|
||||
tinfoLock.Unlock()
|
||||
return tinfo, nil
|
||||
}
|
||||
|
||||
// structFieldInfo builds and returns a fieldInfo for f.
|
||||
func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) {
|
||||
finfo := &fieldInfo{idx: f.Index}
|
||||
|
||||
// Split the tag from the xml namespace if necessary.
|
||||
tag := f.Tag.Get("xml")
|
||||
if i := strings.Index(tag, " "); i >= 0 {
|
||||
finfo.xmlns, tag = tag[:i], tag[i+1:]
|
||||
}
|
||||
|
||||
// Parse flags.
|
||||
tokens := strings.Split(tag, ",")
|
||||
if len(tokens) == 1 {
|
||||
finfo.flags = fElement
|
||||
} else {
|
||||
tag = tokens[0]
|
||||
for _, flag := range tokens[1:] {
|
||||
switch flag {
|
||||
case "attr":
|
||||
finfo.flags |= fAttr
|
||||
case "chardata":
|
||||
finfo.flags |= fCharData
|
||||
case "innerxml":
|
||||
finfo.flags |= fInnerXml
|
||||
case "comment":
|
||||
finfo.flags |= fComment
|
||||
case "any":
|
||||
finfo.flags |= fAny
|
||||
case "omitempty":
|
||||
finfo.flags |= fOmitEmpty
|
||||
}
|
||||
}
|
||||
|
||||
// Validate the flags used.
|
||||
valid := true
|
||||
switch mode := finfo.flags & fMode; mode {
|
||||
case 0:
|
||||
finfo.flags |= fElement
|
||||
case fAttr, fCharData, fInnerXml, fComment, fAny:
|
||||
if f.Name == "XMLName" || tag != "" && mode != fAttr {
|
||||
valid = false
|
||||
}
|
||||
default:
|
||||
// This will also catch multiple modes in a single field.
|
||||
valid = false
|
||||
}
|
||||
if finfo.flags&fMode == fAny {
|
||||
finfo.flags |= fElement
|
||||
}
|
||||
if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 {
|
||||
valid = false
|
||||
}
|
||||
if !valid {
|
||||
return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q",
|
||||
f.Name, typ, f.Tag.Get("xml"))
|
||||
}
|
||||
}
|
||||
|
||||
// Use of xmlns without a name is not allowed.
|
||||
if finfo.xmlns != "" && tag == "" {
|
||||
return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q",
|
||||
f.Name, typ, f.Tag.Get("xml"))
|
||||
}
|
||||
|
||||
if f.Name == "XMLName" {
|
||||
// The XMLName field records the XML element name. Don't
|
||||
// process it as usual because its name should default to
|
||||
// empty rather than to the field name.
|
||||
finfo.name = tag
|
||||
return finfo, nil
|
||||
}
|
||||
|
||||
if tag == "" {
|
||||
// If the name part of the tag is completely empty, get
|
||||
// default from XMLName of underlying struct if feasible,
|
||||
// or field name otherwise.
|
||||
if xmlname := lookupXMLName(f.Type); xmlname != nil {
|
||||
finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name
|
||||
} else {
|
||||
finfo.name = f.Name
|
||||
}
|
||||
return finfo, nil
|
||||
}
|
||||
|
||||
if finfo.xmlns == "" && finfo.flags&fAttr == 0 {
|
||||
// If it's an element no namespace specified, get the default
|
||||
// from the XMLName of enclosing struct if possible.
|
||||
if xmlname := lookupXMLName(typ); xmlname != nil {
|
||||
finfo.xmlns = xmlname.xmlns
|
||||
}
|
||||
}
|
||||
|
||||
// Prepare field name and parents.
|
||||
parents := strings.Split(tag, ">")
|
||||
if parents[0] == "" {
|
||||
parents[0] = f.Name
|
||||
}
|
||||
if parents[len(parents)-1] == "" {
|
||||
return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ)
|
||||
}
|
||||
finfo.name = parents[len(parents)-1]
|
||||
if len(parents) > 1 {
|
||||
if (finfo.flags & fElement) == 0 {
|
||||
return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ","))
|
||||
}
|
||||
finfo.parents = parents[:len(parents)-1]
|
||||
}
|
||||
|
||||
// If the field type has an XMLName field, the names must match
|
||||
// so that the behavior of both marshalling and unmarshalling
|
||||
// is straightforward and unambiguous.
|
||||
if finfo.flags&fElement != 0 {
|
||||
ftyp := f.Type
|
||||
xmlname := lookupXMLName(ftyp)
|
||||
if xmlname != nil && xmlname.name != finfo.name {
|
||||
return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName",
|
||||
finfo.name, typ, f.Name, xmlname.name, ftyp)
|
||||
}
|
||||
}
|
||||
return finfo, nil
|
||||
}
|
||||
|
||||
// lookupXMLName returns the fieldInfo for typ's XMLName field
|
||||
// in case it exists and has a valid xml field tag, otherwise
|
||||
// it returns nil.
|
||||
func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) {
|
||||
for typ.Kind() == reflect.Ptr {
|
||||
typ = typ.Elem()
|
||||
}
|
||||
if typ.Kind() != reflect.Struct {
|
||||
return nil
|
||||
}
|
||||
for i, n := 0, typ.NumField(); i < n; i++ {
|
||||
f := typ.Field(i)
|
||||
if f.Name != "XMLName" {
|
||||
continue
|
||||
}
|
||||
finfo, err := structFieldInfo(typ, &f)
|
||||
if finfo.name != "" && err == nil {
|
||||
return finfo
|
||||
}
|
||||
// Also consider errors as a non-existent field tag
|
||||
// and let getTypeInfo itself report the error.
|
||||
break
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func min(a, b int) int {
|
||||
if a <= b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// addFieldInfo adds finfo to tinfo.fields if there are no
|
||||
// conflicts, or if conflicts arise from previous fields that were
|
||||
// obtained from deeper embedded structures than finfo. In the latter
|
||||
// case, the conflicting entries are dropped.
|
||||
// A conflict occurs when the path (parent + name) to a field is
|
||||
// itself a prefix of another path, or when two paths match exactly.
|
||||
// It is okay for field paths to share a common, shorter prefix.
|
||||
func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error {
|
||||
var conflicts []int
|
||||
Loop:
|
||||
// First, figure all conflicts. Most working code will have none.
|
||||
for i := range tinfo.fields {
|
||||
oldf := &tinfo.fields[i]
|
||||
if oldf.flags&fMode != newf.flags&fMode {
|
||||
continue
|
||||
}
|
||||
if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns {
|
||||
continue
|
||||
}
|
||||
minl := min(len(newf.parents), len(oldf.parents))
|
||||
for p := 0; p < minl; p++ {
|
||||
if oldf.parents[p] != newf.parents[p] {
|
||||
continue Loop
|
||||
}
|
||||
}
|
||||
if len(oldf.parents) > len(newf.parents) {
|
||||
if oldf.parents[len(newf.parents)] == newf.name {
|
||||
conflicts = append(conflicts, i)
|
||||
}
|
||||
} else if len(oldf.parents) < len(newf.parents) {
|
||||
if newf.parents[len(oldf.parents)] == oldf.name {
|
||||
conflicts = append(conflicts, i)
|
||||
}
|
||||
} else {
|
||||
if newf.name == oldf.name {
|
||||
conflicts = append(conflicts, i)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Without conflicts, add the new field and return.
|
||||
if conflicts == nil {
|
||||
tinfo.fields = append(tinfo.fields, *newf)
|
||||
return nil
|
||||
}
|
||||
|
||||
// If any conflict is shallower, ignore the new field.
|
||||
// This matches the Go field resolution on embedding.
|
||||
for _, i := range conflicts {
|
||||
if len(tinfo.fields[i].idx) < len(newf.idx) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise, if any of them is at the same depth level, it's an error.
|
||||
for _, i := range conflicts {
|
||||
oldf := &tinfo.fields[i]
|
||||
if len(oldf.idx) == len(newf.idx) {
|
||||
f1 := typ.FieldByIndex(oldf.idx)
|
||||
f2 := typ.FieldByIndex(newf.idx)
|
||||
return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")}
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise, the new field is shallower, and thus takes precedence,
|
||||
// so drop the conflicting fields from tinfo and append the new one.
|
||||
for c := len(conflicts) - 1; c >= 0; c-- {
|
||||
i := conflicts[c]
|
||||
copy(tinfo.fields[i:], tinfo.fields[i+1:])
|
||||
tinfo.fields = tinfo.fields[:len(tinfo.fields)-1]
|
||||
}
|
||||
tinfo.fields = append(tinfo.fields, *newf)
|
||||
return nil
|
||||
}
|
||||
|
||||
// A TagPathError represents an error in the unmarshalling process
|
||||
// caused by the use of field tags with conflicting paths.
|
||||
type TagPathError struct {
|
||||
Struct reflect.Type
|
||||
Field1, Tag1 string
|
||||
Field2, Tag2 string
|
||||
}
|
||||
|
||||
func (e *TagPathError) Error() string {
|
||||
return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2)
|
||||
}
|
||||
|
||||
// value returns v's field value corresponding to finfo.
|
||||
// It's equivalent to v.FieldByIndex(finfo.idx), but initializes
|
||||
// and dereferences pointers as necessary.
|
||||
func (finfo *fieldInfo) value(v reflect.Value) reflect.Value {
|
||||
for i, x := range finfo.idx {
|
||||
if i > 0 {
|
||||
t := v.Type()
|
||||
if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {
|
||||
if v.IsNil() {
|
||||
v.Set(reflect.New(v.Type().Elem()))
|
||||
}
|
||||
v = v.Elem()
|
||||
}
|
||||
}
|
||||
v = v.Field(x)
|
||||
}
|
||||
return v
|
||||
}
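structFieldInfo above splits an xml tag into an optional namespace (before the first space), a '>'-separated parent chain, the field's element name, and a comma-separated flag list. The standalone sketch below re-implements only that splitting step for illustration; parseTag is an invented helper, not part of this package.

package main

import (
	"fmt"
	"strings"
)

// parseTag mirrors the splitting performed by structFieldInfo.
func parseTag(tag string) (xmlns string, parents []string, name string, flags []string) {
	if i := strings.Index(tag, " "); i >= 0 {
		xmlns, tag = tag[:i], tag[i+1:]
	}
	tokens := strings.Split(tag, ",")
	path := strings.Split(tokens[0], ">")
	parents, name = path[:len(path)-1], path[len(path)-1]
	return xmlns, parents, name, tokens[1:]
}

func main() {
	ns, parents, name, flags := parseTag("DAV: prop>displayname,omitempty")
	fmt.Println(ns, parents, name, flags) // DAV: [prop] displayname [omitempty]
}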
|
1998
vendor/golang.org/x/net/webdav/internal/xml/xml.go
generated
vendored
Normal file
File diff suppressed because it is too large
445
vendor/golang.org/x/net/webdav/lock.go
generated
vendored
Normal file
@ -0,0 +1,445 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package webdav
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"errors"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrConfirmationFailed is returned by a LockSystem's Confirm method.
|
||||
ErrConfirmationFailed = errors.New("webdav: confirmation failed")
|
||||
// ErrForbidden is returned by a LockSystem's Unlock method.
|
||||
ErrForbidden = errors.New("webdav: forbidden")
|
||||
// ErrLocked is returned by a LockSystem's Create, Refresh and Unlock methods.
|
||||
ErrLocked = errors.New("webdav: locked")
|
||||
// ErrNoSuchLock is returned by a LockSystem's Refresh and Unlock methods.
|
||||
ErrNoSuchLock = errors.New("webdav: no such lock")
|
||||
)
|
||||
|
||||
// Condition can match a WebDAV resource, based on a token or ETag.
|
||||
// Exactly one of Token and ETag should be non-empty.
|
||||
type Condition struct {
|
||||
Not bool
|
||||
Token string
|
||||
ETag string
|
||||
}
|
||||
|
||||
// LockSystem manages access to a collection of named resources. The elements
|
||||
// in a lock name are separated by slash ('/', U+002F) characters, regardless
|
||||
// of host operating system convention.
|
||||
type LockSystem interface {
|
||||
// Confirm confirms that the caller can claim all of the locks specified by
|
||||
// the given conditions, and that holding the union of all of those locks
|
||||
// gives exclusive access to all of the named resources. Up to two resources
|
||||
// can be named. Empty names are ignored.
|
||||
//
|
||||
// Exactly one of release and err will be non-nil. If release is non-nil,
|
||||
// all of the requested locks are held until release is called. Calling
|
||||
// release does not unlock the lock, in the WebDAV UNLOCK sense, but once
|
||||
// Confirm has confirmed that a lock claim is valid, that lock cannot be
|
||||
// Confirmed again until it has been released.
|
||||
//
|
||||
// If Confirm returns ErrConfirmationFailed then the Handler will continue
|
||||
// to try any other set of locks presented (a WebDAV HTTP request can
|
||||
// present more than one set of locks). If it returns any other non-nil
|
||||
// error, the Handler will write a "500 Internal Server Error" HTTP status.
|
||||
Confirm(now time.Time, name0, name1 string, conditions ...Condition) (release func(), err error)
|
||||
|
||||
// Create creates a lock with the given depth, duration, owner and root
|
||||
// (name). The depth will either be negative (meaning infinite) or zero.
|
||||
//
|
||||
// If Create returns ErrLocked then the Handler will write a "423 Locked"
|
||||
// HTTP status. If it returns any other non-nil error, the Handler will
|
||||
// write a "500 Internal Server Error" HTTP status.
|
||||
//
|
||||
// See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for
|
||||
// when to use each error.
|
||||
//
|
||||
// The token returned identifies the created lock. It should be an absolute
|
||||
// URI as defined by RFC 3986, Section 4.3. In particular, it should not
|
||||
// contain whitespace.
|
||||
Create(now time.Time, details LockDetails) (token string, err error)
|
||||
|
||||
// Refresh refreshes the lock with the given token.
|
||||
//
|
||||
// If Refresh returns ErrLocked then the Handler will write a "423 Locked"
|
||||
// HTTP Status. If Refresh returns ErrNoSuchLock then the Handler will write
|
||||
// a "412 Precondition Failed" HTTP Status. If it returns any other non-nil
|
||||
// error, the Handler will write a "500 Internal Server Error" HTTP status.
|
||||
//
|
||||
// See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for
|
||||
// when to use each error.
|
||||
Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error)
|
||||
|
||||
// Unlock unlocks the lock with the given token.
|
||||
//
|
||||
// If Unlock returns ErrForbidden then the Handler will write a "403
|
||||
// Forbidden" HTTP Status. If Unlock returns ErrLocked then the Handler
|
||||
// will write a "423 Locked" HTTP status. If Unlock returns ErrNoSuchLock
|
||||
// then the Handler will write a "409 Conflict" HTTP Status. If it returns
|
||||
// any other non-nil error, the Handler will write a "500 Internal Server
|
||||
// Error" HTTP status.
|
||||
//
|
||||
// See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.11.1 for
|
||||
// when to use each error.
|
||||
Unlock(now time.Time, token string) error
|
||||
}
|
||||
|
||||
// LockDetails are a lock's metadata.
|
||||
type LockDetails struct {
|
||||
// Root is the root resource name being locked. For a zero-depth lock, the
|
||||
// root is the only resource being locked.
|
||||
Root string
|
||||
// Duration is the lock timeout. A negative duration means infinite.
|
||||
Duration time.Duration
|
||||
// OwnerXML is the verbatim <owner> XML given in a LOCK HTTP request.
|
||||
//
|
||||
// TODO: does the "verbatim" nature play well with XML namespaces?
|
||||
// Does the OwnerXML field need to have more structure? See
|
||||
// https://codereview.appspot.com/175140043/#msg2
|
||||
OwnerXML string
|
||||
// ZeroDepth is whether the lock has zero depth. If it does not have zero
|
||||
// depth, it has infinite depth.
|
||||
ZeroDepth bool
|
||||
}
|
||||
|
||||
// NewMemLS returns a new in-memory LockSystem.
|
||||
func NewMemLS() LockSystem {
|
||||
return &memLS{
|
||||
byName: make(map[string]*memLSNode),
|
||||
byToken: make(map[string]*memLSNode),
|
||||
gen: uint64(time.Now().Unix()),
|
||||
}
|
||||
}
|
||||
|
||||
type memLS struct {
|
||||
mu sync.Mutex
|
||||
byName map[string]*memLSNode
|
||||
byToken map[string]*memLSNode
|
||||
gen uint64
|
||||
// byExpiry only contains those nodes whose LockDetails have a finite
|
||||
// Duration and are yet to expire.
|
||||
byExpiry byExpiry
|
||||
}
|
||||
|
||||
func (m *memLS) nextToken() string {
|
||||
m.gen++
|
||||
return strconv.FormatUint(m.gen, 10)
|
||||
}
|
||||
|
||||
func (m *memLS) collectExpiredNodes(now time.Time) {
|
||||
for len(m.byExpiry) > 0 {
|
||||
if now.Before(m.byExpiry[0].expiry) {
|
||||
break
|
||||
}
|
||||
m.remove(m.byExpiry[0])
|
||||
}
|
||||
}
|
||||
|
||||
func (m *memLS) Confirm(now time.Time, name0, name1 string, conditions ...Condition) (func(), error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.collectExpiredNodes(now)
|
||||
|
||||
var n0, n1 *memLSNode
|
||||
if name0 != "" {
|
||||
if n0 = m.lookup(slashClean(name0), conditions...); n0 == nil {
|
||||
return nil, ErrConfirmationFailed
|
||||
}
|
||||
}
|
||||
if name1 != "" {
|
||||
if n1 = m.lookup(slashClean(name1), conditions...); n1 == nil {
|
||||
return nil, ErrConfirmationFailed
|
||||
}
|
||||
}
|
||||
|
||||
// Don't hold the same node twice.
|
||||
if n1 == n0 {
|
||||
n1 = nil
|
||||
}
|
||||
|
||||
if n0 != nil {
|
||||
m.hold(n0)
|
||||
}
|
||||
if n1 != nil {
|
||||
m.hold(n1)
|
||||
}
|
||||
return func() {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
if n1 != nil {
|
||||
m.unhold(n1)
|
||||
}
|
||||
if n0 != nil {
|
||||
m.unhold(n0)
|
||||
}
|
||||
}, nil
|
||||
}
|
||||
|
||||
// lookup returns the node n that locks the named resource, provided that n
|
||||
// matches at least one of the given conditions and that lock isn't held by
|
||||
// another party. Otherwise, it returns nil.
|
||||
//
|
||||
// n may be a parent of the named resource, if n is an infinite depth lock.
|
||||
func (m *memLS) lookup(name string, conditions ...Condition) (n *memLSNode) {
|
||||
// TODO: support Condition.Not and Condition.ETag.
|
||||
for _, c := range conditions {
|
||||
n = m.byToken[c.Token]
|
||||
if n == nil || n.held {
|
||||
continue
|
||||
}
|
||||
if name == n.details.Root {
|
||||
return n
|
||||
}
|
||||
if n.details.ZeroDepth {
|
||||
continue
|
||||
}
|
||||
if n.details.Root == "/" || strings.HasPrefix(name, n.details.Root+"/") {
|
||||
return n
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *memLS) hold(n *memLSNode) {
|
||||
if n.held {
|
||||
panic("webdav: memLS inconsistent held state")
|
||||
}
|
||||
n.held = true
|
||||
if n.details.Duration >= 0 && n.byExpiryIndex >= 0 {
|
||||
heap.Remove(&m.byExpiry, n.byExpiryIndex)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *memLS) unhold(n *memLSNode) {
|
||||
if !n.held {
|
||||
panic("webdav: memLS inconsistent held state")
|
||||
}
|
||||
n.held = false
|
||||
if n.details.Duration >= 0 {
|
||||
heap.Push(&m.byExpiry, n)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *memLS) Create(now time.Time, details LockDetails) (string, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.collectExpiredNodes(now)
|
||||
details.Root = slashClean(details.Root)
|
||||
|
||||
if !m.canCreate(details.Root, details.ZeroDepth) {
|
||||
return "", ErrLocked
|
||||
}
|
||||
n := m.create(details.Root)
|
||||
n.token = m.nextToken()
|
||||
m.byToken[n.token] = n
|
||||
n.details = details
|
||||
if n.details.Duration >= 0 {
|
||||
n.expiry = now.Add(n.details.Duration)
|
||||
heap.Push(&m.byExpiry, n)
|
||||
}
|
||||
return n.token, nil
|
||||
}
|
||||
|
||||
func (m *memLS) Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.collectExpiredNodes(now)
|
||||
|
||||
n := m.byToken[token]
|
||||
if n == nil {
|
||||
return LockDetails{}, ErrNoSuchLock
|
||||
}
|
||||
if n.held {
|
||||
return LockDetails{}, ErrLocked
|
||||
}
|
||||
if n.byExpiryIndex >= 0 {
|
||||
heap.Remove(&m.byExpiry, n.byExpiryIndex)
|
||||
}
|
||||
n.details.Duration = duration
|
||||
if n.details.Duration >= 0 {
|
||||
n.expiry = now.Add(n.details.Duration)
|
||||
heap.Push(&m.byExpiry, n)
|
||||
}
|
||||
return n.details, nil
|
||||
}
|
||||
|
||||
func (m *memLS) Unlock(now time.Time, token string) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.collectExpiredNodes(now)
|
||||
|
||||
n := m.byToken[token]
|
||||
if n == nil {
|
||||
return ErrNoSuchLock
|
||||
}
|
||||
if n.held {
|
||||
return ErrLocked
|
||||
}
|
||||
m.remove(n)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *memLS) canCreate(name string, zeroDepth bool) bool {
|
||||
return walkToRoot(name, func(name0 string, first bool) bool {
|
||||
n := m.byName[name0]
|
||||
if n == nil {
|
||||
return true
|
||||
}
|
||||
if first {
|
||||
if n.token != "" {
|
||||
// The target node is already locked.
|
||||
return false
|
||||
}
|
||||
if !zeroDepth {
|
||||
// The requested lock depth is infinite, and the fact that n exists
|
||||
// (n != nil) means that a descendent of the target node is locked.
|
||||
return false
|
||||
}
|
||||
} else if n.token != "" && !n.details.ZeroDepth {
|
||||
// An ancestor of the target node is locked with infinite depth.
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func (m *memLS) create(name string) (ret *memLSNode) {
|
||||
walkToRoot(name, func(name0 string, first bool) bool {
|
||||
n := m.byName[name0]
|
||||
if n == nil {
|
||||
n = &memLSNode{
|
||||
details: LockDetails{
|
||||
Root: name0,
|
||||
},
|
||||
byExpiryIndex: -1,
|
||||
}
|
||||
m.byName[name0] = n
|
||||
}
|
||||
n.refCount++
|
||||
if first {
|
||||
ret = n
|
||||
}
|
||||
return true
|
||||
})
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m *memLS) remove(n *memLSNode) {
|
||||
delete(m.byToken, n.token)
|
||||
n.token = ""
|
||||
walkToRoot(n.details.Root, func(name0 string, first bool) bool {
|
||||
x := m.byName[name0]
|
||||
x.refCount--
|
||||
if x.refCount == 0 {
|
||||
delete(m.byName, name0)
|
||||
}
|
||||
return true
|
||||
})
|
||||
if n.byExpiryIndex >= 0 {
|
||||
heap.Remove(&m.byExpiry, n.byExpiryIndex)
|
||||
}
|
||||
}
|
||||
|
||||
func walkToRoot(name string, f func(name0 string, first bool) bool) bool {
	for first := true; ; first = false {
		if !f(name, first) {
			return false
		}
		if name == "/" {
			break
		}
		name = name[:strings.LastIndex(name, "/")]
		if name == "" {
			name = "/"
		}
	}
	return true
}
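walkToRoot calls f on the name and then on each ancestor up to "/", stopping early if f returns false; memLS uses it to update reference counts along a lock's path. A small trace of the visit order, using a local copy of the helper since the original is unexported:

package main

import (
	"fmt"
	"strings"
)

// walkToRoot is a local copy of the unexported helper above.
func walkToRoot(name string, f func(name0 string, first bool) bool) bool {
	for first := true; ; first = false {
		if !f(name, first) {
			return false
		}
		if name == "/" {
			break
		}
		name = name[:strings.LastIndex(name, "/")]
		if name == "" {
			name = "/"
		}
	}
	return true
}

func main() {
	walkToRoot("/a/b/c", func(name0 string, first bool) bool {
		fmt.Println(name0, first) // /a/b/c true, then /a/b, /a and / with false
		return true
	})
}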
|
||||
|
||||
type memLSNode struct {
|
||||
// details are the lock metadata. Even if this node's name is not explicitly locked,
|
||||
// details.Root will still equal the node's name.
|
||||
details LockDetails
|
||||
// token is the unique identifier for this node's lock. An empty token means that
|
||||
// this node is not explicitly locked.
|
||||
token string
|
||||
// refCount is the number of self-or-descendent nodes that are explicitly locked.
|
||||
refCount int
|
||||
// expiry is when this node's lock expires.
|
||||
expiry time.Time
|
||||
// byExpiryIndex is the index of this node in memLS.byExpiry. It is -1
|
||||
// if this node does not expire, or has expired.
|
||||
byExpiryIndex int
|
||||
// held is whether this node's lock is actively held by a Confirm call.
|
||||
held bool
|
||||
}
|
||||
|
||||
type byExpiry []*memLSNode
|
||||
|
||||
func (b *byExpiry) Len() int {
|
||||
return len(*b)
|
||||
}
|
||||
|
||||
func (b *byExpiry) Less(i, j int) bool {
|
||||
return (*b)[i].expiry.Before((*b)[j].expiry)
|
||||
}
|
||||
|
||||
func (b *byExpiry) Swap(i, j int) {
|
||||
(*b)[i], (*b)[j] = (*b)[j], (*b)[i]
|
||||
(*b)[i].byExpiryIndex = i
|
||||
(*b)[j].byExpiryIndex = j
|
||||
}
|
||||
|
||||
func (b *byExpiry) Push(x interface{}) {
|
||||
n := x.(*memLSNode)
|
||||
n.byExpiryIndex = len(*b)
|
||||
*b = append(*b, n)
|
||||
}
|
||||
|
||||
func (b *byExpiry) Pop() interface{} {
|
||||
i := len(*b) - 1
|
||||
n := (*b)[i]
|
||||
(*b)[i] = nil
|
||||
n.byExpiryIndex = -1
|
||||
*b = (*b)[:i]
|
||||
return n
|
||||
}
|
||||
|
||||
const infiniteTimeout = -1
|
||||
|
||||
// parseTimeout parses the Timeout HTTP header, as per section 10.7. If s is
// empty, an infiniteTimeout is returned.
func parseTimeout(s string) (time.Duration, error) {
	if s == "" {
		return infiniteTimeout, nil
	}
	if i := strings.IndexByte(s, ','); i >= 0 {
		s = s[:i]
	}
	s = strings.TrimSpace(s)
	if s == "Infinite" {
		return infiniteTimeout, nil
	}
	const pre = "Second-"
	if !strings.HasPrefix(s, pre) {
		return 0, errInvalidTimeout
	}
	s = s[len(pre):]
	if s == "" || s[0] < '0' || '9' < s[0] {
		return 0, errInvalidTimeout
	}
	n, err := strconv.ParseInt(s, 10, 64)
	if err != nil || 1<<32-1 < n {
		return 0, errInvalidTimeout
	}
	return time.Duration(n) * time.Second, nil
}
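Taken together, memLS implements the full Create/Confirm/Refresh/Unlock cycle that the Handler drives, and parseTimeout converts Timeout headers such as "Second-3600" or "Infinite" into the durations it stores. The sketch below exercises that cycle directly through the exported API; it is illustrative only and assumes the package is imported as golang.org/x/net/webdav.

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/webdav"
)

func main() {
	ls := webdav.NewMemLS()
	now := time.Now()

	// Take a zero-depth lock on /a/b with a one-hour timeout.
	token, err := ls.Create(now, webdav.LockDetails{
		Root:      "/a/b",
		Duration:  time.Hour,
		ZeroDepth: true,
	})
	if err != nil {
		panic(err)
	}

	// Confirm the claim, as the Handler does when it sees an If header,
	// then release the temporary hold.
	release, err := ls.Confirm(now, "/a/b", "", webdav.Condition{Token: token})
	if err != nil {
		panic(err)
	}
	release()

	// Refresh the timeout, then drop the lock.
	if _, err := ls.Refresh(now, token, 30*time.Minute); err != nil {
		panic(err)
	}
	if err := ls.Unlock(now, token); err != nil {
		panic(err)
	}
	fmt.Println("released lock", token)
}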
|
469
vendor/golang.org/x/net/webdav/prop.go
generated
vendored
Normal file
@ -0,0 +1,469 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package webdav
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Proppatch describes a property update instruction as defined in RFC 4918.
|
||||
// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH
|
||||
type Proppatch struct {
|
||||
// Remove specifies whether this patch removes properties. If it does not
|
||||
// remove them, it sets them.
|
||||
Remove bool
|
||||
// Props contains the properties to be set or removed.
|
||||
Props []Property
|
||||
}
|
||||
|
||||
// Propstat describes a XML propstat element as defined in RFC 4918.
|
||||
// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat
|
||||
type Propstat struct {
|
||||
// Props contains the properties for which Status applies.
|
||||
Props []Property
|
||||
|
||||
// Status defines the HTTP status code of the properties in Prop.
|
||||
// Allowed values include, but are not limited to the WebDAV status
|
||||
// code extensions for HTTP/1.1.
|
||||
// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11
|
||||
Status int
|
||||
|
||||
// XMLError contains the XML representation of the optional error element.
|
||||
// XML content within this field must not rely on any predefined
|
||||
// namespace declarations or prefixes. If empty, the XML error element
|
||||
// is omitted.
|
||||
XMLError string
|
||||
|
||||
// ResponseDescription contains the contents of the optional
|
||||
// responsedescription field. If empty, the XML element is omitted.
|
||||
ResponseDescription string
|
||||
}
|
||||
|
||||
// makePropstats returns a slice containing those of x and y whose Props slice
|
||||
// is non-empty. If both are empty, it returns a slice containing an otherwise
|
||||
// zero Propstat whose HTTP status code is 200 OK.
|
||||
func makePropstats(x, y Propstat) []Propstat {
|
||||
pstats := make([]Propstat, 0, 2)
|
||||
if len(x.Props) != 0 {
|
||||
pstats = append(pstats, x)
|
||||
}
|
||||
if len(y.Props) != 0 {
|
||||
pstats = append(pstats, y)
|
||||
}
|
||||
if len(pstats) == 0 {
|
||||
pstats = append(pstats, Propstat{
|
||||
Status: http.StatusOK,
|
||||
})
|
||||
}
|
||||
return pstats
|
||||
}
|
||||
|
||||
// DeadPropsHolder holds the dead properties of a resource.
|
||||
//
|
||||
// Dead properties are those properties that are explicitly defined. In
|
||||
// comparison, live properties, such as DAV:getcontentlength, are implicitly
|
||||
// defined by the underlying resource, and cannot be explicitly overridden or
|
||||
// removed. See the Terminology section of
|
||||
// http://www.webdav.org/specs/rfc4918.html#rfc.section.3
|
||||
//
|
||||
// There is a whitelist of the names of live properties. This package handles
|
||||
// all live properties, and will only pass non-whitelisted names to the Patch
|
||||
// method of DeadPropsHolder implementations.
|
||||
type DeadPropsHolder interface {
|
||||
// DeadProps returns a copy of the dead properties held.
|
||||
DeadProps() (map[xml.Name]Property, error)
|
||||
|
||||
// Patch patches the dead properties held.
|
||||
//
|
||||
// Patching is atomic; either all or no patches succeed. It returns (nil,
|
||||
// non-nil) if an internal server error occurred, otherwise the Propstats
|
||||
// collectively contain one Property for each proposed patch Property. If
|
||||
// all patches succeed, Patch returns a slice of length one and a Propstat
|
||||
// element with a 200 OK HTTP status code. If none succeed, for reasons
|
||||
// other than an internal server error, no Propstat has status 200 OK.
|
||||
//
|
||||
// For more details on when various HTTP status codes apply, see
|
||||
// http://www.webdav.org/specs/rfc4918.html#PROPPATCH-status
|
||||
Patch([]Proppatch) ([]Propstat, error)
|
||||
}
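DeadPropsHolder is optional; a FileSystem whose files implement it gets dead-property support for PROPFIND and PROPPATCH. The sketch below is a minimal in-memory implementation written for illustration only: memDeadProps and its fields are invented here, and a real implementation would persist the properties alongside the file.

package main

import (
	"encoding/xml"
	"fmt"
	"net/http"

	"golang.org/x/net/webdav"
)

// memDeadProps is an illustrative DeadPropsHolder backed by a map.
type memDeadProps struct {
	webdav.File // the open file; its methods are not used in this sketch
	props       map[xml.Name]webdav.Property
}

func (f *memDeadProps) DeadProps() (map[xml.Name]webdav.Property, error) {
	out := make(map[xml.Name]webdav.Property, len(f.props))
	for k, v := range f.props {
		out[k] = v
	}
	return out, nil
}

func (f *memDeadProps) Patch(patches []webdav.Proppatch) ([]webdav.Propstat, error) {
	// Apply every patch and report 200 OK for each property name touched.
	stat := webdav.Propstat{Status: http.StatusOK}
	for _, patch := range patches {
		for _, p := range patch.Props {
			if patch.Remove {
				delete(f.props, p.XMLName)
			} else {
				f.props[p.XMLName] = p
			}
			stat.Props = append(stat.Props, webdav.Property{XMLName: p.XMLName})
		}
	}
	return []webdav.Propstat{stat}, nil
}

func main() {
	f := &memDeadProps{props: map[xml.Name]webdav.Property{}}
	name := xml.Name{Space: "urn:example", Local: "color"}
	f.Patch([]webdav.Proppatch{{Props: []webdav.Property{{XMLName: name, InnerXML: []byte("red")}}}})
	dp, _ := f.DeadProps()
	fmt.Println(string(dp[name].InnerXML)) // red
}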
|
||||
|
||||
// liveProps contains all supported, protected DAV: properties.
|
||||
var liveProps = map[xml.Name]struct {
|
||||
// findFn implements the propfind function of this property. If nil,
|
||||
// it indicates a hidden property.
|
||||
findFn func(context.Context, FileSystem, LockSystem, string, os.FileInfo) (string, error)
|
||||
// dir is true if the property applies to directories.
|
||||
dir bool
|
||||
}{
|
||||
{Space: "DAV:", Local: "resourcetype"}: {
|
||||
findFn: findResourceType,
|
||||
dir: true,
|
||||
},
|
||||
{Space: "DAV:", Local: "displayname"}: {
|
||||
findFn: findDisplayName,
|
||||
dir: true,
|
||||
},
|
||||
{Space: "DAV:", Local: "getcontentlength"}: {
|
||||
findFn: findContentLength,
|
||||
dir: false,
|
||||
},
|
||||
{Space: "DAV:", Local: "getlastmodified"}: {
|
||||
findFn: findLastModified,
|
||||
// http://webdav.org/specs/rfc4918.html#PROPERTY_getlastmodified
|
||||
// suggests that getlastmodified should only apply to GETable
|
||||
// resources, and this package does not support GET on directories.
|
||||
//
|
||||
// Nonetheless, some WebDAV clients expect child directories to be
|
||||
// sortable by getlastmodified date, so this value is true, not false.
|
||||
// See golang.org/issue/15334.
|
||||
dir: true,
|
||||
},
|
||||
{Space: "DAV:", Local: "creationdate"}: {
|
||||
findFn: nil,
|
||||
dir: false,
|
||||
},
|
||||
{Space: "DAV:", Local: "getcontentlanguage"}: {
|
||||
findFn: nil,
|
||||
dir: false,
|
||||
},
|
||||
{Space: "DAV:", Local: "getcontenttype"}: {
|
||||
findFn: findContentType,
|
||||
dir: false,
|
||||
},
|
||||
{Space: "DAV:", Local: "getetag"}: {
|
||||
findFn: findETag,
|
||||
// findETag implements ETag as the concatenated hex values of a file's
|
||||
// modification time and size. This is not a reliable synchronization
|
||||
// mechanism for directories, so we do not advertise getetag for DAV
|
||||
// collections.
|
||||
dir: false,
|
||||
},
|
||||
|
||||
// TODO: The lockdiscovery property requires LockSystem to list the
|
||||
// active locks on a resource.
|
||||
{Space: "DAV:", Local: "lockdiscovery"}: {},
|
||||
{Space: "DAV:", Local: "supportedlock"}: {
|
||||
findFn: findSupportedLock,
|
||||
dir: true,
|
||||
},
|
||||
}
|
||||
|
||||
// TODO(nigeltao) merge props and allprop?
|
||||
|
||||
// Props returns the status of the properties named pnames for resource name.
|
||||
//
|
||||
// Each Propstat has a unique status and each property name will only be part
|
||||
// of one Propstat element.
|
||||
func props(ctx context.Context, fs FileSystem, ls LockSystem, name string, pnames []xml.Name) ([]Propstat, error) {
|
||||
f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
isDir := fi.IsDir()
|
||||
|
||||
var deadProps map[xml.Name]Property
|
||||
if dph, ok := f.(DeadPropsHolder); ok {
|
||||
deadProps, err = dph.DeadProps()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
pstatOK := Propstat{Status: http.StatusOK}
|
||||
pstatNotFound := Propstat{Status: http.StatusNotFound}
|
||||
for _, pn := range pnames {
|
||||
// If this file has dead properties, check if they contain pn.
|
||||
if dp, ok := deadProps[pn]; ok {
|
||||
pstatOK.Props = append(pstatOK.Props, dp)
|
||||
continue
|
||||
}
|
||||
// Otherwise, it must either be a live property or we don't know it.
|
||||
if prop := liveProps[pn]; prop.findFn != nil && (prop.dir || !isDir) {
|
||||
innerXML, err := prop.findFn(ctx, fs, ls, name, fi)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pstatOK.Props = append(pstatOK.Props, Property{
|
||||
XMLName: pn,
|
||||
InnerXML: []byte(innerXML),
|
||||
})
|
||||
} else {
|
||||
pstatNotFound.Props = append(pstatNotFound.Props, Property{
|
||||
XMLName: pn,
|
||||
})
|
||||
}
|
||||
}
|
||||
return makePropstats(pstatOK, pstatNotFound), nil
|
||||
}
|
||||
|
||||
// Propnames returns the property names defined for resource name.
|
||||
func propnames(ctx context.Context, fs FileSystem, ls LockSystem, name string) ([]xml.Name, error) {
|
||||
f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
isDir := fi.IsDir()
|
||||
|
||||
var deadProps map[xml.Name]Property
|
||||
if dph, ok := f.(DeadPropsHolder); ok {
|
||||
deadProps, err = dph.DeadProps()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
pnames := make([]xml.Name, 0, len(liveProps)+len(deadProps))
|
||||
for pn, prop := range liveProps {
|
||||
if prop.findFn != nil && (prop.dir || !isDir) {
|
||||
pnames = append(pnames, pn)
|
||||
}
|
||||
}
|
||||
for pn := range deadProps {
|
||||
pnames = append(pnames, pn)
|
||||
}
|
||||
return pnames, nil
|
||||
}
|
||||
|
||||
// Allprop returns the properties defined for resource name and the properties
|
||||
// named in include.
|
||||
//
|
||||
// Note that RFC 4918 defines 'allprop' to return the DAV: properties defined
|
||||
// within the RFC plus dead properties. Other live properties should only be
|
||||
// returned if they are named in 'include'.
|
||||
//
|
||||
// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND
|
||||
func allprop(ctx context.Context, fs FileSystem, ls LockSystem, name string, include []xml.Name) ([]Propstat, error) {
|
||||
pnames, err := propnames(ctx, fs, ls, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Add names from include if they are not already covered in pnames.
|
||||
nameset := make(map[xml.Name]bool)
|
||||
for _, pn := range pnames {
|
||||
nameset[pn] = true
|
||||
}
|
||||
for _, pn := range include {
|
||||
if !nameset[pn] {
|
||||
pnames = append(pnames, pn)
|
||||
}
|
||||
}
|
||||
return props(ctx, fs, ls, name, pnames)
|
||||
}
|
||||
|
||||
// Patch patches the properties of resource name. The return values are
|
||||
// constrained in the same manner as DeadPropsHolder.Patch.
|
||||
func patch(ctx context.Context, fs FileSystem, ls LockSystem, name string, patches []Proppatch) ([]Propstat, error) {
|
||||
conflict := false
|
||||
loop:
|
||||
for _, patch := range patches {
|
||||
for _, p := range patch.Props {
|
||||
if _, ok := liveProps[p.XMLName]; ok {
|
||||
conflict = true
|
||||
break loop
|
||||
}
|
||||
}
|
||||
}
|
||||
if conflict {
|
||||
pstatForbidden := Propstat{
|
||||
Status: http.StatusForbidden,
|
||||
XMLError: `<D:cannot-modify-protected-property xmlns:D="DAV:"/>`,
|
||||
}
|
||||
pstatFailedDep := Propstat{
|
||||
Status: StatusFailedDependency,
|
||||
}
|
||||
for _, patch := range patches {
|
||||
for _, p := range patch.Props {
|
||||
if _, ok := liveProps[p.XMLName]; ok {
|
||||
pstatForbidden.Props = append(pstatForbidden.Props, Property{XMLName: p.XMLName})
|
||||
} else {
|
||||
pstatFailedDep.Props = append(pstatFailedDep.Props, Property{XMLName: p.XMLName})
|
||||
}
|
||||
}
|
||||
}
|
||||
return makePropstats(pstatForbidden, pstatFailedDep), nil
|
||||
}
|
||||
|
||||
f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
if dph, ok := f.(DeadPropsHolder); ok {
|
||||
ret, err := dph.Patch(patches)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat says that
|
||||
// "The contents of the prop XML element must only list the names of
|
||||
// properties to which the result in the status element applies."
|
||||
for _, pstat := range ret {
|
||||
for i, p := range pstat.Props {
|
||||
pstat.Props[i] = Property{XMLName: p.XMLName}
|
||||
}
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
// The file doesn't implement the optional DeadPropsHolder interface, so
|
||||
// all patches are forbidden.
|
||||
pstat := Propstat{Status: http.StatusForbidden}
|
||||
for _, patch := range patches {
|
||||
for _, p := range patch.Props {
|
||||
pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
|
||||
}
|
||||
}
|
||||
return []Propstat{pstat}, nil
|
||||
}
|
||||
|
||||
func escapeXML(s string) string {
|
||||
for i := 0; i < len(s); i++ {
|
||||
// As an optimization, if s contains only ASCII letters, digits or a
|
||||
// few special characters, the escaped value is s itself and we don't
|
||||
// need to allocate a buffer and convert between string and []byte.
|
||||
switch c := s[i]; {
|
||||
case c == ' ' || c == '_' ||
|
||||
('+' <= c && c <= '9') || // Digits as well as + , - . and /
|
||||
('A' <= c && c <= 'Z') ||
|
||||
('a' <= c && c <= 'z'):
|
||||
continue
|
||||
}
|
||||
// Otherwise, go through the full escaping process.
|
||||
var buf bytes.Buffer
|
||||
xml.EscapeText(&buf, []byte(s))
|
||||
return buf.String()
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func findResourceType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
|
||||
if fi.IsDir() {
|
||||
return `<D:collection xmlns:D="DAV:"/>`, nil
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func findDisplayName(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
|
||||
if slashClean(name) == "/" {
|
||||
// Hide the real name of a possibly prefixed root directory.
|
||||
return "", nil
|
||||
}
|
||||
return escapeXML(fi.Name()), nil
|
||||
}
|
||||
|
||||
func findContentLength(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
|
||||
return strconv.FormatInt(fi.Size(), 10), nil
|
||||
}
|
||||
|
||||
func findLastModified(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
|
||||
return fi.ModTime().UTC().Format(http.TimeFormat), nil
|
||||
}
|
||||
|
||||
// ErrNotImplemented should be returned by optional interfaces if they
|
||||
// want the original implementation to be used.
|
||||
var ErrNotImplemented = errors.New("not implemented")
|
||||
|
||||
// ContentTyper is an optional interface for the os.FileInfo
|
||||
// objects returned by the FileSystem.
|
||||
//
|
||||
// If this interface is defined then it will be used to read the
|
||||
// content type from the object.
|
||||
//
|
||||
// If this interface is not defined the file will be opened and the
|
||||
// content type will be guessed from the initial contents of the file.
|
||||
type ContentTyper interface {
|
||||
// ContentType returns the content type for the file.
|
||||
//
|
||||
// If this returns error ErrNotImplemented then the error will
|
||||
// be ignored and the base implementation will be used
|
||||
// instead.
|
||||
ContentType(ctx context.Context) (string, error)
|
||||
}
|
||||
|
||||
func findContentType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
|
||||
if do, ok := fi.(ContentTyper); ok {
|
||||
ctype, err := do.ContentType(ctx)
|
||||
if err != ErrNotImplemented {
|
||||
return ctype, err
|
||||
}
|
||||
}
|
||||
f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer f.Close()
|
||||
// This implementation is based on serveContent's code in the standard net/http package.
|
||||
ctype := mime.TypeByExtension(filepath.Ext(name))
|
||||
if ctype != "" {
|
||||
return ctype, nil
|
||||
}
|
||||
// Read a chunk to decide between utf-8 text and binary.
|
||||
var buf [512]byte
|
||||
n, err := io.ReadFull(f, buf[:])
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
return "", err
|
||||
}
|
||||
ctype = http.DetectContentType(buf[:n])
|
||||
// Rewind file.
|
||||
_, err = f.Seek(0, os.SEEK_SET)
|
||||
return ctype, err
|
||||
}
|
||||
|
||||
// ETager is an optional interface for the os.FileInfo objects
|
||||
// returned by the FileSystem.
|
||||
//
|
||||
// If this interface is defined then it will be used to read the ETag
|
||||
// for the object.
|
||||
//
|
||||
// If this interface is not defined an ETag will be computed using the
|
||||
// ModTime() and the Size() methods of the os.FileInfo object.
|
||||
type ETager interface {
|
||||
// ETag returns an ETag for the file. This should be of the
|
||||
// form "value" or W/"value"
|
||||
//
|
||||
// If this returns error ErrNotImplemented then the error will
|
||||
// be ignored and the base implementation will be used
|
||||
// instead.
|
||||
ETag(ctx context.Context) (string, error)
|
||||
}
|
||||
|
||||
func findETag(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
	if do, ok := fi.(ETager); ok {
		etag, err := do.ETag(ctx)
		if err != ErrNotImplemented {
			return etag, err
		}
	}
	// The Apache http 2.4 web server by default concatenates the
	// modification time and size of a file. We replicate the heuristic
	// with nanosecond granularity.
	return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.Size()), nil
}
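A worked instance of that heuristic with made-up inputs (the timestamp and size below are arbitrary):

package main

import (
	"fmt"
	"time"
)

func main() {
	modTime := time.Unix(0, 1700000000000000000) // arbitrary nanosecond timestamp
	size := int64(4096)                          // arbitrary file size in bytes
	// Same format as findETag above: hex mtime in nanoseconds, then hex size.
	etag := fmt.Sprintf(`"%x%x"`, modTime.UnixNano(), size)
	fmt.Println(etag)
}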
|
||||
|
||||
func findSupportedLock(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
|
||||
return `` +
|
||||
`<D:lockentry xmlns:D="DAV:">` +
|
||||
`<D:lockscope><D:exclusive/></D:lockscope>` +
|
||||
`<D:locktype><D:write/></D:locktype>` +
|
||||
`</D:lockentry>`, nil
|
||||
}
|
706
vendor/golang.org/x/net/webdav/webdav.go
generated
vendored
Normal file
@ -0,0 +1,706 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package webdav provides a WebDAV server implementation.
|
||||
package webdav // import "golang.org/x/net/webdav"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Handler struct {
|
||||
// Prefix is the URL path prefix to strip from WebDAV resource paths.
|
||||
Prefix string
|
||||
// FileSystem is the virtual file system.
|
||||
FileSystem FileSystem
|
||||
// LockSystem is the lock management system.
|
||||
LockSystem LockSystem
|
||||
// Logger is an optional error logger. If non-nil, it will be called
|
||||
// for all HTTP requests.
|
||||
Logger func(*http.Request, error)
|
||||
}
|
||||
|
||||
func (h *Handler) stripPrefix(p string) (string, int, error) {
|
||||
if h.Prefix == "" {
|
||||
return p, http.StatusOK, nil
|
||||
}
|
||||
if r := strings.TrimPrefix(p, h.Prefix); len(r) < len(p) {
|
||||
return r, http.StatusOK, nil
|
||||
}
|
||||
return p, http.StatusNotFound, errPrefixMismatch
|
||||
}
|
||||
|
||||
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
status, err := http.StatusBadRequest, errUnsupportedMethod
|
||||
if h.FileSystem == nil {
|
||||
status, err = http.StatusInternalServerError, errNoFileSystem
|
||||
} else if h.LockSystem == nil {
|
||||
status, err = http.StatusInternalServerError, errNoLockSystem
|
||||
} else {
|
||||
switch r.Method {
|
||||
case "OPTIONS":
|
||||
status, err = h.handleOptions(w, r)
|
||||
case "GET", "HEAD", "POST":
|
||||
status, err = h.handleGetHeadPost(w, r)
|
||||
case "DELETE":
|
||||
status, err = h.handleDelete(w, r)
|
||||
case "PUT":
|
||||
status, err = h.handlePut(w, r)
|
||||
case "MKCOL":
|
||||
status, err = h.handleMkcol(w, r)
|
||||
case "COPY", "MOVE":
|
||||
status, err = h.handleCopyMove(w, r)
|
||||
case "LOCK":
|
||||
status, err = h.handleLock(w, r)
|
||||
case "UNLOCK":
|
||||
status, err = h.handleUnlock(w, r)
|
||||
case "PROPFIND":
|
||||
status, err = h.handlePropfind(w, r)
|
||||
case "PROPPATCH":
|
||||
status, err = h.handleProppatch(w, r)
|
||||
}
|
||||
}
|
||||
|
||||
if status != 0 {
|
||||
w.WriteHeader(status)
|
||||
if status != http.StatusNoContent {
|
||||
w.Write([]byte(StatusText(status)))
|
||||
}
|
||||
}
|
||||
if h.Logger != nil {
|
||||
h.Logger(r, err)
|
||||
}
|
||||
}
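ServeHTTP dispatches each WebDAV method to the handlers that follow. As a usage illustration only (the mux path and the webdav.Dir root are assumptions, not taken from this file), a server can be wired up like this:

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/webdav"
)

func main() {
	h := &webdav.Handler{
		Prefix:     "/dav",
		FileSystem: webdav.Dir("/tmp/davroot"), // any FileSystem implementation works here
		LockSystem: webdav.NewMemLS(),
		Logger: func(r *http.Request, err error) {
			if err != nil {
				log.Printf("%s %s: %v", r.Method, r.URL.Path, err)
			}
		},
	}
	http.Handle("/dav/", h)
	log.Fatal(http.ListenAndServe(":8080", nil))
}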
|
||||
|
||||
func (h *Handler) lock(now time.Time, root string) (token string, status int, err error) {
|
||||
token, err = h.LockSystem.Create(now, LockDetails{
|
||||
Root: root,
|
||||
Duration: infiniteTimeout,
|
||||
ZeroDepth: true,
|
||||
})
|
||||
if err != nil {
|
||||
if err == ErrLocked {
|
||||
return "", StatusLocked, err
|
||||
}
|
||||
return "", http.StatusInternalServerError, err
|
||||
}
|
||||
return token, 0, nil
|
||||
}
|
||||
|
||||
func (h *Handler) confirmLocks(r *http.Request, src, dst string) (release func(), status int, err error) {
|
||||
hdr := r.Header.Get("If")
|
||||
if hdr == "" {
|
||||
// An empty If header means that the client hasn't previously created locks.
|
||||
// Even if this client doesn't care about locks, we still need to check that
|
||||
// the resources aren't locked by another client, so we create temporary
|
||||
// locks that would conflict with another client's locks. These temporary
|
||||
// locks are unlocked at the end of the HTTP request.
|
||||
now, srcToken, dstToken := time.Now(), "", ""
|
||||
if src != "" {
|
||||
srcToken, status, err = h.lock(now, src)
|
||||
if err != nil {
|
||||
return nil, status, err
|
||||
}
|
||||
}
|
||||
if dst != "" {
|
||||
dstToken, status, err = h.lock(now, dst)
|
||||
if err != nil {
|
||||
if srcToken != "" {
|
||||
h.LockSystem.Unlock(now, srcToken)
|
||||
}
|
||||
return nil, status, err
|
||||
}
|
||||
}
|
||||
|
||||
return func() {
|
||||
if dstToken != "" {
|
||||
h.LockSystem.Unlock(now, dstToken)
|
||||
}
|
||||
if srcToken != "" {
|
||||
h.LockSystem.Unlock(now, srcToken)
|
||||
}
|
||||
}, 0, nil
|
||||
}
|
||||
|
||||
ih, ok := parseIfHeader(hdr)
|
||||
if !ok {
|
||||
return nil, http.StatusBadRequest, errInvalidIfHeader
|
||||
}
|
||||
// ih is a disjunction (OR) of ifLists, so any ifList will do.
|
||||
for _, l := range ih.lists {
|
||||
lsrc := l.resourceTag
|
||||
if lsrc == "" {
|
||||
lsrc = src
|
||||
} else {
|
||||
u, err := url.Parse(lsrc)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if u.Host != r.Host {
|
||||
continue
|
||||
}
|
||||
lsrc, status, err = h.stripPrefix(u.Path)
|
||||
if err != nil {
|
||||
return nil, status, err
|
||||
}
|
||||
}
|
||||
release, err = h.LockSystem.Confirm(time.Now(), lsrc, dst, l.conditions...)
|
||||
if err == ErrConfirmationFailed {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return nil, http.StatusInternalServerError, err
|
||||
}
|
||||
return release, 0, nil
|
||||
}
|
||||
// Section 10.4.1 says that "If this header is evaluated and all state lists
|
||||
// fail, then the request must fail with a 412 (Precondition Failed) status."
|
||||
// We follow the spec even though the cond_put_corrupt_token test case from
|
||||
// the litmus test warns on seeing a 412 instead of a 423 (Locked).
|
||||
return nil, http.StatusPreconditionFailed, ErrLocked
|
||||
}
|
||||
|
||||
func (h *Handler) handleOptions(w http.ResponseWriter, r *http.Request) (status int, err error) {
|
||||
reqPath, status, err := h.stripPrefix(r.URL.Path)
|
||||
if err != nil {
|
||||
return status, err
|
||||
}
|
||||
ctx := r.Context()
|
||||
allow := "OPTIONS, LOCK, PUT, MKCOL"
|
||||
if fi, err := h.FileSystem.Stat(ctx, reqPath); err == nil {
|
||||
if fi.IsDir() {
|
||||
allow = "OPTIONS, LOCK, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND"
|
||||
} else {
|
||||
allow = "OPTIONS, LOCK, GET, HEAD, POST, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND, PUT"
|
||||
}
|
||||
}
|
||||
w.Header().Set("Allow", allow)
|
||||
// http://www.webdav.org/specs/rfc4918.html#dav.compliance.classes
|
||||
w.Header().Set("DAV", "1, 2")
|
||||
// http://msdn.microsoft.com/en-au/library/cc250217.aspx
|
||||
w.Header().Set("MS-Author-Via", "DAV")
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (status int, err error) {
|
||||
reqPath, status, err := h.stripPrefix(r.URL.Path)
|
||||
if err != nil {
|
||||
return status, err
|
||||
}
|
||||
// TODO: check locks for read-only access??
|
||||
ctx := r.Context()
|
||||
f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
return http.StatusNotFound, err
|
||||
}
|
||||
defer f.Close()
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
return http.StatusNotFound, err
|
||||
}
|
||||
if fi.IsDir() {
|
||||
return http.StatusMethodNotAllowed, nil
|
||||
}
|
||||
etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi)
|
||||
if err != nil {
|
||||
return http.StatusInternalServerError, err
|
||||
}
|
||||
w.Header().Set("ETag", etag)
|
||||
// Let ServeContent determine the Content-Type header.
|
||||
http.ServeContent(w, r, reqPath, fi.ModTime(), f)
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (h *Handler) handleDelete(w http.ResponseWriter, r *http.Request) (status int, err error) {
|
||||
reqPath, status, err := h.stripPrefix(r.URL.Path)
|
||||
if err != nil {
|
||||
return status, err
|
||||
}
|
||||
release, status, err := h.confirmLocks(r, reqPath, "")
|
||||
if err != nil {
|
||||
return status, err
|
||||
}
|
||||
defer release()
|
||||
|
||||
ctx := r.Context()
|
||||
|
||||
// TODO: return MultiStatus where appropriate.
|
||||
|
||||
// "godoc os RemoveAll" says that "If the path does not exist, RemoveAll
|
||||
// returns nil (no error)." WebDAV semantics are that it should return a
|
||||
// "404 Not Found". We therefore have to Stat before we RemoveAll.
|
||||
if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return http.StatusNotFound, err
|
||||
}
|
||||
return http.StatusMethodNotAllowed, err
|
||||
}
|
||||
if err := h.FileSystem.RemoveAll(ctx, reqPath); err != nil {
|
||||
return http.StatusMethodNotAllowed, err
|
||||
}
|
||||
return http.StatusNoContent, nil
|
||||
}
|
||||
|
||||
func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int, err error) {
|
||||
reqPath, status, err := h.stripPrefix(r.URL.Path)
|
||||
if err != nil {
|
||||
return status, err
|
||||
}
|
||||
release, status, err := h.confirmLocks(r, reqPath, "")
|
||||
if err != nil {
|
||||
return status, err
|
||||
}
|
||||
defer release()
|
||||
// TODO(rost): Support the If-Match, If-None-Match headers? See bradfitz'
|
||||
// comments in http.checkEtag.
|
||||
ctx := r.Context()
|
||||
|
||||
f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
|
||||
if err != nil {
|
||||
return http.StatusNotFound, err
|
||||
}
|
||||
_, copyErr := io.Copy(f, r.Body)
|
||||
fi, statErr := f.Stat()
|
||||
closeErr := f.Close()
|
||||
// TODO(rost): Returning 405 Method Not Allowed might not be appropriate.
|
||||
if copyErr != nil {
|
||||
return http.StatusMethodNotAllowed, copyErr
|
||||
}
|
||||
if statErr != nil {
|
||||
return http.StatusMethodNotAllowed, statErr
|
||||
}
|
||||
if closeErr != nil {
|
||||
return http.StatusMethodNotAllowed, closeErr
|
||||
}
|
||||
etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi)
|
||||
if err != nil {
|
||||
return http.StatusInternalServerError, err
|
||||
}
|
||||
w.Header().Set("ETag", etag)
|
||||
return http.StatusCreated, nil
|
||||
}
|
||||
|
||||
func (h *Handler) handleMkcol(w http.ResponseWriter, r *http.Request) (status int, err error) {
|
||||
reqPath, status, err := h.stripPrefix(r.URL.Path)
|
||||
if err != nil {
|
||||
return status, err
|
||||
}
|
||||
release, status, err := h.confirmLocks(r, reqPath, "")
|
||||
if err != nil {
|
||||
return status, err
|
||||
}
|
||||
defer release()
|
||||
|
||||
ctx := r.Context()
|
||||
|
||||
if r.ContentLength > 0 {
|
||||
return http.StatusUnsupportedMediaType, nil
|
||||
}
|
||||
if err := h.FileSystem.Mkdir(ctx, reqPath, 0777); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return http.StatusConflict, err
|
||||
}
|
||||
return http.StatusMethodNotAllowed, err
|
||||
}
|
||||
return http.StatusCreated, nil
|
||||
}
|
||||
|
||||
func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status int, err error) {
|
||||
hdr := r.Header.Get("Destination")
|
||||
if hdr == "" {
|
||||
return http.StatusBadRequest, errInvalidDestination
|
||||
}
|
||||
u, err := url.Parse(hdr)
|
||||
if err != nil {
|
||||
return http.StatusBadRequest, errInvalidDestination
|
||||
}
|
||||
if u.Host != "" && u.Host != r.Host {
|
||||
return http.StatusBadGateway, errInvalidDestination
|
||||
}
|
||||
|
||||
src, status, err := h.stripPrefix(r.URL.Path)
|
||||
if err != nil {
|
||||
return status, err
|
||||
}
|
||||
|
||||
dst, status, err := h.stripPrefix(u.Path)
|
||||
if err != nil {
|
||||
return status, err
|
||||
}
|
||||
|
||||
if dst == "" {
|
||||
return http.StatusBadGateway, errInvalidDestination
|
||||
}
|
||||
if dst == src {
|
||||
return http.StatusForbidden, errDestinationEqualsSource
|
||||
}
|
||||
|
||||
ctx := r.Context()
|
||||
|
||||
if r.Method == "COPY" {
|
||||
// Section 7.5.1 says that a COPY only needs to lock the destination,
|
||||
// not both destination and source. Strictly speaking, this is racy,
|
||||
// even though a COPY doesn't modify the source, if a concurrent
|
||||
// operation modifies the source. However, the litmus test explicitly
|
||||
// checks that COPYing a locked-by-another source is OK.
|
||||
release, status, err := h.confirmLocks(r, "", dst)
|
||||
if err != nil {
|
||||
return status, err
|
||||
}
|
||||
defer release()
|
||||
|
||||
// Section 9.8.3 says that "The COPY method on a collection without a Depth
|
||||
// header must act as if a Depth header with value "infinity" was included".
|
||||
depth := infiniteDepth
|
||||
if hdr := r.Header.Get("Depth"); hdr != "" {
|
||||
depth = parseDepth(hdr)
|
||||
if depth != 0 && depth != infiniteDepth {
|
||||
// Section 9.8.3 says that "A client may submit a Depth header on a
|
||||
// COPY on a collection with a value of "0" or "infinity"."
|
||||
return http.StatusBadRequest, errInvalidDepth
|
||||
}
|
||||
}
|
||||
return copyFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") != "F", depth, 0)
|
||||
}
|
||||
|
||||
release, status, err := h.confirmLocks(r, src, dst)
|
||||
if err != nil {
|
||||
return status, err
|
||||
}
|
||||
defer release()
|
||||
|
||||
// Section 9.9.2 says that "The MOVE method on a collection must act as if
|
||||
// a "Depth: infinity" header was used on it. A client must not submit a
|
||||
// Depth header on a MOVE on a collection with any value but "infinity"."
|
||||
if hdr := r.Header.Get("Depth"); hdr != "" {
|
||||
if parseDepth(hdr) != infiniteDepth {
|
||||
return http.StatusBadRequest, errInvalidDepth
|
||||
}
|
||||
}
|
||||
return moveFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") == "T")
|
||||
}
|
||||
|
||||
func (h *Handler) handleLock(w http.ResponseWriter, r *http.Request) (retStatus int, retErr error) {
|
||||
duration, err := parseTimeout(r.Header.Get("Timeout"))
|
||||
if err != nil {
|
||||
return http.StatusBadRequest, err
|
||||
}
|
||||
li, status, err := readLockInfo(r.Body)
|
||||
if err != nil {
|
||||
return status, err
|
||||
}
|
||||
|
||||
ctx := r.Context()
|
||||
token, ld, now, created := "", LockDetails{}, time.Now(), false
|
||||
if li == (lockInfo{}) {
|
||||
// An empty lockInfo means to refresh the lock.
|
||||
ih, ok := parseIfHeader(r.Header.Get("If"))
|
||||
if !ok {
|
||||
return http.StatusBadRequest, errInvalidIfHeader
|
||||
}
|
||||
if len(ih.lists) == 1 && len(ih.lists[0].conditions) == 1 {
|
||||
token = ih.lists[0].conditions[0].Token
|
||||
}
|
||||
if token == "" {
|
||||
return http.StatusBadRequest, errInvalidLockToken
|
||||
}
|
||||
ld, err = h.LockSystem.Refresh(now, token, duration)
|
||||
if err != nil {
|
||||
if err == ErrNoSuchLock {
|
||||
return http.StatusPreconditionFailed, err
|
||||
}
|
||||
return http.StatusInternalServerError, err
|
||||
}
|
||||
|
||||
} else {
|
||||
// Section 9.10.3 says that "If no Depth header is submitted on a LOCK request,
|
||||
// then the request MUST act as if a "Depth:infinity" had been submitted."
|
||||
depth := infiniteDepth
|
||||
if hdr := r.Header.Get("Depth"); hdr != "" {
|
||||
depth = parseDepth(hdr)
|
||||
if depth != 0 && depth != infiniteDepth {
|
||||
// Section 9.10.3 says that "Values other than 0 or infinity must not be
|
||||
// used with the Depth header on a LOCK method".
|
||||
return http.StatusBadRequest, errInvalidDepth
|
||||
}
|
||||
}
|
||||
reqPath, status, err := h.stripPrefix(r.URL.Path)
|
||||
if err != nil {
|
||||
return status, err
|
||||
}
|
||||
ld = LockDetails{
|
||||
Root: reqPath,
|
||||
Duration: duration,
|
||||
OwnerXML: li.Owner.InnerXML,
|
||||
ZeroDepth: depth == 0,
|
||||
}
|
||||
token, err = h.LockSystem.Create(now, ld)
|
||||
if err != nil {
|
||||
if err == ErrLocked {
|
||||
return StatusLocked, err
|
||||
}
|
||||
return http.StatusInternalServerError, err
|
||||
}
|
||||
defer func() {
|
||||
if retErr != nil {
|
||||
h.LockSystem.Unlock(now, token)
|
||||
}
|
||||
}()
|
||||
|
||||
// Create the resource if it didn't previously exist.
|
||||
if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {
|
||||
f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
|
||||
if err != nil {
|
||||
// TODO: detect missing intermediate dirs and return http.StatusConflict?
|
||||
return http.StatusInternalServerError, err
|
||||
}
|
||||
f.Close()
|
||||
created = true
|
||||
}
|
||||
|
||||
// http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the
|
||||
// Lock-Token value is a Coded-URL. We add angle brackets.
|
||||
w.Header().Set("Lock-Token", "<"+token+">")
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/xml; charset=utf-8")
|
||||
if created {
|
||||
// This is "w.WriteHeader(http.StatusCreated)" and not "return
|
||||
// http.StatusCreated, nil" because we write our own (XML) response to w
|
||||
// and Handler.ServeHTTP would otherwise write "Created".
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
}
|
||||
writeLockInfo(w, token, ld)
|
||||
return 0, nil
|
||||
}
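// Illustrative sketch, not part of the upstream file: handleLock above treats
// a LOCK with an empty body as a refresh of an existing lock, identified by a
// single token in the If header (token and timeout values are made-up):
//
//	LOCK /files/report.doc HTTP/1.1
//	Timeout: Second-3600
//	If: (<opaquelocktoken:1234-5678>)
//
// A LOCK with a <lockinfo> body instead creates a new lock; the new token is
// returned in the Lock-Token response header wrapped in angle brackets.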
|
||||
|
||||
func (h *Handler) handleUnlock(w http.ResponseWriter, r *http.Request) (status int, err error) {
|
||||
// http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the
|
||||
// Lock-Token value is a Coded-URL. We strip its angle brackets.
|
||||
t := r.Header.Get("Lock-Token")
|
||||
if len(t) < 2 || t[0] != '<' || t[len(t)-1] != '>' {
|
||||
return http.StatusBadRequest, errInvalidLockToken
|
||||
}
|
||||
t = t[1 : len(t)-1]
|
||||
|
||||
switch err = h.LockSystem.Unlock(time.Now(), t); err {
|
||||
case nil:
|
||||
return http.StatusNoContent, err
|
||||
case ErrForbidden:
|
||||
return http.StatusForbidden, err
|
||||
case ErrLocked:
|
||||
return StatusLocked, err
|
||||
case ErrNoSuchLock:
|
||||
return http.StatusConflict, err
|
||||
default:
|
||||
return http.StatusInternalServerError, err
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status int, err error) {
|
||||
reqPath, status, err := h.stripPrefix(r.URL.Path)
|
||||
if err != nil {
|
||||
return status, err
|
||||
}
|
||||
ctx := r.Context()
|
||||
fi, err := h.FileSystem.Stat(ctx, reqPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return http.StatusNotFound, err
|
||||
}
|
||||
return http.StatusMethodNotAllowed, err
|
||||
}
|
||||
depth := infiniteDepth
|
||||
if hdr := r.Header.Get("Depth"); hdr != "" {
|
||||
depth = parseDepth(hdr)
|
||||
if depth == invalidDepth {
|
||||
return http.StatusBadRequest, errInvalidDepth
|
||||
}
|
||||
}
|
||||
pf, status, err := readPropfind(r.Body)
|
||||
if err != nil {
|
||||
return status, err
|
||||
}
|
||||
|
||||
mw := multistatusWriter{w: w}
|
||||
|
||||
walkFn := func(reqPath string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var pstats []Propstat
|
||||
if pf.Propname != nil {
|
||||
pnames, err := propnames(ctx, h.FileSystem, h.LockSystem, reqPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pstat := Propstat{Status: http.StatusOK}
|
||||
for _, xmlname := range pnames {
|
||||
pstat.Props = append(pstat.Props, Property{XMLName: xmlname})
|
||||
}
|
||||
pstats = append(pstats, pstat)
|
||||
} else if pf.Allprop != nil {
|
||||
pstats, err = allprop(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop)
|
||||
} else {
|
||||
pstats, err = props(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
href := path.Join(h.Prefix, reqPath)
|
||||
if href != "/" && info.IsDir() {
|
||||
href += "/"
|
||||
}
|
||||
return mw.write(makePropstatResponse(href, pstats))
|
||||
}
|
||||
|
||||
walkErr := walkFS(ctx, h.FileSystem, depth, reqPath, fi, walkFn)
|
||||
closeErr := mw.close()
|
||||
if walkErr != nil {
|
||||
return http.StatusInternalServerError, walkErr
|
||||
}
|
||||
if closeErr != nil {
|
||||
return http.StatusInternalServerError, closeErr
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (h *Handler) handleProppatch(w http.ResponseWriter, r *http.Request) (status int, err error) {
|
||||
reqPath, status, err := h.stripPrefix(r.URL.Path)
|
||||
if err != nil {
|
||||
return status, err
|
||||
}
|
||||
release, status, err := h.confirmLocks(r, reqPath, "")
|
||||
if err != nil {
|
||||
return status, err
|
||||
}
|
||||
defer release()
|
||||
|
||||
ctx := r.Context()
|
||||
|
||||
if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return http.StatusNotFound, err
|
||||
}
|
||||
return http.StatusMethodNotAllowed, err
|
||||
}
|
||||
patches, status, err := readProppatch(r.Body)
|
||||
if err != nil {
|
||||
return status, err
|
||||
}
|
||||
pstats, err := patch(ctx, h.FileSystem, h.LockSystem, reqPath, patches)
|
||||
if err != nil {
|
||||
return http.StatusInternalServerError, err
|
||||
}
|
||||
mw := multistatusWriter{w: w}
|
||||
writeErr := mw.write(makePropstatResponse(r.URL.Path, pstats))
|
||||
closeErr := mw.close()
|
||||
if writeErr != nil {
|
||||
return http.StatusInternalServerError, writeErr
|
||||
}
|
||||
if closeErr != nil {
|
||||
return http.StatusInternalServerError, closeErr
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func makePropstatResponse(href string, pstats []Propstat) *response {
|
||||
resp := response{
|
||||
Href: []string{(&url.URL{Path: href}).EscapedPath()},
|
||||
Propstat: make([]propstat, 0, len(pstats)),
|
||||
}
|
||||
for _, p := range pstats {
|
||||
var xmlErr *xmlError
|
||||
if p.XMLError != "" {
|
||||
xmlErr = &xmlError{InnerXML: []byte(p.XMLError)}
|
||||
}
|
||||
resp.Propstat = append(resp.Propstat, propstat{
|
||||
Status: fmt.Sprintf("HTTP/1.1 %d %s", p.Status, StatusText(p.Status)),
|
||||
Prop: p.Props,
|
||||
ResponseDescription: p.ResponseDescription,
|
||||
Error: xmlErr,
|
||||
})
|
||||
}
|
||||
return &resp
|
||||
}
|
||||
|
||||
const (
|
||||
infiniteDepth = -1
|
||||
invalidDepth = -2
|
||||
)
|
||||
|
||||
// parseDepth maps the strings "0", "1" and "infinity" to 0, 1 and
|
||||
// infiniteDepth. Parsing any other string returns invalidDepth.
|
||||
//
|
||||
// Different WebDAV methods have further constraints on valid depths:
|
||||
// - PROPFIND has no further restrictions, as per section 9.1.
|
||||
// - COPY accepts only "0" or "infinity", as per section 9.8.3.
|
||||
// - MOVE accepts only "infinity", as per section 9.9.2.
|
||||
// - LOCK accepts only "0" or "infinity", as per section 9.10.3.
|
||||
// These constraints are enforced by the handleXxx methods.
|
||||
func parseDepth(s string) int {
|
||||
switch s {
|
||||
case "0":
|
||||
return 0
|
||||
case "1":
|
||||
return 1
|
||||
case "infinity":
|
||||
return infiniteDepth
|
||||
}
|
||||
return invalidDepth
|
||||
}
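// exampleCopyDepth is an illustrative sketch, not part of the upstream file.
// It shows how the handleXxx methods above combine parseDepth with the
// per-method constraints: COPY treats a missing Depth header as "infinity"
// and accepts only "0" or "infinity".
func exampleCopyDepth(hdr string) (depth int, ok bool) {
	if hdr == "" {
		return infiniteDepth, true
	}
	depth = parseDepth(hdr)
	return depth, depth == 0 || depth == infiniteDepth
}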
|
||||
|
||||
// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11
|
||||
const (
|
||||
StatusMulti = 207
|
||||
StatusUnprocessableEntity = 422
|
||||
StatusLocked = 423
|
||||
StatusFailedDependency = 424
|
||||
StatusInsufficientStorage = 507
|
||||
)
|
||||
|
||||
func StatusText(code int) string {
|
||||
switch code {
|
||||
case StatusMulti:
|
||||
return "Multi-Status"
|
||||
case StatusUnprocessableEntity:
|
||||
return "Unprocessable Entity"
|
||||
case StatusLocked:
|
||||
return "Locked"
|
||||
case StatusFailedDependency:
|
||||
return "Failed Dependency"
|
||||
case StatusInsufficientStorage:
|
||||
return "Insufficient Storage"
|
||||
}
|
||||
return http.StatusText(code)
|
||||
}
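// exampleStatusLine is an illustrative sketch, not part of the upstream file.
// It shows how makePropstatResponse above formats a propstat status line
// using the extended status codes and StatusText.
func exampleStatusLine() string {
	// Yields "HTTP/1.1 207 Multi-Status".
	return fmt.Sprintf("HTTP/1.1 %d %s", StatusMulti, StatusText(StatusMulti))
}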
|
||||
|
||||
var (
|
||||
errDestinationEqualsSource = errors.New("webdav: destination equals source")
|
||||
errDirectoryNotEmpty = errors.New("webdav: directory not empty")
|
||||
errInvalidDepth = errors.New("webdav: invalid depth")
|
||||
errInvalidDestination = errors.New("webdav: invalid destination")
|
||||
errInvalidIfHeader = errors.New("webdav: invalid If header")
|
||||
errInvalidLockInfo = errors.New("webdav: invalid lock info")
|
||||
errInvalidLockToken = errors.New("webdav: invalid lock token")
|
||||
errInvalidPropfind = errors.New("webdav: invalid propfind")
|
||||
errInvalidProppatch = errors.New("webdav: invalid proppatch")
|
||||
errInvalidResponse = errors.New("webdav: invalid response")
|
||||
errInvalidTimeout = errors.New("webdav: invalid timeout")
|
||||
errNoFileSystem = errors.New("webdav: no file system")
|
||||
errNoLockSystem = errors.New("webdav: no lock system")
|
||||
errNotADirectory = errors.New("webdav: not a directory")
|
||||
errPrefixMismatch = errors.New("webdav: prefix mismatch")
|
||||
errRecursionTooDeep = errors.New("webdav: recursion too deep")
|
||||
errUnsupportedLockInfo = errors.New("webdav: unsupported lock info")
|
||||
errUnsupportedMethod = errors.New("webdav: unsupported method")
|
||||
)
|
519
vendor/golang.org/x/net/webdav/xml.go
generated
vendored
Normal file
@ -0,0 +1,519 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package webdav
|
||||
|
||||
// The XML encoding is covered by Section 14.
|
||||
// http://www.webdav.org/specs/rfc4918.html#xml.element.definitions
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
// As of https://go-review.googlesource.com/#/c/12772/ which was submitted
|
||||
// in July 2015, this package uses an internal fork of the standard
|
||||
// library's encoding/xml package, due to changes in the way namespaces
|
||||
// were encoded. Such changes were introduced in the Go 1.5 cycle, but were
|
||||
// rolled back in response to https://github.com/golang/go/issues/11841
|
||||
//
|
||||
// However, this package's exported API, specifically the Property and
|
||||
// DeadPropsHolder types, need to refer to the standard library's version
|
||||
// of the xml.Name type, as code that imports this package cannot refer to
|
||||
// the internal version.
|
||||
//
|
||||
// This file therefore imports both the internal and external versions, as
|
||||
// ixml and xml, and converts between them.
|
||||
//
|
||||
// In the long term, this package should use the standard library's version
|
||||
// only, and the internal fork deleted, once
|
||||
// https://github.com/golang/go/issues/13400 is resolved.
|
||||
ixml "golang.org/x/net/webdav/internal/xml"
|
||||
)
|
||||
|
||||
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_lockinfo
|
||||
type lockInfo struct {
|
||||
XMLName ixml.Name `xml:"lockinfo"`
|
||||
Exclusive *struct{} `xml:"lockscope>exclusive"`
|
||||
Shared *struct{} `xml:"lockscope>shared"`
|
||||
Write *struct{} `xml:"locktype>write"`
|
||||
Owner owner `xml:"owner"`
|
||||
}
|
||||
|
||||
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_owner
|
||||
type owner struct {
|
||||
InnerXML string `xml:",innerxml"`
|
||||
}
|
||||
|
||||
func readLockInfo(r io.Reader) (li lockInfo, status int, err error) {
|
||||
c := &countingReader{r: r}
|
||||
if err = ixml.NewDecoder(c).Decode(&li); err != nil {
|
||||
if err == io.EOF {
|
||||
if c.n == 0 {
|
||||
// An empty body means to refresh the lock.
|
||||
// http://www.webdav.org/specs/rfc4918.html#refreshing-locks
|
||||
return lockInfo{}, 0, nil
|
||||
}
|
||||
err = errInvalidLockInfo
|
||||
}
|
||||
return lockInfo{}, http.StatusBadRequest, err
|
||||
}
|
||||
// We only support exclusive (non-shared) write locks. In practice, these are
|
||||
// the only types of locks that seem to matter.
|
||||
if li.Exclusive == nil || li.Shared != nil || li.Write == nil {
|
||||
return lockInfo{}, http.StatusNotImplemented, errUnsupportedLockInfo
|
||||
}
|
||||
return li, 0, nil
|
||||
}
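// Illustrative sketch, not part of the upstream file: a minimal LOCK body
// that readLockInfo accepts, i.e. an exclusive write lock (the owner value is
// a made-up example). An empty body, by contrast, is treated as a refresh.
//
//	<?xml version="1.0" encoding="utf-8"?>
//	<D:lockinfo xmlns:D="DAV:">
//	  <D:lockscope><D:exclusive/></D:lockscope>
//	  <D:locktype><D:write/></D:locktype>
//	  <D:owner>alice@example.com</D:owner>
//	</D:lockinfo>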
|
||||
|
||||
type countingReader struct {
|
||||
n int
|
||||
r io.Reader
|
||||
}
|
||||
|
||||
func (c *countingReader) Read(p []byte) (int, error) {
|
||||
n, err := c.r.Read(p)
|
||||
c.n += n
|
||||
return n, err
|
||||
}
|
||||
|
||||
func writeLockInfo(w io.Writer, token string, ld LockDetails) (int, error) {
|
||||
depth := "infinity"
|
||||
if ld.ZeroDepth {
|
||||
depth = "0"
|
||||
}
|
||||
timeout := ld.Duration / time.Second
|
||||
return fmt.Fprintf(w, "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n"+
|
||||
"<D:prop xmlns:D=\"DAV:\"><D:lockdiscovery><D:activelock>\n"+
|
||||
" <D:locktype><D:write/></D:locktype>\n"+
|
||||
" <D:lockscope><D:exclusive/></D:lockscope>\n"+
|
||||
" <D:depth>%s</D:depth>\n"+
|
||||
" <D:owner>%s</D:owner>\n"+
|
||||
" <D:timeout>Second-%d</D:timeout>\n"+
|
||||
" <D:locktoken><D:href>%s</D:href></D:locktoken>\n"+
|
||||
" <D:lockroot><D:href>%s</D:href></D:lockroot>\n"+
|
||||
"</D:activelock></D:lockdiscovery></D:prop>",
|
||||
depth, ld.OwnerXML, timeout, escape(token), escape(ld.Root),
|
||||
)
|
||||
}
|
||||
|
||||
func escape(s string) string {
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch s[i] {
|
||||
case '"', '&', '\'', '<', '>':
|
||||
b := bytes.NewBuffer(nil)
|
||||
ixml.EscapeText(b, []byte(s))
|
||||
return b.String()
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
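// Illustrative sketch, not part of the upstream file: escape only allocates
// when one of the five XML special characters is present, e.g.
//
//	escape(`a&b<c>`)  // "a&amp;b&lt;c&gt;"
//	escape("plain")   // "plain", returned unchanged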
|
||||
|
||||
// Next returns the next token, if any, in the XML stream of d.
|
||||
// RFC 4918 requires comments, processing instructions
// and directives to be ignored.
|
||||
// http://www.webdav.org/specs/rfc4918.html#property_values
|
||||
// http://www.webdav.org/specs/rfc4918.html#xml-extensibility
|
||||
func next(d *ixml.Decoder) (ixml.Token, error) {
|
||||
for {
|
||||
t, err := d.Token()
|
||||
if err != nil {
|
||||
return t, err
|
||||
}
|
||||
switch t.(type) {
|
||||
case ixml.Comment, ixml.Directive, ixml.ProcInst:
|
||||
continue
|
||||
default:
|
||||
return t, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for propfind)
|
||||
type propfindProps []xml.Name
|
||||
|
||||
// UnmarshalXML appends the property names enclosed within start to pn.
|
||||
//
|
||||
// It returns an error if start does not contain any properties or if
|
||||
// properties contain values. Character data between properties is ignored.
|
||||
func (pn *propfindProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
|
||||
for {
|
||||
t, err := next(d)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch t.(type) {
|
||||
case ixml.EndElement:
|
||||
if len(*pn) == 0 {
|
||||
return fmt.Errorf("%s must not be empty", start.Name.Local)
|
||||
}
|
||||
return nil
|
||||
case ixml.StartElement:
|
||||
name := t.(ixml.StartElement).Name
|
||||
t, err = next(d)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, ok := t.(ixml.EndElement); !ok {
|
||||
return fmt.Errorf("unexpected token %T", t)
|
||||
}
|
||||
*pn = append(*pn, xml.Name(name))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propfind
|
||||
type propfind struct {
|
||||
XMLName ixml.Name `xml:"DAV: propfind"`
|
||||
Allprop *struct{} `xml:"DAV: allprop"`
|
||||
Propname *struct{} `xml:"DAV: propname"`
|
||||
Prop propfindProps `xml:"DAV: prop"`
|
||||
Include propfindProps `xml:"DAV: include"`
|
||||
}
|
||||
|
||||
func readPropfind(r io.Reader) (pf propfind, status int, err error) {
|
||||
c := countingReader{r: r}
|
||||
if err = ixml.NewDecoder(&c).Decode(&pf); err != nil {
|
||||
if err == io.EOF {
|
||||
if c.n == 0 {
|
||||
// An empty body means to propfind allprop.
|
||||
// http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND
|
||||
return propfind{Allprop: new(struct{})}, 0, nil
|
||||
}
|
||||
err = errInvalidPropfind
|
||||
}
|
||||
return propfind{}, http.StatusBadRequest, err
|
||||
}
|
||||
|
||||
if pf.Allprop == nil && pf.Include != nil {
|
||||
return propfind{}, http.StatusBadRequest, errInvalidPropfind
|
||||
}
|
||||
if pf.Allprop != nil && (pf.Prop != nil || pf.Propname != nil) {
|
||||
return propfind{}, http.StatusBadRequest, errInvalidPropfind
|
||||
}
|
||||
if pf.Prop != nil && pf.Propname != nil {
|
||||
return propfind{}, http.StatusBadRequest, errInvalidPropfind
|
||||
}
|
||||
if pf.Propname == nil && pf.Allprop == nil && pf.Prop == nil {
|
||||
return propfind{}, http.StatusBadRequest, errInvalidPropfind
|
||||
}
|
||||
return pf, 0, nil
|
||||
}
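// Illustrative sketch, not part of the upstream file: a PROPFIND body that
// readPropfind accepts, requesting two named properties. An empty body is
// treated as allprop, and mixing allprop with prop or propname is rejected
// with 400 Bad Request by the checks above.
//
//	<?xml version="1.0" encoding="utf-8"?>
//	<D:propfind xmlns:D="DAV:">
//	  <D:prop>
//	    <D:displayname/>
//	    <D:getcontentlength/>
//	  </D:prop>
//	</D:propfind>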
|
||||
|
||||
// Property represents a single DAV resource property as defined in RFC 4918.
|
||||
// See http://www.webdav.org/specs/rfc4918.html#data.model.for.resource.properties
|
||||
type Property struct {
|
||||
// XMLName is the fully qualified name that identifies this property.
|
||||
XMLName xml.Name
|
||||
|
||||
// Lang is an optional xml:lang attribute.
|
||||
Lang string `xml:"xml:lang,attr,omitempty"`
|
||||
|
||||
// InnerXML contains the XML representation of the property value.
|
||||
// See http://www.webdav.org/specs/rfc4918.html#property_values
|
||||
//
|
||||
// Property values of complex type or mixed-content must have fully
|
||||
// expanded XML namespaces or be self-contained with according
|
||||
// XML namespace declarations. They must not rely on any XML
|
||||
// namespace declarations within the scope of the XML document,
|
||||
// even including the DAV: namespace.
|
||||
InnerXML []byte `xml:",innerxml"`
|
||||
}
|
||||
|
||||
// ixmlProperty is the same as the Property type except it holds an ixml.Name
|
||||
// instead of an xml.Name.
|
||||
type ixmlProperty struct {
|
||||
XMLName ixml.Name
|
||||
Lang string `xml:"xml:lang,attr,omitempty"`
|
||||
InnerXML []byte `xml:",innerxml"`
|
||||
}
|
||||
|
||||
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_error
|
||||
// See multistatusWriter for the "D:" namespace prefix.
|
||||
type xmlError struct {
|
||||
XMLName ixml.Name `xml:"D:error"`
|
||||
InnerXML []byte `xml:",innerxml"`
|
||||
}
|
||||
|
||||
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat
|
||||
// See multistatusWriter for the "D:" namespace prefix.
|
||||
type propstat struct {
|
||||
Prop []Property `xml:"D:prop>_ignored_"`
|
||||
Status string `xml:"D:status"`
|
||||
Error *xmlError `xml:"D:error"`
|
||||
ResponseDescription string `xml:"D:responsedescription,omitempty"`
|
||||
}
|
||||
|
||||
// ixmlPropstat is the same as the propstat type except it holds an ixml.Name
|
||||
// instead of an xml.Name.
|
||||
type ixmlPropstat struct {
|
||||
Prop []ixmlProperty `xml:"D:prop>_ignored_"`
|
||||
Status string `xml:"D:status"`
|
||||
Error *xmlError `xml:"D:error"`
|
||||
ResponseDescription string `xml:"D:responsedescription,omitempty"`
|
||||
}
|
||||
|
||||
// MarshalXML prepends the "D:" namespace prefix on properties in the DAV: namespace
|
||||
// before encoding. See multistatusWriter.
|
||||
func (ps propstat) MarshalXML(e *ixml.Encoder, start ixml.StartElement) error {
|
||||
// Convert from a propstat to an ixmlPropstat.
|
||||
ixmlPs := ixmlPropstat{
|
||||
Prop: make([]ixmlProperty, len(ps.Prop)),
|
||||
Status: ps.Status,
|
||||
Error: ps.Error,
|
||||
ResponseDescription: ps.ResponseDescription,
|
||||
}
|
||||
for k, prop := range ps.Prop {
|
||||
ixmlPs.Prop[k] = ixmlProperty{
|
||||
XMLName: ixml.Name(prop.XMLName),
|
||||
Lang: prop.Lang,
|
||||
InnerXML: prop.InnerXML,
|
||||
}
|
||||
}
|
||||
|
||||
for k, prop := range ixmlPs.Prop {
|
||||
if prop.XMLName.Space == "DAV:" {
|
||||
prop.XMLName = ixml.Name{Space: "", Local: "D:" + prop.XMLName.Local}
|
||||
ixmlPs.Prop[k] = prop
|
||||
}
|
||||
}
|
||||
// Distinct type to avoid infinite recursion of MarshalXML.
|
||||
type newpropstat ixmlPropstat
|
||||
return e.EncodeElement(newpropstat(ixmlPs), start)
|
||||
}
|
||||
|
||||
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_response
|
||||
// See multistatusWriter for the "D:" namespace prefix.
|
||||
type response struct {
|
||||
XMLName ixml.Name `xml:"D:response"`
|
||||
Href []string `xml:"D:href"`
|
||||
Propstat []propstat `xml:"D:propstat"`
|
||||
Status string `xml:"D:status,omitempty"`
|
||||
Error *xmlError `xml:"D:error"`
|
||||
ResponseDescription string `xml:"D:responsedescription,omitempty"`
|
||||
}
|
||||
|
||||
// multistatusWriter marshals one or more responses into an XML
// multistatus response.
|
||||
// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_multistatus
|
||||
// TODO(rsto, mpl): As a workaround, the "D:" namespace prefix, defined as
|
||||
// "DAV:" on this element, is prepended on the nested response, as well as on all
|
||||
// its nested elements. All property names in the DAV: namespace are prefixed as
|
||||
// well. This is because some versions of Mini-Redirector (on windows 7) ignore
|
||||
// elements with a default namespace (no prefixed namespace). A less intrusive fix
|
||||
// should be possible after golang.org/cl/11074. See https://golang.org/issue/11177
|
||||
type multistatusWriter struct {
|
||||
// ResponseDescription contains the optional responsedescription
|
||||
// of the multistatus XML element. Only the latest content before
|
||||
// close will be emitted. Empty response descriptions are not
|
||||
// written.
|
||||
responseDescription string
|
||||
|
||||
w http.ResponseWriter
|
||||
enc *ixml.Encoder
|
||||
}
|
||||
|
||||
// Write validates and emits a DAV response as part of a multistatus response
|
||||
// element.
|
||||
//
|
||||
// It sets the HTTP status code of its underlying http.ResponseWriter to 207
|
||||
// (Multi-Status) and populates the Content-Type header. If r is the
|
||||
// first, valid response to be written, Write prepends the XML representation
|
||||
// of r with a multistatus tag. Callers must call close after the last response
|
||||
// has been written.
|
||||
func (w *multistatusWriter) write(r *response) error {
|
||||
switch len(r.Href) {
|
||||
case 0:
|
||||
return errInvalidResponse
|
||||
case 1:
|
||||
if len(r.Propstat) > 0 != (r.Status == "") {
|
||||
return errInvalidResponse
|
||||
}
|
||||
default:
|
||||
if len(r.Propstat) > 0 || r.Status == "" {
|
||||
return errInvalidResponse
|
||||
}
|
||||
}
|
||||
err := w.writeHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return w.enc.Encode(r)
|
||||
}
|
||||
|
||||
// writeHeader writes an XML multistatus start element on w's underlying
|
||||
// http.ResponseWriter and returns the result of the write operation.
|
||||
// After the first write attempt, writeHeader becomes a no-op.
|
||||
func (w *multistatusWriter) writeHeader() error {
|
||||
if w.enc != nil {
|
||||
return nil
|
||||
}
|
||||
w.w.Header().Add("Content-Type", "text/xml; charset=utf-8")
|
||||
w.w.WriteHeader(StatusMulti)
|
||||
_, err := fmt.Fprintf(w.w, `<?xml version="1.0" encoding="UTF-8"?>`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.enc = ixml.NewEncoder(w.w)
|
||||
return w.enc.EncodeToken(ixml.StartElement{
|
||||
Name: ixml.Name{
|
||||
Space: "DAV:",
|
||||
Local: "multistatus",
|
||||
},
|
||||
Attr: []ixml.Attr{{
|
||||
Name: ixml.Name{Space: "xmlns", Local: "D"},
|
||||
Value: "DAV:",
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Close completes the marshalling of the multistatus response. It returns
|
||||
// an error if the multistatus response could not be completed. If both the
|
||||
// return value and field enc of w are nil, then no multistatus response has
|
||||
// been written.
|
||||
func (w *multistatusWriter) close() error {
|
||||
if w.enc == nil {
|
||||
return nil
|
||||
}
|
||||
var end []ixml.Token
|
||||
if w.responseDescription != "" {
|
||||
name := ixml.Name{Space: "DAV:", Local: "responsedescription"}
|
||||
end = append(end,
|
||||
ixml.StartElement{Name: name},
|
||||
ixml.CharData(w.responseDescription),
|
||||
ixml.EndElement{Name: name},
|
||||
)
|
||||
}
|
||||
end = append(end, ixml.EndElement{
|
||||
Name: ixml.Name{Space: "DAV:", Local: "multistatus"},
|
||||
})
|
||||
for _, t := range end {
|
||||
err := w.enc.EncodeToken(t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return w.enc.Flush()
|
||||
}
|
||||
|
||||
var xmlLangName = ixml.Name{Space: "http://www.w3.org/XML/1998/namespace", Local: "lang"}
|
||||
|
||||
func xmlLang(s ixml.StartElement, d string) string {
|
||||
for _, attr := range s.Attr {
|
||||
if attr.Name == xmlLangName {
|
||||
return attr.Value
|
||||
}
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
type xmlValue []byte
|
||||
|
||||
func (v *xmlValue) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
|
||||
// The XML value of a property can be arbitrary, mixed-content XML.
|
||||
// To make sure that the unmarshalled value contains all required
|
||||
// namespaces, we encode all the property value XML tokens into a
|
||||
// buffer. This forces the encoder to redeclare any used namespaces.
|
||||
var b bytes.Buffer
|
||||
e := ixml.NewEncoder(&b)
|
||||
for {
|
||||
t, err := next(d)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e, ok := t.(ixml.EndElement); ok && e.Name == start.Name {
|
||||
break
|
||||
}
|
||||
if err = e.EncodeToken(t); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
err := e.Flush()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*v = b.Bytes()
|
||||
return nil
|
||||
}
|
||||
|
||||
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for proppatch)
|
||||
type proppatchProps []Property
|
||||
|
||||
// UnmarshalXML appends the property names and values enclosed within start
|
||||
// to ps.
|
||||
//
|
||||
// An xml:lang attribute that is defined either on the DAV:prop or property
|
||||
// name XML element is propagated to the property's Lang field.
|
||||
//
|
||||
// UnmarshalXML returns an error if start does not contain any properties or if
|
||||
// property values contain syntactically incorrect XML.
|
||||
func (ps *proppatchProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
|
||||
lang := xmlLang(start, "")
|
||||
for {
|
||||
t, err := next(d)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch elem := t.(type) {
|
||||
case ixml.EndElement:
|
||||
if len(*ps) == 0 {
|
||||
return fmt.Errorf("%s must not be empty", start.Name.Local)
|
||||
}
|
||||
return nil
|
||||
case ixml.StartElement:
|
||||
p := Property{
|
||||
XMLName: xml.Name(t.(ixml.StartElement).Name),
|
||||
Lang: xmlLang(t.(ixml.StartElement), lang),
|
||||
}
|
||||
err = d.DecodeElement(((*xmlValue)(&p.InnerXML)), &elem)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*ps = append(*ps, p)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_set
|
||||
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_remove
|
||||
type setRemove struct {
|
||||
XMLName ixml.Name
|
||||
Lang string `xml:"xml:lang,attr,omitempty"`
|
||||
Prop proppatchProps `xml:"DAV: prop"`
|
||||
}
|
||||
|
||||
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propertyupdate
|
||||
type propertyupdate struct {
|
||||
XMLName ixml.Name `xml:"DAV: propertyupdate"`
|
||||
Lang string `xml:"xml:lang,attr,omitempty"`
|
||||
SetRemove []setRemove `xml:",any"`
|
||||
}
|
||||
|
||||
func readProppatch(r io.Reader) (patches []Proppatch, status int, err error) {
|
||||
var pu propertyupdate
|
||||
if err = ixml.NewDecoder(r).Decode(&pu); err != nil {
|
||||
return nil, http.StatusBadRequest, err
|
||||
}
|
||||
for _, op := range pu.SetRemove {
|
||||
remove := false
|
||||
switch op.XMLName {
|
||||
case ixml.Name{Space: "DAV:", Local: "set"}:
|
||||
// No-op.
|
||||
case ixml.Name{Space: "DAV:", Local: "remove"}:
|
||||
for _, p := range op.Prop {
|
||||
if len(p.InnerXML) > 0 {
|
||||
return nil, http.StatusBadRequest, errInvalidProppatch
|
||||
}
|
||||
}
|
||||
remove = true
|
||||
default:
|
||||
return nil, http.StatusBadRequest, errInvalidProppatch
|
||||
}
|
||||
patches = append(patches, Proppatch{Remove: remove, Props: op.Prop})
|
||||
}
|
||||
return patches, 0, nil
|
||||
}
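// Illustrative sketch, not part of the upstream file: a PROPPATCH body that
// readProppatch turns into one set patch and one remove patch (the Z:
// namespace and property names are made-up examples). Properties inside
// <D:remove> must be empty; a non-empty value there is rejected with 400.
//
//	<?xml version="1.0" encoding="utf-8"?>
//	<D:propertyupdate xmlns:D="DAV:" xmlns:Z="urn:example:props">
//	  <D:set><D:prop><Z:author>alice</Z:author></D:prop></D:set>
//	  <D:remove><D:prop><Z:obsolete/></D:prop></D:remove>
//	</D:propertyupdate>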
|
3
vendor/golang.org/x/sys/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
# This source code refers to The Go Authors for copyright purposes.
|
||||
# The master list of authors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/AUTHORS.
|
3
vendor/golang.org/x/sys/CONTRIBUTORS
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
# This source code was written by the Go contributors.
|
||||
# The master list of contributors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/CONTRIBUTORS.
|
27
vendor/golang.org/x/sys/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
22
vendor/golang.org/x/sys/PATENTS
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
Additional IP Rights Grant (Patents)
|
||||
|
||||
"This implementation" means the copyrightable works distributed by
|
||||
Google as part of the Go project.
|
||||
|
||||
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||
patent license to make, have made, use, offer to sell, sell, import,
|
||||
transfer and otherwise run, modify and propagate the contents of this
|
||||
implementation of Go, where such license applies only to those patent
|
||||
claims, both currently owned or controlled by Google and acquired in
|
||||
the future, licensable by Google that are necessarily infringed by this
|
||||
implementation of Go. This grant does not include claims that would be
|
||||
infringed only as a consequence of further modification of this
|
||||
implementation. If you or your agent or exclusive licensee institute or
|
||||
order or agree to the institution of patent litigation against any
|
||||
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||
that this implementation of Go or any code incorporated within this
|
||||
implementation of Go constitutes direct or contributory patent
|
||||
infringement, or inducement of patent infringement, then any patent
|
||||
rights granted to you under this License for this implementation of Go
|
||||
shall terminate as of the date such litigation is filed.
|
18
vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gc
|
||||
// +build gc
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
//
|
||||
// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go
|
||||
//
|
||||
|
||||
TEXT ·syscall6(SB),NOSPLIT,$0-88
|
||||
JMP syscall·syscall6(SB)
|
||||
|
||||
TEXT ·rawSyscall6(SB),NOSPLIT,$0-88
|
||||
JMP syscall·rawSyscall6(SB)
|
65
vendor/golang.org/x/sys/cpu/byteorder.go
generated
vendored
Normal file
@ -0,0 +1,65 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cpu
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// byteOrder is a subset of encoding/binary.ByteOrder.
|
||||
type byteOrder interface {
|
||||
Uint32([]byte) uint32
|
||||
Uint64([]byte) uint64
|
||||
}
|
||||
|
||||
type littleEndian struct{}
|
||||
type bigEndian struct{}
|
||||
|
||||
func (littleEndian) Uint32(b []byte) uint32 {
|
||||
_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
|
||||
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
|
||||
}
|
||||
|
||||
func (littleEndian) Uint64(b []byte) uint64 {
|
||||
_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
|
||||
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
|
||||
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
|
||||
}
|
||||
|
||||
func (bigEndian) Uint32(b []byte) uint32 {
|
||||
_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
|
||||
return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
|
||||
}
|
||||
|
||||
func (bigEndian) Uint64(b []byte) uint64 {
|
||||
_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
|
||||
return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
|
||||
uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
|
||||
}
|
||||
|
||||
// hostByteOrder returns littleEndian on little-endian machines and
|
||||
// bigEndian on big-endian machines.
|
||||
func hostByteOrder() byteOrder {
|
||||
switch runtime.GOARCH {
|
||||
case "386", "amd64", "amd64p32",
|
||||
"alpha",
|
||||
"arm", "arm64",
|
||||
"mipsle", "mips64le", "mips64p32le",
|
||||
"nios2",
|
||||
"ppc64le",
|
||||
"riscv", "riscv64",
|
||||
"sh":
|
||||
return littleEndian{}
|
||||
case "armbe", "arm64be",
|
||||
"m68k",
|
||||
"mips", "mips64", "mips64p32",
|
||||
"ppc", "ppc64",
|
||||
"s390", "s390x",
|
||||
"shbe",
|
||||
"sparc", "sparc64":
|
||||
return bigEndian{}
|
||||
}
|
||||
panic("unknown architecture")
|
||||
}
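// exampleUint32 is an illustrative sketch, not part of the upstream file. It
// shows how the byteOrder helpers above are meant to be used.
func exampleUint32() uint32 {
	b := []byte{0x01, 0x02, 0x03, 0x04}
	// Little-endian hosts return 0x04030201, big-endian hosts 0x01020304.
	return hostByteOrder().Uint32(b)
}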
|
286
vendor/golang.org/x/sys/cpu/cpu.go
generated
vendored
Normal file
@ -0,0 +1,286 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package cpu implements processor feature detection for
|
||||
// various CPU architectures.
|
||||
package cpu
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Initialized reports whether the CPU features were initialized.
|
||||
//
|
||||
// For some GOOS/GOARCH combinations initialization of the CPU features depends
|
||||
// on reading an operating-system-specific file, e.g. /proc/self/auxv on linux/arm.
// Initialized will report false if reading the file fails.
|
||||
var Initialized bool
|
||||
|
||||
// CacheLinePad is used to pad structs to avoid false sharing.
|
||||
type CacheLinePad struct{ _ [cacheLineSize]byte }
|
||||
|
||||
// X86 contains the supported CPU features of the
|
||||
// current X86/AMD64 platform. If the current platform
|
||||
// is not X86/AMD64 then all feature flags are false.
|
||||
//
|
||||
// X86 is padded to avoid false sharing. Further the HasAVX
|
||||
// and HasAVX2 are only set if the OS supports XMM and YMM
|
||||
// registers in addition to the CPUID feature bit being set.
|
||||
var X86 struct {
|
||||
_ CacheLinePad
|
||||
HasAES bool // AES hardware implementation (AES NI)
|
||||
HasADX bool // Multi-precision add-carry instruction extensions
|
||||
HasAVX bool // Advanced vector extension
|
||||
HasAVX2 bool // Advanced vector extension 2
|
||||
HasAVX512 bool // Advanced vector extension 512
|
||||
HasAVX512F bool // Advanced vector extension 512 Foundation Instructions
|
||||
HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions
|
||||
HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions
|
||||
HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions
|
||||
HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions
|
||||
HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions
|
||||
HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions
|
||||
HasAVX512IFMA bool // Advanced vector extension 512 Integer Fused Multiply Add
|
||||
HasAVX512VBMI bool // Advanced vector extension 512 Vector Byte Manipulation Instructions
|
||||
HasAVX5124VNNIW bool // Advanced vector extension 512 Vector Neural Network Instructions Word variable precision
|
||||
HasAVX5124FMAPS bool // Advanced vector extension 512 Fused Multiply Accumulation Packed Single precision
|
||||
HasAVX512VPOPCNTDQ bool // Advanced vector extension 512 Double and quad word population count instructions
|
||||
HasAVX512VPCLMULQDQ bool // Advanced vector extension 512 Vector carry-less multiply operations
|
||||
HasAVX512VNNI bool // Advanced vector extension 512 Vector Neural Network Instructions
|
||||
HasAVX512GFNI bool // Advanced vector extension 512 Galois field New Instructions
|
||||
HasAVX512VAES bool // Advanced vector extension 512 Vector AES instructions
|
||||
HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2
|
||||
HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms
|
||||
HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions
|
||||
HasBMI1 bool // Bit manipulation instruction set 1
|
||||
HasBMI2 bool // Bit manipulation instruction set 2
|
||||
HasERMS bool // Enhanced REP for MOVSB and STOSB
|
||||
HasFMA bool // Fused-multiply-add instructions
|
||||
HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers.
|
||||
HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM
|
||||
HasPOPCNT bool // Hamming weight instruction POPCNT.
|
||||
HasRDRAND bool // RDRAND instruction (on-chip random number generator)
|
||||
HasRDSEED bool // RDSEED instruction (on-chip random number generator)
|
||||
HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64)
|
||||
HasSSE3 bool // Streaming SIMD extension 3
|
||||
HasSSSE3 bool // Supplemental streaming SIMD extension 3
|
||||
HasSSE41 bool // Streaming SIMD extension 4 and 4.1
|
||||
HasSSE42 bool // Streaming SIMD extension 4 and 4.2
|
||||
_ CacheLinePad
|
||||
}
|
||||
|
||||
// ARM64 contains the supported CPU features of the
|
||||
// current ARMv8(aarch64) platform. If the current platform
|
||||
// is not arm64 then all feature flags are false.
|
||||
var ARM64 struct {
|
||||
_ CacheLinePad
|
||||
HasFP bool // Floating-point instruction set (always available)
|
||||
HasASIMD bool // Advanced SIMD (always available)
|
||||
HasEVTSTRM bool // Event stream support
|
||||
HasAES bool // AES hardware implementation
|
||||
HasPMULL bool // Polynomial multiplication instruction set
|
||||
HasSHA1 bool // SHA1 hardware implementation
|
||||
HasSHA2 bool // SHA2 hardware implementation
|
||||
HasCRC32 bool // CRC32 hardware implementation
|
||||
HasATOMICS bool // Atomic memory operation instruction set
|
||||
HasFPHP bool // Half precision floating-point instruction set
|
||||
HasASIMDHP bool // Advanced SIMD half precision instruction set
|
||||
HasCPUID bool // CPUID identification scheme registers
|
||||
HasASIMDRDM bool // Rounding double multiply add/subtract instruction set
|
||||
HasJSCVT bool // Javascript conversion from floating-point to integer
|
||||
HasFCMA bool // Floating-point multiplication and addition of complex numbers
|
||||
HasLRCPC bool // Release Consistent processor consistent support
|
||||
HasDCPOP bool // Persistent memory support
|
||||
HasSHA3 bool // SHA3 hardware implementation
|
||||
HasSM3 bool // SM3 hardware implementation
|
||||
HasSM4 bool // SM4 hardware implementation
|
||||
HasASIMDDP bool // Advanced SIMD double precision instruction set
|
||||
HasSHA512 bool // SHA512 hardware implementation
|
||||
HasSVE bool // Scalable Vector Extensions
|
||||
HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32
|
||||
_ CacheLinePad
|
||||
}
|
||||
|
||||
// ARM contains the supported CPU features of the current ARM (32-bit) platform.
|
||||
// All feature flags are false if:
|
||||
// 1. the current platform is not arm, or
|
||||
// 2. the current operating system is not Linux.
|
||||
var ARM struct {
|
||||
_ CacheLinePad
|
||||
HasSWP bool // SWP instruction support
|
||||
HasHALF bool // Half-word load and store support
|
||||
HasTHUMB bool // ARM Thumb instruction set
|
||||
Has26BIT bool // Address space limited to 26-bits
|
||||
HasFASTMUL bool // 32-bit operand, 64-bit result multiplication support
|
||||
HasFPA bool // Floating point arithmetic support
|
||||
HasVFP bool // Vector floating point support
|
||||
HasEDSP bool // DSP Extensions support
|
||||
HasJAVA bool // Java instruction set
|
||||
HasIWMMXT bool // Intel Wireless MMX technology support
|
||||
HasCRUNCH bool // MaverickCrunch context switching and handling
|
||||
HasTHUMBEE bool // Thumb EE instruction set
|
||||
HasNEON bool // NEON instruction set
|
||||
HasVFPv3 bool // Vector floating point version 3 support
|
||||
HasVFPv3D16 bool // Vector floating point version 3 D8-D15
|
||||
HasTLS bool // Thread local storage support
|
||||
HasVFPv4 bool // Vector floating point version 4 support
|
||||
HasIDIVA bool // Integer divide instruction support in ARM mode
|
||||
HasIDIVT bool // Integer divide instruction support in Thumb mode
|
||||
HasVFPD32 bool // Vector floating point version 3 D15-D31
|
||||
HasLPAE bool // Large Physical Address Extensions
|
||||
HasEVTSTRM bool // Event stream support
|
||||
HasAES bool // AES hardware implementation
|
||||
HasPMULL bool // Polynomial multiplication instruction set
|
||||
HasSHA1 bool // SHA1 hardware implementation
|
||||
HasSHA2 bool // SHA2 hardware implementation
|
||||
HasCRC32 bool // CRC32 hardware implementation
|
||||
_ CacheLinePad
|
||||
}
|
||||
|
||||
// MIPS64X contains the supported CPU features of the current mips64/mips64le
|
||||
// platforms. If the current platform is not mips64/mips64le or the current
|
||||
// operating system is not Linux then all feature flags are false.
|
||||
var MIPS64X struct {
|
||||
_ CacheLinePad
|
||||
HasMSA bool // MIPS SIMD architecture
|
||||
_ CacheLinePad
|
||||
}
|
||||
|
||||
// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms.
|
||||
// If the current platform is not ppc64/ppc64le then all feature flags are false.
|
||||
//
|
||||
// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00,
|
||||
// since there are no optional categories. There are some exceptions that also
|
||||
// require kernel support to work (DARN, SCV), so there are feature bits for
|
||||
// those as well. The struct is padded to avoid false sharing.
|
||||
var PPC64 struct {
|
||||
_ CacheLinePad
|
||||
HasDARN bool // Hardware random number generator (requires kernel enablement)
|
||||
HasSCV bool // Syscall vectored (requires kernel enablement)
|
||||
IsPOWER8 bool // ISA v2.07 (POWER8)
|
||||
IsPOWER9 bool // ISA v3.00 (POWER9), implies IsPOWER8
|
||||
_ CacheLinePad
|
||||
}
|
||||
|
||||
// S390X contains the supported CPU features of the current IBM Z
|
||||
// (s390x) platform. If the current platform is not IBM Z then all
|
||||
// feature flags are false.
|
||||
//
|
||||
// S390X is padded to avoid false sharing. Further HasVX is only set
|
||||
// if the OS supports vector registers in addition to the STFLE
|
||||
// feature bit being set.
|
||||
var S390X struct {
|
||||
_ CacheLinePad
|
||||
HasZARCH bool // z/Architecture mode is active [mandatory]
|
||||
HasSTFLE bool // store facility list extended
|
||||
HasLDISP bool // long (20-bit) displacements
|
||||
HasEIMM bool // 32-bit immediates
|
||||
HasDFP bool // decimal floating point
|
||||
HasETF3EH bool // ETF-3 enhanced
|
||||
HasMSA bool // message security assist (CPACF)
|
||||
HasAES bool // KM-AES{128,192,256} functions
|
||||
HasAESCBC bool // KMC-AES{128,192,256} functions
|
||||
HasAESCTR bool // KMCTR-AES{128,192,256} functions
|
||||
HasAESGCM bool // KMA-GCM-AES{128,192,256} functions
|
||||
HasGHASH bool // KIMD-GHASH function
|
||||
HasSHA1 bool // K{I,L}MD-SHA-1 functions
|
||||
HasSHA256 bool // K{I,L}MD-SHA-256 functions
|
||||
HasSHA512 bool // K{I,L}MD-SHA-512 functions
|
||||
HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions
|
||||
HasVX bool // vector facility
|
||||
HasVXE bool // vector-enhancements facility 1
|
||||
_ CacheLinePad
|
||||
}
|
||||
|
||||
func init() {
|
||||
archInit()
|
||||
initOptions()
|
||||
processOptions()
|
||||
}
|
||||
|
||||
// options contains the cpu debug options that can be used in GODEBUG.
|
||||
// Options are arch dependent and are added by the arch specific initOptions functions.
|
||||
// Features that are mandatory for the specific GOARCH should have the Required field set
|
||||
// (e.g. SSE2 on amd64).
|
||||
var options []option
|
||||
|
||||
// Option names should be lower case. e.g. avx instead of AVX.
|
||||
type option struct {
|
||||
Name string
|
||||
Feature *bool
|
||||
Specified bool // whether feature value was specified in GODEBUG
|
||||
Enable bool // whether feature should be enabled
|
||||
Required bool // whether feature is mandatory and can not be disabled
|
||||
}
|
||||
|
||||
func processOptions() {
|
||||
env := os.Getenv("GODEBUG")
|
||||
field:
|
||||
for env != "" {
|
||||
field := ""
|
||||
i := strings.IndexByte(env, ',')
|
||||
if i < 0 {
|
||||
field, env = env, ""
|
||||
} else {
|
||||
field, env = env[:i], env[i+1:]
|
||||
}
|
||||
if len(field) < 4 || field[:4] != "cpu." {
|
||||
continue
|
||||
}
|
||||
i = strings.IndexByte(field, '=')
|
||||
if i < 0 {
|
||||
print("GODEBUG sys/cpu: no value specified for \"", field, "\"\n")
|
||||
continue
|
||||
}
|
||||
key, value := field[4:i], field[i+1:] // e.g. "SSE2", "on"
|
||||
|
||||
var enable bool
|
||||
switch value {
|
||||
case "on":
|
||||
enable = true
|
||||
case "off":
|
||||
enable = false
|
||||
default:
|
||||
print("GODEBUG sys/cpu: value \"", value, "\" not supported for cpu option \"", key, "\"\n")
|
||||
continue field
|
||||
}
|
||||
|
||||
if key == "all" {
|
||||
for i := range options {
|
||||
options[i].Specified = true
|
||||
options[i].Enable = enable || options[i].Required
|
||||
}
|
||||
continue field
|
||||
}
|
||||
|
||||
for i := range options {
|
||||
if options[i].Name == key {
|
||||
options[i].Specified = true
|
||||
options[i].Enable = enable
|
||||
continue field
|
||||
}
|
||||
}
|
||||
|
||||
print("GODEBUG sys/cpu: unknown cpu feature \"", key, "\"\n")
|
||||
}
|
||||
|
||||
for _, o := range options {
|
||||
if !o.Specified {
|
||||
continue
|
||||
}
|
||||
|
||||
if o.Enable && !*o.Feature {
|
||||
print("GODEBUG sys/cpu: can not enable \"", o.Name, "\", missing CPU support\n")
|
||||
continue
|
||||
}
|
||||
|
||||
if !o.Enable && o.Required {
|
||||
print("GODEBUG sys/cpu: can not disable \"", o.Name, "\", required CPU feature\n")
|
||||
continue
|
||||
}
|
||||
|
||||
*o.Feature = o.Enable
|
||||
}
|
||||
}
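// Illustrative sketch, not part of the upstream file: processOptions reads
// cpu.* options from GODEBUG, so a detected, optional feature can be turned
// off at run time, e.g.
//
//	GODEBUG=cpu.avx2=off ./myprogram
//
// Disabling a required feature (such as sse2 on amd64) or enabling one the
// CPU lacks only prints a warning and is otherwise ignored.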
|
34
vendor/golang.org/x/sys/cpu/cpu_aix.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build aix
|
||||
// +build aix
|
||||
|
||||
package cpu
|
||||
|
||||
const (
|
||||
// getsystemcfg constants
|
||||
_SC_IMPL = 2
|
||||
_IMPL_POWER8 = 0x10000
|
||||
_IMPL_POWER9 = 0x20000
|
||||
)
|
||||
|
||||
func archInit() {
|
||||
impl := getsystemcfg(_SC_IMPL)
|
||||
if impl&_IMPL_POWER8 != 0 {
|
||||
PPC64.IsPOWER8 = true
|
||||
}
|
||||
if impl&_IMPL_POWER9 != 0 {
|
||||
PPC64.IsPOWER8 = true
|
||||
PPC64.IsPOWER9 = true
|
||||
}
|
||||
|
||||
Initialized = true
|
||||
}
|
||||
|
||||
func getsystemcfg(label int) (n uint64) {
|
||||
r0, _ := callgetsystemcfg(label)
|
||||
n = uint64(r0)
|
||||
return
|
||||
}
|
73
vendor/golang.org/x/sys/cpu/cpu_arm.go
generated
vendored
Normal file
@ -0,0 +1,73 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cpu
|
||||
|
||||
const cacheLineSize = 32
|
||||
|
||||
// HWCAP/HWCAP2 bits.
|
||||
// These are specific to Linux.
|
||||
const (
|
||||
hwcap_SWP = 1 << 0
|
||||
hwcap_HALF = 1 << 1
|
||||
hwcap_THUMB = 1 << 2
|
||||
hwcap_26BIT = 1 << 3
|
||||
hwcap_FAST_MULT = 1 << 4
|
||||
hwcap_FPA = 1 << 5
|
||||
hwcap_VFP = 1 << 6
|
||||
hwcap_EDSP = 1 << 7
|
||||
hwcap_JAVA = 1 << 8
|
||||
hwcap_IWMMXT = 1 << 9
|
||||
hwcap_CRUNCH = 1 << 10
|
||||
hwcap_THUMBEE = 1 << 11
|
||||
hwcap_NEON = 1 << 12
|
||||
hwcap_VFPv3 = 1 << 13
|
||||
hwcap_VFPv3D16 = 1 << 14
|
||||
hwcap_TLS = 1 << 15
|
||||
hwcap_VFPv4 = 1 << 16
|
||||
hwcap_IDIVA = 1 << 17
|
||||
hwcap_IDIVT = 1 << 18
|
||||
hwcap_VFPD32 = 1 << 19
|
||||
hwcap_LPAE = 1 << 20
|
||||
hwcap_EVTSTRM = 1 << 21
|
||||
|
||||
hwcap2_AES = 1 << 0
|
||||
hwcap2_PMULL = 1 << 1
|
||||
hwcap2_SHA1 = 1 << 2
|
||||
hwcap2_SHA2 = 1 << 3
|
||||
hwcap2_CRC32 = 1 << 4
|
||||
)
|
||||
|
||||
func initOptions() {
|
||||
options = []option{
|
||||
{Name: "pmull", Feature: &ARM.HasPMULL},
|
||||
{Name: "sha1", Feature: &ARM.HasSHA1},
|
||||
{Name: "sha2", Feature: &ARM.HasSHA2},
|
||||
{Name: "swp", Feature: &ARM.HasSWP},
|
||||
{Name: "thumb", Feature: &ARM.HasTHUMB},
|
||||
{Name: "thumbee", Feature: &ARM.HasTHUMBEE},
|
||||
{Name: "tls", Feature: &ARM.HasTLS},
|
||||
{Name: "vfp", Feature: &ARM.HasVFP},
|
||||
{Name: "vfpd32", Feature: &ARM.HasVFPD32},
|
||||
{Name: "vfpv3", Feature: &ARM.HasVFPv3},
|
||||
{Name: "vfpv3d16", Feature: &ARM.HasVFPv3D16},
|
||||
{Name: "vfpv4", Feature: &ARM.HasVFPv4},
|
||||
{Name: "half", Feature: &ARM.HasHALF},
|
||||
{Name: "26bit", Feature: &ARM.Has26BIT},
|
||||
{Name: "fastmul", Feature: &ARM.HasFASTMUL},
|
||||
{Name: "fpa", Feature: &ARM.HasFPA},
|
||||
{Name: "edsp", Feature: &ARM.HasEDSP},
|
||||
{Name: "java", Feature: &ARM.HasJAVA},
|
||||
{Name: "iwmmxt", Feature: &ARM.HasIWMMXT},
|
||||
{Name: "crunch", Feature: &ARM.HasCRUNCH},
|
||||
{Name: "neon", Feature: &ARM.HasNEON},
|
||||
{Name: "idivt", Feature: &ARM.HasIDIVT},
|
||||
{Name: "idiva", Feature: &ARM.HasIDIVA},
|
||||
{Name: "lpae", Feature: &ARM.HasLPAE},
|
||||
{Name: "evtstrm", Feature: &ARM.HasEVTSTRM},
|
||||
{Name: "aes", Feature: &ARM.HasAES},
|
||||
{Name: "crc32", Feature: &ARM.HasCRC32},
|
||||
}
|
||||
|
||||
}
|
172
vendor/golang.org/x/sys/cpu/cpu_arm64.go
generated
vendored
Normal file
@ -0,0 +1,172 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cpu
|
||||
|
||||
import "runtime"
|
||||
|
||||
const cacheLineSize = 64
|
||||
|
||||
func initOptions() {
|
||||
options = []option{
|
||||
{Name: "fp", Feature: &ARM64.HasFP},
|
||||
{Name: "asimd", Feature: &ARM64.HasASIMD},
|
||||
{Name: "evstrm", Feature: &ARM64.HasEVTSTRM},
|
||||
{Name: "aes", Feature: &ARM64.HasAES},
|
||||
{Name: "fphp", Feature: &ARM64.HasFPHP},
|
||||
{Name: "jscvt", Feature: &ARM64.HasJSCVT},
|
||||
{Name: "lrcpc", Feature: &ARM64.HasLRCPC},
|
||||
{Name: "pmull", Feature: &ARM64.HasPMULL},
|
||||
{Name: "sha1", Feature: &ARM64.HasSHA1},
|
||||
{Name: "sha2", Feature: &ARM64.HasSHA2},
|
||||
{Name: "sha3", Feature: &ARM64.HasSHA3},
|
||||
{Name: "sha512", Feature: &ARM64.HasSHA512},
|
||||
{Name: "sm3", Feature: &ARM64.HasSM3},
|
||||
{Name: "sm4", Feature: &ARM64.HasSM4},
|
||||
{Name: "sve", Feature: &ARM64.HasSVE},
|
||||
{Name: "crc32", Feature: &ARM64.HasCRC32},
|
||||
{Name: "atomics", Feature: &ARM64.HasATOMICS},
|
||||
{Name: "asimdhp", Feature: &ARM64.HasASIMDHP},
|
||||
{Name: "cpuid", Feature: &ARM64.HasCPUID},
|
||||
{Name: "asimrdm", Feature: &ARM64.HasASIMDRDM},
|
||||
{Name: "fcma", Feature: &ARM64.HasFCMA},
|
||||
{Name: "dcpop", Feature: &ARM64.HasDCPOP},
|
||||
{Name: "asimddp", Feature: &ARM64.HasASIMDDP},
|
||||
{Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM},
|
||||
}
|
||||
}
|
||||
|
||||
func archInit() {
|
||||
switch runtime.GOOS {
|
||||
case "freebsd":
|
||||
readARM64Registers()
|
||||
case "linux", "netbsd":
|
||||
doinit()
|
||||
default:
|
||||
// Most platforms don't seem to allow reading these registers.
|
||||
//
|
||||
// OpenBSD:
|
||||
// See https://golang.org/issue/31746
|
||||
setMinimalFeatures()
|
||||
}
|
||||
}
|
||||
|
||||
// setMinimalFeatures fakes the minimal ARM64 features expected by
|
||||
// TestARM64minimalFeatures.
|
||||
func setMinimalFeatures() {
|
||||
ARM64.HasASIMD = true
|
||||
ARM64.HasFP = true
|
||||
}
|
||||
|
||||
func readARM64Registers() {
|
||||
Initialized = true
|
||||
|
||||
parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0())
|
||||
}
|
||||
|
||||
func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) {
|
||||
// ID_AA64ISAR0_EL1
|
||||
switch extractBits(isar0, 4, 7) {
|
||||
case 1:
|
||||
ARM64.HasAES = true
|
||||
case 2:
|
||||
ARM64.HasAES = true
|
||||
ARM64.HasPMULL = true
|
||||
}
|
||||
|
||||
switch extractBits(isar0, 8, 11) {
|
||||
case 1:
|
||||
ARM64.HasSHA1 = true
|
||||
}
|
||||
|
||||
switch extractBits(isar0, 12, 15) {
|
||||
case 1:
|
||||
ARM64.HasSHA2 = true
|
||||
case 2:
|
||||
ARM64.HasSHA2 = true
|
||||
ARM64.HasSHA512 = true
|
||||
}
|
||||
|
||||
switch extractBits(isar0, 16, 19) {
|
||||
case 1:
|
||||
ARM64.HasCRC32 = true
|
||||
}
|
||||
|
||||
switch extractBits(isar0, 20, 23) {
|
||||
case 2:
|
||||
ARM64.HasATOMICS = true
|
||||
}
|
||||
|
||||
switch extractBits(isar0, 28, 31) {
|
||||
case 1:
|
||||
ARM64.HasASIMDRDM = true
|
||||
}
|
||||
|
||||
switch extractBits(isar0, 32, 35) {
|
||||
case 1:
|
||||
ARM64.HasSHA3 = true
|
||||
}
|
||||
|
||||
switch extractBits(isar0, 36, 39) {
|
||||
case 1:
|
||||
ARM64.HasSM3 = true
|
||||
}
|
||||
|
||||
switch extractBits(isar0, 40, 43) {
|
||||
case 1:
|
||||
ARM64.HasSM4 = true
|
||||
}
|
||||
|
||||
switch extractBits(isar0, 44, 47) {
|
||||
case 1:
|
||||
ARM64.HasASIMDDP = true
|
||||
}
|
||||
|
||||
// ID_AA64ISAR1_EL1
|
||||
switch extractBits(isar1, 0, 3) {
|
||||
case 1:
|
||||
ARM64.HasDCPOP = true
|
||||
}
|
||||
|
||||
switch extractBits(isar1, 12, 15) {
|
||||
case 1:
|
||||
ARM64.HasJSCVT = true
|
||||
}
|
||||
|
||||
switch extractBits(isar1, 16, 19) {
|
||||
case 1:
|
||||
ARM64.HasFCMA = true
|
||||
}
|
||||
|
||||
switch extractBits(isar1, 20, 23) {
|
||||
case 1:
|
||||
ARM64.HasLRCPC = true
|
||||
}
|
||||
|
||||
// ID_AA64PFR0_EL1
|
||||
switch extractBits(pfr0, 16, 19) {
|
||||
case 0:
|
||||
ARM64.HasFP = true
|
||||
case 1:
|
||||
ARM64.HasFP = true
|
||||
ARM64.HasFPHP = true
|
||||
}
|
||||
|
||||
switch extractBits(pfr0, 20, 23) {
|
||||
case 0:
|
||||
ARM64.HasASIMD = true
|
||||
case 1:
|
||||
ARM64.HasASIMD = true
|
||||
ARM64.HasASIMDHP = true
|
||||
}
|
||||
|
||||
switch extractBits(pfr0, 32, 35) {
|
||||
case 1:
|
||||
ARM64.HasSVE = true
|
||||
}
|
||||
}
|
||||
|
||||
func extractBits(data uint64, start, end uint) uint {
|
||||
return (uint)(data>>start) & ((1 << (end - start + 1)) - 1)
|
||||
}
|
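The register parsing above repeatedly slices 4-bit fields out of the ID_AA64* system registers with extractBits. A small stand-alone sketch (the sample isar0 value is made up) showing how a field value of 2 in bits 4..7 maps to AES plus PMULL:

package main

import "fmt"

// extractBits mirrors the helper above: it returns the inclusive bit range
// [start, end] of an ID register value, shifted down to the low bits.
func extractBits(data uint64, start, end uint) uint {
	return (uint)(data>>start) & ((1 << (end - start + 1)) - 1)
}

func main() {
	var isar0 uint64 = 0x2 << 4              // hypothetical ID_AA64ISAR0_EL1 with AES field = 2
	fmt.Println(extractBits(isar0, 4, 7))    // prints 2, i.e. AES and PMULL implemented
}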
32
vendor/golang.org/x/sys/cpu/cpu_arm64.s
generated
vendored
Normal file
@ -0,0 +1,32 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gc
|
||||
// +build gc
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// func getisar0() uint64
|
||||
TEXT ·getisar0(SB),NOSPLIT,$0-8
|
||||
// get Instruction Set Attributes 0 into x0
|
||||
// mrs x0, ID_AA64ISAR0_EL1 = d5380600
|
||||
WORD $0xd5380600
|
||||
MOVD R0, ret+0(FP)
|
||||
RET
|
||||
|
||||
// func getisar1() uint64
|
||||
TEXT ·getisar1(SB),NOSPLIT,$0-8
|
||||
// get Instruction Set Attributes 1 into x0
|
||||
// mrs x0, ID_AA64ISAR1_EL1 = d5380620
|
||||
WORD $0xd5380620
|
||||
MOVD R0, ret+0(FP)
|
||||
RET
|
||||
|
||||
// func getpfr0() uint64
|
||||
TEXT ·getpfr0(SB),NOSPLIT,$0-8
|
||||
// get Processor Feature Register 0 into x0
|
||||
// mrs x0, ID_AA64PFR0_EL1 = d5380400
|
||||
WORD $0xd5380400
|
||||
MOVD R0, ret+0(FP)
|
||||
RET
|
12
vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gc
|
||||
// +build gc
|
||||
|
||||
package cpu
|
||||
|
||||
func getisar0() uint64
|
||||
func getisar1() uint64
|
||||
func getpfr0() uint64
|
22
vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gc
|
||||
// +build gc
|
||||
|
||||
package cpu
|
||||
|
||||
// haveAsmFunctions reports whether the other functions in this file can
|
||||
// be safely called.
|
||||
func haveAsmFunctions() bool { return true }
|
||||
|
||||
// The following feature detection functions are defined in cpu_s390x.s.
|
||||
// They are likely to be expensive to call so the results should be cached.
|
||||
func stfle() facilityList
|
||||
func kmQuery() queryResult
|
||||
func kmcQuery() queryResult
|
||||
func kmctrQuery() queryResult
|
||||
func kmaQuery() queryResult
|
||||
func kimdQuery() queryResult
|
||||
func klmdQuery() queryResult
|
21
vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build (386 || amd64 || amd64p32) && gc
|
||||
// +build 386 amd64 amd64p32
|
||||
// +build gc
|
||||
|
||||
package cpu
|
||||
|
||||
// cpuid is implemented in cpu_x86.s for gc compiler
|
||||
// and in cpu_gccgo.c for gccgo.
|
||||
func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
|
||||
|
||||
// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler
|
||||
// and in cpu_gccgo.c for gccgo.
|
||||
func xgetbv() (eax, edx uint32)
|
||||
|
||||
// darwinSupportsAVX512 is implemented in cpu_x86.s for gc compiler
|
||||
// and in cpu_gccgo_x86.go for gccgo.
|
||||
func darwinSupportsAVX512() bool
|
12
vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gccgo
|
||||
// +build gccgo
|
||||
|
||||
package cpu
|
||||
|
||||
func getisar0() uint64 { return 0 }
|
||||
func getisar1() uint64 { return 0 }
|
||||
func getpfr0() uint64 { return 0 }
|
23
vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gccgo
|
||||
// +build gccgo
|
||||
|
||||
package cpu
|
||||
|
||||
// haveAsmFunctions reports whether the other functions in this file can
|
||||
// be safely called.
|
||||
func haveAsmFunctions() bool { return false }
|
||||
|
||||
// TODO(mundaym): the following feature detection functions are currently
|
||||
// stubs. See https://golang.org/cl/162887 for how to fix this.
|
||||
// They are likely to be expensive to call so the results should be cached.
|
||||
func stfle() facilityList { panic("not implemented for gccgo") }
|
||||
func kmQuery() queryResult { panic("not implemented for gccgo") }
|
||||
func kmcQuery() queryResult { panic("not implemented for gccgo") }
|
||||
func kmctrQuery() queryResult { panic("not implemented for gccgo") }
|
||||
func kmaQuery() queryResult { panic("not implemented for gccgo") }
|
||||
func kimdQuery() queryResult { panic("not implemented for gccgo") }
|
||||
func klmdQuery() queryResult { panic("not implemented for gccgo") }
|
43
vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
generated
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build 386 amd64 amd64p32
|
||||
// +build gccgo
|
||||
|
||||
#include <cpuid.h>
|
||||
#include <stdint.h>
|
||||
|
||||
// Need to wrap __get_cpuid_count because it's declared as static.
|
||||
int
|
||||
gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf,
|
||||
uint32_t *eax, uint32_t *ebx,
|
||||
uint32_t *ecx, uint32_t *edx)
|
||||
{
|
||||
return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx);
|
||||
}
|
||||
|
||||
// xgetbv reads the contents of an XCR (Extended Control Register)
|
||||
// specified in the ECX register into registers EDX:EAX.
|
||||
// Currently, the only supported value for XCR is 0.
|
||||
//
|
||||
// TODO: Replace with a better alternative:
|
||||
//
|
||||
// #include <xsaveintrin.h>
|
||||
//
|
||||
// #pragma GCC target("xsave")
|
||||
//
|
||||
// void gccgoXgetbv(uint32_t *eax, uint32_t *edx) {
|
||||
// unsigned long long x = _xgetbv(0);
|
||||
// *eax = x & 0xffffffff;
|
||||
// *edx = (x >> 32) & 0xffffffff;
|
||||
// }
|
||||
//
|
||||
// Note that _xgetbv is defined starting with GCC 8.
|
||||
void
|
||||
gccgoXgetbv(uint32_t *eax, uint32_t *edx)
|
||||
{
|
||||
__asm(" xorl %%ecx, %%ecx\n"
|
||||
" xgetbv"
|
||||
: "=a"(*eax), "=d"(*edx));
|
||||
}
|
33
vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go
generated
vendored
Normal file
@ -0,0 +1,33 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build (386 || amd64 || amd64p32) && gccgo
|
||||
// +build 386 amd64 amd64p32
|
||||
// +build gccgo
|
||||
|
||||
package cpu
|
||||
|
||||
//extern gccgoGetCpuidCount
|
||||
func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32)
|
||||
|
||||
func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) {
|
||||
var a, b, c, d uint32
|
||||
gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d)
|
||||
return a, b, c, d
|
||||
}
|
||||
|
||||
//extern gccgoXgetbv
|
||||
func gccgoXgetbv(eax, edx *uint32)
|
||||
|
||||
func xgetbv() (eax, edx uint32) {
|
||||
var a, d uint32
|
||||
gccgoXgetbv(&a, &d)
|
||||
return a, d
|
||||
}
|
||||
|
||||
// gccgo doesn't build on Darwin, per:
|
||||
// https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/gcc.rb#L76
|
||||
func darwinSupportsAVX512() bool {
|
||||
return false
|
||||
}
|
16
vendor/golang.org/x/sys/cpu/cpu_linux.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !386 && !amd64 && !amd64p32 && !arm64
|
||||
// +build !386,!amd64,!amd64p32,!arm64
|
||||
|
||||
package cpu
|
||||
|
||||
func archInit() {
|
||||
if err := readHWCAP(); err != nil {
|
||||
return
|
||||
}
|
||||
doinit()
|
||||
Initialized = true
|
||||
}
|
39
vendor/golang.org/x/sys/cpu/cpu_linux_arm.go
generated
vendored
Normal file
@ -0,0 +1,39 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cpu
|
||||
|
||||
func doinit() {
|
||||
ARM.HasSWP = isSet(hwCap, hwcap_SWP)
|
||||
ARM.HasHALF = isSet(hwCap, hwcap_HALF)
|
||||
ARM.HasTHUMB = isSet(hwCap, hwcap_THUMB)
|
||||
ARM.Has26BIT = isSet(hwCap, hwcap_26BIT)
|
||||
ARM.HasFASTMUL = isSet(hwCap, hwcap_FAST_MULT)
|
||||
ARM.HasFPA = isSet(hwCap, hwcap_FPA)
|
||||
ARM.HasVFP = isSet(hwCap, hwcap_VFP)
|
||||
ARM.HasEDSP = isSet(hwCap, hwcap_EDSP)
|
||||
ARM.HasJAVA = isSet(hwCap, hwcap_JAVA)
|
||||
ARM.HasIWMMXT = isSet(hwCap, hwcap_IWMMXT)
|
||||
ARM.HasCRUNCH = isSet(hwCap, hwcap_CRUNCH)
|
||||
ARM.HasTHUMBEE = isSet(hwCap, hwcap_THUMBEE)
|
||||
ARM.HasNEON = isSet(hwCap, hwcap_NEON)
|
||||
ARM.HasVFPv3 = isSet(hwCap, hwcap_VFPv3)
|
||||
ARM.HasVFPv3D16 = isSet(hwCap, hwcap_VFPv3D16)
|
||||
ARM.HasTLS = isSet(hwCap, hwcap_TLS)
|
||||
ARM.HasVFPv4 = isSet(hwCap, hwcap_VFPv4)
|
||||
ARM.HasIDIVA = isSet(hwCap, hwcap_IDIVA)
|
||||
ARM.HasIDIVT = isSet(hwCap, hwcap_IDIVT)
|
||||
ARM.HasVFPD32 = isSet(hwCap, hwcap_VFPD32)
|
||||
ARM.HasLPAE = isSet(hwCap, hwcap_LPAE)
|
||||
ARM.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM)
|
||||
ARM.HasAES = isSet(hwCap2, hwcap2_AES)
|
||||
ARM.HasPMULL = isSet(hwCap2, hwcap2_PMULL)
|
||||
ARM.HasSHA1 = isSet(hwCap2, hwcap2_SHA1)
|
||||
ARM.HasSHA2 = isSet(hwCap2, hwcap2_SHA2)
|
||||
ARM.HasCRC32 = isSet(hwCap2, hwcap2_CRC32)
|
||||
}
|
||||
|
||||
func isSet(hwc uint, value uint) bool {
|
||||
return hwc&value != 0
|
||||
}
|
71
vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go
generated
vendored
Normal file
@ -0,0 +1,71 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cpu
|
||||
|
||||
// HWCAP/HWCAP2 bits. These are exposed by Linux.
|
||||
const (
|
||||
hwcap_FP = 1 << 0
|
||||
hwcap_ASIMD = 1 << 1
|
||||
hwcap_EVTSTRM = 1 << 2
|
||||
hwcap_AES = 1 << 3
|
||||
hwcap_PMULL = 1 << 4
|
||||
hwcap_SHA1 = 1 << 5
|
||||
hwcap_SHA2 = 1 << 6
|
||||
hwcap_CRC32 = 1 << 7
|
||||
hwcap_ATOMICS = 1 << 8
|
||||
hwcap_FPHP = 1 << 9
|
||||
hwcap_ASIMDHP = 1 << 10
|
||||
hwcap_CPUID = 1 << 11
|
||||
hwcap_ASIMDRDM = 1 << 12
|
||||
hwcap_JSCVT = 1 << 13
|
||||
hwcap_FCMA = 1 << 14
|
||||
hwcap_LRCPC = 1 << 15
|
||||
hwcap_DCPOP = 1 << 16
|
||||
hwcap_SHA3 = 1 << 17
|
||||
hwcap_SM3 = 1 << 18
|
||||
hwcap_SM4 = 1 << 19
|
||||
hwcap_ASIMDDP = 1 << 20
|
||||
hwcap_SHA512 = 1 << 21
|
||||
hwcap_SVE = 1 << 22
|
||||
hwcap_ASIMDFHM = 1 << 23
|
||||
)
|
||||
|
||||
func doinit() {
|
||||
if err := readHWCAP(); err != nil {
|
||||
// failed to read /proc/self/auxv, try reading registers directly
|
||||
readARM64Registers()
|
||||
return
|
||||
}
|
||||
|
||||
// HWCAP feature bits
|
||||
ARM64.HasFP = isSet(hwCap, hwcap_FP)
|
||||
ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD)
|
||||
ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM)
|
||||
ARM64.HasAES = isSet(hwCap, hwcap_AES)
|
||||
ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL)
|
||||
ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1)
|
||||
ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2)
|
||||
ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32)
|
||||
ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS)
|
||||
ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP)
|
||||
ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP)
|
||||
ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID)
|
||||
ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM)
|
||||
ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT)
|
||||
ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA)
|
||||
ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC)
|
||||
ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP)
|
||||
ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3)
|
||||
ARM64.HasSM3 = isSet(hwCap, hwcap_SM3)
|
||||
ARM64.HasSM4 = isSet(hwCap, hwcap_SM4)
|
||||
ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP)
|
||||
ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512)
|
||||
ARM64.HasSVE = isSet(hwCap, hwcap_SVE)
|
||||
ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM)
|
||||
}
|
||||
|
||||
func isSet(hwc uint, value uint) bool {
|
||||
return hwc&value != 0
|
||||
}
|
24
vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go
generated
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
// Copyright 2020 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build linux && (mips64 || mips64le)
|
||||
// +build linux
|
||||
// +build mips64 mips64le
|
||||
|
||||
package cpu
|
||||
|
||||
// HWCAP bits. These are exposed by the Linux kernel 5.4.
|
||||
const (
|
||||
// CPU features
|
||||
hwcap_MIPS_MSA = 1 << 1
|
||||
)
|
||||
|
||||
func doinit() {
|
||||
// HWCAP feature bits
|
||||
MIPS64X.HasMSA = isSet(hwCap, hwcap_MIPS_MSA)
|
||||
}
|
||||
|
||||
func isSet(hwc uint, value uint) bool {
|
||||
return hwc&value != 0
|
||||
}
|
10
vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x
|
||||
// +build linux,!arm,!arm64,!mips64,!mips64le,!ppc64,!ppc64le,!s390x
|
||||
|
||||
package cpu
|
||||
|
||||
func doinit() {}
|
32
vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go
generated
vendored
Normal file
@ -0,0 +1,32 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build linux && (ppc64 || ppc64le)
|
||||
// +build linux
|
||||
// +build ppc64 ppc64le
|
||||
|
||||
package cpu
|
||||
|
||||
// HWCAP/HWCAP2 bits. These are exposed by the kernel.
|
||||
const (
|
||||
// ISA Level
|
||||
_PPC_FEATURE2_ARCH_2_07 = 0x80000000
|
||||
_PPC_FEATURE2_ARCH_3_00 = 0x00800000
|
||||
|
||||
// CPU features
|
||||
_PPC_FEATURE2_DARN = 0x00200000
|
||||
_PPC_FEATURE2_SCV = 0x00100000
|
||||
)
|
||||
|
||||
func doinit() {
|
||||
// HWCAP2 feature bits
|
||||
PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07)
|
||||
PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00)
|
||||
PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN)
|
||||
PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV)
|
||||
}
|
||||
|
||||
func isSet(hwc uint, value uint) bool {
|
||||
return hwc&value != 0
|
||||
}
|
40
vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cpu
|
||||
|
||||
const (
|
||||
// bit mask values from /usr/include/bits/hwcap.h
|
||||
hwcap_ZARCH = 2
|
||||
hwcap_STFLE = 4
|
||||
hwcap_MSA = 8
|
||||
hwcap_LDISP = 16
|
||||
hwcap_EIMM = 32
|
||||
hwcap_DFP = 64
|
||||
hwcap_ETF3EH = 256
|
||||
hwcap_VX = 2048
|
||||
hwcap_VXE = 8192
|
||||
)
|
||||
|
||||
func initS390Xbase() {
|
||||
// test HWCAP bit vector
|
||||
has := func(featureMask uint) bool {
|
||||
return hwCap&featureMask == featureMask
|
||||
}
|
||||
|
||||
// mandatory
|
||||
S390X.HasZARCH = has(hwcap_ZARCH)
|
||||
|
||||
// optional
|
||||
S390X.HasSTFLE = has(hwcap_STFLE)
|
||||
S390X.HasLDISP = has(hwcap_LDISP)
|
||||
S390X.HasEIMM = has(hwcap_EIMM)
|
||||
S390X.HasETF3EH = has(hwcap_ETF3EH)
|
||||
S390X.HasDFP = has(hwcap_DFP)
|
||||
S390X.HasMSA = has(hwcap_MSA)
|
||||
S390X.HasVX = has(hwcap_VX)
|
||||
if S390X.HasVX {
|
||||
S390X.HasVXE = has(hwcap_VXE)
|
||||
}
|
||||
}
|
16
vendor/golang.org/x/sys/cpu/cpu_mips64x.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build mips64 || mips64le
|
||||
// +build mips64 mips64le
|
||||
|
||||
package cpu
|
||||
|
||||
const cacheLineSize = 32
|
||||
|
||||
func initOptions() {
|
||||
options = []option{
|
||||
{Name: "msa", Feature: &MIPS64X.HasMSA},
|
||||
}
|
||||
}
|
12
vendor/golang.org/x/sys/cpu/cpu_mipsx.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build mips || mipsle
|
||||
// +build mips mipsle
|
||||
|
||||
package cpu
|
||||
|
||||
const cacheLineSize = 32
|
||||
|
||||
func initOptions() {}
|
173
vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go
generated
vendored
Normal file
@ -0,0 +1,173 @@
|
||||
// Copyright 2020 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cpu
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Minimal copy of functionality from x/sys/unix so the cpu package can call
|
||||
// sysctl without depending on x/sys/unix.
|
||||
|
||||
const (
|
||||
_CTL_QUERY = -2
|
||||
|
||||
_SYSCTL_VERS_1 = 0x1000000
|
||||
)
|
||||
|
||||
var _zero uintptr
|
||||
|
||||
func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
|
||||
var _p0 unsafe.Pointer
|
||||
if len(mib) > 0 {
|
||||
_p0 = unsafe.Pointer(&mib[0])
|
||||
} else {
|
||||
_p0 = unsafe.Pointer(&_zero)
|
||||
}
|
||||
_, _, errno := syscall.Syscall6(
|
||||
syscall.SYS___SYSCTL,
|
||||
uintptr(_p0),
|
||||
uintptr(len(mib)),
|
||||
uintptr(unsafe.Pointer(old)),
|
||||
uintptr(unsafe.Pointer(oldlen)),
|
||||
uintptr(unsafe.Pointer(new)),
|
||||
uintptr(newlen))
|
||||
if errno != 0 {
|
||||
return errno
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type sysctlNode struct {
|
||||
Flags uint32
|
||||
Num int32
|
||||
Name [32]int8
|
||||
Ver uint32
|
||||
__rsvd uint32
|
||||
Un [16]byte
|
||||
_sysctl_size [8]byte
|
||||
_sysctl_func [8]byte
|
||||
_sysctl_parent [8]byte
|
||||
_sysctl_desc [8]byte
|
||||
}
|
||||
|
||||
func sysctlNodes(mib []int32) ([]sysctlNode, error) {
|
||||
var olen uintptr
|
||||
|
||||
// Get a list of all sysctl nodes below the given MIB by performing
|
||||
// a sysctl for the given MIB with CTL_QUERY appended.
|
||||
mib = append(mib, _CTL_QUERY)
|
||||
qnode := sysctlNode{Flags: _SYSCTL_VERS_1}
|
||||
qp := (*byte)(unsafe.Pointer(&qnode))
|
||||
sz := unsafe.Sizeof(qnode)
|
||||
if err := sysctl(mib, nil, &olen, qp, sz); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Now that we know the size, get the actual nodes.
|
||||
nodes := make([]sysctlNode, olen/sz)
|
||||
np := (*byte)(unsafe.Pointer(&nodes[0]))
|
||||
if err := sysctl(mib, np, &olen, qp, sz); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
func nametomib(name string) ([]int32, error) {
|
||||
// Split name into components.
|
||||
var parts []string
|
||||
last := 0
|
||||
for i := 0; i < len(name); i++ {
|
||||
if name[i] == '.' {
|
||||
parts = append(parts, name[last:i])
|
||||
last = i + 1
|
||||
}
|
||||
}
|
||||
parts = append(parts, name[last:])
|
||||
|
||||
mib := []int32{}
|
||||
// Discover the nodes and construct the MIB OID.
|
||||
for partno, part := range parts {
|
||||
nodes, err := sysctlNodes(mib)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, node := range nodes {
|
||||
n := make([]byte, 0)
|
||||
for i := range node.Name {
|
||||
if node.Name[i] != 0 {
|
||||
n = append(n, byte(node.Name[i]))
|
||||
}
|
||||
}
|
||||
if string(n) == part {
|
||||
mib = append(mib, int32(node.Num))
|
||||
break
|
||||
}
|
||||
}
|
||||
if len(mib) != partno+1 {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return mib, nil
|
||||
}
|
||||
|
||||
// aarch64SysctlCPUID is struct aarch64_sysctl_cpu_id from NetBSD's <aarch64/armreg.h>
|
||||
type aarch64SysctlCPUID struct {
|
||||
midr uint64 /* Main ID Register */
|
||||
revidr uint64 /* Revision ID Register */
|
||||
mpidr uint64 /* Multiprocessor Affinity Register */
|
||||
aa64dfr0 uint64 /* A64 Debug Feature Register 0 */
|
||||
aa64dfr1 uint64 /* A64 Debug Feature Register 1 */
|
||||
aa64isar0 uint64 /* A64 Instruction Set Attribute Register 0 */
|
||||
aa64isar1 uint64 /* A64 Instruction Set Attribute Register 1 */
|
||||
aa64mmfr0 uint64 /* A64 Memory Model Feature Register 0 */
|
||||
aa64mmfr1 uint64 /* A64 Memory Model Feature Register 1 */
|
||||
aa64mmfr2 uint64 /* A64 Memory Model Feature Register 2 */
|
||||
aa64pfr0 uint64 /* A64 Processor Feature Register 0 */
|
||||
aa64pfr1 uint64 /* A64 Processor Feature Register 1 */
|
||||
aa64zfr0 uint64 /* A64 SVE Feature ID Register 0 */
|
||||
mvfr0 uint32 /* Media and VFP Feature Register 0 */
|
||||
mvfr1 uint32 /* Media and VFP Feature Register 1 */
|
||||
mvfr2 uint32 /* Media and VFP Feature Register 2 */
|
||||
pad uint32
|
||||
clidr uint64 /* Cache Level ID Register */
|
||||
ctr uint64 /* Cache Type Register */
|
||||
}
|
||||
|
||||
func sysctlCPUID(name string) (*aarch64SysctlCPUID, error) {
|
||||
mib, err := nametomib(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out := aarch64SysctlCPUID{}
|
||||
n := unsafe.Sizeof(out)
|
||||
_, _, errno := syscall.Syscall6(
|
||||
syscall.SYS___SYSCTL,
|
||||
uintptr(unsafe.Pointer(&mib[0])),
|
||||
uintptr(len(mib)),
|
||||
uintptr(unsafe.Pointer(&out)),
|
||||
uintptr(unsafe.Pointer(&n)),
|
||||
uintptr(0),
|
||||
uintptr(0))
|
||||
if errno != 0 {
|
||||
return nil, errno
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func doinit() {
|
||||
cpuid, err := sysctlCPUID("machdep.cpu0.cpu_id")
|
||||
if err != nil {
|
||||
setMinimalFeatures()
|
||||
return
|
||||
}
|
||||
parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0)
|
||||
|
||||
Initialized = true
|
||||
}
|
10
vendor/golang.org/x/sys/cpu/cpu_other_arm.go
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
// Copyright 2020 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !linux && arm
|
||||
// +build !linux,arm
|
||||
|
||||
package cpu
|
||||
|
||||
func archInit() {}
|
10
vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !linux && !netbsd && arm64
|
||||
// +build !linux,!netbsd,arm64
|
||||
|
||||
package cpu
|
||||
|
||||
func doinit() {}
|
13
vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
// Copyright 2020 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !linux && (mips64 || mips64le)
|
||||
// +build !linux
|
||||
// +build mips64 mips64le
|
||||
|
||||
package cpu
|
||||
|
||||
func archInit() {
|
||||
Initialized = true
|
||||
}
|
17
vendor/golang.org/x/sys/cpu/cpu_ppc64x.go
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
// Copyright 2020 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build ppc64 || ppc64le
|
||||
// +build ppc64 ppc64le
|
||||
|
||||
package cpu
|
||||
|
||||
const cacheLineSize = 128
|
||||
|
||||
func initOptions() {
|
||||
options = []option{
|
||||
{Name: "darn", Feature: &PPC64.HasDARN},
|
||||
{Name: "scv", Feature: &PPC64.HasSCV},
|
||||
}
|
||||
}
|
12
vendor/golang.org/x/sys/cpu/cpu_riscv64.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build riscv64
|
||||
// +build riscv64
|
||||
|
||||
package cpu
|
||||
|
||||
const cacheLineSize = 32
|
||||
|
||||
func initOptions() {}
|
172
vendor/golang.org/x/sys/cpu/cpu_s390x.go
generated
vendored
Normal file
@ -0,0 +1,172 @@
|
||||
// Copyright 2020 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cpu
|
||||
|
||||
const cacheLineSize = 256
|
||||
|
||||
func initOptions() {
|
||||
options = []option{
|
||||
{Name: "zarch", Feature: &S390X.HasZARCH, Required: true},
|
||||
{Name: "stfle", Feature: &S390X.HasSTFLE, Required: true},
|
||||
{Name: "ldisp", Feature: &S390X.HasLDISP, Required: true},
|
||||
{Name: "eimm", Feature: &S390X.HasEIMM, Required: true},
|
||||
{Name: "dfp", Feature: &S390X.HasDFP},
|
||||
{Name: "etf3eh", Feature: &S390X.HasETF3EH},
|
||||
{Name: "msa", Feature: &S390X.HasMSA},
|
||||
{Name: "aes", Feature: &S390X.HasAES},
|
||||
{Name: "aescbc", Feature: &S390X.HasAESCBC},
|
||||
{Name: "aesctr", Feature: &S390X.HasAESCTR},
|
||||
{Name: "aesgcm", Feature: &S390X.HasAESGCM},
|
||||
{Name: "ghash", Feature: &S390X.HasGHASH},
|
||||
{Name: "sha1", Feature: &S390X.HasSHA1},
|
||||
{Name: "sha256", Feature: &S390X.HasSHA256},
|
||||
{Name: "sha3", Feature: &S390X.HasSHA3},
|
||||
{Name: "sha512", Feature: &S390X.HasSHA512},
|
||||
{Name: "vx", Feature: &S390X.HasVX},
|
||||
{Name: "vxe", Feature: &S390X.HasVXE},
|
||||
}
|
||||
}
|
||||
|
||||
// bitIsSet reports whether the bit at index is set. The bit index
|
||||
// is in big endian order, so bit index 0 is the leftmost bit.
|
||||
func bitIsSet(bits []uint64, index uint) bool {
|
||||
return bits[index/64]&((1<<63)>>(index%64)) != 0
|
||||
}
|
||||
|
||||
// facility is a bit index for the named facility.
|
||||
type facility uint8
|
||||
|
||||
const (
|
||||
// mandatory facilities
|
||||
zarch facility = 1 // z architecture mode is active
|
||||
stflef facility = 7 // store-facility-list-extended
|
||||
ldisp facility = 18 // long-displacement
|
||||
eimm facility = 21 // extended-immediate
|
||||
|
||||
// miscellaneous facilities
|
||||
dfp facility = 42 // decimal-floating-point
|
||||
etf3eh facility = 30 // extended-translation 3 enhancement
|
||||
|
||||
// cryptography facilities
|
||||
msa facility = 17 // message-security-assist
|
||||
msa3 facility = 76 // message-security-assist extension 3
|
||||
msa4 facility = 77 // message-security-assist extension 4
|
||||
msa5 facility = 57 // message-security-assist extension 5
|
||||
msa8 facility = 146 // message-security-assist extension 8
|
||||
msa9 facility = 155 // message-security-assist extension 9
|
||||
|
||||
// vector facilities
|
||||
vx facility = 129 // vector facility
|
||||
vxe facility = 135 // vector-enhancements 1
|
||||
vxe2 facility = 148 // vector-enhancements 2
|
||||
)
|
||||
|
||||
// facilityList contains the result of an STFLE call.
|
||||
// Bits are numbered in big endian order so the
|
||||
// leftmost bit (the MSB) is at index 0.
|
||||
type facilityList struct {
|
||||
bits [4]uint64
|
||||
}
|
||||
|
||||
// Has reports whether the given facilities are present.
|
||||
func (s *facilityList) Has(fs ...facility) bool {
|
||||
if len(fs) == 0 {
|
||||
panic("no facility bits provided")
|
||||
}
|
||||
for _, f := range fs {
|
||||
if !bitIsSet(s.bits[:], uint(f)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// function is the code for the named cryptographic function.
|
||||
type function uint8
|
||||
|
||||
const (
|
||||
// KM{,A,C,CTR} function codes
|
||||
aes128 function = 18 // AES-128
|
||||
aes192 function = 19 // AES-192
|
||||
aes256 function = 20 // AES-256
|
||||
|
||||
// K{I,L}MD function codes
|
||||
sha1 function = 1 // SHA-1
|
||||
sha256 function = 2 // SHA-256
|
||||
sha512 function = 3 // SHA-512
|
||||
sha3_224 function = 32 // SHA3-224
|
||||
sha3_256 function = 33 // SHA3-256
|
||||
sha3_384 function = 34 // SHA3-384
|
||||
sha3_512 function = 35 // SHA3-512
|
||||
shake128 function = 36 // SHAKE-128
|
||||
shake256 function = 37 // SHAKE-256
|
||||
|
||||
// KLMD function codes
|
||||
ghash function = 65 // GHASH
|
||||
)
|
||||
|
||||
// queryResult contains the result of a Query function
|
||||
// call. Bits are numbered in big endian order so the
|
||||
// leftmost bit (the MSB) is at index 0.
|
||||
type queryResult struct {
|
||||
bits [2]uint64
|
||||
}
|
||||
|
||||
// Has reports whether the given functions are present.
|
||||
func (q *queryResult) Has(fns ...function) bool {
|
||||
if len(fns) == 0 {
|
||||
panic("no function codes provided")
|
||||
}
|
||||
for _, f := range fns {
|
||||
if !bitIsSet(q.bits[:], uint(f)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func doinit() {
|
||||
initS390Xbase()
|
||||
|
||||
// We need implementations of stfle, km and so on
|
||||
// to detect cryptographic features.
|
||||
if !haveAsmFunctions() {
|
||||
return
|
||||
}
|
||||
|
||||
// optional cryptographic functions
|
||||
if S390X.HasMSA {
|
||||
aes := []function{aes128, aes192, aes256}
|
||||
|
||||
// cipher message
|
||||
km, kmc := kmQuery(), kmcQuery()
|
||||
S390X.HasAES = km.Has(aes...)
|
||||
S390X.HasAESCBC = kmc.Has(aes...)
|
||||
if S390X.HasSTFLE {
|
||||
facilities := stfle()
|
||||
if facilities.Has(msa4) {
|
||||
kmctr := kmctrQuery()
|
||||
S390X.HasAESCTR = kmctr.Has(aes...)
|
||||
}
|
||||
if facilities.Has(msa8) {
|
||||
kma := kmaQuery()
|
||||
S390X.HasAESGCM = kma.Has(aes...)
|
||||
}
|
||||
}
|
||||
|
||||
// compute message digest
|
||||
kimd := kimdQuery() // intermediate (no padding)
|
||||
klmd := klmdQuery() // last (padding)
|
||||
S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1)
|
||||
S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256)
|
||||
S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512)
|
||||
S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist
|
||||
sha3 := []function{
|
||||
sha3_224, sha3_256, sha3_384, sha3_512,
|
||||
shake128, shake256,
|
||||
}
|
||||
S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...)
|
||||
}
|
||||
}
|
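Both facilityList and queryResult above use big-endian bit numbering, which is easy to get backwards. A stand-alone sketch (the simulated STFLE result is made up) showing how bit index 18, the long-displacement facility, lands in the first doubleword:

package main

import "fmt"

// bitIsSet mirrors the helper above: index 0 is the most significant bit of
// bits[0], matching the numbering used by STFLE and the query instructions.
func bitIsSet(bits []uint64, index uint) bool {
	return bits[index/64]&((1<<63)>>(index%64)) != 0
}

func main() {
	var list [4]uint64
	list[0] = (1 << 63) >> 18              // pretend only facility 18 (long-displacement) is set
	fmt.Println(bitIsSet(list[:], 18))     // true
	fmt.Println(bitIsSet(list[:], 17))     // false
}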
58
vendor/golang.org/x/sys/cpu/cpu_s390x.s
generated
vendored
Normal file
@ -0,0 +1,58 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gc
|
||||
// +build gc
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// func stfle() facilityList
|
||||
TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32
|
||||
MOVD $ret+0(FP), R1
|
||||
MOVD $3, R0 // last doubleword index to store
|
||||
XC $32, (R1), (R1) // clear 4 doublewords (32 bytes)
|
||||
WORD $0xb2b01000 // store facility list extended (STFLE)
|
||||
RET
|
||||
|
||||
// func kmQuery() queryResult
|
||||
TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16
|
||||
MOVD $0, R0 // set function code to 0 (KM-Query)
|
||||
MOVD $ret+0(FP), R1 // address of 16-byte return value
|
||||
WORD $0xB92E0024 // cipher message (KM)
|
||||
RET
|
||||
|
||||
// func kmcQuery() queryResult
|
||||
TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16
|
||||
MOVD $0, R0 // set function code to 0 (KMC-Query)
|
||||
MOVD $ret+0(FP), R1 // address of 16-byte return value
|
||||
WORD $0xB92F0024 // cipher message with chaining (KMC)
|
||||
RET
|
||||
|
||||
// func kmctrQuery() queryResult
|
||||
TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16
|
||||
MOVD $0, R0 // set function code to 0 (KMCTR-Query)
|
||||
MOVD $ret+0(FP), R1 // address of 16-byte return value
|
||||
WORD $0xB92D4024 // cipher message with counter (KMCTR)
|
||||
RET
|
||||
|
||||
// func kmaQuery() queryResult
|
||||
TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16
|
||||
MOVD $0, R0 // set function code to 0 (KMA-Query)
|
||||
MOVD $ret+0(FP), R1 // address of 16-byte return value
|
||||
WORD $0xb9296024 // cipher message with authentication (KMA)
|
||||
RET
|
||||
|
||||
// func kimdQuery() queryResult
|
||||
TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16
|
||||
MOVD $0, R0 // set function code to 0 (KIMD-Query)
|
||||
MOVD $ret+0(FP), R1 // address of 16-byte return value
|
||||
WORD $0xB93E0024 // compute intermediate message digest (KIMD)
|
||||
RET
|
||||
|
||||
// func klmdQuery() queryResult
|
||||
TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16
|
||||
MOVD $0, R0 // set function code to 0 (KLMD-Query)
|
||||
MOVD $ret+0(FP), R1 // address of 16-byte return value
|
||||
WORD $0xB93F0024 // compute last message digest (KLMD)
|
||||
RET
|
18
vendor/golang.org/x/sys/cpu/cpu_wasm.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build wasm
|
||||
// +build wasm
|
||||
|
||||
package cpu
|
||||
|
||||
// We're compiling the cpu package for an unknown (software-abstracted) CPU.
|
||||
// Make CacheLinePad an empty struct and hope that the usual struct alignment
|
||||
// rules are good enough.
|
||||
|
||||
const cacheLineSize = 0
|
||||
|
||||
func initOptions() {}
|
||||
|
||||
func archInit() {}
|
142
vendor/golang.org/x/sys/cpu/cpu_x86.go
generated
vendored
Normal file
@ -0,0 +1,142 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build 386 || amd64 || amd64p32
|
||||
// +build 386 amd64 amd64p32
|
||||
|
||||
package cpu
|
||||
|
||||
import "runtime"
|
||||
|
||||
const cacheLineSize = 64
|
||||
|
||||
func initOptions() {
|
||||
options = []option{
|
||||
{Name: "adx", Feature: &X86.HasADX},
|
||||
{Name: "aes", Feature: &X86.HasAES},
|
||||
{Name: "avx", Feature: &X86.HasAVX},
|
||||
{Name: "avx2", Feature: &X86.HasAVX2},
|
||||
{Name: "avx512", Feature: &X86.HasAVX512},
|
||||
{Name: "avx512f", Feature: &X86.HasAVX512F},
|
||||
{Name: "avx512cd", Feature: &X86.HasAVX512CD},
|
||||
{Name: "avx512er", Feature: &X86.HasAVX512ER},
|
||||
{Name: "avx512pf", Feature: &X86.HasAVX512PF},
|
||||
{Name: "avx512vl", Feature: &X86.HasAVX512VL},
|
||||
{Name: "avx512bw", Feature: &X86.HasAVX512BW},
|
||||
{Name: "avx512dq", Feature: &X86.HasAVX512DQ},
|
||||
{Name: "avx512ifma", Feature: &X86.HasAVX512IFMA},
|
||||
{Name: "avx512vbmi", Feature: &X86.HasAVX512VBMI},
|
||||
{Name: "avx512vnniw", Feature: &X86.HasAVX5124VNNIW},
|
||||
{Name: "avx5124fmaps", Feature: &X86.HasAVX5124FMAPS},
|
||||
{Name: "avx512vpopcntdq", Feature: &X86.HasAVX512VPOPCNTDQ},
|
||||
{Name: "avx512vpclmulqdq", Feature: &X86.HasAVX512VPCLMULQDQ},
|
||||
{Name: "avx512vnni", Feature: &X86.HasAVX512VNNI},
|
||||
{Name: "avx512gfni", Feature: &X86.HasAVX512GFNI},
|
||||
{Name: "avx512vaes", Feature: &X86.HasAVX512VAES},
|
||||
{Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2},
|
||||
{Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG},
|
||||
{Name: "avx512bf16", Feature: &X86.HasAVX512BF16},
|
||||
{Name: "bmi1", Feature: &X86.HasBMI1},
|
||||
{Name: "bmi2", Feature: &X86.HasBMI2},
|
||||
{Name: "erms", Feature: &X86.HasERMS},
|
||||
{Name: "fma", Feature: &X86.HasFMA},
|
||||
{Name: "osxsave", Feature: &X86.HasOSXSAVE},
|
||||
{Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ},
|
||||
{Name: "popcnt", Feature: &X86.HasPOPCNT},
|
||||
{Name: "rdrand", Feature: &X86.HasRDRAND},
|
||||
{Name: "rdseed", Feature: &X86.HasRDSEED},
|
||||
{Name: "sse3", Feature: &X86.HasSSE3},
|
||||
{Name: "sse41", Feature: &X86.HasSSE41},
|
||||
{Name: "sse42", Feature: &X86.HasSSE42},
|
||||
{Name: "ssse3", Feature: &X86.HasSSSE3},
|
||||
|
||||
// These capabilities should always be enabled on amd64:
|
||||
{Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"},
|
||||
}
|
||||
}
|
||||
|
||||
func archInit() {
|
||||
|
||||
Initialized = true
|
||||
|
||||
maxID, _, _, _ := cpuid(0, 0)
|
||||
|
||||
if maxID < 1 {
|
||||
return
|
||||
}
|
||||
|
||||
_, _, ecx1, edx1 := cpuid(1, 0)
|
||||
X86.HasSSE2 = isSet(26, edx1)
|
||||
|
||||
X86.HasSSE3 = isSet(0, ecx1)
|
||||
X86.HasPCLMULQDQ = isSet(1, ecx1)
|
||||
X86.HasSSSE3 = isSet(9, ecx1)
|
||||
X86.HasFMA = isSet(12, ecx1)
|
||||
X86.HasSSE41 = isSet(19, ecx1)
|
||||
X86.HasSSE42 = isSet(20, ecx1)
|
||||
X86.HasPOPCNT = isSet(23, ecx1)
|
||||
X86.HasAES = isSet(25, ecx1)
|
||||
X86.HasOSXSAVE = isSet(27, ecx1)
|
||||
X86.HasRDRAND = isSet(30, ecx1)
|
||||
|
||||
var osSupportsAVX, osSupportsAVX512 bool
|
||||
// For XGETBV, OSXSAVE bit is required and sufficient.
|
||||
if X86.HasOSXSAVE {
|
||||
eax, _ := xgetbv()
|
||||
// Check if XMM and YMM registers have OS support.
|
||||
osSupportsAVX = isSet(1, eax) && isSet(2, eax)
|
||||
|
||||
if runtime.GOOS == "darwin" {
|
||||
// Check darwin commpage for AVX512 support. Necessary because:
|
||||
// https://github.com/apple/darwin-xnu/blob/0a798f6738bc1db01281fc08ae024145e84df927/osfmk/i386/fpu.c#L175-L201
|
||||
osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512()
|
||||
} else {
|
||||
// Check if OPMASK and ZMM registers have OS support.
|
||||
osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax)
|
||||
}
|
||||
}
|
||||
|
||||
X86.HasAVX = isSet(28, ecx1) && osSupportsAVX
|
||||
|
||||
if maxID < 7 {
|
||||
return
|
||||
}
|
||||
|
||||
_, ebx7, ecx7, edx7 := cpuid(7, 0)
|
||||
X86.HasBMI1 = isSet(3, ebx7)
|
||||
X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
|
||||
X86.HasBMI2 = isSet(8, ebx7)
|
||||
X86.HasERMS = isSet(9, ebx7)
|
||||
X86.HasRDSEED = isSet(18, ebx7)
|
||||
X86.HasADX = isSet(19, ebx7)
|
||||
|
||||
X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension
|
||||
if X86.HasAVX512 {
|
||||
X86.HasAVX512F = true
|
||||
X86.HasAVX512CD = isSet(28, ebx7)
|
||||
X86.HasAVX512ER = isSet(27, ebx7)
|
||||
X86.HasAVX512PF = isSet(26, ebx7)
|
||||
X86.HasAVX512VL = isSet(31, ebx7)
|
||||
X86.HasAVX512BW = isSet(30, ebx7)
|
||||
X86.HasAVX512DQ = isSet(17, ebx7)
|
||||
X86.HasAVX512IFMA = isSet(21, ebx7)
|
||||
X86.HasAVX512VBMI = isSet(1, ecx7)
|
||||
X86.HasAVX5124VNNIW = isSet(2, edx7)
|
||||
X86.HasAVX5124FMAPS = isSet(3, edx7)
|
||||
X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7)
|
||||
X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7)
|
||||
X86.HasAVX512VNNI = isSet(11, ecx7)
|
||||
X86.HasAVX512GFNI = isSet(8, ecx7)
|
||||
X86.HasAVX512VAES = isSet(9, ecx7)
|
||||
X86.HasAVX512VBMI2 = isSet(6, ecx7)
|
||||
X86.HasAVX512BITALG = isSet(12, ecx7)
|
||||
|
||||
eax71, _, _, _ := cpuid(7, 1)
|
||||
X86.HasAVX512BF16 = isSet(5, eax71)
|
||||
}
|
||||
}
|
||||
|
||||
func isSet(bitpos uint, value uint32) bool {
|
||||
return value&(1<<bitpos) != 0
|
||||
}
|
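archInit above only trusts the AVX and AVX-512 CPUID bits when XGETBV reports that the OS actually saves the corresponding register state. A small sketch with a made-up XCR0 value showing how the bit tests combine:

package main

import "fmt"

// isSet mirrors the helper above.
func isSet(bitpos uint, value uint32) bool {
	return value&(1<<bitpos) != 0
}

func main() {
	// Hypothetical XGETBV EAX (XCR0): x87, XMM, YMM and the three AVX-512
	// state components are all enabled by the OS.
	var xcr0 uint32 = 0xE7
	osSupportsAVX := isSet(1, xcr0) && isSet(2, xcr0)
	osSupportsAVX512 := osSupportsAVX && isSet(5, xcr0) && isSet(6, xcr0) && isSet(7, xcr0)
	fmt.Println(osSupportsAVX, osSupportsAVX512) // true true
}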
52
vendor/golang.org/x/sys/cpu/cpu_x86.s
generated
vendored
Normal file
@ -0,0 +1,52 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build (386 || amd64 || amd64p32) && gc
|
||||
// +build 386 amd64 amd64p32
|
||||
// +build gc
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
|
||||
TEXT ·cpuid(SB), NOSPLIT, $0-24
|
||||
MOVL eaxArg+0(FP), AX
|
||||
MOVL ecxArg+4(FP), CX
|
||||
CPUID
|
||||
MOVL AX, eax+8(FP)
|
||||
MOVL BX, ebx+12(FP)
|
||||
MOVL CX, ecx+16(FP)
|
||||
MOVL DX, edx+20(FP)
|
||||
RET
|
||||
|
||||
// func xgetbv() (eax, edx uint32)
|
||||
TEXT ·xgetbv(SB),NOSPLIT,$0-8
|
||||
MOVL $0, CX
|
||||
XGETBV
|
||||
MOVL AX, eax+0(FP)
|
||||
MOVL DX, edx+4(FP)
|
||||
RET
|
||||
|
||||
// func darwinSupportsAVX512() bool
|
||||
TEXT ·darwinSupportsAVX512(SB), NOSPLIT, $0-1
|
||||
MOVB $0, ret+0(FP) // default to false
|
||||
#ifdef GOOS_darwin // return if not darwin
|
||||
#ifdef GOARCH_amd64 // return if not amd64
|
||||
// These values from:
|
||||
// https://github.com/apple/darwin-xnu/blob/xnu-4570.1.46/osfmk/i386/cpu_capabilities.h
|
||||
#define commpage64_base_address 0x00007fffffe00000
|
||||
#define commpage64_cpu_capabilities64 (commpage64_base_address+0x010)
|
||||
#define commpage64_version (commpage64_base_address+0x01E)
|
||||
#define hasAVX512F 0x0000004000000000
|
||||
MOVQ $commpage64_version, BX
|
||||
CMPW (BX), $13 // cpu_capabilities64 undefined in versions < 13
|
||||
JL no_avx512
|
||||
MOVQ $commpage64_cpu_capabilities64, BX
|
||||
MOVQ $hasAVX512F, CX
|
||||
TESTQ (BX), CX
|
||||
JZ no_avx512
|
||||
MOVB $1, ret+0(FP)
|
||||
no_avx512:
|
||||
#endif
|
||||
#endif
|
||||
RET
|
10
vendor/golang.org/x/sys/cpu/cpu_zos.go
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
// Copyright 2020 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cpu
|
||||
|
||||
func archInit() {
|
||||
doinit()
|
||||
Initialized = true
|
||||
}
|
25
vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
// Copyright 2020 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cpu
|
||||
|
||||
func initS390Xbase() {
|
||||
// get the facilities list
|
||||
facilities := stfle()
|
||||
|
||||
// mandatory
|
||||
S390X.HasZARCH = facilities.Has(zarch)
|
||||
S390X.HasSTFLE = facilities.Has(stflef)
|
||||
S390X.HasLDISP = facilities.Has(ldisp)
|
||||
S390X.HasEIMM = facilities.Has(eimm)
|
||||
|
||||
// optional
|
||||
S390X.HasETF3EH = facilities.Has(etf3eh)
|
||||
S390X.HasDFP = facilities.Has(dfp)
|
||||
S390X.HasMSA = facilities.Has(msa)
|
||||
S390X.HasVX = facilities.Has(vx)
|
||||
if S390X.HasVX {
|
||||
S390X.HasVXE = facilities.Has(vxe)
|
||||
}
|
||||
}
|
56
vendor/golang.org/x/sys/cpu/hwcap_linux.go
generated
vendored
Normal file
@ -0,0 +1,56 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cpu
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
)
|
||||
|
||||
const (
|
||||
_AT_HWCAP = 16
|
||||
_AT_HWCAP2 = 26
|
||||
|
||||
procAuxv = "/proc/self/auxv"
|
||||
|
||||
uintSize = int(32 << (^uint(0) >> 63))
|
||||
)
|
||||
|
||||
// For those platforms that don't have a 'cpuid' equivalent we use HWCAP/HWCAP2
|
||||
// These are initialized in cpu_$GOARCH.go
|
||||
// and should not be changed after they are initialized.
|
||||
var hwCap uint
|
||||
var hwCap2 uint
|
||||
|
||||
func readHWCAP() error {
|
||||
buf, err := ioutil.ReadFile(procAuxv)
|
||||
if err != nil {
|
||||
// e.g. on android /proc/self/auxv is not accessible, so silently
|
||||
// ignore the error and leave Initialized = false. On some
|
||||
// architectures (e.g. arm64) doinit() implements a fallback
|
||||
// readout and will set Initialized = true again.
|
||||
return err
|
||||
}
|
||||
bo := hostByteOrder()
|
||||
for len(buf) >= 2*(uintSize/8) {
|
||||
var tag, val uint
|
||||
switch uintSize {
|
||||
case 32:
|
||||
tag = uint(bo.Uint32(buf[0:]))
|
||||
val = uint(bo.Uint32(buf[4:]))
|
||||
buf = buf[8:]
|
||||
case 64:
|
||||
tag = uint(bo.Uint64(buf[0:]))
|
||||
val = uint(bo.Uint64(buf[8:]))
|
||||
buf = buf[16:]
|
||||
}
|
||||
switch tag {
|
||||
case _AT_HWCAP:
|
||||
hwCap = val
|
||||
case _AT_HWCAP2:
|
||||
hwCap2 = val
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
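readHWCAP above walks /proc/self/auxv as a flat array of native-endian (tag, value) pairs. A stand-alone sketch decoding a synthetic little-endian 64-bit buffer (the HWCAP value is made up) that makes the layout easy to see:

package main

import (
	"encoding/binary"
	"fmt"
)

const _AT_HWCAP = 16

func main() {
	// One AT_HWCAP entry followed by an all-zero AT_NULL terminator.
	buf := make([]byte, 32)
	binary.LittleEndian.PutUint64(buf[0:], _AT_HWCAP)
	binary.LittleEndian.PutUint64(buf[8:], 0xff) // pretend HWCAP bits

	var hwCap uint64
	for i := 0; i+16 <= len(buf); i += 16 {
		tag := binary.LittleEndian.Uint64(buf[i:])
		val := binary.LittleEndian.Uint64(buf[i+8:])
		if tag == _AT_HWCAP {
			hwCap = val
		}
	}
	fmt.Printf("hwCap = %#x\n", hwCap) // hwCap = 0xff
}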
27
vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
// Copyright 2020 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Recreate a getsystemcfg syscall handler instead of
|
||||
// using the one provided by x/sys/unix to avoid having
|
||||
// the dependency between them. (See golang.org/issue/32102)
|
||||
// Moreover, this file will be used during the building of
|
||||
// gccgo's libgo and thus must not use a cgo method.
|
||||
|
||||
//go:build aix && gccgo
|
||||
// +build aix,gccgo
|
||||
|
||||
package cpu
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
//extern getsystemcfg
|
||||
func gccgoGetsystemcfg(label uint32) (r uint64)
|
||||
|
||||
func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) {
|
||||
r1 = uintptr(gccgoGetsystemcfg(uint32(label)))
|
||||
e1 = syscall.GetErrno()
|
||||
return
|
||||
}
|
36
vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go
generated
vendored
Normal file
@ -0,0 +1,36 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Minimal copy of x/sys/unix so the cpu package can make a
// system call on AIX without depending on x/sys/unix.
// (See golang.org/issue/32102)

//go:build aix && ppc64 && gc
// +build aix,ppc64,gc

package cpu

import (
    "syscall"
    "unsafe"
)

//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o"

//go:linkname libc_getsystemcfg libc_getsystemcfg

type syscallFunc uintptr

var libc_getsystemcfg syscallFunc

type errno = syscall.Errno

// Implemented in runtime/syscall_aix.go.
func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno)
func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno)

func callgetsystemcfg(label int) (r1 uintptr, e1 errno) {
    r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0)
    return
}
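For orientation, roughly how the package's AIX init code uses this wrapper to detect the POWER generation. This is a package-internal sketch, not a standalone program, and the label/bit constants are quoted from memory of the AIX getsystemcfg interface, so treat them as assumptions rather than authoritative values:

```
// Sketch only; constants below are assumed, not verified against sys/systemcfg.h.
const (
    _SC_IMPL     = 2       // getsystemcfg label for the implementation word (assumed)
    _IMPL_POWER8 = 0x10000 // bit reported for POWER8 (assumed)
    _IMPL_POWER9 = 0x20000 // bit reported for POWER9 (assumed)
)

func archInit() {
    impl, _ := callgetsystemcfg(_SC_IMPL)
    PPC64.IsPOWER8 = impl&_IMPL_POWER8 != 0
    PPC64.IsPOWER9 = impl&_IMPL_POWER9 != 0
    Initialized = true
}
```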
102
vendor/golang.org/x/sys/execabs/execabs.go
generated
vendored
Normal file
@ -0,0 +1,102 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package execabs is a drop-in replacement for os/exec
// that requires PATH lookups to find absolute paths.
// That is, execabs.Command("cmd") runs the same PATH lookup
// as exec.Command("cmd"), but if the result is a path
// which is relative, the Run and Start methods will report
// an error instead of running the executable.
//
// See https://blog.golang.org/path-security for more information
// about when it may be necessary or appropriate to use this package.
package execabs

import (
    "context"
    "fmt"
    "os/exec"
    "path/filepath"
    "reflect"
    "unsafe"
)

// ErrNotFound is the error resulting if a path search failed to find an executable file.
// It is an alias for exec.ErrNotFound.
var ErrNotFound = exec.ErrNotFound

// Cmd represents an external command being prepared or run.
// It is an alias for exec.Cmd.
type Cmd = exec.Cmd

// Error is returned by LookPath when it fails to classify a file as an executable.
// It is an alias for exec.Error.
type Error = exec.Error

// An ExitError reports an unsuccessful exit by a command.
// It is an alias for exec.ExitError.
type ExitError = exec.ExitError

func relError(file, path string) error {
    return fmt.Errorf("%s resolves to executable in current directory (.%c%s)", file, filepath.Separator, path)
}

// LookPath searches for an executable named file in the directories
// named by the PATH environment variable. If file contains a slash,
// it is tried directly and the PATH is not consulted. The result will be
// an absolute path.
//
// LookPath differs from exec.LookPath in its handling of PATH lookups,
// which are used for file names without slashes. If exec.LookPath's
// PATH lookup would have returned an executable from the current directory,
// LookPath instead returns an error.
func LookPath(file string) (string, error) {
    path, err := exec.LookPath(file)
    if err != nil {
        return "", err
    }
    if filepath.Base(file) == file && !filepath.IsAbs(path) {
        return "", relError(file, path)
    }
    return path, nil
}

func fixCmd(name string, cmd *exec.Cmd) {
    if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) {
        // exec.Command was called with a bare binary name and
        // exec.LookPath returned a path which is not absolute.
        // Set cmd.lookPathErr and clear cmd.Path so that it
        // cannot be run.
        lookPathErr := (*error)(unsafe.Pointer(reflect.ValueOf(cmd).Elem().FieldByName("lookPathErr").Addr().Pointer()))
        if *lookPathErr == nil {
            *lookPathErr = relError(name, cmd.Path)
        }
        cmd.Path = ""
    }
}

// CommandContext is like Command but includes a context.
//
// The provided context is used to kill the process (by calling os.Process.Kill)
// if the context becomes done before the command completes on its own.
func CommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd {
    cmd := exec.CommandContext(ctx, name, arg...)
    fixCmd(name, cmd)
    return cmd
}

// Command returns the Cmd struct to execute the named program with the given arguments.
// See exec.Command for most details.
//
// Command differs from exec.Command in its handling of PATH lookups,
// which are used when the program name contains no slashes.
// If exec.Command would have returned an exec.Cmd configured to run an
// executable from the current directory, Command instead
// returns an exec.Cmd that will return an error from Start or Run.
func Command(name string, arg ...string) *exec.Cmd {
    cmd := exec.Command(name, arg...)
    fixCmd(name, cmd)
    return cmd
}
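A minimal usage sketch for this package, following the documented drop-in behavior above ("git" is just an arbitrary example binary, not something this project depends on):

```
package main

import (
    "log"

    "golang.org/x/sys/execabs"
)

func main() {
    // Same API as exec.Command, but if "git" resolves to ./git in the
    // current directory, Run returns an error instead of executing it.
    cmd := execabs.Command("git", "status")
    if err := cmd.Run(); err != nil {
        log.Fatal(err)
    }
}
```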
30
vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package unsafeheader contains header declarations for the Go runtime's
// slice and string implementations.
//
// This package allows x/sys to use types equivalent to
// reflect.SliceHeader and reflect.StringHeader without introducing
// a dependency on the (relatively heavy) "reflect" package.
package unsafeheader

import (
    "unsafe"
)

// Slice is the runtime representation of a slice.
// It cannot be used safely or portably and its representation may change in a later release.
type Slice struct {
    Data unsafe.Pointer
    Len  int
    Cap  int
}

// String is the runtime representation of a string.
// It cannot be used safely or portably and its representation may change in a later release.
type String struct {
    Data unsafe.Pointer
    Len  int
}
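As a rough illustration of why these headers exist, this is the kind of conversion x/sys performs with them. The helper name bytesFrom is hypothetical, the package is internal (importable only from within the x/sys module), and the pattern itself is unsafe by design; treat this strictly as a sketch:

```
package unix

import (
    "unsafe"

    "golang.org/x/sys/internal/unsafeheader"
)

// bytesFrom (hypothetical helper) aliases n bytes starting at p as a []byte
// without importing "reflect". This mirrors how mmap'd memory is exposed
// as a slice inside x/sys/unix.
func bytesFrom(p unsafe.Pointer, n int) []byte {
    var b []byte
    hdr := (*unsafeheader.Slice)(unsafe.Pointer(&b))
    hdr.Data = p
    hdr.Cap = n
    hdr.Len = n
    return b
}
```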
2
vendor/golang.org/x/sys/unix/.gitignore
generated
vendored
Normal file
@ -0,0 +1,2 @@
_obj/
unix.test
184
vendor/golang.org/x/sys/unix/README.md
generated
vendored
Normal file
@ -0,0 +1,184 @@
# Building `sys/unix`

The sys/unix package provides access to the raw system call interface of the
underlying operating system. See: https://godoc.org/golang.org/x/sys/unix

Porting Go to a new architecture/OS combination or adding syscalls, types, or
constants to an existing architecture/OS pair requires some manual effort;
however, there are tools that automate much of the process.

## Build Systems

There are currently two ways we generate the necessary files. We are currently
migrating the build system to use containers so the builds are reproducible.
This is being done on an OS-by-OS basis. Please update this documentation as
components of the build system change.

### Old Build System (currently for `GOOS != "linux"`)

The old build system generates the Go files based on the C header files
present on your system. This means that files for a given GOOS/GOARCH pair
must be generated on a system with that OS and architecture. It also means
that the generated code can differ from system to system, based on differences
in the header files.

To avoid this, if you are using the old build system, only generate the Go
files on an installation with unmodified header files. It is also important to
keep track of which version of the OS the files were generated from (e.g.
Darwin 14 vs Darwin 15). This makes it easier to track the progress of changes
and have each OS upgrade correspond to a single change.

To build the files for your current OS and architecture, make sure GOOS and
GOARCH are set correctly and run `mkall.sh`. This will generate the files for
your specific system. Running `mkall.sh -n` shows the commands that will be run.

Requirements: bash, go

### New Build System (currently for `GOOS == "linux"`)

The new build system uses a Docker container to generate the Go files directly
from source checkouts of the kernel and various system libraries. This means
that on any platform that supports Docker, all the files using the new build
system can be generated at once, and generated files will not change based on
what the person running the scripts has installed on their computer.

The OS-specific files for the new build system are located in the `${GOOS}`
directory, and the build is coordinated by the `${GOOS}/mkall.go` program. When
the kernel or system library updates, modify the Dockerfile at
`${GOOS}/Dockerfile` to check out the new release of the source.

To build all the files under the new build system, you must be on an amd64/Linux
system and have your GOOS and GOARCH set accordingly. Running `mkall.sh` will
then generate all of the files for all of the GOOS/GOARCH pairs in the new build
system. Running `mkall.sh -n` shows the commands that will be run.

Requirements: bash, go, docker

## Component files

This section describes the various files used in the code generation process.
It also contains instructions on how to modify these files to add a new
architecture/OS or to add additional syscalls, types, or constants. Note that
if you are using the new build system, the scripts/programs cannot be called
normally. They must be called from within the docker container.

### asm files

The hand-written assembly file at `asm_${GOOS}_${GOARCH}.s` implements system
call dispatch. There are three entry points:
```
func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
```
The first and second are the standard ones; they differ only in how many
arguments can be passed to the kernel. The third is for low-level use by the
ForkExec wrapper. Unlike the first two, it does not call into the scheduler to
let it know that a system call is running.

When porting Go to a new architecture/OS, this file must be implemented for
each GOOS/GOARCH pair.
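
For orientation, a hedged example of what calling one of these entry points looks like from user code (linux/amd64 assumed; most callers should prefer the typed wrappers the package generates, such as `unix.Getpid`):

```
package main

import (
    "fmt"

    "golang.org/x/sys/unix"
)

func main() {
    // Invoke getpid(2) through the generic dispatcher.
    pid, _, errno := unix.Syscall(unix.SYS_GETPID, 0, 0, 0)
    if errno != 0 {
        fmt.Println("syscall failed:", errno)
        return
    }
    fmt.Println("pid:", pid)
}
```
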
### mksysnum

Mksysnum is a Go program located at `${GOOS}/mksysnum.go` (or `mksysnum_${GOOS}.go`
for the old system). This program takes in a list of header files containing the
syscall number declarations and parses them to produce the corresponding list of
Go numeric constants. See `zsysnum_${GOOS}_${GOARCH}.go` for the generated
constants.

Adding new syscall numbers is mostly done by running the build on a sufficiently
new installation of the target OS (or updating the source checkouts for the
new build system). However, depending on the OS, you may need to update the
parsing in mksysnum.
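
To make the output concrete, the generated file is essentially one large constant block. An abbreviated, illustrative excerpt (the values shown are the well-known linux/amd64 syscall numbers, quoted from memory):

```
// zsysnum_linux_amd64.go (excerpt, illustrative)
const (
    SYS_READ   = 0
    SYS_WRITE  = 1
    SYS_OPEN   = 2
    SYS_CLOSE  = 3
    SYS_GETPID = 39
)
```
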
### mksyscall.go

The `syscall.go`, `syscall_${GOOS}.go`, `syscall_${GOOS}_${GOARCH}.go` are
hand-written Go files which implement system calls (for unix, the specific OS,
or the specific OS/Architecture pair respectively) that need special handling
and list `//sys` comments giving prototypes for ones that can be generated.

The mksyscall.go program takes the `//sys` and `//sysnb` comments and converts
them into syscalls. This requires the name of the prototype in the comment to
match a syscall number in the `zsysnum_${GOOS}_${GOARCH}.go` file. The function
prototype can be exported (capitalized) or not.

Adding a new syscall often just requires adding a new `//sys` function prototype
with the desired arguments and a capitalized name so it is exported. However, if
you want the interface to the syscall to be different, often one will make an
unexported `//sys` prototype, and then write a custom wrapper in
`syscall_${GOOS}.go`.
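
A hedged sketch of the round trip: a `//sys` prototype as it appears in `syscall_linux.go`, and roughly the shape of the wrapper mksyscall.go emits for it (simplified from memory; the real generated code differs in detail):

```
// In syscall_linux.go:
//sys	Fchmod(fd int, mode uint32) (err error)

// Approximately what mksyscall.go writes into zsyscall_linux_amd64.go:
func Fchmod(fd int, mode uint32) (err error) {
    _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0)
    if e1 != 0 {
        err = errnoErr(e1)
    }
    return
}
```
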
### types files

For each OS, there is a hand-written Go file at `${GOOS}/types.go` (or
`types_${GOOS}.go` on the old system). This file includes standard C headers and
creates Go type aliases to the corresponding C types. The file is then fed
through godefs to get the Go-compatible definitions. Finally, the generated code
is fed through mkpost.go to format the code correctly and remove any hidden or
private identifiers. This cleaned-up code is written to
`ztypes_${GOOS}_${GOARCH}.go`.

The hardest part about preparing this file is figuring out which headers to
include and which symbols need to be `#define`d to get the actual data
structures that pass through to the kernel system calls. Some C libraries
present alternate versions for binary compatibility and translate them on the
way in and out of system calls, but there is almost always a `#define` that can
get the real ones.
See `types_darwin.go` and `linux/types.go` for examples.

To add a new type, add in the necessary include statement at the top of the
file (if it is not already there) and add in a type alias line. Note that if
your type is significantly different on different architectures, you may need
some `#if/#elif` macros in your include statements.
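
As a small illustration of the alias-then-generate flow (simplified; the actual entries in `linux/types.go` and the generated `ztypes_*` files carry more fields and build tags):

```
// In ${GOOS}/types.go, next to the cgo preamble that includes <time.h>:
type Timespec C.struct_timespec

// After godefs and mkpost.go, ztypes_linux_amd64.go ends up with roughly:
type Timespec struct {
    Sec  int64
    Nsec int64
}
```
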
### mkerrors.sh

This script is used to generate the system's various constants. This doesn't
just include the error numbers and error strings, but also the signal numbers
and a wide variety of miscellaneous constants. The constants come from the list
of include files in the `includes_${uname}` variable. A regex then picks out
the desired `#define` statements, and generates the corresponding Go constants.
The error numbers and strings are generated from `#include <errno.h>`, and the
signal numbers and strings are generated from `#include <signal.h>`. All of
these constants are written to `zerrors_${GOOS}_${GOARCH}.go` via a C program,
`_errors.c`, which prints out all the constants.

To add a constant, add the header that includes it to the appropriate variable.
Then, edit the regex (if necessary) to match the desired constant. Avoid making
the regex too broad, or it will pick up unintended constants.
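
For a sense of the output, an abbreviated, illustrative excerpt of the kind of block `mkerrors.sh` writes (values are the common linux/amd64 ones, quoted from memory and shown only for flavor):

```
// zerrors_linux_amd64.go (excerpt, illustrative)
const (
    O_APPEND   = 0x400
    O_CLOEXEC  = 0x80000
    O_NONBLOCK = 0x800
)
```
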
### mkmerge.go

This program is used to extract duplicate const, func, and type declarations
from the generated architecture-specific files listed below, and merge these
into a common file for each OS.

The merge is performed in the following steps:
1. Construct the set of common code that is identical in all architecture-specific files.
2. Write this common code to the merged file.
3. Remove the common code from all architecture-specific files.

## Generated files

### `zerrors_${GOOS}_${GOARCH}.go`

A file containing all of the system's generated error numbers, error strings,
signal numbers, and constants. Generated by `mkerrors.sh` (see above).

### `zsyscall_${GOOS}_${GOARCH}.go`

A file containing all the generated syscalls for a specific GOOS and GOARCH.
Generated by `mksyscall.go` (see above).

### `zsysnum_${GOOS}_${GOARCH}.go`

A list of numeric constants for all the syscall numbers of the specific GOOS
and GOARCH. Generated by mksysnum (see above).

### `ztypes_${GOOS}_${GOARCH}.go`

A file containing Go types for passing into (or returning from) syscalls.
Generated by godefs and the types file (see above).
86
vendor/golang.org/x/sys/unix/affinity_linux.go
generated
vendored
Normal file
@ -0,0 +1,86 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// CPU affinity functions

package unix

import (
    "math/bits"
    "unsafe"
)

const cpuSetSize = _CPU_SETSIZE / _NCPUBITS

// CPUSet represents a CPU affinity mask.
type CPUSet [cpuSetSize]cpuMask

func schedAffinity(trap uintptr, pid int, set *CPUSet) error {
    _, _, e := RawSyscall(trap, uintptr(pid), uintptr(unsafe.Sizeof(*set)), uintptr(unsafe.Pointer(set)))
    if e != 0 {
        return errnoErr(e)
    }
    return nil
}

// SchedGetaffinity gets the CPU affinity mask of the thread specified by pid.
// If pid is 0 the calling thread is used.
func SchedGetaffinity(pid int, set *CPUSet) error {
    return schedAffinity(SYS_SCHED_GETAFFINITY, pid, set)
}

// SchedSetaffinity sets the CPU affinity mask of the thread specified by pid.
// If pid is 0 the calling thread is used.
func SchedSetaffinity(pid int, set *CPUSet) error {
    return schedAffinity(SYS_SCHED_SETAFFINITY, pid, set)
}

// Zero clears the set s, so that it contains no CPUs.
func (s *CPUSet) Zero() {
    for i := range s {
        s[i] = 0
    }
}

func cpuBitsIndex(cpu int) int {
    return cpu / _NCPUBITS
}

func cpuBitsMask(cpu int) cpuMask {
    return cpuMask(1 << (uint(cpu) % _NCPUBITS))
}

// Set adds cpu to the set s.
func (s *CPUSet) Set(cpu int) {
    i := cpuBitsIndex(cpu)
    if i < len(s) {
        s[i] |= cpuBitsMask(cpu)
    }
}

// Clear removes cpu from the set s.
func (s *CPUSet) Clear(cpu int) {
    i := cpuBitsIndex(cpu)
    if i < len(s) {
        s[i] &^= cpuBitsMask(cpu)
    }
}

// IsSet reports whether cpu is in the set s.
func (s *CPUSet) IsSet(cpu int) bool {
    i := cpuBitsIndex(cpu)
    if i < len(s) {
        return s[i]&cpuBitsMask(cpu) != 0
    }
    return false
}

// Count returns the number of CPUs in the set s.
func (s *CPUSet) Count() int {
    c := 0
    for _, b := range s {
        c += bits.OnesCount64(uint64(b))
    }
    return c
}
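A brief usage sketch of this Linux-only API, following the documented behavior above (pid 0 means the calling thread):

```
package main

import (
    "fmt"
    "log"

    "golang.org/x/sys/unix"
)

func main() {
    // Read the affinity mask of the calling thread, then pin it to CPU 0.
    var set unix.CPUSet
    if err := unix.SchedGetaffinity(0, &set); err != nil {
        log.Fatal(err)
    }
    fmt.Println("currently allowed CPUs:", set.Count())

    set.Zero()
    set.Set(0)
    if err := unix.SchedSetaffinity(0, &set); err != nil {
        log.Fatal(err)
    }
}
```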
15
vendor/golang.org/x/sys/unix/aliases.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
// +build go1.9

package unix

import "syscall"

type Signal = syscall.Signal
type Errno = syscall.Errno
type SysProcAttr = syscall.SysProcAttr
18
vendor/golang.org/x/sys/unix/asm_aix_ppc64.s
generated
vendored
Normal file
@ -0,0 +1,18 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build gc
// +build gc

#include "textflag.h"

//
// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go
//

TEXT ·syscall6(SB),NOSPLIT,$0-88
    JMP syscall·syscall6(SB)

TEXT ·rawSyscall6(SB),NOSPLIT,$0-88
    JMP syscall·rawSyscall6(SB)
Some files were not shown because too many files have changed in this diff.