mirror of https://github.com/woodpecker-ci/woodpecker.git synced 2024-12-24 10:07:21 +02:00

CLI is pulled in for simpler(?) workflow

This commit is contained in:
Laszlo Fogas 2019-04-06 21:32:14 +02:00
parent 9e61b6a678
commit 2922505fe2
160 changed files with 33760 additions and 369 deletions

23
.cli.sh Executable file

@ -0,0 +1,23 @@
#!/bin/sh
set -e
set -x
# disable CGO for cross-compiling
export CGO_ENABLED=0
# compile for all architectures
GOOS=linux GOARCH=amd64 go build -ldflags "-X main.version=${DRONE_TAG##v}" -o cli/release/linux/amd64/drone github.com/laszlocph/drone-oss-08/cli/drone
GOOS=linux GOARCH=arm64 go build -ldflags "-X main.version=${DRONE_TAG##v}" -o cli/release/linux/arm64/drone github.com/laszlocph/drone-oss-08/cli/drone
GOOS=linux GOARCH=arm go build -ldflags "-X main.version=${DRONE_TAG##v}" -o cli/release/linux/arm/drone github.com/laszlocph/drone-oss-08/cli/drone
GOOS=windows GOARCH=amd64 go build -ldflags "-X main.version=${DRONE_TAG##v}" -o cli/release/windows/amd64/drone github.com/laszlocph/drone-oss-08/cli/drone
GOOS=darwin GOARCH=amd64 go build -ldflags "-X main.version=${DRONE_TAG##v}" -o cli/release/darwin/amd64/drone github.com/laszlocph/drone-oss-08/cli/drone
# tar binary files prior to upload
tar -cvzf cli/release/drone_linux_amd64.tar.gz -C cli/release/linux/amd64 drone
tar -cvzf cli/release/drone_linux_arm64.tar.gz -C cli/release/linux/arm64 drone
tar -cvzf cli/release/drone_linux_arm.tar.gz -C cli/release/linux/arm drone
tar -cvzf cli/release/drone_windows_amd64.tar.gz -C cli/release/windows/amd64 drone
tar -cvzf cli/release/drone_darwin_amd64.tar.gz -C cli/release/darwin/amd64 drone
# generate shas for tar files
sha256sum cli/release/*.tar.gz > cli/release/drone_checksums.txt
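
The same script can be run outside CI to produce the release artifacts locally; a minimal sketch, with an illustrative DRONE_TAG and assuming Go is installed and the repository is checked out under GOPATH (pre-modules layout):

# illustrative local run of .cli.sh; tag value is a placeholder
export DRONE_TAG=v0.8.91                       # ${DRONE_TAG##v} strips the leading "v" for -X main.version
./.cli.sh                                      # cross-compiles and writes archives to cli/release/
sha256sum -c cli/release/drone_checksums.txt   # verify the generated tarballs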

1
.gitignore vendored

@ -5,6 +5,7 @@ drone/drone
.env
extras/
release/
cli/release/
server/swagger/files/*.json
.idea/

57
cli/.drone.yml Normal file

@ -0,0 +1,57 @@
workspace:
base: /go
path: src/github.com/drone/drone-cli
pipeline:
build:
image: golang:1.9
commands: sh .drone.sh
publish_latest:
image: plugins/docker
repo: drone/cli
secrets: [docker_username, docker_password]
auto_tag: true
when:
event: [push, tag]
publish_alpine:
image: plugins/docker
repo: drone/cli
secrets: [docker_username, docker_password]
auto_tag: true
auto_tag_suffix: alpine
dockerfile: Dockerfile.alpine
when:
event: [push, tag]
publish_linux_arm:
image: plugins/docker
repo: drone/cli
secrets: [docker_username, docker_password]
auto_tag: true
auto_tag_suffix: linux-arm
dockerfile: Dockerfile.linux.arm
when:
event: [push, tag]
publish_linux_arm64:
image: plugins/docker
repo: drone/cli
secrets: [docker_username, docker_password]
auto_tag: true
auto_tag_suffix: linux-arm64
dockerfile: Dockerfile.linux.arm64
when:
event: [push, tag]
release:
image: plugins/github-release
files:
- release/drone_*.tar.gz
- release/drone_checksums.txt
secrets:
- source: github_token
target: github_release_api_key
when:
event: tag

202
cli/LICENSE Normal file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

1
cli/README.md Normal file

@ -0,0 +1 @@
Command line client for the Drone continuous integration server. Please see the official documentation at http://docs.drone.io/cli-installation/
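
A minimal quick-start sketch, with a hypothetical server address and token; the environment variables and the info command are defined in cli/drone/main.go and cli/drone/info/info.go later in this commit:

# placeholder server URL and access token
export DRONE_SERVER=https://drone.example.com
export DRONE_TOKEN="<personal-access-token>"
drone info        # prints the authenticated user (User/Email)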


@ -0,0 +1,14 @@
package autoscale
import "github.com/urfave/cli"
// Command exports the autoscale command set.
var Command = cli.Command{
Name: "autoscale",
Usage: "manage autoscaling",
Subcommands: []cli.Command{
autoscalePauseCmd,
autoscaleResumeCmd,
autoscaleVersionCmd,
},
}


@ -0,0 +1,21 @@
package autoscale
import (
"github.com/urfave/cli"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
)
var autoscalePauseCmd = cli.Command{
Name: "pause",
Usage: "pause the autoscaler",
Action: autoscalePause,
}
func autoscalePause(c *cli.Context) error {
client, err := internal.NewAutoscaleClient(c)
if err != nil {
return err
}
return client.AutoscalePause()
}


@ -0,0 +1,21 @@
package autoscale
import (
"github.com/urfave/cli"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
)
var autoscaleResumeCmd = cli.Command{
Name: "resume",
Usage: "resume the autoscaler",
Action: autoscaleResume,
}
func autoscaleResume(c *cli.Context) error {
client, err := internal.NewAutoscaleClient(c)
if err != nil {
return err
}
return client.AutoscaleResume()
}


@ -0,0 +1,47 @@
package autoscale
import (
"os"
"text/template"
"github.com/urfave/cli"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
)
var autoscaleVersionCmd = cli.Command{
Name: "version",
Usage: "server version",
Action: autoscaleVersion,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",
Usage: "format output",
Value: tmplAutoscaleVersion,
Hidden: true,
},
},
}
func autoscaleVersion(c *cli.Context) error {
client, err := internal.NewAutoscaleClient(c)
if err != nil {
return err
}
version, err := client.AutoscaleVersion()
if err != nil {
return err
}
tmpl, err := template.New("_").Parse(c.String("format") + "\n")
if err != nil {
return err
}
return tmpl.Execute(os.Stdout, version)
}
var tmplAutoscaleVersion = `Version: {{ .Version }}
Commit: {{ .Commit }}
Source: {{ .Source }}
`
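
A hedged usage sketch for the autoscale subcommands, assuming a hypothetical autoscaler address; DRONE_AUTOSCALER matches the global flag defined in cli/drone/main.go:

export DRONE_AUTOSCALER=https://autoscaler.example.com   # placeholder address
drone autoscale version      # prints Version/Commit/Source
drone autoscale pause        # pause the autoscaler
drone autoscale resume       # resume the autoscaler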

22
cli/drone/build/build.go Normal file

@ -0,0 +1,22 @@
package build
import "github.com/urfave/cli"
// Command exports the build command set.
var Command = cli.Command{
Name: "build",
Usage: "manage builds",
Subcommands: []cli.Command{
buildListCmd,
buildLastCmd,
buildLogsCmd,
buildInfoCmd,
buildStopCmd,
buildStartCmd,
buildApproveCmd,
buildDeclineCmd,
buildQueueCmd,
buildKillCmd,
buildPsCmd,
},
}


@ -0,0 +1,41 @@
package build
import (
"fmt"
"strconv"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/urfave/cli"
)
var buildApproveCmd = cli.Command{
Name: "approve",
Usage: "approve a build",
ArgsUsage: "<repo/name> <build>",
Action: buildApprove,
}
func buildApprove(c *cli.Context) (err error) {
repo := c.Args().First()
owner, name, err := internal.ParseRepo(repo)
if err != nil {
return err
}
number, err := strconv.Atoi(c.Args().Get(1))
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
_, err = client.BuildApprove(owner, name, number)
if err != nil {
return err
}
fmt.Printf("Approving build %s/%s#%d\n", owner, name, number)
return nil
}


@ -0,0 +1,41 @@
package build
import (
"fmt"
"strconv"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/urfave/cli"
)
var buildDeclineCmd = cli.Command{
Name: "decline",
Usage: "decline a build",
ArgsUsage: "<repo/name> <build>",
Action: buildDecline,
}
func buildDecline(c *cli.Context) (err error) {
repo := c.Args().First()
owner, name, err := internal.ParseRepo(repo)
if err != nil {
return err
}
number, err := strconv.Atoi(c.Args().Get(1))
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
_, err = client.BuildDecline(owner, name, number)
if err != nil {
return err
}
fmt.Printf("Declining build %s/%s#%d\n", owner, name, number)
return nil
}


@ -0,0 +1,75 @@
package build
import (
"os"
"strconv"
"text/template"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/urfave/cli"
)
var buildInfoCmd = cli.Command{
Name: "info",
Usage: "show build details",
ArgsUsage: "<repo/name> [build]",
Action: buildInfo,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",
Usage: "format output",
Value: tmplBuildInfo,
},
},
}
func buildInfo(c *cli.Context) error {
repo := c.Args().First()
owner, name, err := internal.ParseRepo(repo)
if err != nil {
return err
}
buildArg := c.Args().Get(1)
client, err := internal.NewClient(c)
if err != nil {
return err
}
var number int
if buildArg == "last" || len(buildArg) == 0 {
// Fetch the build number from the last build
build, err := client.BuildLast(owner, name, "")
if err != nil {
return err
}
number = build.Number
} else {
number, err = strconv.Atoi(buildArg)
if err != nil {
return err
}
}
build, err := client.Build(owner, name, number)
if err != nil {
return err
}
tmpl, err := template.New("_").Parse(c.String("format"))
if err != nil {
return err
}
return tmpl.Execute(os.Stdout, build)
}
// template for build information
var tmplBuildInfo = `Number: {{ .Number }}
Status: {{ .Status }}
Event: {{ .Event }}
Commit: {{ .Commit }}
Branch: {{ .Branch }}
Ref: {{ .Ref }}
Message: {{ .Message }}
Author: {{ .Author }}
`
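
For illustration, two possible invocations of the info subcommand; the repository and build number are placeholders, and the custom --format template reuses a field from tmplBuildInfo above:

drone build info octocat/hello-world 42                               # full details via tmplBuildInfo
drone build info --format '{{ .Status }}' octocat/hello-world last    # status of the latest build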


@ -0,0 +1,42 @@
package build
import (
"fmt"
"strconv"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/urfave/cli"
)
var buildKillCmd = cli.Command{
Name: "kill",
Usage: "force kill a build",
ArgsUsage: "<repo/name> <build>",
Action: buildKill,
Hidden: true,
}
func buildKill(c *cli.Context) (err error) {
repo := c.Args().First()
owner, name, err := internal.ParseRepo(repo)
if err != nil {
return err
}
number, err := strconv.Atoi(c.Args().Get(1))
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
err = client.BuildKill(owner, name, number)
if err != nil {
return err
}
fmt.Printf("Force killing build %s/%s#%d\n", owner, name, number)
return nil
}


@ -0,0 +1,52 @@
package build
import (
"os"
"text/template"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/urfave/cli"
)
var buildLastCmd = cli.Command{
Name: "last",
Usage: "show latest build details",
ArgsUsage: "<repo/name>",
Action: buildLast,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",
Usage: "format output",
Value: tmplBuildInfo,
},
cli.StringFlag{
Name: "branch",
Usage: "branch name",
Value: "master",
},
},
}
func buildLast(c *cli.Context) error {
repo := c.Args().First()
owner, name, err := internal.ParseRepo(repo)
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
build, err := client.BuildLast(owner, name, c.String("branch"))
if err != nil {
return err
}
tmpl, err := template.New("_").Parse(c.String("format"))
if err != nil {
return err
}
return tmpl.Execute(os.Stdout, build)
}


@ -0,0 +1,98 @@
package build
import (
"os"
"text/template"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/urfave/cli"
)
var buildListCmd = cli.Command{
Name: "ls",
Usage: "show build history",
ArgsUsage: "<repo/name>",
Action: buildList,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",
Usage: "format output",
Value: tmplBuildList,
},
cli.StringFlag{
Name: "branch",
Usage: "branch filter",
},
cli.StringFlag{
Name: "event",
Usage: "event filter",
},
cli.StringFlag{
Name: "status",
Usage: "status filter",
},
cli.IntFlag{
Name: "limit",
Usage: "limit the list size",
Value: 25,
},
},
}
func buildList(c *cli.Context) error {
repo := c.Args().First()
owner, name, err := internal.ParseRepo(repo)
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
builds, err := client.BuildList(owner, name)
if err != nil {
return err
}
tmpl, err := template.New("_").Parse(c.String("format") + "\n")
if err != nil {
return err
}
branch := c.String("branch")
event := c.String("event")
status := c.String("status")
limit := c.Int("limit")
var count int
for _, build := range builds {
if count >= limit {
break
}
if branch != "" && build.Branch != branch {
continue
}
if event != "" && build.Event != event {
continue
}
if status != "" && build.Status != status {
continue
}
tmpl.Execute(os.Stdout, build)
count++
}
return nil
}
// template for build list information
var tmplBuildList = "\x1b[33mBuild #{{ .Number }} \x1b[0m" + `
Status: {{ .Status }}
Event: {{ .Event }}
Commit: {{ .Commit }}
Branch: {{ .Branch }}
Ref: {{ .Ref }}
Author: {{ .Author }} {{ if .Email }}<{{.Email}}>{{ end }}
Message: {{ .Message }}
`
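
A usage sketch for the list filters above; the repository name and filter values are placeholders:

drone build ls --branch master --event push --status success --limit 10 octocat/hello-world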


@ -0,0 +1,18 @@
package build
import (
"fmt"
"github.com/urfave/cli"
)
var buildLogsCmd = cli.Command{
Name: "logs",
Usage: "show build logs",
ArgsUsage: "<repo/name> [build] [job]",
Action: buildLogs,
}
func buildLogs(c *cli.Context) error {
return fmt.Errorf("Command temporarily disabled. See https://github.com/drone/drone/issues/2005")
}


@ -0,0 +1,82 @@
package build
import (
"os"
"strconv"
"text/template"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/urfave/cli"
)
var buildPsCmd = cli.Command{
Name: "ps",
Usage: "show build steps",
ArgsUsage: "<repo/name> [build]",
Action: buildPs,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",
Usage: "format output",
Value: tmplBuildPs,
},
},
}
func buildPs(c *cli.Context) error {
repo := c.Args().First()
owner, name, err := internal.ParseRepo(repo)
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
buildArg := c.Args().Get(1)
var number int
if buildArg == "last" || len(buildArg) == 0 {
// Fetch the build number from the last build
build, err := client.BuildLast(owner, name, "")
if err != nil {
return err
}
number = build.Number
} else {
number, err = strconv.Atoi(buildArg)
if err != nil {
return err
}
}
build, err := client.Build(owner, name, number)
if err != nil {
return err
}
tmpl, err := template.New("_").Parse(c.String("format") + "\n")
if err != nil {
return err
}
for _, proc := range build.Procs {
for _, child := range proc.Children {
if err := tmpl.Execute(os.Stdout, child); err != nil {
return err
}
}
}
return nil
}
// template for build ps information
var tmplBuildPs = "\x1b[33mProc #{{ .PID }} \x1b[0m" + `
Step: {{ .Name }}
State: {{ .State }}
`


@ -0,0 +1,63 @@
package build
import (
"fmt"
"os"
"text/template"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/urfave/cli"
)
var buildQueueCmd = cli.Command{
Name: "queue",
Usage: "show build queue",
ArgsUsage: " ",
Action: buildQueue,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",
Usage: "format output",
Value: tmplBuildQueue,
},
},
}
func buildQueue(c *cli.Context) error {
client, err := internal.NewClient(c)
if err != nil {
return err
}
builds, err := client.BuildQueue()
if err != nil {
return err
}
if len(builds) == 0 {
fmt.Println("there are no pending or running builds")
return nil
}
tmpl, err := template.New("_").Parse(c.String("format") + "\n")
if err != nil {
return err
}
for _, build := range builds {
tmpl.Execute(os.Stdout, build)
}
return nil
}
// template for build queue information
var tmplBuildQueue = "\x1b[33m{{ .FullName }} #{{ .Number }} \x1b[0m" + `
Status: {{ .Status }}
Event: {{ .Event }}
Commit: {{ .Commit }}
Branch: {{ .Branch }}
Ref: {{ .Ref }}
Author: {{ .Author }} {{ if .Email }}<{{.Email}}>{{ end }}
Message: {{ .Message }}
`


@ -0,0 +1,65 @@
package build
import (
"errors"
"fmt"
"strconv"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/urfave/cli"
)
var buildStartCmd = cli.Command{
Name: "start",
Usage: "start a build",
ArgsUsage: "<repo/name> [build]",
Action: buildStart,
Flags: []cli.Flag{
cli.StringSliceFlag{
Name: "param, p",
Usage: "custom parameters to be injected into the job environment. Format: KEY=value",
},
},
}
func buildStart(c *cli.Context) (err error) {
repo := c.Args().First()
owner, name, err := internal.ParseRepo(repo)
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
buildArg := c.Args().Get(1)
var number int
if buildArg == "last" {
// Fetch the build number from the last build
build, err := client.BuildLast(owner, name, "")
if err != nil {
return err
}
number = build.Number
} else {
if len(buildArg) == 0 {
return errors.New("missing job number")
}
number, err = strconv.Atoi(buildArg)
if err != nil {
return err
}
}
params := internal.ParseKeyPair(c.StringSlice("param"))
build, err := client.BuildStart(owner, name, number, params)
if err != nil {
return err
}
fmt.Printf("Starting build %s/%s#%d\n", owner, name, build.Number)
return nil
}
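
A possible restart invocation, with a hypothetical repository and an illustrative parameter injected via --param:

drone build start -p DEPLOY_TARGET=staging octocat/hello-world last   # re-run the latest build
drone build start octocat/hello-world 42                              # re-run a specific build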


@ -0,0 +1,45 @@
package build
import (
"fmt"
"strconv"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/urfave/cli"
)
var buildStopCmd = cli.Command{
Name: "stop",
Usage: "stop a build",
ArgsUsage: "<repo/name> [build] [job]",
Action: buildStop,
}
func buildStop(c *cli.Context) (err error) {
repo := c.Args().First()
owner, name, err := internal.ParseRepo(repo)
if err != nil {
return err
}
number, err := strconv.Atoi(c.Args().Get(1))
if err != nil {
return err
}
job, _ := strconv.Atoi(c.Args().Get(2))
if job == 0 {
job = 1
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
err = client.BuildStop(owner, name, number, job)
if err != nil {
return err
}
fmt.Printf("Stopping build %s/%s#%d.%d\n", owner, name, number, job)
return nil
}

125
cli/drone/deploy/deploy.go Normal file

@ -0,0 +1,125 @@
package deploy
import (
"fmt"
"html/template"
"os"
"strconv"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/drone/drone-go/drone"
"github.com/urfave/cli"
)
// Command exports the deploy command.
var Command = cli.Command{
Name: "deploy",
Usage: "deploy code",
ArgsUsage: "<repo/name> <build> <environment>",
Action: deploy,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",
Usage: "format output",
Value: tmplDeployInfo,
},
cli.StringFlag{
Name: "branch",
Usage: "branch filter",
Value: "master",
},
cli.StringFlag{
Name: "event",
Usage: "event filter",
Value: drone.EventPush,
},
cli.StringFlag{
Name: "status",
Usage: "status filter",
Value: drone.StatusSuccess,
},
cli.StringSliceFlag{
Name: "param, p",
Usage: "custom parameters to be injected into the job environment. Format: KEY=value",
},
},
}
func deploy(c *cli.Context) error {
repo := c.Args().First()
owner, name, err := internal.ParseRepo(repo)
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
branch := c.String("branch")
event := c.String("event")
status := c.String("status")
buildArg := c.Args().Get(1)
var number int
if buildArg == "last" {
// Fetch the build number from the last build
builds, berr := client.BuildList(owner, name)
if berr != nil {
return berr
}
for _, build := range builds {
if branch != "" && build.Branch != branch {
continue
}
if event != "" && build.Event != event {
continue
}
if status != "" && build.Status != status {
continue
}
if build.Number > number {
number = build.Number
}
}
if number == 0 {
return fmt.Errorf("Cannot deploy failure build")
}
} else {
number, err = strconv.Atoi(buildArg)
if err != nil {
return err
}
}
env := c.Args().Get(2)
if env == "" {
return fmt.Errorf("Please specify the target environment (ie production)")
}
params := internal.ParseKeyPair(c.StringSlice("param"))
deploy, err := client.Deploy(owner, name, number, env, params)
if err != nil {
return err
}
tmpl, err := template.New("_").Parse(c.String("format"))
if err != nil {
return err
}
return tmpl.Execute(os.Stdout, deploy)
}
// template for deployment information
var tmplDeployInfo = `Number: {{ .Number }}
Status: {{ .Status }}
Commit: {{ .Commit }}
Branch: {{ .Branch }}
Ref: {{ .Ref }}
Message: {{ .Message }}
Author: {{ .Author }}
Target: {{ .Deploy }}
`
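
A hedged example of promoting a build with the command above; repository, build reference, environment, and parameter are all placeholders:

# "last" selects the newest build matching --branch/--event/--status (defaults: master/push/success)
drone deploy -p IMAGE_TAG=1.2.3 octocat/hello-world last production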

497
cli/drone/exec/exec.go Normal file

@ -0,0 +1,497 @@
package exec
import (
"context"
"io"
"log"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/laszlocph/drone-oss-08/cncd/pipeline/pipeline"
"github.com/laszlocph/drone-oss-08/cncd/pipeline/pipeline/backend"
"github.com/laszlocph/drone-oss-08/cncd/pipeline/pipeline/backend/docker"
"github.com/laszlocph/drone-oss-08/cncd/pipeline/pipeline/frontend"
"github.com/laszlocph/drone-oss-08/cncd/pipeline/pipeline/frontend/yaml"
"github.com/laszlocph/drone-oss-08/cncd/pipeline/pipeline/frontend/yaml/compiler"
"github.com/laszlocph/drone-oss-08/cncd/pipeline/pipeline/frontend/yaml/linter"
"github.com/laszlocph/drone-oss-08/cncd/pipeline/pipeline/interrupt"
"github.com/laszlocph/drone-oss-08/cncd/pipeline/pipeline/multipart"
"github.com/drone/envsubst"
"github.com/urfave/cli"
)
// Command exports the exec command.
var Command = cli.Command{
Name: "exec",
Usage: "execute a local build",
ArgsUsage: "[path/to/.drone.yml]",
Action: func(c *cli.Context) {
if err := exec(c); err != nil {
log.Fatalln(err)
}
},
Flags: []cli.Flag{
cli.BoolTFlag{
Name: "local",
Usage: "build from local directory",
EnvVar: "DRONE_LOCAL",
},
cli.DurationFlag{
Name: "timeout",
Usage: "build timeout",
Value: time.Hour,
EnvVar: "DRONE_TIMEOUT",
},
cli.StringSliceFlag{
Name: "volumes",
Usage: "build volumes",
EnvVar: "DRONE_VOLUMES",
},
cli.StringSliceFlag{
Name: "network",
Usage: "external networks",
EnvVar: "DRONE_NETWORKS",
},
cli.StringFlag{
Name: "prefix",
Value: "drone",
Usage: "prefix containers created by drone",
EnvVar: "DRONE_DOCKER_PREFIX",
Hidden: true,
},
cli.StringSliceFlag{
Name: "privileged",
Usage: "privileged plugins",
Value: &cli.StringSlice{
"plugins/docker",
"plugins/gcr",
"plugins/ecr",
},
},
//
// Please note the below flags are mirrored in pipec and
// should be kept synchronized. Do not edit them directly:
// https://github.com/cncd/pipeline/pipec
//
//
// workspace default
//
cli.StringFlag{
Name: "workspace-base",
Value: "/drone",
EnvVar: "DRONE_WORKSPACE_BASE",
},
cli.StringFlag{
Name: "workspace-path",
Value: "src",
EnvVar: "DRONE_WORKSPACE_PATH",
},
//
// netrc parameters
//
cli.StringFlag{
Name: "netrc-username",
EnvVar: "DRONE_NETRC_USERNAME",
},
cli.StringFlag{
Name: "netrc-password",
EnvVar: "DRONE_NETRC_PASSWORD",
},
cli.StringFlag{
Name: "netrc-machine",
EnvVar: "DRONE_NETRC_MACHINE",
},
//
// metadata parameters
//
cli.StringFlag{
Name: "system-arch",
Value: "linux/amd64",
EnvVar: "DRONE_SYSTEM_ARCH",
},
cli.StringFlag{
Name: "system-name",
Value: "pipec",
EnvVar: "DRONE_SYSTEM_NAME",
},
cli.StringFlag{
Name: "system-link",
Value: "https://github.com/cncd/pipec",
EnvVar: "DRONE_SYSTEM_LINK",
},
cli.StringFlag{
Name: "repo-name",
EnvVar: "DRONE_REPO_NAME",
},
cli.StringFlag{
Name: "repo-link",
EnvVar: "DRONE_REPO_LINK",
},
cli.StringFlag{
Name: "repo-remote-url",
EnvVar: "DRONE_REPO_REMOTE",
},
cli.StringFlag{
Name: "repo-private",
EnvVar: "DRONE_REPO_PRIVATE",
},
cli.IntFlag{
Name: "build-number",
EnvVar: "DRONE_BUILD_NUMBER",
},
cli.IntFlag{
Name: "parent-build-number",
EnvVar: "DRONE_PARENT_BUILD_NUMBER",
},
cli.Int64Flag{
Name: "build-created",
EnvVar: "DRONE_BUILD_CREATED",
},
cli.Int64Flag{
Name: "build-started",
EnvVar: "DRONE_BUILD_STARTED",
},
cli.Int64Flag{
Name: "build-finished",
EnvVar: "DRONE_BUILD_FINISHED",
},
cli.StringFlag{
Name: "build-status",
EnvVar: "DRONE_BUILD_STATUS",
},
cli.StringFlag{
Name: "build-event",
EnvVar: "DRONE_BUILD_EVENT",
},
cli.StringFlag{
Name: "build-link",
EnvVar: "DRONE_BUILD_LINK",
},
cli.StringFlag{
Name: "build-target",
EnvVar: "DRONE_BUILD_TARGET",
},
cli.StringFlag{
Name: "commit-sha",
EnvVar: "DRONE_COMMIT_SHA",
},
cli.StringFlag{
Name: "commit-ref",
EnvVar: "DRONE_COMMIT_REF",
},
cli.StringFlag{
Name: "commit-refspec",
EnvVar: "DRONE_COMMIT_REFSPEC",
},
cli.StringFlag{
Name: "commit-branch",
EnvVar: "DRONE_COMMIT_BRANCH",
},
cli.StringFlag{
Name: "commit-message",
EnvVar: "DRONE_COMMIT_MESSAGE",
},
cli.StringFlag{
Name: "commit-author-name",
EnvVar: "DRONE_COMMIT_AUTHOR_NAME",
},
cli.StringFlag{
Name: "commit-author-avatar",
EnvVar: "DRONE_COMMIT_AUTHOR_AVATAR",
},
cli.StringFlag{
Name: "commit-author-email",
EnvVar: "DRONE_COMMIT_AUTHOR_EMAIL",
},
cli.IntFlag{
Name: "prev-build-number",
EnvVar: "DRONE_PREV_BUILD_NUMBER",
},
cli.Int64Flag{
Name: "prev-build-created",
EnvVar: "DRONE_PREV_BUILD_CREATED",
},
cli.Int64Flag{
Name: "prev-build-started",
EnvVar: "DRONE_PREV_BUILD_STARTED",
},
cli.Int64Flag{
Name: "prev-build-finished",
EnvVar: "DRONE_PREV_BUILD_FINISHED",
},
cli.StringFlag{
Name: "prev-build-status",
EnvVar: "DRONE_PREV_BUILD_STATUS",
},
cli.StringFlag{
Name: "prev-build-event",
EnvVar: "DRONE_PREV_BUILD_EVENT",
},
cli.StringFlag{
Name: "prev-build-link",
EnvVar: "DRONE_PREV_BUILD_LINK",
},
cli.StringFlag{
Name: "prev-commit-sha",
EnvVar: "DRONE_PREV_COMMIT_SHA",
},
cli.StringFlag{
Name: "prev-commit-ref",
EnvVar: "DRONE_PREV_COMMIT_REF",
},
cli.StringFlag{
Name: "prev-commit-refspec",
EnvVar: "DRONE_PREV_COMMIT_REFSPEC",
},
cli.StringFlag{
Name: "prev-commit-branch",
EnvVar: "DRONE_PREV_COMMIT_BRANCH",
},
cli.StringFlag{
Name: "prev-commit-message",
EnvVar: "DRONE_PREV_COMMIT_MESSAGE",
},
cli.StringFlag{
Name: "prev-commit-author-name",
EnvVar: "DRONE_PREV_COMMIT_AUTHOR_NAME",
},
cli.StringFlag{
Name: "prev-commit-author-avatar",
EnvVar: "DRONE_PREV_COMMIT_AUTHOR_AVATAR",
},
cli.StringFlag{
Name: "prev-commit-author-email",
EnvVar: "DRONE_PREV_COMMIT_AUTHOR_EMAIL",
},
cli.IntFlag{
Name: "job-number",
EnvVar: "DRONE_JOB_NUMBER",
},
cli.StringSliceFlag{
Name: "env, e",
EnvVar: "DRONE_ENV",
},
},
}
func exec(c *cli.Context) error {
file := c.Args().First()
if file == "" {
file = ".drone.yml"
}
metadata := metadataFromContext(c)
environ := metadata.Environ()
secrets := []compiler.Secret{}
for k, v := range metadata.EnvironDrone() {
environ[k] = v
}
for key, val := range metadata.Job.Matrix {
environ[key] = val
secrets = append(secrets, compiler.Secret{
Name: key,
Value: val,
})
}
drone_env := make(map[string]string)
for _, env := range c.StringSlice("env") {
envs := strings.SplitN(env, "=", 2)
drone_env[envs[0]] = envs[1]
}
tmpl, err := envsubst.ParseFile(file)
if err != nil {
return err
}
confstr, err := tmpl.Execute(func(name string) string {
return environ[name]
})
if err != nil {
return err
}
conf, err := yaml.ParseString(confstr)
if err != nil {
return err
}
// configure volumes for local execution
volumes := c.StringSlice("volumes")
if c.Bool("local") {
var (
workspaceBase = conf.Workspace.Base
workspacePath = conf.Workspace.Path
)
if workspaceBase == "" {
workspaceBase = c.String("workspace-base")
}
if workspacePath == "" {
workspacePath = c.String("workspace-path")
}
dir, _ := filepath.Abs(filepath.Dir(file))
if runtime.GOOS == "windows" {
dir = convertPathForWindows(dir)
}
volumes = append(volumes, c.String("prefix")+"_default:"+workspaceBase)
volumes = append(volumes, dir+":"+path.Join(workspaceBase, workspacePath))
}
// lint the yaml file
if lerr := linter.New(linter.WithTrusted(true)).Lint(conf); lerr != nil {
return lerr
}
// compiles the yaml file
compiled := compiler.New(
compiler.WithEscalated(
c.StringSlice("privileged")...,
),
compiler.WithVolumes(volumes...),
compiler.WithWorkspace(
c.String("workspace-base"),
c.String("workspace-path"),
),
compiler.WithNetworks(
c.StringSlice("network")...,
),
compiler.WithPrefix(
c.String("prefix"),
),
compiler.WithProxy(),
compiler.WithLocal(
c.Bool("local"),
),
compiler.WithNetrc(
c.String("netrc-username"),
c.String("netrc-password"),
c.String("netrc-machine"),
),
compiler.WithMetadata(metadata),
compiler.WithSecret(secrets...),
compiler.WithEnviron(drone_env),
).Compile(conf)
engine, err := docker.NewEnv()
if err != nil {
return err
}
ctx, cancel := context.WithTimeout(context.Background(), c.Duration("timeout"))
defer cancel()
ctx = interrupt.WithContext(ctx)
return pipeline.New(compiled,
pipeline.WithContext(ctx),
pipeline.WithTracer(pipeline.DefaultTracer),
pipeline.WithLogger(defaultLogger),
pipeline.WithEngine(engine),
).Run()
}
// return the metadata from the cli context.
func metadataFromContext(c *cli.Context) frontend.Metadata {
return frontend.Metadata{
Repo: frontend.Repo{
Name: c.String("repo-name"),
Link: c.String("repo-link"),
Remote: c.String("repo-remote-url"),
Private: c.Bool("repo-private"),
},
Curr: frontend.Build{
Number: c.Int("build-number"),
Parent: c.Int("parent-build-number"),
Created: c.Int64("build-created"),
Started: c.Int64("build-started"),
Finished: c.Int64("build-finished"),
Status: c.String("build-status"),
Event: c.String("build-event"),
Link: c.String("build-link"),
Target: c.String("build-target"),
Commit: frontend.Commit{
Sha: c.String("commit-sha"),
Ref: c.String("commit-ref"),
Refspec: c.String("commit-refspec"),
Branch: c.String("commit-branch"),
Message: c.String("commit-message"),
Author: frontend.Author{
Name: c.String("commit-author-name"),
Email: c.String("commit-author-email"),
Avatar: c.String("commit-author-avatar"),
},
},
},
Prev: frontend.Build{
Number: c.Int("prev-build-number"),
Created: c.Int64("prev-build-created"),
Started: c.Int64("prev-build-started"),
Finished: c.Int64("prev-build-finished"),
Status: c.String("prev-build-status"),
Event: c.String("prev-build-event"),
Link: c.String("prev-build-link"),
Commit: frontend.Commit{
Sha: c.String("prev-commit-sha"),
Ref: c.String("prev-commit-ref"),
Refspec: c.String("prev-commit-refspec"),
Branch: c.String("prev-commit-branch"),
Message: c.String("prev-commit-message"),
Author: frontend.Author{
Name: c.String("prev-commit-author-name"),
Email: c.String("prev-commit-author-email"),
Avatar: c.String("prev-commit-author-avatar"),
},
},
},
Job: frontend.Job{
Number: c.Int("job-number"),
Matrix: availableEnvironment(),
},
Sys: frontend.System{
Name: c.String("system-name"),
Link: c.String("system-link"),
Arch: c.String("system-arch"),
},
}
}
func availableEnvironment() map[string]string {
result := make(map[string]string, 0)
for _, env := range os.Environ() {
pair := strings.SplitN(env, "=", 2)
result[pair[0]] = pair[1]
}
return result
}
func convertPathForWindows(path string) string {
base := filepath.VolumeName(path)
if len(base) == 2 {
path = strings.TrimPrefix(path, base)
base = strings.ToLower(strings.TrimSuffix(base, ":"))
return "/" + base + filepath.ToSlash(path)
}
return filepath.ToSlash(path)
}
var defaultLogger = pipeline.LogFunc(func(proc *backend.Step, rc multipart.Reader) error {
part, err := rc.NextPart()
if err != nil {
return err
}
logstream := NewLineWriter(proc.Alias)
io.Copy(logstream, part)
return nil
})
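
A local-execution sketch, assuming Docker is available on the host; the pipeline path and environment value are illustrative:

drone exec                                                 # runs ./.drone.yml against the local directory
drone exec --timeout 30m -e FOO=bar path/to/.drone.yml     # hypothetical pipeline path and env var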

67
cli/drone/exec/line.go Normal file

@ -0,0 +1,67 @@
package exec
import (
"fmt"
"os"
"strings"
"time"
)
// Identifies the type of line in the logs.
const (
LineStdout int = iota
LineStderr
LineExitCode
LineMetadata
LineProgress
)
// Line is a line of console output.
type Line struct {
Proc string `json:"proc,omitempty"`
Time int64 `json:"time,omitempty"`
Type int `json:"type,omitempty"`
Pos int `json:"pos,omitempty"`
Out string `json:"out,omitempty"`
}
// LineWriter sends logs to the client.
type LineWriter struct {
name string
num int
now time.Time
rep *strings.Replacer
lines []*Line
}
// NewLineWriter returns a new line writer.
func NewLineWriter(name string) *LineWriter {
w := new(LineWriter)
w.name = name
w.num = 0
w.now = time.Now().UTC()
return w
}
func (w *LineWriter) Write(p []byte) (n int, err error) {
out := string(p)
if w.rep != nil {
out = w.rep.Replace(out)
}
line := &Line{
Out: out,
Proc: w.name,
Pos: w.num,
Time: int64(time.Since(w.now).Seconds()),
Type: LineStdout,
}
fmt.Fprintf(os.Stderr, "[%s:L%d:%ds] %s", w.name, w.num, int64(time.Since(w.now).Seconds()), out)
w.num++
w.lines = append(w.lines, line)
return len(p), nil
}

49
cli/drone/info/info.go Normal file

@ -0,0 +1,49 @@
package info
import (
"os"
"text/template"
"github.com/urfave/cli"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
)
// Command exports the info command.
var Command = cli.Command{
Name: "info",
Usage: "show information about the current user",
ArgsUsage: " ",
Action: info,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",
Usage: "format output",
Value: tmplInfo,
Hidden: true,
},
},
}
func info(c *cli.Context) error {
client, err := internal.NewClient(c)
if err != nil {
return err
}
user, err := client.Self()
if err != nil {
return err
}
tmpl, err := template.New("_").Parse(c.String("format") + "\n")
if err != nil {
return err
}
return tmpl.Execute(os.Stdout, user)
}
// template for user information
var tmplInfo = `User: {{ .Login }}
Email: {{ .Email }}`

113
cli/drone/internal/util.go Normal file

@ -0,0 +1,113 @@
package internal
import (
"crypto/tls"
"fmt"
"net/http"
"strings"
"github.com/jackspirou/syscerts"
"github.com/urfave/cli"
"golang.org/x/net/proxy"
"golang.org/x/oauth2"
"github.com/drone/drone-go/drone"
)
// NewClient returns a new client from the CLI context.
func NewClient(c *cli.Context) (drone.Client, error) {
var (
skip = c.GlobalBool("skip-verify")
socks = c.GlobalString("socks-proxy")
socksoff = c.GlobalBool("socks-proxy-off")
token = c.GlobalString("token")
server = c.GlobalString("server")
)
server = strings.TrimRight(server, "/")
// the server address and token are required;
// return an error if either is missing.
if len(server) == 0 {
return nil, fmt.Errorf("Error: you must provide the Drone server address.")
}
if len(token) == 0 {
return nil, fmt.Errorf("Error: you must provide your Drone access token.")
}
// attempt to find system CA certs
certs := syscerts.SystemRootsPool()
tlsConfig := &tls.Config{
RootCAs: certs,
InsecureSkipVerify: skip,
}
config := new(oauth2.Config)
auther := config.Client(
oauth2.NoContext,
&oauth2.Token{
AccessToken: token,
},
)
trans, _ := auther.Transport.(*oauth2.Transport)
if len(socks) != 0 && !socksoff {
dialer, err := proxy.SOCKS5("tcp", socks, nil, proxy.Direct)
if err != nil {
return nil, err
}
trans.Base = &http.Transport{
TLSClientConfig: tlsConfig,
Proxy: http.ProxyFromEnvironment,
Dial: dialer.Dial,
}
} else {
trans.Base = &http.Transport{
TLSClientConfig: tlsConfig,
Proxy: http.ProxyFromEnvironment,
}
}
return drone.NewClient(server, auther), nil
}
// NewAutoscaleClient returns a new client from the CLI context.
func NewAutoscaleClient(c *cli.Context) (drone.Client, error) {
client, err := NewClient(c)
if err != nil {
return nil, err
}
autoscaler := c.GlobalString("autoscaler")
if autoscaler == "" {
return nil, fmt.Errorf("Please provide the autoscaler address")
}
client.SetAddress(
strings.TrimSuffix(autoscaler, "/"),
)
return client, nil
}
// ParseRepo parses the repository owner and name from a string.
func ParseRepo(str string) (user, repo string, err error) {
var parts = strings.Split(str, "/")
if len(parts) != 2 {
err = fmt.Errorf("Error: Invalid or missing repository, e.g. octocat/hello-world.")
return
}
user = parts[0]
repo = parts[1]
return
}
// ParseKeyPair parses a key=value pair.
func ParseKeyPair(p []string) map[string]string {
params := map[string]string{}
for _, i := range p {
parts := strings.SplitN(i, "=", 2)
if len(parts) != 2 {
continue
}
params[parts[0]] = parts[1]
}
return params
}


@ -0,0 +1,20 @@
package internal
import "testing"
func TestParseKeyPair(t *testing.T) {
s := []string{"FOO=bar", "BAR=", "BAZ=qux=quux", "INVALID"}
p := ParseKeyPair(s)
if p["FOO"] != "bar" {
t.Errorf("Wanted %q, got %q.", "bar", p["FOO"])
}
if p["BAZ"] != "qux=quux" {
t.Errorf("Wanted %q, got %q.", "qux=quux", p["BAZ"])
}
if _, exists := p["BAR"]; !exists {
t.Error("Missing a key with no value. Keys with empty values are also valid.")
}
if _, exists := p["INVALID"]; exists {
t.Error("Keys without an equal sign suffix are invalid.")
}
}

12
cli/drone/log/log.go Normal file

@ -0,0 +1,12 @@
package log
import "github.com/urfave/cli"
// Command exports the log command set.
var Command = cli.Command{
Name: "log",
Usage: "manage logs",
Subcommands: []cli.Command{
logPurgeCmd,
},
}


@ -0,0 +1,41 @@
package log
import (
"fmt"
"strconv"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/urfave/cli"
)
var logPurgeCmd = cli.Command{
Name: "purge",
Usage: "purge a log",
ArgsUsage: "<repo/name> <build>",
Action: logPurge,
}
func logPurge(c *cli.Context) (err error) {
repo := c.Args().First()
owner, name, err := internal.ParseRepo(repo)
if err != nil {
return err
}
number, err := strconv.Atoi(c.Args().Get(1))
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
err = client.LogsPurge(owner, name, number)
if err != nil {
return err
}
fmt.Printf("Purging logs for build %s/%s#%d\n", owner, name, number)
return nil
}
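
For completeness, a hypothetical purge invocation matching the argument usage above:

drone log purge octocat/hello-world 42   # repository and build number are placeholders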

86
cli/drone/main.go Normal file

@ -0,0 +1,86 @@
package main
import (
"fmt"
"os"
"github.com/laszlocph/drone-oss-08/cli/drone/autoscale"
"github.com/laszlocph/drone-oss-08/cli/drone/build"
"github.com/laszlocph/drone-oss-08/cli/drone/deploy"
"github.com/laszlocph/drone-oss-08/cli/drone/exec"
"github.com/laszlocph/drone-oss-08/cli/drone/info"
"github.com/laszlocph/drone-oss-08/cli/drone/log"
"github.com/laszlocph/drone-oss-08/cli/drone/registry"
"github.com/laszlocph/drone-oss-08/cli/drone/repo"
"github.com/laszlocph/drone-oss-08/cli/drone/secret"
"github.com/laszlocph/drone-oss-08/cli/drone/server"
"github.com/laszlocph/drone-oss-08/cli/drone/user"
_ "github.com/joho/godotenv/autoload"
"github.com/urfave/cli"
)
// drone version number
var version string
func main() {
app := cli.NewApp()
app.Name = "drone"
app.Version = version
app.Usage = "command line utility"
app.EnableBashCompletion = true
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "t, token",
Usage: "server auth token",
EnvVar: "DRONE_TOKEN",
},
cli.StringFlag{
Name: "s, server",
Usage: "server address",
EnvVar: "DRONE_SERVER",
},
cli.StringFlag{
Name: "autoscaler",
Usage: "autoscaler address",
EnvVar: "DRONE_AUTOSCALER",
},
cli.BoolFlag{
Name: "skip-verify",
Usage: "skip ssl verfification",
EnvVar: "DRONE_SKIP_VERIFY",
Hidden: true,
},
cli.StringFlag{
Name: "socks-proxy",
Usage: "socks proxy address",
EnvVar: "SOCKS_PROXY",
Hidden: true,
},
cli.BoolFlag{
Name: "socks-proxy-off",
Usage: "socks proxy ignored",
EnvVar: "SOCKS_PROXY_OFF",
Hidden: true,
},
}
app.Commands = []cli.Command{
build.Command,
log.Command,
deploy.Command,
exec.Command,
info.Command,
registry.Command,
secret.Command,
repo.Command,
user.Command,
server.Command,
autoscale.Command,
}
if err := app.Run(os.Args); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}


@ -0,0 +1,16 @@
package registry
import "github.com/urfave/cli"
// Command exports the registry command set.
var Command = cli.Command{
Name: "registry",
Usage: "manage registries",
Subcommands: []cli.Command{
registryCreateCmd,
registryDeleteCmd,
registryUpdateCmd,
registryInfoCmd,
registryListCmd,
},
}


@ -0,0 +1,75 @@
package registry
import (
"io/ioutil"
"strings"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/drone/drone-go/drone"
"github.com/urfave/cli"
)
var registryCreateCmd = cli.Command{
Name: "add",
Usage: "adds a registry",
ArgsUsage: "[repo/name]",
Action: registryCreate,
Flags: []cli.Flag{
cli.StringFlag{
Name: "repository",
Usage: "repository name (e.g. octocat/hello-world)",
},
cli.StringFlag{
Name: "hostname",
Usage: "registry hostname",
Value: "docker.io",
},
cli.StringFlag{
Name: "username",
Usage: "registry username",
},
cli.StringFlag{
Name: "password",
Usage: "registry password",
},
},
}
func registryCreate(c *cli.Context) error {
var (
hostname = c.String("hostname")
username = c.String("username")
password = c.String("password")
reponame = c.String("repository")
)
if reponame == "" {
reponame = c.Args().First()
}
owner, name, err := internal.ParseRepo(reponame)
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
registry := &drone.Registry{
Address: hostname,
Username: username,
Password: password,
}
if strings.HasPrefix(registry.Password, "@") {
path := strings.TrimPrefix(registry.Password, "@")
out, ferr := ioutil.ReadFile(path)
if ferr != nil {
return ferr
}
registry.Password = string(out)
}
_, err = client.RegistryCreate(owner, name, registry)
if err != nil {
return err
}
return nil
}
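
A usage sketch for adding registry credentials; all values are placeholders, and the @-prefix reads the password from a file as implemented above:

drone registry add --repository octocat/hello-world --hostname docker.io \
  --username octocat --password @/path/to/docker-password.txt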


@ -0,0 +1,62 @@
package registry
import (
"html/template"
"os"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/urfave/cli"
)
var registryInfoCmd = cli.Command{
Name: "info",
Usage: "display registry info",
ArgsUsage: "[repo/name]",
Action: registryInfo,
Flags: []cli.Flag{
cli.StringFlag{
Name: "repository",
Usage: "repository name (e.g. octocat/hello-world)",
},
cli.StringFlag{
Name: "hostname",
Usage: "registry hostname",
Value: "docker.io",
},
cli.StringFlag{
Name: "format",
Usage: "format output",
Value: tmplRegistryList,
Hidden: true,
},
},
}
func registryInfo(c *cli.Context) error {
var (
hostname = c.String("hostname")
reponame = c.String("repository")
format = c.String("format") + "\n"
)
if reponame == "" {
reponame = c.Args().First()
}
owner, name, err := internal.ParseRepo(reponame)
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
registry, err := client.Registry(owner, name, hostname)
if err != nil {
return err
}
tmpl, err := template.New("_").Parse(format)
if err != nil {
return err
}
return tmpl.Execute(os.Stdout, registry)
}


@ -0,0 +1,65 @@
package registry
import (
"html/template"
"os"
"github.com/urfave/cli"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
)
var registryListCmd = cli.Command{
Name: "ls",
Usage: "list registries",
ArgsUsage: "[repo/name]",
Action: registryList,
Flags: []cli.Flag{
cli.StringFlag{
Name: "repository",
Usage: "repository name (e.g. octocat/hello-world)",
},
cli.StringFlag{
Name: "format",
Usage: "format output",
Value: tmplRegistryList,
Hidden: true,
},
},
}
func registryList(c *cli.Context) error {
var (
format = c.String("format") + "\n"
reponame = c.String("repository")
)
if reponame == "" {
reponame = c.Args().First()
}
owner, name, err := internal.ParseRepo(reponame)
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
list, err := client.RegistryList(owner, name)
if err != nil {
return err
}
tmpl, err := template.New("_").Parse(format)
if err != nil {
return err
}
for _, registry := range list {
tmpl.Execute(os.Stdout, registry)
}
return nil
}
// template for registry list information
var tmplRegistryList = "\x1b[33m{{ .Address }} \x1b[0m" + `
Username: {{ .Username }}
Email: {{ .Email }}
`


@ -0,0 +1,44 @@
package registry
import (
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/urfave/cli"
)
var registryDeleteCmd = cli.Command{
Name: "rm",
Usage: "remove a registry",
ArgsUsage: "[repo/name]",
Action: registryDelete,
Flags: []cli.Flag{
cli.StringFlag{
Name: "repository",
Usage: "repository name (e.g. octocat/hello-world)",
},
cli.StringFlag{
Name: "hostname",
Usage: "registry hostname",
Value: "docker.io",
},
},
}
func registryDelete(c *cli.Context) error {
var (
hostname = c.String("hostname")
reponame = c.String("repository")
)
if reponame == "" {
reponame = c.Args().First()
}
owner, name, err := internal.ParseRepo(reponame)
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
return client.RegistryDelete(owner, name, hostname)
}


@ -0,0 +1,75 @@
package registry
import (
"io/ioutil"
"strings"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/drone/drone-go/drone"
"github.com/urfave/cli"
)
var registryUpdateCmd = cli.Command{
Name: "update",
Usage: "update a registry",
ArgsUsage: "[repo/name]",
Action: registryUpdate,
Flags: []cli.Flag{
cli.StringFlag{
Name: "repository",
Usage: "repository name (e.g. octocat/hello-world)",
},
cli.StringFlag{
Name: "hostname",
Usage: "registry hostname",
Value: "docker.io",
},
cli.StringFlag{
Name: "username",
Usage: "registry username",
},
cli.StringFlag{
Name: "password",
Usage: "registry password",
},
},
}
func registryUpdate(c *cli.Context) error {
var (
hostname = c.String("hostname")
username = c.String("username")
password = c.String("password")
reponame = c.String("repository")
)
if reponame == "" {
reponame = c.Args().First()
}
owner, name, err := internal.ParseRepo(reponame)
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
registry := &drone.Registry{
Address: hostname,
Username: username,
Password: password,
}
if strings.HasPrefix(registry.Password, "@") {
path := strings.TrimPrefix(registry.Password, "@")
out, ferr := ioutil.ReadFile(path)
if ferr != nil {
return ferr
}
registry.Password = string(out)
}
_, err = client.RegistryUpdate(owner, name, registry)
if err != nil {
return err
}
return nil
}

19
cli/drone/repo/repo.go Normal file

@ -0,0 +1,19 @@
package repo
import "github.com/urfave/cli"
// Command exports the repository command.
var Command = cli.Command{
Name: "repo",
Usage: "manage repositories",
Subcommands: []cli.Command{
repoListCmd,
repoInfoCmd,
repoAddCmd,
repoUpdateCmd,
repoRemoveCmd,
repoRepairCmd,
repoChownCmd,
repoSyncCmd,
},
}


@ -0,0 +1,34 @@
package repo
import (
"fmt"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/urfave/cli"
)
var repoAddCmd = cli.Command{
Name: "add",
Usage: "add a repository",
ArgsUsage: "<repo/name>",
Action: repoAdd,
}
func repoAdd(c *cli.Context) error {
repo := c.Args().First()
owner, name, err := internal.ParseRepo(repo)
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
if _, err := client.RepoPost(owner, name); err != nil {
return err
}
fmt.Printf("Successfully activated repository %s/%s\n", owner, name)
return nil
}
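
An activation sketch with a hypothetical repository; the --org filter is defined on the ls subcommand later in this commit:

drone repo add octocat/hello-world     # activate the repository (placeholder name)
drone repo ls --org octocat            # list activated repositories for an organization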


@ -0,0 +1,34 @@
package repo
import (
"fmt"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/urfave/cli"
)
var repoChownCmd = cli.Command{
Name: "chown",
Usage: "assume ownership of a repository",
ArgsUsage: "<repo/name>",
Action: repoChown,
}
func repoChown(c *cli.Context) error {
repo := c.Args().First()
owner, name, err := internal.ParseRepo(repo)
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
if _, err := client.RepoChown(owner, name); err != nil {
return err
}
fmt.Printf("Successfully assumed ownership of repository %s/%s\n", owner, name)
return nil
}


@ -0,0 +1,59 @@
package repo
import (
"os"
"text/template"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/urfave/cli"
)
var repoInfoCmd = cli.Command{
Name: "info",
Usage: "show repository details",
ArgsUsage: "<repo/name>",
Action: repoInfo,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",
Usage: "format output",
Value: tmplRepoInfo,
},
},
}
func repoInfo(c *cli.Context) error {
arg := c.Args().First()
owner, name, err := internal.ParseRepo(arg)
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
repo, err := client.Repo(owner, name)
if err != nil {
return err
}
tmpl, err := template.New("_").Parse(c.String("format"))
if err != nil {
return err
}
return tmpl.Execute(os.Stdout, repo)
}
// template for repo information
var tmplRepoInfo = `Owner: {{ .Owner }}
Repo: {{ .Name }}
Type: {{ .Kind }}
Config: {{ .Config }}
Visibility: {{ .Visibility }}
Private: {{ .IsPrivate }}
Trusted: {{ .IsTrusted }}
Gated: {{ .IsGated }}
Remote: {{ .Clone }}
`


@ -0,0 +1,56 @@
package repo
import (
"os"
"text/template"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/urfave/cli"
)
var repoListCmd = cli.Command{
Name: "ls",
Usage: "list all repos",
ArgsUsage: " ",
Action: repoList,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",
Usage: "format output",
Value: tmplRepoList,
},
cli.StringFlag{
Name: "org",
Usage: "filter by organization",
},
},
}
func repoList(c *cli.Context) error {
client, err := internal.NewClient(c)
if err != nil {
return err
}
repos, err := client.RepoList()
if err != nil || len(repos) == 0 {
return err
}
tmpl, err := template.New("_").Parse(c.String("format") + "\n")
if err != nil {
return err
}
org := c.String("org")
for _, repo := range repos {
if org != "" && org != repo.Owner {
continue
}
tmpl.Execute(os.Stdout, repo)
}
return nil
}
// template for repository list items
var tmplRepoList = `{{ .FullName }}`

View File

@ -0,0 +1,26 @@
package repo
import (
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/urfave/cli"
)
var repoRepairCmd = cli.Command{
Name: "repair",
Usage: "repair repository webhooks",
ArgsUsage: "<repo/name>",
Action: repoRepair,
}
func repoRepair(c *cli.Context) error {
repo := c.Args().First()
owner, name, err := internal.ParseRepo(repo)
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
return client.RepoRepair(owner, name)
}

35
cli/drone/repo/repo_rm.go Normal file
View File

@ -0,0 +1,35 @@
package repo
import (
"fmt"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/urfave/cli"
)
var repoRemoveCmd = cli.Command{
Name: "rm",
Usage: "remove a repository",
ArgsUsage: "<repo/name>",
Action: repoRemove,
}
func repoRemove(c *cli.Context) error {
repo := c.Args().First()
owner, name, err := internal.ParseRepo(repo)
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
if err := client.RepoDel(owner, name); err != nil {
return err
}
fmt.Printf("Successfully removed repository %s/%s\n", owner, name)
return nil
}

View File

@ -0,0 +1,49 @@
package repo
import (
"os"
"text/template"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/urfave/cli"
)
var repoSyncCmd = cli.Command{
Name: "sync",
Usage: "synchronize the repository list",
ArgsUsage: " ",
Action: repoSync,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",
Usage: "format output",
Value: tmplRepoList,
},
},
}
func repoSync(c *cli.Context) error {
client, err := internal.NewClient(c)
if err != nil {
return err
}
repos, err := client.RepoListOpts(true, true)
if err != nil || len(repos) == 0 {
return err
}
tmpl, err := template.New("_").Parse(c.String("format") + "\n")
if err != nil {
return err
}
org := c.String("org")
for _, repo := range repos {
if org != "" && org != repo.Owner {
continue
}
tmpl.Execute(os.Stdout, repo)
}
return nil
}

View File

@ -0,0 +1,104 @@
package repo
import (
"fmt"
"time"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/drone/drone-go/drone"
"github.com/urfave/cli"
)
var repoUpdateCmd = cli.Command{
Name: "update",
Usage: "update a repository",
ArgsUsage: "<repo/name>",
Action: repoUpdate,
Flags: []cli.Flag{
cli.BoolFlag{
Name: "trusted",
Usage: "repository is trusted",
},
cli.BoolFlag{
Name: "gated",
Usage: "repository is gated",
},
cli.DurationFlag{
Name: "timeout",
Usage: "repository timeout",
},
cli.StringFlag{
Name: "visibility",
Usage: "repository visibility",
},
cli.StringFlag{
Name: "config",
Usage: "repository configuration path (e.g. .drone.yml)",
},
cli.IntFlag{
Name: "build-counter",
Usage: "repository starting build number",
},
cli.BoolFlag{
Name: "unsafe",
Usage: "acknowledge that updating the build-counter is an unsafe operation",
},
},
}
func repoUpdate(c *cli.Context) error {
repo := c.Args().First()
owner, name, err := internal.ParseRepo(repo)
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
var (
visibility = c.String("visibility")
config = c.String("config")
timeout = c.Duration("timeout")
trusted = c.Bool("trusted")
gated = c.Bool("gated")
buildCounter = c.Int("build-counter")
unsafe = c.Bool("unsafe")
)
patch := new(drone.RepoPatch)
if c.IsSet("trusted") {
patch.IsTrusted = &trusted
}
if c.IsSet("gated") {
patch.IsGated = &gated
}
if c.IsSet("timeout") {
v := int64(timeout / time.Minute)
patch.Timeout = &v
}
if c.IsSet("config") {
patch.Config = &config
}
if c.IsSet("visibility") {
switch visibility {
case "public", "private", "internal":
patch.Visibility = &visibility
}
}
if c.IsSet("build-counter") && !unsafe {
fmt.Printf("Setting the build counter is an unsafe operation that could put your repository in an inconsistent state. Please use --unsafe to proceed\n")
}
if c.IsSet("build-counter") && unsafe {
patch.BuildCounter = &buildCounter
}
if _, err := client.RepoPatch(owner, name, patch); err != nil {
return err
}
fmt.Printf("Successfully updated repository %s/%s\n", owner, name)
return nil
}

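The command above only attaches fields that were explicitly set on the command line; a standalone sketch (assuming the vendored drone-go types added below) of why RepoPatch uses pointer fields: nil members are omitted from the JSON body, so untouched settings keep their server-side values.

```Go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/drone/drone-go/drone"
)

func main() {
	trusted := true
	timeout := int64(60) // minutes, as in repoUpdate above
	patch := &drone.RepoPatch{
		IsTrusted: &trusted,
		Timeout:   &timeout,
	}
	body, _ := json.Marshal(patch)
	// Only the assigned pointers are serialized (visibility shows up as null
	// because its tag has no omitempty); everything else is left out of the PATCH.
	fmt.Println(string(body))
}
```
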
View File

@ -0,0 +1,16 @@
package secret
import "github.com/urfave/cli"
// Command exports the secret command.
var Command = cli.Command{
Name: "secret",
Usage: "manage secrets",
Subcommands: []cli.Command{
secretCreateCmd,
secretDeleteCmd,
secretUpdateCmd,
secretInfoCmd,
secretListCmd,
},
}

View File

@ -0,0 +1,80 @@
package secret
import (
"io/ioutil"
"strings"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/drone/drone-go/drone"
"github.com/urfave/cli"
)
var secretCreateCmd = cli.Command{
Name: "add",
Usage: "adds a secret",
ArgsUsage: "[repo/name]",
Action: secretCreate,
Flags: []cli.Flag{
cli.StringFlag{
Name: "repository",
Usage: "repository name (e.g. octocat/hello-world)",
},
cli.StringFlag{
Name: "name",
Usage: "secret name",
},
cli.StringFlag{
Name: "value",
Usage: "secret value",
},
cli.StringSliceFlag{
Name: "event",
Usage: "secret limited to these events",
},
cli.StringSliceFlag{
Name: "image",
Usage: "secret limited to these images",
},
},
}
func secretCreate(c *cli.Context) error {
reponame := c.String("repository")
if reponame == "" {
reponame = c.Args().First()
}
owner, name, err := internal.ParseRepo(reponame)
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
secret := &drone.Secret{
Name: c.String("name"),
Value: c.String("value"),
Images: c.StringSlice("image"),
Events: c.StringSlice("event"),
}
if len(secret.Events) == 0 {
secret.Events = defaultSecretEvents
}
if strings.HasPrefix(secret.Value, "@") {
path := strings.TrimPrefix(secret.Value, "@")
out, ferr := ioutil.ReadFile(path)
if ferr != nil {
return ferr
}
secret.Value = string(out)
}
_, err = client.SecretCreate(owner, name, secret)
return err
}
var defaultSecretEvents = []string{
drone.EventPush,
drone.EventTag,
drone.EventDeploy,
}

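The registry and secret commands share an "@file" convention: a value starting with "@" is read from the named file. A minimal standalone sketch of that resolution step (hypothetical helper, not part of this change):

```Go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"
)

// resolveValue returns the literal value, or the contents of the referenced
// file when the value is prefixed with "@" (e.g. "@/run/secrets/docker_password").
func resolveValue(v string) (string, error) {
	if !strings.HasPrefix(v, "@") {
		return v, nil
	}
	out, err := ioutil.ReadFile(strings.TrimPrefix(v, "@"))
	if err != nil {
		return "", err
	}
	return string(out), nil
}

func main() {
	v, err := resolveValue("@/run/secrets/docker_password")
	fmt.Println(v, err)
}
```
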
View File

@ -0,0 +1,61 @@
package secret
import (
"html/template"
"os"
"github.com/urfave/cli"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
)
var secretInfoCmd = cli.Command{
Name: "info",
Usage: "display secret info",
ArgsUsage: "[repo/name]",
Action: secretInfo,
Flags: []cli.Flag{
cli.StringFlag{
Name: "repository",
Usage: "repository name (e.g. octocat/hello-world)",
},
cli.StringFlag{
Name: "name",
Usage: "secret name",
},
cli.StringFlag{
Name: "format",
Usage: "format output",
Value: tmplSecretList,
Hidden: true,
},
},
}
func secretInfo(c *cli.Context) error {
var (
secretName = c.String("name")
repoName = c.String("repository")
format = c.String("format") + "\n"
)
if repoName == "" {
repoName = c.Args().First()
}
owner, name, err := internal.ParseRepo(repoName)
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
secret, err := client.Secret(owner, name, secretName)
if err != nil {
return err
}
tmpl, err := template.New("_").Funcs(secretFuncMap).Parse(format)
if err != nil {
return err
}
return tmpl.Execute(os.Stdout, secret)
}

View File

@ -0,0 +1,76 @@
package secret
import (
"html/template"
"os"
"strings"
"github.com/urfave/cli"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
)
var secretListCmd = cli.Command{
Name: "ls",
Usage: "list secrets",
ArgsUsage: "[repo/name]",
Action: secretList,
Flags: []cli.Flag{
cli.StringFlag{
Name: "repository",
Usage: "repository name (e.g. octocat/hello-world)",
},
cli.StringFlag{
Name: "format",
Usage: "format output",
Value: tmplSecretList,
Hidden: true,
},
},
}
func secretList(c *cli.Context) error {
var (
format = c.String("format") + "\n"
reponame = c.String("repository")
)
if reponame == "" {
reponame = c.Args().First()
}
owner, name, err := internal.ParseRepo(reponame)
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
list, err := client.SecretList(owner, name)
if err != nil {
return err
}
tmpl, err := template.New("_").Funcs(secretFuncMap).Parse(format)
if err != nil {
return err
}
for _, secret := range list {
tmpl.Execute(os.Stdout, secret)
}
return nil
}
// template for secret list items
var tmplSecretList = "\x1b[33m{{ .Name }} \x1b[0m" + `
Events: {{ list .Events }}
{{- if .Images }}
Images: {{ list .Images }}
{{- else }}
Images: <any>
{{- end }}
`
var secretFuncMap = template.FuncMap{
"list": func(s []string) string {
return strings.Join(s, ", ")
},
}

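A standalone sketch (placeholder secret, not part of this change) of how the list helper registered above behaves when a custom --format is supplied; text/template is used here for brevity, while the commands above use html/template, which additionally escapes HTML in values.

```Go
package main

import (
	"os"
	"strings"
	"text/template"

	"github.com/drone/drone-go/drone"
)

func main() {
	funcs := template.FuncMap{
		"list": func(s []string) string { return strings.Join(s, ", ") },
	}
	format := "{{ .Name }}: {{ list .Events }}\n"
	tmpl := template.Must(template.New("_").Funcs(funcs).Parse(format))
	tmpl.Execute(os.Stdout, &drone.Secret{
		Name:   "docker_password",
		Events: []string{"push", "tag"},
	})
}
```
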
View File

@ -0,0 +1,43 @@
package secret
import (
"github.com/urfave/cli"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
)
var secretDeleteCmd = cli.Command{
Name: "rm",
Usage: "remove a secret",
ArgsUsage: "[repo/name]",
Action: secretDelete,
Flags: []cli.Flag{
cli.StringFlag{
Name: "repository",
Usage: "repository name (e.g. octocat/hello-world)",
},
cli.StringFlag{
Name: "name",
Usage: "secret name",
},
},
}
func secretDelete(c *cli.Context) error {
var (
secret = c.String("name")
reponame = c.String("repository")
)
if reponame == "" {
reponame = c.Args().First()
}
owner, name, err := internal.ParseRepo(reponame)
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
return client.SecretDelete(owner, name, secret)
}

View File

@ -0,0 +1,71 @@
package secret
import (
"io/ioutil"
"strings"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/drone/drone-go/drone"
"github.com/urfave/cli"
)
var secretUpdateCmd = cli.Command{
Name: "update",
Usage: "update a secret",
ArgsUsage: "[repo/name]",
Action: secretUpdate,
Flags: []cli.Flag{
cli.StringFlag{
Name: "repository",
Usage: "repository name (e.g. octocat/hello-world)",
},
cli.StringFlag{
Name: "name",
Usage: "secret name",
},
cli.StringFlag{
Name: "value",
Usage: "secret value",
},
cli.StringSliceFlag{
Name: "event",
Usage: "secret limited to these events",
},
cli.StringSliceFlag{
Name: "image",
Usage: "secret limited to these images",
},
},
}
func secretUpdate(c *cli.Context) error {
reponame := c.String("repository")
if reponame == "" {
reponame = c.Args().First()
}
owner, name, err := internal.ParseRepo(reponame)
if err != nil {
return err
}
client, err := internal.NewClient(c)
if err != nil {
return err
}
secret := &drone.Secret{
Name: c.String("name"),
Value: c.String("value"),
Images: c.StringSlice("image"),
Events: c.StringSlice("event"),
}
if strings.HasPrefix(secret.Value, "@") {
path := strings.TrimPrefix(secret.Value, "@")
out, ferr := ioutil.ReadFile(path)
if ferr != nil {
return ferr
}
secret.Value = string(out)
}
_, err = client.SecretUpdate(owner, name, secret)
return err
}

View File

@ -0,0 +1,17 @@
package server
import "github.com/urfave/cli"
// Command exports the server command set.
var Command = cli.Command{
Name: "server",
Usage: "manage servers",
Subcommands: []cli.Command{
serverListCmd,
serverInfoCmd,
serverOpenCmd,
serverCreateCmd,
serverDestroyCmd,
serverEnvCmd,
},
}

View File

@ -0,0 +1,46 @@
package server
import (
"os"
"text/template"
"github.com/urfave/cli"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
)
var serverCreateCmd = cli.Command{
Name: "create",
Usage: "create a new server",
Action: serverCreate,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",
Usage: "format output",
Value: tmplServerCreate,
Hidden: true,
},
},
}
func serverCreate(c *cli.Context) error {
client, err := internal.NewAutoscaleClient(c)
if err != nil {
return err
}
server, err := client.ServerCreate()
if err != nil {
return err
}
tmpl, err := template.New("_").Parse(c.String("format") + "\n")
if err != nil {
return err
}
return tmpl.Execute(os.Stdout, server)
}
var tmplServerCreate = `Name: {{ .Name }}
State: {{ .State }}
`

View File

@ -0,0 +1,61 @@
package server
import (
"fmt"
"os"
"text/template"
"github.com/urfave/cli"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
)
var serverDestroyCmd = cli.Command{
Name: "destroy",
Usage: "destroy a server",
ArgsUsage: "<servername>",
Action: serverDestroy,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",
Usage: "format output",
Value: tmplServerDestroy,
Hidden: true,
},
},
}
func serverDestroy(c *cli.Context) error {
client, err := internal.NewAutoscaleClient(c)
if err != nil {
return err
}
name := c.Args().First()
if len(name) == 0 {
return fmt.Errorf("Missing or invalid server name")
}
err = client.ServerDelete(name)
if err != nil {
return err
}
server, err := client.Server(name)
if err != nil {
return err
}
tmpl, err := template.New("_").Parse(c.String("format") + "\n")
if err != nil {
return err
}
return tmpl.Execute(os.Stdout, server)
}
var tmplServerDestroy = `Name: {{ .Name }}
Address: {{ .Address }}
Region: {{ .Region }}
Size: {{ .Size }}
State: {{ .State }}
`

View File

@ -0,0 +1,143 @@
package server
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/user"
"path"
"text/template"
"github.com/urfave/cli"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/drone/drone-go/drone"
)
var serverEnvCmd = cli.Command{
Name: "env",
ArgsUsage: "<servername>",
Action: serverEnv,
Flags: []cli.Flag{
cli.StringFlag{
Name: "shell",
Usage: "shell [bash, fish, powershell]",
Value: "bash",
},
cli.BoolFlag{
Name: "no-proxy",
Usage: "configure the NO_PROXY variable",
},
cli.BoolFlag{
Name: "clear",
Usage: "clear the certificate cache",
},
},
}
func serverEnv(c *cli.Context) error {
u, err := user.Current()
if err != nil {
return err
}
name := c.Args().First()
if len(name) == 0 {
return fmt.Errorf("Missing or invalid server name")
}
home := path.Join(u.HomeDir, ".drone", "certs")
base := path.Join(home, name)
if c.Bool("clear") {
os.RemoveAll(home)
}
server := new(drone.Server)
if _, err := os.Stat(base); err == nil {
data, err := ioutil.ReadFile(path.Join(base, "server.json"))
if err != nil {
return err
}
err = json.Unmarshal(data, server)
if err != nil {
return err
}
} else {
client, err := internal.NewAutoscaleClient(c)
if err != nil {
return err
}
server, err = client.Server(name)
if err != nil {
return err
}
data, err := json.Marshal(server)
if err != nil {
return err
}
err = os.MkdirAll(base, 0755)
if err != nil {
return err
}
err = ioutil.WriteFile(path.Join(base, "server.json"), data, 0644)
if err != nil {
return err
}
err = ioutil.WriteFile(path.Join(base, "ca.pem"), server.CACert, 0644)
if err != nil {
return err
}
err = ioutil.WriteFile(path.Join(base, "cert.pem"), server.TLSCert, 0644)
if err != nil {
return err
}
err = ioutil.WriteFile(path.Join(base, "key.pem"), server.TLSKey, 0644)
if err != nil {
return err
}
}
return shellT.Execute(os.Stdout, map[string]interface{}{
"Name": server.Name,
"Address": server.Address,
"Path": base,
"Shell": c.String("shell"),
"NoProxy": c.Bool("no-proxy"),
})
}
var shellT = template.Must(template.New("_").Parse(`
{{- if eq .Shell "fish" -}}
set -x DOCKER_TLS "1";
set -x DOCKER_TLS_VERIFY "";
set -x DOCKER_CERT_PATH {{ printf "%q" .Path }};
set -x DOCKER_HOST "tcp://{{ .Address }}:2376";
{{ if .NoProxy -}}
set -x NO_PROXY {{ printf "%q" .Address }};
{{ end }}
# Run this command to configure your shell:
# eval "$(drone server env {{ .Name }} --shell=fish)"
{{- else if eq .Shell "powershell" -}}
$Env:DOCKER_TLS = "1"
$Env:DOCKER_TLS_VERIFY = ""
$Env:DOCKER_CERT_PATH = {{ printf "%q" .Path }}
$Env:DOCKER_HOST = "tcp://{{ .Address }}:2376"
{{ if .NoProxy -}}
$Env:NO_PROXY = {{ printf "%q" .Address }}
{{ end }}
# Run this command to configure your shell:
# drone server env {{ .Name }} --shell=powershell | Invoke-Expression
{{- else -}}
export DOCKER_TLS=1
export DOCKER_TLS_VERIFY=
export DOCKER_CERT_PATH={{ .Path }}
export DOCKER_HOST=tcp://{{ .Address }}:2376
{{ if .NoProxy -}}
export NO_PROXY={{ .Address }}
{{ end }}
# Run this command to configure your shell:
# eval "$(drone server env {{ .Name }})"
{{- end }}
`))

View File

@ -0,0 +1,60 @@
package server
import (
"fmt"
"os"
"text/template"
"github.com/urfave/cli"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
)
var serverInfoCmd = cli.Command{
Name: "info",
Usage: "show server details",
ArgsUsage: "<servername>",
Action: serverInfo,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",
Usage: "format output",
Value: tmplServerInfo,
Hidden: true,
},
},
}
func serverInfo(c *cli.Context) error {
client, err := internal.NewAutoscaleClient(c)
if err != nil {
return err
}
name := c.Args().First()
if len(name) == 0 {
return fmt.Errorf("Missing or invalid server name")
}
server, err := client.Server(name)
if err != nil {
return err
}
tmpl, err := template.New("_").Parse(c.String("format") + "\n")
if err != nil {
return err
}
return tmpl.Execute(os.Stdout, server)
}
// template for server information
var tmplServerInfo = `Name: {{ .Name }}
Address: {{ .Address }}
Region: {{ .Region }}
Size: {{ .Size }}
State: {{ .State }}
{{ if .Error -}}
Error: {{ .Error }}
{{ end -}}
`

View File

@ -0,0 +1,110 @@
package server
import (
"fmt"
"os"
"text/tabwriter"
"text/template"
"time"
"github.com/docker/go-units"
"github.com/urfave/cli"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
"github.com/drone/drone-go/drone"
)
var serverListCmd = cli.Command{
Name: "ls",
Usage: "list all servers",
ArgsUsage: " ",
Action: serverList,
Flags: []cli.Flag{
cli.BoolFlag{
Name: "a",
Usage: "include stopped servers",
},
cli.BoolFlag{
Name: "l",
Usage: "list in long format",
},
cli.BoolTFlag{
Name: "H",
Usage: "include column headers",
},
cli.StringFlag{
Name: "format",
Usage: "format output",
Value: tmplServerList,
Hidden: true,
},
cli.BoolFlag{
Name: "la",
Hidden: true,
},
},
}
func serverList(c *cli.Context) error {
client, err := internal.NewAutoscaleClient(c)
if err != nil {
return err
}
a := c.Bool("a")
l := c.Bool("l")
h := c.BoolT("H")
if c.BoolT("la") {
l = true
a = true
}
servers, err := client.ServerList()
if err != nil || len(servers) == 0 {
return err
}
if l {
printLong(servers, a, h)
return nil
}
tmpl, err := template.New("_").Parse(c.String("format") + "\n")
if err != nil {
return err
}
for _, server := range servers {
if !a && server.State == "stopped" {
continue
}
tmpl.Execute(os.Stdout, server)
}
return nil
}
func printLong(servers []*drone.Server, a, h bool) {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 4, ' ', 0)
if h {
fmt.Fprintln(w, "Name\tAddress\tState\tCreated")
}
for _, server := range servers {
if !a && server.State == "stopped" {
continue
}
fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\n",
server.Name,
server.Address,
server.State,
units.HumanDuration(
time.Now().Sub(
time.Unix(server.Created, 0),
),
),
)
}
w.Flush()
}
// template for server list items
var tmplServerList = `{{ .Name }}`

View File

@ -0,0 +1,49 @@
package server
import (
"fmt"
"net/url"
"github.com/pkg/browser"
"github.com/urfave/cli"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
)
//
// support for cadvisor was temporarily disabled, so
// this command has been hidden from the --help menu
// until available.
//
var serverOpenCmd = cli.Command{
Name: "open",
Usage: "open server dashboard",
ArgsUsage: "<servername>",
Action: serverOpen,
Hidden: true,
}
func serverOpen(c *cli.Context) error {
client, err := internal.NewAutoscaleClient(c)
if err != nil {
return err
}
name := c.Args().First()
if len(name) == 0 {
return fmt.Errorf("Missing or invalid server name")
}
server, err := client.Server(name)
if err != nil {
return err
}
uri := new(url.URL)
uri.Scheme = "http"
uri.Host = server.Address + ":8080"
uri.User = url.UserPassword("admin", server.Secret)
return browser.OpenURL(uri.String())
}

15
cli/drone/user/user.go Normal file
View File

@ -0,0 +1,15 @@
package user
import "github.com/urfave/cli"
// Command exports the user command set.
var Command = cli.Command{
Name: "user",
Usage: "manage users",
Subcommands: []cli.Command{
userListCmd,
userInfoCmd,
userAddCmd,
userRemoveCmd,
},
}

View File

@ -0,0 +1,33 @@
package user
import (
"fmt"
"github.com/drone/drone-go/drone"
"github.com/urfave/cli"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
)
var userAddCmd = cli.Command{
Name: "add",
Usage: "adds a user",
ArgsUsage: "<username>",
Action: userAdd,
}
func userAdd(c *cli.Context) error {
login := c.Args().First()
client, err := internal.NewClient(c)
if err != nil {
return err
}
user, err := client.UserPost(&drone.User{Login: login})
if err != nil {
return err
}
fmt.Printf("Successfully added user %s\n", user.Login)
return nil
}

View File

@ -0,0 +1,52 @@
package user
import (
"fmt"
"os"
"text/template"
"github.com/urfave/cli"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
)
var userInfoCmd = cli.Command{
Name: "info",
Usage: "show user details",
ArgsUsage: "<username>",
Action: userInfo,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",
Usage: "format output",
Value: tmplUserInfo,
},
},
}
func userInfo(c *cli.Context) error {
client, err := internal.NewClient(c)
if err != nil {
return err
}
login := c.Args().First()
if len(login) == 0 {
return fmt.Errorf("Missing or invalid user login")
}
user, err := client.User(login)
if err != nil {
return err
}
tmpl, err := template.New("_").Parse(c.String("format") + "\n")
if err != nil {
return err
}
return tmpl.Execute(os.Stdout, user)
}
// template for user information
var tmplUserInfo = `User: {{ .Login }}
Email: {{ .Email }}`

View File

@ -0,0 +1,48 @@
package user
import (
"os"
"text/template"
"github.com/urfave/cli"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
)
var userListCmd = cli.Command{
Name: "ls",
Usage: "list all users",
ArgsUsage: " ",
Action: userList,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",
Usage: "format output",
Value: tmplUserList,
},
},
}
func userList(c *cli.Context) error {
client, err := internal.NewClient(c)
if err != nil {
return err
}
users, err := client.UserList()
if err != nil || len(users) == 0 {
return err
}
tmpl, err := template.New("_").Parse(c.String("format") + "\n")
if err != nil {
return err
}
for _, user := range users {
tmpl.Execute(os.Stdout, user)
}
return nil
}
// template for user list items
var tmplUserList = `{{ .Login }}`

31
cli/drone/user/user_rm.go Normal file
View File

@ -0,0 +1,31 @@
package user
import (
"fmt"
"github.com/urfave/cli"
"github.com/laszlocph/drone-oss-08/cli/drone/internal"
)
var userRemoveCmd = cli.Command{
Name: "rm",
Usage: "remove a user",
ArgsUsage: "<username>",
Action: userRemove,
}
func userRemove(c *cli.Context) error {
login := c.Args().First()
client, err := internal.NewClient(c)
if err != nil {
return err
}
if err := client.UserDel(login); err != nil {
return err
}
fmt.Printf("Successfully removed user %s\n", login)
return nil
}

202
vendor/github.com/drone/drone-go/LICENSE generated vendored Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

35
vendor/github.com/drone/drone-go/README.md generated vendored Normal file
View File

@ -0,0 +1,35 @@
# drone-go
```Go
package main
import (
"fmt"
"github.com/drone/drone-go/drone"
"golang.org/x/oauth2"
)
const (
token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9"
host = "http://drone.company.com"
)
func main() {
// create an http client with oauth authentication.
config := new(oauth2.Config)
auther := config.Client(
oauth2.NoContext,
&oauth2.Token{
AccessToken: token,
},
)
// create the drone client with authenticator
client := drone.NewClient(host, auther)
// gets the current user
user, err := client.Self()
fmt.Println(user, err)
// gets the named repository information
repo, err := client.Repo("drone", "drone-go")
fmt.Println(repo, err)
}
```

541
vendor/github.com/drone/drone-go/drone/client.go generated vendored Normal file
View File

@ -0,0 +1,541 @@
package drone
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
)
const (
pathSelf = "%s/api/user"
pathFeed = "%s/api/user/feed"
pathRepos = "%s/api/user/repos"
pathRepo = "%s/api/repos/%s/%s"
pathRepoMove = "%s/api/repos/%s/%s/move?to=%s"
pathChown = "%s/api/repos/%s/%s/chown"
pathRepair = "%s/api/repos/%s/%s/repair"
pathBuilds = "%s/api/repos/%s/%s/builds"
pathBuild = "%s/api/repos/%s/%s/builds/%v"
pathApprove = "%s/api/repos/%s/%s/builds/%d/approve"
pathDecline = "%s/api/repos/%s/%s/builds/%d/decline"
pathJob = "%s/api/repos/%s/%s/builds/%d/%d"
pathLog = "%s/api/repos/%s/%s/logs/%d/%d"
pathLogPurge = "%s/api/repos/%s/%s/logs/%d"
pathRepoSecrets = "%s/api/repos/%s/%s/secrets"
pathRepoSecret = "%s/api/repos/%s/%s/secrets/%s"
pathRepoRegistries = "%s/api/repos/%s/%s/registry"
pathRepoRegistry = "%s/api/repos/%s/%s/registry/%s"
pathUsers = "%s/api/users"
pathUser = "%s/api/users/%s"
pathBuildQueue = "%s/api/builds"
pathServers = "%s/api/servers"
pathServer = "%s/api/servers/%s"
pathScalerPause = "%s/api/pause"
pathScalerResume = "%s/api/resume"
pathVarz = "%s/varz"
pathVersion = "%s/version"
)
type client struct {
client *http.Client
addr string
}
// // Options provides a list of client options.
// type Options struct {
// token string
// proxy string
// pool *x509.CertPool
// conf *tls.Config
// skip bool
// }
//
// // Option defines client options.
// type Option func(opts *Options)
//
// // WithToken returns an option to set the token.
// func WithToken(token string) Option {
// return func(opts *Options) {
// opts.token = token
// }
// }
//
// // WithTLS returns an option to use custom tls configuration.
// func WithTLS(conf *tls.Config) Option {
// return func(opts *Options) {
// opts.conf = conf
// }
// }
//
// // WithSocks returns a client option to provide a socks5 proxy.
// func WithSocks(proxy string) Option {
// return func(opts *Options) {
// opts.proxy = proxy
// }
// }
//
// // WithSkipVerify returns a client option to skip ssl verification.
// func WithSkipVerify(skip bool) Option {
// return func(opts *Options) {
// opts.skip = skip
// }
// }
//
// // WithCertPool returns a client option to provide a custom cert pool.
// func WithCertPool(pool *x509.CertPool) Option {
// return func(opts *Options) {
// opts.pool = pool
// }
// }
// New returns a client at the specified url.
func New(uri string) Client {
return &client{http.DefaultClient, strings.TrimSuffix(uri, "/")}
}
// NewClient returns a client at the specified url.
func NewClient(uri string, cli *http.Client) Client {
return &client{cli, strings.TrimSuffix(uri, "/")}
}
// SetClient sets the http.Client.
func (c *client) SetClient(client *http.Client) {
c.client = client
}
// SetAddress sets the server address.
func (c *client) SetAddress(addr string) {
c.addr = addr
}
// Self returns the currently authenticated user.
func (c *client) Self() (*User, error) {
out := new(User)
uri := fmt.Sprintf(pathSelf, c.addr)
err := c.get(uri, out)
return out, err
}
// User returns a user by login.
func (c *client) User(login string) (*User, error) {
out := new(User)
uri := fmt.Sprintf(pathUser, c.addr, login)
err := c.get(uri, out)
return out, err
}
// UserList returns a list of all registered users.
func (c *client) UserList() ([]*User, error) {
var out []*User
uri := fmt.Sprintf(pathUsers, c.addr)
err := c.get(uri, &out)
return out, err
}
// UserPost creates a new user account.
func (c *client) UserPost(in *User) (*User, error) {
out := new(User)
uri := fmt.Sprintf(pathUsers, c.addr)
err := c.post(uri, in, out)
return out, err
}
// UserPatch updates a user account.
func (c *client) UserPatch(in *User) (*User, error) {
out := new(User)
uri := fmt.Sprintf(pathUser, c.addr, in.Login)
err := c.patch(uri, in, out)
return out, err
}
// UserDel deletes a user account.
func (c *client) UserDel(login string) error {
uri := fmt.Sprintf(pathUser, c.addr, login)
err := c.delete(uri)
return err
}
// Repo returns a repository by name.
func (c *client) Repo(owner string, name string) (*Repo, error) {
out := new(Repo)
uri := fmt.Sprintf(pathRepo, c.addr, owner, name)
err := c.get(uri, out)
return out, err
}
// RepoList returns a list of all repositories to which
// the user has explicit access in the host system.
func (c *client) RepoList() ([]*Repo, error) {
var out []*Repo
uri := fmt.Sprintf(pathRepos, c.addr)
err := c.get(uri, &out)
return out, err
}
// RepoListOpts returns a list of all repositories to which
// the user has explicit access in the host system.
func (c *client) RepoListOpts(sync, all bool) ([]*Repo, error) {
var out []*Repo
uri := fmt.Sprintf(pathRepos+"?flush=%v&all=%v", c.addr, sync, all)
err := c.get(uri, &out)
return out, err
}
// RepoPost activates a repository.
func (c *client) RepoPost(owner string, name string) (*Repo, error) {
out := new(Repo)
uri := fmt.Sprintf(pathRepo, c.addr, owner, name)
err := c.post(uri, nil, out)
return out, err
}
// RepoChown updates a repository owner.
func (c *client) RepoChown(owner string, name string) (*Repo, error) {
out := new(Repo)
uri := fmt.Sprintf(pathChown, c.addr, owner, name)
err := c.post(uri, nil, out)
return out, err
}
// RepoRepair repairs the repository hooks.
func (c *client) RepoRepair(owner string, name string) error {
uri := fmt.Sprintf(pathRepair, c.addr, owner, name)
return c.post(uri, nil, nil)
}
// RepoPatch updates a repository.
func (c *client) RepoPatch(owner, name string, in *RepoPatch) (*Repo, error) {
out := new(Repo)
uri := fmt.Sprintf(pathRepo, c.addr, owner, name)
err := c.patch(uri, in, out)
return out, err
}
// RepoDel deletes a repository.
func (c *client) RepoDel(owner, name string) error {
uri := fmt.Sprintf(pathRepo, c.addr, owner, name)
err := c.delete(uri)
return err
}
// RepoMove moves a repository
func (c *client) RepoMove(owner, name, newFullName string) error {
uri := fmt.Sprintf(pathRepoMove, c.addr, owner, name, newFullName)
return c.post(uri, nil, nil)
}
// Build returns a repository build by number.
func (c *client) Build(owner, name string, num int) (*Build, error) {
out := new(Build)
uri := fmt.Sprintf(pathBuild, c.addr, owner, name, num)
err := c.get(uri, out)
return out, err
}
// BuildLast returns the latest repository build by branch.
func (c *client) BuildLast(owner, name, branch string) (*Build, error) {
out := new(Build)
uri := fmt.Sprintf(pathBuild, c.addr, owner, name, "latest")
if len(branch) != 0 {
uri += "?branch=" + branch
}
err := c.get(uri, out)
return out, err
}
// BuildList returns a list of recent builds for the
// specified repository.
func (c *client) BuildList(owner, name string) ([]*Build, error) {
var out []*Build
uri := fmt.Sprintf(pathBuilds, c.addr, owner, name)
err := c.get(uri, &out)
return out, err
}
// BuildQueue returns a list of enqueued builds.
func (c *client) BuildQueue() ([]*Activity, error) {
var out []*Activity
uri := fmt.Sprintf(pathBuildQueue, c.addr)
err := c.get(uri, &out)
return out, err
}
// BuildStart re-starts a stopped build.
func (c *client) BuildStart(owner, name string, num int, params map[string]string) (*Build, error) {
out := new(Build)
val := mapValues(params)
uri := fmt.Sprintf(pathBuild, c.addr, owner, name, num)
err := c.post(uri+"?"+val.Encode(), nil, out)
return out, err
}
// BuildStop cancels the running job.
func (c *client) BuildStop(owner, name string, num, job int) error {
uri := fmt.Sprintf(pathJob, c.addr, owner, name, num, job)
err := c.delete(uri)
return err
}
// BuildApprove approves a blocked build.
func (c *client) BuildApprove(owner, name string, num int) (*Build, error) {
out := new(Build)
uri := fmt.Sprintf(pathApprove, c.addr, owner, name, num)
err := c.post(uri, nil, out)
return out, err
}
// BuildDecline declines a blocked build.
func (c *client) BuildDecline(owner, name string, num int) (*Build, error) {
out := new(Build)
uri := fmt.Sprintf(pathDecline, c.addr, owner, name, num)
err := c.post(uri, nil, out)
return out, err
}
// BuildKill force kills the running build.
func (c *client) BuildKill(owner, name string, num int) error {
uri := fmt.Sprintf(pathBuild, c.addr, owner, name, num)
err := c.delete(uri)
return err
}
// BuildLogs returns the build logs for the specified job.
func (c *client) BuildLogs(owner, name string, num, job int) (io.ReadCloser, error) {
return nil, errors.New("Method not implemented")
}
// Deploy triggers a deployment for an existing build using the
// specified target environment.
func (c *client) Deploy(owner, name string, num int, env string, params map[string]string) (*Build, error) {
out := new(Build)
val := mapValues(params)
val.Set("event", "deployment")
val.Set("deploy_to", env)
uri := fmt.Sprintf(pathBuild, c.addr, owner, name, num)
err := c.post(uri+"?"+val.Encode(), nil, out)
return out, err
}
// LogsPurge purges the build logs for the specified build.
func (c *client) LogsPurge(owner, name string, num int) error {
uri := fmt.Sprintf(pathLogPurge, c.addr, owner, name, num)
err := c.delete(uri)
return err
}
// Registry returns a registry by hostname.
func (c *client) Registry(owner, name, hostname string) (*Registry, error) {
out := new(Registry)
uri := fmt.Sprintf(pathRepoRegistry, c.addr, owner, name, hostname)
err := c.get(uri, out)
return out, err
}
// RegistryList returns a list of all repository registries.
func (c *client) RegistryList(owner string, name string) ([]*Registry, error) {
var out []*Registry
uri := fmt.Sprintf(pathRepoRegistries, c.addr, owner, name)
err := c.get(uri, &out)
return out, err
}
// RegistryCreate creates a registry.
func (c *client) RegistryCreate(owner, name string, in *Registry) (*Registry, error) {
out := new(Registry)
uri := fmt.Sprintf(pathRepoRegistries, c.addr, owner, name)
err := c.post(uri, in, out)
return out, err
}
// RegistryUpdate updates a registry.
func (c *client) RegistryUpdate(owner, name string, in *Registry) (*Registry, error) {
out := new(Registry)
uri := fmt.Sprintf(pathRepoRegistry, c.addr, owner, name, in.Address)
err := c.patch(uri, in, out)
return out, err
}
// RegistryDelete deletes a registry.
func (c *client) RegistryDelete(owner, name, hostname string) error {
uri := fmt.Sprintf(pathRepoRegistry, c.addr, owner, name, hostname)
return c.delete(uri)
}
// Secret returns a secret by name.
func (c *client) Secret(owner, name, secret string) (*Secret, error) {
out := new(Secret)
uri := fmt.Sprintf(pathRepoSecret, c.addr, owner, name, secret)
err := c.get(uri, out)
return out, err
}
// SecretList returns a list of all repository secrets.
func (c *client) SecretList(owner string, name string) ([]*Secret, error) {
var out []*Secret
uri := fmt.Sprintf(pathRepoSecrets, c.addr, owner, name)
err := c.get(uri, &out)
return out, err
}
// SecretCreate creates a secret.
func (c *client) SecretCreate(owner, name string, in *Secret) (*Secret, error) {
out := new(Secret)
uri := fmt.Sprintf(pathRepoSecrets, c.addr, owner, name)
err := c.post(uri, in, out)
return out, err
}
// SecretUpdate updates a secret.
func (c *client) SecretUpdate(owner, name string, in *Secret) (*Secret, error) {
out := new(Secret)
uri := fmt.Sprintf(pathRepoSecret, c.addr, owner, name, in.Name)
err := c.patch(uri, in, out)
return out, err
}
// SecretDelete deletes a secret.
func (c *client) SecretDelete(owner, name, secret string) error {
uri := fmt.Sprintf(pathRepoSecret, c.addr, owner, name, secret)
return c.delete(uri)
}
// Server returns the named server's details.
func (c *client) Server(name string) (*Server, error) {
out := new(Server)
uri := fmt.Sprintf(pathServer, c.addr, name)
err := c.get(uri, &out)
return out, err
}
// ServerList returns a list of all active build servers.
func (c *client) ServerList() ([]*Server, error) {
var out []*Server
uri := fmt.Sprintf(pathServers, c.addr)
err := c.get(uri, &out)
return out, err
}
// ServerCreate creates a new server.
func (c *client) ServerCreate() (*Server, error) {
out := new(Server)
uri := fmt.Sprintf(pathServers, c.addr)
err := c.post(uri, nil, out)
return out, err
}
// ServerDelete terminates a server.
func (c *client) ServerDelete(name string) error {
uri := fmt.Sprintf(pathServer, c.addr, name)
return c.delete(uri)
}
// AutoscalePause pauses the autoscaler.
func (c *client) AutoscalePause() error {
uri := fmt.Sprintf(pathScalerPause, c.addr)
return c.post(uri, nil, nil)
}
// AutoscaleResume resumes the autoscaler.
func (c *client) AutoscaleResume() error {
uri := fmt.Sprintf(pathScalerResume, c.addr)
return c.post(uri, nil, nil)
}
// AutoscaleVersion returns the autoscaler version.
func (c *client) AutoscaleVersion() (*Version, error) {
out := new(Version)
uri := fmt.Sprintf(pathVersion, c.addr)
err := c.get(uri, out)
return out, err
}
//
// http request helper functions
//
// helper function for making an http GET request.
func (c *client) get(rawurl string, out interface{}) error {
return c.do(rawurl, "GET", nil, out)
}
// helper function for making an http POST request.
func (c *client) post(rawurl string, in, out interface{}) error {
return c.do(rawurl, "POST", in, out)
}
// helper function for making an http PUT request.
func (c *client) put(rawurl string, in, out interface{}) error {
return c.do(rawurl, "PUT", in, out)
}
// helper function for making an http PATCH request.
func (c *client) patch(rawurl string, in, out interface{}) error {
return c.do(rawurl, "PATCH", in, out)
}
// helper function for making an http DELETE request.
func (c *client) delete(rawurl string) error {
return c.do(rawurl, "DELETE", nil, nil)
}
// helper function to make an http request
func (c *client) do(rawurl, method string, in, out interface{}) error {
body, err := c.open(rawurl, method, in, out)
if err != nil {
return err
}
defer body.Close()
if out != nil {
return json.NewDecoder(body).Decode(out)
}
return nil
}
// helper function to open an http request
func (c *client) open(rawurl, method string, in, out interface{}) (io.ReadCloser, error) {
uri, err := url.Parse(rawurl)
if err != nil {
return nil, err
}
req, err := http.NewRequest(method, uri.String(), nil)
if err != nil {
return nil, err
}
if in != nil {
decoded, derr := json.Marshal(in)
if derr != nil {
return nil, derr
}
buf := bytes.NewBuffer(decoded)
req.Body = ioutil.NopCloser(buf)
req.ContentLength = int64(len(decoded))
req.Header.Set("Content-Length", strconv.Itoa(len(decoded)))
req.Header.Set("Content-Type", "application/json")
}
resp, err := c.client.Do(req)
if err != nil {
return nil, err
}
if resp.StatusCode > http.StatusPartialContent {
defer resp.Body.Close()
out, _ := ioutil.ReadAll(resp.Body)
return nil, fmt.Errorf("client error %d: %s", resp.StatusCode, string(out))
}
return resp.Body, nil
}
// mapValues converts a map to url.Values
func mapValues(params map[string]string) url.Values {
values := url.Values{}
for key, val := range params {
values.Add(key, val)
}
return values
}

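A minimal sketch (host and token are placeholders) of the plumbing the CLI commands above rely on: an oauth2-authenticated http.Client handed to drone.NewClient, then one of the typed helpers.

```Go
package main

import (
	"fmt"

	"github.com/drone/drone-go/drone"
	"golang.org/x/oauth2"
)

func main() {
	// Wrap the access token in an oauth2 transport and hand it to the client.
	config := new(oauth2.Config)
	auther := config.Client(oauth2.NoContext, &oauth2.Token{AccessToken: "<token>"})
	client := drone.NewClient("https://drone.example.com", auther)

	// Equivalent of what `drone secret add` does after parsing its flags.
	secret := &drone.Secret{
		Name:   "docker_password",
		Value:  "super-secret",
		Events: []string{drone.EventPush, drone.EventTag},
	}
	if _, err := client.SecretCreate("octocat", "hello-world", secret); err != nil {
		fmt.Println(err)
	}
}
```
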
21
vendor/github.com/drone/drone-go/drone/const.go generated vendored Normal file
View File

@ -0,0 +1,21 @@
package drone
// Event values.
const (
EventPush = "push"
EventPull = "pull_request"
EventTag = "tag"
EventDeploy = "deployment"
)
// Status values.
const (
StatusBlocked = "blocked"
StatusSkipped = "skipped"
StatusPending = "pending"
StatusRunning = "running"
StatusSuccess = "success"
StatusFailure = "failure"
StatusKilled = "killed"
StatusError = "error"
)

146
vendor/github.com/drone/drone-go/drone/interface.go generated vendored Normal file
View File

@ -0,0 +1,146 @@
package drone
import "net/http"
// Client is used to communicate with a Drone server.
type Client interface {
// SetClient sets the http.Client.
SetClient(*http.Client)
// SetAddress sets the server address.
SetAddress(string)
// Self returns the currently authenticated user.
Self() (*User, error)
// User returns a user by login.
User(string) (*User, error)
// UserList returns a list of all registered users.
UserList() ([]*User, error)
// UserPost creates a new user account.
UserPost(*User) (*User, error)
// UserPatch updates a user account.
UserPatch(*User) (*User, error)
// UserDel deletes a user account.
UserDel(string) error
// Repo returns a repository by name.
Repo(string, string) (*Repo, error)
// RepoList returns a list of all repositories to which the user has explicit
// access in the host system.
RepoList() ([]*Repo, error)
// RepoListOpts returns a list of all repositories to which the user has
// explicit access in the host system.
RepoListOpts(bool, bool) ([]*Repo, error)
// RepoPost activates a repository.
RepoPost(string, string) (*Repo, error)
// RepoPatch updates a repository.
RepoPatch(string, string, *RepoPatch) (*Repo, error)
// RepoMove moves the repository
RepoMove(string, string, string) error
// RepoChown updates a repository owner.
RepoChown(string, string) (*Repo, error)
// RepoRepair repairs the repository hooks.
RepoRepair(string, string) error
// RepoDel deletes a repository.
RepoDel(string, string) error
// Build returns a repository build by number.
Build(string, string, int) (*Build, error)
// BuildLast returns the latest repository build by branch. An empty branch
// will result in the default branch.
BuildLast(string, string, string) (*Build, error)
// BuildList returns a list of recent builds for the
// specified repository.
BuildList(string, string) ([]*Build, error)
// BuildQueue returns a list of enqueued builds.
BuildQueue() ([]*Activity, error)
// BuildStart re-starts a stopped build.
BuildStart(string, string, int, map[string]string) (*Build, error)
// BuildStop stops the specified running job for the given build.
BuildStop(string, string, int, int) error
// BuildApprove approves a blocked build.
BuildApprove(string, string, int) (*Build, error)
// BuildDecline declines a blocked build.
BuildDecline(string, string, int) (*Build, error)
// BuildKill force kills the running build.
BuildKill(string, string, int) error
// Deploy triggers a deployment for an existing build using the specified
// target environment.
Deploy(string, string, int, string, map[string]string) (*Build, error)
// LogsPurge purges the build logs for the specified build.
LogsPurge(string, string, int) error
// Registry returns a registry by hostname.
Registry(owner, name, hostname string) (*Registry, error)
// RegistryList returns a list of all repository registries.
RegistryList(owner, name string) ([]*Registry, error)
// RegistryCreate creates a registry.
RegistryCreate(owner, name string, registry *Registry) (*Registry, error)
// RegistryUpdate updates a registry.
RegistryUpdate(owner, name string, registry *Registry) (*Registry, error)
// RegistryDelete deletes a registry.
RegistryDelete(owner, name, hostname string) error
// Secret returns a secret by name.
Secret(owner, name, secret string) (*Secret, error)
// SecretList returns a list of all repository secrets.
SecretList(owner, name string) ([]*Secret, error)
// SecretCreate creates a secret.
SecretCreate(owner, name string, secret *Secret) (*Secret, error)
// SecretUpdate updates a secret.
SecretUpdate(owner, name string, secret *Secret) (*Secret, error)
// SecretDelete deletes a secret.
SecretDelete(owner, name, secret string) error
// Server returns the named server's details.
Server(name string) (*Server, error)
// ServerList returns a list of all active build servers.
ServerList() ([]*Server, error)
// ServerCreate creates a new server.
ServerCreate() (*Server, error)
// ServerDelete terminates a server.
ServerDelete(name string) error
// AutoscalePause pauses the autoscaler.
AutoscalePause() error
// AutoscaleResume resumes the autoscaler.
AutoscaleResume() error
// AutoscaleVersion returns the autoscaler version.
AutoscaleVersion() (*Version, error)
}

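Because the API is exposed as an interface, it can also be faked in tests; a sketch of a hypothetical test helper (not part of this change) that embeds drone.Client and stubs only what a test needs:

```Go
package fake

import "github.com/drone/drone-go/drone"

// Client embeds drone.Client so only the methods a test actually calls need
// to be stubbed; calling any other method panics on the nil embedded value.
type Client struct {
	drone.Client
	Repos []*drone.Repo
}

// RepoList returns the canned repository list instead of hitting a server.
func (c *Client) RepoList() ([]*drone.Repo, error) {
	return c.Repos, nil
}
```
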
172
vendor/github.com/drone/drone-go/drone/types.go generated vendored Normal file
View File

@ -0,0 +1,172 @@
package drone
type (
// User represents a user account.
User struct {
ID int64 `json:"id"`
Login string `json:"login"`
Email string `json:"email"`
Avatar string `json:"avatar_url"`
Active bool `json:"active"`
Admin bool `json:"admin"`
}
// Repo represents a repository.
Repo struct {
ID int64 `json:"id,omitempty"`
Owner string `json:"owner"`
Name string `json:"name"`
FullName string `json:"full_name"`
Avatar string `json:"avatar_url,omitempty"`
Link string `json:"link_url,omitempty"`
Kind string `json:"scm,omitempty"`
Clone string `json:"clone_url,omitempty"`
Branch string `json:"default_branch,omitempty"`
Timeout int64 `json:"timeout,omitempty"`
Visibility string `json:"visibility"`
IsPrivate bool `json:"private,omitempty"`
IsTrusted bool `json:"trusted"`
IsStarred bool `json:"starred,omitempty"`
IsGated bool `json:"gated"`
AllowPull bool `json:"allow_pr"`
AllowPush bool `json:"allow_push"`
AllowDeploy bool `json:"allow_deploys"`
AllowTag bool `json:"allow_tags"`
Config string `json:"config_file"`
}
// RepoPatch defines a repository patch request.
RepoPatch struct {
Config *string `json:"config_file,omitempty"`
IsTrusted *bool `json:"trusted,omitempty"`
IsGated *bool `json:"gated,omitempty"`
Timeout *int64 `json:"timeout,omitempty"`
Visibility *string `json:"visibility"`
AllowPull *bool `json:"allow_pr,omitempty"`
AllowPush *bool `json:"allow_push,omitempty"`
AllowDeploy *bool `json:"allow_deploy,omitempty"`
AllowTag *bool `json:"allow_tag,omitempty"`
BuildCounter *int `json:"build_counter,omitempty"`
}
// Build defines a build object.
Build struct {
ID int64 `json:"id"`
Number int `json:"number"`
Parent int `json:"parent"`
Event string `json:"event"`
Status string `json:"status"`
Error string `json:"error"`
Enqueued int64 `json:"enqueued_at"`
Created int64 `json:"created_at"`
Started int64 `json:"started_at"`
Finished int64 `json:"finished_at"`
Deploy string `json:"deploy_to"`
Commit string `json:"commit"`
Branch string `json:"branch"`
Ref string `json:"ref"`
Refspec string `json:"refspec"`
Remote string `json:"remote"`
Title string `json:"title"`
Message string `json:"message"`
Timestamp int64 `json:"timestamp"`
Sender string `json:"sender"`
Author string `json:"author"`
Avatar string `json:"author_avatar"`
Email string `json:"author_email"`
Link string `json:"link_url"`
Reviewer string `json:"reviewed_by"`
Reviewed int64 `json:"reviewed_at"`
Procs []*Proc `json:"procs,omitempty"`
}
// Proc represents a process in the build pipeline.
Proc struct {
ID int64 `json:"id"`
PID int `json:"pid"`
PPID int `json:"ppid"`
PGID int `json:"pgid"`
Name string `json:"name"`
State string `json:"state"`
Error string `json:"error,omitempty"`
ExitCode int `json:"exit_code"`
Started int64 `json:"start_time,omitempty"`
Stopped int64 `json:"end_time,omitempty"`
Machine string `json:"machine,omitempty"`
Platform string `json:"platform,omitempty"`
Environ map[string]string `json:"environ,omitempty"`
Children []*Proc `json:"children,omitempty"`
}
// Registry represents a docker registry with credentials.
Registry struct {
ID int64 `json:"id"`
Address string `json:"address"`
Username string `json:"username"`
Password string `json:"password,omitempty"`
Email string `json:"email"`
Token string `json:"token"`
}
// Secret represents a secret variable, such as a password or token.
Secret struct {
ID int64 `json:"id"`
Name string `json:"name"`
Value string `json:"value,omitempty"`
Images []string `json:"image"`
Events []string `json:"event"`
}
// Activity represents an item in the user's feed or timeline.
Activity struct {
Owner string `json:"owner"`
Name string `json:"name"`
FullName string `json:"full_name"`
Number int `json:"number,omitempty"`
Event string `json:"event,omitempty"`
Status string `json:"status,omitempty"`
Created int64 `json:"created_at,omitempty"`
Started int64 `json:"started_at,omitempty"`
Finished int64 `json:"finished_at,omitempty"`
Commit string `json:"commit,omitempty"`
Branch string `json:"branch,omitempty"`
Ref string `json:"ref,omitempty"`
Refspec string `json:"refspec,omitempty"`
Remote string `json:"remote,omitempty"`
Title string `json:"title,omitempty"`
Message string `json:"message,omitempty"`
Author string `json:"author,omitempty"`
Avatar string `json:"author_avatar,omitempty"`
Email string `json:"author_email,omitempty"`
}
// Server represents a server node.
Server struct {
ID string `json:"id"`
Provider string `json:"provider"`
State string `json:"state"`
Name string `json:"name"`
Image string `json:"image"`
Region string `json:"region"`
Size string `json:"size"`
Address string `json:"address"`
Capacity int `json:"capacity"`
Secret string `json:"secret"`
Error string `json:"error"`
CAKey []byte `json:"ca_key"`
CACert []byte `json:"ca_cert"`
TLSKey []byte `json:"tls_key"`
TLSCert []byte `json:"tls_cert"`
Created int64 `json:"created"`
Updated int64 `json:"updated"`
Started int64 `json:"started"`
Stopped int64 `json:"stopped"`
}
// Version provides system version details.
Version struct {
Source string `json:"source,omitempty"`
Version string `json:"version,omitempty"`
Commit string `json:"commit,omitempty"`
}
)
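
A small aside on RepoPatch (editorial sketch, not vendored code): every field is a pointer, so a caller expresses "change only these settings" by leaving the rest nil. The sketch below shows that pattern with the standard library's JSON encoder and the types above.

package main

import (
    "encoding/json"
    "fmt"

    "github.com/drone/drone-go/drone"
)

func main() {
    trusted := true
    timeout := int64(90)

    patch := drone.RepoPatch{
        IsTrusted: &trusted, // nil pointer fields tagged omitempty are dropped
        Timeout:   &timeout,
    }

    body, _ := json.Marshal(patch)
    // Prints {"trusted":true,"timeout":90,"visibility":null}; Visibility is
    // the one field without an omitempty tag, so it serializes as null.
    fmt.Println(string(body))
}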

28
vendor/github.com/golang/protobuf/LICENSE generated vendored Normal file

@ -0,0 +1,28 @@
Copyright 2010 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

253
vendor/github.com/golang/protobuf/proto/clone.go generated vendored Normal file

@ -0,0 +1,253 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Protocol buffer deep copy and merge.
// TODO: RawMessage.
package proto
import (
"fmt"
"log"
"reflect"
"strings"
)
// Clone returns a deep copy of a protocol buffer.
func Clone(src Message) Message {
in := reflect.ValueOf(src)
if in.IsNil() {
return src
}
out := reflect.New(in.Type().Elem())
dst := out.Interface().(Message)
Merge(dst, src)
return dst
}
// Merger is the interface representing objects that can merge messages of the same type.
type Merger interface {
// Merge merges src into this message.
// Required and optional fields that are set in src will be set to that value in dst.
// Elements of repeated fields will be appended.
//
// Merge may panic if called with a different argument type than the receiver.
Merge(src Message)
}
// generatedMerger is the custom merge method that generated protos will have.
// We must add this method since a generated Merge method will conflict with
// many existing protos that have a Merge data field already defined.
type generatedMerger interface {
XXX_Merge(src Message)
}
// Merge merges src into dst.
// Required and optional fields that are set in src will be set to that value in dst.
// Elements of repeated fields will be appended.
// Merge panics if src and dst are not the same type, or if dst is nil.
func Merge(dst, src Message) {
if m, ok := dst.(Merger); ok {
m.Merge(src)
return
}
in := reflect.ValueOf(src)
out := reflect.ValueOf(dst)
if out.IsNil() {
panic("proto: nil destination")
}
if in.Type() != out.Type() {
panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
}
if in.IsNil() {
return // Merge from nil src is a noop
}
if m, ok := dst.(generatedMerger); ok {
m.XXX_Merge(src)
return
}
mergeStruct(out.Elem(), in.Elem())
}
func mergeStruct(out, in reflect.Value) {
sprop := GetProperties(in.Type())
for i := 0; i < in.NumField(); i++ {
f := in.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
}
if emIn, err := extendable(in.Addr().Interface()); err == nil {
emOut, _ := extendable(out.Addr().Interface())
mIn, muIn := emIn.extensionsRead()
if mIn != nil {
mOut := emOut.extensionsWrite()
muIn.Lock()
mergeExtension(mOut, mIn)
muIn.Unlock()
}
}
uf := in.FieldByName("XXX_unrecognized")
if !uf.IsValid() {
return
}
uin := uf.Bytes()
if len(uin) > 0 {
out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
}
}
// mergeAny performs a merge between two values of the same type.
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
// prop is set if this is a struct field (it may be nil).
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
if in.Type() == protoMessageType {
if !in.IsNil() {
if out.IsNil() {
out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
} else {
Merge(out.Interface().(Message), in.Interface().(Message))
}
}
return
}
switch in.Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
reflect.String, reflect.Uint32, reflect.Uint64:
if !viaPtr && isProto3Zero(in) {
return
}
out.Set(in)
case reflect.Interface:
// Probably a oneof field; copy non-nil values.
if in.IsNil() {
return
}
// Allocate destination if it is not set, or set to a different type.
// Otherwise we will merge as normal.
if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
}
mergeAny(out.Elem(), in.Elem(), false, nil)
case reflect.Map:
if in.Len() == 0 {
return
}
if out.IsNil() {
out.Set(reflect.MakeMap(in.Type()))
}
// For maps with value types of *T or []byte we need to deep copy each value.
elemKind := in.Type().Elem().Kind()
for _, key := range in.MapKeys() {
var val reflect.Value
switch elemKind {
case reflect.Ptr:
val = reflect.New(in.Type().Elem().Elem())
mergeAny(val, in.MapIndex(key), false, nil)
case reflect.Slice:
val = in.MapIndex(key)
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
default:
val = in.MapIndex(key)
}
out.SetMapIndex(key, val)
}
case reflect.Ptr:
if in.IsNil() {
return
}
if out.IsNil() {
out.Set(reflect.New(in.Elem().Type()))
}
mergeAny(out.Elem(), in.Elem(), true, nil)
case reflect.Slice:
if in.IsNil() {
return
}
if in.Type().Elem().Kind() == reflect.Uint8 {
// []byte is a scalar bytes field, not a repeated field.
// Edge case: if this is in a proto3 message, a zero length
// bytes field is considered the zero value, and should not
// be merged.
if prop != nil && prop.proto3 && in.Len() == 0 {
return
}
// Make a deep copy.
// Append to []byte{} instead of []byte(nil) so that we never end up
// with a nil result.
out.SetBytes(append([]byte{}, in.Bytes()...))
return
}
n := in.Len()
if out.IsNil() {
out.Set(reflect.MakeSlice(in.Type(), 0, n))
}
switch in.Type().Elem().Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
reflect.String, reflect.Uint32, reflect.Uint64:
out.Set(reflect.AppendSlice(out, in))
default:
for i := 0; i < n; i++ {
x := reflect.Indirect(reflect.New(in.Type().Elem()))
mergeAny(x, in.Index(i), false, nil)
out.Set(reflect.Append(out, x))
}
}
case reflect.Struct:
mergeStruct(out, in)
default:
// unknown type, so not a protocol buffer
log.Printf("proto: don't know how to copy %v", in)
}
}
func mergeExtension(out, in map[int32]Extension) {
for extNum, eIn := range in {
eOut := Extension{desc: eIn.desc}
if eIn.value != nil {
v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
eOut.value = v.Interface()
}
if eIn.enc != nil {
eOut.enc = make([]byte, len(eIn.enc))
copy(eOut.enc, eIn.enc)
}
out[extNum] = eOut
}
}
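
To make Clone and Merge concrete (editorial sketch, not part of the vendored file): the example below uses wrappers.StringValue from the same golang/protobuf module as a stand-in generated message; that package is an assumption here and may not be vendored in this repository.

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
    src := &wrappers.StringValue{Value: "hello"}

    // Clone returns a deep copy; mutating it leaves src untouched.
    dup := proto.Clone(src).(*wrappers.StringValue)
    dup.Value = "changed"
    fmt.Println(src.Value, dup.Value) // hello changed

    // Merge copies set fields from src into dst.
    dst := &wrappers.StringValue{}
    proto.Merge(dst, src)
    fmt.Println(dst.Value) // hello
}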

427
vendor/github.com/golang/protobuf/proto/decode.go generated vendored Normal file

@ -0,0 +1,427 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Routines for decoding protocol buffer data to construct in-memory representations.
*/
import (
"errors"
"fmt"
"io"
)
// errOverflow is returned when an integer is too large to be represented.
var errOverflow = errors.New("proto: integer overflow")
// ErrInternalBadWireType is returned by generated code when an incorrect
// wire type is encountered. It does not get returned to user code.
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
// DecodeVarint reads a varint-encoded integer from the slice.
// It returns the integer and the number of bytes consumed, or
// zero if there is not enough data.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func DecodeVarint(buf []byte) (x uint64, n int) {
for shift := uint(0); shift < 64; shift += 7 {
if n >= len(buf) {
return 0, 0
}
b := uint64(buf[n])
n++
x |= (b & 0x7F) << shift
if (b & 0x80) == 0 {
return x, n
}
}
// The number is too large to represent in a 64-bit value.
return 0, 0
}
func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
i := p.index
l := len(p.buf)
for shift := uint(0); shift < 64; shift += 7 {
if i >= l {
err = io.ErrUnexpectedEOF
return
}
b := p.buf[i]
i++
x |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
p.index = i
return
}
}
// The number is too large to represent in a 64-bit value.
err = errOverflow
return
}
// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
i := p.index
buf := p.buf
if i >= len(buf) {
return 0, io.ErrUnexpectedEOF
} else if buf[i] < 0x80 {
p.index++
return uint64(buf[i]), nil
} else if len(buf)-i < 10 {
return p.decodeVarintSlow()
}
var b uint64
// we already checked the first byte
x = uint64(buf[i]) - 0x80
i++
b = uint64(buf[i])
i++
x += b << 7
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 7
b = uint64(buf[i])
i++
x += b << 14
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 14
b = uint64(buf[i])
i++
x += b << 21
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 21
b = uint64(buf[i])
i++
x += b << 28
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 28
b = uint64(buf[i])
i++
x += b << 35
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 35
b = uint64(buf[i])
i++
x += b << 42
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 42
b = uint64(buf[i])
i++
x += b << 49
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 49
b = uint64(buf[i])
i++
x += b << 56
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 56
b = uint64(buf[i])
i++
x += b << 63
if b&0x80 == 0 {
goto done
}
return 0, errOverflow
done:
p.index = i
return x, nil
}
// DecodeFixed64 reads a 64-bit integer from the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
// x, err already 0
i := p.index + 8
if i < 0 || i > len(p.buf) {
err = io.ErrUnexpectedEOF
return
}
p.index = i
x = uint64(p.buf[i-8])
x |= uint64(p.buf[i-7]) << 8
x |= uint64(p.buf[i-6]) << 16
x |= uint64(p.buf[i-5]) << 24
x |= uint64(p.buf[i-4]) << 32
x |= uint64(p.buf[i-3]) << 40
x |= uint64(p.buf[i-2]) << 48
x |= uint64(p.buf[i-1]) << 56
return
}
// DecodeFixed32 reads a 32-bit integer from the Buffer.
// This is the format for the
// fixed32, sfixed32, and float protocol buffer types.
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
// x, err already 0
i := p.index + 4
if i < 0 || i > len(p.buf) {
err = io.ErrUnexpectedEOF
return
}
p.index = i
x = uint64(p.buf[i-4])
x |= uint64(p.buf[i-3]) << 8
x |= uint64(p.buf[i-2]) << 16
x |= uint64(p.buf[i-1]) << 24
return
}
// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
// from the Buffer.
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
x, err = p.DecodeVarint()
if err != nil {
return
}
x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
return
}
// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
// from the Buffer.
// This is the format used for the sint32 protocol buffer type.
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
x, err = p.DecodeVarint()
if err != nil {
return
}
x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
return
}
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
n, err := p.DecodeVarint()
if err != nil {
return nil, err
}
nb := int(n)
if nb < 0 {
return nil, fmt.Errorf("proto: bad byte length %d", nb)
}
end := p.index + nb
if end < p.index || end > len(p.buf) {
return nil, io.ErrUnexpectedEOF
}
if !alloc {
// todo: check if can get more uses of alloc=false
buf = p.buf[p.index:end]
p.index += nb
return
}
buf = make([]byte, nb)
copy(buf, p.buf[p.index:])
p.index += nb
return
}
// DecodeStringBytes reads an encoded string from the Buffer.
// This is the format used for the proto2 string type.
func (p *Buffer) DecodeStringBytes() (s string, err error) {
buf, err := p.DecodeRawBytes(false)
if err != nil {
return
}
return string(buf), nil
}
// Unmarshaler is the interface representing objects that can
// unmarshal themselves. The argument points to data that may be
// overwritten, so implementations should not keep references to the
// buffer.
// Unmarshal implementations should not clear the receiver.
// Any unmarshaled data should be merged into the receiver.
// Callers of Unmarshal that do not want to retain existing data
// should Reset the receiver before calling Unmarshal.
type Unmarshaler interface {
Unmarshal([]byte) error
}
// newUnmarshaler is the interface representing objects that can
// unmarshal themselves. The semantics are identical to Unmarshaler.
//
// This exists to support protoc-gen-go generated messages.
// The proto package will stop type-asserting to this interface in the future.
//
// DO NOT DEPEND ON THIS.
type newUnmarshaler interface {
XXX_Unmarshal([]byte) error
}
// Unmarshal parses the protocol buffer representation in buf and places the
// decoded result in pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// Unmarshal resets pb before starting to unmarshal, so any
// existing data in pb is always removed. Use UnmarshalMerge
// to preserve and append to existing data.
func Unmarshal(buf []byte, pb Message) error {
pb.Reset()
if u, ok := pb.(newUnmarshaler); ok {
return u.XXX_Unmarshal(buf)
}
if u, ok := pb.(Unmarshaler); ok {
return u.Unmarshal(buf)
}
return NewBuffer(buf).Unmarshal(pb)
}
// UnmarshalMerge parses the protocol buffer representation in buf and
// writes the decoded result to pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// UnmarshalMerge merges into existing data in pb.
// Most code should use Unmarshal instead.
func UnmarshalMerge(buf []byte, pb Message) error {
if u, ok := pb.(newUnmarshaler); ok {
return u.XXX_Unmarshal(buf)
}
if u, ok := pb.(Unmarshaler); ok {
// NOTE: The history of proto has unfortunately been inconsistent about
// whether Unmarshaler should or should not implicitly clear itself.
// Some implementations do, most do not.
// Thus, calling this here may or may not do what people want.
//
// See https://github.com/golang/protobuf/issues/424
return u.Unmarshal(buf)
}
return NewBuffer(buf).Unmarshal(pb)
}
// DecodeMessage reads a count-delimited message from the Buffer.
func (p *Buffer) DecodeMessage(pb Message) error {
enc, err := p.DecodeRawBytes(false)
if err != nil {
return err
}
return NewBuffer(enc).Unmarshal(pb)
}
// DecodeGroup reads a tag-delimited group from the Buffer.
// StartGroup tag is already consumed. This function consumes
// EndGroup tag.
func (p *Buffer) DecodeGroup(pb Message) error {
b := p.buf[p.index:]
x, y := findEndGroup(b)
if x < 0 {
return io.ErrUnexpectedEOF
}
err := Unmarshal(b[:x], pb)
p.index += y
return err
}
// Unmarshal parses the protocol buffer representation in the
// Buffer and places the decoded result in pb. If the struct
// underlying pb does not match the data in the buffer, the results can be
// unpredictable.
//
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
func (p *Buffer) Unmarshal(pb Message) error {
// If the object can unmarshal itself, let it.
if u, ok := pb.(newUnmarshaler); ok {
err := u.XXX_Unmarshal(p.buf[p.index:])
p.index = len(p.buf)
return err
}
if u, ok := pb.(Unmarshaler); ok {
// NOTE: The history of proto has unfortunately been inconsistent about
// whether Unmarshaler should or should not implicitly clear itself.
// Some implementations do, most do not.
// Thus, calling this here may or may not do what people want.
//
// See https://github.com/golang/protobuf/issues/424
err := u.Unmarshal(p.buf[p.index:])
p.index = len(p.buf)
return err
}
// Slow workaround for messages that aren't Unmarshalers.
// This includes some hand-coded .pb.go files and
// bootstrap protos.
// TODO: fix all of those and then add Unmarshal to
// the Message interface. Then:
// The cast above and code below can be deleted.
// The old unmarshaler can be deleted.
// Clients can call Unmarshal directly (can already do that, actually).
var info InternalMessageInfo
err := info.Unmarshal(pb, p.buf[p.index:])
p.index = len(p.buf)
return err
}
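
A quick editorial sketch of the two varint entry points defined above: the standalone DecodeVarint helper and the stateful Buffer method, which tracks its own read position.

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
)

func main() {
    // 300 on the wire is the two-byte varint 0xAC 0x02.
    wire := []byte{0xAC, 0x02}

    x, n := proto.DecodeVarint(wire)
    fmt.Println(x, n) // 300 2

    b := proto.NewBuffer(wire)
    y, err := b.DecodeVarint()
    fmt.Println(y, err) // 300 <nil>
}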

63
vendor/github.com/golang/protobuf/proto/deprecated.go generated vendored Normal file

@ -0,0 +1,63 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2018 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
import "errors"
// Deprecated: do not use.
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
// Deprecated: do not use.
func GetStats() Stats { return Stats{} }
// Deprecated: do not use.
func MarshalMessageSet(interface{}) ([]byte, error) {
return nil, errors.New("proto: not implemented")
}
// Deprecated: do not use.
func UnmarshalMessageSet([]byte, interface{}) error {
return errors.New("proto: not implemented")
}
// Deprecated: do not use.
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
return nil, errors.New("proto: not implemented")
}
// Deprecated: do not use.
func UnmarshalMessageSetJSON([]byte, interface{}) error {
return errors.New("proto: not implemented")
}
// Deprecated: do not use.
func RegisterMessageSetType(Message, int32, string) {}

350
vendor/github.com/golang/protobuf/proto/discard.go generated vendored Normal file

@ -0,0 +1,350 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2017 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
import (
"fmt"
"reflect"
"strings"
"sync"
"sync/atomic"
)
type generatedDiscarder interface {
XXX_DiscardUnknown()
}
// DiscardUnknown recursively discards all unknown fields from this message
// and all embedded messages.
//
// When unmarshaling a message with unrecognized fields, the tags and values
// of such fields are preserved in the Message. This allows a later call to
// marshal to be able to produce a message that continues to have those
// unrecognized fields. To avoid this, DiscardUnknown is used to
// explicitly clear the unknown fields after unmarshaling.
//
// For proto2 messages, the unknown fields of message extensions are only
// discarded from messages that have been accessed via GetExtension.
func DiscardUnknown(m Message) {
if m, ok := m.(generatedDiscarder); ok {
m.XXX_DiscardUnknown()
return
}
// TODO: Dynamically populate an InternalMessageInfo for legacy messages,
// but the master branch has no implementation for InternalMessageInfo,
// so it would be more work to replicate that approach.
discardLegacy(m)
}
// DiscardUnknown recursively discards all unknown fields.
func (a *InternalMessageInfo) DiscardUnknown(m Message) {
di := atomicLoadDiscardInfo(&a.discard)
if di == nil {
di = getDiscardInfo(reflect.TypeOf(m).Elem())
atomicStoreDiscardInfo(&a.discard, di)
}
di.discard(toPointer(&m))
}
type discardInfo struct {
typ reflect.Type
initialized int32 // 0: only typ is valid, 1: everything is valid
lock sync.Mutex
fields []discardFieldInfo
unrecognized field
}
type discardFieldInfo struct {
field field // Offset of field, guaranteed to be valid
discard func(src pointer)
}
var (
discardInfoMap = map[reflect.Type]*discardInfo{}
discardInfoLock sync.Mutex
)
func getDiscardInfo(t reflect.Type) *discardInfo {
discardInfoLock.Lock()
defer discardInfoLock.Unlock()
di := discardInfoMap[t]
if di == nil {
di = &discardInfo{typ: t}
discardInfoMap[t] = di
}
return di
}
func (di *discardInfo) discard(src pointer) {
if src.isNil() {
return // Nothing to do.
}
if atomic.LoadInt32(&di.initialized) == 0 {
di.computeDiscardInfo()
}
for _, fi := range di.fields {
sfp := src.offset(fi.field)
fi.discard(sfp)
}
// For proto2 messages, only discard unknown fields in message extensions
// that have been accessed via GetExtension.
if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
// Ignore lock since DiscardUnknown is not concurrency safe.
emm, _ := em.extensionsRead()
for _, mx := range emm {
if m, ok := mx.value.(Message); ok {
DiscardUnknown(m)
}
}
}
if di.unrecognized.IsValid() {
*src.offset(di.unrecognized).toBytes() = nil
}
}
func (di *discardInfo) computeDiscardInfo() {
di.lock.Lock()
defer di.lock.Unlock()
if di.initialized != 0 {
return
}
t := di.typ
n := t.NumField()
for i := 0; i < n; i++ {
f := t.Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
dfi := discardFieldInfo{field: toField(&f)}
tf := f.Type
// Unwrap tf to get its most basic type.
var isPointer, isSlice bool
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
isSlice = true
tf = tf.Elem()
}
if tf.Kind() == reflect.Ptr {
isPointer = true
tf = tf.Elem()
}
if isPointer && isSlice && tf.Kind() != reflect.Struct {
panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
}
switch tf.Kind() {
case reflect.Struct:
switch {
case !isPointer:
panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
case isSlice: // E.g., []*pb.T
di := getDiscardInfo(tf)
dfi.discard = func(src pointer) {
sps := src.getPointerSlice()
for _, sp := range sps {
if !sp.isNil() {
di.discard(sp)
}
}
}
default: // E.g., *pb.T
di := getDiscardInfo(tf)
dfi.discard = func(src pointer) {
sp := src.getPointer()
if !sp.isNil() {
di.discard(sp)
}
}
}
case reflect.Map:
switch {
case isPointer || isSlice:
panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
default: // E.g., map[K]V
if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
dfi.discard = func(src pointer) {
sm := src.asPointerTo(tf).Elem()
if sm.Len() == 0 {
return
}
for _, key := range sm.MapKeys() {
val := sm.MapIndex(key)
DiscardUnknown(val.Interface().(Message))
}
}
} else {
dfi.discard = func(pointer) {} // Noop
}
}
case reflect.Interface:
// Must be oneof field.
switch {
case isPointer || isSlice:
panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
default: // E.g., interface{}
// TODO: Make this faster?
dfi.discard = func(src pointer) {
su := src.asPointerTo(tf).Elem()
if !su.IsNil() {
sv := su.Elem().Elem().Field(0)
if sv.Kind() == reflect.Ptr && sv.IsNil() {
return
}
switch sv.Type().Kind() {
case reflect.Ptr: // Proto struct (e.g., *T)
DiscardUnknown(sv.Interface().(Message))
}
}
}
}
default:
continue
}
di.fields = append(di.fields, dfi)
}
di.unrecognized = invalidField
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
if f.Type != reflect.TypeOf([]byte{}) {
panic("expected XXX_unrecognized to be of type []byte")
}
di.unrecognized = toField(&f)
}
atomic.StoreInt32(&di.initialized, 1)
}
func discardLegacy(m Message) {
v := reflect.ValueOf(m)
if v.Kind() != reflect.Ptr || v.IsNil() {
return
}
v = v.Elem()
if v.Kind() != reflect.Struct {
return
}
t := v.Type()
for i := 0; i < v.NumField(); i++ {
f := t.Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
vf := v.Field(i)
tf := f.Type
// Unwrap tf to get its most basic type.
var isPointer, isSlice bool
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
isSlice = true
tf = tf.Elem()
}
if tf.Kind() == reflect.Ptr {
isPointer = true
tf = tf.Elem()
}
if isPointer && isSlice && tf.Kind() != reflect.Struct {
panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
}
switch tf.Kind() {
case reflect.Struct:
switch {
case !isPointer:
panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
case isSlice: // E.g., []*pb.T
for j := 0; j < vf.Len(); j++ {
discardLegacy(vf.Index(j).Interface().(Message))
}
default: // E.g., *pb.T
discardLegacy(vf.Interface().(Message))
}
case reflect.Map:
switch {
case isPointer || isSlice:
panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
default: // E.g., map[K]V
tv := vf.Type().Elem()
if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
for _, key := range vf.MapKeys() {
val := vf.MapIndex(key)
discardLegacy(val.Interface().(Message))
}
}
}
case reflect.Interface:
// Must be oneof field.
switch {
case isPointer || isSlice:
panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
default: // E.g., test_proto.isCommunique_Union interface
if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
vf = vf.Elem() // E.g., *test_proto.Communique_Msg
if !vf.IsNil() {
vf = vf.Elem() // E.g., test_proto.Communique_Msg
vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
if vf.Kind() == reflect.Ptr {
discardLegacy(vf.Interface().(Message))
}
}
}
}
}
}
if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
if vf.Type() != reflect.TypeOf([]byte{}) {
panic("expected XXX_unrecognized to be of type []byte")
}
vf.Set(reflect.ValueOf([]byte(nil)))
}
// For proto2 messages, only discard unknown fields in message extensions
// that have been accessed via GetExtension.
if em, err := extendable(m); err == nil {
// Ignore lock since discardLegacy is not concurrency safe.
emm, _ := em.extensionsRead()
for _, mx := range emm {
if m, ok := mx.value.(Message); ok {
discardLegacy(m)
}
}
}
}
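
An editorial sketch of DiscardUnknown in action. It assumes wrappers.StringValue (same module, possibly not vendored here) and relies on the behaviour described above: unknown fields survive Unmarshal and are re-emitted by Marshal until they are explicitly discarded, so the byte counts below should come out as 6 and 4 with this version of the library.

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
    // Field 1 ("value") = "hi", plus an unrecognized varint field 2 = 7.
    raw := []byte{0x0a, 0x02, 'h', 'i', 0x10, 0x07}

    msg := &wrappers.StringValue{}
    if err := proto.Unmarshal(raw, msg); err != nil {
        panic(err)
    }

    before, _ := proto.Marshal(msg) // unknown field is retained and re-emitted
    proto.DiscardUnknown(msg)
    after, _ := proto.Marshal(msg) // unknown field is gone

    fmt.Println(len(before), len(after)) // 6 4
}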

203
vendor/github.com/golang/protobuf/proto/encode.go generated vendored Normal file

@ -0,0 +1,203 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Routines for encoding data into the wire format for protocol buffers.
*/
import (
"errors"
"reflect"
)
var (
// errRepeatedHasNil is the error returned if Marshal is called with
// a struct with a repeated field containing a nil element.
errRepeatedHasNil = errors.New("proto: repeated field has nil element")
// errOneofHasNil is the error returned if Marshal is called with
// a struct with a oneof field containing a nil element.
errOneofHasNil = errors.New("proto: oneof field has nil value")
// ErrNil is the error returned if Marshal is called with nil.
ErrNil = errors.New("proto: Marshal called with nil")
// ErrTooLarge is the error returned if Marshal is called with a
// message that encodes to >2GB.
ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
)
// The fundamental encoders that put bytes on the wire.
// Those that take integer types all accept uint64 and are
// therefore of type valueEncoder.
const maxVarintBytes = 10 // maximum length of a varint
// EncodeVarint returns the varint encoding of x.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
// Not used by the package itself, but helpful to clients
// wishing to use the same encoding.
func EncodeVarint(x uint64) []byte {
var buf [maxVarintBytes]byte
var n int
for n = 0; x > 127; n++ {
buf[n] = 0x80 | uint8(x&0x7F)
x >>= 7
}
buf[n] = uint8(x)
n++
return buf[0:n]
}
// EncodeVarint writes a varint-encoded integer to the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) EncodeVarint(x uint64) error {
for x >= 1<<7 {
p.buf = append(p.buf, uint8(x&0x7f|0x80))
x >>= 7
}
p.buf = append(p.buf, uint8(x))
return nil
}
// SizeVarint returns the varint encoding size of an integer.
func SizeVarint(x uint64) int {
switch {
case x < 1<<7:
return 1
case x < 1<<14:
return 2
case x < 1<<21:
return 3
case x < 1<<28:
return 4
case x < 1<<35:
return 5
case x < 1<<42:
return 6
case x < 1<<49:
return 7
case x < 1<<56:
return 8
case x < 1<<63:
return 9
}
return 10
}
// EncodeFixed64 writes a 64-bit integer to the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
func (p *Buffer) EncodeFixed64(x uint64) error {
p.buf = append(p.buf,
uint8(x),
uint8(x>>8),
uint8(x>>16),
uint8(x>>24),
uint8(x>>32),
uint8(x>>40),
uint8(x>>48),
uint8(x>>56))
return nil
}
// EncodeFixed32 writes a 32-bit integer to the Buffer.
// This is the format for the
// fixed32, sfixed32, and float protocol buffer types.
func (p *Buffer) EncodeFixed32(x uint64) error {
p.buf = append(p.buf,
uint8(x),
uint8(x>>8),
uint8(x>>16),
uint8(x>>24))
return nil
}
// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
// to the Buffer.
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) EncodeZigzag64(x uint64) error {
// use signed number to get arithmetic right shift.
return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
// to the Buffer.
// This is the format used for the sint32 protocol buffer type.
func (p *Buffer) EncodeZigzag32(x uint64) error {
// use signed number to get arithmetic right shift.
return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
}
// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
func (p *Buffer) EncodeRawBytes(b []byte) error {
p.EncodeVarint(uint64(len(b)))
p.buf = append(p.buf, b...)
return nil
}
// EncodeStringBytes writes an encoded string to the Buffer.
// This is the format used for the proto2 string type.
func (p *Buffer) EncodeStringBytes(s string) error {
p.EncodeVarint(uint64(len(s)))
p.buf = append(p.buf, s...)
return nil
}
// Marshaler is the interface representing objects that can marshal themselves.
type Marshaler interface {
Marshal() ([]byte, error)
}
// EncodeMessage writes the protocol buffer to the Buffer,
// prefixed by a varint-encoded length.
func (p *Buffer) EncodeMessage(pb Message) error {
siz := Size(pb)
p.EncodeVarint(uint64(siz))
return p.Marshal(pb)
}
// All protocol buffer fields are nillable, but be careful.
func isNil(v reflect.Value) bool {
switch v.Kind() {
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return v.IsNil()
}
return false
}
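
An editorial sketch of the low-level encoders above, covering both the standalone EncodeVarint/SizeVarint helpers and the Buffer methods that append mixed wire formats to a single byte slice.

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
)

func main() {
    fmt.Printf("% x\n", proto.EncodeVarint(300)) // ac 02
    fmt.Println(proto.SizeVarint(300))           // 2

    b := proto.NewBuffer(nil)
    b.EncodeVarint(300)        // ac 02
    b.EncodeFixed32(1)         // 01 00 00 00
    b.EncodeStringBytes("hi")  // 02 68 69
    fmt.Printf("% x\n", b.Bytes())
}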

301
vendor/github.com/golang/protobuf/proto/equal.go generated vendored Normal file

@ -0,0 +1,301 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Protocol buffer comparison.
package proto
import (
"bytes"
"log"
"reflect"
"strings"
)
/*
Equal returns true iff protocol buffers a and b are equal.
The arguments must both be pointers to protocol buffer structs.
Equality is defined in this way:
- Two messages are equal iff they are the same type,
corresponding fields are equal, unknown field sets
are equal, and extensions sets are equal.
- Two set scalar fields are equal iff their values are equal.
If the fields are of a floating-point type, remember that
NaN != x for all x, including NaN. If the message is defined
in a proto3 .proto file, fields are not "set"; specifically,
zero length proto3 "bytes" fields are equal (nil == {}).
- Two repeated fields are equal iff their lengths are the same,
and their corresponding elements are equal. Note a "bytes" field,
although represented by []byte, is not a repeated field and the
rule for the scalar fields described above applies.
- Two unset fields are equal.
- Two unknown field sets are equal if their current
encoded state is equal.
- Two extension sets are equal iff they have corresponding
elements that are pairwise equal.
- Two map fields are equal iff their lengths are the same,
and they contain the same set of elements. Zero-length map
fields are equal.
- Every other combination of things is not equal.
The return value is undefined if a and b are not protocol buffers.
*/
func Equal(a, b Message) bool {
if a == nil || b == nil {
return a == b
}
v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
if v1.Type() != v2.Type() {
return false
}
if v1.Kind() == reflect.Ptr {
if v1.IsNil() {
return v2.IsNil()
}
if v2.IsNil() {
return false
}
v1, v2 = v1.Elem(), v2.Elem()
}
if v1.Kind() != reflect.Struct {
return false
}
return equalStruct(v1, v2)
}
// v1 and v2 are known to have the same type.
func equalStruct(v1, v2 reflect.Value) bool {
sprop := GetProperties(v1.Type())
for i := 0; i < v1.NumField(); i++ {
f := v1.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
f1, f2 := v1.Field(i), v2.Field(i)
if f.Type.Kind() == reflect.Ptr {
if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
// both unset
continue
} else if n1 != n2 {
// set/unset mismatch
return false
}
f1, f2 = f1.Elem(), f2.Elem()
}
if !equalAny(f1, f2, sprop.Prop[i]) {
return false
}
}
if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
em2 := v2.FieldByName("XXX_InternalExtensions")
if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
return false
}
}
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
em2 := v2.FieldByName("XXX_extensions")
if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
return false
}
}
uf := v1.FieldByName("XXX_unrecognized")
if !uf.IsValid() {
return true
}
u1 := uf.Bytes()
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
return bytes.Equal(u1, u2)
}
// v1 and v2 are known to have the same type.
// prop may be nil.
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
if v1.Type() == protoMessageType {
m1, _ := v1.Interface().(Message)
m2, _ := v2.Interface().(Message)
return Equal(m1, m2)
}
switch v1.Kind() {
case reflect.Bool:
return v1.Bool() == v2.Bool()
case reflect.Float32, reflect.Float64:
return v1.Float() == v2.Float()
case reflect.Int32, reflect.Int64:
return v1.Int() == v2.Int()
case reflect.Interface:
// Probably a oneof field; compare the inner values.
n1, n2 := v1.IsNil(), v2.IsNil()
if n1 || n2 {
return n1 == n2
}
e1, e2 := v1.Elem(), v2.Elem()
if e1.Type() != e2.Type() {
return false
}
return equalAny(e1, e2, nil)
case reflect.Map:
if v1.Len() != v2.Len() {
return false
}
for _, key := range v1.MapKeys() {
val2 := v2.MapIndex(key)
if !val2.IsValid() {
// This key was not found in the second map.
return false
}
if !equalAny(v1.MapIndex(key), val2, nil) {
return false
}
}
return true
case reflect.Ptr:
// Maps may have nil values in them, so check for nil.
if v1.IsNil() && v2.IsNil() {
return true
}
if v1.IsNil() != v2.IsNil() {
return false
}
return equalAny(v1.Elem(), v2.Elem(), prop)
case reflect.Slice:
if v1.Type().Elem().Kind() == reflect.Uint8 {
// short circuit: []byte
// Edge case: if this is in a proto3 message, a zero length
// bytes field is considered the zero value.
if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
return true
}
if v1.IsNil() != v2.IsNil() {
return false
}
return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
}
if v1.Len() != v2.Len() {
return false
}
for i := 0; i < v1.Len(); i++ {
if !equalAny(v1.Index(i), v2.Index(i), prop) {
return false
}
}
return true
case reflect.String:
return v1.Interface().(string) == v2.Interface().(string)
case reflect.Struct:
return equalStruct(v1, v2)
case reflect.Uint32, reflect.Uint64:
return v1.Uint() == v2.Uint()
}
// unknown type, so not a protocol buffer
log.Printf("proto: don't know how to compare %v", v1)
return false
}
// base is the struct type that the extensions are based on.
// x1 and x2 are InternalExtensions.
func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
em1, _ := x1.extensionsRead()
em2, _ := x2.extensionsRead()
return equalExtMap(base, em1, em2)
}
func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
if len(em1) != len(em2) {
return false
}
for extNum, e1 := range em1 {
e2, ok := em2[extNum]
if !ok {
return false
}
m1 := extensionAsLegacyType(e1.value)
m2 := extensionAsLegacyType(e2.value)
if m1 == nil && m2 == nil {
// Both have only encoded form.
if bytes.Equal(e1.enc, e2.enc) {
continue
}
// The bytes are different, but the extensions might still be
// equal. We need to decode them to compare.
}
if m1 != nil && m2 != nil {
// Both are unencoded.
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
return false
}
continue
}
// At least one is encoded. To do a semantically correct comparison
// we need to unmarshal them first.
var desc *ExtensionDesc
if m := extensionMaps[base]; m != nil {
desc = m[extNum]
}
if desc == nil {
// If both have only encoded form and the bytes are the same,
// it is handled above. We get here when the bytes are different.
// We don't know how to decode it, so just compare them as byte
// slices.
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
return false
}
var err error
if m1 == nil {
m1, err = decodeExtension(e1.enc, desc)
}
if m2 == nil && err == nil {
m2, err = decodeExtension(e2.enc, desc)
}
if err != nil {
// The encoded form is invalid.
log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
return false
}
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
return false
}
}
return true
}
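
An editorial sketch of proto.Equal, again using wrappers.StringValue as an assumed stand-in message, and contrasting it with plain pointer comparison.

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
    a := &wrappers.StringValue{Value: "hi"}
    b := &wrappers.StringValue{Value: "hi"}
    c := &wrappers.StringValue{Value: "bye"}

    fmt.Println(proto.Equal(a, b)) // true: same type, fields compare equal
    fmt.Println(proto.Equal(a, c)) // false: scalar field differs
    fmt.Println(a == b)            // false: pointer identity, not field equality
}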

607
vendor/github.com/golang/protobuf/proto/extensions.go generated vendored Normal file

@ -0,0 +1,607 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Types and routines for supporting protocol buffer extensions.
*/
import (
"errors"
"fmt"
"io"
"reflect"
"strconv"
"sync"
)
// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
var ErrMissingExtension = errors.New("proto: missing extension")
// ExtensionRange represents a range of message extensions for a protocol buffer.
// Used in code generated by the protocol compiler.
type ExtensionRange struct {
Start, End int32 // both inclusive
}
// extendableProto is an interface implemented by any protocol buffer generated by the current
// proto compiler that may be extended.
type extendableProto interface {
Message
ExtensionRangeArray() []ExtensionRange
extensionsWrite() map[int32]Extension
extensionsRead() (map[int32]Extension, sync.Locker)
}
// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
// version of the proto compiler that may be extended.
type extendableProtoV1 interface {
Message
ExtensionRangeArray() []ExtensionRange
ExtensionMap() map[int32]Extension
}
// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
type extensionAdapter struct {
extendableProtoV1
}
func (e extensionAdapter) extensionsWrite() map[int32]Extension {
return e.ExtensionMap()
}
func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
return e.ExtensionMap(), notLocker{}
}
// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
type notLocker struct{}
func (n notLocker) Lock() {}
func (n notLocker) Unlock() {}
// extendable returns the extendableProto interface for the given generated proto message.
// If the proto message has the old extension format, it returns a wrapper that implements
// the extendableProto interface.
func extendable(p interface{}) (extendableProto, error) {
switch p := p.(type) {
case extendableProto:
if isNilPtr(p) {
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
}
return p, nil
case extendableProtoV1:
if isNilPtr(p) {
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
}
return extensionAdapter{p}, nil
}
// Don't allocate a specific error containing %T:
// this is the hot path for Clone and MarshalText.
return nil, errNotExtendable
}
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
func isNilPtr(x interface{}) bool {
v := reflect.ValueOf(x)
return v.Kind() == reflect.Ptr && v.IsNil()
}
// XXX_InternalExtensions is an internal representation of proto extensions.
//
// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
//
// The methods of XXX_InternalExtensions are not concurrency safe in general,
// but calls to logically read-only methods such as has and get may be executed concurrently.
type XXX_InternalExtensions struct {
// The struct must be indirect so that if a user inadvertently copies a
// generated message and its embedded XXX_InternalExtensions, they
// avoid the mayhem of a copied mutex.
//
// The mutex serializes all logically read-only operations to p.extensionMap.
// It is up to the client to ensure that write operations to p.extensionMap are
// mutually exclusive with other accesses.
p *struct {
mu sync.Mutex
extensionMap map[int32]Extension
}
}
// extensionsWrite returns the extension map, creating it on first use.
func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
if e.p == nil {
e.p = new(struct {
mu sync.Mutex
extensionMap map[int32]Extension
})
e.p.extensionMap = make(map[int32]Extension)
}
return e.p.extensionMap
}
// extensionsRead returns the extensions map for read-only use. It may be nil.
// The caller must hold the returned mutex's lock when accessing elements within the map.
func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
if e.p == nil {
return nil, nil
}
return e.p.extensionMap, &e.p.mu
}
// ExtensionDesc represents an extension specification.
// Used in generated code from the protocol compiler.
type ExtensionDesc struct {
ExtendedType Message // nil pointer to the type that is being extended
ExtensionType interface{} // nil pointer to the extension type
Field int32 // field number
Name string // fully-qualified name of extension, for text formatting
Tag string // protobuf tag style
Filename string // name of the file in which the extension is defined
}
func (ed *ExtensionDesc) repeated() bool {
t := reflect.TypeOf(ed.ExtensionType)
return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
}
// Extension represents an extension in a message.
type Extension struct {
// When an extension is stored in a message using SetExtension
// only desc and value are set. When the message is marshaled
// enc will be set to the encoded form of the message.
//
// When a message is unmarshaled and contains extensions, each
// extension will have only enc set. When such an extension is
// accessed using GetExtension (or GetExtensions) desc and value
// will be set.
desc *ExtensionDesc
// value is a concrete value for the extension field. Let the type of
// desc.ExtensionType be the "API type" and the type of Extension.value
// be the "storage type". The API type and storage type are the same except:
// * For scalars (except []byte), the API type uses *T,
// while the storage type uses T.
// * For repeated fields, the API type uses []T, while the storage type
// uses *[]T.
//
// The reason for the divergence is so that the storage type more naturally
// matches what is expected when retrieving the values through the
// protobuf reflection APIs.
//
// The value may only be populated if desc is also populated.
value interface{}
// enc is the raw bytes for the extension field.
enc []byte
}
// SetRawExtension is for testing only.
func SetRawExtension(base Message, id int32, b []byte) {
epb, err := extendable(base)
if err != nil {
return
}
extmap := epb.extensionsWrite()
extmap[id] = Extension{enc: b}
}
// isExtensionField returns true iff the given field number is in an extension range.
func isExtensionField(pb extendableProto, field int32) bool {
for _, er := range pb.ExtensionRangeArray() {
if er.Start <= field && field <= er.End {
return true
}
}
return false
}
// checkExtensionTypes checks that the given extension is valid for pb.
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
var pbi interface{} = pb
// Check the extended type.
if ea, ok := pbi.(extensionAdapter); ok {
pbi = ea.extendableProtoV1
}
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
}
// Check the range.
if !isExtensionField(pb, extension.Field) {
return errors.New("proto: bad extension number; not in declared ranges")
}
return nil
}
// extPropKey is sufficient to uniquely identify an extension.
type extPropKey struct {
base reflect.Type
field int32
}
var extProp = struct {
sync.RWMutex
m map[extPropKey]*Properties
}{
m: make(map[extPropKey]*Properties),
}
func extensionProperties(ed *ExtensionDesc) *Properties {
key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
extProp.RLock()
if prop, ok := extProp.m[key]; ok {
extProp.RUnlock()
return prop
}
extProp.RUnlock()
extProp.Lock()
defer extProp.Unlock()
// Check again.
if prop, ok := extProp.m[key]; ok {
return prop
}
prop := new(Properties)
prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
extProp.m[key] = prop
return prop
}
// HasExtension returns whether the given extension is present in pb.
func HasExtension(pb Message, extension *ExtensionDesc) bool {
// TODO: Check types, field numbers, etc.?
epb, err := extendable(pb)
if err != nil {
return false
}
extmap, mu := epb.extensionsRead()
if extmap == nil {
return false
}
mu.Lock()
_, ok := extmap[extension.Field]
mu.Unlock()
return ok
}
// ClearExtension removes the given extension from pb.
func ClearExtension(pb Message, extension *ExtensionDesc) {
epb, err := extendable(pb)
if err != nil {
return
}
// TODO: Check types, field numbers, etc.?
extmap := epb.extensionsWrite()
delete(extmap, extension.Field)
}
// GetExtension retrieves a proto2 extended field from pb.
//
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
// then GetExtension parses the encoded field and returns a Go value of the specified type.
// If the field is not present, then the default value is returned (if one is specified),
// otherwise ErrMissingExtension is reported.
//
// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
// then GetExtension returns the raw encoded bytes of the field extension.
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
epb, err := extendable(pb)
if err != nil {
return nil, err
}
if extension.ExtendedType != nil {
// can only check type if this is a complete descriptor
if err := checkExtensionTypes(epb, extension); err != nil {
return nil, err
}
}
emap, mu := epb.extensionsRead()
if emap == nil {
return defaultExtensionValue(extension)
}
mu.Lock()
defer mu.Unlock()
e, ok := emap[extension.Field]
if !ok {
// defaultExtensionValue returns the default value or
// ErrMissingExtension if there is no default.
return defaultExtensionValue(extension)
}
if e.value != nil {
// Already decoded. Check the descriptor, though.
if e.desc != extension {
// This shouldn't happen. If it does, it means that
// GetExtension was called twice with two different
// descriptors with the same field number.
return nil, errors.New("proto: descriptor conflict")
}
return extensionAsLegacyType(e.value), nil
}
if extension.ExtensionType == nil {
// incomplete descriptor
return e.enc, nil
}
v, err := decodeExtension(e.enc, extension)
if err != nil {
return nil, err
}
// Remember the decoded version and drop the encoded version.
// That way it is safe to mutate what we return.
e.value = extensionAsStorageType(v)
e.desc = extension
e.enc = nil
emap[extension.Field] = e
return extensionAsLegacyType(e.value), nil
}
// defaultExtensionValue returns the default value for extension.
// If no default for an extension is defined ErrMissingExtension is returned.
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
if extension.ExtensionType == nil {
// incomplete descriptor, so no default
return nil, ErrMissingExtension
}
t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
sf, _, err := fieldDefault(t, props)
if err != nil {
return nil, err
}
if sf == nil || sf.value == nil {
// There is no default value.
return nil, ErrMissingExtension
}
if t.Kind() != reflect.Ptr {
		// We do not need to return a Ptr; we can directly return sf.value.
return sf.value, nil
}
// We need to return an interface{} that is a pointer to sf.value.
value := reflect.New(t).Elem()
value.Set(reflect.New(value.Type().Elem()))
if sf.kind == reflect.Int32 {
// We may have an int32 or an enum, but the underlying data is int32.
		// Since we can't set an int32 into a non-int32 reflect.Value directly,
		// set it as an int32.
value.Elem().SetInt(int64(sf.value.(int32)))
} else {
value.Elem().Set(reflect.ValueOf(sf.value))
}
return value.Interface(), nil
}
// decodeExtension decodes an extension encoded in b.
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
t := reflect.TypeOf(extension.ExtensionType)
unmarshal := typeUnmarshaler(t, extension.Tag)
// t is a pointer to a struct, pointer to basic type or a slice.
// Allocate space to store the pointer/slice.
value := reflect.New(t).Elem()
var err error
for {
x, n := decodeVarint(b)
if n == 0 {
return nil, io.ErrUnexpectedEOF
}
b = b[n:]
wire := int(x) & 7
b, err = unmarshal(b, valToPointer(value.Addr()), wire)
if err != nil {
return nil, err
}
if len(b) == 0 {
break
}
}
return value.Interface(), nil
}
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
// The returned slice has the same length as es; missing extensions will appear as nil elements.
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
epb, err := extendable(pb)
if err != nil {
return nil, err
}
extensions = make([]interface{}, len(es))
for i, e := range es {
extensions[i], err = GetExtension(epb, e)
if err == ErrMissingExtension {
err = nil
}
if err != nil {
return
}
}
return
}
// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
// just the Field field, which defines the extension's field number.
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
epb, err := extendable(pb)
if err != nil {
return nil, err
}
registeredExtensions := RegisteredExtensions(pb)
emap, mu := epb.extensionsRead()
if emap == nil {
return nil, nil
}
mu.Lock()
defer mu.Unlock()
extensions := make([]*ExtensionDesc, 0, len(emap))
for extid, e := range emap {
desc := e.desc
if desc == nil {
desc = registeredExtensions[extid]
if desc == nil {
desc = &ExtensionDesc{Field: extid}
}
}
extensions = append(extensions, desc)
}
return extensions, nil
}
// SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
epb, err := extendable(pb)
if err != nil {
return err
}
if err := checkExtensionTypes(epb, extension); err != nil {
return err
}
typ := reflect.TypeOf(extension.ExtensionType)
if typ != reflect.TypeOf(value) {
return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
}
// nil extension values need to be caught early, because the
// encoder can't distinguish an ErrNil due to a nil extension
// from an ErrNil due to a missing field. Extensions are
// always optional, so the encoder would just swallow the error
// and drop all the extensions from the encoded message.
if reflect.ValueOf(value).IsNil() {
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
}
extmap := epb.extensionsWrite()
extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
return nil
}
// ClearAllExtensions clears all extensions from pb.
func ClearAllExtensions(pb Message) {
epb, err := extendable(pb)
if err != nil {
return
}
m := epb.extensionsWrite()
for k := range m {
delete(m, k)
}
}
// A global registry of extensions.
// The generated code will register the generated descriptors by calling RegisterExtension.
var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
// RegisterExtension is called from the generated code.
func RegisterExtension(desc *ExtensionDesc) {
st := reflect.TypeOf(desc.ExtendedType).Elem()
m := extensionMaps[st]
if m == nil {
m = make(map[int32]*ExtensionDesc)
extensionMaps[st] = m
}
if _, ok := m[desc.Field]; ok {
panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
}
m[desc.Field] = desc
}
// RegisteredExtensions returns a map of the registered extensions of a
// protocol buffer struct, indexed by the extension number.
// The argument pb should be a nil pointer to the struct type.
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
return extensionMaps[reflect.TypeOf(pb).Elem()]
}
// extensionAsLegacyType converts a value in the storage type to the API type.
// See Extension.value.
func extensionAsLegacyType(v interface{}) interface{} {
switch rv := reflect.ValueOf(v); rv.Kind() {
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
// Represent primitive types as a pointer to the value.
rv2 := reflect.New(rv.Type())
rv2.Elem().Set(rv)
v = rv2.Interface()
case reflect.Ptr:
// Represent slice types as the value itself.
switch rv.Type().Elem().Kind() {
case reflect.Slice:
if rv.IsNil() {
v = reflect.Zero(rv.Type().Elem()).Interface()
} else {
v = rv.Elem().Interface()
}
}
}
return v
}
// extensionAsStorageType converts a value in the API type to the storage type.
// See Extension.value.
func extensionAsStorageType(v interface{}) interface{} {
switch rv := reflect.ValueOf(v); rv.Kind() {
case reflect.Ptr:
// Represent slice types as the value itself.
switch rv.Type().Elem().Kind() {
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
if rv.IsNil() {
v = reflect.Zero(rv.Type().Elem()).Interface()
} else {
v = rv.Elem().Interface()
}
}
case reflect.Slice:
// Represent slice types as a pointer to the value.
if rv.Type().Elem().Kind() != reflect.Uint8 {
rv2 := reflect.New(rv.Type())
rv2.Elem().Set(rv)
v = rv2.Interface()
}
}
return v
}
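// Illustrative usage sketch (not part of the original vendored file), written
// from a caller's perspective with this package imported as proto:
// SetExtension, HasExtension, GetExtension and ClearExtension above form the
// public proto2 extension API. pb.Request and pb.E_Priority are hypothetical
// protoc-generated names assumed only for this example.
func exampleTagRequest(req *pb.Request) (int32, error) {
	// The value's Go type must match E_Priority.ExtensionType (*int32 here).
	if err := proto.SetExtension(req, pb.E_Priority, proto.Int32(7)); err != nil {
		return 0, err
	}
	if !proto.HasExtension(req, pb.E_Priority) {
		return 0, nil
	}
	v, err := proto.GetExtension(req, pb.E_Priority) // interface{} holding *int32
	if err != nil {
		return 0, err
	}
	return *v.(*int32), nil
}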

965
vendor/github.com/golang/protobuf/proto/lib.go generated vendored Normal file
View File

@ -0,0 +1,965 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
Package proto converts data structures to and from the wire format of
protocol buffers. It works in concert with the Go source code generated
for .proto files by the protocol compiler.
A summary of the properties of the protocol buffer interface
for a protocol buffer variable v:
- Names are turned from camel_case to CamelCase for export.
- There are no methods on v to set fields; just treat
them as structure fields.
- There are getters that return a field's value if set,
and return the field's default value if unset.
The getters work even if the receiver is a nil message.
- The zero value for a struct is its correct initialization state.
All desired fields must be set before marshaling.
- A Reset() method will restore a protobuf struct to its zero state.
- Non-repeated fields are pointers to the values; nil means unset.
That is, optional or required field int32 f becomes F *int32.
- Repeated fields are slices.
- Helper functions are available to aid the setting of fields.
msg.Foo = proto.String("hello") // set field
- Constants are defined to hold the default values of all fields that
have them. They have the form Default_StructName_FieldName.
Because the getter methods handle defaulted values,
direct use of these constants should be rare.
- Enums are given type names and maps from names to values.
Enum values are prefixed by the enclosing message's name, or by the
enum's type name if it is a top-level enum. Enum types have a String
  method, and an Enum method to assist in message construction.
- Nested messages, groups and enums have type names prefixed with the name of
the surrounding message type.
- Extensions are given descriptor names that start with E_,
followed by an underscore-delimited list of the nested messages
that contain it (if any) followed by the CamelCased name of the
extension field itself. HasExtension, ClearExtension, GetExtension
and SetExtension are functions for manipulating extensions.
- Oneof field sets are given a single field in their message,
with distinguished wrapper types for each possible field value.
- Marshal and Unmarshal are functions to encode and decode the wire format.
When the .proto file specifies `syntax="proto3"`, there are some differences:
- Non-repeated fields of non-message type are values instead of pointers.
- Enum types do not get an Enum method.
The simplest way to describe this is to see an example.
Given file test.proto, containing
package example;
enum FOO { X = 17; }
message Test {
required string label = 1;
optional int32 type = 2 [default=77];
repeated int64 reps = 3;
optional group OptionalGroup = 4 {
required string RequiredField = 5;
}
oneof union {
int32 number = 6;
string name = 7;
}
}
The resulting file, test.pb.go, is:
package example
import proto "github.com/golang/protobuf/proto"
import math "math"
type FOO int32
const (
FOO_X FOO = 17
)
var FOO_name = map[int32]string{
17: "X",
}
var FOO_value = map[string]int32{
"X": 17,
}
func (x FOO) Enum() *FOO {
p := new(FOO)
*p = x
return p
}
func (x FOO) String() string {
return proto.EnumName(FOO_name, int32(x))
}
func (x *FOO) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(FOO_value, data)
if err != nil {
return err
}
*x = FOO(value)
return nil
}
type Test struct {
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
// Types that are valid to be assigned to Union:
// *Test_Number
// *Test_Name
Union isTest_Union `protobuf_oneof:"union"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Test) Reset() { *m = Test{} }
func (m *Test) String() string { return proto.CompactTextString(m) }
func (*Test) ProtoMessage() {}
type isTest_Union interface {
isTest_Union()
}
type Test_Number struct {
Number int32 `protobuf:"varint,6,opt,name=number"`
}
type Test_Name struct {
Name string `protobuf:"bytes,7,opt,name=name"`
}
func (*Test_Number) isTest_Union() {}
func (*Test_Name) isTest_Union() {}
func (m *Test) GetUnion() isTest_Union {
if m != nil {
return m.Union
}
return nil
}
const Default_Test_Type int32 = 77
func (m *Test) GetLabel() string {
if m != nil && m.Label != nil {
return *m.Label
}
return ""
}
func (m *Test) GetType() int32 {
if m != nil && m.Type != nil {
return *m.Type
}
return Default_Test_Type
}
func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
if m != nil {
return m.Optionalgroup
}
return nil
}
type Test_OptionalGroup struct {
RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
}
func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
func (m *Test_OptionalGroup) GetRequiredField() string {
if m != nil && m.RequiredField != nil {
return *m.RequiredField
}
return ""
}
func (m *Test) GetNumber() int32 {
if x, ok := m.GetUnion().(*Test_Number); ok {
return x.Number
}
return 0
}
func (m *Test) GetName() string {
if x, ok := m.GetUnion().(*Test_Name); ok {
return x.Name
}
return ""
}
func init() {
proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
}
To create and play with a Test object:
package main
import (
"log"
"github.com/golang/protobuf/proto"
pb "./example.pb"
)
func main() {
test := &pb.Test{
Label: proto.String("hello"),
Type: proto.Int32(17),
Reps: []int64{1, 2, 3},
Optionalgroup: &pb.Test_OptionalGroup{
RequiredField: proto.String("good bye"),
},
Union: &pb.Test_Name{"fred"},
}
data, err := proto.Marshal(test)
if err != nil {
log.Fatal("marshaling error: ", err)
}
newTest := &pb.Test{}
err = proto.Unmarshal(data, newTest)
if err != nil {
log.Fatal("unmarshaling error: ", err)
}
// Now test and newTest contain the same data.
if test.GetLabel() != newTest.GetLabel() {
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
}
// Use a type switch to determine which oneof was set.
switch u := test.Union.(type) {
case *pb.Test_Number: // u.Number contains the number.
case *pb.Test_Name: // u.Name contains the string.
}
// etc.
}
*/
package proto
import (
"encoding/json"
"fmt"
"log"
"reflect"
"sort"
"strconv"
"sync"
)
// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
// Marshal reports this when a required field is not initialized.
// Unmarshal reports this when a required field is missing from the wire data.
type RequiredNotSetError struct{ field string }
func (e *RequiredNotSetError) Error() string {
if e.field == "" {
return fmt.Sprintf("proto: required field not set")
}
return fmt.Sprintf("proto: required field %q not set", e.field)
}
func (e *RequiredNotSetError) RequiredNotSet() bool {
return true
}
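// Illustrative sketch (not part of the original file), written from a caller's
// perspective with this package imported as proto. Because RequiredNotSetError
// is non-fatal, callers often tolerate it and keep the encoding; the check
// mirrors the isNonFatal helper below.
func exampleMarshalLenient(pb proto.Message) ([]byte, error) {
	data, err := proto.Marshal(pb)
	if rns, ok := err.(interface{ RequiredNotSet() bool }); ok && rns.RequiredNotSet() {
		// A required field was left unset; data may still hold a partial encoding.
		return data, nil
	}
	return data, err
}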
type invalidUTF8Error struct{ field string }
func (e *invalidUTF8Error) Error() string {
if e.field == "" {
return "proto: invalid UTF-8 detected"
}
return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
}
func (e *invalidUTF8Error) InvalidUTF8() bool {
return true
}
// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
// This error should not be exposed to the external API as such errors should
// be recreated with the field information.
var errInvalidUTF8 = &invalidUTF8Error{}
// isNonFatal reports whether the error is either a RequiredNotSet error
// or a InvalidUTF8 error.
func isNonFatal(err error) bool {
if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
return true
}
if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
return true
}
return false
}
type nonFatal struct{ E error }
// Merge merges err into nf and reports whether it was successful.
// Otherwise it returns false for any fatal non-nil errors.
func (nf *nonFatal) Merge(err error) (ok bool) {
if err == nil {
return true // not an error
}
if !isNonFatal(err) {
return false // fatal error
}
if nf.E == nil {
nf.E = err // store first instance of non-fatal error
}
return true
}
// Message is implemented by generated protocol buffer messages.
type Message interface {
Reset()
String() string
ProtoMessage()
}
// A Buffer is a buffer manager for marshaling and unmarshaling
// protocol buffers. It may be reused between invocations to
// reduce memory usage. It is not necessary to use a Buffer;
// the global functions Marshal and Unmarshal create a
// temporary Buffer and are fine for most applications.
type Buffer struct {
buf []byte // encode/decode byte stream
index int // read point
deterministic bool
}
// NewBuffer allocates a new Buffer and initializes its internal data to
// the contents of the argument slice.
func NewBuffer(e []byte) *Buffer {
return &Buffer{buf: e}
}
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
func (p *Buffer) Reset() {
p.buf = p.buf[0:0] // for reading/writing
p.index = 0 // for reading
}
// SetBuf replaces the internal buffer with the slice,
// ready for unmarshaling the contents of the slice.
func (p *Buffer) SetBuf(s []byte) {
p.buf = s
p.index = 0
}
// Bytes returns the contents of the Buffer.
func (p *Buffer) Bytes() []byte { return p.buf }
// SetDeterministic sets whether to use deterministic serialization.
//
// Deterministic serialization guarantees that for a given binary, equal
// messages will always be serialized to the same bytes. This implies:
//
// - Repeated serialization of a message will return the same bytes.
// - Different processes of the same binary (which may be executing on
// different machines) will serialize equal messages to the same bytes.
//
// Note that the deterministic serialization is NOT canonical across
// languages. It is not guaranteed to remain stable over time. It is unstable
// across different builds with schema changes due to unknown fields.
// Users who need canonical serialization (e.g., persistent storage in a
// canonical form, fingerprinting, etc.) should define their own
// canonicalization specification and implement their own serializer rather
// than relying on this API.
//
// If deterministic serialization is requested, map entries will be sorted
// by keys in lexicographic order. This is an implementation detail and
// subject to change.
func (p *Buffer) SetDeterministic(deterministic bool) {
p.deterministic = deterministic
}
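// Illustrative sketch (not part of the original file), from a caller's
// perspective with this package imported as proto. Buffer.Marshal is defined
// in another file of this package; the sketch assumes it is available.
func exampleMarshalDeterministic(pb proto.Message) ([]byte, error) {
	var buf proto.Buffer
	buf.SetDeterministic(true) // map entries are encoded in sorted key order
	if err := buf.Marshal(pb); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}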
/*
* Helper routines for simplifying the creation of optional fields of basic type.
*/
// Bool is a helper routine that allocates a new bool value
// to store v and returns a pointer to it.
func Bool(v bool) *bool {
return &v
}
// Int32 is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it.
func Int32(v int32) *int32 {
return &v
}
// Int is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it, but unlike Int32
// its argument value is an int.
func Int(v int) *int32 {
p := new(int32)
*p = int32(v)
return p
}
// Int64 is a helper routine that allocates a new int64 value
// to store v and returns a pointer to it.
func Int64(v int64) *int64 {
return &v
}
// Float32 is a helper routine that allocates a new float32 value
// to store v and returns a pointer to it.
func Float32(v float32) *float32 {
return &v
}
// Float64 is a helper routine that allocates a new float64 value
// to store v and returns a pointer to it.
func Float64(v float64) *float64 {
return &v
}
// Uint32 is a helper routine that allocates a new uint32 value
// to store v and returns a pointer to it.
func Uint32(v uint32) *uint32 {
return &v
}
// Uint64 is a helper routine that allocates a new uint64 value
// to store v and returns a pointer to it.
func Uint64(v uint64) *uint64 {
return &v
}
// String is a helper routine that allocates a new string value
// to store v and returns a pointer to it.
func String(v string) *string {
return &v
}
// EnumName is a helper function to simplify printing protocol buffer enums
// by name. Given an enum map and a value, it returns a useful string.
func EnumName(m map[int32]string, v int32) string {
s, ok := m[v]
if ok {
return s
}
return strconv.Itoa(int(v))
}
// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
// from their JSON-encoded representation. Given a map from the enum's symbolic
// names to its int values, and a byte buffer containing the JSON-encoded
// value, it returns an int32 that can be cast to the enum type by the caller.
//
// The function can deal with both JSON representations, numeric and symbolic.
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
if data[0] == '"' {
// New style: enums are strings.
var repr string
if err := json.Unmarshal(data, &repr); err != nil {
return -1, err
}
val, ok := m[repr]
if !ok {
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
}
return val, nil
}
// Old style: enums are ints.
var val int32
if err := json.Unmarshal(data, &val); err != nil {
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
}
return val, nil
}
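// Illustrative sketch (not part of the original file), from a caller's
// perspective with this package imported as proto. It reuses the generated
// FOO enum maps from the package example in the doc comment above; the pb
// package name is an assumption of the example.
func exampleFooFromJSON(data []byte) (pb.FOO, error) {
	v, err := proto.UnmarshalJSONEnum(pb.FOO_value, data, "FOO")
	if err != nil {
		return 0, err
	}
	return pb.FOO(v), nil
}
// For printing, proto.EnumName(pb.FOO_name, int32(pb.FOO_X)) returns "X".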
// DebugPrint dumps the encoded data in b in a debugging format with a header
// including the string s. Used in testing but made available for general debugging.
func (p *Buffer) DebugPrint(s string, b []byte) {
var u uint64
obuf := p.buf
index := p.index
p.buf = b
p.index = 0
depth := 0
fmt.Printf("\n--- %s ---\n", s)
out:
for {
for i := 0; i < depth; i++ {
fmt.Print(" ")
}
index := p.index
if index == len(p.buf) {
break
}
op, err := p.DecodeVarint()
if err != nil {
fmt.Printf("%3d: fetching op err %v\n", index, err)
break out
}
tag := op >> 3
wire := op & 7
switch wire {
default:
fmt.Printf("%3d: t=%3d unknown wire=%d\n",
index, tag, wire)
break out
case WireBytes:
var r []byte
r, err = p.DecodeRawBytes(false)
if err != nil {
break out
}
fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
if len(r) <= 6 {
for i := 0; i < len(r); i++ {
fmt.Printf(" %.2x", r[i])
}
} else {
for i := 0; i < 3; i++ {
fmt.Printf(" %.2x", r[i])
}
fmt.Printf(" ..")
for i := len(r) - 3; i < len(r); i++ {
fmt.Printf(" %.2x", r[i])
}
}
fmt.Printf("\n")
case WireFixed32:
u, err = p.DecodeFixed32()
if err != nil {
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
case WireFixed64:
u, err = p.DecodeFixed64()
if err != nil {
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
case WireVarint:
u, err = p.DecodeVarint()
if err != nil {
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
case WireStartGroup:
fmt.Printf("%3d: t=%3d start\n", index, tag)
depth++
case WireEndGroup:
depth--
fmt.Printf("%3d: t=%3d end\n", index, tag)
}
}
if depth != 0 {
fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
}
fmt.Printf("\n")
p.buf = obuf
p.index = index
}
// SetDefaults sets unset protocol buffer fields to their default values.
// It only modifies fields that are both unset and have defined defaults.
// It recursively sets default values in any non-nil sub-messages.
func SetDefaults(pb Message) {
setDefaults(reflect.ValueOf(pb), true, false)
}
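// Illustrative sketch (not part of the original file), from a caller's
// perspective with this package imported as proto, reusing the generated Test
// type from the package example above.
func exampleWithDefaults() *pb.Test {
	t := &pb.Test{Label: proto.String("hello")}
	proto.SetDefaults(t)
	// t.Type now points at its declared default (77); fields already set are untouched.
	return t
}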
// v is a pointer to a struct.
func setDefaults(v reflect.Value, recur, zeros bool) {
v = v.Elem()
defaultMu.RLock()
dm, ok := defaults[v.Type()]
defaultMu.RUnlock()
if !ok {
dm = buildDefaultMessage(v.Type())
defaultMu.Lock()
defaults[v.Type()] = dm
defaultMu.Unlock()
}
for _, sf := range dm.scalars {
f := v.Field(sf.index)
if !f.IsNil() {
// field already set
continue
}
dv := sf.value
if dv == nil && !zeros {
// no explicit default, and don't want to set zeros
continue
}
fptr := f.Addr().Interface() // **T
// TODO: Consider batching the allocations we do here.
switch sf.kind {
case reflect.Bool:
b := new(bool)
if dv != nil {
*b = dv.(bool)
}
*(fptr.(**bool)) = b
case reflect.Float32:
f := new(float32)
if dv != nil {
*f = dv.(float32)
}
*(fptr.(**float32)) = f
case reflect.Float64:
f := new(float64)
if dv != nil {
*f = dv.(float64)
}
*(fptr.(**float64)) = f
case reflect.Int32:
// might be an enum
if ft := f.Type(); ft != int32PtrType {
// enum
f.Set(reflect.New(ft.Elem()))
if dv != nil {
f.Elem().SetInt(int64(dv.(int32)))
}
} else {
// int32 field
i := new(int32)
if dv != nil {
*i = dv.(int32)
}
*(fptr.(**int32)) = i
}
case reflect.Int64:
i := new(int64)
if dv != nil {
*i = dv.(int64)
}
*(fptr.(**int64)) = i
case reflect.String:
s := new(string)
if dv != nil {
*s = dv.(string)
}
*(fptr.(**string)) = s
case reflect.Uint8:
// exceptional case: []byte
var b []byte
if dv != nil {
db := dv.([]byte)
b = make([]byte, len(db))
copy(b, db)
} else {
b = []byte{}
}
*(fptr.(*[]byte)) = b
case reflect.Uint32:
u := new(uint32)
if dv != nil {
*u = dv.(uint32)
}
*(fptr.(**uint32)) = u
case reflect.Uint64:
u := new(uint64)
if dv != nil {
*u = dv.(uint64)
}
*(fptr.(**uint64)) = u
default:
log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
}
}
for _, ni := range dm.nested {
f := v.Field(ni)
// f is *T or []*T or map[T]*T
switch f.Kind() {
case reflect.Ptr:
if f.IsNil() {
continue
}
setDefaults(f, recur, zeros)
case reflect.Slice:
for i := 0; i < f.Len(); i++ {
e := f.Index(i)
if e.IsNil() {
continue
}
setDefaults(e, recur, zeros)
}
case reflect.Map:
for _, k := range f.MapKeys() {
e := f.MapIndex(k)
if e.IsNil() {
continue
}
setDefaults(e, recur, zeros)
}
}
}
}
var (
// defaults maps a protocol buffer struct type to a slice of the fields,
// with its scalar fields set to their proto-declared non-zero default values.
defaultMu sync.RWMutex
defaults = make(map[reflect.Type]defaultMessage)
int32PtrType = reflect.TypeOf((*int32)(nil))
)
// defaultMessage represents information about the default values of a message.
type defaultMessage struct {
scalars []scalarField
nested []int // struct field index of nested messages
}
type scalarField struct {
index int // struct field index
kind reflect.Kind // element type (the T in *T or []T)
value interface{} // the proto-declared default value, or nil
}
// t is a struct type.
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
sprop := GetProperties(t)
for _, prop := range sprop.Prop {
fi, ok := sprop.decoderTags.get(prop.Tag)
if !ok {
// XXX_unrecognized
continue
}
ft := t.Field(fi).Type
sf, nested, err := fieldDefault(ft, prop)
switch {
case err != nil:
log.Print(err)
case nested:
dm.nested = append(dm.nested, fi)
case sf != nil:
sf.index = fi
dm.scalars = append(dm.scalars, *sf)
}
}
return dm
}
// fieldDefault returns the scalarField for field type ft.
// sf will be nil if the field can not have a default.
// nestedMessage will be true if this is a nested message.
// Note that sf.index is not set on return.
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
var canHaveDefault bool
switch ft.Kind() {
case reflect.Ptr:
if ft.Elem().Kind() == reflect.Struct {
nestedMessage = true
} else {
canHaveDefault = true // proto2 scalar field
}
case reflect.Slice:
switch ft.Elem().Kind() {
case reflect.Ptr:
nestedMessage = true // repeated message
case reflect.Uint8:
canHaveDefault = true // bytes field
}
case reflect.Map:
if ft.Elem().Kind() == reflect.Ptr {
nestedMessage = true // map with message values
}
}
if !canHaveDefault {
if nestedMessage {
return nil, true, nil
}
return nil, false, nil
}
// We now know that ft is a pointer or slice.
sf = &scalarField{kind: ft.Elem().Kind()}
// scalar fields without defaults
if !prop.HasDefault {
return sf, false, nil
}
// a scalar field: either *T or []byte
switch ft.Elem().Kind() {
case reflect.Bool:
x, err := strconv.ParseBool(prop.Default)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
}
sf.value = x
case reflect.Float32:
x, err := strconv.ParseFloat(prop.Default, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
}
sf.value = float32(x)
case reflect.Float64:
x, err := strconv.ParseFloat(prop.Default, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
}
sf.value = x
case reflect.Int32:
x, err := strconv.ParseInt(prop.Default, 10, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
}
sf.value = int32(x)
case reflect.Int64:
x, err := strconv.ParseInt(prop.Default, 10, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
}
sf.value = x
case reflect.String:
sf.value = prop.Default
case reflect.Uint8:
// []byte (not *uint8)
sf.value = []byte(prop.Default)
case reflect.Uint32:
x, err := strconv.ParseUint(prop.Default, 10, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
}
sf.value = uint32(x)
case reflect.Uint64:
x, err := strconv.ParseUint(prop.Default, 10, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
}
sf.value = x
default:
return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
}
return sf, false, nil
}
// mapKeys returns a sort.Interface to be used for sorting the map keys.
// Map fields may have key types of non-float scalars, strings and enums.
func mapKeys(vs []reflect.Value) sort.Interface {
s := mapKeySorter{vs: vs}
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
if len(vs) == 0 {
return s
}
switch vs[0].Kind() {
case reflect.Int32, reflect.Int64:
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
case reflect.Uint32, reflect.Uint64:
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
case reflect.Bool:
s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
case reflect.String:
s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
default:
panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
}
return s
}
type mapKeySorter struct {
vs []reflect.Value
less func(a, b reflect.Value) bool
}
func (s mapKeySorter) Len() int { return len(s.vs) }
func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
func (s mapKeySorter) Less(i, j int) bool {
return s.less(s.vs[i], s.vs[j])
}
// isProto3Zero reports whether v is a zero proto3 value.
func isProto3Zero(v reflect.Value) bool {
switch v.Kind() {
case reflect.Bool:
return !v.Bool()
case reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint32, reflect.Uint64:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.String:
return v.String() == ""
}
return false
}
const (
// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
	// to assert that the generated code is compatible with this version of the proto package.
ProtoPackageIsVersion3 = true
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
	// to assert that the generated code is compatible with this version of the proto package.
ProtoPackageIsVersion2 = true
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
	// to assert that the generated code is compatible with this version of the proto package.
ProtoPackageIsVersion1 = true
)
// InternalMessageInfo is a type used internally by generated .pb.go files.
// This type is not intended to be used by non-generated code.
// This type is not subject to any compatibility guarantee.
type InternalMessageInfo struct {
marshal *marshalInfo
unmarshal *unmarshalInfo
merge *mergeInfo
discard *discardInfo
}

181
vendor/github.com/golang/protobuf/proto/message_set.go generated vendored Normal file
View File

@ -0,0 +1,181 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Support for message sets.
*/
import (
"errors"
)
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
// A message type ID is required for storing a protocol buffer in a message set.
var errNoMessageTypeID = errors.New("proto does not have a message type ID")
// The first two types (_MessageSet_Item and messageSet)
// model what the protocol compiler produces for the following protocol message:
// message MessageSet {
// repeated group Item = 1 {
// required int32 type_id = 2;
// required string message = 3;
// };
// }
// That is the MessageSet wire format. We can't use a proto to generate these
// because that would introduce a circular dependency between it and this package.
type _MessageSet_Item struct {
TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
Message []byte `protobuf:"bytes,3,req,name=message"`
}
type messageSet struct {
Item []*_MessageSet_Item `protobuf:"group,1,rep"`
XXX_unrecognized []byte
// TODO: caching?
}
// Make sure messageSet is a Message.
var _ Message = (*messageSet)(nil)
// messageTypeIder is an interface satisfied by a protocol buffer type
// that may be stored in a MessageSet.
type messageTypeIder interface {
MessageTypeId() int32
}
func (ms *messageSet) find(pb Message) *_MessageSet_Item {
mti, ok := pb.(messageTypeIder)
if !ok {
return nil
}
id := mti.MessageTypeId()
for _, item := range ms.Item {
if *item.TypeId == id {
return item
}
}
return nil
}
func (ms *messageSet) Has(pb Message) bool {
return ms.find(pb) != nil
}
func (ms *messageSet) Unmarshal(pb Message) error {
if item := ms.find(pb); item != nil {
return Unmarshal(item.Message, pb)
}
if _, ok := pb.(messageTypeIder); !ok {
return errNoMessageTypeID
}
return nil // TODO: return error instead?
}
func (ms *messageSet) Marshal(pb Message) error {
msg, err := Marshal(pb)
if err != nil {
return err
}
if item := ms.find(pb); item != nil {
// reuse existing item
item.Message = msg
return nil
}
mti, ok := pb.(messageTypeIder)
if !ok {
return errNoMessageTypeID
}
mtid := mti.MessageTypeId()
ms.Item = append(ms.Item, &_MessageSet_Item{
TypeId: &mtid,
Message: msg,
})
return nil
}
func (ms *messageSet) Reset() { *ms = messageSet{} }
func (ms *messageSet) String() string { return CompactTextString(ms) }
func (*messageSet) ProtoMessage() {}
// Support for the message_set_wire_format message option.
func skipVarint(buf []byte) []byte {
i := 0
for ; buf[i]&0x80 != 0; i++ {
}
return buf[i+1:]
}
// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
func unmarshalMessageSet(buf []byte, exts interface{}) error {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
m = exts.extensionsWrite()
case map[int32]Extension:
m = exts
default:
return errors.New("proto: not an extension map")
}
ms := new(messageSet)
if err := Unmarshal(buf, ms); err != nil {
return err
}
for _, item := range ms.Item {
id := *item.TypeId
msg := item.Message
// Restore wire type and field number varint, plus length varint.
// Be careful to preserve duplicate items.
b := EncodeVarint(uint64(id)<<3 | WireBytes)
if ext, ok := m[id]; ok {
// Existing data; rip off the tag and length varint
// so we join the new data correctly.
// We can assume that ext.enc is set because we are unmarshaling.
o := ext.enc[len(b):] // skip wire type and field number
_, n := DecodeVarint(o) // calculate length of length varint
o = o[n:] // skip length varint
msg = append(o, msg...) // join old data and new data
}
b = append(b, EncodeVarint(uint64(len(msg)))...)
b = append(b, msg...)
m[id] = Extension{enc: b}
}
return nil
}
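// Illustrative sketch (not part of the original file), from a caller's
// perspective with this package imported as proto. It mirrors the framing in
// unmarshalMessageSet above: each message-set item is stored in the extension
// map as an ordinary length-delimited field (tag varint with wire type
// WireBytes, length varint, then the embedded message bytes).
func exampleMessageSetItemBytes(id int32, payload []byte) []byte {
	b := proto.EncodeVarint(uint64(id)<<3 | proto.WireBytes)
	b = append(b, proto.EncodeVarint(uint64(len(payload)))...)
	return append(b, payload...)
}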

360
vendor/github.com/golang/protobuf/proto/pointer_reflect.go generated vendored Normal file
View File

@ -0,0 +1,360 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build purego appengine js
// This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
// be used on App Engine.
package proto
import (
"reflect"
"sync"
)
const unsafeAllowed = false
// A field identifies a field in a struct, accessible from a pointer.
// In this implementation, a field is identified by the sequence of field indices
// passed to reflect's FieldByIndex.
type field []int
// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
return f.Index
}
// invalidField is an invalid field identifier.
var invalidField = field(nil)
// zeroField is a noop when calling pointer.offset.
var zeroField = field([]int{})
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool { return f != nil }
// The pointer type is for the table-driven decoder.
// The implementation here uses a reflect.Value of pointer type to
// create a generic pointer. In pointer_unsafe.go we use unsafe
// instead of reflect to implement the same (but faster) interface.
type pointer struct {
v reflect.Value
}
// toPointer converts an interface of pointer type to a pointer
// that points to the same target.
func toPointer(i *Message) pointer {
return pointer{v: reflect.ValueOf(*i)}
}
// toAddrPointer converts an interface to a pointer that points to
// the interface data.
func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
v := reflect.ValueOf(*i)
u := reflect.New(v.Type())
u.Elem().Set(v)
if deref {
u = u.Elem()
}
return pointer{v: u}
}
// valToPointer converts v to a pointer. v must be of pointer type.
func valToPointer(v reflect.Value) pointer {
return pointer{v: v}
}
// offset converts from a pointer to a structure to a pointer to
// one of its fields.
func (p pointer) offset(f field) pointer {
return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
}
func (p pointer) isNil() bool {
return p.v.IsNil()
}
// grow updates the slice s in place to make it one element longer.
// s must be addressable.
// Returns the (addressable) new element.
func grow(s reflect.Value) reflect.Value {
n, m := s.Len(), s.Cap()
if n < m {
s.SetLen(n + 1)
} else {
s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
}
return s.Index(n)
}
func (p pointer) toInt64() *int64 {
return p.v.Interface().(*int64)
}
func (p pointer) toInt64Ptr() **int64 {
return p.v.Interface().(**int64)
}
func (p pointer) toInt64Slice() *[]int64 {
return p.v.Interface().(*[]int64)
}
var int32ptr = reflect.TypeOf((*int32)(nil))
func (p pointer) toInt32() *int32 {
return p.v.Convert(int32ptr).Interface().(*int32)
}
// The toInt32Ptr/Slice methods don't work because of enums.
// Instead, we must use set/get methods for the int32ptr/slice case.
/*
func (p pointer) toInt32Ptr() **int32 {
return p.v.Interface().(**int32)
}
func (p pointer) toInt32Slice() *[]int32 {
return p.v.Interface().(*[]int32)
}
*/
func (p pointer) getInt32Ptr() *int32 {
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
// raw int32 type
return p.v.Elem().Interface().(*int32)
}
// an enum
return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
}
func (p pointer) setInt32Ptr(v int32) {
// Allocate value in a *int32. Possibly convert that to a *enum.
// Then assign it to a **int32 or **enum.
// Note: we can convert *int32 to *enum, but we can't convert
// **int32 to **enum!
p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
}
// getInt32Slice copies []int32 from p as a new slice.
// This behavior differs from the implementation in pointer_unsafe.go.
func (p pointer) getInt32Slice() []int32 {
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
// raw int32 type
return p.v.Elem().Interface().([]int32)
}
// an enum
// Allocate a []int32, then assign []enum's values into it.
// Note: we can't convert []enum to []int32.
slice := p.v.Elem()
s := make([]int32, slice.Len())
for i := 0; i < slice.Len(); i++ {
s[i] = int32(slice.Index(i).Int())
}
return s
}
// setInt32Slice copies []int32 into p as a new slice.
// This behavior differs from the implementation in pointer_unsafe.go.
func (p pointer) setInt32Slice(v []int32) {
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
// raw int32 type
p.v.Elem().Set(reflect.ValueOf(v))
return
}
// an enum
// Allocate a []enum, then assign []int32's values into it.
// Note: we can't convert []enum to []int32.
slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
for i, x := range v {
slice.Index(i).SetInt(int64(x))
}
p.v.Elem().Set(slice)
}
func (p pointer) appendInt32Slice(v int32) {
grow(p.v.Elem()).SetInt(int64(v))
}
func (p pointer) toUint64() *uint64 {
return p.v.Interface().(*uint64)
}
func (p pointer) toUint64Ptr() **uint64 {
return p.v.Interface().(**uint64)
}
func (p pointer) toUint64Slice() *[]uint64 {
return p.v.Interface().(*[]uint64)
}
func (p pointer) toUint32() *uint32 {
return p.v.Interface().(*uint32)
}
func (p pointer) toUint32Ptr() **uint32 {
return p.v.Interface().(**uint32)
}
func (p pointer) toUint32Slice() *[]uint32 {
return p.v.Interface().(*[]uint32)
}
func (p pointer) toBool() *bool {
return p.v.Interface().(*bool)
}
func (p pointer) toBoolPtr() **bool {
return p.v.Interface().(**bool)
}
func (p pointer) toBoolSlice() *[]bool {
return p.v.Interface().(*[]bool)
}
func (p pointer) toFloat64() *float64 {
return p.v.Interface().(*float64)
}
func (p pointer) toFloat64Ptr() **float64 {
return p.v.Interface().(**float64)
}
func (p pointer) toFloat64Slice() *[]float64 {
return p.v.Interface().(*[]float64)
}
func (p pointer) toFloat32() *float32 {
return p.v.Interface().(*float32)
}
func (p pointer) toFloat32Ptr() **float32 {
return p.v.Interface().(**float32)
}
func (p pointer) toFloat32Slice() *[]float32 {
return p.v.Interface().(*[]float32)
}
func (p pointer) toString() *string {
return p.v.Interface().(*string)
}
func (p pointer) toStringPtr() **string {
return p.v.Interface().(**string)
}
func (p pointer) toStringSlice() *[]string {
return p.v.Interface().(*[]string)
}
func (p pointer) toBytes() *[]byte {
return p.v.Interface().(*[]byte)
}
func (p pointer) toBytesSlice() *[][]byte {
return p.v.Interface().(*[][]byte)
}
func (p pointer) toExtensions() *XXX_InternalExtensions {
return p.v.Interface().(*XXX_InternalExtensions)
}
func (p pointer) toOldExtensions() *map[int32]Extension {
return p.v.Interface().(*map[int32]Extension)
}
func (p pointer) getPointer() pointer {
return pointer{v: p.v.Elem()}
}
func (p pointer) setPointer(q pointer) {
p.v.Elem().Set(q.v)
}
func (p pointer) appendPointer(q pointer) {
grow(p.v.Elem()).Set(q.v)
}
// getPointerSlice copies []*T from p as a new []pointer.
// This behavior differs from the implementation in pointer_unsafe.go.
func (p pointer) getPointerSlice() []pointer {
if p.v.IsNil() {
return nil
}
n := p.v.Elem().Len()
s := make([]pointer, n)
for i := 0; i < n; i++ {
s[i] = pointer{v: p.v.Elem().Index(i)}
}
return s
}
// setPointerSlice copies []pointer into p as a new []*T.
// This behavior differs from the implementation in pointer_unsafe.go.
func (p pointer) setPointerSlice(v []pointer) {
if v == nil {
p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
return
}
s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
for _, p := range v {
s = reflect.Append(s, p.v)
}
p.v.Elem().Set(s)
}
// getInterfacePointer returns a pointer that points to the
// interface data of the interface pointed by p.
func (p pointer) getInterfacePointer() pointer {
if p.v.Elem().IsNil() {
return pointer{v: p.v.Elem()}
}
return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
}
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
// TODO: check that p.v.Type().Elem() == t?
return p.v
}
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
atomicLock.Lock()
defer atomicLock.Unlock()
return *p
}
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
atomicLock.Lock()
defer atomicLock.Unlock()
*p = v
}
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
atomicLock.Lock()
defer atomicLock.Unlock()
return *p
}
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
atomicLock.Lock()
defer atomicLock.Unlock()
*p = v
}
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
atomicLock.Lock()
defer atomicLock.Unlock()
return *p
}
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
atomicLock.Lock()
defer atomicLock.Unlock()
*p = v
}
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
atomicLock.Lock()
defer atomicLock.Unlock()
return *p
}
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
atomicLock.Lock()
defer atomicLock.Unlock()
*p = v
}
var atomicLock sync.Mutex

313
vendor/github.com/golang/protobuf/proto/pointer_unsafe.go generated vendored Normal file
View File

@ -0,0 +1,313 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build !purego,!appengine,!js
// This file contains the implementation of the proto field accesses using package unsafe.
package proto
import (
"reflect"
"sync/atomic"
"unsafe"
)
const unsafeAllowed = true
// A field identifies a field in a struct, accessible from a pointer.
// In this implementation, a field is identified by its byte offset from the start of the struct.
type field uintptr
// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
return field(f.Offset)
}
// invalidField is an invalid field identifier.
const invalidField = ^field(0)
// zeroField is a noop when calling pointer.offset.
const zeroField = field(0)
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool {
return f != invalidField
}
// The pointer type below is for the new table-driven encoder/decoder.
// The implementation here uses unsafe.Pointer to create a generic pointer.
// In pointer_reflect.go we use reflect instead of unsafe to implement
// the same (but slower) interface.
type pointer struct {
p unsafe.Pointer
}
// size of pointer
var ptrSize = unsafe.Sizeof(uintptr(0))
// toPointer converts an interface of pointer type to a pointer
// that points to the same target.
func toPointer(i *Message) pointer {
// Super-tricky - read pointer out of data word of interface value.
// Saves ~25ns over the equivalent:
// return valToPointer(reflect.ValueOf(*i))
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}
// toAddrPointer converts an interface to a pointer that points to
// the interface data.
func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
// Super-tricky - read or get the address of data word of interface value.
if isptr {
// The interface is of pointer type, thus it is a direct interface.
// The data word is the pointer data itself. We take its address.
p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
} else {
// The interface is not of pointer type. The data word is the pointer
// to the data.
p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}
if deref {
p.p = *(*unsafe.Pointer)(p.p)
}
return p
}
// valToPointer converts v to a pointer. v must be of pointer type.
func valToPointer(v reflect.Value) pointer {
return pointer{p: unsafe.Pointer(v.Pointer())}
}
// offset converts from a pointer to a structure to a pointer to
// one of its fields.
func (p pointer) offset(f field) pointer {
	// For safety, we should panic if !f.IsValid; however, calling panic causes
	// this to no longer be inlineable, which is a serious performance cost.
/*
if !f.IsValid() {
panic("invalid field")
}
*/
return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
}
func (p pointer) isNil() bool {
return p.p == nil
}
func (p pointer) toInt64() *int64 {
return (*int64)(p.p)
}
func (p pointer) toInt64Ptr() **int64 {
return (**int64)(p.p)
}
func (p pointer) toInt64Slice() *[]int64 {
return (*[]int64)(p.p)
}
func (p pointer) toInt32() *int32 {
return (*int32)(p.p)
}
// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
/*
func (p pointer) toInt32Ptr() **int32 {
return (**int32)(p.p)
}
func (p pointer) toInt32Slice() *[]int32 {
return (*[]int32)(p.p)
}
*/
func (p pointer) getInt32Ptr() *int32 {
return *(**int32)(p.p)
}
func (p pointer) setInt32Ptr(v int32) {
*(**int32)(p.p) = &v
}
// getInt32Slice loads a []int32 from p.
// The value returned is aliased with the original slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) getInt32Slice() []int32 {
return *(*[]int32)(p.p)
}
// setInt32Slice stores a []int32 to p.
// The value set is aliased with the input slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) setInt32Slice(v []int32) {
*(*[]int32)(p.p) = v
}
// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
func (p pointer) appendInt32Slice(v int32) {
s := (*[]int32)(p.p)
*s = append(*s, v)
}
func (p pointer) toUint64() *uint64 {
return (*uint64)(p.p)
}
func (p pointer) toUint64Ptr() **uint64 {
return (**uint64)(p.p)
}
func (p pointer) toUint64Slice() *[]uint64 {
return (*[]uint64)(p.p)
}
func (p pointer) toUint32() *uint32 {
return (*uint32)(p.p)
}
func (p pointer) toUint32Ptr() **uint32 {
return (**uint32)(p.p)
}
func (p pointer) toUint32Slice() *[]uint32 {
return (*[]uint32)(p.p)
}
func (p pointer) toBool() *bool {
return (*bool)(p.p)
}
func (p pointer) toBoolPtr() **bool {
return (**bool)(p.p)
}
func (p pointer) toBoolSlice() *[]bool {
return (*[]bool)(p.p)
}
func (p pointer) toFloat64() *float64 {
return (*float64)(p.p)
}
func (p pointer) toFloat64Ptr() **float64 {
return (**float64)(p.p)
}
func (p pointer) toFloat64Slice() *[]float64 {
return (*[]float64)(p.p)
}
func (p pointer) toFloat32() *float32 {
return (*float32)(p.p)
}
func (p pointer) toFloat32Ptr() **float32 {
return (**float32)(p.p)
}
func (p pointer) toFloat32Slice() *[]float32 {
return (*[]float32)(p.p)
}
func (p pointer) toString() *string {
return (*string)(p.p)
}
func (p pointer) toStringPtr() **string {
return (**string)(p.p)
}
func (p pointer) toStringSlice() *[]string {
return (*[]string)(p.p)
}
func (p pointer) toBytes() *[]byte {
return (*[]byte)(p.p)
}
func (p pointer) toBytesSlice() *[][]byte {
return (*[][]byte)(p.p)
}
func (p pointer) toExtensions() *XXX_InternalExtensions {
return (*XXX_InternalExtensions)(p.p)
}
func (p pointer) toOldExtensions() *map[int32]Extension {
return (*map[int32]Extension)(p.p)
}
// getPointerSlice loads []*T from p as a []pointer.
// The value returned is aliased with the original slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) getPointerSlice() []pointer {
// Super-tricky - p should point to a []*T where T is a
// message type. We load it as []pointer.
return *(*[]pointer)(p.p)
}
// setPointerSlice stores []pointer into p as a []*T.
// The value set is aliased with the input slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) setPointerSlice(v []pointer) {
// Super-tricky - p should point to a []*T where T is a
// message type. We store it as []pointer.
*(*[]pointer)(p.p) = v
}
// getPointer loads the pointer at p and returns it.
func (p pointer) getPointer() pointer {
return pointer{p: *(*unsafe.Pointer)(p.p)}
}
// setPointer stores the pointer q at p.
func (p pointer) setPointer(q pointer) {
*(*unsafe.Pointer)(p.p) = q.p
}
// append q to the slice pointed to by p.
func (p pointer) appendPointer(q pointer) {
s := (*[]unsafe.Pointer)(p.p)
*s = append(*s, q.p)
}
// getInterfacePointer returns a pointer that points to the
// interface data of the interface pointed by p.
func (p pointer) getInterfacePointer() pointer {
// Super-tricky - read pointer out of data word of interface value.
return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
}
// asPointerTo returns a reflect.Value that is a pointer to an
// object of type t stored at p.
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
return reflect.NewAt(t, p.p)
}
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
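For orientation, the sketch below is a standalone illustration (not part of the vendored file; the example struct and field names are made up) of the byte-offset field access that toField and pointer.offset implement above: a field is addressed as the struct's base pointer plus reflect.StructField.Offset.

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type example struct {
	A int32
	B string
}

func main() {
	v := example{A: 7, B: "hi"}
	// toField: identify a field by its byte offset within the struct.
	f, _ := reflect.TypeOf(v).FieldByName("B")
	// pointer.offset: base pointer + offset yields the field's address.
	bp := (*string)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + f.Offset))
	fmt.Println(*bp) // prints "hi"
}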

545
vendor/github.com/golang/protobuf/proto/properties.go generated vendored Normal file
View File

@ -0,0 +1,545 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Routines for encoding data into the wire format for protocol buffers.
*/
import (
"fmt"
"log"
"os"
"reflect"
"sort"
"strconv"
"strings"
"sync"
)
const debug bool = false
// Constants that identify the encoding of a value on the wire.
const (
WireVarint = 0
WireFixed64 = 1
WireBytes = 2
WireStartGroup = 3
WireEndGroup = 4
WireFixed32 = 5
)
// tagMap is an optimization over map[int]int for typical protocol buffer
// use-cases. Encoded protocol buffers are often in tag order with small tag
// numbers.
type tagMap struct {
fastTags []int
slowTags map[int]int
}
// tagMapFastLimit is the upper bound on the tag number that will be stored in
// the tagMap slice rather than its map.
const tagMapFastLimit = 1024
func (p *tagMap) get(t int) (int, bool) {
if t > 0 && t < tagMapFastLimit {
if t >= len(p.fastTags) {
return 0, false
}
fi := p.fastTags[t]
return fi, fi >= 0
}
fi, ok := p.slowTags[t]
return fi, ok
}
func (p *tagMap) put(t int, fi int) {
if t > 0 && t < tagMapFastLimit {
for len(p.fastTags) < t+1 {
p.fastTags = append(p.fastTags, -1)
}
p.fastTags[t] = fi
return
}
if p.slowTags == nil {
p.slowTags = make(map[int]int)
}
p.slowTags[t] = fi
}
// StructProperties represents properties for all the fields of a struct.
// decoderTags and decoderOrigNames should only be used by the decoder.
type StructProperties struct {
Prop []*Properties // properties for each field
reqCount int // required count
decoderTags tagMap // map from proto tag to struct field number
decoderOrigNames map[string]int // map from original name to struct field number
order []int // list of struct field numbers in tag order
// OneofTypes contains information about the oneof fields in this message.
// It is keyed by the original name of a field.
OneofTypes map[string]*OneofProperties
}
// OneofProperties represents information about a specific field in a oneof.
type OneofProperties struct {
Type reflect.Type // pointer to generated struct type for this oneof field
Field int // struct field number of the containing oneof in the message
Prop *Properties
}
// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
// See encode.go, (*Buffer).enc_struct.
func (sp *StructProperties) Len() int { return len(sp.order) }
func (sp *StructProperties) Less(i, j int) bool {
return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
}
func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
// Properties represents the protocol-specific behavior of a single struct field.
type Properties struct {
Name string // name of the field, for error messages
OrigName string // original name before protocol compiler (always set)
JSONName string // name to use for JSON; determined by protoc
Wire string
WireType int
Tag int
Required bool
Optional bool
Repeated bool
Packed bool // relevant for repeated primitives only
Enum string // set for enum types only
proto3 bool // whether this is known to be a proto3 field
oneof bool // whether this is a oneof field
Default string // default value
HasDefault bool // whether an explicit default was provided
stype reflect.Type // set for struct types only
sprop *StructProperties // set for struct types only
mtype reflect.Type // set for map types only
MapKeyProp *Properties // set for map types only
MapValProp *Properties // set for map types only
}
// String formats the properties in the protobuf struct field tag style.
func (p *Properties) String() string {
s := p.Wire
s += ","
s += strconv.Itoa(p.Tag)
if p.Required {
s += ",req"
}
if p.Optional {
s += ",opt"
}
if p.Repeated {
s += ",rep"
}
if p.Packed {
s += ",packed"
}
s += ",name=" + p.OrigName
if p.JSONName != p.OrigName {
s += ",json=" + p.JSONName
}
if p.proto3 {
s += ",proto3"
}
if p.oneof {
s += ",oneof"
}
if len(p.Enum) > 0 {
s += ",enum=" + p.Enum
}
if p.HasDefault {
s += ",def=" + p.Default
}
return s
}
// Parse populates p by parsing a string in the protobuf struct field tag style.
func (p *Properties) Parse(s string) {
// "bytes,49,opt,name=foo,def=hello!"
fields := strings.Split(s, ",") // breaks def=, but handled below.
if len(fields) < 2 {
fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
return
}
p.Wire = fields[0]
switch p.Wire {
case "varint":
p.WireType = WireVarint
case "fixed32":
p.WireType = WireFixed32
case "fixed64":
p.WireType = WireFixed64
case "zigzag32":
p.WireType = WireVarint
case "zigzag64":
p.WireType = WireVarint
case "bytes", "group":
p.WireType = WireBytes
// no numeric converter for non-numeric types
default:
fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
return
}
var err error
p.Tag, err = strconv.Atoi(fields[1])
if err != nil {
return
}
outer:
for i := 2; i < len(fields); i++ {
f := fields[i]
switch {
case f == "req":
p.Required = true
case f == "opt":
p.Optional = true
case f == "rep":
p.Repeated = true
case f == "packed":
p.Packed = true
case strings.HasPrefix(f, "name="):
p.OrigName = f[5:]
case strings.HasPrefix(f, "json="):
p.JSONName = f[5:]
case strings.HasPrefix(f, "enum="):
p.Enum = f[5:]
case f == "proto3":
p.proto3 = true
case f == "oneof":
p.oneof = true
case strings.HasPrefix(f, "def="):
p.HasDefault = true
p.Default = f[4:] // rest of string
if i+1 < len(fields) {
// Commas aren't escaped, and def is always last.
p.Default += "," + strings.Join(fields[i+1:], ",")
break outer
}
}
}
}
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
// setFieldProps initializes the field properties for submessages and maps.
func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
switch t1 := typ; t1.Kind() {
case reflect.Ptr:
if t1.Elem().Kind() == reflect.Struct {
p.stype = t1.Elem()
}
case reflect.Slice:
if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
p.stype = t2.Elem()
}
case reflect.Map:
p.mtype = t1
p.MapKeyProp = &Properties{}
p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
p.MapValProp = &Properties{}
vtype := p.mtype.Elem()
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
// The value type is not a message (*T) or bytes ([]byte),
// so we need encoders for the pointer to this type.
vtype = reflect.PtrTo(vtype)
}
p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
}
if p.stype != nil {
if lockGetProp {
p.sprop = GetProperties(p.stype)
} else {
p.sprop = getPropertiesLocked(p.stype)
}
}
}
var (
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
)
// Init populates the properties from a protocol buffer struct tag.
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
p.init(typ, name, tag, f, true)
}
func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
// "bytes,49,opt,def=hello!"
p.Name = name
p.OrigName = name
if tag == "" {
return
}
p.Parse(tag)
p.setFieldProps(typ, f, lockGetProp)
}
var (
propertiesMu sync.RWMutex
propertiesMap = make(map[reflect.Type]*StructProperties)
)
// GetProperties returns the list of properties for the type represented by t.
// t must represent a generated struct type of a protocol message.
func GetProperties(t reflect.Type) *StructProperties {
if t.Kind() != reflect.Struct {
panic("proto: type must have kind struct")
}
// Most calls to GetProperties in a long-running program will be
// retrieving details for types we have seen before.
propertiesMu.RLock()
sprop, ok := propertiesMap[t]
propertiesMu.RUnlock()
if ok {
return sprop
}
propertiesMu.Lock()
sprop = getPropertiesLocked(t)
propertiesMu.Unlock()
return sprop
}
type (
oneofFuncsIface interface {
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
}
oneofWrappersIface interface {
XXX_OneofWrappers() []interface{}
}
)
// getPropertiesLocked requires that propertiesMu is held.
func getPropertiesLocked(t reflect.Type) *StructProperties {
if prop, ok := propertiesMap[t]; ok {
return prop
}
prop := new(StructProperties)
// in case of recursive protos, fill this in now.
propertiesMap[t] = prop
// build properties
prop.Prop = make([]*Properties, t.NumField())
prop.order = make([]int, t.NumField())
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
p := new(Properties)
name := f.Name
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
oneof := f.Tag.Get("protobuf_oneof") // special case
if oneof != "" {
// Oneof fields don't use the traditional protobuf tag.
p.OrigName = oneof
}
prop.Prop[i] = p
prop.order[i] = i
if debug {
print(i, " ", f.Name, " ", t.String(), " ")
if p.Tag > 0 {
print(p.String())
}
print("\n")
}
}
// Re-order prop.order.
sort.Sort(prop)
var oots []interface{}
switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
case oneofFuncsIface:
_, _, _, oots = m.XXX_OneofFuncs()
case oneofWrappersIface:
oots = m.XXX_OneofWrappers()
}
if len(oots) > 0 {
// Interpret oneof metadata.
prop.OneofTypes = make(map[string]*OneofProperties)
for _, oot := range oots {
oop := &OneofProperties{
Type: reflect.ValueOf(oot).Type(), // *T
Prop: new(Properties),
}
sft := oop.Type.Elem().Field(0)
oop.Prop.Name = sft.Name
oop.Prop.Parse(sft.Tag.Get("protobuf"))
// There will be exactly one interface field that
// this new value is assignable to.
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if f.Type.Kind() != reflect.Interface {
continue
}
if !oop.Type.AssignableTo(f.Type) {
continue
}
oop.Field = i
break
}
prop.OneofTypes[oop.Prop.OrigName] = oop
}
}
// build required counts
// build tags
reqCount := 0
prop.decoderOrigNames = make(map[string]int)
for i, p := range prop.Prop {
if strings.HasPrefix(p.Name, "XXX_") {
// Internal fields should not appear in tags/origNames maps.
// They are handled specially when encoding and decoding.
continue
}
if p.Required {
reqCount++
}
prop.decoderTags.put(p.Tag, i)
prop.decoderOrigNames[p.OrigName] = i
}
prop.reqCount = reqCount
return prop
}
// A global registry of enum types.
// The generated code will register the generated maps by calling RegisterEnum.
var enumValueMaps = make(map[string]map[string]int32)
// RegisterEnum is called from the generated code to install the enum descriptor
// maps into the global table to aid parsing text format protocol buffers.
func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
if _, ok := enumValueMaps[typeName]; ok {
panic("proto: duplicate enum registered: " + typeName)
}
enumValueMaps[typeName] = valueMap
}
// EnumValueMap returns the mapping from names to integers of the
// enum type enumType, or nil if not found.
func EnumValueMap(enumType string) map[string]int32 {
return enumValueMaps[enumType]
}
// A registry of all linked message types.
// The string is a fully-qualified proto name ("pkg.Message").
var (
protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
revProtoTypes = make(map[reflect.Type]string)
)
// RegisterType is called from generated code and maps from the fully qualified
// proto name to the type (pointer to struct) of the protocol buffer.
func RegisterType(x Message, name string) {
if _, ok := protoTypedNils[name]; ok {
// TODO: Some day, make this a panic.
log.Printf("proto: duplicate proto type registered: %s", name)
return
}
t := reflect.TypeOf(x)
if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
// Generated code always calls RegisterType with nil x.
// This check is just for extra safety.
protoTypedNils[name] = x
} else {
protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
}
revProtoTypes[t] = name
}
// RegisterMapType is called from generated code and maps from the fully qualified
// proto name to the native map type of the proto map definition.
func RegisterMapType(x interface{}, name string) {
if reflect.TypeOf(x).Kind() != reflect.Map {
panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
}
if _, ok := protoMapTypes[name]; ok {
log.Printf("proto: duplicate proto type registered: %s", name)
return
}
t := reflect.TypeOf(x)
protoMapTypes[name] = t
revProtoTypes[t] = name
}
// MessageName returns the fully-qualified proto name for the given message type.
func MessageName(x Message) string {
type xname interface {
XXX_MessageName() string
}
if m, ok := x.(xname); ok {
return m.XXX_MessageName()
}
return revProtoTypes[reflect.TypeOf(x)]
}
// MessageType returns the message type (pointer to struct) for a named message.
// The type is not guaranteed to implement proto.Message if the name refers to a
// map entry.
func MessageType(name string) reflect.Type {
if t, ok := protoTypedNils[name]; ok {
return reflect.TypeOf(t)
}
return protoMapTypes[name]
}
// A registry of all linked proto files.
var (
protoFiles = make(map[string][]byte) // file name => fileDescriptor
)
// RegisterFile is called from generated code and maps from the
// full file name of a .proto file to its compressed FileDescriptorProto.
func RegisterFile(filename string, fileDescriptor []byte) {
protoFiles[filename] = fileDescriptor
}
// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
func FileDescriptor(filename string) []byte { return protoFiles[filename] }
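As a usage note, here is a minimal standalone sketch (not part of the vendored file; the demo struct and its fields are hypothetical) of the comma-separated tag format that Properties.Parse splits apart above: wire type, tag number, cardinality, then name=, json=, enum=, proto3, oneof and def= options.

package main

import (
	"fmt"
	"reflect"
)

type demo struct {
	Name  string   `protobuf:"bytes,1,opt,name=name,proto3"`
	Count int32    `protobuf:"varint,2,opt,name=count,proto3"`
	Tags  []string `protobuf:"bytes,3,rep,name=tags,proto3"`
}

func main() {
	t := reflect.TypeOf(demo{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		// Each value is what Properties.Parse receives via f.Tag.Get("protobuf").
		fmt.Printf("%-5s -> %q\n", f.Name, f.Tag.Get("protobuf"))
	}
}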

2776
vendor/github.com/golang/protobuf/proto/table_marshal.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

654
vendor/github.com/golang/protobuf/proto/table_merge.go generated vendored Normal file
View File

@ -0,0 +1,654 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
import (
"fmt"
"reflect"
"strings"
"sync"
"sync/atomic"
)
// Merge merges the src message into dst.
// This assumes that dst and src are of the same type and are non-nil.
func (a *InternalMessageInfo) Merge(dst, src Message) {
mi := atomicLoadMergeInfo(&a.merge)
if mi == nil {
mi = getMergeInfo(reflect.TypeOf(dst).Elem())
atomicStoreMergeInfo(&a.merge, mi)
}
mi.merge(toPointer(&dst), toPointer(&src))
}
type mergeInfo struct {
typ reflect.Type
initialized int32 // 0: only typ is valid, 1: everything is valid
lock sync.Mutex
fields []mergeFieldInfo
unrecognized field // Offset of XXX_unrecognized
}
type mergeFieldInfo struct {
field field // Offset of field, guaranteed to be valid
// isPointer reports whether the value in the field is a pointer.
// This is true for the following situations:
// * Pointer to struct
// * Pointer to basic type (proto2 only)
// * Slice (first value in slice header is a pointer)
// * String (first value in string header is a pointer)
isPointer bool
// basicWidth reports the width of the field assuming that it is directly
// embedded in the struct (as is the case for basic types in proto3).
// The possible values are:
// 0: invalid
// 1: bool
// 4: int32, uint32, float32
// 8: int64, uint64, float64
basicWidth int
// Where dst and src are pointers to the types being merged.
merge func(dst, src pointer)
}
var (
mergeInfoMap = map[reflect.Type]*mergeInfo{}
mergeInfoLock sync.Mutex
)
func getMergeInfo(t reflect.Type) *mergeInfo {
mergeInfoLock.Lock()
defer mergeInfoLock.Unlock()
mi := mergeInfoMap[t]
if mi == nil {
mi = &mergeInfo{typ: t}
mergeInfoMap[t] = mi
}
return mi
}
// merge merges src into dst assuming they are both of type *mi.typ.
func (mi *mergeInfo) merge(dst, src pointer) {
if dst.isNil() {
panic("proto: nil destination")
}
if src.isNil() {
return // Nothing to do.
}
if atomic.LoadInt32(&mi.initialized) == 0 {
mi.computeMergeInfo()
}
for _, fi := range mi.fields {
sfp := src.offset(fi.field)
// As an optimization, we can avoid the merge function call cost
// if we know for sure that the source will have no effect
// by checking if it is the zero value.
if unsafeAllowed {
if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
continue
}
if fi.basicWidth > 0 {
switch {
case fi.basicWidth == 1 && !*sfp.toBool():
continue
case fi.basicWidth == 4 && *sfp.toUint32() == 0:
continue
case fi.basicWidth == 8 && *sfp.toUint64() == 0:
continue
}
}
}
dfp := dst.offset(fi.field)
fi.merge(dfp, sfp)
}
// TODO: Make this faster?
out := dst.asPointerTo(mi.typ).Elem()
in := src.asPointerTo(mi.typ).Elem()
if emIn, err := extendable(in.Addr().Interface()); err == nil {
emOut, _ := extendable(out.Addr().Interface())
mIn, muIn := emIn.extensionsRead()
if mIn != nil {
mOut := emOut.extensionsWrite()
muIn.Lock()
mergeExtension(mOut, mIn)
muIn.Unlock()
}
}
if mi.unrecognized.IsValid() {
if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
}
}
}
func (mi *mergeInfo) computeMergeInfo() {
mi.lock.Lock()
defer mi.lock.Unlock()
if mi.initialized != 0 {
return
}
t := mi.typ
n := t.NumField()
props := GetProperties(t)
for i := 0; i < n; i++ {
f := t.Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
mfi := mergeFieldInfo{field: toField(&f)}
tf := f.Type
// As an optimization, we can avoid the merge function call cost
// if we know for sure that the source will have no effect
// by checking if it is the zero value.
if unsafeAllowed {
switch tf.Kind() {
case reflect.Ptr, reflect.Slice, reflect.String:
// As a special case, we assume slices and strings are pointers
// since we know that the first field in the SliceHeader or
// StringHeader is a data pointer.
mfi.isPointer = true
case reflect.Bool:
mfi.basicWidth = 1
case reflect.Int32, reflect.Uint32, reflect.Float32:
mfi.basicWidth = 4
case reflect.Int64, reflect.Uint64, reflect.Float64:
mfi.basicWidth = 8
}
}
// Unwrap tf to get at its most basic type.
var isPointer, isSlice bool
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
isSlice = true
tf = tf.Elem()
}
if tf.Kind() == reflect.Ptr {
isPointer = true
tf = tf.Elem()
}
if isPointer && isSlice && tf.Kind() != reflect.Struct {
panic("both pointer and slice for basic type in " + tf.Name())
}
switch tf.Kind() {
case reflect.Int32:
switch {
case isSlice: // E.g., []int32
mfi.merge = func(dst, src pointer) {
// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
/*
sfsp := src.toInt32Slice()
if *sfsp != nil {
dfsp := dst.toInt32Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []int64{}
}
}
*/
sfs := src.getInt32Slice()
if sfs != nil {
dfs := dst.getInt32Slice()
dfs = append(dfs, sfs...)
if dfs == nil {
dfs = []int32{}
}
dst.setInt32Slice(dfs)
}
}
case isPointer: // E.g., *int32
mfi.merge = func(dst, src pointer) {
// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
/*
sfpp := src.toInt32Ptr()
if *sfpp != nil {
dfpp := dst.toInt32Ptr()
if *dfpp == nil {
*dfpp = Int32(**sfpp)
} else {
**dfpp = **sfpp
}
}
*/
sfp := src.getInt32Ptr()
if sfp != nil {
dfp := dst.getInt32Ptr()
if dfp == nil {
dst.setInt32Ptr(*sfp)
} else {
*dfp = *sfp
}
}
}
default: // E.g., int32
mfi.merge = func(dst, src pointer) {
if v := *src.toInt32(); v != 0 {
*dst.toInt32() = v
}
}
}
case reflect.Int64:
switch {
case isSlice: // E.g., []int64
mfi.merge = func(dst, src pointer) {
sfsp := src.toInt64Slice()
if *sfsp != nil {
dfsp := dst.toInt64Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []int64{}
}
}
}
case isPointer: // E.g., *int64
mfi.merge = func(dst, src pointer) {
sfpp := src.toInt64Ptr()
if *sfpp != nil {
dfpp := dst.toInt64Ptr()
if *dfpp == nil {
*dfpp = Int64(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., int64
mfi.merge = func(dst, src pointer) {
if v := *src.toInt64(); v != 0 {
*dst.toInt64() = v
}
}
}
case reflect.Uint32:
switch {
case isSlice: // E.g., []uint32
mfi.merge = func(dst, src pointer) {
sfsp := src.toUint32Slice()
if *sfsp != nil {
dfsp := dst.toUint32Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []uint32{}
}
}
}
case isPointer: // E.g., *uint32
mfi.merge = func(dst, src pointer) {
sfpp := src.toUint32Ptr()
if *sfpp != nil {
dfpp := dst.toUint32Ptr()
if *dfpp == nil {
*dfpp = Uint32(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., uint32
mfi.merge = func(dst, src pointer) {
if v := *src.toUint32(); v != 0 {
*dst.toUint32() = v
}
}
}
case reflect.Uint64:
switch {
case isSlice: // E.g., []uint64
mfi.merge = func(dst, src pointer) {
sfsp := src.toUint64Slice()
if *sfsp != nil {
dfsp := dst.toUint64Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []uint64{}
}
}
}
case isPointer: // E.g., *uint64
mfi.merge = func(dst, src pointer) {
sfpp := src.toUint64Ptr()
if *sfpp != nil {
dfpp := dst.toUint64Ptr()
if *dfpp == nil {
*dfpp = Uint64(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., uint64
mfi.merge = func(dst, src pointer) {
if v := *src.toUint64(); v != 0 {
*dst.toUint64() = v
}
}
}
case reflect.Float32:
switch {
case isSlice: // E.g., []float32
mfi.merge = func(dst, src pointer) {
sfsp := src.toFloat32Slice()
if *sfsp != nil {
dfsp := dst.toFloat32Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []float32{}
}
}
}
case isPointer: // E.g., *float32
mfi.merge = func(dst, src pointer) {
sfpp := src.toFloat32Ptr()
if *sfpp != nil {
dfpp := dst.toFloat32Ptr()
if *dfpp == nil {
*dfpp = Float32(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., float32
mfi.merge = func(dst, src pointer) {
if v := *src.toFloat32(); v != 0 {
*dst.toFloat32() = v
}
}
}
case reflect.Float64:
switch {
case isSlice: // E.g., []float64
mfi.merge = func(dst, src pointer) {
sfsp := src.toFloat64Slice()
if *sfsp != nil {
dfsp := dst.toFloat64Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []float64{}
}
}
}
case isPointer: // E.g., *float64
mfi.merge = func(dst, src pointer) {
sfpp := src.toFloat64Ptr()
if *sfpp != nil {
dfpp := dst.toFloat64Ptr()
if *dfpp == nil {
*dfpp = Float64(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., float64
mfi.merge = func(dst, src pointer) {
if v := *src.toFloat64(); v != 0 {
*dst.toFloat64() = v
}
}
}
case reflect.Bool:
switch {
case isSlice: // E.g., []bool
mfi.merge = func(dst, src pointer) {
sfsp := src.toBoolSlice()
if *sfsp != nil {
dfsp := dst.toBoolSlice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []bool{}
}
}
}
case isPointer: // E.g., *bool
mfi.merge = func(dst, src pointer) {
sfpp := src.toBoolPtr()
if *sfpp != nil {
dfpp := dst.toBoolPtr()
if *dfpp == nil {
*dfpp = Bool(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., bool
mfi.merge = func(dst, src pointer) {
if v := *src.toBool(); v {
*dst.toBool() = v
}
}
}
case reflect.String:
switch {
case isSlice: // E.g., []string
mfi.merge = func(dst, src pointer) {
sfsp := src.toStringSlice()
if *sfsp != nil {
dfsp := dst.toStringSlice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []string{}
}
}
}
case isPointer: // E.g., *string
mfi.merge = func(dst, src pointer) {
sfpp := src.toStringPtr()
if *sfpp != nil {
dfpp := dst.toStringPtr()
if *dfpp == nil {
*dfpp = String(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., string
mfi.merge = func(dst, src pointer) {
if v := *src.toString(); v != "" {
*dst.toString() = v
}
}
}
case reflect.Slice:
isProto3 := props.Prop[i].proto3
switch {
case isPointer:
panic("bad pointer in byte slice case in " + tf.Name())
case tf.Elem().Kind() != reflect.Uint8:
panic("bad element kind in byte slice case in " + tf.Name())
case isSlice: // E.g., [][]byte
mfi.merge = func(dst, src pointer) {
sbsp := src.toBytesSlice()
if *sbsp != nil {
dbsp := dst.toBytesSlice()
for _, sb := range *sbsp {
if sb == nil {
*dbsp = append(*dbsp, nil)
} else {
*dbsp = append(*dbsp, append([]byte{}, sb...))
}
}
if *dbsp == nil {
*dbsp = [][]byte{}
}
}
}
default: // E.g., []byte
mfi.merge = func(dst, src pointer) {
sbp := src.toBytes()
if *sbp != nil {
dbp := dst.toBytes()
if !isProto3 || len(*sbp) > 0 {
*dbp = append([]byte{}, *sbp...)
}
}
}
}
case reflect.Struct:
switch {
case !isPointer:
panic(fmt.Sprintf("message field %s without pointer", tf))
case isSlice: // E.g., []*pb.T
mi := getMergeInfo(tf)
mfi.merge = func(dst, src pointer) {
sps := src.getPointerSlice()
if sps != nil {
dps := dst.getPointerSlice()
for _, sp := range sps {
var dp pointer
if !sp.isNil() {
dp = valToPointer(reflect.New(tf))
mi.merge(dp, sp)
}
dps = append(dps, dp)
}
if dps == nil {
dps = []pointer{}
}
dst.setPointerSlice(dps)
}
}
default: // E.g., *pb.T
mi := getMergeInfo(tf)
mfi.merge = func(dst, src pointer) {
sp := src.getPointer()
if !sp.isNil() {
dp := dst.getPointer()
if dp.isNil() {
dp = valToPointer(reflect.New(tf))
dst.setPointer(dp)
}
mi.merge(dp, sp)
}
}
}
case reflect.Map:
switch {
case isPointer || isSlice:
panic("bad pointer or slice in map case in " + tf.Name())
default: // E.g., map[K]V
mfi.merge = func(dst, src pointer) {
sm := src.asPointerTo(tf).Elem()
if sm.Len() == 0 {
return
}
dm := dst.asPointerTo(tf).Elem()
if dm.IsNil() {
dm.Set(reflect.MakeMap(tf))
}
switch tf.Elem().Kind() {
case reflect.Ptr: // Proto struct (e.g., *T)
for _, key := range sm.MapKeys() {
val := sm.MapIndex(key)
val = reflect.ValueOf(Clone(val.Interface().(Message)))
dm.SetMapIndex(key, val)
}
case reflect.Slice: // E.g. Bytes type (e.g., []byte)
for _, key := range sm.MapKeys() {
val := sm.MapIndex(key)
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
dm.SetMapIndex(key, val)
}
default: // Basic type (e.g., string)
for _, key := range sm.MapKeys() {
val := sm.MapIndex(key)
dm.SetMapIndex(key, val)
}
}
}
}
case reflect.Interface:
// Must be oneof field.
switch {
case isPointer || isSlice:
panic("bad pointer or slice in interface case in " + tf.Name())
default: // E.g., interface{}
// TODO: Make this faster?
mfi.merge = func(dst, src pointer) {
su := src.asPointerTo(tf).Elem()
if !su.IsNil() {
du := dst.asPointerTo(tf).Elem()
typ := su.Elem().Type()
if du.IsNil() || du.Elem().Type() != typ {
du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
}
sv := su.Elem().Elem().Field(0)
if sv.Kind() == reflect.Ptr && sv.IsNil() {
return
}
dv := du.Elem().Elem().Field(0)
if dv.Kind() == reflect.Ptr && dv.IsNil() {
dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
}
switch sv.Type().Kind() {
case reflect.Ptr: // Proto struct (e.g., *T)
Merge(dv.Interface().(Message), sv.Interface().(Message))
case reflect.Slice: // E.g. Bytes type (e.g., []byte)
dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
default: // Basic type (e.g., string)
dv.Set(sv)
}
}
}
}
default:
panic(fmt.Sprintf("merger not found for type:%s", tf))
}
mi.fields = append(mi.fields, mfi)
}
mi.unrecognized = invalidField
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
if f.Type != reflect.TypeOf([]byte{}) {
panic("expected XXX_unrecognized to be of type []byte")
}
mi.unrecognized = toField(&f)
}
atomic.StoreInt32(&mi.initialized, 1)
}
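To make the table above concrete, here is a standalone sketch (not part of the vendored file; the record type is invented, not a generated proto struct) of the merge semantics computeMergeInfo encodes: zero-valued scalars and empty strings in src are skipped, nil slices are skipped, and non-nil slices are appended to the destination.

package main

import "fmt"

type record struct {
	ID    int64
	Label string
	Tags  []string
}

func merge(dst *record, src record) {
	if src.ID != 0 { // basicWidth-style zero check: zero sources have no effect
		dst.ID = src.ID
	}
	if src.Label != "" { // strings: empty source values are skipped
		dst.Label = src.Label
	}
	if src.Tags != nil { // slices: append rather than replace
		dst.Tags = append(dst.Tags, src.Tags...)
	}
}

func main() {
	dst := record{ID: 1, Label: "keep", Tags: []string{"a"}}
	merge(&dst, record{Tags: []string{"b"}})
	fmt.Printf("%+v\n", dst) // {ID:1 Label:keep Tags:[a b]}
}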

File diff suppressed because it is too large Load Diff

843
vendor/github.com/golang/protobuf/proto/text.go generated vendored Normal file
View File

@ -0,0 +1,843 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
// Functions for writing the text protocol buffer format.
import (
"bufio"
"bytes"
"encoding"
"errors"
"fmt"
"io"
"log"
"math"
"reflect"
"sort"
"strings"
)
var (
newline = []byte("\n")
spaces = []byte(" ")
endBraceNewline = []byte("}\n")
backslashN = []byte{'\\', 'n'}
backslashR = []byte{'\\', 'r'}
backslashT = []byte{'\\', 't'}
backslashDQ = []byte{'\\', '"'}
backslashBS = []byte{'\\', '\\'}
posInf = []byte("inf")
negInf = []byte("-inf")
nan = []byte("nan")
)
type writer interface {
io.Writer
WriteByte(byte) error
}
// textWriter is an io.Writer that tracks its indentation level.
type textWriter struct {
ind int
complete bool // if the current position is a complete line
compact bool // whether to write out as a one-liner
w writer
}
func (w *textWriter) WriteString(s string) (n int, err error) {
if !strings.Contains(s, "\n") {
if !w.compact && w.complete {
w.writeIndent()
}
w.complete = false
return io.WriteString(w.w, s)
}
// WriteString is typically called without newlines, so this
// codepath and its copy are rare. We copy to avoid
// duplicating all of Write's logic here.
return w.Write([]byte(s))
}
func (w *textWriter) Write(p []byte) (n int, err error) {
newlines := bytes.Count(p, newline)
if newlines == 0 {
if !w.compact && w.complete {
w.writeIndent()
}
n, err = w.w.Write(p)
w.complete = false
return n, err
}
frags := bytes.SplitN(p, newline, newlines+1)
if w.compact {
for i, frag := range frags {
if i > 0 {
if err := w.w.WriteByte(' '); err != nil {
return n, err
}
n++
}
nn, err := w.w.Write(frag)
n += nn
if err != nil {
return n, err
}
}
return n, nil
}
for i, frag := range frags {
if w.complete {
w.writeIndent()
}
nn, err := w.w.Write(frag)
n += nn
if err != nil {
return n, err
}
if i+1 < len(frags) {
if err := w.w.WriteByte('\n'); err != nil {
return n, err
}
n++
}
}
w.complete = len(frags[len(frags)-1]) == 0
return n, nil
}
func (w *textWriter) WriteByte(c byte) error {
if w.compact && c == '\n' {
c = ' '
}
if !w.compact && w.complete {
w.writeIndent()
}
err := w.w.WriteByte(c)
w.complete = c == '\n'
return err
}
func (w *textWriter) indent() { w.ind++ }
func (w *textWriter) unindent() {
if w.ind == 0 {
log.Print("proto: textWriter unindented too far")
return
}
w.ind--
}
func writeName(w *textWriter, props *Properties) error {
if _, err := w.WriteString(props.OrigName); err != nil {
return err
}
if props.Wire != "group" {
return w.WriteByte(':')
}
return nil
}
func requiresQuotes(u string) bool {
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
for _, ch := range u {
switch {
case ch == '.' || ch == '/' || ch == '_':
continue
case '0' <= ch && ch <= '9':
continue
case 'A' <= ch && ch <= 'Z':
continue
case 'a' <= ch && ch <= 'z':
continue
default:
return true
}
}
return false
}
// isAny reports whether sv is a google.protobuf.Any message
func isAny(sv reflect.Value) bool {
type wkt interface {
XXX_WellKnownType() string
}
t, ok := sv.Addr().Interface().(wkt)
return ok && t.XXX_WellKnownType() == "Any"
}
// writeProto3Any writes an expanded google.protobuf.Any message.
//
// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
// required messages are not linked in).
//
// It returns (true, error) when sv was written in expanded format or an error
// was encountered.
func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
turl := sv.FieldByName("TypeUrl")
val := sv.FieldByName("Value")
if !turl.IsValid() || !val.IsValid() {
return true, errors.New("proto: invalid google.protobuf.Any message")
}
b, ok := val.Interface().([]byte)
if !ok {
return true, errors.New("proto: invalid google.protobuf.Any message")
}
parts := strings.Split(turl.String(), "/")
mt := MessageType(parts[len(parts)-1])
if mt == nil {
return false, nil
}
m := reflect.New(mt.Elem())
if err := Unmarshal(b, m.Interface().(Message)); err != nil {
return false, nil
}
w.Write([]byte("["))
u := turl.String()
if requiresQuotes(u) {
writeString(w, u)
} else {
w.Write([]byte(u))
}
if w.compact {
w.Write([]byte("]:<"))
} else {
w.Write([]byte("]: <\n"))
w.ind++
}
if err := tm.writeStruct(w, m.Elem()); err != nil {
return true, err
}
if w.compact {
w.Write([]byte("> "))
} else {
w.ind--
w.Write([]byte(">\n"))
}
return true, nil
}
func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
if tm.ExpandAny && isAny(sv) {
if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
return err
}
}
st := sv.Type()
sprops := GetProperties(st)
for i := 0; i < sv.NumField(); i++ {
fv := sv.Field(i)
props := sprops.Prop[i]
name := st.Field(i).Name
if name == "XXX_NoUnkeyedLiteral" {
continue
}
if strings.HasPrefix(name, "XXX_") {
// There are two XXX_ fields:
// XXX_unrecognized []byte
// XXX_extensions map[int32]proto.Extension
// The first is handled here;
// the second is handled at the bottom of this function.
if name == "XXX_unrecognized" && !fv.IsNil() {
if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
return err
}
}
continue
}
if fv.Kind() == reflect.Ptr && fv.IsNil() {
// Field not filled in. This could be an optional field or
// a required field that wasn't filled in. Either way, there
// isn't anything we can show for it.
continue
}
if fv.Kind() == reflect.Slice && fv.IsNil() {
// Repeated field that is empty, or a bytes field that is unused.
continue
}
if props.Repeated && fv.Kind() == reflect.Slice {
// Repeated field.
for j := 0; j < fv.Len(); j++ {
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
v := fv.Index(j)
if v.Kind() == reflect.Ptr && v.IsNil() {
// A nil message in a repeated field is not valid,
// but we can handle that more gracefully than panicking.
if _, err := w.Write([]byte("<nil>\n")); err != nil {
return err
}
continue
}
if err := tm.writeAny(w, v, props); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
continue
}
if fv.Kind() == reflect.Map {
// Map fields are rendered as a repeated struct with key/value fields.
keys := fv.MapKeys()
sort.Sort(mapKeys(keys))
for _, key := range keys {
val := fv.MapIndex(key)
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
// open struct
if err := w.WriteByte('<'); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
// key
if _, err := w.WriteString("key:"); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
// nil values aren't legal, but we can avoid panicking because of them.
if val.Kind() != reflect.Ptr || !val.IsNil() {
// value
if _, err := w.WriteString("value:"); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := tm.writeAny(w, val, props.MapValProp); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
// close struct
w.unindent()
if err := w.WriteByte('>'); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
continue
}
if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
// empty bytes field
continue
}
if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
// proto3 non-repeated scalar field; skip if zero value
if isProto3Zero(fv) {
continue
}
}
if fv.Kind() == reflect.Interface {
// Check if it is a oneof.
if st.Field(i).Tag.Get("protobuf_oneof") != "" {
// fv is nil, or holds a pointer to generated struct.
// That generated struct has exactly one field,
// which has a protobuf struct tag.
if fv.IsNil() {
continue
}
inner := fv.Elem().Elem() // interface -> *T -> T
tag := inner.Type().Field(0).Tag.Get("protobuf")
props = new(Properties) // Overwrite the outer props var, but not its pointee.
props.Parse(tag)
// Write the value in the oneof, not the oneof itself.
fv = inner.Field(0)
// Special case to cope with malformed messages gracefully:
// If the value in the oneof is a nil pointer, don't panic
// in writeAny.
if fv.Kind() == reflect.Ptr && fv.IsNil() {
// Use errors.New so writeAny won't render quotes.
msg := errors.New("/* nil */")
fv = reflect.ValueOf(&msg).Elem()
}
}
}
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
// Enums have a String method, so writeAny will work fine.
if err := tm.writeAny(w, fv, props); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
// Extensions (the XXX_extensions field).
pv := sv.Addr()
if _, err := extendable(pv.Interface()); err == nil {
if err := tm.writeExtensions(w, pv); err != nil {
return err
}
}
return nil
}
// writeAny writes an arbitrary field.
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
v = reflect.Indirect(v)
// Floats have special cases.
if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
x := v.Float()
var b []byte
switch {
case math.IsInf(x, 1):
b = posInf
case math.IsInf(x, -1):
b = negInf
case math.IsNaN(x):
b = nan
}
if b != nil {
_, err := w.Write(b)
return err
}
// Other values are handled below.
}
// We don't attempt to serialise every possible value type; only those
// that can occur in protocol buffers.
switch v.Kind() {
case reflect.Slice:
// Should only be a []byte; repeated fields are handled in writeStruct.
if err := writeString(w, string(v.Bytes())); err != nil {
return err
}
case reflect.String:
if err := writeString(w, v.String()); err != nil {
return err
}
case reflect.Struct:
// Required/optional group/message.
var bra, ket byte = '<', '>'
if props != nil && props.Wire == "group" {
bra, ket = '{', '}'
}
if err := w.WriteByte(bra); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
if v.CanAddr() {
// Calling v.Interface on a struct causes the reflect package to
// copy the entire struct. This is racy with the new Marshaler
// since we atomically update the XXX_sizecache.
//
// Thus, we retrieve a pointer to the struct if possible to avoid
// a race since v.Interface on the pointer doesn't copy the struct.
//
// If v is not addressable, then we are not worried about a race
// since it implies that the binary Marshaler cannot possibly be
// mutating this value.
v = v.Addr()
}
if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
text, err := etm.MarshalText()
if err != nil {
return err
}
if _, err = w.Write(text); err != nil {
return err
}
} else {
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
if err := tm.writeStruct(w, v); err != nil {
return err
}
}
w.unindent()
if err := w.WriteByte(ket); err != nil {
return err
}
default:
_, err := fmt.Fprint(w, v.Interface())
return err
}
return nil
}
// equivalent to C's isprint.
func isprint(c byte) bool {
return c >= 0x20 && c < 0x7f
}
// writeString writes a string in the protocol buffer text format.
// It is similar to strconv.Quote except we don't use Go escape sequences,
// we treat the string as a byte sequence, and we use octal escapes.
// These differences are to maintain interoperability with the other
// languages' implementations of the text format.
func writeString(w *textWriter, s string) error {
// use WriteByte here to get any needed indent
if err := w.WriteByte('"'); err != nil {
return err
}
// Loop over the bytes, not the runes.
for i := 0; i < len(s); i++ {
var err error
// Divergence from C++: we don't escape apostrophes.
// There's no need to escape them, and the C++ parser
// copes with a naked apostrophe.
switch c := s[i]; c {
case '\n':
_, err = w.w.Write(backslashN)
case '\r':
_, err = w.w.Write(backslashR)
case '\t':
_, err = w.w.Write(backslashT)
case '"':
_, err = w.w.Write(backslashDQ)
case '\\':
_, err = w.w.Write(backslashBS)
default:
if isprint(c) {
err = w.w.WriteByte(c)
} else {
_, err = fmt.Fprintf(w.w, "\\%03o", c)
}
}
if err != nil {
return err
}
}
return w.WriteByte('"')
}
func writeUnknownStruct(w *textWriter, data []byte) (err error) {
if !w.compact {
if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
return err
}
}
b := NewBuffer(data)
for b.index < len(b.buf) {
x, err := b.DecodeVarint()
if err != nil {
_, err := fmt.Fprintf(w, "/* %v */\n", err)
return err
}
wire, tag := x&7, x>>3
if wire == WireEndGroup {
w.unindent()
if _, err := w.Write(endBraceNewline); err != nil {
return err
}
continue
}
if _, err := fmt.Fprint(w, tag); err != nil {
return err
}
if wire != WireStartGroup {
if err := w.WriteByte(':'); err != nil {
return err
}
}
if !w.compact || wire == WireStartGroup {
if err := w.WriteByte(' '); err != nil {
return err
}
}
switch wire {
case WireBytes:
buf, e := b.DecodeRawBytes(false)
if e == nil {
_, err = fmt.Fprintf(w, "%q", buf)
} else {
_, err = fmt.Fprintf(w, "/* %v */", e)
}
case WireFixed32:
x, err = b.DecodeFixed32()
err = writeUnknownInt(w, x, err)
case WireFixed64:
x, err = b.DecodeFixed64()
err = writeUnknownInt(w, x, err)
case WireStartGroup:
err = w.WriteByte('{')
w.indent()
case WireVarint:
x, err = b.DecodeVarint()
err = writeUnknownInt(w, x, err)
default:
_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
}
if err != nil {
return err
}
if err = w.WriteByte('\n'); err != nil {
return err
}
}
return nil
}
func writeUnknownInt(w *textWriter, x uint64, err error) error {
if err == nil {
_, err = fmt.Fprint(w, x)
} else {
_, err = fmt.Fprintf(w, "/* %v */", err)
}
return err
}
type int32Slice []int32
func (s int32Slice) Len() int { return len(s) }
func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// writeExtensions writes all the extensions in pv.
// pv is assumed to be a pointer to a protocol message struct that is extendable.
func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
emap := extensionMaps[pv.Type().Elem()]
ep, _ := extendable(pv.Interface())
// Order the extensions by ID.
// This isn't strictly necessary, but it will give us
// canonical output, which will also make testing easier.
m, mu := ep.extensionsRead()
if m == nil {
return nil
}
mu.Lock()
ids := make([]int32, 0, len(m))
for id := range m {
ids = append(ids, id)
}
sort.Sort(int32Slice(ids))
mu.Unlock()
for _, extNum := range ids {
ext := m[extNum]
var desc *ExtensionDesc
if emap != nil {
desc = emap[extNum]
}
if desc == nil {
// Unknown extension.
if err := writeUnknownStruct(w, ext.enc); err != nil {
return err
}
continue
}
pb, err := GetExtension(ep, desc)
if err != nil {
return fmt.Errorf("failed getting extension: %v", err)
}
// Repeated extensions will appear as a slice.
if !desc.repeated() {
if err := tm.writeExtension(w, desc.Name, pb); err != nil {
return err
}
} else {
v := reflect.ValueOf(pb)
for i := 0; i < v.Len(); i++ {
if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
return err
}
}
}
}
return nil
}
func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
return nil
}
func (w *textWriter) writeIndent() {
if !w.complete {
return
}
remain := w.ind * 2
for remain > 0 {
n := remain
if n > len(spaces) {
n = len(spaces)
}
w.w.Write(spaces[:n])
remain -= n
}
w.complete = false
}
// TextMarshaler is a configurable text format marshaler.
type TextMarshaler struct {
Compact bool // use compact text format (one line).
ExpandAny bool // expand google.protobuf.Any messages of known types
}
// Marshal writes a given protocol buffer in text format.
// The only errors returned are from w.
func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
val := reflect.ValueOf(pb)
if pb == nil || val.IsNil() {
w.Write([]byte("<nil>"))
return nil
}
var bw *bufio.Writer
ww, ok := w.(writer)
if !ok {
bw = bufio.NewWriter(w)
ww = bw
}
aw := &textWriter{
w: ww,
complete: true,
compact: tm.Compact,
}
if etm, ok := pb.(encoding.TextMarshaler); ok {
text, err := etm.MarshalText()
if err != nil {
return err
}
if _, err = aw.Write(text); err != nil {
return err
}
if bw != nil {
return bw.Flush()
}
return nil
}
// Dereference the received pointer so we don't have outer < and >.
v := reflect.Indirect(val)
if err := tm.writeStruct(aw, v); err != nil {
return err
}
if bw != nil {
return bw.Flush()
}
return nil
}
// Text is the same as Marshal, but returns the string directly.
func (tm *TextMarshaler) Text(pb Message) string {
var buf bytes.Buffer
tm.Marshal(&buf, pb)
return buf.String()
}
var (
defaultTextMarshaler = TextMarshaler{}
compactTextMarshaler = TextMarshaler{Compact: true}
)
// TODO: consider removing some of the Marshal functions below.
// MarshalText writes a given protocol buffer in text format.
// The only errors returned are from w.
func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
// MarshalTextString is the same as MarshalText, but returns the string directly.
func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
// CompactText writes a given protocol buffer in compact text format (one line).
func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
// CompactTextString is the same as CompactText, but returns the string directly.
func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
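As a rough illustration (a standalone sketch, not part of the vendored file), the escaping rule writeString applies above can be reproduced in a few lines: printable bytes pass through, a handful of characters get C-style escapes, and every other byte becomes a three-digit octal escape, processed byte by byte rather than rune by rune.

package main

import "fmt"

func escapeTextString(s string) string {
	out := []byte{'"'}
	for i := 0; i < len(s); i++ { // loop over bytes, not runes
		switch c := s[i]; c {
		case '\n':
			out = append(out, '\\', 'n')
		case '\r':
			out = append(out, '\\', 'r')
		case '\t':
			out = append(out, '\\', 't')
		case '"':
			out = append(out, '\\', '"')
		case '\\':
			out = append(out, '\\', '\\')
		default:
			if c >= 0x20 && c < 0x7f { // same check as isprint above
				out = append(out, c)
			} else {
				out = append(out, []byte(fmt.Sprintf("\\%03o", c))...)
			}
		}
	}
	return string(append(out, '"'))
}

func main() {
	fmt.Println(escapeTextString("héllo\n")) // prints "h\303\251llo\n" (quotes included)
}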

880
vendor/github.com/golang/protobuf/proto/text_parser.go generated vendored Normal file
View File

@ -0,0 +1,880 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
// Functions for parsing the Text protocol buffer format.
// TODO: message sets.
import (
"encoding"
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"unicode/utf8"
)
// Error string emitted when deserializing Any and fields are already set
const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
type ParseError struct {
Message string
Line int // 1-based line number
Offset int // 0-based byte offset from start of input
}
func (p *ParseError) Error() string {
if p.Line == 1 {
// show offset only for first line
return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
}
return fmt.Sprintf("line %d: %v", p.Line, p.Message)
}
type token struct {
value string
err *ParseError
line int // line number
offset int // byte number from start of input, not start of line
unquoted string // the unquoted version of value, if it was a quoted string
}
func (t *token) String() string {
if t.err == nil {
return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
}
return fmt.Sprintf("parse error: %v", t.err)
}
type textParser struct {
s string // remaining input
done bool // whether the parsing is finished (success or error)
backed bool // whether back() was called
offset, line int
cur token
}
func newTextParser(s string) *textParser {
p := new(textParser)
p.s = s
p.line = 1
p.cur.line = 1
return p
}
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
p.cur.err = pe
p.done = true
return pe
}
// Numbers and identifiers are matched by [-+._A-Za-z0-9]
func isIdentOrNumberChar(c byte) bool {
switch {
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
return true
case '0' <= c && c <= '9':
return true
}
switch c {
case '-', '+', '.', '_':
return true
}
return false
}
func isWhitespace(c byte) bool {
switch c {
case ' ', '\t', '\n', '\r':
return true
}
return false
}
func isQuote(c byte) bool {
switch c {
case '"', '\'':
return true
}
return false
}
func (p *textParser) skipWhitespace() {
i := 0
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
if p.s[i] == '#' {
// comment; skip to end of line or input
for i < len(p.s) && p.s[i] != '\n' {
i++
}
if i == len(p.s) {
break
}
}
if p.s[i] == '\n' {
p.line++
}
i++
}
p.offset += i
p.s = p.s[i:len(p.s)]
if len(p.s) == 0 {
p.done = true
}
}
func (p *textParser) advance() {
// Skip whitespace
p.skipWhitespace()
if p.done {
return
}
// Start of non-whitespace
p.cur.err = nil
p.cur.offset, p.cur.line = p.offset, p.line
p.cur.unquoted = ""
switch p.s[0] {
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
// Single symbol
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
case '"', '\'':
// Quoted string
i := 1
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
if p.s[i] == '\\' && i+1 < len(p.s) {
// skip escaped char
i++
}
i++
}
if i >= len(p.s) || p.s[i] != p.s[0] {
p.errorf("unmatched quote")
return
}
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
if err != nil {
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
return
}
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
p.cur.unquoted = unq
default:
i := 0
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
i++
}
if i == 0 {
p.errorf("unexpected byte %#x", p.s[0])
return
}
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
}
p.offset += len(p.cur.value)
}
var (
errBadUTF8 = errors.New("proto: bad UTF-8")
)
func unquoteC(s string, quote rune) (string, error) {
// This is based on C++'s tokenizer.cc.
// Despite its name, this is *not* parsing C syntax.
// For instance, "\0" is an invalid quoted string.
// Avoid allocation in trivial cases.
simple := true
for _, r := range s {
if r == '\\' || r == quote {
simple = false
break
}
}
if simple {
return s, nil
}
buf := make([]byte, 0, 3*len(s)/2)
for len(s) > 0 {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", errBadUTF8
}
s = s[n:]
if r != '\\' {
if r < utf8.RuneSelf {
buf = append(buf, byte(r))
} else {
buf = append(buf, string(r)...)
}
continue
}
ch, tail, err := unescape(s)
if err != nil {
return "", err
}
buf = append(buf, ch...)
s = tail
}
return string(buf), nil
}
func unescape(s string) (ch string, tail string, err error) {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", "", errBadUTF8
}
s = s[n:]
switch r {
case 'a':
return "\a", s, nil
case 'b':
return "\b", s, nil
case 'f':
return "\f", s, nil
case 'n':
return "\n", s, nil
case 'r':
return "\r", s, nil
case 't':
return "\t", s, nil
case 'v':
return "\v", s, nil
case '?':
return "?", s, nil // trigraph workaround
case '\'', '"', '\\':
return string(r), s, nil
case '0', '1', '2', '3', '4', '5', '6', '7':
if len(s) < 2 {
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
}
ss := string(r) + s[:2]
s = s[2:]
i, err := strconv.ParseUint(ss, 8, 8)
if err != nil {
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
}
return string([]byte{byte(i)}), s, nil
case 'x', 'X', 'u', 'U':
var n int
switch r {
case 'x', 'X':
n = 2
case 'u':
n = 4
case 'U':
n = 8
}
if len(s) < n {
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
}
ss := s[:n]
s = s[n:]
i, err := strconv.ParseUint(ss, 16, 64)
if err != nil {
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
}
if r == 'x' || r == 'X' {
return string([]byte{byte(i)}), s, nil
}
if i > utf8.MaxRune {
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
}
return string(i), s, nil
}
return "", "", fmt.Errorf(`unknown escape \%c`, r)
}
// Back off the parser by one token. Can only be done between calls to next().
// It makes the next advance() a no-op.
func (p *textParser) back() { p.backed = true }
// Advances the parser and returns the new current token.
func (p *textParser) next() *token {
if p.backed || p.done {
p.backed = false
return &p.cur
}
p.advance()
if p.done {
p.cur.value = ""
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
// Look for multiple quoted strings separated by whitespace,
// and concatenate them.
cat := p.cur
for {
p.skipWhitespace()
if p.done || !isQuote(p.s[0]) {
break
}
p.advance()
if p.cur.err != nil {
return &p.cur
}
cat.value += " " + p.cur.value
cat.unquoted += p.cur.unquoted
}
p.done = false // parser may have seen EOF, but we want to return cat
p.cur = cat
}
return &p.cur
}
func (p *textParser) consumeToken(s string) error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != s {
p.back()
return p.errorf("expected %q, found %q", s, tok.value)
}
return nil
}
// Return a RequiredNotSetError indicating which required field was not set.
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
st := sv.Type()
sprops := GetProperties(st)
for i := 0; i < st.NumField(); i++ {
if !isNil(sv.Field(i)) {
continue
}
props := sprops.Prop[i]
if props.Required {
return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
}
}
return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
}
// Returns the index in the struct for the named field, as well as the parsed tag properties.
func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
i, ok := sprops.decoderOrigNames[name]
if ok {
return i, sprops.Prop[i], true
}
return -1, nil, false
}
// Consume a ':' from the input stream (if the next token is a colon),
// returning an error if a colon is needed but not present.
func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ":" {
// Colon is optional when the field is a group or message.
needColon := true
switch props.Wire {
case "group":
needColon = false
case "bytes":
// A "bytes" field is either a message, a string, or a repeated field;
// those three become *T, *string and []T respectively, so we can check for
// this field being a pointer to a non-string.
if typ.Kind() == reflect.Ptr {
// *T or *string
if typ.Elem().Kind() == reflect.String {
break
}
} else if typ.Kind() == reflect.Slice {
// []T or []*T
if typ.Elem().Kind() != reflect.Ptr {
break
}
} else if typ.Kind() == reflect.String {
// The proto3 exception is for a string field,
// which requires a colon.
break
}
needColon = false
}
if needColon {
return p.errorf("expected ':', found %q", tok.value)
}
p.back()
}
return nil
}
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
st := sv.Type()
sprops := GetProperties(st)
reqCount := sprops.reqCount
var reqFieldErr error
fieldSet := make(map[string]bool)
// A struct is a sequence of "name: value", terminated by one of
// '>' or '}', or the end of the input. A name may also be
// "[extension]" or "[type/url]".
//
// The whole struct can also be an expanded Any message, like:
// [type/url] < ... struct contents ... >
for {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == terminator {
break
}
if tok.value == "[" {
// Looks like an extension or an Any.
//
// TODO: Check whether we need to handle
// namespace rooted names (e.g. ".something.Foo").
extName, err := p.consumeExtName()
if err != nil {
return err
}
if s := strings.LastIndex(extName, "/"); s >= 0 {
// If it contains a slash, it's an Any type URL.
messageName := extName[s+1:]
mt := MessageType(messageName)
if mt == nil {
return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
}
tok = p.next()
if tok.err != nil {
return tok.err
}
// consume an optional colon
if tok.value == ":" {
tok = p.next()
if tok.err != nil {
return tok.err
}
}
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
v := reflect.New(mt.Elem())
if pe := p.readStruct(v.Elem(), terminator); pe != nil {
return pe
}
b, err := Marshal(v.Interface().(Message))
if err != nil {
return p.errorf("failed to marshal message of type %q: %v", messageName, err)
}
if fieldSet["type_url"] {
return p.errorf(anyRepeatedlyUnpacked, "type_url")
}
if fieldSet["value"] {
return p.errorf(anyRepeatedlyUnpacked, "value")
}
sv.FieldByName("TypeUrl").SetString(extName)
sv.FieldByName("Value").SetBytes(b)
fieldSet["type_url"] = true
fieldSet["value"] = true
continue
}
var desc *ExtensionDesc
// This could be faster, but it's functional.
// TODO: Do something smarter than a linear scan.
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
if d.Name == extName {
desc = d
break
}
}
if desc == nil {
return p.errorf("unrecognized extension %q", extName)
}
props := &Properties{}
props.Parse(desc.Tag)
typ := reflect.TypeOf(desc.ExtensionType)
if err := p.checkForColon(props, typ); err != nil {
return err
}
rep := desc.repeated()
// Read the extension structure, and set it in
// the value we're constructing.
var ext reflect.Value
if !rep {
ext = reflect.New(typ).Elem()
} else {
ext = reflect.New(typ.Elem()).Elem()
}
if err := p.readAny(ext, props); err != nil {
if _, ok := err.(*RequiredNotSetError); !ok {
return err
}
reqFieldErr = err
}
ep := sv.Addr().Interface().(Message)
if !rep {
SetExtension(ep, desc, ext.Interface())
} else {
old, err := GetExtension(ep, desc)
var sl reflect.Value
if err == nil {
sl = reflect.ValueOf(old) // existing slice
} else {
sl = reflect.MakeSlice(typ, 0, 1)
}
sl = reflect.Append(sl, ext)
SetExtension(ep, desc, sl.Interface())
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
continue
}
// This is a normal, non-extension field.
name := tok.value
var dst reflect.Value
fi, props, ok := structFieldByName(sprops, name)
if ok {
dst = sv.Field(fi)
} else if oop, ok := sprops.OneofTypes[name]; ok {
// It is a oneof.
props = oop.Prop
nv := reflect.New(oop.Type.Elem())
dst = nv.Elem().Field(0)
field := sv.Field(oop.Field)
if !field.IsNil() {
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
}
field.Set(nv)
}
if !dst.IsValid() {
return p.errorf("unknown field name %q in %v", name, st)
}
if dst.Kind() == reflect.Map {
// Consume any colon.
if err := p.checkForColon(props, dst.Type()); err != nil {
return err
}
// Construct the map if it doesn't already exist.
if dst.IsNil() {
dst.Set(reflect.MakeMap(dst.Type()))
}
key := reflect.New(dst.Type().Key()).Elem()
val := reflect.New(dst.Type().Elem()).Elem()
// The map entry should be this sequence of tokens:
// < key : KEY value : VALUE >
// However, implementations may omit key or value, and technically
// we should support them in any order. See b/28924776 for a time
// this went wrong.
tok := p.next()
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
for {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == terminator {
break
}
switch tok.value {
case "key":
if err := p.consumeToken(":"); err != nil {
return err
}
if err := p.readAny(key, props.MapKeyProp); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
case "value":
if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
return err
}
if err := p.readAny(val, props.MapValProp); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
default:
p.back()
return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
}
}
dst.SetMapIndex(key, val)
continue
}
// Check that it's not already set if it's not a repeated field.
if !props.Repeated && fieldSet[name] {
return p.errorf("non-repeated field %q was repeated", name)
}
if err := p.checkForColon(props, dst.Type()); err != nil {
return err
}
// Parse into the field.
fieldSet[name] = true
if err := p.readAny(dst, props); err != nil {
if _, ok := err.(*RequiredNotSetError); !ok {
return err
}
reqFieldErr = err
}
if props.Required {
reqCount--
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
}
if reqCount > 0 {
return p.missingRequiredFieldError(sv)
}
return reqFieldErr
}
// consumeExtName consumes extension name or expanded Any type URL and the
// following ']'. It returns the name or URL consumed.
func (p *textParser) consumeExtName() (string, error) {
tok := p.next()
if tok.err != nil {
return "", tok.err
}
// If extension name or type url is quoted, it's a single token.
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
if err != nil {
return "", err
}
return name, p.consumeToken("]")
}
// Consume everything up to "]"
var parts []string
for tok.value != "]" {
parts = append(parts, tok.value)
tok = p.next()
if tok.err != nil {
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
}
if p.done && tok.value != "]" {
return "", p.errorf("unclosed type_url or extension name")
}
}
return strings.Join(parts, ""), nil
}
// consumeOptionalSeparator consumes an optional semicolon or comma.
// It is used in readStruct to provide backward compatibility.
func (p *textParser) consumeOptionalSeparator() error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ";" && tok.value != "," {
p.back()
}
return nil
}
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == "" {
return p.errorf("unexpected EOF")
}
switch fv := v; fv.Kind() {
case reflect.Slice:
at := v.Type()
if at.Elem().Kind() == reflect.Uint8 {
// Special case for []byte
if tok.value[0] != '"' && tok.value[0] != '\'' {
// Deliberately written out here, as the error after
// this switch statement would write "invalid []byte: ...",
// which is not as user-friendly.
return p.errorf("invalid string: %v", tok.value)
}
bytes := []byte(tok.unquoted)
fv.Set(reflect.ValueOf(bytes))
return nil
}
// Repeated field.
if tok.value == "[" {
// Repeated field with list notation, like [1,2,3].
for {
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
err := p.readAny(fv.Index(fv.Len()-1), props)
if err != nil {
return err
}
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == "]" {
break
}
if tok.value != "," {
return p.errorf("Expected ']' or ',' found %q", tok.value)
}
}
return nil
}
// One value of the repeated field.
p.back()
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
return p.readAny(fv.Index(fv.Len()-1), props)
case reflect.Bool:
// true/1/t/True or false/f/0/False.
switch tok.value {
case "true", "1", "t", "True":
fv.SetBool(true)
return nil
case "false", "0", "f", "False":
fv.SetBool(false)
return nil
}
case reflect.Float32, reflect.Float64:
v := tok.value
// Ignore 'f' for compatibility with output generated by C++, but don't
// remove 'f' when the value is "-inf" or "inf".
if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
v = v[:len(v)-1]
}
if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
fv.SetFloat(f)
return nil
}
case reflect.Int32:
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
fv.SetInt(x)
return nil
}
if len(props.Enum) == 0 {
break
}
m, ok := enumValueMaps[props.Enum]
if !ok {
break
}
x, ok := m[tok.value]
if !ok {
break
}
fv.SetInt(int64(x))
return nil
case reflect.Int64:
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
fv.SetInt(x)
return nil
}
case reflect.Ptr:
// A basic field (indirected through pointer), or a repeated message/group
p.back()
fv.Set(reflect.New(fv.Type().Elem()))
return p.readAny(fv.Elem(), props)
case reflect.String:
if tok.value[0] == '"' || tok.value[0] == '\'' {
fv.SetString(tok.unquoted)
return nil
}
case reflect.Struct:
var terminator string
switch tok.value {
case "{":
terminator = "}"
case "<":
terminator = ">"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
return p.readStruct(fv, terminator)
case reflect.Uint32:
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
fv.SetUint(uint64(x))
return nil
}
case reflect.Uint64:
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
fv.SetUint(x)
return nil
}
}
return p.errorf("invalid %v: %v", v.Type(), tok.value)
}
// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
// before starting to unmarshal, so any existing data in pb is always removed.
// If a required field is not set and no other error occurs,
// UnmarshalText returns *RequiredNotSetError.
func UnmarshalText(s string, pb Message) error {
if um, ok := pb.(encoding.TextUnmarshaler); ok {
return um.UnmarshalText([]byte(s))
}
pb.Reset()
v := reflect.ValueOf(pb)
return newTextParser(s).readStruct(v.Elem(), "")
}
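
A short parsing sketch to complement the marshaling example earlier, again assuming a hypothetical proto3-generated type `pb.Greeting` (as the doc comment notes, a `*RequiredNotSetError` would only be returned for unset proto2 required fields):

```Go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	pb "example.com/hello" // hypothetical generated package, for illustration only
)

func main() {
	in := `name: "world"` // text format, e.g. produced by MarshalTextString

	var msg pb.Greeting
	if err := proto.UnmarshalText(in, &msg); err != nil {
		log.Fatalf("parse failed: %v", err)
	}
	fmt.Println(msg.GetName()) // prints "world"
}
```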

201
vendor/github.com/jackspirou/syscerts/LICENSE generated vendored Normal file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

42
vendor/github.com/jackspirou/syscerts/README.md generated vendored Normal file

@ -0,0 +1,42 @@
# syscerts
Gather local system certificates in Go via a public `SystemRootsPool` method.
#### What does this do?
Provide a way to gather local system certificates
on different OS platforms.
#### How does it do it?
It uses the `crypto/x509` package and provides a single public method called
`SystemRootsPool()` to return a `*x509.CertPool` object.
#### How do you use it?
```Go
// gather CA certs
certpool := syscerts.SystemRootsPool()
// place them in an HTTP client for trusted SSL/TLS connections
tlsConfig := &tls.Config{RootCAs: certpool}
transport := &http.Transport{TLSClientConfig: tlsConfig}
client := &http.Client{Transport: transport}
// make a request
resp, err := client.Do(req)
```
#### Why even do it?
The `crypto/x509` package already has a `systemRootsPool` method.
The `crypto/x509.systemRootsPool` method is almost the same as
`github.com/jackspirou/syscerts.SystemRootsPool`.
The difference? The `crypto/x509.systemRootsPool` method is private so you
cannot access it. :(
There are plans for the `crypto/x509.systemRootsPool` method to become public
in Go 1.7. When this happens you might no longer need `github.com/jackspirou/syscerts.SystemRootsPool`.
The only reason to keep using this package after the Go 1.7 release is for the
Mac OSX System Keychain certs, which are not included in the
`crypto/x509` package. Relevant lines below:
* https://github.com/jackspirou/syscerts/blob/master/root_darwin.go#L24-L32
Find more about this Go issue here: https://github.com/golang/go/issues/13335
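
For comparison, a minimal sketch of the standard-library route this README anticipates: on Go 1.7 and later, `crypto/x509` exposes `SystemCertPool`, which covers the same use case apart from the macOS System Keychain caveat noted above:

```Go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"log"
	"net/http"
)

func main() {
	// Go 1.7+: the stdlib equivalent of syscerts.SystemRootsPool.
	certpool, err := x509.SystemCertPool()
	if err != nil {
		log.Fatal(err)
	}

	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{RootCAs: certpool},
		},
	}
	_ = client // use client.Do(req) as in the example above
}
```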

22
vendor/github.com/jackspirou/syscerts/root.go generated vendored Normal file

@ -0,0 +1,22 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package syscerts
import (
"crypto/x509"
"sync"
)
var (
once sync.Once
systemRoots *x509.CertPool
)
// SystemRootsPool attempts to find and return a pool of all installed
// system certificates.
func SystemRootsPool() *x509.CertPool {
once.Do(initSystemRoots)
return systemRoots
}

14
vendor/github.com/jackspirou/syscerts/root_bsd.go generated vendored Normal file

@ -0,0 +1,14 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build dragonfly freebsd netbsd openbsd
package syscerts
// Possible certificate files; stop after finding one.
var certFiles = []string{
"/usr/local/share/certs/ca-root-nss.crt", // FreeBSD/DragonFly
"/etc/ssl/cert.pem", // OpenBSD
"/etc/openssl/certs/ca-certificates.crt", // NetBSD
}


@ -0,0 +1,85 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build cgo,darwin,!arm,!arm64,!ios
package syscerts
/*
#cgo CFLAGS: -mmacosx-version-min=10.6 -D__MAC_OS_X_VERSION_MAX_ALLOWED=1060
#cgo LDFLAGS: -framework CoreFoundation -framework Security
#include <CoreFoundation/CoreFoundation.h>
#include <Security/Security.h>
// FetchPEMRootsC fetches the system's list of trusted X.509 root certificates.
//
// On success it returns 0 and fills pemRoots with a CFDataRef that contains the extracted root
// certificates of the system. On failure, the function returns -1.
//
// Note: The CFDataRef returned in pemRoots must be released (using CFRelease) after
// we've consumed its content.
int FetchPEMRootsC(CFDataRef *pemRoots) {
if (pemRoots == NULL) {
return -1;
}
CFArrayRef certs = NULL;
OSStatus err = SecTrustCopyAnchorCertificates(&certs);
if (err != noErr) {
return -1;
}
CFMutableDataRef combinedData = CFDataCreateMutable(kCFAllocatorDefault, 0);
int i, ncerts = CFArrayGetCount(certs);
for (i = 0; i < ncerts; i++) {
CFDataRef data = NULL;
SecCertificateRef cert = (SecCertificateRef)CFArrayGetValueAtIndex(certs, i);
if (cert == NULL) {
continue;
}
// Note: SecKeychainItemExport is deprecated as of 10.7 in favor of SecItemExport.
// Once we support weak imports via cgo we should prefer that, and fall back to this
// for older systems.
err = SecKeychainItemExport(cert, kSecFormatX509Cert, kSecItemPemArmour, NULL, &data);
if (err != noErr) {
continue;
}
if (data != NULL) {
CFDataAppendBytes(combinedData, CFDataGetBytePtr(data), CFDataGetLength(data));
CFRelease(data);
}
}
CFRelease(certs);
*pemRoots = combinedData;
return 0;
}
*/
/*
import "C"
import (
"crypto/x509"
"unsafe"
)
func initSystemRoots() {
roots := x509.NewCertPool()
var data C.CFDataRef = nil
err := C.FetchPEMRootsC(&data)
if err == -1 {
return
}
defer C.CFRelease(C.CFTypeRef(data))
buf := C.GoBytes(unsafe.Pointer(C.CFDataGetBytePtr(data)), C.int(C.CFDataGetLength(data)))
roots.AppendCertsFromPEM(buf)
systemRoots = roots
}
*/

39
vendor/github.com/jackspirou/syscerts/root_darwin.go generated vendored Normal file

@ -0,0 +1,39 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run root_darwin_arm_gen.go -output root_darwin_armx.go
package syscerts
import (
"crypto/x509"
"os"
"os/exec"
)
func execSecurityRoots() (*x509.CertPool, error) {
roots := x509.NewCertPool()
cmd := exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", "/System/Library/Keychains/SystemRootCertificates.keychain")
data, err := cmd.Output()
if err != nil {
return nil, err
}
roots.AppendCertsFromPEM(data)
// if available add the Mac OSX System Keychain
if _, err := os.Stat("/Library/Keychains/System.keychain"); err == nil {
cmd = exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", "/Library/Keychains/System.keychain")
data, err = cmd.Output()
if err != nil {
return nil, err
}
roots.AppendCertsFromPEM(data)
}
return roots, nil
}
func initSystemRoots() {
systemRoots, _ = execSecurityRoots()
}

4909
vendor/github.com/jackspirou/syscerts/root_darwin_armx.go generated vendored Normal file

File diff suppressed because it is too large

13
vendor/github.com/jackspirou/syscerts/root_linux.go generated vendored Normal file

@ -0,0 +1,13 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package syscerts
// Possible certificate files; stop after finding one.
var certFiles = []string{
"/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc.
"/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL
"/etc/ssl/ca-bundle.pem", // OpenSUSE
"/etc/pki/tls/cacert.pem", // OpenELEC
}

8
vendor/github.com/jackspirou/syscerts/root_nacl.go generated vendored Normal file

@ -0,0 +1,8 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package syscerts
// Possible certificate files; stop after finding one.
var certFiles = []string{}

32
vendor/github.com/jackspirou/syscerts/root_plan9.go generated vendored Normal file

@ -0,0 +1,32 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build plan9
package syscerts
import (
"crypto/x509"
"io/ioutil"
)
// Possible certificate files; stop after finding one.
var certFiles = []string{
"/sys/lib/tls/ca.pem",
}
func initSystemRoots() {
roots := x509.NewCertPool()
for _, file := range certFiles {
data, err := ioutil.ReadFile(file)
if err == nil {
roots.AppendCertsFromPEM(data)
systemRoots = roots
return
}
}
// All of the files failed to load. systemRoots will be nil which will
// trigger a specific error at verification time.
}

12
vendor/github.com/jackspirou/syscerts/root_solaris.go generated vendored Normal file

@ -0,0 +1,12 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package syscerts
// Possible certificate files; stop after finding one.
var certFiles = []string{
"/etc/certs/ca-certificates.crt", // Solaris 11.2+
"/etc/ssl/certs/ca-certificates.crt", // Joyent SmartOS
"/etc/ssl/cacert.pem", // OmniOS
}

52
vendor/github.com/jackspirou/syscerts/root_unix.go generated vendored Normal file

@ -0,0 +1,52 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build dragonfly freebsd linux nacl netbsd openbsd solaris
package syscerts
import (
"crypto/x509"
"io/ioutil"
)
// Possible directories with certificate files; stop after successfully
// reading at least one file from a directory.
var certDirectories = []string{
"/etc/ssl/certs", // SLES10/SLES11, https://golang.org/issue/12139
"/system/etc/security/cacerts", // Android
}
func initSystemRoots() {
roots := x509.NewCertPool()
for _, file := range certFiles {
data, err := ioutil.ReadFile(file)
if err == nil {
roots.AppendCertsFromPEM(data)
systemRoots = roots
return
}
}
for _, directory := range certDirectories {
fis, err := ioutil.ReadDir(directory)
if err != nil {
continue
}
rootsAdded := false
for _, fi := range fis {
data, err := ioutil.ReadFile(directory + "/" + fi.Name())
if err == nil && roots.AppendCertsFromPEM(data) {
rootsAdded = true
}
}
if rootsAdded {
systemRoots = roots
return
}
}
// All of the files failed to load. systemRoots will be nil which will
// trigger a specific error at verification time.
}

40
vendor/github.com/jackspirou/syscerts/root_windows.go generated vendored Normal file

@ -0,0 +1,40 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package syscerts
import (
"crypto/x509"
"errors"
"syscall"
"unsafe"
)
// extractSimpleChain extracts the final certificate chain from a CertSimpleChain.
func extractSimpleChain(simpleChain **syscall.CertSimpleChain, count int) (chain []*x509.Certificate, err error) {
if simpleChain == nil || count == 0 {
return nil, errors.New("x509: invalid simple chain")
}
simpleChains := (*[1 << 20]*syscall.CertSimpleChain)(unsafe.Pointer(simpleChain))[:]
lastChain := simpleChains[count-1]
elements := (*[1 << 20]*syscall.CertChainElement)(unsafe.Pointer(lastChain.Elements))[:]
for i := 0; i < int(lastChain.NumElements); i++ {
// Copy the buf, since ParseCertificate does not create its own copy.
cert := elements[i].CertContext
encodedCert := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:]
buf := make([]byte, cert.Length)
copy(buf, encodedCert[:])
parsedCert, err := x509.ParseCertificate(buf)
if err != nil {
return nil, err
}
chain = append(chain, parsedCert)
}
return chain, nil
}
func initSystemRoots() {
}

Some files were not shown because too many files have changed in this diff.