mirror of https://github.com/Chipazawra/v8-1c-cluster-pde.git synced 2024-11-24 08:22:19 +02:00

refactor project struct

This commit is contained in:
Anton 2021-12-30 16:59:13 +03:00
parent a2d21e95c1
commit fab49c402d
823 changed files with 116 additions and 269298 deletions

3
.env
View File

@@ -4,7 +4,8 @@ RAS_HOST=192.168.10.10
RAS_PORT=2545
CLS_USER=admin
CLS_PASS=admin
MODE=push
MODE=pull
PULL_EXPOSE=9096
PUSH_INTERVAL=500
PUSH_HOST=pushgateway
PUSH_PORT=9091

4
go.mod
View File

@@ -12,8 +12,6 @@ require (
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/golang/protobuf v1.4.3 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.26.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
google.golang.org/protobuf v1.26.0-rc.1 // indirect
)
@@ -26,6 +24,8 @@ require (
github.com/mattn/go-isatty v0.0.12 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.11.0
github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.26.0
github.com/satori/go.uuid v1.2.0 // indirect
github.com/stretchr/testify v1.7.0 // indirect
github.com/xelaj/go-dry v0.0.0-20201004191957-aab3eecf0604 // indirect

View File

@@ -5,17 +5,15 @@ import (
"flag"
"fmt"
"log"
"net/http"
"os"
"os/signal"
"syscall"
"github.com/Chipazawra/v8-1c-cluster-pde/internal/puller"
pusher "github.com/Chipazawra/v8-1c-cluster-pde/internal/pusher"
"github.com/Chipazawra/v8-1c-cluster-pde/internal/rpHostsCollector"
"github.com/caarlos0/env"
rascli "github.com/khorevaa/ras-client"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
const (
@@ -24,11 +22,11 @@ const (
)
var (
conf Config
conf AppConfig
RAS_HOST string
RAS_PORT string
PULL_EXPOSE string
MODE string
PULL_EXPOSE string
PUSH_INTERVAL int
PUSH_HOST string
PUSH_PORT string
@@ -92,6 +90,8 @@ func Run() error {
fmt.Sprintf("%s:%s", conf.RAS_HOST, conf.RAS_PORT),
)
rhc := rpHostsCollector.New(rcli)
ctx, cancel := context.WithCancel(context.Background())
sigchan := make(chan os.Signal, 1)
@@ -101,17 +101,26 @@ func Run() error {
signal.Notify(sigchan, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
var collecter Collecter
switch conf.MODE {
case push:
go RunPusher(ctx, errchan, rcli)
collecter = pusher.New(rhc, pusher.WithConfig(
pusher.PusherConfig{
PUSH_INTERVAL: conf.PUSH_INTERVAL,
PUSH_HOST: conf.PUSH_HOST,
PUSH_PORT: conf.PUSH_PORT,
}))
case pull:
go RunPuller(ctx, errchan, rcli)
default:
{
errchan <- fmt.Errorf("v8-1c-cluster-pde: %v", "undefined mode")
}
collecter = puller.New(rhc, puller.WithConfig(
puller.PullerConfig{
PULL_EXPOSE: conf.PULL_EXPOSE,
}))
}
log.Printf("v8-1c-cluster-pde: runing in %v mode", conf.MODE)
go collecter.Run(ctx, errchan)
select {
case sig := <-sigchan:
cancel()
@@ -123,45 +132,6 @@ func Run() error {
}
}
func RunPuller(ctx context.Context, errchan chan<- error, rasapi rascli.Api) {
log.Printf("v8-1c-cluster-pde: runing in %v mode", conf.MODE)
promRegistry := prometheus.NewRegistry()
promRegistry.MustRegister(rpHostsCollector.New(rasapi))
mux := http.NewServeMux()
mux.Handle("/metrics",
promhttp.HandlerFor(promRegistry, promhttp.HandlerOpts{}),
)
srv := http.Server{
Addr: fmt.Sprintf("%s:%s", "", conf.PULL_EXPOSE),
Handler: mux,
}
go func() {
errchan <- srv.ListenAndServe()
}()
log.Printf("v8-1c-cluster-pde: listen %v", fmt.Sprintf("%s:%s", "", conf.PULL_EXPOSE))
<-ctx.Done()
if err := srv.Shutdown(context.Background()); err != nil {
errchan <- fmt.Errorf("v8-1c-cluster-pde: server shutdown with err: %v", err)
}
log.Printf("v8-1c-cluster-pde: server shutdown")
}
func RunPusher(ctx context.Context, errchan chan<- error, rasapi rascli.Api) {
log.Printf("v8-1c-cluster-pde: runing in %v mode %v\n",
conf.MODE, fmt.Sprintf("%s:%s", conf.PUSH_HOST, conf.PUSH_PORT))
go pusher.New(
rpHostsCollector.New(rasapi),
fmt.Sprintf("%s:%s", conf.PUSH_HOST, conf.PUSH_PORT),
pusher.WithInterval(500),
).Run(ctx, errchan)
}
type RASCollector interface {
Run(ctx context.Context, errchan chan<- error, rasapi rascli.Api)
type Collecter interface {
Run(ctx context.Context, errchan chan<- error)
}
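One practical consequence of this refactor, sketched below (illustrative, not part of the commit): because both modes now satisfy the same small Collecter interface, Run can be exercised without a RAS connection; stubCollecter is a hypothetical name.

```go
// Hypothetical test double for the new Collecter interface: it blocks until
// the context is cancelled, which lets Run's shutdown path be exercised
// without a RAS connection.
type stubCollecter struct{}

func (stubCollecter) Run(ctx context.Context, errchan chan<- error) {
	<-ctx.Done()
	errchan <- ctx.Err()
}
```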

View File

@@ -1,13 +1,13 @@
package app
type Config struct {
type AppConfig struct {
RAS_HOST string `env:"RAS_HOST" envDefault:"localhost"`
RAS_PORT string `env:"RAS_PORT" envDefault:"1545"`
CLS_USER string `env:"CLS_USER"`
CLS_PASS string `env:"CLS_PASS"`
PULL_EXPOSE string `env:"PULL_EXPOSE" envDefault:"9096"`
MODE string `env:"MODE" envDefault:"pull"`
PUSH_INTERVAL int `env:"PUSH_INTERVAL" envDefault:"500"`
PUSH_HOST string `env:"PUSH_HOST" envDefault:"localhost"`
PUSH_PORT string `env:"PUSH_PORT" envDefault:"9091"`
PULL_EXPOSE string `env:"PULL_EXPOSE" envDefault:"9096"`
}
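A minimal sketch of how a config struct like this is typically populated with the caarlos0/env package imported in app.go above (illustrative; the error handling is an assumption):

```go
// Sketch: fill AppConfig from the environment, falling back to envDefault tags.
conf := AppConfig{}
if err := env.Parse(&conf); err != nil {
	log.Fatalf("v8-1c-cluster-pde: config: %v", err)
}
// e.g. RAS_HOST=192.168.10.10 MODE=pull PULL_EXPOSE=9096 ./v8-1c-cluster-pde
```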

View File

@@ -0,0 +1,5 @@
package puller
type PullerConfig struct {
PULL_EXPOSE string `env:"PULL_EXPOSE" envDefault:"9096"`
}

64
internal/puller/puller.go Normal file
View File

@@ -0,0 +1,64 @@
package puller
import (
"context"
"fmt"
"log"
"net/http"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
type Puller struct {
collector prometheus.Collector
expose string
}
type PullerOption func(*Puller)
func WithConfig(config PullerConfig) PullerOption {
return func(p *Puller) {
p.expose = config.PULL_EXPOSE
}
}
func New(collector prometheus.Collector, opts ...PullerOption) *Puller {
p := &Puller{
collector: collector,
}
for _, opt := range opts {
opt(p)
}
return p
}
func (p *Puller) Run(ctx context.Context, errchan chan<- error) {
promRegistry := prometheus.NewRegistry()
promRegistry.MustRegister(p.collector)
mux := http.NewServeMux()
mux.Handle("/metrics",
promhttp.HandlerFor(promRegistry, promhttp.HandlerOpts{}),
)
srv := http.Server{
Addr: fmt.Sprintf("%s:%s", "", p.expose),
Handler: mux,
}
go func() {
errchan <- srv.ListenAndServe()
}()
log.Printf("v8-1c-cluster-pde: puller listen %v", fmt.Sprintf("%s:%s", "", p.expose))
<-ctx.Done()
if err := srv.Shutdown(context.Background()); err != nil {
errchan <- fmt.Errorf("v8-1c-cluster-pde: puller server shutdown with err: %v", err)
}
log.Printf("v8-1c-cluster-pde: puller server shutdown")
}
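For context, the Puller is not tied to the cluster collector: any prometheus.Collector can be exposed through it. A hypothetical sketch:

```go
// Hypothetical: expose Go runtime metrics through the new Puller on :9096.
p := puller.New(prometheus.NewGoCollector(), puller.WithConfig(puller.PullerConfig{
	PULL_EXPOSE: "9096",
}))
errchan := make(chan error, 1)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go p.Run(ctx, errchan)
```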

View File

@@ -0,0 +1,7 @@
package pusher
type PusherConfig struct {
PUSH_INTERVAL int `env:"PUSH_INTERVAL" envDefault:"500"`
PUSH_HOST string `env:"PUSH_HOST" envDefault:"localhost"`
PUSH_PORT string `env:"PUSH_PORT" envDefault:"9091"`
}

View File

@@ -37,12 +37,17 @@ func WithJobName(Name string) PusherOption {
}
}
func New(collector prometheus.Collector, url string, opts ...PusherOption) *Pusher {
func WithConfig(config PusherConfig) PusherOption {
return func(p *Pusher) {
p.url = fmt.Sprintf("%s:%s", config.PUSH_HOST, config.PUSH_PORT)
p.intervalMillis = config.PUSH_INTERVAL
}
}
func New(collector prometheus.Collector, opts ...PusherOption) *Pusher {
p := &Pusher{
collector: collector,
intervalMillis: defaultIntervalMillis,
url: url,
jobName: defaultJobname,
}
@@ -50,13 +55,13 @@ func New(collector prometheus.Collector, url string, opts ...PusherOption) *Push
opt(p)
}
p.pusher = push.New(url, p.jobName).Collector(collector)
p.pusher = push.New(p.url, p.jobName).Collector(collector)
return p
}
func (p *Pusher) Run(ctx context.Context, errchan chan<- error) {
log.Printf("v8-1c-cluster-pde: pusher %v", p.url)
ticker := time.NewTicker(time.Duration(p.intervalMillis * int(time.Microsecond)))
Loop:
for {

BIN
main

Binary file not shown.

View File

@@ -1,20 +0,0 @@
Copyright (C) 2013 Blake Mizerany
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

File diff suppressed because it is too large.

View File

@@ -1,316 +0,0 @@
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile
import (
"math"
"sort"
)
// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
Value float64 `json:",string"`
Width float64 `json:",string"`
Delta float64 `json:",string"`
}
// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample
func (a Samples) Len() int { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
type invariant func(s *stream, r float64) float64
// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
ƒ := func(s *stream, r float64) float64 {
return 2 * epsilon * r
}
return newStream(ƒ)
}
// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
ƒ := func(s *stream, r float64) float64 {
return 2 * epsilon * (s.n - r)
}
return newStream(ƒ)
}
// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targetMap map[float64]float64) *Stream {
// Convert map to slice to avoid slow iterations on a map.
// ƒ is called on the hot path, so converting the map to a slice
// beforehand results in significant CPU savings.
targets := targetMapToSlice(targetMap)
ƒ := func(s *stream, r float64) float64 {
var m = math.MaxFloat64
var f float64
for _, t := range targets {
if t.quantile*s.n <= r {
f = (2 * t.epsilon * r) / t.quantile
} else {
f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
}
if f < m {
m = f
}
}
return m
}
return newStream(ƒ)
}
type target struct {
quantile float64
epsilon float64
}
func targetMapToSlice(targetMap map[float64]float64) []target {
targets := make([]target, 0, len(targetMap))
for quantile, epsilon := range targetMap {
t := target{
quantile: quantile,
epsilon: epsilon,
}
targets = append(targets, t)
}
return targets
}
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
*stream
b Samples
sorted bool
}
func newStream(ƒ invariant) *Stream {
x := &stream{ƒ: ƒ}
return &Stream{x, make(Samples, 0, 500), true}
}
// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
s.insert(Sample{Value: v, Width: 1})
}
func (s *Stream) insert(sample Sample) {
s.b = append(s.b, sample)
s.sorted = false
if len(s.b) == cap(s.b) {
s.flush()
}
}
// Query returns the computed qth percentiles value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
if !s.flushed() {
// Fast path when there hasn't been enough data for a flush;
// this also yields better accuracy for small sets of data.
l := len(s.b)
if l == 0 {
return 0
}
i := int(math.Ceil(float64(l) * q))
if i > 0 {
i -= 1
}
s.maybeSort()
return s.b[i].Value
}
s.flush()
return s.stream.query(q)
}
// Merge merges samples into the underlying streams samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
sort.Sort(samples)
s.stream.merge(samples)
}
// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
s.stream.reset()
s.b = s.b[:0]
}
// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
if !s.flushed() {
return s.b
}
s.flush()
return s.stream.samples()
}
// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
return len(s.b) + s.stream.count()
}
func (s *Stream) flush() {
s.maybeSort()
s.stream.merge(s.b)
s.b = s.b[:0]
}
func (s *Stream) maybeSort() {
if !s.sorted {
s.sorted = true
sort.Sort(s.b)
}
}
func (s *Stream) flushed() bool {
return len(s.stream.l) > 0
}
type stream struct {
n float64
l []Sample
ƒ invariant
}
func (s *stream) reset() {
s.l = s.l[:0]
s.n = 0
}
func (s *stream) insert(v float64) {
s.merge(Samples{{v, 1, 0}})
}
func (s *stream) merge(samples Samples) {
// TODO(beorn7): This tries to merge not only individual samples, but
// whole summaries. The paper doesn't mention merging summaries at
// all. Unittests show that the merging is inaccurate. Find out how to
// do merges properly.
var r float64
i := 0
for _, sample := range samples {
for ; i < len(s.l); i++ {
c := s.l[i]
if c.Value > sample.Value {
// Insert at position i.
s.l = append(s.l, Sample{})
copy(s.l[i+1:], s.l[i:])
s.l[i] = Sample{
sample.Value,
sample.Width,
math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
// TODO(beorn7): How to calculate delta correctly?
}
i++
goto inserted
}
r += c.Width
}
s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
i++
inserted:
s.n += sample.Width
r += sample.Width
}
s.compress()
}
func (s *stream) count() int {
return int(s.n)
}
func (s *stream) query(q float64) float64 {
t := math.Ceil(q * s.n)
t += math.Ceil(s.ƒ(s, t) / 2)
p := s.l[0]
var r float64
for _, c := range s.l[1:] {
r += p.Width
if r+c.Width+c.Delta > t {
return p.Value
}
p = c
}
return p.Value
}
func (s *stream) compress() {
if len(s.l) < 2 {
return
}
x := s.l[len(s.l)-1]
xi := len(s.l) - 1
r := s.n - 1 - x.Width
for i := len(s.l) - 2; i >= 0; i-- {
c := s.l[i]
if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
x.Width += c.Width
s.l[xi] = x
// Remove element at i.
copy(s.l[i:], s.l[i+1:])
s.l = s.l[:len(s.l)-1]
xi -= 1
} else {
x = c
xi = i
}
r -= c.Width
}
}
func (s *stream) samples() Samples {
samples := make(Samples, len(s.l))
copy(samples, s.l)
return samples
}
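To make the package comment at the top of this file concrete, a small usage sketch (illustrative only, using the exported API shown above):

```go
// Sketch: track observed latencies and query pre-declared target quantiles.
s := quantile.NewTargeted(map[float64]float64{
	0.50: 0.005, // median, ±0.5 percentile absolute error
	0.99: 0.001, // 99th percentile, ±0.1 percentile absolute error
})
for _, v := range []float64{0.12, 0.35, 0.08, 0.91, 0.44} {
	s.Insert(v)
}
fmt.Println(s.Query(0.50), s.Query(0.99), s.Count())
```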

View File

@@ -1 +0,0 @@
coverage.out

View File

@@ -1,2 +0,0 @@
go:
enabled: true

View File

@@ -1,19 +0,0 @@
language: go
go:
- 1.5
- 1.6
- 1.7
- 1.8
- 1.9
- '1.10.x'
- '1.11.x'
- tip
before_install:
- go get github.com/axw/gocov/gocov
- go get github.com/mattn/goveralls
- go get golang.org/x/tools/cmd/cover
script:
- go test -v -cover -race -coverprofile=coverage.out
after_script:
- go get github.com/mattn/goveralls
- goveralls -coverprofile=coverage.out -service=travis-ci -repotoken='eCcizKmTdSaJCz8Ih33WDppdqb9kioYwi'

View File

@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2015-2016 Carlos Alexandro Becker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,119 +0,0 @@
# env [![Build Status](https://travis-ci.org/caarlos0/env.svg?branch=master)](https://travis-ci.org/caarlos0/env) [![Coverage Status](https://coveralls.io/repos/caarlos0/env/badge.svg?branch=master&service=github)](https://coveralls.io/github/caarlos0/env?branch=master) [![](https://godoc.org/github.com/caarlos0/env?status.svg)](http://godoc.org/github.com/caarlos0/env) [![](http://goreportcard.com/badge/caarlos0/env)](http://goreportcard.com/report/caarlos0/env) [![SayThanks.io](https://img.shields.io/badge/SayThanks.io-%E2%98%BC-1EAEDB.svg?style=flat-square)](https://saythanks.io/to/caarlos0)
A KISS way to deal with environment variables in Go.
## Why
At first, it was boring for me to write down an entire function just to
get some `var` from the environment and default to another in case it's missing.
For that manner, I wrote a `GetOr` function in the
[go-idioms](https://github.com/caarlos0/go-idioms) project.
Then, I got pissed about writing `os.Getenv`, `os.Setenv`, `os.Unsetenv`...
it kind of make more sense to me write it as `env.Get`, `env.Set`, `env.Unset`.
So I did.
Then I got a better idea: to use `struct` tags to do all that work for me.
## Example
A very basic example (check the `examples` folder):
```go
package main
import (
"fmt"
"time"
"github.com/caarlos0/env"
)
type config struct {
Home string `env:"HOME"`
Port int `env:"PORT" envDefault:"3000"`
IsProduction bool `env:"PRODUCTION"`
Hosts []string `env:"HOSTS" envSeparator:":"`
Duration time.Duration `env:"DURATION"`
TempFolder string `env:"TEMP_FOLDER" envDefault:"${HOME}/tmp" envExpand:"true"`
}
func main() {
cfg := config{}
err := env.Parse(&cfg)
if err != nil {
fmt.Printf("%+v\n", err)
}
fmt.Printf("%+v\n", cfg)
}
```
You can run it like this:
```sh
$ PRODUCTION=true HOSTS="host1:host2:host3" DURATION=1s go run examples/first.go
{Home:/your/home Port:3000 IsProduction:true Hosts:[host1 host2 host3] Duration:1s}
```
## Supported types and defaults
The library has built-in support for the following types:
* `string`
* `int`
* `uint`
* `int64`
* `bool`
* `float32`
* `float64`
* `time.Duration`
* `[]string`
* `[]int`
* `[]bool`
* `[]float32`
* `[]float64`
* `[]time.Duration`
* .. or use/define a [custom parser func](#custom-parser-funcs) for any other type
If you set the `envDefault` tag for something, this value will be used in the
case of absence of it in the environment. If you don't do that AND the
environment variable is also not set, the zero-value
of the type will be used: empty for `string`s, `false` for `bool`s
and `0` for `int`s.
By default, slice types will split the environment value on `,`; you can change this behavior by setting the `envSeparator` tag.
If you set the `envExpand` tag, environment variables (either in `${var}` or `$var` format)
in the string will be replaced according with the actual value of the variable.
## Custom Parser Funcs
If you have a type that is not supported out of the box by the lib, you are able
to use (or define) and pass custom parsers (and their associated `reflect.Type`) to the
`env.ParseWithFuncs()` function.
In addition to accepting a struct pointer (same as `Parse()`), this function also
accepts a `env.CustomParsers` arg that under the covers is a `map[reflect.Type]env.ParserFunc`.
To see what this looks like in practice, take a look at the [commented block in the example](https://github.com/caarlos0/env/blob/master/examples/first.go#L35-L39).
`env` also ships with some pre-built custom parser funcs for common types. You
can check them out [here](parsers/).
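Since the section above describes `env.ParseWithFuncs()` without an inline example, here is a minimal sketch (not from the original README) that registers a parser for `url.URL`:

```go
package main

import (
	"fmt"
	"net/url"
	"reflect"

	"github.com/caarlos0/env"
)

type config struct {
	Endpoint url.URL `env:"ENDPOINT"`
}

func main() {
	// Map the target type to a ParserFunc; env consults it before the built-ins.
	parsers := env.CustomParsers{
		reflect.TypeOf(url.URL{}): func(v string) (interface{}, error) {
			u, err := url.Parse(v)
			if err != nil {
				return nil, err
			}
			return *u, nil
		},
	}
	cfg := config{}
	if err := env.ParseWithFuncs(&cfg, parsers); err != nil {
		fmt.Printf("%+v\n", err)
	}
	fmt.Printf("%+v\n", cfg.Endpoint)
}
```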
## Required fields
The `env` tag option `required` (e.g., `env:"tagKey,required"`) can be added
to ensure that some environment variable is set. In the example above,
an error is returned if the `config` struct is changed to:
```go
type config struct {
Home string `env:"HOME"`
Port int `env:"PORT" envDefault:"3000"`
IsProduction bool `env:"PRODUCTION"`
Hosts []string `env:"HOSTS" envSeparator:":"`
SecretKey string `env:"SECRET_KEY,required"`
}
```

436
vendor/github.com/caarlos0/env/env.go generated vendored
View File

@@ -1,436 +0,0 @@
package env
import (
"encoding"
"errors"
"fmt"
"os"
"reflect"
"strconv"
"strings"
"time"
)
var (
// ErrNotAStructPtr is returned if you pass something that is not a pointer to a
// Struct to Parse
ErrNotAStructPtr = errors.New("Expected a pointer to a Struct")
// ErrUnsupportedType if the struct field type is not supported by env
ErrUnsupportedType = errors.New("Type is not supported")
// ErrUnsupportedSliceType if the slice element type is not supported by env
ErrUnsupportedSliceType = errors.New("Unsupported slice type")
// OnEnvVarSet is an optional convenience callback, such as for logging purposes.
// If not nil, it's called after successfully setting the given field from the given value.
OnEnvVarSet func(reflect.StructField, string)
// Friendly names for reflect types
sliceOfInts = reflect.TypeOf([]int(nil))
sliceOfInt64s = reflect.TypeOf([]int64(nil))
sliceOfUint64s = reflect.TypeOf([]uint64(nil))
sliceOfStrings = reflect.TypeOf([]string(nil))
sliceOfBools = reflect.TypeOf([]bool(nil))
sliceOfFloat32s = reflect.TypeOf([]float32(nil))
sliceOfFloat64s = reflect.TypeOf([]float64(nil))
sliceOfDurations = reflect.TypeOf([]time.Duration(nil))
)
// CustomParsers is a friendly name for the type that `ParseWithFuncs()` accepts
type CustomParsers map[reflect.Type]ParserFunc
// ParserFunc defines the signature of a function that can be used within `CustomParsers`
type ParserFunc func(v string) (interface{}, error)
// Parse parses a struct containing `env` tags and loads its values from
// environment variables.
func Parse(v interface{}) error {
ptrRef := reflect.ValueOf(v)
if ptrRef.Kind() != reflect.Ptr {
return ErrNotAStructPtr
}
ref := ptrRef.Elem()
if ref.Kind() != reflect.Struct {
return ErrNotAStructPtr
}
return doParse(ref, make(map[reflect.Type]ParserFunc, 0))
}
// ParseWithFuncs is the same as `Parse` except it also allows the user to pass
// in custom parsers.
func ParseWithFuncs(v interface{}, funcMap CustomParsers) error {
ptrRef := reflect.ValueOf(v)
if ptrRef.Kind() != reflect.Ptr {
return ErrNotAStructPtr
}
ref := ptrRef.Elem()
if ref.Kind() != reflect.Struct {
return ErrNotAStructPtr
}
return doParse(ref, funcMap)
}
func doParse(ref reflect.Value, funcMap CustomParsers) error {
refType := ref.Type()
var errorList []string
for i := 0; i < refType.NumField(); i++ {
refField := ref.Field(i)
if reflect.Ptr == refField.Kind() && !refField.IsNil() && refField.CanSet() {
err := Parse(refField.Interface())
if nil != err {
return err
}
continue
}
refTypeField := refType.Field(i)
value, err := get(refTypeField)
if err != nil {
errorList = append(errorList, err.Error())
continue
}
if value == "" {
continue
}
if err := set(refField, refTypeField, value, funcMap); err != nil {
errorList = append(errorList, err.Error())
continue
}
if OnEnvVarSet != nil {
OnEnvVarSet(refTypeField, value)
}
}
if len(errorList) == 0 {
return nil
}
return errors.New(strings.Join(errorList, ". "))
}
func get(field reflect.StructField) (string, error) {
var (
val string
err error
)
key, opts := parseKeyForOption(field.Tag.Get("env"))
defaultValue := field.Tag.Get("envDefault")
val = getOr(key, defaultValue)
expandVar := field.Tag.Get("envExpand")
if strings.ToLower(expandVar) == "true" {
val = os.ExpandEnv(val)
}
if len(opts) > 0 {
for _, opt := range opts {
// The only option supported is "required".
switch opt {
case "":
break
case "required":
val, err = getRequired(key)
default:
err = fmt.Errorf("env tag option %q not supported", opt)
}
}
}
return val, err
}
// split the env tag's key into the expected key and desired option, if any.
func parseKeyForOption(key string) (string, []string) {
opts := strings.Split(key, ",")
return opts[0], opts[1:]
}
func getRequired(key string) (string, error) {
if value, ok := os.LookupEnv(key); ok {
return value, nil
}
return "", fmt.Errorf("required environment variable %q is not set", key)
}
func getOr(key, defaultValue string) string {
value, ok := os.LookupEnv(key)
if ok {
return value
}
return defaultValue
}
func set(field reflect.Value, refType reflect.StructField, value string, funcMap CustomParsers) error {
// use custom parser if configured for this type
parserFunc, ok := funcMap[refType.Type]
if ok {
val, err := parserFunc(value)
if err != nil {
return fmt.Errorf("Custom parser error: %v", err)
}
field.Set(reflect.ValueOf(val))
return nil
}
// fall back to built-in parsers
switch field.Kind() {
case reflect.Slice:
separator := refType.Tag.Get("envSeparator")
return handleSlice(field, value, separator)
case reflect.String:
field.SetString(value)
case reflect.Bool:
bvalue, err := strconv.ParseBool(value)
if err != nil {
return err
}
field.SetBool(bvalue)
case reflect.Int:
intValue, err := strconv.ParseInt(value, 10, 32)
if err != nil {
return err
}
field.SetInt(intValue)
case reflect.Uint:
uintValue, err := strconv.ParseUint(value, 10, 32)
if err != nil {
return err
}
field.SetUint(uintValue)
case reflect.Float32:
v, err := strconv.ParseFloat(value, 32)
if err != nil {
return err
}
field.SetFloat(v)
case reflect.Float64:
v, err := strconv.ParseFloat(value, 64)
if err != nil {
return err
}
field.Set(reflect.ValueOf(v))
case reflect.Int64:
if refType.Type.String() == "time.Duration" {
dValue, err := time.ParseDuration(value)
if err != nil {
return err
}
field.Set(reflect.ValueOf(dValue))
} else {
intValue, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return err
}
field.SetInt(intValue)
}
case reflect.Uint64:
uintValue, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return err
}
field.SetUint(uintValue)
default:
return handleTextUnmarshaler(field, value)
}
return nil
}
func handleSlice(field reflect.Value, value, separator string) error {
if separator == "" {
separator = ","
}
splitData := strings.Split(value, separator)
switch field.Type() {
case sliceOfStrings:
field.Set(reflect.ValueOf(splitData))
case sliceOfInts:
intData, err := parseInts(splitData)
if err != nil {
return err
}
field.Set(reflect.ValueOf(intData))
case sliceOfInt64s:
int64Data, err := parseInt64s(splitData)
if err != nil {
return err
}
field.Set(reflect.ValueOf(int64Data))
case sliceOfUint64s:
uint64Data, err := parseUint64s(splitData)
if err != nil {
return err
}
field.Set(reflect.ValueOf(uint64Data))
case sliceOfFloat32s:
data, err := parseFloat32s(splitData)
if err != nil {
return err
}
field.Set(reflect.ValueOf(data))
case sliceOfFloat64s:
data, err := parseFloat64s(splitData)
if err != nil {
return err
}
field.Set(reflect.ValueOf(data))
case sliceOfBools:
boolData, err := parseBools(splitData)
if err != nil {
return err
}
field.Set(reflect.ValueOf(boolData))
case sliceOfDurations:
durationData, err := parseDurations(splitData)
if err != nil {
return err
}
field.Set(reflect.ValueOf(durationData))
default:
elemType := field.Type().Elem()
// Ensure we test *type as we can always address elements in a slice.
if elemType.Kind() == reflect.Ptr {
elemType = elemType.Elem()
}
if _, ok := reflect.New(elemType).Interface().(encoding.TextUnmarshaler); !ok {
return ErrUnsupportedSliceType
}
return parseTextUnmarshalers(field, splitData)
}
return nil
}
func handleTextUnmarshaler(field reflect.Value, value string) error {
if reflect.Ptr == field.Kind() {
if field.IsNil() {
field.Set(reflect.New(field.Type().Elem()))
}
} else if field.CanAddr() {
field = field.Addr()
}
tm, ok := field.Interface().(encoding.TextUnmarshaler)
if !ok {
return ErrUnsupportedType
}
return tm.UnmarshalText([]byte(value))
}
func parseInts(data []string) ([]int, error) {
intSlice := make([]int, 0, len(data))
for _, v := range data {
intValue, err := strconv.ParseInt(v, 10, 32)
if err != nil {
return nil, err
}
intSlice = append(intSlice, int(intValue))
}
return intSlice, nil
}
func parseInt64s(data []string) ([]int64, error) {
intSlice := make([]int64, 0, len(data))
for _, v := range data {
intValue, err := strconv.ParseInt(v, 10, 64)
if err != nil {
return nil, err
}
intSlice = append(intSlice, int64(intValue))
}
return intSlice, nil
}
func parseUint64s(data []string) ([]uint64, error) {
var uintSlice []uint64
for _, v := range data {
uintValue, err := strconv.ParseUint(v, 10, 64)
if err != nil {
return nil, err
}
uintSlice = append(uintSlice, uint64(uintValue))
}
return uintSlice, nil
}
func parseFloat32s(data []string) ([]float32, error) {
float32Slice := make([]float32, 0, len(data))
for _, v := range data {
data, err := strconv.ParseFloat(v, 32)
if err != nil {
return nil, err
}
float32Slice = append(float32Slice, float32(data))
}
return float32Slice, nil
}
func parseFloat64s(data []string) ([]float64, error) {
float64Slice := make([]float64, 0, len(data))
for _, v := range data {
data, err := strconv.ParseFloat(v, 64)
if err != nil {
return nil, err
}
float64Slice = append(float64Slice, float64(data))
}
return float64Slice, nil
}
func parseBools(data []string) ([]bool, error) {
boolSlice := make([]bool, 0, len(data))
for _, v := range data {
bvalue, err := strconv.ParseBool(v)
if err != nil {
return nil, err
}
boolSlice = append(boolSlice, bvalue)
}
return boolSlice, nil
}
func parseDurations(data []string) ([]time.Duration, error) {
durationSlice := make([]time.Duration, 0, len(data))
for _, v := range data {
dvalue, err := time.ParseDuration(v)
if err != nil {
return nil, err
}
durationSlice = append(durationSlice, dvalue)
}
return durationSlice, nil
}
func parseTextUnmarshalers(field reflect.Value, data []string) error {
s := len(data)
elemType := field.Type().Elem()
slice := reflect.MakeSlice(reflect.SliceOf(elemType), s, s)
for i, v := range data {
sv := slice.Index(i)
kind := sv.Kind()
if kind == reflect.Ptr {
sv = reflect.New(elemType.Elem())
} else {
sv = sv.Addr()
}
tm := sv.Interface().(encoding.TextUnmarshaler)
if err := tm.UnmarshalText([]byte(v)); err != nil {
return err
}
if kind == reflect.Ptr {
slice.Index(i).Set(sv)
}
}
field.Set(slice)
return nil
}

View File

@@ -1,8 +0,0 @@
language: go
go:
- "1.x"
- master
env:
- TAGS=""
- TAGS="-tags purego"
script: go test $TAGS -v ./...

View File

@@ -1,22 +0,0 @@
Copyright (c) 2016 Caleb Spare
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -1,67 +0,0 @@
# xxhash
[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.
This package provides a straightforward API:
```
func Sum64(b []byte) uint64
func Sum64String(s string) uint64
type Digest struct{ ... }
func New() *Digest
```
The `Digest` type implements hash.Hash64. Its key methods are:
```
func (*Digest) Write([]byte) (int, error)
func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```
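A brief usage sketch (not part of the upstream README), assuming only the API listed above:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing.
	fmt.Println(xxhash.Sum64String("hello"))

	// Streaming via Digest (implements hash.Hash64).
	d := xxhash.New()
	d.WriteString("hello, ")
	d.Write([]byte("world"))
	fmt.Println(d.Sum64())
}
```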
This implementation provides a fast pure-Go implementation and an even faster
assembly implementation for amd64.
## Compatibility
This package is in a module and the latest code is in version 2 of the module.
You need a version of Go with at least "minimal module compatibility" to use
github.com/cespare/xxhash/v2:
* 1.9.7+ for Go 1.9
* 1.10.3+ for Go 1.10
* Go 1.11 or later
I recommend using the latest release of Go.
## Benchmarks
Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.
| input size | purego | asm |
| --- | --- | --- |
| 5 B | 979.66 MB/s | 1291.17 MB/s |
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
the following commands under Go 1.11.2:
```
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
```
## Projects using this package
- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
- [FreeCache](https://github.com/coocood/freecache)

View File

@@ -1,236 +0,0 @@
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
// at http://cyan4973.github.io/xxHash/.
package xxhash
import (
"encoding/binary"
"errors"
"math/bits"
)
const (
prime1 uint64 = 11400714785074694791
prime2 uint64 = 14029467366897019727
prime3 uint64 = 1609587929392839161
prime4 uint64 = 9650029242287828579
prime5 uint64 = 2870177450012600261
)
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
// possible in the Go code is worth a small (but measurable) performance boost
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
// convenience in the Go code in a few places where we need to intentionally
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
// result overflows a uint64).
var (
prime1v = prime1
prime2v = prime2
prime3v = prime3
prime4v = prime4
prime5v = prime5
)
// Digest implements hash.Hash64.
type Digest struct {
v1 uint64
v2 uint64
v3 uint64
v4 uint64
total uint64
mem [32]byte
n int // how much of mem is used
}
// New creates a new Digest that computes the 64-bit xxHash algorithm.
func New() *Digest {
var d Digest
d.Reset()
return &d
}
// Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() {
d.v1 = prime1v + prime2
d.v2 = prime2
d.v3 = 0
d.v4 = -prime1v
d.total = 0
d.n = 0
}
// Size always returns 8 bytes.
func (d *Digest) Size() int { return 8 }
// BlockSize always returns 32 bytes.
func (d *Digest) BlockSize() int { return 32 }
// Write adds more data to d. It always returns len(b), nil.
func (d *Digest) Write(b []byte) (n int, err error) {
n = len(b)
d.total += uint64(n)
if d.n+n < 32 {
// This new data doesn't even fill the current block.
copy(d.mem[d.n:], b)
d.n += n
return
}
if d.n > 0 {
// Finish off the partial block.
copy(d.mem[d.n:], b)
d.v1 = round(d.v1, u64(d.mem[0:8]))
d.v2 = round(d.v2, u64(d.mem[8:16]))
d.v3 = round(d.v3, u64(d.mem[16:24]))
d.v4 = round(d.v4, u64(d.mem[24:32]))
b = b[32-d.n:]
d.n = 0
}
if len(b) >= 32 {
// One or more full blocks left.
nw := writeBlocks(d, b)
b = b[nw:]
}
// Store any remaining partial block.
copy(d.mem[:], b)
d.n = len(b)
return
}
// Sum appends the current hash to b and returns the resulting slice.
func (d *Digest) Sum(b []byte) []byte {
s := d.Sum64()
return append(
b,
byte(s>>56),
byte(s>>48),
byte(s>>40),
byte(s>>32),
byte(s>>24),
byte(s>>16),
byte(s>>8),
byte(s),
)
}
// Sum64 returns the current hash.
func (d *Digest) Sum64() uint64 {
var h uint64
if d.total >= 32 {
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
h = mergeRound(h, v1)
h = mergeRound(h, v2)
h = mergeRound(h, v3)
h = mergeRound(h, v4)
} else {
h = d.v3 + prime5
}
h += d.total
i, end := 0, d.n
for ; i+8 <= end; i += 8 {
k1 := round(0, u64(d.mem[i:i+8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
if i+4 <= end {
h ^= uint64(u32(d.mem[i:i+4])) * prime1
h = rol23(h)*prime2 + prime3
i += 4
}
for i < end {
h ^= uint64(d.mem[i]) * prime5
h = rol11(h) * prime1
i++
}
h ^= h >> 33
h *= prime2
h ^= h >> 29
h *= prime3
h ^= h >> 32
return h
}
const (
magic = "xxh\x06"
marshaledSize = len(magic) + 8*5 + 32
)
// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (d *Digest) MarshalBinary() ([]byte, error) {
b := make([]byte, 0, marshaledSize)
b = append(b, magic...)
b = appendUint64(b, d.v1)
b = appendUint64(b, d.v2)
b = appendUint64(b, d.v3)
b = appendUint64(b, d.v4)
b = appendUint64(b, d.total)
b = append(b, d.mem[:d.n]...)
b = b[:len(b)+len(d.mem)-d.n]
return b, nil
}
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (d *Digest) UnmarshalBinary(b []byte) error {
if len(b) < len(magic) || string(b[:len(magic)]) != magic {
return errors.New("xxhash: invalid hash state identifier")
}
if len(b) != marshaledSize {
return errors.New("xxhash: invalid hash state size")
}
b = b[len(magic):]
b, d.v1 = consumeUint64(b)
b, d.v2 = consumeUint64(b)
b, d.v3 = consumeUint64(b)
b, d.v4 = consumeUint64(b)
b, d.total = consumeUint64(b)
copy(d.mem[:], b)
b = b[len(d.mem):]
d.n = int(d.total % uint64(len(d.mem)))
return nil
}
func appendUint64(b []byte, x uint64) []byte {
var a [8]byte
binary.LittleEndian.PutUint64(a[:], x)
return append(b, a[:]...)
}
func consumeUint64(b []byte) ([]byte, uint64) {
x := u64(b)
return b[8:], x
}
func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
func round(acc, input uint64) uint64 {
acc += input * prime2
acc = rol31(acc)
acc *= prime1
return acc
}
func mergeRound(acc, val uint64) uint64 {
val = round(0, val)
acc ^= val
acc = acc*prime1 + prime4
return acc
}
func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }

View File

@@ -1,13 +0,0 @@
// +build !appengine
// +build gc
// +build !purego
package xxhash
// Sum64 computes the 64-bit xxHash digest of b.
//
//go:noescape
func Sum64(b []byte) uint64
//go:noescape
func writeBlocks(d *Digest, b []byte) int

View File

@@ -1,215 +0,0 @@
// +build !appengine
// +build gc
// +build !purego
#include "textflag.h"
// Register allocation:
// AX h
// CX pointer to advance through b
// DX n
// BX loop end
// R8 v1, k1
// R9 v2
// R10 v3
// R11 v4
// R12 tmp
// R13 prime1v
// R14 prime2v
// R15 prime4v
// round reads from and advances the buffer pointer in CX.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
MOVQ (CX), R12 \
ADDQ $8, CX \
IMULQ R14, R12 \
ADDQ R12, r \
ROLQ $31, r \
IMULQ R13, r
// mergeRound applies a merge round on the two registers acc and val.
// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
#define mergeRound(acc, val) \
IMULQ R14, val \
ROLQ $31, val \
IMULQ R13, val \
XORQ val, acc \
IMULQ R13, acc \
ADDQ R15, acc
// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
// Load fixed primes.
MOVQ ·prime1v(SB), R13
MOVQ ·prime2v(SB), R14
MOVQ ·prime4v(SB), R15
// Load slice.
MOVQ b_base+0(FP), CX
MOVQ b_len+8(FP), DX
LEAQ (CX)(DX*1), BX
// The first loop limit will be len(b)-32.
SUBQ $32, BX
// Check whether we have at least one block.
CMPQ DX, $32
JLT noBlocks
// Set up initial state (v1, v2, v3, v4).
MOVQ R13, R8
ADDQ R14, R8
MOVQ R14, R9
XORQ R10, R10
XORQ R11, R11
SUBQ R13, R11
// Loop until CX > BX.
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
CMPQ CX, BX
JLE blockLoop
MOVQ R8, AX
ROLQ $1, AX
MOVQ R9, R12
ROLQ $7, R12
ADDQ R12, AX
MOVQ R10, R12
ROLQ $12, R12
ADDQ R12, AX
MOVQ R11, R12
ROLQ $18, R12
ADDQ R12, AX
mergeRound(AX, R8)
mergeRound(AX, R9)
mergeRound(AX, R10)
mergeRound(AX, R11)
JMP afterBlocks
noBlocks:
MOVQ ·prime5v(SB), AX
afterBlocks:
ADDQ DX, AX
// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
ADDQ $24, BX
CMPQ CX, BX
JG fourByte
wordLoop:
// Calculate k1.
MOVQ (CX), R8
ADDQ $8, CX
IMULQ R14, R8
ROLQ $31, R8
IMULQ R13, R8
XORQ R8, AX
ROLQ $27, AX
IMULQ R13, AX
ADDQ R15, AX
CMPQ CX, BX
JLE wordLoop
fourByte:
ADDQ $4, BX
CMPQ CX, BX
JG singles
MOVL (CX), R8
ADDQ $4, CX
IMULQ R13, R8
XORQ R8, AX
ROLQ $23, AX
IMULQ R14, AX
ADDQ ·prime3v(SB), AX
singles:
ADDQ $4, BX
CMPQ CX, BX
JGE finalize
singlesLoop:
MOVBQZX (CX), R12
ADDQ $1, CX
IMULQ ·prime5v(SB), R12
XORQ R12, AX
ROLQ $11, AX
IMULQ R13, AX
CMPQ CX, BX
JL singlesLoop
finalize:
MOVQ AX, R12
SHRQ $33, R12
XORQ R12, AX
IMULQ R14, AX
MOVQ AX, R12
SHRQ $29, R12
XORQ R12, AX
IMULQ ·prime3v(SB), AX
MOVQ AX, R12
SHRQ $32, R12
XORQ R12, AX
MOVQ AX, ret+24(FP)
RET
// writeBlocks uses the same registers as above except that it uses AX to store
// the d pointer.
// func writeBlocks(d *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
// Load fixed primes needed for round.
MOVQ ·prime1v(SB), R13
MOVQ ·prime2v(SB), R14
// Load slice.
MOVQ b_base+8(FP), CX
MOVQ b_len+16(FP), DX
LEAQ (CX)(DX*1), BX
SUBQ $32, BX
// Load vN from d.
MOVQ d+0(FP), AX
MOVQ 0(AX), R8 // v1
MOVQ 8(AX), R9 // v2
MOVQ 16(AX), R10 // v3
MOVQ 24(AX), R11 // v4
// We don't need to check the loop condition here; this function is
// always called with at least one block of data to process.
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
CMPQ CX, BX
JLE blockLoop
// Copy vN back to d.
MOVQ R8, 0(AX)
MOVQ R9, 8(AX)
MOVQ R10, 16(AX)
MOVQ R11, 24(AX)
// The number of bytes written is CX minus the old base pointer.
SUBQ b_base+8(FP), CX
MOVQ CX, ret+32(FP)
RET

View File

@@ -1,76 +0,0 @@
// +build !amd64 appengine !gc purego
package xxhash
// Sum64 computes the 64-bit xxHash digest of b.
func Sum64(b []byte) uint64 {
// A simpler version would be
// d := New()
// d.Write(b)
// return d.Sum64()
// but this is faster, particularly for small inputs.
n := len(b)
var h uint64
if n >= 32 {
v1 := prime1v + prime2
v2 := prime2
v3 := uint64(0)
v4 := -prime1v
for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)]))
v3 = round(v3, u64(b[16:24:len(b)]))
v4 = round(v4, u64(b[24:32:len(b)]))
b = b[32:len(b):len(b)]
}
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
h = mergeRound(h, v1)
h = mergeRound(h, v2)
h = mergeRound(h, v3)
h = mergeRound(h, v4)
} else {
h = prime5
}
h += uint64(n)
i, end := 0, len(b)
for ; i+8 <= end; i += 8 {
k1 := round(0, u64(b[i:i+8:len(b)]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
if i+4 <= end {
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
h = rol23(h)*prime2 + prime3
i += 4
}
for ; i < end; i++ {
h ^= uint64(b[i]) * prime5
h = rol11(h) * prime1
}
h ^= h >> 33
h *= prime2
h ^= h >> 29
h *= prime3
h ^= h >> 32
return h
}
func writeBlocks(d *Digest, b []byte) int {
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
n := len(b)
for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)]))
v3 = round(v3, u64(b[16:24:len(b)]))
v4 = round(v4, u64(b[24:32:len(b)]))
b = b[32:len(b):len(b)]
}
d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
return n - len(b)
}

View File

@@ -1,15 +0,0 @@
// +build appengine
// This file contains the safe implementations of otherwise unsafe-using code.
package xxhash
// Sum64String computes the 64-bit xxHash digest of s.
func Sum64String(s string) uint64 {
return Sum64([]byte(s))
}
// WriteString adds more data to d. It always returns len(s), nil.
func (d *Digest) WriteString(s string) (n int, err error) {
return d.Write([]byte(s))
}

View File

@@ -1,46 +0,0 @@
// +build !appengine
// This file encapsulates usage of unsafe.
// xxhash_safe.go contains the safe implementations.
package xxhash
import (
"reflect"
"unsafe"
)
// Notes:
//
// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
// for some discussion about these unsafe conversions.
//
// In the future it's possible that compiler optimizations will make these
// unsafe operations unnecessary: https://golang.org/issue/2205.
//
// Both of these wrapper functions still incur function call overhead since they
// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
// for strings to squeeze out a bit more speed. Mid-stack inlining should
// eventually fix this.
// Sum64String computes the 64-bit xxHash digest of s.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
var b []byte
bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
bh.Len = len(s)
bh.Cap = len(s)
return Sum64(b)
}
// WriteString adds more data to d. It always returns len(s), nil.
// It may be faster than Write([]byte(s)) by avoiding a copy.
func (d *Digest) WriteString(s string) (n int, err error) {
var b []byte
bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
bh.Len = len(s)
bh.Cap = len(s)
return d.Write(b)
}

View File

@@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

View File

@@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

View File

@@ -1,28 +0,0 @@
Copyright 2010 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,324 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"errors"
"fmt"
"google.golang.org/protobuf/encoding/prototext"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/runtime/protoimpl"
)
const (
WireVarint = 0
WireFixed32 = 5
WireFixed64 = 1
WireBytes = 2
WireStartGroup = 3
WireEndGroup = 4
)
// EncodeVarint returns the varint encoded bytes of v.
func EncodeVarint(v uint64) []byte {
return protowire.AppendVarint(nil, v)
}
// SizeVarint returns the length of the varint encoded bytes of v.
// This is equal to len(EncodeVarint(v)).
func SizeVarint(v uint64) int {
return protowire.SizeVarint(v)
}
// DecodeVarint parses a varint encoded integer from b,
// returning the integer value and the length of the varint.
// It returns (0, 0) if there is a parse error.
func DecodeVarint(b []byte) (uint64, int) {
v, n := protowire.ConsumeVarint(b)
if n < 0 {
return 0, 0
}
return v, n
}
// Buffer is a buffer for encoding and decoding the protobuf wire format.
// It may be reused between invocations to reduce memory usage.
type Buffer struct {
buf []byte
idx int
deterministic bool
}
// NewBuffer allocates a new Buffer initialized with buf,
// where the contents of buf are considered the unread portion of the buffer.
func NewBuffer(buf []byte) *Buffer {
return &Buffer{buf: buf}
}
// SetDeterministic specifies whether to use deterministic serialization.
//
// Deterministic serialization guarantees that for a given binary, equal
// messages will always be serialized to the same bytes. This implies:
//
// - Repeated serialization of a message will return the same bytes.
// - Different processes of the same binary (which may be executing on
// different machines) will serialize equal messages to the same bytes.
//
// Note that the deterministic serialization is NOT canonical across
// languages. It is not guaranteed to remain stable over time. It is unstable
// across different builds with schema changes due to unknown fields.
// Users who need canonical serialization (e.g., persistent storage in a
// canonical form, fingerprinting, etc.) should define their own
// canonicalization specification and implement their own serializer rather
// than relying on this API.
//
// If deterministic serialization is requested, map entries will be sorted
// by keys in lexographical order. This is an implementation detail and
// subject to change.
func (b *Buffer) SetDeterministic(deterministic bool) {
b.deterministic = deterministic
}
// SetBuf sets buf as the internal buffer,
// where the contents of buf are considered the unread portion of the buffer.
func (b *Buffer) SetBuf(buf []byte) {
b.buf = buf
b.idx = 0
}
// Reset clears the internal buffer of all written and unread data.
func (b *Buffer) Reset() {
b.buf = b.buf[:0]
b.idx = 0
}
// Bytes returns the internal buffer.
func (b *Buffer) Bytes() []byte {
return b.buf
}
// Unread returns the unread portion of the buffer.
func (b *Buffer) Unread() []byte {
return b.buf[b.idx:]
}
// Marshal appends the wire-format encoding of m to the buffer.
func (b *Buffer) Marshal(m Message) error {
var err error
b.buf, err = marshalAppend(b.buf, m, b.deterministic)
return err
}
// Unmarshal parses the wire-format message in the buffer and
// places the decoded results in m.
// It does not reset m before unmarshaling.
func (b *Buffer) Unmarshal(m Message) error {
err := UnmarshalMerge(b.Unread(), m)
b.idx = len(b.buf)
return err
}
type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
func (m *unknownFields) String() string { panic("not implemented") }
func (m *unknownFields) Reset() { panic("not implemented") }
func (m *unknownFields) ProtoMessage() { panic("not implemented") }
// DebugPrint dumps the encoded bytes of b with a header and footer including s
// to stdout. This is only intended for debugging.
func (*Buffer) DebugPrint(s string, b []byte) {
m := MessageReflect(new(unknownFields))
m.SetUnknown(b)
b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
}
// EncodeVarint appends an unsigned varint encoding to the buffer.
func (b *Buffer) EncodeVarint(v uint64) error {
b.buf = protowire.AppendVarint(b.buf, v)
return nil
}
// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
func (b *Buffer) EncodeZigzag32(v uint64) error {
return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
}
// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
func (b *Buffer) EncodeZigzag64(v uint64) error {
return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
}
// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
func (b *Buffer) EncodeFixed32(v uint64) error {
b.buf = protowire.AppendFixed32(b.buf, uint32(v))
return nil
}
// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
func (b *Buffer) EncodeFixed64(v uint64) error {
b.buf = protowire.AppendFixed64(b.buf, uint64(v))
return nil
}
// EncodeRawBytes appends a length-prefixed raw bytes to the buffer.
func (b *Buffer) EncodeRawBytes(v []byte) error {
b.buf = protowire.AppendBytes(b.buf, v)
return nil
}
// EncodeStringBytes appends a length-prefixed raw bytes to the buffer.
// It does not validate whether v contains valid UTF-8.
func (b *Buffer) EncodeStringBytes(v string) error {
b.buf = protowire.AppendString(b.buf, v)
return nil
}
// EncodeMessage appends a length-prefixed encoded message to the buffer.
func (b *Buffer) EncodeMessage(m Message) error {
var err error
b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
b.buf, err = marshalAppend(b.buf, m, b.deterministic)
return err
}
// DecodeVarint consumes an encoded unsigned varint from the buffer.
func (b *Buffer) DecodeVarint() (uint64, error) {
v, n := protowire.ConsumeVarint(b.buf[b.idx:])
if n < 0 {
return 0, protowire.ParseError(n)
}
b.idx += n
return uint64(v), nil
}
// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
func (b *Buffer) DecodeZigzag32() (uint64, error) {
v, err := b.DecodeVarint()
if err != nil {
return 0, err
}
return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
}
// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
func (b *Buffer) DecodeZigzag64() (uint64, error) {
v, err := b.DecodeVarint()
if err != nil {
return 0, err
}
return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
}
// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
func (b *Buffer) DecodeFixed32() (uint64, error) {
v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
if n < 0 {
return 0, protowire.ParseError(n)
}
b.idx += n
return uint64(v), nil
}
// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
func (b *Buffer) DecodeFixed64() (uint64, error) {
v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
if n < 0 {
return 0, protowire.ParseError(n)
}
b.idx += n
return uint64(v), nil
}
// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer.
// If alloc is specified, it returns a copy the raw bytes
// rather than a sub-slice of the buffer.
func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
v, n := protowire.ConsumeBytes(b.buf[b.idx:])
if n < 0 {
return nil, protowire.ParseError(n)
}
b.idx += n
if alloc {
v = append([]byte(nil), v...)
}
return v, nil
}
// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer.
// It does not validate whether the raw bytes contain valid UTF-8.
func (b *Buffer) DecodeStringBytes() (string, error) {
v, n := protowire.ConsumeString(b.buf[b.idx:])
if n < 0 {
return "", protowire.ParseError(n)
}
b.idx += n
return v, nil
}
// DecodeMessage consumes a length-prefixed message from the buffer.
// It does not reset m before unmarshaling.
func (b *Buffer) DecodeMessage(m Message) error {
v, err := b.DecodeRawBytes(false)
if err != nil {
return err
}
return UnmarshalMerge(v, m)
}
// DecodeGroup consumes a message group from the buffer.
// It assumes that the start group marker has already been consumed and
// consumes all bytes until (and including) the end group marker.
// It does not reset m before unmarshaling.
func (b *Buffer) DecodeGroup(m Message) error {
v, n, err := consumeGroup(b.buf[b.idx:])
if err != nil {
return err
}
b.idx += n
return UnmarshalMerge(v, m)
}
// consumeGroup parses b until it finds an end group marker, returning
// the raw bytes of the message (excluding the end group marker) and
// the total length of the message (including the end group marker).
func consumeGroup(b []byte) ([]byte, int, error) {
b0 := b
depth := 1 // assume this follows a start group marker
for {
_, wtyp, tagLen := protowire.ConsumeTag(b)
if tagLen < 0 {
return nil, 0, protowire.ParseError(tagLen)
}
b = b[tagLen:]
var valLen int
switch wtyp {
case protowire.VarintType:
_, valLen = protowire.ConsumeVarint(b)
case protowire.Fixed32Type:
_, valLen = protowire.ConsumeFixed32(b)
case protowire.Fixed64Type:
_, valLen = protowire.ConsumeFixed64(b)
case protowire.BytesType:
_, valLen = protowire.ConsumeBytes(b)
case protowire.StartGroupType:
depth++
case protowire.EndGroupType:
depth--
default:
return nil, 0, errors.New("proto: cannot parse reserved wire type")
}
if valLen < 0 {
return nil, 0, protowire.ParseError(valLen)
}
b = b[valLen:]
if depth == 0 {
return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
}
}
}
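
For reference, a minimal round-trip sketch of the Buffer helpers deleted here, assuming the NewBuffer constructor defined earlier in this file (outside the hunk shown):

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
)

func main() {
    b := proto.NewBuffer(nil) // constructor from earlier in buffer.go

    _ = b.EncodeVarint(300)               // unsigned varint
    _ = b.EncodeRawBytes([]byte("hello")) // length-prefixed bytes

    v, _ := b.DecodeVarint()        // reads from the start of the same buffer
    s, _ := b.DecodeRawBytes(true)  // true: copy out of the internal buffer
    fmt.Println(v, string(s))       // 300 hello
}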

View File

@ -1,63 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"google.golang.org/protobuf/reflect/protoreflect"
)
// SetDefaults sets unpopulated scalar fields to their default values.
// Fields within a oneof are not set even if they have a default value.
// SetDefaults is recursively called upon any populated message fields.
func SetDefaults(m Message) {
if m != nil {
setDefaults(MessageReflect(m))
}
}
func setDefaults(m protoreflect.Message) {
fds := m.Descriptor().Fields()
for i := 0; i < fds.Len(); i++ {
fd := fds.Get(i)
if !m.Has(fd) {
if fd.HasDefault() && fd.ContainingOneof() == nil {
v := fd.Default()
if fd.Kind() == protoreflect.BytesKind {
v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
}
m.Set(fd, v)
}
continue
}
}
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
switch {
// Handle singular message.
case fd.Cardinality() != protoreflect.Repeated:
if fd.Message() != nil {
setDefaults(m.Get(fd).Message())
}
// Handle list of messages.
case fd.IsList():
if fd.Message() != nil {
ls := m.Get(fd).List()
for i := 0; i < ls.Len(); i++ {
setDefaults(ls.Get(i).Message())
}
}
// Handle map of messages.
case fd.IsMap():
if fd.MapValue().Message() != nil {
ms := m.Get(fd).Map()
ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
setDefaults(v.Message())
return true
})
}
}
return true
})
}
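
A usage sketch only; pb.Config below is a hypothetical proto2-generated message with declared field defaults and is not part of this repository:

// Hypothetical generated type, e.g. `optional int32 retries = 1 [default = 3];`.
cfg := &pb.Config{}
proto.SetDefaults(cfg)
// Unset scalar fields now carry their declared defaults (cfg.Retries points at 3);
// fields inside a oneof are left unset, as documented above.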

View File

@ -1,113 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"encoding/json"
"errors"
"fmt"
"strconv"
protoV2 "google.golang.org/protobuf/proto"
)
var (
// Deprecated: No longer returned.
ErrNil = errors.New("proto: Marshal called with nil")
// Deprecated: No longer returned.
ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
// Deprecated: No longer returned.
ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
)
// Deprecated: Do not use.
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
// Deprecated: Do not use.
func GetStats() Stats { return Stats{} }
// Deprecated: Do not use.
func MarshalMessageSet(interface{}) ([]byte, error) {
return nil, errors.New("proto: not implemented")
}
// Deprecated: Do not use.
func UnmarshalMessageSet([]byte, interface{}) error {
return errors.New("proto: not implemented")
}
// Deprecated: Do not use.
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
return nil, errors.New("proto: not implemented")
}
// Deprecated: Do not use.
func UnmarshalMessageSetJSON([]byte, interface{}) error {
return errors.New("proto: not implemented")
}
// Deprecated: Do not use.
func RegisterMessageSetType(Message, int32, string) {}
// Deprecated: Do not use.
func EnumName(m map[int32]string, v int32) string {
s, ok := m[v]
if ok {
return s
}
return strconv.Itoa(int(v))
}
// Deprecated: Do not use.
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
if data[0] == '"' {
// New style: enums are strings.
var repr string
if err := json.Unmarshal(data, &repr); err != nil {
return -1, err
}
val, ok := m[repr]
if !ok {
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
}
return val, nil
}
// Old style: enums are ints.
var val int32
if err := json.Unmarshal(data, &val); err != nil {
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
}
return val, nil
}
// Deprecated: Do not use; this type existed for internal use only.
type InternalMessageInfo struct{}
// Deprecated: Do not use; this method existed for internal use only.
func (*InternalMessageInfo) DiscardUnknown(m Message) {
DiscardUnknown(m)
}
// Deprecated: Do not use; this method existed for internal use only.
func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
}
// Deprecated: Do not use; this method existed for internal use only.
func (*InternalMessageInfo) Merge(dst, src Message) {
protoV2.Merge(MessageV2(dst), MessageV2(src))
}
// Deprecated: Do not use; this method existed for internal use only.
func (*InternalMessageInfo) Size(m Message) int {
return protoV2.Size(MessageV2(m))
}
// Deprecated: Do not use; this method existed for internal use only.
func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
}
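
Most of this file is stubbed out, but EnumName and UnmarshalJSONEnum are still functional; a minimal sketch of both, using hand-built name maps:

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
)

func main() {
    names := map[int32]string{0: "UNKNOWN", 1: "ACTIVE"}
    values := map[string]int32{"UNKNOWN": 0, "ACTIVE": 1}

    fmt.Println(proto.EnumName(names, 1)) // ACTIVE
    fmt.Println(proto.EnumName(names, 9)) // 9 (unknown numbers fall back to the decimal string)

    v, err := proto.UnmarshalJSONEnum(values, []byte(`"ACTIVE"`), "State") // new style: string
    fmt.Println(v, err)                                                    // 1 <nil>

    v, err = proto.UnmarshalJSONEnum(values, []byte(`0`), "State") // old style: number
    fmt.Println(v, err)                                            // 0 <nil>
}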

View File

@ -1,58 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"google.golang.org/protobuf/reflect/protoreflect"
)
// DiscardUnknown recursively discards all unknown fields from this message
// and all embedded messages.
//
// When unmarshaling a message with unrecognized fields, the tags and values
// of such fields are preserved in the Message. This allows a later call to
// marshal to be able to produce a message that continues to have those
// unrecognized fields. To avoid this, DiscardUnknown is used to
// explicitly clear the unknown fields after unmarshaling.
func DiscardUnknown(m Message) {
if m != nil {
discardUnknown(MessageReflect(m))
}
}
func discardUnknown(m protoreflect.Message) {
m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
switch {
// Handle singular message.
case fd.Cardinality() != protoreflect.Repeated:
if fd.Message() != nil {
discardUnknown(m.Get(fd).Message())
}
// Handle list of messages.
case fd.IsList():
if fd.Message() != nil {
ls := m.Get(fd).List()
for i := 0; i < ls.Len(); i++ {
discardUnknown(ls.Get(i).Message())
}
}
// Handle map of messages.
case fd.IsMap():
if fd.MapValue().Message() != nil {
ms := m.Get(fd).Map()
ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
discardUnknown(v.Message())
return true
})
}
}
return true
})
// Discard unknown fields.
if len(m.GetUnknown()) > 0 {
m.SetUnknown(nil)
}
}
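
A runnable sketch of the effect, hand-attaching an unknown field through protobuf reflection; emptypb and protowire come from the v2 module this package wraps:

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "google.golang.org/protobuf/encoding/protowire"
    "google.golang.org/protobuf/types/known/emptypb"
)

func main() {
    m := &emptypb.Empty{}

    // Fabricate an unknown field: number 99, varint value 7.
    unk := protowire.AppendTag(nil, 99, protowire.VarintType)
    unk = protowire.AppendVarint(unk, 7)
    m.ProtoReflect().SetUnknown(unk)

    fmt.Println(len(m.ProtoReflect().GetUnknown())) // 3
    proto.DiscardUnknown(m)
    fmt.Println(len(m.ProtoReflect().GetUnknown())) // 0
}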

View File

@ -1,356 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"errors"
"fmt"
"reflect"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/runtime/protoiface"
"google.golang.org/protobuf/runtime/protoimpl"
)
type (
// ExtensionDesc represents an extension descriptor and
// is used to interact with an extension field in a message.
//
// Variables of this type are generated in code by protoc-gen-go.
ExtensionDesc = protoimpl.ExtensionInfo
// ExtensionRange represents a range of message extensions.
// Used in code generated by protoc-gen-go.
ExtensionRange = protoiface.ExtensionRangeV1
// Deprecated: Do not use; this is an internal type.
Extension = protoimpl.ExtensionFieldV1
// Deprecated: Do not use; this is an internal type.
XXX_InternalExtensions = protoimpl.ExtensionFields
)
// ErrMissingExtension is returned by GetExtension when the extension is not present.
var ErrMissingExtension = errors.New("proto: missing extension")
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
// HasExtension reports whether the extension field is present in m
// either as an explicitly populated field or as an unknown field.
func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return false
}
// Check whether any populated known field matches the field number.
xtd := xt.TypeDescriptor()
if isValidExtension(mr.Descriptor(), xtd) {
has = mr.Has(xtd)
} else {
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
has = int32(fd.Number()) == xt.Field
return !has
})
}
// Check whether any unknown field matches the field number.
for b := mr.GetUnknown(); !has && len(b) > 0; {
num, _, n := protowire.ConsumeField(b)
has = int32(num) == xt.Field
b = b[n:]
}
return has
}
// ClearExtension removes the extension field from m
// either as an explicitly populated field or as an unknown field.
func ClearExtension(m Message, xt *ExtensionDesc) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return
}
xtd := xt.TypeDescriptor()
if isValidExtension(mr.Descriptor(), xtd) {
mr.Clear(xtd)
} else {
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
if int32(fd.Number()) == xt.Field {
mr.Clear(fd)
return false
}
return true
})
}
clearUnknown(mr, fieldNum(xt.Field))
}
// ClearAllExtensions clears all extensions from m.
// This includes populated fields and unknown fields in the extension range.
func ClearAllExtensions(m Message) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return
}
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
if fd.IsExtension() {
mr.Clear(fd)
}
return true
})
clearUnknown(mr, mr.Descriptor().ExtensionRanges())
}
// GetExtension retrieves a proto2 extended field from m.
//
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
// then GetExtension parses the encoded field and returns a Go value of the specified type.
// If the field is not present, then the default value is returned (if one is specified),
// otherwise ErrMissingExtension is reported.
//
// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
// then GetExtension returns the raw encoded bytes for the extension field.
func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
return nil, errNotExtendable
}
// Retrieve the unknown fields for this extension field.
var bo protoreflect.RawFields
for bi := mr.GetUnknown(); len(bi) > 0; {
num, _, n := protowire.ConsumeField(bi)
if int32(num) == xt.Field {
bo = append(bo, bi[:n]...)
}
bi = bi[n:]
}
// For type incomplete descriptors, only retrieve the unknown fields.
if xt.ExtensionType == nil {
return []byte(bo), nil
}
// If the extension field only exists as unknown fields, unmarshal it.
// This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
xtd := xt.TypeDescriptor()
if !isValidExtension(mr.Descriptor(), xtd) {
return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
}
if !mr.Has(xtd) && len(bo) > 0 {
m2 := mr.New()
if err := (proto.UnmarshalOptions{
Resolver: extensionResolver{xt},
}.Unmarshal(bo, m2.Interface())); err != nil {
return nil, err
}
if m2.Has(xtd) {
mr.Set(xtd, m2.Get(xtd))
clearUnknown(mr, fieldNum(xt.Field))
}
}
// Check whether the message has the extension field set or a default.
var pv protoreflect.Value
switch {
case mr.Has(xtd):
pv = mr.Get(xtd)
case xtd.HasDefault():
pv = xtd.Default()
default:
return nil, ErrMissingExtension
}
v := xt.InterfaceOf(pv)
rv := reflect.ValueOf(v)
if isScalarKind(rv.Kind()) {
rv2 := reflect.New(rv.Type())
rv2.Elem().Set(rv)
v = rv2.Interface()
}
return v, nil
}
// extensionResolver is a custom extension resolver that stores a single
// extension type that takes precedence over the global registry.
type extensionResolver struct{ xt protoreflect.ExtensionType }
func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
return r.xt, nil
}
return protoregistry.GlobalTypes.FindExtensionByName(field)
}
func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
return r.xt, nil
}
return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
}
// GetExtensions returns a list of the extension values present in m,
// corresponding with the provided list of extension descriptors, xts.
// If an extension is missing in m, the corresponding value is nil.
func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return nil, errNotExtendable
}
vs := make([]interface{}, len(xts))
for i, xt := range xts {
v, err := GetExtension(m, xt)
if err != nil {
if err == ErrMissingExtension {
continue
}
return vs, err
}
vs[i] = v
}
return vs, nil
}
// SetExtension sets an extension field in m to the provided value.
func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
return errNotExtendable
}
rv := reflect.ValueOf(v)
if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
}
if rv.Kind() == reflect.Ptr {
if rv.IsNil() {
return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
}
if isScalarKind(rv.Elem().Kind()) {
v = rv.Elem().Interface()
}
}
xtd := xt.TypeDescriptor()
if !isValidExtension(mr.Descriptor(), xtd) {
return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
}
mr.Set(xtd, xt.ValueOf(v))
clearUnknown(mr, fieldNum(xt.Field))
return nil
}
// SetRawExtension inserts b into the unknown fields of m.
//
// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
func SetRawExtension(m Message, fnum int32, b []byte) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return
}
// Verify that the raw field is valid.
for b0 := b; len(b0) > 0; {
num, _, n := protowire.ConsumeField(b0)
if int32(num) != fnum {
panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
}
b0 = b0[n:]
}
ClearExtension(m, &ExtensionDesc{Field: fnum})
mr.SetUnknown(append(mr.GetUnknown(), b...))
}
// ExtensionDescs returns a list of extension descriptors found in m,
// containing descriptors for both populated extension fields in m and
// also unknown fields of m that are in the extension range.
// For the latter case, a type-incomplete descriptor is provided where only
// the ExtensionDesc.Field field is populated.
// The order of the extension descriptors is undefined.
func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
return nil, errNotExtendable
}
// Collect a set of known extension descriptors.
extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
if fd.IsExtension() {
xt := fd.(protoreflect.ExtensionTypeDescriptor)
if xd, ok := xt.Type().(*ExtensionDesc); ok {
extDescs[fd.Number()] = xd
}
}
return true
})
// Collect a set of unknown extension descriptors.
extRanges := mr.Descriptor().ExtensionRanges()
for b := mr.GetUnknown(); len(b) > 0; {
num, _, n := protowire.ConsumeField(b)
if extRanges.Has(num) && extDescs[num] == nil {
extDescs[num] = nil
}
b = b[n:]
}
// Transpose the set of descriptors into a list.
var xts []*ExtensionDesc
for num, xt := range extDescs {
if xt == nil {
xt = &ExtensionDesc{Field: int32(num)}
}
xts = append(xts, xt)
}
return xts, nil
}
// isValidExtension reports whether xtd is a valid extension descriptor for md.
func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
}
// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
// This function exists for historical reasons since the representation of
// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
func isScalarKind(k reflect.Kind) bool {
switch k {
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
return true
default:
return false
}
}
// clearUnknown removes unknown fields from m where remover.Has reports true.
func clearUnknown(m protoreflect.Message, remover interface {
Has(protoreflect.FieldNumber) bool
}) {
var bo protoreflect.RawFields
for bi := m.GetUnknown(); len(bi) > 0; {
num, _, n := protowire.ConsumeField(bi)
if !remover.Has(num) {
bo = append(bo, bi[:n]...)
}
bi = bi[n:]
}
if bi := m.GetUnknown(); len(bi) != len(bo) {
m.SetUnknown(bo)
}
}
type fieldNum protoreflect.FieldNumber
func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
return protoreflect.FieldNumber(n1) == n2
}
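
A usage sketch only; pb.Request (an extendable proto2 message) and pb.E_Priority (its generated *proto.ExtensionDesc for an int32 extension) are hypothetical and do not exist in this repository:

// Hypothetical generated artifacts: pb.Request and pb.E_Priority.
req := &pb.Request{}
if err := proto.SetExtension(req, pb.E_Priority, proto.Int32(5)); err != nil {
    // handle err
}
if proto.HasExtension(req, pb.E_Priority) {
    v, _ := proto.GetExtension(req, pb.E_Priority)
    _ = v.(*int32) // scalar extensions come back as pointers in this v1 API
}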

View File

@ -1,306 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"fmt"
"reflect"
"strconv"
"strings"
"sync"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/runtime/protoimpl"
)
// StructProperties represents protocol buffer type information for a
// generated protobuf message in the open-struct API.
//
// Deprecated: Do not use.
type StructProperties struct {
// Prop are the properties for each field.
//
// Fields belonging to a oneof are stored in OneofTypes instead, with a
// single Properties representing the parent oneof held here.
//
// The order of Prop matches the order of fields in the Go struct.
// Struct fields that are not related to protobufs have a "XXX_" prefix
// in the Properties.Name and must be ignored by the user.
Prop []*Properties
// OneofTypes contains information about the oneof fields in this message.
// It is keyed by the protobuf field name.
OneofTypes map[string]*OneofProperties
}
// Properties represents the type information for a protobuf message field.
//
// Deprecated: Do not use.
type Properties struct {
// Name is a placeholder name with little meaningful semantic value.
// If the name has an "XXX_" prefix, the entire Properties must be ignored.
Name string
// OrigName is the protobuf field name or oneof name.
OrigName string
// JSONName is the JSON name for the protobuf field.
JSONName string
// Enum is a placeholder name for enums.
// For historical reasons, this is neither the Go name for the enum,
// nor the protobuf name for the enum.
Enum string // Deprecated: Do not use.
// Weak contains the full name of the weakly referenced message.
Weak string
// Wire is a string representation of the wire type.
Wire string
// WireType is the protobuf wire type for the field.
WireType int
// Tag is the protobuf field number.
Tag int
// Required reports whether this is a required field.
Required bool
// Optional reports whether this is an optional field.
Optional bool
// Repeated reports whether this is a repeated field.
Repeated bool
// Packed reports whether this is a packed repeated field of scalars.
Packed bool
// Proto3 reports whether this field operates under the proto3 syntax.
Proto3 bool
// Oneof reports whether this field belongs within a oneof.
Oneof bool
// Default is the default value in string form.
Default string
// HasDefault reports whether the field has a default value.
HasDefault bool
// MapKeyProp is the properties for the key field for a map field.
MapKeyProp *Properties
// MapValProp is the properties for the value field for a map field.
MapValProp *Properties
}
// OneofProperties represents the type information for a protobuf oneof.
//
// Deprecated: Do not use.
type OneofProperties struct {
// Type is a pointer to the generated wrapper type for the field value.
// This is nil for messages that are not in the open-struct API.
Type reflect.Type
// Field is the index into StructProperties.Prop for the containing oneof.
Field int
// Prop is the properties for the field.
Prop *Properties
}
// String formats the properties in the protobuf struct field tag style.
func (p *Properties) String() string {
s := p.Wire
s += "," + strconv.Itoa(p.Tag)
if p.Required {
s += ",req"
}
if p.Optional {
s += ",opt"
}
if p.Repeated {
s += ",rep"
}
if p.Packed {
s += ",packed"
}
s += ",name=" + p.OrigName
if p.JSONName != "" {
s += ",json=" + p.JSONName
}
if len(p.Enum) > 0 {
s += ",enum=" + p.Enum
}
if len(p.Weak) > 0 {
s += ",weak=" + p.Weak
}
if p.Proto3 {
s += ",proto3"
}
if p.Oneof {
s += ",oneof"
}
if p.HasDefault {
s += ",def=" + p.Default
}
return s
}
// Parse populates p by parsing a string in the protobuf struct field tag style.
func (p *Properties) Parse(tag string) {
// For example: "bytes,49,opt,name=foo,def=hello!"
for len(tag) > 0 {
i := strings.IndexByte(tag, ',')
if i < 0 {
i = len(tag)
}
switch s := tag[:i]; {
case strings.HasPrefix(s, "name="):
p.OrigName = s[len("name="):]
case strings.HasPrefix(s, "json="):
p.JSONName = s[len("json="):]
case strings.HasPrefix(s, "enum="):
p.Enum = s[len("enum="):]
case strings.HasPrefix(s, "weak="):
p.Weak = s[len("weak="):]
case strings.Trim(s, "0123456789") == "":
n, _ := strconv.ParseUint(s, 10, 32)
p.Tag = int(n)
case s == "opt":
p.Optional = true
case s == "req":
p.Required = true
case s == "rep":
p.Repeated = true
case s == "varint" || s == "zigzag32" || s == "zigzag64":
p.Wire = s
p.WireType = WireVarint
case s == "fixed32":
p.Wire = s
p.WireType = WireFixed32
case s == "fixed64":
p.Wire = s
p.WireType = WireFixed64
case s == "bytes":
p.Wire = s
p.WireType = WireBytes
case s == "group":
p.Wire = s
p.WireType = WireStartGroup
case s == "packed":
p.Packed = true
case s == "proto3":
p.Proto3 = true
case s == "oneof":
p.Oneof = true
case strings.HasPrefix(s, "def="):
// The default tag is special in that everything afterwards is the
// default regardless of the presence of commas.
p.HasDefault = true
p.Default, i = tag[len("def="):], len(tag)
}
tag = strings.TrimPrefix(tag[i:], ",")
}
}
// Init populates the properties from a protocol buffer struct tag.
//
// Deprecated: Do not use.
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
p.Name = name
p.OrigName = name
if tag == "" {
return
}
p.Parse(tag)
if typ != nil && typ.Kind() == reflect.Map {
p.MapKeyProp = new(Properties)
p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
p.MapValProp = new(Properties)
p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
}
}
var propertiesCache sync.Map // map[reflect.Type]*StructProperties
// GetProperties returns the list of properties for the type represented by t,
// which must be a generated protocol buffer message in the open-struct API,
// where protobuf message fields are represented by exported Go struct fields.
//
// Deprecated: Use protobuf reflection instead.
func GetProperties(t reflect.Type) *StructProperties {
if p, ok := propertiesCache.Load(t); ok {
return p.(*StructProperties)
}
p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
return p.(*StructProperties)
}
func newProperties(t reflect.Type) *StructProperties {
if t.Kind() != reflect.Struct {
panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
}
var hasOneof bool
prop := new(StructProperties)
// Construct a list of properties for each field in the struct.
for i := 0; i < t.NumField(); i++ {
p := new(Properties)
f := t.Field(i)
tagField := f.Tag.Get("protobuf")
p.Init(f.Type, f.Name, tagField, &f)
tagOneof := f.Tag.Get("protobuf_oneof")
if tagOneof != "" {
hasOneof = true
p.OrigName = tagOneof
}
// Rename unrelated struct fields with the "XXX_" prefix since so much
// user code simply checks for this to exclude special fields.
if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
p.Name = "XXX_" + p.Name
p.OrigName = "XXX_" + p.OrigName
} else if p.Weak != "" {
p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
}
prop.Prop = append(prop.Prop, p)
}
// Construct a mapping of oneof field names to properties.
if hasOneof {
var oneofWrappers []interface{}
if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
}
if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
}
if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
oneofWrappers = m.ProtoMessageInfo().OneofWrappers
}
}
prop.OneofTypes = make(map[string]*OneofProperties)
for _, wrapper := range oneofWrappers {
p := &OneofProperties{
Type: reflect.ValueOf(wrapper).Type(), // *T
Prop: new(Properties),
}
f := p.Type.Elem().Field(0)
p.Prop.Name = f.Name
p.Prop.Parse(f.Tag.Get("protobuf"))
// Determine the struct field that contains this oneof.
// Each wrapper is assignable to exactly one parent field.
var foundOneof bool
for i := 0; i < t.NumField() && !foundOneof; i++ {
if p.Type.AssignableTo(t.Field(i).Type) {
p.Field = i
foundOneof = true
}
}
if !foundOneof {
panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
}
prop.OneofTypes[p.Prop.OrigName] = p
}
}
return prop
}
func (sp *StructProperties) Len() int { return len(sp.Prop) }
func (sp *StructProperties) Less(i, j int) bool { return false }
func (sp *StructProperties) Swap(i, j int) { return }
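
Parse and String are symmetric over the struct-tag syntax, which makes them easy to exercise directly; a minimal sketch:

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
)

func main() {
    p := new(proto.Properties)
    p.Parse("bytes,49,opt,name=foo,def=hello!")

    fmt.Println(p.Tag, p.Wire, p.Optional, p.OrigName, p.Default) // 49 bytes true foo hello!
    fmt.Println(p.String())                                       // bytes,49,opt,name=foo,def=hello!
}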

View File

@ -1,167 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package proto provides functionality for handling protocol buffer messages.
// In particular, it provides marshaling and unmarshaling between a protobuf
// message and the binary wire format.
//
// See https://developers.google.com/protocol-buffers/docs/gotutorial for
// more information.
//
// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
package proto
import (
protoV2 "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/runtime/protoiface"
"google.golang.org/protobuf/runtime/protoimpl"
)
const (
ProtoPackageIsVersion1 = true
ProtoPackageIsVersion2 = true
ProtoPackageIsVersion3 = true
ProtoPackageIsVersion4 = true
)
// GeneratedEnum is any enum type generated by protoc-gen-go
// which is a named int32 kind.
// This type exists for documentation purposes.
type GeneratedEnum interface{}
// GeneratedMessage is any message type generated by protoc-gen-go
// which is a pointer to a named struct kind.
// This type exists for documentation purposes.
type GeneratedMessage interface{}
// Message is a protocol buffer message.
//
// This is the v1 version of the message interface and is marginally better
// than an empty interface as it lacks any method to programmatically interact
// with the contents of the message.
//
// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
// exposes protobuf reflection as a first-class feature of the interface.
//
// To convert a v1 message to a v2 message, use the MessageV2 function.
// To convert a v2 message to a v1 message, use the MessageV1 function.
type Message = protoiface.MessageV1
// MessageV1 converts either a v1 or v2 message to a v1 message.
// It returns nil if m is nil.
func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
return protoimpl.X.ProtoMessageV1Of(m)
}
// MessageV2 converts either a v1 or v2 message to a v2 message.
// It returns nil if m is nil.
func MessageV2(m GeneratedMessage) protoV2.Message {
return protoimpl.X.ProtoMessageV2Of(m)
}
// MessageReflect returns a reflective view for a message.
// It returns nil if m is nil.
func MessageReflect(m Message) protoreflect.Message {
return protoimpl.X.MessageOf(m)
}
// Marshaler is implemented by messages that can marshal themselves.
// This interface is used by the following functions: Size, Marshal,
// Buffer.Marshal, and Buffer.EncodeMessage.
//
// Deprecated: Do not implement.
type Marshaler interface {
// Marshal formats the encoded bytes of the message.
// It should be deterministic and emit valid protobuf wire data.
// The caller takes ownership of the returned buffer.
Marshal() ([]byte, error)
}
// Unmarshaler is implemented by messages that can unmarshal themselves.
// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
//
// Deprecated: Do not implement.
type Unmarshaler interface {
// Unmarshal parses the encoded bytes of the protobuf wire input.
// The provided buffer is only valid for the duration of the method call.
// It should not reset the receiver message.
Unmarshal([]byte) error
}
// Merger is implemented by messages that can merge themselves.
// This interface is used by the following functions: Clone and Merge.
//
// Deprecated: Do not implement.
type Merger interface {
// Merge merges the contents of src into the receiver message.
// It clones all data structures in src such that it aliases no mutable
// memory referenced by src.
Merge(src Message)
}
// RequiredNotSetError is an error type returned when
// marshaling or unmarshaling a message with missing required fields.
type RequiredNotSetError struct {
err error
}
func (e *RequiredNotSetError) Error() string {
if e.err != nil {
return e.err.Error()
}
return "proto: required field not set"
}
func (e *RequiredNotSetError) RequiredNotSet() bool {
return true
}
func checkRequiredNotSet(m protoV2.Message) error {
if err := protoV2.CheckInitialized(m); err != nil {
return &RequiredNotSetError{err: err}
}
return nil
}
// Clone returns a deep copy of src.
func Clone(src Message) Message {
return MessageV1(protoV2.Clone(MessageV2(src)))
}
// Merge merges src into dst, which must be messages of the same type.
//
// Populated scalar fields in src are copied to dst, while populated
// singular messages in src are merged into dst by recursively calling Merge.
// The elements of every list field in src are appended to the corresponding
// list fields in dst. The entries of every map field in src are copied into
// the corresponding map field in dst, possibly replacing existing entries.
// The unknown fields of src are appended to the unknown fields of dst.
func Merge(dst, src Message) {
protoV2.Merge(MessageV2(dst), MessageV2(src))
}
// Equal reports whether two messages are equal.
// If two messages marshal to the same bytes under deterministic serialization,
// then Equal is guaranteed to report true.
//
// Two messages are equal if they are the same protobuf message type,
// have the same set of populated known and extension field values,
// and the same set of unknown fields values.
//
// Scalar values are compared with the equivalent of the == operator in Go,
// except bytes values which are compared using bytes.Equal and
// floating point values which specially treat NaNs as equal.
// Message values are compared by recursively calling Equal.
// Lists are equal if each element value is also equal.
// Maps are equal if they have the same set of keys, where the pair of values
// for each key is also equal.
func Equal(x, y Message) bool {
return protoV2.Equal(MessageV2(x), MessageV2(y))
}
func isMessageSet(md protoreflect.MessageDescriptor) bool {
ms, ok := md.(interface{ IsMessageSet() bool })
return ok && ms.IsMessageSet()
}
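
These top-level helpers simply delegate to the v2 implementation; a minimal sketch using a well-known wrapper type (any generated message satisfying the v1 Message interface would do):

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
    a := wrapperspb.String("hello")

    b := proto.Clone(a).(*wrapperspb.StringValue) // deep copy
    fmt.Println(proto.Equal(a, b))                // true

    b.Value = "world"
    fmt.Println(proto.Equal(a, b)) // false

    proto.Merge(a, b)             // populated scalar fields in b overwrite those in a
    fmt.Println(a.GetValue())     // world
}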

View File

@ -1,323 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"bytes"
"compress/gzip"
"fmt"
"io/ioutil"
"reflect"
"strings"
"sync"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/runtime/protoimpl"
)
// filePath is the path to the proto source file.
type filePath = string // e.g., "google/protobuf/descriptor.proto"
// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
type fileDescGZIP = []byte
var fileCache sync.Map // map[filePath]fileDescGZIP
// RegisterFile is called from generated code to register the compressed
// FileDescriptorProto with the file path for a proto source file.
//
// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
func RegisterFile(s filePath, d fileDescGZIP) {
// Decompress the descriptor.
zr, err := gzip.NewReader(bytes.NewReader(d))
if err != nil {
panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
}
b, err := ioutil.ReadAll(zr)
if err != nil {
panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
}
// Construct a protoreflect.FileDescriptor from the raw descriptor.
// Note that DescBuilder.Build automatically registers the constructed
// file descriptor with the v2 registry.
protoimpl.DescBuilder{RawDescriptor: b}.Build()
// Locally cache the raw descriptor form for the file.
fileCache.Store(s, d)
}
// FileDescriptor returns the compressed FileDescriptorProto given the file path
// for a proto source file. It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
func FileDescriptor(s filePath) fileDescGZIP {
if v, ok := fileCache.Load(s); ok {
return v.(fileDescGZIP)
}
// Find the descriptor in the v2 registry.
var b []byte
if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok {
b = fd.ProtoLegacyRawDesc()
} else {
// TODO: Use protodesc.ToFileDescriptorProto to construct
// a descriptorpb.FileDescriptorProto and marshal it.
// However, doing so causes the proto package to have a dependency
// on descriptorpb, leading to cyclic dependency issues.
}
}
// Locally cache the raw descriptor form for the file.
if len(b) > 0 {
v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
return v.(fileDescGZIP)
}
return nil
}
// enumName is the name of an enum. For historical reasons, the enum name is
// neither the full Go name nor the full protobuf name of the enum.
// The name is the dot-separated combination of just the proto package that the
// enum is declared within followed by the Go type name of the generated enum.
type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"
// enumsByName maps enum values by name to their numeric counterpart.
type enumsByName = map[string]int32
// enumsByNumber maps enum values by number to their name counterpart.
type enumsByNumber = map[int32]string
var enumCache sync.Map // map[enumName]enumsByName
var numFilesCache sync.Map // map[protoreflect.FullName]int
// RegisterEnum is called from the generated code to register the mapping of
// enum value names to enum numbers for the enum identified by s.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
if _, ok := enumCache.Load(s); ok {
panic("proto: duplicate enum registered: " + s)
}
enumCache.Store(s, m)
// This does not forward registration to the v2 registry since this API
// lacks sufficient information to construct a complete v2 enum descriptor.
}
// EnumValueMap returns the mapping from enum value names to enum numbers for
// the enum of the given name. It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
func EnumValueMap(s enumName) enumsByName {
if v, ok := enumCache.Load(s); ok {
return v.(enumsByName)
}
// Check whether the cache is stale. If the number of files in the current
// package differs, then it means that some enums may have been recently
// registered upstream that we do not know about.
var protoPkg protoreflect.FullName
if i := strings.LastIndexByte(s, '.'); i >= 0 {
protoPkg = protoreflect.FullName(s[:i])
}
v, _ := numFilesCache.Load(protoPkg)
numFiles, _ := v.(int)
if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
return nil // cache is up-to-date; was not found earlier
}
// Update the enum cache for all enums declared in the given proto package.
numFiles = 0
protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
name := protoimpl.X.LegacyEnumName(ed)
if _, ok := enumCache.Load(name); !ok {
m := make(enumsByName)
evs := ed.Values()
for i := evs.Len() - 1; i >= 0; i-- {
ev := evs.Get(i)
m[string(ev.Name())] = int32(ev.Number())
}
enumCache.LoadOrStore(name, m)
}
})
numFiles++
return true
})
numFilesCache.Store(protoPkg, numFiles)
// Check cache again for enum map.
if v, ok := enumCache.Load(s); ok {
return v.(enumsByName)
}
return nil
}
// walkEnums recursively walks all enums declared in d.
func walkEnums(d interface {
Enums() protoreflect.EnumDescriptors
Messages() protoreflect.MessageDescriptors
}, f func(protoreflect.EnumDescriptor)) {
eds := d.Enums()
for i := eds.Len() - 1; i >= 0; i-- {
f(eds.Get(i))
}
mds := d.Messages()
for i := mds.Len() - 1; i >= 0; i-- {
walkEnums(mds.Get(i), f)
}
}
// messageName is the full name of protobuf message.
type messageName = string
var messageTypeCache sync.Map // map[messageName]reflect.Type
// RegisterType is called from generated code to register the message Go type
// for a message of the given name.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
func RegisterType(m Message, s messageName) {
mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
panic(err)
}
messageTypeCache.Store(s, reflect.TypeOf(m))
}
// RegisterMapType is called from generated code to register the Go map type
// for a protobuf message representing a map entry.
//
// Deprecated: Do not use.
func RegisterMapType(m interface{}, s messageName) {
t := reflect.TypeOf(m)
if t.Kind() != reflect.Map {
panic(fmt.Sprintf("invalid map kind: %v", t))
}
if _, ok := messageTypeCache.Load(s); ok {
panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
}
messageTypeCache.Store(s, t)
}
// MessageType returns the message type for a named message.
// It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
func MessageType(s messageName) reflect.Type {
if v, ok := messageTypeCache.Load(s); ok {
return v.(reflect.Type)
}
// Derive the message type from the v2 registry.
var t reflect.Type
if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
t = messageGoType(mt)
}
// If we could not get a concrete type, it is possible that it is a
// pseudo-message for a map entry.
if t == nil {
d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
kt := goTypeForField(md.Fields().ByNumber(1))
vt := goTypeForField(md.Fields().ByNumber(2))
t = reflect.MapOf(kt, vt)
}
}
// Locally cache the message type for the given name.
if t != nil {
v, _ := messageTypeCache.LoadOrStore(s, t)
return v.(reflect.Type)
}
return nil
}
func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
switch k := fd.Kind(); k {
case protoreflect.EnumKind:
if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
return enumGoType(et)
}
return reflect.TypeOf(protoreflect.EnumNumber(0))
case protoreflect.MessageKind, protoreflect.GroupKind:
if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
return messageGoType(mt)
}
return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
default:
return reflect.TypeOf(fd.Default().Interface())
}
}
func enumGoType(et protoreflect.EnumType) reflect.Type {
return reflect.TypeOf(et.New(0))
}
func messageGoType(mt protoreflect.MessageType) reflect.Type {
return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
}
// MessageName returns the full protobuf name for the given message type.
//
// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
func MessageName(m Message) messageName {
if m == nil {
return ""
}
if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
return m.XXX_MessageName()
}
return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
}
// RegisterExtension is called from the generated code to register
// the extension descriptor.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
func RegisterExtension(d *ExtensionDesc) {
if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
panic(err)
}
}
type extensionsByNumber = map[int32]*ExtensionDesc
var extensionCache sync.Map // map[messageName]extensionsByNumber
// RegisteredExtensions returns a map of the registered extensions for the
// provided protobuf message, indexed by the extension field number.
//
// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
func RegisteredExtensions(m Message) extensionsByNumber {
// Check whether the cache is stale. If the number of extensions for
// the given message differs, then it means that some extensions were
// recently registered upstream that we do not know about.
s := MessageName(m)
v, _ := extensionCache.Load(s)
xs, _ := v.(extensionsByNumber)
if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
return xs // cache is up-to-date
}
// Cache is stale, re-compute the extensions map.
xs = make(extensionsByNumber)
protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
if xd, ok := xt.(*ExtensionDesc); ok {
xs[int32(xt.TypeDescriptor().Number())] = xd
} else {
// TODO: This implies that the protoreflect.ExtensionType is a
// custom type not generated by protoc-gen-go. We could try and
// convert the type to an ExtensionDesc.
}
return true
})
extensionCache.Store(s, xs)
return xs
}
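
The lookup helpers consult the local caches first and then fall back to the v2 global registry, so any message linked into the binary is discoverable; a minimal sketch:

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
    m := wrapperspb.Bool(true)

    fmt.Println(proto.MessageName(m))                            // google.protobuf.BoolValue
    fmt.Println(proto.MessageType("google.protobuf.BoolValue"))  // *wrapperspb.BoolValue
}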

View File

@ -1,801 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"encoding"
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"unicode/utf8"
"google.golang.org/protobuf/encoding/prototext"
protoV2 "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
const wrapTextUnmarshalV2 = false
// ParseError is returned by UnmarshalText.
type ParseError struct {
Message string
// Deprecated: Do not use.
Line, Offset int
}
func (e *ParseError) Error() string {
if wrapTextUnmarshalV2 {
return e.Message
}
if e.Line == 1 {
return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
}
return fmt.Sprintf("line %d: %v", e.Line, e.Message)
}
// UnmarshalText parses a proto text formatted string into m.
func UnmarshalText(s string, m Message) error {
if u, ok := m.(encoding.TextUnmarshaler); ok {
return u.UnmarshalText([]byte(s))
}
m.Reset()
mi := MessageV2(m)
if wrapTextUnmarshalV2 {
err := prototext.UnmarshalOptions{
AllowPartial: true,
}.Unmarshal([]byte(s), mi)
if err != nil {
return &ParseError{Message: err.Error()}
}
return checkRequiredNotSet(mi)
} else {
if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
return err
}
return checkRequiredNotSet(mi)
}
}
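
UnmarshalText is the entry point; everything below it is the hand-written tokenizer it drives. A minimal sketch, again using a well-known wrapper type from the v2 module:

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
    m := &wrapperspb.StringValue{}
    if err := proto.UnmarshalText(`value: "hello"`, m); err != nil {
        panic(err)
    }
    fmt.Println(m.GetValue()) // hello
}
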
type textParser struct {
s string // remaining input
done bool // whether the parsing is finished (success or error)
backed bool // whether back() was called
offset, line int
cur token
}
type token struct {
value string
err *ParseError
line int // line number
offset int // byte number from start of input, not start of line
unquoted string // the unquoted version of value, if it was a quoted string
}
func newTextParser(s string) *textParser {
p := new(textParser)
p.s = s
p.line = 1
p.cur.line = 1
return p
}
func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
md := m.Descriptor()
fds := md.Fields()
// A struct is a sequence of "name: value", terminated by one of
// '>' or '}', or the end of the input. A name may also be
// "[extension]" or "[type/url]".
//
// The whole struct can also be an expanded Any message, like:
// [type/url] < ... struct contents ... >
seen := make(map[protoreflect.FieldNumber]bool)
for {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == terminator {
break
}
if tok.value == "[" {
if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
return err
}
continue
}
// This is a normal, non-extension field.
name := protoreflect.Name(tok.value)
fd := fds.ByName(name)
switch {
case fd == nil:
gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
fd = gd
}
case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
fd = nil
case fd.IsWeak() && fd.Message().IsPlaceholder():
fd = nil
}
if fd == nil {
typeName := string(md.FullName())
if m, ok := m.Interface().(Message); ok {
t := reflect.TypeOf(m)
if t.Kind() == reflect.Ptr {
typeName = t.Elem().String()
}
}
return p.errorf("unknown field name %q in %v", name, typeName)
}
if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
}
if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
return p.errorf("non-repeated field %q was repeated", fd.Name())
}
seen[fd.Number()] = true
// Consume any colon.
if err := p.checkForColon(fd); err != nil {
return err
}
// Parse into the field.
v := m.Get(fd)
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
v = m.Mutable(fd)
}
if v, err = p.unmarshalValue(v, fd); err != nil {
return err
}
m.Set(fd, v)
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
}
return nil
}
func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
name, err := p.consumeExtensionOrAnyName()
if err != nil {
return err
}
// If it contains a slash, it's an Any type URL.
if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
tok := p.next()
if tok.err != nil {
return tok.err
}
// consume an optional colon
if tok.value == ":" {
tok = p.next()
if tok.err != nil {
return tok.err
}
}
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
if err != nil {
return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
}
m2 := mt.New()
if err := p.unmarshalMessage(m2, terminator); err != nil {
return err
}
b, err := protoV2.Marshal(m2.Interface())
if err != nil {
return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
}
urlFD := m.Descriptor().Fields().ByName("type_url")
valFD := m.Descriptor().Fields().ByName("value")
if seen[urlFD.Number()] {
return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
}
if seen[valFD.Number()] {
return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
}
m.Set(urlFD, protoreflect.ValueOfString(name))
m.Set(valFD, protoreflect.ValueOfBytes(b))
seen[urlFD.Number()] = true
seen[valFD.Number()] = true
return nil
}
xname := protoreflect.FullName(name)
xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
if xt == nil && isMessageSet(m.Descriptor()) {
xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
}
if xt == nil {
return p.errorf("unrecognized extension %q", name)
}
fd := xt.TypeDescriptor()
if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
}
if err := p.checkForColon(fd); err != nil {
return err
}
v := m.Get(fd)
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
v = m.Mutable(fd)
}
v, err = p.unmarshalValue(v, fd)
if err != nil {
return err
}
m.Set(fd, v)
return p.consumeOptionalSeparator()
}
func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
tok := p.next()
if tok.err != nil {
return v, tok.err
}
if tok.value == "" {
return v, p.errorf("unexpected EOF")
}
switch {
case fd.IsList():
lv := v.List()
var err error
if tok.value == "[" {
// Repeated field with list notation, like [1,2,3].
for {
vv := lv.NewElement()
vv, err = p.unmarshalSingularValue(vv, fd)
if err != nil {
return v, err
}
lv.Append(vv)
tok := p.next()
if tok.err != nil {
return v, tok.err
}
if tok.value == "]" {
break
}
if tok.value != "," {
return v, p.errorf("Expected ']' or ',' found %q", tok.value)
}
}
return v, nil
}
// One value of the repeated field.
p.back()
vv := lv.NewElement()
vv, err = p.unmarshalSingularValue(vv, fd)
if err != nil {
return v, err
}
lv.Append(vv)
return v, nil
case fd.IsMap():
// The map entry should be this sequence of tokens:
// < key : KEY value : VALUE >
// However, implementations may omit key or value, and technically
// we should support them in any order.
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return v, p.errorf("expected '{' or '<', found %q", tok.value)
}
keyFD := fd.MapKey()
valFD := fd.MapValue()
mv := v.Map()
kv := keyFD.Default()
vv := mv.NewValue()
for {
tok := p.next()
if tok.err != nil {
return v, tok.err
}
if tok.value == terminator {
break
}
var err error
switch tok.value {
case "key":
if err := p.consumeToken(":"); err != nil {
return v, err
}
if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
return v, err
}
if err := p.consumeOptionalSeparator(); err != nil {
return v, err
}
case "value":
if err := p.checkForColon(valFD); err != nil {
return v, err
}
if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
return v, err
}
if err := p.consumeOptionalSeparator(); err != nil {
return v, err
}
default:
p.back()
return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
}
}
mv.Set(kv.MapKey(), vv)
return v, nil
default:
p.back()
return p.unmarshalSingularValue(v, fd)
}
}
func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
tok := p.next()
if tok.err != nil {
return v, tok.err
}
if tok.value == "" {
return v, p.errorf("unexpected EOF")
}
switch fd.Kind() {
case protoreflect.BoolKind:
switch tok.value {
case "true", "1", "t", "True":
return protoreflect.ValueOfBool(true), nil
case "false", "0", "f", "False":
return protoreflect.ValueOfBool(false), nil
}
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
return protoreflect.ValueOfInt32(int32(x)), nil
}
// The C++ parser accepts large positive hex numbers that use
// two's complement arithmetic to represent negative numbers.
// This feature is here for backwards compatibility with C++.
if strings.HasPrefix(tok.value, "0x") {
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
}
}
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
return protoreflect.ValueOfInt64(int64(x)), nil
}
// The C++ parser accepts large positive hex numbers that use
// two's complement arithmetic to represent negative numbers.
// This feature is here for backwards compatibility with C++.
if strings.HasPrefix(tok.value, "0x") {
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
}
}
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
return protoreflect.ValueOfUint32(uint32(x)), nil
}
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
return protoreflect.ValueOfUint64(uint64(x)), nil
}
case protoreflect.FloatKind:
// Ignore 'f' for compatibility with output generated by C++,
// but don't remove 'f' when the value is "-inf" or "inf".
v := tok.value
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
v = v[:len(v)-len("f")]
}
if x, err := strconv.ParseFloat(v, 32); err == nil {
return protoreflect.ValueOfFloat32(float32(x)), nil
}
case protoreflect.DoubleKind:
// Ignore 'f' for compatibility with output generated by C++,
// but don't remove 'f' when the value is "-inf" or "inf".
v := tok.value
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
v = v[:len(v)-len("f")]
}
if x, err := strconv.ParseFloat(v, 64); err == nil {
return protoreflect.ValueOfFloat64(float64(x)), nil
}
case protoreflect.StringKind:
if isQuote(tok.value[0]) {
return protoreflect.ValueOfString(tok.unquoted), nil
}
case protoreflect.BytesKind:
if isQuote(tok.value[0]) {
return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
}
case protoreflect.EnumKind:
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
}
vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
if vd != nil {
return protoreflect.ValueOfEnum(vd.Number()), nil
}
case protoreflect.MessageKind, protoreflect.GroupKind:
var terminator string
switch tok.value {
case "{":
terminator = "}"
case "<":
terminator = ">"
default:
return v, p.errorf("expected '{' or '<', found %q", tok.value)
}
err := p.unmarshalMessage(v.Message(), terminator)
return v, err
default:
panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
}
return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
}
// Consume a ':' from the input stream (if the next token is a colon),
// returning an error if a colon is needed but not present.
func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ":" {
if fd.Message() == nil {
return p.errorf("expected ':', found %q", tok.value)
}
p.back()
}
return nil
}
// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
// the following ']'. It returns the name or URL consumed.
func (p *textParser) consumeExtensionOrAnyName() (string, error) {
tok := p.next()
if tok.err != nil {
return "", tok.err
}
// If the extension name or type URL is quoted, it's a single token.
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
if err != nil {
return "", err
}
return name, p.consumeToken("]")
}
// Consume everything up to "]"
var parts []string
for tok.value != "]" {
parts = append(parts, tok.value)
tok = p.next()
if tok.err != nil {
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
}
if p.done && tok.value != "]" {
return "", p.errorf("unclosed type_url or extension name")
}
}
return strings.Join(parts, ""), nil
}
// consumeOptionalSeparator consumes an optional semicolon or comma.
// It is used in unmarshalMessage to provide backward compatibility.
func (p *textParser) consumeOptionalSeparator() error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ";" && tok.value != "," {
p.back()
}
return nil
}
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
p.cur.err = pe
p.done = true
return pe
}
func (p *textParser) skipWhitespace() {
i := 0
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
if p.s[i] == '#' {
// comment; skip to end of line or input
for i < len(p.s) && p.s[i] != '\n' {
i++
}
if i == len(p.s) {
break
}
}
if p.s[i] == '\n' {
p.line++
}
i++
}
p.offset += i
p.s = p.s[i:len(p.s)]
if len(p.s) == 0 {
p.done = true
}
}
func (p *textParser) advance() {
// Skip whitespace
p.skipWhitespace()
if p.done {
return
}
// Start of non-whitespace
p.cur.err = nil
p.cur.offset, p.cur.line = p.offset, p.line
p.cur.unquoted = ""
switch p.s[0] {
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
// Single symbol
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
case '"', '\'':
// Quoted string
i := 1
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
if p.s[i] == '\\' && i+1 < len(p.s) {
// skip escaped char
i++
}
i++
}
if i >= len(p.s) || p.s[i] != p.s[0] {
p.errorf("unmatched quote")
return
}
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
if err != nil {
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
return
}
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
p.cur.unquoted = unq
default:
i := 0
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
i++
}
if i == 0 {
p.errorf("unexpected byte %#x", p.s[0])
return
}
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
}
p.offset += len(p.cur.value)
}
// Back off the parser by one token. Can only be done between calls to next().
// It makes the next advance() a no-op.
func (p *textParser) back() { p.backed = true }
// Advances the parser and returns the new current token.
func (p *textParser) next() *token {
if p.backed || p.done {
p.backed = false
return &p.cur
}
p.advance()
if p.done {
p.cur.value = ""
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
// Look for multiple quoted strings separated by whitespace,
// and concatenate them.
cat := p.cur
for {
p.skipWhitespace()
if p.done || !isQuote(p.s[0]) {
break
}
p.advance()
if p.cur.err != nil {
return &p.cur
}
cat.value += " " + p.cur.value
cat.unquoted += p.cur.unquoted
}
p.done = false // parser may have seen EOF, but we want to return cat
p.cur = cat
}
return &p.cur
}
func (p *textParser) consumeToken(s string) error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != s {
p.back()
return p.errorf("expected %q, found %q", s, tok.value)
}
return nil
}
var errBadUTF8 = errors.New("proto: bad UTF-8")
func unquoteC(s string, quote rune) (string, error) {
// This is based on C++'s tokenizer.cc.
// Despite its name, this is *not* parsing C syntax.
// For instance, "\0" is an invalid quoted string.
// Avoid allocation in trivial cases.
simple := true
for _, r := range s {
if r == '\\' || r == quote {
simple = false
break
}
}
if simple {
return s, nil
}
buf := make([]byte, 0, 3*len(s)/2)
for len(s) > 0 {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", errBadUTF8
}
s = s[n:]
if r != '\\' {
if r < utf8.RuneSelf {
buf = append(buf, byte(r))
} else {
buf = append(buf, string(r)...)
}
continue
}
ch, tail, err := unescape(s)
if err != nil {
return "", err
}
buf = append(buf, ch...)
s = tail
}
return string(buf), nil
}
func unescape(s string) (ch string, tail string, err error) {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", "", errBadUTF8
}
s = s[n:]
switch r {
case 'a':
return "\a", s, nil
case 'b':
return "\b", s, nil
case 'f':
return "\f", s, nil
case 'n':
return "\n", s, nil
case 'r':
return "\r", s, nil
case 't':
return "\t", s, nil
case 'v':
return "\v", s, nil
case '?':
return "?", s, nil // trigraph workaround
case '\'', '"', '\\':
return string(r), s, nil
case '0', '1', '2', '3', '4', '5', '6', '7':
if len(s) < 2 {
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
}
ss := string(r) + s[:2]
s = s[2:]
i, err := strconv.ParseUint(ss, 8, 8)
if err != nil {
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
}
return string([]byte{byte(i)}), s, nil
case 'x', 'X', 'u', 'U':
var n int
switch r {
case 'x', 'X':
n = 2
case 'u':
n = 4
case 'U':
n = 8
}
if len(s) < n {
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
}
ss := s[:n]
s = s[n:]
i, err := strconv.ParseUint(ss, 16, 64)
if err != nil {
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
}
if r == 'x' || r == 'X' {
return string([]byte{byte(i)}), s, nil
}
if i > utf8.MaxRune {
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
}
return string(rune(i)), s, nil
}
return "", "", fmt.Errorf(`unknown escape \%c`, r)
}
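// Illustrative example (added for clarity; not part of the original vendored
// file): unquoteC resolves the C-style escape sequences accepted by the text
// format. The input is the raw text between the quotes.
func Example_unquoteC() {
	s, err := unquoteC(`a\tb\x41`, '"')
	fmt.Printf("%q %v\n", s, err)
	// Output: "a\tbA" <nil>
}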
func isIdentOrNumberChar(c byte) bool {
switch {
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
return true
case '0' <= c && c <= '9':
return true
}
switch c {
case '-', '+', '.', '_':
return true
}
return false
}
func isWhitespace(c byte) bool {
switch c {
case ' ', '\t', '\n', '\r':
return true
}
return false
}
func isQuote(c byte) bool {
switch c {
case '"', '\'':
return true
}
return false
}

View File

@ -1,560 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"bytes"
"encoding"
"fmt"
"io"
"math"
"sort"
"strings"
"google.golang.org/protobuf/encoding/prototext"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
const wrapTextMarshalV2 = false
// TextMarshaler is a configurable text format marshaler.
type TextMarshaler struct {
Compact bool // use compact text format (one line)
ExpandAny bool // expand google.protobuf.Any messages of known types
}
// Marshal writes the proto text format of m to w.
func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
b, err := tm.marshal(m)
if len(b) > 0 {
if _, err := w.Write(b); err != nil {
return err
}
}
return err
}
// Text returns a proto text formatted string of m.
func (tm *TextMarshaler) Text(m Message) string {
b, _ := tm.marshal(m)
return string(b)
}
func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return []byte("<nil>"), nil
}
if wrapTextMarshalV2 {
if m, ok := m.(encoding.TextMarshaler); ok {
return m.MarshalText()
}
opts := prototext.MarshalOptions{
AllowPartial: true,
EmitUnknown: true,
}
if !tm.Compact {
opts.Indent = " "
}
if !tm.ExpandAny {
opts.Resolver = (*protoregistry.Types)(nil)
}
return opts.Marshal(mr.Interface())
} else {
w := &textWriter{
compact: tm.Compact,
expandAny: tm.ExpandAny,
complete: true,
}
if m, ok := m.(encoding.TextMarshaler); ok {
b, err := m.MarshalText()
if err != nil {
return nil, err
}
w.Write(b)
return w.buf, nil
}
err := w.writeMessage(mr)
return w.buf, err
}
}
var (
defaultTextMarshaler = TextMarshaler{}
compactTextMarshaler = TextMarshaler{Compact: true}
)
// MarshalText writes the proto text format of m to w.
func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
// MarshalTextString returns a proto text formatted string of m.
func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
// CompactText writes the compact proto text format of m to w.
func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
// CompactTextString returns a compact proto text formatted string of m.
func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
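// Illustrative sketch (added for clarity; not part of the original vendored
// file): the text-format helpers from a caller's point of view. examplepb.Msg
// is an assumed generated message type.
//
//	m := &examplepb.Msg{Name: proto.String("demo")}
//	fmt.Println(proto.MarshalTextString(m)) // indented, one field per line
//	fmt.Println(proto.CompactTextString(m)) // same message on a single line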
var (
newline = []byte("\n")
endBraceNewline = []byte("}\n")
posInf = []byte("inf")
negInf = []byte("-inf")
nan = []byte("nan")
)
// textWriter is an io.Writer that tracks its indentation level.
type textWriter struct {
compact bool // same as TextMarshaler.Compact
expandAny bool // same as TextMarshaler.ExpandAny
complete bool // whether the current position is a complete line
indent int // indentation level; never negative
buf []byte
}
func (w *textWriter) Write(p []byte) (n int, _ error) {
newlines := bytes.Count(p, newline)
if newlines == 0 {
if !w.compact && w.complete {
w.writeIndent()
}
w.buf = append(w.buf, p...)
w.complete = false
return len(p), nil
}
frags := bytes.SplitN(p, newline, newlines+1)
if w.compact {
for i, frag := range frags {
if i > 0 {
w.buf = append(w.buf, ' ')
n++
}
w.buf = append(w.buf, frag...)
n += len(frag)
}
return n, nil
}
for i, frag := range frags {
if w.complete {
w.writeIndent()
}
w.buf = append(w.buf, frag...)
n += len(frag)
if i+1 < len(frags) {
w.buf = append(w.buf, '\n')
n++
}
}
w.complete = len(frags[len(frags)-1]) == 0
return n, nil
}
func (w *textWriter) WriteByte(c byte) error {
if w.compact && c == '\n' {
c = ' '
}
if !w.compact && w.complete {
w.writeIndent()
}
w.buf = append(w.buf, c)
w.complete = c == '\n'
return nil
}
func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
if !w.compact && w.complete {
w.writeIndent()
}
w.complete = false
if fd.Kind() != protoreflect.GroupKind {
w.buf = append(w.buf, fd.Name()...)
w.WriteByte(':')
} else {
// Use message type name for group field name.
w.buf = append(w.buf, fd.Message().Name()...)
}
if !w.compact {
w.WriteByte(' ')
}
}
func requiresQuotes(u string) bool {
	// When the type URL contains any characters other than [0-9A-Za-z./_], it must be quoted.
for _, ch := range u {
switch {
case ch == '.' || ch == '/' || ch == '_':
continue
case '0' <= ch && ch <= '9':
continue
case 'A' <= ch && ch <= 'Z':
continue
case 'a' <= ch && ch <= 'z':
continue
default:
return true
}
}
return false
}
// writeProto3Any writes an expanded google.protobuf.Any message.
//
// It returns (false, nil) if the value can't be unmarshaled (e.g. because
// required messages are not linked in).
//
// It returns (true, error) when sv was written in expanded format or an error
// was encountered.
func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
md := m.Descriptor()
fdURL := md.Fields().ByName("type_url")
fdVal := md.Fields().ByName("value")
url := m.Get(fdURL).String()
mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
if err != nil {
return false, nil
}
b := m.Get(fdVal).Bytes()
m2 := mt.New()
if err := proto.Unmarshal(b, m2.Interface()); err != nil {
return false, nil
}
w.Write([]byte("["))
if requiresQuotes(url) {
w.writeQuotedString(url)
} else {
w.Write([]byte(url))
}
if w.compact {
w.Write([]byte("]:<"))
} else {
w.Write([]byte("]: <\n"))
w.indent++
}
if err := w.writeMessage(m2); err != nil {
return true, err
}
if w.compact {
w.Write([]byte("> "))
} else {
w.indent--
w.Write([]byte(">\n"))
}
return true, nil
}
func (w *textWriter) writeMessage(m protoreflect.Message) error {
md := m.Descriptor()
if w.expandAny && md.FullName() == "google.protobuf.Any" {
if canExpand, err := w.writeProto3Any(m); canExpand {
return err
}
}
fds := md.Fields()
for i := 0; i < fds.Len(); {
fd := fds.Get(i)
if od := fd.ContainingOneof(); od != nil {
fd = m.WhichOneof(od)
i += od.Fields().Len()
} else {
i++
}
if fd == nil || !m.Has(fd) {
continue
}
switch {
case fd.IsList():
lv := m.Get(fd).List()
for j := 0; j < lv.Len(); j++ {
w.writeName(fd)
v := lv.Get(j)
if err := w.writeSingularValue(v, fd); err != nil {
return err
}
w.WriteByte('\n')
}
case fd.IsMap():
kfd := fd.MapKey()
vfd := fd.MapValue()
mv := m.Get(fd).Map()
type entry struct{ key, val protoreflect.Value }
var entries []entry
mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
entries = append(entries, entry{k.Value(), v})
return true
})
sort.Slice(entries, func(i, j int) bool {
switch kfd.Kind() {
case protoreflect.BoolKind:
return !entries[i].key.Bool() && entries[j].key.Bool()
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
return entries[i].key.Int() < entries[j].key.Int()
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
return entries[i].key.Uint() < entries[j].key.Uint()
case protoreflect.StringKind:
return entries[i].key.String() < entries[j].key.String()
default:
panic("invalid kind")
}
})
for _, entry := range entries {
w.writeName(fd)
w.WriteByte('<')
if !w.compact {
w.WriteByte('\n')
}
w.indent++
w.writeName(kfd)
if err := w.writeSingularValue(entry.key, kfd); err != nil {
return err
}
w.WriteByte('\n')
w.writeName(vfd)
if err := w.writeSingularValue(entry.val, vfd); err != nil {
return err
}
w.WriteByte('\n')
w.indent--
w.WriteByte('>')
w.WriteByte('\n')
}
default:
w.writeName(fd)
if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
return err
}
w.WriteByte('\n')
}
}
if b := m.GetUnknown(); len(b) > 0 {
w.writeUnknownFields(b)
}
return w.writeExtensions(m)
}
func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
switch fd.Kind() {
case protoreflect.FloatKind, protoreflect.DoubleKind:
switch vf := v.Float(); {
case math.IsInf(vf, +1):
w.Write(posInf)
case math.IsInf(vf, -1):
w.Write(negInf)
case math.IsNaN(vf):
w.Write(nan)
default:
fmt.Fprint(w, v.Interface())
}
case protoreflect.StringKind:
// NOTE: This does not validate UTF-8 for historical reasons.
w.writeQuotedString(string(v.String()))
case protoreflect.BytesKind:
w.writeQuotedString(string(v.Bytes()))
case protoreflect.MessageKind, protoreflect.GroupKind:
var bra, ket byte = '<', '>'
if fd.Kind() == protoreflect.GroupKind {
bra, ket = '{', '}'
}
w.WriteByte(bra)
if !w.compact {
w.WriteByte('\n')
}
w.indent++
m := v.Message()
if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
b, err := m2.MarshalText()
if err != nil {
return err
}
w.Write(b)
} else {
w.writeMessage(m)
}
w.indent--
w.WriteByte(ket)
case protoreflect.EnumKind:
if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
fmt.Fprint(w, ev.Name())
} else {
fmt.Fprint(w, v.Enum())
}
default:
fmt.Fprint(w, v.Interface())
}
return nil
}
// writeQuotedString writes a quoted string in the protocol buffer text format.
func (w *textWriter) writeQuotedString(s string) {
w.WriteByte('"')
for i := 0; i < len(s); i++ {
switch c := s[i]; c {
case '\n':
w.buf = append(w.buf, `\n`...)
case '\r':
w.buf = append(w.buf, `\r`...)
case '\t':
w.buf = append(w.buf, `\t`...)
case '"':
w.buf = append(w.buf, `\"`...)
case '\\':
w.buf = append(w.buf, `\\`...)
default:
if isPrint := c >= 0x20 && c < 0x7f; isPrint {
w.buf = append(w.buf, c)
} else {
w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
}
}
}
w.WriteByte('"')
}
func (w *textWriter) writeUnknownFields(b []byte) {
if !w.compact {
fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
}
for len(b) > 0 {
num, wtyp, n := protowire.ConsumeTag(b)
if n < 0 {
return
}
b = b[n:]
if wtyp == protowire.EndGroupType {
w.indent--
w.Write(endBraceNewline)
continue
}
fmt.Fprint(w, num)
if wtyp != protowire.StartGroupType {
w.WriteByte(':')
}
if !w.compact || wtyp == protowire.StartGroupType {
w.WriteByte(' ')
}
switch wtyp {
case protowire.VarintType:
v, n := protowire.ConsumeVarint(b)
if n < 0 {
return
}
b = b[n:]
fmt.Fprint(w, v)
case protowire.Fixed32Type:
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
return
}
b = b[n:]
fmt.Fprint(w, v)
case protowire.Fixed64Type:
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
return
}
b = b[n:]
fmt.Fprint(w, v)
case protowire.BytesType:
v, n := protowire.ConsumeBytes(b)
if n < 0 {
return
}
b = b[n:]
fmt.Fprintf(w, "%q", v)
case protowire.StartGroupType:
w.WriteByte('{')
w.indent++
default:
fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
}
w.WriteByte('\n')
}
}
// writeExtensions writes all the extensions in m.
func (w *textWriter) writeExtensions(m protoreflect.Message) error {
md := m.Descriptor()
if md.ExtensionRanges().Len() == 0 {
return nil
}
type ext struct {
desc protoreflect.FieldDescriptor
val protoreflect.Value
}
var exts []ext
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
if fd.IsExtension() {
exts = append(exts, ext{fd, v})
}
return true
})
sort.Slice(exts, func(i, j int) bool {
return exts[i].desc.Number() < exts[j].desc.Number()
})
for _, ext := range exts {
// For message set, use the name of the message as the extension name.
name := string(ext.desc.FullName())
if isMessageSet(ext.desc.ContainingMessage()) {
name = strings.TrimSuffix(name, ".message_set_extension")
}
if !ext.desc.IsList() {
if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
return err
}
} else {
lv := ext.val.List()
for i := 0; i < lv.Len(); i++ {
if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
return err
}
}
}
}
return nil
}
func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
fmt.Fprintf(w, "[%s]:", name)
if !w.compact {
w.WriteByte(' ')
}
if err := w.writeSingularValue(v, fd); err != nil {
return err
}
w.WriteByte('\n')
return nil
}
func (w *textWriter) writeIndent() {
if !w.complete {
return
}
for i := 0; i < w.indent*2; i++ {
w.buf = append(w.buf, ' ')
}
w.complete = false
}

View File

@ -1,78 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
protoV2 "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/runtime/protoiface"
)
// Size returns the size in bytes of the wire-format encoding of m.
func Size(m Message) int {
if m == nil {
return 0
}
mi := MessageV2(m)
return protoV2.Size(mi)
}
// Marshal returns the wire-format encoding of m.
func Marshal(m Message) ([]byte, error) {
b, err := marshalAppend(nil, m, false)
if b == nil {
b = zeroBytes
}
return b, err
}
var zeroBytes = make([]byte, 0, 0)
func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
if m == nil {
return nil, ErrNil
}
mi := MessageV2(m)
nbuf, err := protoV2.MarshalOptions{
Deterministic: deterministic,
AllowPartial: true,
}.MarshalAppend(buf, mi)
if err != nil {
return buf, err
}
if len(buf) == len(nbuf) {
if !mi.ProtoReflect().IsValid() {
return buf, ErrNil
}
}
return nbuf, checkRequiredNotSet(mi)
}
// Unmarshal parses a wire-format message in b and places the decoded results in m.
//
// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
// removed. Use UnmarshalMerge to preserve and append to existing data.
func Unmarshal(b []byte, m Message) error {
m.Reset()
return UnmarshalMerge(b, m)
}
// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
func UnmarshalMerge(b []byte, m Message) error {
mi := MessageV2(m)
out, err := protoV2.UnmarshalOptions{
AllowPartial: true,
Merge: true,
}.UnmarshalState(protoiface.UnmarshalInput{
Buf: b,
Message: mi.ProtoReflect(),
})
if err != nil {
return err
}
if out.Flags&protoiface.UnmarshalInitialized > 0 {
return nil
}
return checkRequiredNotSet(mi)
}
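// Illustrative sketch (added for clarity; not part of the original vendored
// file): a typical wire-format round trip. examplepb.Msg is an assumed
// generated message type.
//
//	src := &examplepb.Msg{Name: proto.String("demo")}
//	b, err := proto.Marshal(src) // len(b) == proto.Size(src)
//	if err != nil { /* handle */ }
//	dst := new(examplepb.Msg)
//	if err := proto.Unmarshal(b, dst); err != nil { /* handle */ }
//	// dst was reset before decoding; use proto.UnmarshalMerge to keep existing data.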

View File

@ -1,34 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
// Bool stores v in a new bool value and returns a pointer to it.
func Bool(v bool) *bool { return &v }
// Int stores v in a new int32 value and returns a pointer to it.
//
// Deprecated: Use Int32 instead.
func Int(v int) *int32 { return Int32(int32(v)) }
// Int32 stores v in a new int32 value and returns a pointer to it.
func Int32(v int32) *int32 { return &v }
// Int64 stores v in a new int64 value and returns a pointer to it.
func Int64(v int64) *int64 { return &v }
// Uint32 stores v in a new uint32 value and returns a pointer to it.
func Uint32(v uint32) *uint32 { return &v }
// Uint64 stores v in a new uint64 value and returns a pointer to it.
func Uint64(v uint64) *uint64 { return &v }
// Float32 stores v in a new float32 value and returns a pointer to it.
func Float32(v float32) *float32 { return &v }
// Float64 stores v in a new float64 value and returns a pointer to it.
func Float64(v float64) *float64 { return &v }
// String stores v in a new string value and returns a pointer to it.
func String(v string) *string { return &v }
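// Illustrative sketch (added for clarity; not part of the original vendored
// file): these helpers populate optional scalar fields of generated messages,
// which are represented as pointers. examplepb.Msg is an assumed message type.
//
//	m := &examplepb.Msg{
//		Name:  proto.String("demo"),
//		Count: proto.Int32(3),
//		Ratio: proto.Float64(0.5),
//	}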

View File

@ -1,165 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ptypes
import (
"fmt"
"strings"
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
anypb "github.com/golang/protobuf/ptypes/any"
)
const urlPrefix = "type.googleapis.com/"
// AnyMessageName returns the message name contained in an anypb.Any message.
// Most type assertions should use the Is function instead.
func AnyMessageName(any *anypb.Any) (string, error) {
name, err := anyMessageName(any)
return string(name), err
}
func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
if any == nil {
return "", fmt.Errorf("message is nil")
}
name := protoreflect.FullName(any.TypeUrl)
if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
name = name[i+len("/"):]
}
if !name.IsValid() {
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
}
return name, nil
}
// MarshalAny marshals the given message m into an anypb.Any message.
func MarshalAny(m proto.Message) (*anypb.Any, error) {
switch dm := m.(type) {
case DynamicAny:
m = dm.Message
case *DynamicAny:
if dm == nil {
return nil, proto.ErrNil
}
m = dm.Message
}
b, err := proto.Marshal(m)
if err != nil {
return nil, err
}
return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
}
// Empty returns a new message of the type specified in an anypb.Any message.
// It returns protoregistry.NotFound if the corresponding message type could not
// be resolved in the global registry.
func Empty(any *anypb.Any) (proto.Message, error) {
name, err := anyMessageName(any)
if err != nil {
return nil, err
}
mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
if err != nil {
return nil, err
}
return proto.MessageV1(mt.New().Interface()), nil
}
// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
// into the provided message m. It returns an error if the target message
// does not match the type in the Any message or if an unmarshal error occurs.
//
// The target message m may be a *DynamicAny message. If the underlying message
// type could not be resolved, then this returns protoregistry.NotFound.
func UnmarshalAny(any *anypb.Any, m proto.Message) error {
if dm, ok := m.(*DynamicAny); ok {
if dm.Message == nil {
var err error
dm.Message, err = Empty(any)
if err != nil {
return err
}
}
m = dm.Message
}
anyName, err := AnyMessageName(any)
if err != nil {
return err
}
msgName := proto.MessageName(m)
if anyName != msgName {
return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
}
return proto.Unmarshal(any.Value, m)
}
// Is reports whether the Any message contains a message of the specified type.
func Is(any *anypb.Any, m proto.Message) bool {
if any == nil || m == nil {
return false
}
name := proto.MessageName(m)
if !strings.HasSuffix(any.TypeUrl, name) {
return false
}
return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
}
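// Illustrative sketch (added for clarity; not part of the original vendored
// file): packing a message into an Any and unpacking it again. examplepb.Msg
// is an assumed generated message type.
//
//	src := &examplepb.Msg{Name: proto.String("demo")}
//	any, err := ptypes.MarshalAny(src) // TypeUrl is urlPrefix plus the message name
//	if err != nil { /* handle */ }
//	dst := new(examplepb.Msg)
//	if ptypes.Is(any, dst) {
//		err = ptypes.UnmarshalAny(any, dst) // dst now mirrors src
//	}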
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
// allocate a proto.Message for the type specified in an anypb.Any message.
// The allocated message is stored in the embedded proto.Message.
//
// Example:
// var x ptypes.DynamicAny
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
// fmt.Printf("unmarshaled message: %v", x.Message)
type DynamicAny struct{ proto.Message }
func (m DynamicAny) String() string {
if m.Message == nil {
return "<nil>"
}
return m.Message.String()
}
func (m DynamicAny) Reset() {
if m.Message == nil {
return
}
m.Message.Reset()
}
func (m DynamicAny) ProtoMessage() {
return
}
func (m DynamicAny) ProtoReflect() protoreflect.Message {
if m.Message == nil {
return nil
}
return dynamicAny{proto.MessageReflect(m.Message)}
}
type dynamicAny struct{ protoreflect.Message }
func (m dynamicAny) Type() protoreflect.MessageType {
return dynamicAnyType{m.Message.Type()}
}
func (m dynamicAny) New() protoreflect.Message {
return dynamicAnyType{m.Message.Type()}.New()
}
func (m dynamicAny) Interface() protoreflect.ProtoMessage {
return DynamicAny{proto.MessageV1(m.Message.Interface())}
}
type dynamicAnyType struct{ protoreflect.MessageType }
func (t dynamicAnyType) New() protoreflect.Message {
return dynamicAny{t.MessageType.New()}
}
func (t dynamicAnyType) Zero() protoreflect.Message {
return dynamicAny{t.MessageType.Zero()}
}

View File

@ -1,62 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/any/any.proto
package any
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
anypb "google.golang.org/protobuf/types/known/anypb"
reflect "reflect"
)
// Symbols defined in public import of google/protobuf/any.proto.
type Any = anypb.Any
var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor
var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{
0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29,
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65,
0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x33,
}
var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() }
func file_github_com_golang_protobuf_ptypes_any_any_proto_init() {
if File_github_com_golang_protobuf_ptypes_any_any_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc,
NumEnums: 0,
NumMessages: 0,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes,
DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs,
}.Build()
File_github_com_golang_protobuf_ptypes_any_any_proto = out.File
file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil
file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil
file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil
}

View File

@ -1,6 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ptypes provides functionality for interacting with well-known types.
package ptypes

View File

@ -1,72 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ptypes
import (
"errors"
"fmt"
"time"
durationpb "github.com/golang/protobuf/ptypes/duration"
)
// Range of google.protobuf.Duration as specified in duration.proto.
// This is about 10,000 years in seconds.
const (
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
minSeconds = -maxSeconds
)
// Duration converts a durationpb.Duration to a time.Duration.
// Duration returns an error if dur is invalid or overflows a time.Duration.
func Duration(dur *durationpb.Duration) (time.Duration, error) {
if err := validateDuration(dur); err != nil {
return 0, err
}
d := time.Duration(dur.Seconds) * time.Second
if int64(d/time.Second) != dur.Seconds {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
}
if dur.Nanos != 0 {
d += time.Duration(dur.Nanos) * time.Nanosecond
if (d < 0) != (dur.Nanos < 0) {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
}
}
return d, nil
}
// DurationProto converts a time.Duration to a durationpb.Duration.
func DurationProto(d time.Duration) *durationpb.Duration {
nanos := d.Nanoseconds()
secs := nanos / 1e9
nanos -= secs * 1e9
return &durationpb.Duration{
Seconds: int64(secs),
Nanos: int32(nanos),
}
}
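// Illustrative example (added for clarity; not part of the original vendored
// file): round-tripping between time.Duration and durationpb.Duration.
func ExampleDurationProto() {
	d := 90 * time.Second
	pb := DurationProto(d) // Seconds: 90, Nanos: 0
	back, err := Duration(pb)
	fmt.Println(back, err)
	// Output: 1m30s <nil>
}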
// validateDuration determines whether the durationpb.Duration is valid
// according to the definition in google/protobuf/duration.proto.
// A valid durationpb.Duration may still be too large to fit into a time.Duration.
// Note that the range of durationpb.Duration is about 10,000 years,
// while the range of time.Duration is about 290 years.
func validateDuration(dur *durationpb.Duration) error {
if dur == nil {
return errors.New("duration: nil Duration")
}
if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
return fmt.Errorf("duration: %v: seconds out of range", dur)
}
if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
return fmt.Errorf("duration: %v: nanos out of range", dur)
}
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
}
return nil
}

View File

@ -1,63 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/duration/duration.proto
package duration
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
durationpb "google.golang.org/protobuf/types/known/durationpb"
reflect "reflect"
)
// Symbols defined in public import of google/protobuf/duration.proto.
type Duration = durationpb.Duration
var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
NumEnums: 0,
NumMessages: 0,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
}.Build()
File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
}

View File

@ -1,103 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ptypes
import (
"errors"
"fmt"
"time"
timestamppb "github.com/golang/protobuf/ptypes/timestamp"
)
// Range of google.protobuf.Timestamp as specified in timestamp.proto.
const (
// Seconds field of the earliest valid Timestamp.
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
minValidSeconds = -62135596800
// Seconds field just after the latest valid Timestamp.
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
maxValidSeconds = 253402300800
)
// Timestamp converts a timestamppb.Timestamp to a time.Time.
// It returns an error if the argument is invalid.
//
// Unlike most Go functions, if Timestamp returns an error, the first return
// value is not the zero time.Time. Instead, it is the value obtained from the
// time.Unix function when passed the contents of the Timestamp, in the UTC
// locale. This may or may not be a meaningful time; many invalid Timestamps
// do map to valid time.Times.
//
// A nil Timestamp returns an error. The first return value in that case is
// undefined.
func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
	// Don't return the zero value on error, because it corresponds to a valid
// timestamp. Instead return whatever time.Unix gives us.
var t time.Time
if ts == nil {
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
} else {
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
}
return t, validateTimestamp(ts)
}
// TimestampNow returns a google.protobuf.Timestamp for the current time.
func TimestampNow() *timestamppb.Timestamp {
ts, err := TimestampProto(time.Now())
if err != nil {
panic("ptypes: time.Now() out of Timestamp range")
}
return ts
}
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
ts := &timestamppb.Timestamp{
Seconds: t.Unix(),
Nanos: int32(t.Nanosecond()),
}
if err := validateTimestamp(ts); err != nil {
return nil, err
}
return ts, nil
}
// TimestampString returns the RFC 3339 string for valid Timestamps.
// For invalid Timestamps, it returns an error message in parentheses.
func TimestampString(ts *timestamppb.Timestamp) string {
t, err := Timestamp(ts)
if err != nil {
return fmt.Sprintf("(%v)", err)
}
return t.Format(time.RFC3339Nano)
}
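// Illustrative example (added for clarity; not part of the original vendored
// file): round-tripping between time.Time and timestamppb.Timestamp.
func ExampleTimestampProto() {
	t := time.Date(2021, 12, 30, 16, 59, 13, 0, time.UTC)
	ts, err := TimestampProto(t) // Seconds: t.Unix(), Nanos: 0
	if err != nil {
		fmt.Println(err)
		return
	}
	back, _ := Timestamp(ts)
	fmt.Println(back.Equal(t))
	// Output: true
}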
// validateTimestamp determines whether a Timestamp is valid.
// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
// and has a Nanos field in the range [0, 1e9).
//
// If the Timestamp is valid, validateTimestamp returns nil.
// Otherwise, it returns an error that describes the problem.
//
// Every valid Timestamp can be represented by a time.Time,
// but the converse is not true.
func validateTimestamp(ts *timestamppb.Timestamp) error {
if ts == nil {
return errors.New("timestamp: nil Timestamp")
}
if ts.Seconds < minValidSeconds {
return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
}
if ts.Seconds >= maxValidSeconds {
return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
}
if ts.Nanos < 0 || ts.Nanos >= 1e9 {
return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
}
return nil
}

View File

@ -1,64 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
package timestamp
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
)
// Symbols defined in public import of google/protobuf/timestamp.proto.
type Timestamp = timestamppb.Timestamp
var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
}
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
NumEnums: 0,
NumMessages: 0,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
}.Build()
File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
}

View File

@ -1,354 +0,0 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. “Contributor”
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. “Contributor Version”
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor’s Contribution.
1.3. “Contribution”
means Covered Software of a particular Contributor.
1.4. “Covered Software”
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. “Incompatible With Secondary Licenses”
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of version
1.1 or earlier of the License, but not also under the terms of a
Secondary License.
1.6. “Executable Form”
means any form of the work other than Source Code Form.
1.7. “Larger Work”
means a work that combines Covered Software with other material, in a separate
file or files, that is not Covered Software.
1.8. “License”
means this document.
1.9. “Licensable”
means having the right to grant, to the maximum extent possible, whether at the
time of the initial grant or subsequently, any and all of the rights conveyed by
this License.
1.10. “Modifications”
means any of the following:
a. any file in Source Code Form that results from an addition to, deletion
from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. “Patent Claims” of a Contributor
means any patent claim(s), including without limitation, method, process,
and apparatus claims, in any patent Licensable by such Contributor that
would be infringed, but for the grant of the License, by the making,
using, selling, offering for sale, having made, import, or transfer of
either its Contributions or its Contributor Version.
1.12. “Secondary License”
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. “Source Code Form”
means the form of the work preferred for making modifications.
1.14. “You” (or “Your”)
means an individual or a legal entity exercising rights under this
License. For legal entities, “You” includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, “control” means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or as
part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its Contributions
or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution become
effective for each Contribution on the date the Contributor first distributes
such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under this
License. No additional rights or licenses will be implied from the distribution
or licensing of Covered Software under this License. Notwithstanding Section
2.1(b) above, no patent license is granted by a Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party’s
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of its
Contributions.
This License does not grant any rights in the trademarks, service marks, or
logos of any Contributor (except as may be necessary to comply with the
notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this License
(see Section 10.2) or under the terms of a Secondary License (if permitted
under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its Contributions
are its original creation(s) or it has sufficient rights to grant the
rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under applicable
copyright doctrines of fair use, fair dealing, or other equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under the
terms of this License. You must inform recipients that the Source Code Form
of the Covered Software is governed by the terms of this License, and how
they can obtain a copy of this License. You may not attempt to alter or
restrict the recipients’ rights in the Source Code Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this License,
or sublicense it under different terms, provided that the license for
the Executable Form does not attempt to limit or alter the recipients’
rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for the
Covered Software. If the Larger Work is a combination of Covered Software
with a work governed by one or more Secondary Licenses, and the Covered
Software is not Incompatible With Secondary Licenses, this License permits
You to additionally distribute such Covered Software under the terms of
such Secondary License(s), so that the recipient of the Larger Work may, at
their option, further distribute the Covered Software under the terms of
either this License or such Secondary License(s).
3.4. Notices
You may not remove or alter the substance of any license notices (including
copyright notices, patent notices, disclaimers of warranty, or limitations
of liability) contained within the Source Code Form of the Covered
Software, except that You may alter any license notices to the extent
required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on behalf
of any Contributor. You must make it absolutely clear that any such
warranty, support, indemnity, or liability obligation is offered by You
alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
if such Contributor fails to notify You of the non-compliance by some
reasonable means prior to 60 days after You have come back into compliance.
Moreover, Your grants from a particular Contributor are reinstated on an
ongoing basis if such Contributor notifies You of the non-compliance by
some reasonable means, this is the first time You have received notice of
non-compliance with this License from such Contributor, and You become
compliant prior to 30 days after Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions, counter-claims,
and cross-claims) alleging that a Contributor Version directly or
indirectly infringes any patent, then the rights granted to You by any and
all Contributors for the Covered Software under Section 2.1 of this License
shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an “as is” basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire
risk as to the quality and performance of the Covered Software is with You.
Should any Covered Software prove defective in any respect, You (not any
Contributor) assume the cost of any necessary servicing, repair, or
correction. This disclaimer of warranty constitutes an essential part of this
License. No use of any Covered Software is authorized under this License
except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from such
party’s negligence to the extent applicable law prohibits such limitation.
Some jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts of
a jurisdiction where the defendant maintains its principal place of business
and such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a party’s ability to bring cross-claims or counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version of
the License under which You originally received the Covered Software, or
under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a modified
version of this License if you rename the license and remove any
references to the name of the license steward (except to note that such
modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - “Incompatible With Secondary Licenses” Notice
This Source Code Form is “Incompatible
With Secondary Licenses”, as defined by
the Mozilla Public License, v. 2.0.

View File

@ -1,89 +0,0 @@
# errwrap
`errwrap` is a package for Go that formalizes the pattern of wrapping errors
and checking if an error contains another error.
There is a common pattern in Go of taking a returned `error` value and
then wrapping it (such as with `fmt.Errorf`) before returning it. The problem
with this pattern is that you completely lose the original `error` structure.
Arguably the _correct_ approach is that you should make a custom structure
implementing the `error` interface, and have the original error as a field
on that structure, such [as this example](http://golang.org/pkg/os/#PathError).
This is a good approach, but it requires you to know the entire chain of
possible rewrappings, when you might care about only one of them.
`errwrap` formalizes this pattern (it doesn't matter what approach you use
above) by giving a single interface for wrapping errors, checking if a specific
error is wrapped, and extracting that error.
## Installation and Docs
Install using `go get github.com/hashicorp/errwrap`.
Full documentation is available at
http://godoc.org/github.com/hashicorp/errwrap
## Usage
#### Basic Usage
Below is a very basic example of its usage:
```go
// A function that always returns an error, but wraps it, like a real
// function might.
func tryOpen() error {
_, err := os.Open("/i/dont/exist")
if err != nil {
return errwrap.Wrapf("Doesn't exist: {{err}}", err)
}
return nil
}
func main() {
err := tryOpen()
// We can use the Contains helpers to check if an error contains
// another error. It is safe to do this with a nil error, or with
// an error that doesn't even use the errwrap package.
if errwrap.Contains(err, "does not exist") {
// Do something
}
if errwrap.ContainsType(err, new(os.PathError)) {
// Do something
}
// Or we can use the associated `Get` functions to just extract
// a specific error. This would return nil if that specific error doesn't
// exist.
perr := errwrap.GetType(err, new(os.PathError))
}
```
#### Custom Types
If you're already making custom types that properly wrap errors, then
you can get all the functionality of `errwraps.Contains` and such by
implementing the `Wrapper` interface with just one function. Example:
```go
type AppError struct {
Code ErrorCode
Err error
}
func (e *AppError) WrappedErrors() []error {
return []error{e.Err}
}
```
Now this works:
```go
err := &AppError{Err: fmt.Errorf("an error")}
if errwrap.ContainsType(err, fmt.Errorf("")) {
// This will work!
}
```

View File

@ -1,169 +0,0 @@
// Package errwrap implements methods to formalize error wrapping in Go.
//
// All of the top-level functions that take an `error` are built to be able
// to take any error, not just wrapped errors. This allows you to use errwrap
// without having to type-check and type-cast everywhere.
package errwrap
import (
"errors"
"reflect"
"strings"
)
// WalkFunc is the callback called for Walk.
type WalkFunc func(error)
// Wrapper is an interface that can be implemented by custom types to
// have all the Contains, Get, etc. functions in errwrap work.
//
// When Walk reaches a Wrapper, it will call the callback for every
// wrapped error in addition to the wrapper itself. Since all the top-level
// functions in errwrap use Walk, this means that all those functions work
// with your custom type.
type Wrapper interface {
WrappedErrors() []error
}
// Wrap defines that outer wraps inner, returning an error type that
// can be cleanly used with the other methods in this package, such as
// Contains, GetAll, etc.
//
// This function won't modify the error message at all (the outer message
// will be used).
func Wrap(outer, inner error) error {
return &wrappedError{
Outer: outer,
Inner: inner,
}
}
// Wrapf wraps an error with a formatting message. This is similar to using
// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap
// errors, you should replace it with this.
//
// format is the format of the error message. The string '{{err}}' will
// be replaced with the original error message.
func Wrapf(format string, err error) error {
outerMsg := "<nil>"
if err != nil {
outerMsg = err.Error()
}
outer := errors.New(strings.Replace(
format, "{{err}}", outerMsg, -1))
return Wrap(outer, err)
}
// Contains checks if the given error contains an error with the
// message msg. If err is not a wrapped error, this will always return
// false unless the error itself happens to match this msg.
func Contains(err error, msg string) bool {
return len(GetAll(err, msg)) > 0
}
// ContainsType checks if the given error contains an error with
// the same concrete type as v. If err is not a wrapped error, this will
// check the err itself.
func ContainsType(err error, v interface{}) bool {
return len(GetAllType(err, v)) > 0
}
// Get is the same as GetAll but returns the deepest matching error.
func Get(err error, msg string) error {
es := GetAll(err, msg)
if len(es) > 0 {
return es[len(es)-1]
}
return nil
}
// GetType is the same as GetAllType but returns the deepest matching error.
func GetType(err error, v interface{}) error {
es := GetAllType(err, v)
if len(es) > 0 {
return es[len(es)-1]
}
return nil
}
// GetAll gets all the errors that might be wrapped in err with the
// given message. The order of the errors is such that the outermost
// matching error (the most recent wrap) is index zero, and so on.
func GetAll(err error, msg string) []error {
var result []error
Walk(err, func(err error) {
if err.Error() == msg {
result = append(result, err)
}
})
return result
}
// GetAllType gets all the errors that are the same type as v.
//
// The order of the return value is the same as described in GetAll.
func GetAllType(err error, v interface{}) []error {
var result []error
var search string
if v != nil {
search = reflect.TypeOf(v).String()
}
Walk(err, func(err error) {
var needle string
if err != nil {
needle = reflect.TypeOf(err).String()
}
if needle == search {
result = append(result, err)
}
})
return result
}
// Walk walks all the wrapped errors in err and calls the callback. If
// err isn't a wrapped error, this will be called once for err. If err
// is a wrapped error, the callback will be called for both the wrapper
// that implements error as well as the wrapped error itself.
func Walk(err error, cb WalkFunc) {
if err == nil {
return
}
switch e := err.(type) {
case *wrappedError:
cb(e.Outer)
Walk(e.Inner, cb)
case Wrapper:
cb(err)
for _, err := range e.WrappedErrors() {
Walk(err, cb)
}
default:
cb(err)
}
}
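// A minimal usage sketch: collecting every message in a wrapped chain
// (err is assumed to be any error value):
//
//	var msgs []string
//	errwrap.Walk(err, func(e error) {
//		msgs = append(msgs, e.Error())
//	})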
// wrappedError is an implementation of error that has both the
// outer and inner errors.
type wrappedError struct {
Outer error
Inner error
}
func (w *wrappedError) Error() string {
return w.Outer.Error()
}
func (w *wrappedError) WrappedErrors() []error {
return []error{w.Outer, w.Inner}
}

View File

@ -1,12 +0,0 @@
sudo: false
language: go
go:
- 1.x
branches:
only:
- master
script: env GO111MODULE=on make test testrace

View File

@ -1,353 +0,0 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. “Contributor”
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. “Contributor Version”
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor’s Contribution.
1.3. “Contribution”
means Covered Software of a particular Contributor.
1.4. “Covered Software”
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. “Incompatible With Secondary Licenses”
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of version
1.1 or earlier of the License, but not also under the terms of a
Secondary License.
1.6. “Executable Form”
means any form of the work other than Source Code Form.
1.7. “Larger Work”
means a work that combines Covered Software with other material, in a separate
file or files, that is not Covered Software.
1.8. “License”
means this document.
1.9. “Licensable”
means having the right to grant, to the maximum extent possible, whether at the
time of the initial grant or subsequently, any and all of the rights conveyed by
this License.
1.10. “Modifications”
means any of the following:
a. any file in Source Code Form that results from an addition to, deletion
from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. “Patent Claims” of a Contributor
means any patent claim(s), including without limitation, method, process,
and apparatus claims, in any patent Licensable by such Contributor that
would be infringed, but for the grant of the License, by the making,
using, selling, offering for sale, having made, import, or transfer of
either its Contributions or its Contributor Version.
1.12. “Secondary License”
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. “Source Code Form”
means the form of the work preferred for making modifications.
1.14. “You” (or “Your”)
means an individual or a legal entity exercising rights under this
License. For legal entities, “You” includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, “control” means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or as
part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its Contributions
or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution become
effective for each Contribution on the date the Contributor first distributes
such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under this
License. No additional rights or licenses will be implied from the distribution
or licensing of Covered Software under this License. Notwithstanding Section
2.1(b) above, no patent license is granted by a Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party’s
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of its
Contributions.
This License does not grant any rights in the trademarks, service marks, or
logos of any Contributor (except as may be necessary to comply with the
notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this License
(see Section 10.2) or under the terms of a Secondary License (if permitted
under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its Contributions
are its original creation(s) or it has sufficient rights to grant the
rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under applicable
copyright doctrines of fair use, fair dealing, or other equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under the
terms of this License. You must inform recipients that the Source Code Form
of the Covered Software is governed by the terms of this License, and how
they can obtain a copy of this License. You may not attempt to alter or
restrict the recipients’ rights in the Source Code Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this License,
or sublicense it under different terms, provided that the license for
the Executable Form does not attempt to limit or alter the recipients’
rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for the
Covered Software. If the Larger Work is a combination of Covered Software
with a work governed by one or more Secondary Licenses, and the Covered
Software is not Incompatible With Secondary Licenses, this License permits
You to additionally distribute such Covered Software under the terms of
such Secondary License(s), so that the recipient of the Larger Work may, at
their option, further distribute the Covered Software under the terms of
either this License or such Secondary License(s).
3.4. Notices
You may not remove or alter the substance of any license notices (including
copyright notices, patent notices, disclaimers of warranty, or limitations
of liability) contained within the Source Code Form of the Covered
Software, except that You may alter any license notices to the extent
required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on behalf
of any Contributor. You must make it absolutely clear that any such
warranty, support, indemnity, or liability obligation is offered by You
alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
if such Contributor fails to notify You of the non-compliance by some
reasonable means prior to 60 days after You have come back into compliance.
Moreover, Your grants from a particular Contributor are reinstated on an
ongoing basis if such Contributor notifies You of the non-compliance by
some reasonable means, this is the first time You have received notice of
non-compliance with this License from such Contributor, and You become
compliant prior to 30 days after Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions, counter-claims,
and cross-claims) alleging that a Contributor Version directly or
indirectly infringes any patent, then the rights granted to You by any and
all Contributors for the Covered Software under Section 2.1 of this License
shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an “as is” basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire
risk as to the quality and performance of the Covered Software is with You.
Should any Covered Software prove defective in any respect, You (not any
Contributor) assume the cost of any necessary servicing, repair, or
correction. This disclaimer of warranty constitutes an essential part of this
License. No use of any Covered Software is authorized under this License
except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from such
party’s negligence to the extent applicable law prohibits such limitation.
Some jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts of
a jurisdiction where the defendant maintains its principal place of business
and such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a party’s ability to bring cross-claims or counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version of
the License under which You originally received the Covered Software, or
under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a modified
version of this License if you rename the license and remove any
references to the name of the license steward (except to note that such
modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - “Incompatible With Secondary Licenses” Notice
This Source Code Form is “Incompatible
With Secondary Licenses”, as defined by
the Mozilla Public License, v. 2.0.

View File

@ -1,31 +0,0 @@
TEST?=./...
default: test
# test runs the test suite and vets the code.
test: generate
@echo "==> Running tests..."
@go list $(TEST) \
| grep -v "/vendor/" \
| xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS}
# testrace runs the race checker
testrace: generate
@echo "==> Running tests (race)..."
@go list $(TEST) \
| grep -v "/vendor/" \
| xargs -n1 go test -timeout=60s -race ${TESTARGS}
# updatedeps installs all the dependencies needed to run and build.
updatedeps:
@sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'"
# generate runs `go generate` to build the dynamically generated source files.
generate:
@echo "==> Generating..."
@find . -type f -name '.DS_Store' -delete
@go list ./... \
| grep -v "/vendor/" \
| xargs -n1 go generate
.PHONY: default test testrace updatedeps generate

View File

@ -1,131 +0,0 @@
# go-multierror
[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis]
[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
[travis]: https://travis-ci.org/hashicorp/go-multierror
[godocs]: https://godoc.org/github.com/hashicorp/go-multierror
`go-multierror` is a package for Go that provides a mechanism for
representing a list of `error` values as a single `error`.
This allows a function in Go to return an `error` that might actually
be a list of errors. If the caller knows this, they can unwrap the
list and access the errors. If the caller doesn't know, the error
formats to a nice human-readable format.
`go-multierror` is fully compatible with the Go standard library
[errors](https://golang.org/pkg/errors/) package, including the
functions `As`, `Is`, and `Unwrap`. This provides a standardized approach
for introspecting on error values.
## Installation and Docs
Install using `go get github.com/hashicorp/go-multierror`.
Full documentation is available at
http://godoc.org/github.com/hashicorp/go-multierror
## Usage
go-multierror is easy to use and purposely built to be unobtrusive in
existing Go applications/libraries that may not be aware of it.
**Building a list of errors**
The `Append` function is used to create a list of errors. This function
behaves a lot like the Go built-in `append` function: it doesn't matter
if the first argument is nil, a `multierror.Error`, or any other `error`,
the function behaves as you would expect.
```go
var result error
if err := step1(); err != nil {
result = multierror.Append(result, err)
}
if err := step2(); err != nil {
result = multierror.Append(result, err)
}
return result
```
**Customizing the formatting of the errors**
By specifying a custom `ErrorFormat`, you can customize the format
of the `Error() string` function:
```go
var result *multierror.Error
// ... accumulate errors here, maybe using Append
if result != nil {
result.ErrorFormat = func([]error) string {
return "errors!"
}
}
```
**Accessing the list of errors**
`multierror.Error` implements `error` so if the caller doesn't know about
multierror, it will work just fine. But if you're aware a multierror might
be returned, you can use type switches to access the list of errors:
```go
if err := something(); err != nil {
if merr, ok := err.(*multierror.Error); ok {
// Use merr.Errors
}
}
```
You can also use the standard [`errors.Unwrap`](https://golang.org/pkg/errors/#Unwrap)
function. This will continue to unwrap into subsequent errors until none exist.
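For instance, walking the chain could look like this (a small sketch in the style of the examples above; `somefunc` is a placeholder):
```go
// Assume err is a multierror value
err := somefunc()
for next := errors.Unwrap(err); next != nil; next = errors.Unwrap(next) {
// Inspect next, e.g. with errors.Is or errors.As
}
```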
**Extracting an error**
The standard library [`errors.As`](https://golang.org/pkg/errors/#As)
function can be used directly with a multierror to extract a specific error:
```go
// Assume err is a multierror value
err := somefunc()
// We want to know if "err" has a "RichErrorType" in it and extract it.
var errRich RichErrorType
if errors.As(err, &errRich) {
// It has it, and now errRich is populated.
}
```
**Checking for an exact error value**
Some errors are returned as exact errors such as the [`ErrNotExist`](https://golang.org/pkg/os/#pkg-variables)
error in the `os` package. You can check if this error is present by using
the standard [`errors.Is`](https://golang.org/pkg/errors/#Is) function.
```go
// Assume err is a multierror value
err := somefunc()
if errors.Is(err, os.ErrNotExist) {
// err contains os.ErrNotExist
}
```
**Returning a multierror only if there are errors**
If you build a `multierror.Error`, you can use the `ErrorOrNil` function
to return an `error` implementation only if there are errors to return:
```go
var result *multierror.Error
// ... accumulate errors here
// Return the `error` only if errors were added to the multierror, otherwise
// return nil since there are no errors.
return result.ErrorOrNil()
```

View File

@ -1,41 +0,0 @@
package multierror
// Append is a helper function that will append more errors
// onto an Error in order to create a larger multi-error.
//
// If err is not a multierror.Error, then it will be turned into
// one. If any of the errs are multierror.Error, they will be flattened
// one level into err.
func Append(err error, errs ...error) *Error {
switch err := err.(type) {
case *Error:
// Typed nils can reach here, so initialize if we are nil
if err == nil {
err = new(Error)
}
// Go through each error and flatten
for _, e := range errs {
switch e := e.(type) {
case *Error:
if e != nil {
err.Errors = append(err.Errors, e.Errors...)
}
default:
if e != nil {
err.Errors = append(err.Errors, e)
}
}
}
return err
default:
newErrs := make([]error, 0, len(errs)+1)
if err != nil {
newErrs = append(newErrs, err)
}
newErrs = append(newErrs, errs...)
return Append(&Error{}, newErrs...)
}
}

View File

@ -1,26 +0,0 @@
package multierror
// Flatten flattens the given error, merging any *Errors together into
// a single *Error.
func Flatten(err error) error {
// If it isn't an *Error, just return the error as-is
if _, ok := err.(*Error); !ok {
return err
}
// Otherwise, make the result and flatten away!
flatErr := new(Error)
flatten(err, flatErr)
return flatErr
}
func flatten(err error, flatErr *Error) {
switch err := err.(type) {
case *Error:
for _, e := range err.Errors {
flatten(e, flatErr)
}
default:
flatErr.Errors = append(flatErr.Errors, err)
}
}

View File

@ -1,27 +0,0 @@
package multierror
import (
"fmt"
"strings"
)
// ErrorFormatFunc is a function callback that is called by Error to
// turn the list of errors into a string.
type ErrorFormatFunc func([]error) string
// ListFormatFunc is a basic formatter that outputs the number of errors
// that occurred along with a bullet point list of the errors.
func ListFormatFunc(es []error) string {
if len(es) == 1 {
return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0])
}
points := make([]string, len(es))
for i, err := range es {
points[i] = fmt.Sprintf("* %s", err)
}
return fmt.Sprintf(
"%d errors occurred:\n\t%s\n\n",
len(es), strings.Join(points, "\n\t"))
}
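// For example, with two errors "a" and "b" this produces:
//
//	2 errors occurred:
//		* a
//		* b
//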

View File

@ -1,38 +0,0 @@
package multierror
import "sync"
// Group is a collection of goroutines which return errors that need to be
// coalesced.
type Group struct {
mutex sync.Mutex
err *Error
wg sync.WaitGroup
}
// Go calls the given function in a new goroutine.
//
// If the function returns an error it is added to the group multierror which
// is returned by Wait.
func (g *Group) Go(f func() error) {
g.wg.Add(1)
go func() {
defer g.wg.Done()
if err := f(); err != nil {
g.mutex.Lock()
g.err = Append(g.err, err)
g.mutex.Unlock()
}
}()
}
// Wait blocks until all function calls from the Go method have returned, then
// returns the multierror.
func (g *Group) Wait() *Error {
g.wg.Wait()
g.mutex.Lock()
defer g.mutex.Unlock()
return g.err
}
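// A minimal usage sketch (step1 and step2 stand in for any functions
// returning error):
//
//	var g multierror.Group
//	g.Go(step1)
//	g.Go(step2)
//	if err := g.Wait().ErrorOrNil(); err != nil {
//		// handle the combined error
//	}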

View File

@ -1,118 +0,0 @@
package multierror
import (
"errors"
"fmt"
)
// Error is an error type to track multiple errors. This is used to
// accumulate errors in cases and return them as a single "error".
type Error struct {
Errors []error
ErrorFormat ErrorFormatFunc
}
func (e *Error) Error() string {
fn := e.ErrorFormat
if fn == nil {
fn = ListFormatFunc
}
return fn(e.Errors)
}
// ErrorOrNil returns an error interface if this Error represents
// a list of errors, or returns nil if the list of errors is empty. This
// function is useful at the end of accumulation to make sure that the value
// returned represents the existence of errors.
func (e *Error) ErrorOrNil() error {
if e == nil {
return nil
}
if len(e.Errors) == 0 {
return nil
}
return e
}
func (e *Error) GoString() string {
return fmt.Sprintf("*%#v", *e)
}
// WrappedErrors returns the list of errors that this Error is wrapping.
// It is an implementation of the errwrap.Wrapper interface so that
// multierror.Error can be used with that library.
//
// This method is not safe to be called concurrently and is no different
// than accessing the Errors field directly. It is implemented only to
// satisfy the errwrap.Wrapper interface.
func (e *Error) WrappedErrors() []error {
return e.Errors
}
// Unwrap returns an error from Error (or nil if there are no errors).
// This error returned will further support Unwrap to get the next error,
// etc. The order will match the order of Errors in the multierror.Error
// at the time of calling.
//
// The resulting error supports errors.As/Is/Unwrap so you can continue
// to use the stdlib errors package to introspect further.
//
// This will perform a shallow copy of the errors slice. Any errors appended
// to this error after calling Unwrap will not be available until a new
// Unwrap is called on the multierror.Error.
func (e *Error) Unwrap() error {
// If we have no errors then we do nothing
if e == nil || len(e.Errors) == 0 {
return nil
}
// If we have exactly one error, we can just return that directly.
if len(e.Errors) == 1 {
return e.Errors[0]
}
// Shallow copy the slice
errs := make([]error, len(e.Errors))
copy(errs, e.Errors)
return chain(errs)
}
// chain implements the interfaces necessary for errors.Is/As/Unwrap to
// work in a deterministic way with multierror. A chain tracks a list of
// errors while accounting for the current represented error. This lets
// Is/As be meaningful.
//
// Unwrap returns the next error. In the cleanest form, Unwrap would return
// the wrapped error here but we can't do that if we want to properly
// get access to all the errors. Instead, users are recommended to use
// Is/As to get the correct error type out.
//
// Precondition: []error is non-empty (len > 0)
type chain []error
// Error implements the error interface
func (e chain) Error() string {
return e[0].Error()
}
// Unwrap implements errors.Unwrap by returning the next error in the
// chain or nil if there are no more errors.
func (e chain) Unwrap() error {
if len(e) == 1 {
return nil
}
return e[1:]
}
// As implements errors.As by attempting to map to the current value.
func (e chain) As(target interface{}) bool {
return errors.As(e[0], target)
}
// Is implements errors.Is by comparing the current value directly.
func (e chain) Is(target error) bool {
return errors.Is(e[0], target)
}

View File

@ -1,37 +0,0 @@
package multierror
import (
"fmt"
"github.com/hashicorp/errwrap"
)
// Prefix is a helper function that will prefix some text
// to the given error. If the error is a multierror.Error, then
// it will be prefixed to each wrapped error.
//
// This is useful to use when appending multiple multierrors
// together in order to give better scoping.
func Prefix(err error, prefix string) error {
if err == nil {
return nil
}
format := fmt.Sprintf("%s {{err}}", prefix)
switch err := err.(type) {
case *Error:
// Typed nils can reach here, so initialize if we are nil
if err == nil {
err = new(Error)
}
// Wrap each of the errors
for i, e := range err.Errors {
err.Errors[i] = errwrap.Wrapf(format, e)
}
return err
default:
return errwrap.Wrapf(format, err)
}
}
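// A minimal usage sketch, assuming err was returned by a sub-step labelled "step1":
//
//	err = multierror.Prefix(err, "step1:")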

View File

@ -1,16 +0,0 @@
package multierror
// Len implements sort.Interface function for length
func (err Error) Len() int {
return len(err.Errors)
}
// Swap implements sort.Interface function for swapping elements
func (err Error) Swap(i, j int) {
err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i]
}
// Less implements sort.Interface function for determining order
func (err Error) Less(i, j int) bool {
return err.Errors[i].Error() < err.Errors[j].Error()
}
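// A minimal usage sketch: since Error satisfies sort.Interface, the wrapped
// errors can be ordered by message with the standard library:
//
//	sort.Sort(merr) // merr is a *multierror.Error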

View File

@ -1,106 +0,0 @@
# Change Log
## [v3.0.0](https://github.com/k0kubun/pp/tree/v3.0.0) (2019-03-04)
[Full Changelog](https://github.com/k0kubun/pp/compare/v2.4.0...v3.0.0)
## [v2.4.0](https://github.com/k0kubun/pp/tree/v2.4.0) (2019-03-03)
[Full Changelog](https://github.com/k0kubun/pp/compare/v2.3.0...v2.4.0)
**Merged pull requests:**
- Fix newline of map type [\#29](https://github.com/k0kubun/pp/pull/29) ([itchyny](https://github.com/itchyny))
- add MIT license file [\#28](https://github.com/k0kubun/pp/pull/28) ([alteholz](https://github.com/alteholz))
- Update the map printer to properly print maps. [\#25](https://github.com/k0kubun/pp/pull/25) ([denniszl](https://github.com/denniszl))
## [v2.3.0](https://github.com/k0kubun/pp/tree/v2.3.0) (2017-01-23)
[Full Changelog](https://github.com/k0kubun/pp/compare/v2.2.0...v2.3.0)
**Merged pull requests:**
- Add WithLineInfo method for print filename and line number along [\#24](https://github.com/k0kubun/pp/pull/24) ([huydx](https://github.com/huydx))
## [v2.2.0](https://github.com/k0kubun/pp/tree/v2.2.0) (2015-07-23)
[Full Changelog](https://github.com/k0kubun/pp/compare/v2.1.0...v2.2.0)
**Closed issues:**
- please do not use unsafe package [\#20](https://github.com/k0kubun/pp/issues/20)
**Merged pull requests:**
- check whether reflect.Value can call `Interface\(\)` [\#19](https://github.com/k0kubun/pp/pull/19) ([skatsuta](https://github.com/skatsuta))
- Fix indent for slices [\#18](https://github.com/k0kubun/pp/pull/18) ([sdidyk](https://github.com/sdidyk))
## [v2.1.0](https://github.com/k0kubun/pp/tree/v2.1.0) (2015-04-25)
[Full Changelog](https://github.com/k0kubun/pp/compare/v2.0.1...v2.1.0)
**Merged pull requests:**
- Custom colors [\#17](https://github.com/k0kubun/pp/pull/17) ([sdidyk](https://github.com/sdidyk))
- Some changes of printer [\#16](https://github.com/k0kubun/pp/pull/16) ([sdidyk](https://github.com/sdidyk))
- Suppress panic caused by Float values [\#15](https://github.com/k0kubun/pp/pull/15) ([yudai](https://github.com/yudai))
## [v2.0.1](https://github.com/k0kubun/pp/tree/v2.0.1) (2015-03-01)
[Full Changelog](https://github.com/k0kubun/pp/compare/v2.0.0...v2.0.1)
**Merged pull requests:**
- escape sequences to pipe [\#13](https://github.com/k0kubun/pp/pull/13) ([mattn](https://github.com/mattn))
## [v2.0.0](https://github.com/k0kubun/pp/tree/v2.0.0) (2015-02-14)
[Full Changelog](https://github.com/k0kubun/pp/compare/v1.3.0...v2.0.0)
**Closed issues:**
- Fold large buffers [\#8](https://github.com/k0kubun/pp/issues/8)
**Merged pull requests:**
- Fold a large buffer [\#12](https://github.com/k0kubun/pp/pull/12) ([k0kubun](https://github.com/k0kubun))
## [v1.3.0](https://github.com/k0kubun/pp/tree/v1.3.0) (2015-02-14)
[Full Changelog](https://github.com/k0kubun/pp/compare/v1.2.0...v1.3.0)
**Closed issues:**
- time.Time formatter [\#2](https://github.com/k0kubun/pp/issues/2)
**Merged pull requests:**
- Implement time.Time pretty printer [\#11](https://github.com/k0kubun/pp/pull/11) ([k0kubun](https://github.com/k0kubun))
## [v1.2.0](https://github.com/k0kubun/pp/tree/v1.2.0) (2015-02-14)
[Full Changelog](https://github.com/k0kubun/pp/compare/v1.1.0...v1.2.0)
**Merged pull requests:**
- Color escaped characters inside strings [\#10](https://github.com/k0kubun/pp/pull/10) ([motemen](https://github.com/motemen))
## [v1.1.0](https://github.com/k0kubun/pp/tree/v1.1.0) (2015-02-14)
[Full Changelog](https://github.com/k0kubun/pp/compare/v1.0.0...v1.1.0)
**Merged pull requests:**
- Handle circular structures [\#9](https://github.com/k0kubun/pp/pull/9) ([motemen](https://github.com/motemen))
## [v1.0.0](https://github.com/k0kubun/pp/tree/v1.0.0) (2015-01-09)
[Full Changelog](https://github.com/k0kubun/pp/compare/v0.0.1...v1.0.0)
**Closed issues:**
- test failed if Golang over 1.4 [\#5](https://github.com/k0kubun/pp/issues/5)
**Merged pull requests:**
- remove unused struct. [\#7](https://github.com/k0kubun/pp/pull/7) ([walf443](https://github.com/walf443))
- customizable Print\* functions output [\#6](https://github.com/k0kubun/pp/pull/6) ([walf443](https://github.com/walf443))
## [v0.0.1](https://github.com/k0kubun/pp/tree/v0.0.1) (2014-12-29)
**Merged pull requests:**
- fix: `Fprintln` infinite loop bug. [\#3](https://github.com/k0kubun/pp/pull/3) ([kyokomi](https://github.com/kyokomi))
- Support windows [\#1](https://github.com/k0kubun/pp/pull/1) ([k0kubun](https://github.com/k0kubun))
\* *This Change Log was automatically generated by [github_changelog_generator](https://github.com/skywinder/Github-Changelog-Generator)*

View File

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2015 Takashi Kokubun
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -1,100 +0,0 @@
# pp [![wercker status](https://app.wercker.com/status/fc5308fe78e92594f7ea09b67a486caf/s/master "wercker status")](https://app.wercker.com/project/byKey/fc5308fe78e92594f7ea09b67a486caf)
Colored pretty printer for Go language
![](http://i.gyazo.com/d3253ae839913b7239a7229caa4af551.png)
## Usage
Just call `pp.Print()`.
```go
import "github.com/k0kubun/pp"
m := map[string]string{"foo": "bar", "hello": "world"}
pp.Print(m)
```
![](http://i.gyazo.com/0d08376ed2656257627f79626d5e0cde.png)
### API
fmt package-like functions are provided.
```go
pp.Print()
pp.Println()
pp.Sprint()
pp.Fprintf()
// ...
```
API doc is available at: http://godoc.org/github.com/k0kubun/pp
### Custom colors
If you require, you may change the colors (all or some) for syntax highlighting:
```go
// Create a struct describing your scheme
scheme := pp.ColorScheme{
Integer: pp.Green | pp.Bold,
Float: pp.Black | pp.BackgroundWhite | pp.Bold,
String: pp.Yellow,
}
// Register it for usage
pp.SetColorScheme(scheme)
```
Look into ColorScheme struct for the field names.
If you would like to revert to the default highlighting, you may do so by calling `pp.ResetColorScheme()`.
Out of the following color flags, you may combine any color with a background color and optionally with the bold parameter. Please note that bold will likely not work on the windows platform.
```go
// Colors
Black
Red
Green
Yellow
Blue
Magenta
Cyan
White
// Background colors
BackgroundBlack
BackgroundRed
BackgroundGreen
BackgroundYellow
BackgroundBlue
BackgroundMagenta
BackgroundCyan
BackgroundWhite
// Other
Bold
// Special
NoColor
```
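For example, combining a color with a background color and the bold flag could look like this (a small sketch reusing the fields from the scheme example above):
```go
pp.SetColorScheme(pp.ColorScheme{
Integer: pp.Green | pp.BackgroundBlack | pp.Bold,
Float: pp.White | pp.BackgroundRed,
})
```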
## Demo
### Timeline
![](http://i.gyazo.com/a8adaeec965db943486e35083cf707f2.png)
### UserStream event
![](http://i.gyazo.com/1e88915b3a6a9129f69fb5d961c4f079.png)
### Works on windows
![](http://i.gyazo.com/ab791997a980f1ab3ee2a01586efdce6.png)
## License
MIT License

129
vendor/github.com/k0kubun/pp/color.go generated vendored
View File

@ -1,129 +0,0 @@
package pp
import (
"fmt"
"reflect"
)
const (
// No color
NoColor uint16 = 1 << 15
)
const (
// Foreground colors for ColorScheme.
_ uint16 = iota | NoColor
Black
Red
Green
Yellow
Blue
Magenta
Cyan
White
bitsForeground = 0
maskForegorund = 0xf
ansiForegroundOffset = 30 - 1
)
const (
// Background colors for ColorScheme.
_ uint16 = iota<<bitsBackground | NoColor
BackgroundBlack
BackgroundRed
BackgroundGreen
BackgroundYellow
BackgroundBlue
BackgroundMagenta
BackgroundCyan
BackgroundWhite
bitsBackground = 4
maskBackground = 0xf << bitsBackground
ansiBackgroundOffset = 40 - 1
)
const (
// Bold flag for ColorScheme.
Bold uint16 = 1<<bitsBold | NoColor
bitsBold = 8
maskBold = 1 << bitsBold
ansiBold = 1
)
// To use with SetColorScheme.
type ColorScheme struct {
Bool uint16
Integer uint16
Float uint16
String uint16
StringQuotation uint16
EscapedChar uint16
FieldName uint16
PointerAdress uint16
Nil uint16
Time uint16
StructName uint16
ObjectLength uint16
}
var (
// If you set false to this variable, you can use pretty formatter
// without coloring.
ColoringEnabled = true
defaultScheme = ColorScheme{
Bool: Cyan | Bold,
Integer: Blue | Bold,
Float: Magenta | Bold,
String: Red,
StringQuotation: Red | Bold,
EscapedChar: Magenta | Bold,
FieldName: Yellow,
PointerAdress: Blue | Bold,
Nil: Cyan | Bold,
Time: Blue | Bold,
StructName: Green,
ObjectLength: Blue,
}
)
func (cs *ColorScheme) fixColors() {
typ := reflect.Indirect(reflect.ValueOf(cs))
defaultType := reflect.ValueOf(defaultScheme)
for i := 0; i < typ.NumField(); i++ {
field := typ.Field(i)
if field.Uint() == 0 {
field.SetUint(defaultType.Field(i).Uint())
}
}
}
func colorize(text string, color uint16) string {
if !ColoringEnabled {
return text
}
foreground := color & maskForegorund >> bitsForeground
background := color & maskBackground >> bitsBackground
bold := color & maskBold
if foreground == 0 && background == 0 && bold == 0 {
return text
}
modBold := ""
modForeground := ""
modBackground := ""
if bold > 0 {
modBold = "\033[1m"
}
if foreground > 0 {
modForeground = fmt.Sprintf("\033[%dm", foreground+ansiForegroundOffset)
}
if background > 0 {
modBackground = fmt.Sprintf("\033[%dm", background+ansiBackgroundOffset)
}
return fmt.Sprintf("%s%s%s%s\033[0m", modForeground, modBackground, modBold, text)
}

148
vendor/github.com/k0kubun/pp/pp.go generated vendored
View File

@ -1,148 +0,0 @@
package pp
import (
"errors"
"fmt"
"io"
"os"
"runtime"
"sync"
"github.com/mattn/go-colorable"
)
var (
out io.Writer
outLock sync.Mutex
defaultOut = colorable.NewColorableStdout()
currentScheme ColorScheme
// WithLineInfo adds the file name and line number to the output.
// Set this variable with care, because getting the stack has a performance penalty.
WithLineInfo = false
)
func init() {
out = defaultOut
currentScheme = defaultScheme
}
// Print prints given arguments.
func Print(a ...interface{}) (n int, err error) {
return fmt.Fprint(out, formatAll(a)...)
}
// Printf prints a given format.
func Printf(format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(out, format, formatAll(a)...)
}
// Println prints given arguments with newline.
func Println(a ...interface{}) (n int, err error) {
return fmt.Fprintln(out, formatAll(a)...)
}
// Sprint formats given arguments and returns the result as a string.
func Sprint(a ...interface{}) string {
return fmt.Sprint(formatAll(a)...)
}
// Sprintf formats with pretty print and returns the result as string.
func Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, formatAll(a)...)
}
// Sprintln formats given arguments with a newline and returns the result as a string.
func Sprintln(a ...interface{}) string {
return fmt.Sprintln(formatAll(a)...)
}
// Fprint prints given arguments to a given writer.
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprint(w, formatAll(a)...)
}
// Fprintf prints format to a given writer.
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(w, format, formatAll(a)...)
}
// Fprintln prints given arguments to a given writer with newline.
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprintln(w, formatAll(a)...)
}
// Errorf formats given arguments and returns it as error type.
func Errorf(format string, a ...interface{}) error {
return errors.New(Sprintf(format, a...))
}
// Fatal prints given arguments and finishes execution with exit status 1.
func Fatal(a ...interface{}) {
fmt.Fprint(out, formatAll(a)...)
os.Exit(1)
}
// Fatalf prints a given format and finishes execution with exit status 1.
func Fatalf(format string, a ...interface{}) {
fmt.Fprintf(out, format, formatAll(a)...)
os.Exit(1)
}
// Fatalln prints given arguments with newline and finishes execution with exit status 1.
func Fatalln(a ...interface{}) {
fmt.Fprintln(out, formatAll(a)...)
os.Exit(1)
}
// Change Print* functions' output to a given writer.
// For example, you can limit output by ENV.
//
// func init() {
// if os.Getenv("DEBUG") == "" {
// pp.SetDefaultOutput(ioutil.Discard)
// }
// }
func SetDefaultOutput(o io.Writer) {
outLock.Lock()
out = o
outLock.Unlock()
}
// GetDefaultOutput returns pp's default output.
func GetDefaultOutput() io.Writer {
return out
}
// Change Print* functions' output to default one.
func ResetDefaultOutput() {
outLock.Lock()
out = defaultOut
outLock.Unlock()
}
// SetColorScheme takes a colorscheme used by all future Print calls.
func SetColorScheme(scheme ColorScheme) {
scheme.fixColors()
currentScheme = scheme
}
// ResetColorScheme resets colorscheme to default.
func ResetColorScheme() {
currentScheme = defaultScheme
}
func formatAll(objects []interface{}) []interface{} {
results := []interface{}{}
if WithLineInfo {
_, fn, line, _ := runtime.Caller(2) // 2 because current Caller is pp itself
results = append(results, fmt.Sprintf("%s:%d\n", fn, line))
}
for _, object := range objects {
results = append(results, format(object))
}
return results
}

View File

@ -1,389 +0,0 @@
package pp
import (
"bytes"
"fmt"
"reflect"
"regexp"
"strconv"
"strings"
"text/tabwriter"
"time"
)
const (
indentWidth = 2
)
var (
// If the length of an array or slice is larger than this,
// the buffer will be shortened to {...}.
BufferFoldThreshold = 1024
// PrintMapTypes, when set to true, causes map types to always be appended to maps.
PrintMapTypes = true
)
func format(object interface{}) string {
return newPrinter(object).String()
}
func newPrinter(object interface{}) *printer {
buffer := bytes.NewBufferString("")
tw := new(tabwriter.Writer)
tw.Init(buffer, indentWidth, 0, 1, ' ', 0)
return &printer{
Buffer: buffer,
tw: tw,
depth: 0,
value: reflect.ValueOf(object),
visited: map[uintptr]bool{},
}
}
type printer struct {
*bytes.Buffer
tw *tabwriter.Writer
depth int
value reflect.Value
visited map[uintptr]bool
}
func (p *printer) String() string {
switch p.value.Kind() {
case reflect.Bool:
p.colorPrint(p.raw(), currentScheme.Bool)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
reflect.Uintptr, reflect.Complex64, reflect.Complex128:
p.colorPrint(p.raw(), currentScheme.Integer)
case reflect.Float32, reflect.Float64:
p.colorPrint(p.raw(), currentScheme.Float)
case reflect.String:
p.printString()
case reflect.Map:
p.printMap()
case reflect.Struct:
p.printStruct()
case reflect.Array, reflect.Slice:
p.printSlice()
case reflect.Chan:
p.printf("(%s)(%s)", p.typeString(), p.pointerAddr())
case reflect.Interface:
p.printInterface()
case reflect.Ptr:
p.printPtr()
case reflect.Func:
p.printf("%s {...}", p.typeString())
case reflect.UnsafePointer:
p.printf("%s(%s)", p.typeString(), p.pointerAddr())
case reflect.Invalid:
p.print(p.nil())
default:
p.print(p.raw())
}
p.tw.Flush()
return p.Buffer.String()
}
func (p *printer) print(text string) {
fmt.Fprint(p.tw, text)
}
func (p *printer) printf(format string, args ...interface{}) {
text := fmt.Sprintf(format, args...)
p.print(text)
}
func (p *printer) println(text string) {
p.print(text + "\n")
}
func (p *printer) indentPrint(text string) {
p.print(p.indent() + text)
}
func (p *printer) indentPrintf(format string, args ...interface{}) {
text := fmt.Sprintf(format, args...)
p.indentPrint(text)
}
func (p *printer) colorPrint(text string, color uint16) {
p.print(colorize(text, color))
}
func (p *printer) printString() {
quoted := strconv.Quote(p.value.String())
quoted = quoted[1 : len(quoted)-1]
p.colorPrint(`"`, currentScheme.StringQuotation)
for len(quoted) > 0 {
pos := strings.IndexByte(quoted, '\\')
if pos == -1 {
p.colorPrint(quoted, currentScheme.String)
break
}
if pos != 0 {
p.colorPrint(quoted[0:pos], currentScheme.String)
}
n := 1
switch quoted[pos+1] {
case 'x': // "\x00"
n = 3
case 'u': // "\u0000"
n = 5
case 'U': // "\U00000000"
n = 9
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': // "\000"
n = 3
}
p.colorPrint(quoted[pos:pos+n+1], currentScheme.EscapedChar)
quoted = quoted[pos+n+1:]
}
p.colorPrint(`"`, currentScheme.StringQuotation)
}
func (p *printer) printMap() {
if p.value.Len() == 0 {
p.printf("%s{}", p.typeString())
return
}
if p.visited[p.value.Pointer()] {
p.printf("%s{...}", p.typeString())
return
}
p.visited[p.value.Pointer()] = true
if PrintMapTypes {
p.printf("%s{\n", p.typeString())
} else {
p.println("{")
}
p.indented(func() {
keys := p.value.MapKeys()
for i := 0; i < p.value.Len(); i++ {
value := p.value.MapIndex(keys[i])
p.indentPrintf("%s:\t%s,\n", p.format(keys[i]), p.format(value))
}
})
p.indentPrint("}")
}
func (p *printer) printStruct() {
if p.value.Type().String() == "time.Time" {
p.printTime()
return
}
if p.value.NumField() == 0 {
p.print(p.typeString() + "{}")
return
}
p.println(p.typeString() + "{")
p.indented(func() {
for i := 0; i < p.value.NumField(); i++ {
field := colorize(p.value.Type().Field(i).Name, currentScheme.FieldName)
value := p.value.Field(i)
p.indentPrintf("%s:\t%s,\n", field, p.format(value))
}
})
p.indentPrint("}")
}
func (p *printer) printTime() {
if !p.value.CanInterface() {
p.printf("(unexported time.Time)")
return
}
tm := p.value.Interface().(time.Time)
p.printf(
"%s-%s-%s %s:%s:%s %s",
colorize(strconv.Itoa(tm.Year()), currentScheme.Time),
colorize(fmt.Sprintf("%02d", tm.Month()), currentScheme.Time),
colorize(fmt.Sprintf("%02d", tm.Day()), currentScheme.Time),
colorize(fmt.Sprintf("%02d", tm.Hour()), currentScheme.Time),
colorize(fmt.Sprintf("%02d", tm.Minute()), currentScheme.Time),
colorize(fmt.Sprintf("%02d", tm.Second()), currentScheme.Time),
colorize(tm.Location().String(), currentScheme.Time),
)
}
func (p *printer) printSlice() {
if p.value.Len() == 0 {
p.printf("%s{}", p.typeString())
return
}
if p.value.Kind() == reflect.Slice {
if p.visited[p.value.Pointer()] {
// Stop traversing cyclic references
p.printf("%s{...}", p.typeString())
return
}
p.visited[p.value.Pointer()] = true
}
// Fold a large buffer
if p.value.Len() > BufferFoldThreshold {
p.printf("%s{...}", p.typeString())
return
}
p.println(p.typeString() + "{")
p.indented(func() {
groupsize := 0
switch p.value.Type().Elem().Kind() {
case reflect.Uint8:
groupsize = 16
case reflect.Uint16:
groupsize = 8
case reflect.Uint32:
groupsize = 8
case reflect.Uint64:
groupsize = 4
}
if groupsize > 0 {
for i := 0; i < p.value.Len(); i++ {
// indent for new group
if i%groupsize == 0 {
p.print(p.indent())
}
// slice element
p.printf("%s,", p.format(p.value.Index(i)))
// space or newline
if (i+1)%groupsize == 0 || i+1 == p.value.Len() {
p.print("\n")
} else {
p.print(" ")
}
}
} else {
for i := 0; i < p.value.Len(); i++ {
p.indentPrintf("%s,\n", p.format(p.value.Index(i)))
}
}
})
p.indentPrint("}")
}
func (p *printer) printInterface() {
e := p.value.Elem()
if e.Kind() == reflect.Invalid {
p.print(p.nil())
} else if e.IsValid() {
p.print(p.format(e))
} else {
p.printf("%s(%s)", p.typeString(), p.nil())
}
}
func (p *printer) printPtr() {
if p.visited[p.value.Pointer()] {
p.printf("&%s{...}", p.elemTypeString())
return
}
if p.value.Pointer() != 0 {
p.visited[p.value.Pointer()] = true
}
if p.value.Elem().IsValid() {
p.printf("&%s", p.format(p.value.Elem()))
} else {
p.printf("(%s)(%s)", p.typeString(), p.nil())
}
}
func (p *printer) pointerAddr() string {
return colorize(fmt.Sprintf("%#v", p.value.Pointer()), currentScheme.PointerAdress)
}
func (p *printer) typeString() string {
return p.colorizeType(p.value.Type().String())
}
func (p *printer) elemTypeString() string {
return p.colorizeType(p.value.Elem().Type().String())
}
func (p *printer) colorizeType(t string) string {
prefix := ""
if p.matchRegexp(t, `^\[\].+$`) {
prefix = "[]"
t = t[2:]
}
if p.matchRegexp(t, `^\[\d+\].+$`) {
num := regexp.MustCompile(`\d+`).FindString(t)
prefix = fmt.Sprintf("[%s]", colorize(num, currentScheme.ObjectLength))
t = t[2+len(num):]
}
if p.matchRegexp(t, `^[^\.]+\.[^\.]+$`) {
ts := strings.Split(t, ".")
t = fmt.Sprintf("%s.%s", ts[0], colorize(ts[1], currentScheme.StructName))
} else {
t = colorize(t, currentScheme.StructName)
}
return prefix + t
}
func (p *printer) matchRegexp(text, exp string) bool {
return regexp.MustCompile(exp).MatchString(text)
}
func (p *printer) indented(proc func()) {
p.depth++
proc()
p.depth--
}
func (p *printer) raw() string {
// Some values cause a panic when Interface() is called.
switch p.value.Kind() {
case reflect.Bool:
return fmt.Sprintf("%#v", p.value.Bool())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return fmt.Sprintf("%#v", p.value.Int())
case reflect.Uint, reflect.Uintptr:
return fmt.Sprintf("%#v", p.value.Uint())
case reflect.Uint8:
return fmt.Sprintf("0x%02x", p.value.Uint())
case reflect.Uint16:
return fmt.Sprintf("0x%04x", p.value.Uint())
case reflect.Uint32:
return fmt.Sprintf("0x%08x", p.value.Uint())
case reflect.Uint64:
return fmt.Sprintf("0x%016x", p.value.Uint())
case reflect.Float32, reflect.Float64:
return fmt.Sprintf("%f", p.value.Float())
case reflect.Complex64, reflect.Complex128:
return fmt.Sprintf("%#v", p.value.Complex())
default:
return fmt.Sprintf("%#v", p.value.Interface())
}
}
func (p *printer) nil() string {
return colorize("nil", currentScheme.Nil)
}
func (p *printer) format(object interface{}) string {
pp := newPrinter(object)
pp.depth = p.depth
pp.visited = p.visited
if value, ok := object.(reflect.Value); ok {
pp.value = value
}
return pp.String()
}
func (p *printer) indent() string {
return strings.Repeat("\t", p.depth)
}

View File

@ -1,16 +0,0 @@
box: golang
build:
steps:
- setup-go-workspace
- script:
name: go get
code: |
cd $WERCKER_SOURCE_DIR
go version
go get -t ./...
- script:
name: go test
code: |
go test -v

View File

@ -1,15 +0,0 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
.idea
# Dependency directories (remove the comment below to include it)
# vendor/

View File

@ -1 +0,0 @@

View File

@ -1,2 +0,0 @@
# rac-c
`rac` executor for 1C:Enterprise

View File

@ -1,80 +0,0 @@
package rclient
import (
"context"
"github.com/khorevaa/ras-client/messages"
"github.com/khorevaa/ras-client/serialize"
)
var _ agentApi = (*Client)(nil)
func (c *Client) GetAgentVersion(ctx context.Context) (string, error) {
switch c.serviceVersion {
case "4.0":
return "8.3.11", nil
case "5.0":
return "8.3.12", nil
case "6.0":
return "8.3.13", nil
case "7.0":
return "8.3.14", nil
case "8.0":
return "8.3.15", nil
}
req := &messages.GetAgentVersionRequest{}
resp, err := c.sendEndpointRequest(ctx, req)
if err != nil {
return "", err
}
response := resp.(*messages.GetAgentVersionResponse)
return response.Version, err
}
func (c *Client) GetAgentAdmins(ctx context.Context) (serialize.UsersList, error) {
req := &messages.GetAgentAdminsRequest{}
resp, err := c.sendEndpointRequest(ctx, req)
if err != nil {
return nil, err
}
response := resp.(*messages.GetAgentAdminsResponse)
return response.Users, err
}
func (c *Client) RegAgentAdmin(ctx context.Context, user serialize.UserInfo) error {
req := &messages.RegAgentAdminRequest{
User: user,
}
_, err := c.sendEndpointRequest(ctx, req)
if err != nil {
return err
}
return nil
}
func (c *Client) UnregAgentAdmin(ctx context.Context, user string) error {
req := &messages.UnregAgentAdminRequest{
User: user,
}
_, err := c.sendEndpointRequest(ctx, req)
if err != nil {
return err
}
return nil
}

View File

@ -1,14 +0,0 @@
package rclient
/*
locks api
file: locks.go
GetClusterLocks(ctx context.Context, cluster uuid.UUID) (serialize.LocksList, error)
GetInfobaseLocks(ctx context.Context, cluster uuid.UUID, infobase uuid.UUID) (serialize.LocksList, error)
GetSessionLocks(ctx context.Context, cluster uuid.UUID, infobase uuid.UUID, session uuid.UUID) (serialize.LocksList, error)
GetConnectionLocks(ctx context.Context, cluster uuid.UUID, connection uuid.UUID) (serialize.LocksList, error)
*/

View File

@ -1,23 +0,0 @@
package rclient
import uuid "github.com/satori/go.uuid"
var _ authApi = (*Client)(nil)
func (c *Client) AuthenticateAgent(user, password string) {
c.base.SetAgentAuth(user, password)
}
func (c *Client) AuthenticateCluster(cluster uuid.UUID, user, password string) {
c.base.SetClusterAuth(cluster, user, password)
}
func (c *Client) AuthenticateInfobase(cluster uuid.UUID, user, password string) {
c.base.SetInfobaseAuth(cluster, user, password)
}

View File

@ -1,381 +0,0 @@
package rclient
import (
"bytes"
"context"
"github.com/k0kubun/pp"
"github.com/khorevaa/ras-client/internal/pool"
"github.com/khorevaa/ras-client/messages"
"github.com/khorevaa/ras-client/protocol/codec"
"github.com/khorevaa/ras-client/serialize/esig"
"net"
"strconv"
"time"
"github.com/pkg/errors"
)
const protocolVersion = 256
var serviceVersions = []string{"3.0", "4.0", "5.0", "6.0", "7.0", "8.0", "9.0", "10.0"}
var _ Api = (*Client)(nil)
type Client struct {
addr string
laddr net.Addr
ctx context.Context
stopRoutines context.CancelFunc // stops ping, read, and similar goroutines
agentUser string
agentPassword string
base pool.EndpointPool
codec codec.Codec
serviceVersion string
}
func (c *Client) Version() string {
return c.serviceVersion
}
func (c *Client) Close() error {
return c.base.Close()
}
func NewClient(addr string, opts ...Option) *Client {
opt := &Options{
serviceVersion: "9.0",
codec: codec.NewCodec1_0(),
}
for _, fn := range opts {
fn(opt)
}
m := new(Client)
m.addr = addr
m.codec = opt.codec
if opt.poolOptions != nil {
m.base = pool.NewEndpointPool(opt.poolOptions)
} else {
m.base = pool.NewEndpointPool(m.poolOptions())
}
m.serviceVersion = opt.serviceVersion
return m
}
func (c *Client) poolOptions() *pool.Options {
return &pool.Options{
Dialer: c.dialFn,
OpenEndpoint: c.openEndpoint,
CloseEndpoint: c.closeEndpoint,
InitConnection: c.initConnection,
PoolSize: 5,
MinIdleConns: 1,
MaxConnAge: 30 * time.Minute,
IdleTimeout: 10 * time.Minute,
IdleCheckFrequency: 1 * time.Minute,
PoolTimeout: 10 * time.Minute,
}
}
func (c *Client) initConnection(ctx context.Context, conn *pool.Conn) error {
negotiateMessage := messages.NewNegotiateMessage(protocolVersion, c.codec.Version())
err := c.sendRequestMessage(conn, negotiateMessage)
if err != nil {
return err
}
err = c.sendRequestMessage(conn, &messages.ConnectMessage{Params: map[string]interface{}{
"connect.timeout": int64(2000),
}})
if err != nil {
return err
}
packet, err := conn.GetPacket(ctx)
if err != nil {
return err
}
answer, err := c.tryParseMessage(packet)
if err != nil {
return err
}
if _, ok := answer.(*messages.ConnectMessageAck); !ok {
return errors.New("unknown ack")
}
return nil
}
func (c *Client) openEndpoint(ctx context.Context, conn *pool.Conn) (info pool.EndpointInfo, err error) {
var ack *messages.OpenEndpointMessageAck
ack, err = c.tryOpenEndpoint(ctx, conn)
if err != nil {
message, ok := err.(*messages.EndpointFailure)
if !ok {
return nil, err
}
supportedVersion := detectSupportedVersion(message)
if len(supportedVersion) == 0 {
return nil, err
}
c.serviceVersion = supportedVersion
ack, err = c.tryOpenEndpoint(ctx, conn)
}
if err != nil {
return nil, err
}
endpointVersion, err := strconv.ParseFloat(ack.Version, 64)
if err != nil {
return nil, err
}
return endpointInfo{
id: ack.EndpointID,
version: int(endpointVersion),
format: 0, // defaultFormat,
serviceID: ack.ServiceID,
codec: c.codec,
}, nil
}
type endpointInfo struct {
id int
version int
format int16
serviceID string
codec codec.Codec
}
func (e endpointInfo) ID() int {
return e.id
}
func (e endpointInfo) Version() int {
return e.version
}
func (e endpointInfo) Format() int16 {
return e.format
}
func (e endpointInfo) ServiceID() string {
return e.serviceID
}
func (e endpointInfo) Codec() codec.Codec {
return e.codec
}
func (c *Client) tryOpenEndpoint(ctx context.Context, conn *pool.Conn) (*messages.OpenEndpointMessageAck, error) {
if err := c.sendRequestMessage(conn, &messages.OpenEndpointMessage{Version: c.serviceVersion}); err != nil {
return nil, err
}
packet, err := conn.GetPacket(ctx)
if err != nil {
return nil, err
}
answer, err := c.tryParseMessage(packet)
if err != nil {
return nil, err
}
switch t := answer.(type) {
case *messages.EndpointFailure:
return nil, t
case *messages.OpenEndpointMessageAck:
return t, nil
default:
pp.Println(answer)
panic("unknown answer type")
}
}
func (c *Client) closeEndpoint(_ context.Context, conn *pool.Conn, endpoint *pool.Endpoint) error {
err := c.sendRequestMessage(conn, &messages.CloseEndpointMessage{EndpointID: endpoint.ID()})
if err != nil {
return err
}
return nil
}
func (c *Client) sendRequestMessage(conn *pool.Conn, message messages.RequestMessage) error {
body := bytes.NewBuffer([]byte{})
message.Format(c.codec.Encoder(), body)
packet := pool.NewPacket(message.Type(), body.Bytes())
err := conn.SendPacket(packet)
if err != nil {
return err
}
return nil
}
func (c *Client) tryParseMessage(packet *pool.Packet) (message messages.ResponseMessage, err error) {
defer func() {
if e := recover(); e != nil {
switch val := e.(type) {
case string:
err = errors.New(val)
case error:
err = val
default:
panic(e)
}
}
}()
switch packet.Type {
case messages.CONNECT_ACK:
decoder := c.codec.Decoder()
message = &messages.ConnectMessageAck{}
message.Parse(decoder, packet)
case messages.KEEP_ALIVE:
// nothing
case messages.ENDPOINT_OPEN_ACK:
decoder := c.codec.Decoder()
message = &messages.OpenEndpointMessageAck{}
message.Parse(decoder, packet)
case messages.ENDPOINT_FAILURE:
decoder := c.codec.Decoder()
message = &messages.EndpointFailure{}
message.Parse(decoder, packet)
case messages.NULL_TYPE:
panic(pp.Sprintln(int(packet.Type), "packet", packet))
default:
panic(pp.Sprintln(int(packet.Type), "packet", packet))
}
return
}
func (c *Client) dialFn(ctx context.Context) (net.Conn, error) {
_, err := net.ResolveTCPAddr("tcp", c.addr)
if err != nil {
return nil, errors.Wrap(err, "resolving tcp")
}
var dialer net.Dialer
conn, err := dialer.DialContext(ctx, "tcp", c.addr)
if err != nil {
return nil, errors.Wrap(err, "dialing tcp")
}
return conn, nil
}
func (c *Client) getEndpoint(ctx context.Context, sig esig.ESIG) (*pool.Endpoint, error) {
return c.base.Get(ctx, sig)
}
func (c *Client) putEndpoint(ctx context.Context, endpoint *pool.Endpoint) {
c.base.Put(ctx, endpoint)
}
func (c *Client) withEndpoint(ctx context.Context, sig esig.ESIG, fn func(context.Context, *pool.Endpoint) error) error {
cn, err := c.getEndpoint(ctx, sig)
if err != nil {
return err
}
defer c.putEndpoint(ctx, cn)
err = fn(ctx, cn)
return err
}
func (c *Client) sendEndpointRequest(ctx context.Context, req messages.EndpointRequestMessage) (interface{}, error) {
var value interface{}
err := c.withEndpoint(ctx, req.Sig(), func(ctx context.Context, p *pool.Endpoint) error {
message, err := p.SendRequest(ctx, req)
if err != nil {
return err
}
value = message.Message
return err
})
return value, err
}
func (c *Client) Disconnect() error {
// stop all routines
c.stopRoutines()
//err := c.conn.Close()
//if err != nil {
// return errors.Wrap(err, "closing TCP connection")
//}
// TODO: close the channels
// we return false because we lose the session configuration
// and may lose it during the disconnect.
return nil
}

View File

@ -1,170 +0,0 @@
package rclient
import (
"context"
"github.com/khorevaa/ras-client/messages"
"github.com/khorevaa/ras-client/serialize"
uuid "github.com/satori/go.uuid"
)
var _ clusterApi = (*Client)(nil)
func (c *Client) GetClusters(ctx context.Context) ([]*serialize.ClusterInfo, error) {
req := &messages.GetClustersRequest{}
resp, err := c.sendEndpointRequest(ctx, req)
if err != nil {
return nil, err
}
response := resp.(*messages.GetClustersResponse)
return response.Clusters, err
}
func (c *Client) RegCluster(ctx context.Context, info serialize.ClusterInfo) (uuid.UUID, error) {
req := &messages.RegClusterRequest{
Info: info,
}
resp, err := c.sendEndpointRequest(ctx, req)
if err != nil {
return uuid.Nil, err
}
response := resp.(*messages.RegClusterResponse)
return response.ClusterID, err
}
func (c *Client) UnregCluster(ctx context.Context, clusterId uuid.UUID) error {
req := &messages.UnregClusterRequest{
ClusterID: clusterId,
}
_, err := c.sendEndpointRequest(ctx, req)
if err != nil {
return err
}
return nil
}
func (c *Client) GetClusterInfo(ctx context.Context, cluster uuid.UUID) (serialize.ClusterInfo, error) {
req := &messages.GetClusterInfoRequest{ClusterID: cluster}
resp, err := c.sendEndpointRequest(ctx, req)
if err != nil {
return serialize.ClusterInfo{}, err
}
response := resp.(*messages.GetClusterInfoResponse)
return response.Info, nil
}
func (c *Client) GetClusterManagers(ctx context.Context, id uuid.UUID) ([]*serialize.ManagerInfo, error) {
req := &messages.GetClusterManagersRequest{ClusterID: id}
resp, err := c.sendEndpointRequest(ctx, req)
if err != nil {
return nil, err
}
response := resp.(*messages.GetClusterManagersResponse)
for _, manager := range response.Managers {
manager.ClusterID = id
}
return response.Managers, err
}
func (c *Client) GetClusterServices(ctx context.Context, id uuid.UUID) ([]*serialize.ServiceInfo, error) {
req := &messages.GetClusterServicesRequest{ClusterID: id}
resp, err := c.sendEndpointRequest(ctx, req)
if err != nil {
return nil, err
}
response := resp.(*messages.GetClusterServicesResponse)
for _, service := range response.Services {
service.ClusterID = id
}
return response.Services, err
}
func (c *Client) GetClusterInfobases(ctx context.Context, id uuid.UUID) (serialize.InfobaseSummaryList, error) {
req := &messages.GetInfobasesShortRequest{ClusterID: id}
resp, err := c.sendEndpointRequest(ctx, req)
if err != nil {
return nil, err
}
response := resp.(*messages.GetInfobasesShortResponse)
response.Infobases.Each(func(info *serialize.InfobaseSummaryInfo) {
info.ClusterID = id
})
return response.Infobases, err
}
func (c *Client) GetClusterAdmins(ctx context.Context, clusterID uuid.UUID) (serialize.UsersList, error) {
req := &messages.GetClusterAdminsRequest{
ClusterID: clusterID,
}
resp, err := c.sendEndpointRequest(ctx, req)
if err != nil {
return nil, err
}
response := resp.(*messages.GetClusterAdminsResponse)
return response.Users, err
}
func (c *Client) RegClusterAdmin(ctx context.Context, clusterID uuid.UUID, user serialize.UserInfo) error {
req := &messages.RegClusterAdminRequest{
ClusterID: clusterID,
User: user,
}
_, err := c.sendEndpointRequest(ctx, req)
if err != nil {
return err
}
return nil
}
func (c *Client) UnregClusterAdmin(ctx context.Context, clusterID uuid.UUID, user string) error {
req := &messages.UnregClusterAdminRequest{
ClusterID: clusterID,
User: user,
}
_, err := c.sendEndpointRequest(ctx, req)
if err != nil {
return err
}
return nil
}

View File

@ -1,60 +0,0 @@
package rclient
import (
"context"
"github.com/khorevaa/ras-client/messages"
"github.com/khorevaa/ras-client/serialize"
uuid "github.com/satori/go.uuid"
)
var _ connectionsApi = (*Client)(nil)
func (c *Client) GetClusterConnections(ctx context.Context, id uuid.UUID) (serialize.ConnectionShortInfoList, error) {
req := &messages.GetConnectionsShortRequest{ClusterID: id}
resp, err := c.sendEndpointRequest(ctx, req)
if err != nil {
return nil, err
}
response := resp.(*messages.GetConnectionsShortResponse)
response.Connections.Each(func(info *serialize.ConnectionShortInfo) {
info.ClusterID = id
})
return response.Connections, err
}
func (c *Client) DisconnectConnection(ctx context.Context, cluster uuid.UUID, process uuid.UUID, connection uuid.UUID, infobase uuid.UUID) error {
req := &messages.DisconnectConnectionRequest{
ClusterID: cluster,
ProcessID: process,
ConnectionID: connection,
InfobaseID: infobase,
}
_, err := c.sendEndpointRequest(ctx, req)
return err
}
func (c *Client) GetInfobaseConnections(ctx context.Context, cluster uuid.UUID, infobase uuid.UUID) (serialize.ConnectionShortInfoList, error) {
req := &messages.GetInfobaseConnectionsShortRequest{ClusterID: cluster, InfobaseID: infobase}
resp, err := c.sendEndpointRequest(ctx, req)
if err != nil {
return nil, err
}
response := resp.(*messages.GetInfobaseConnectionsShortResponse)
response.Connections.Each(func(info *serialize.ConnectionShortInfo) {
info.ClusterID = cluster
info.InfobaseID = infobase
})
return response.Connections, nil
}

View File

@ -1,37 +0,0 @@
package rclient
import (
"github.com/khorevaa/ras-client/messages"
"regexp"
"strings"
)
var re = regexp.MustCompile(`(?m)supported=(.*?)]`)
//goland:noinspection GoUnusedParameter
func detectSupportedVersion(fail *messages.EndpointFailure) string {
if fail.Cause == nil {
return ""
}
msg := fail.Cause.Message
matchs := re.FindAllString(msg, -1)
if len(matchs) == 0 {
return ""
}
supported := matchs[0]
for i := len(serviceVersions) - 1; i >= 0; i-- {
version := serviceVersions[i]
if strings.Contains(supported, version) {
return version
}
}
return ""
}

View File

@ -1,72 +0,0 @@
package rclient
import (
"context"
"github.com/khorevaa/ras-client/messages"
"github.com/khorevaa/ras-client/serialize"
uuid "github.com/satori/go.uuid"
)
var _ infobaseApi = (*Client)(nil)
func (c *Client) CreateInfobase(ctx context.Context, cluster uuid.UUID, infobase serialize.InfobaseInfo, mode int) (serialize.InfobaseInfo, error) {
req := &messages.CreateInfobaseRequest{
ClusterID: cluster,
Infobase: &infobase,
Mode: mode,
}
resp, err := c.sendEndpointRequest(ctx, req)
if err != nil {
return serialize.InfobaseInfo{}, err
}
response := resp.(*messages.CreateInfobaseResponse)
return c.GetInfobaseInfo(ctx, cluster, response.InfobaseID)
}
func (c *Client) DropInfobase(ctx context.Context, cluster uuid.UUID, infobase uuid.UUID, mode int) error {
req := &messages.DropInfobaseRequest{
ClusterID: cluster,
InfobaseID: infobase,
Mode: mode,
}
_, err := c.sendEndpointRequest(ctx, req)
return err
}
func (c *Client) UpdateSummaryInfobase(ctx context.Context, cluster uuid.UUID, infobase serialize.InfobaseSummaryInfo) error {
req := &messages.UpdateInfobaseShortRequest{ClusterID: cluster, Infobase: infobase}
_, err := c.sendEndpointRequest(ctx, req)
return err
}
func (c *Client) UpdateInfobase(ctx context.Context, cluster uuid.UUID, infobase serialize.InfobaseInfo) error {
req := &messages.UpdateInfobaseRequest{ClusterID: cluster, Infobase: infobase}
_, err := c.sendEndpointRequest(ctx, req)
return err
}
func (c *Client) GetInfobaseInfo(ctx context.Context, cluster uuid.UUID, infobase uuid.UUID) (serialize.InfobaseInfo, error) {
req := &messages.GetInfobaseInfoRequest{ClusterID: cluster, InfobaseID: infobase}
resp, err := c.sendEndpointRequest(ctx, req)
if err != nil {
return serialize.InfobaseInfo{}, err
}
response := resp.(*messages.GetInfobaseInfoResponse)
response.Infobase.ClusterID = cluster
return response.Infobase, err
}

View File

@ -1,245 +0,0 @@
package pool
import (
"context"
"io"
"net"
"sync"
"sync/atomic"
"time"
)
// IOError is the data type for errors occurring in case of failure.
type IOError struct {
Connection net.Conn
Error error
}
type Conn struct {
connMU *sync.Mutex
_locked uint32
_closed uint32
netConn net.Conn
onError func(err IOError)
endpoints []*Endpoint
closer func(ctx context.Context, conn *Conn, endpoint *Endpoint) error
createdAt time.Time
usedAt uint32 // atomic
pooled bool
Inited bool
}
func NewConn(netConn net.Conn) *Conn {
cn := &Conn{
createdAt: time.Now(),
connMU: &sync.Mutex{},
}
cn.SetNetConn(netConn)
cn.SetUsedAt(time.Now())
return cn
}
func (c *Conn) SendPacket(packet *Packet) error {
c.SetUsedAt(time.Now())
err := packet.Write(c.netConn)
return err
}
func (c *Conn) GetPacket(ctx context.Context) (packet *Packet, err error) {
c.SetUsedAt(time.Now())
return c.readContext(ctx)
}
func (c *Conn) UsedAt() time.Time {
unix := atomic.LoadUint32(&c.usedAt)
return time.Unix(int64(unix), 0)
}
func (c *Conn) SetUsedAt(tm time.Time) {
atomic.StoreUint32(&c.usedAt, uint32(tm.Unix()))
}
func (c *Conn) RemoteAddr() net.Addr {
return c.netConn.RemoteAddr()
}
func (c *Conn) SetNetConn(netConn net.Conn) {
c.netConn = netConn
}
func (c *Conn) closed() bool {
if atomic.LoadUint32(&c._closed) == 1 {
return true
}
_ = c.netConn.SetReadDeadline(time.Now())
_, err := c.netConn.Read(make([]byte, 0))
var zero time.Time
_ = c.netConn.SetReadDeadline(zero)
if err == nil {
return false
}
netErr, ok := err.(net.Error)
if err != io.EOF && (!ok || !netErr.Timeout()) {
atomic.StoreUint32(&c._closed, 1)
return true
}
return false
}
func (c *Conn) Close() error {
if !atomic.CompareAndSwapUint32(&c._closed, 0, 1) {
return nil
}
if c.closer != nil {
for _, endpoint := range c.endpoints {
_ = c.closer(context.Background(), c, endpoint)
}
}
return c.netConn.Close()
}
//func (conn *Conn) lock() {
//
// conn.connMU.Lock()
// atomic.StoreUint32(&conn._locked, 1)
//}
//
//func (conn *Conn) unlock() {
//
// atomic.StoreUint32(&conn._locked, 0)
// conn.connMU.Unlock()
//
//}
func (c *Conn) Locked() bool {
return atomic.LoadUint32(&c._locked) == 1
}
func (c *Conn) readContext(ctx context.Context) (*Packet, error) {
recvDone := make(chan *Packet)
errChan := make(chan error)
go c.readPacket(recvDone, errChan)
// set up the cancellation to abort reads in progress
for {
select {
case <-ctx.Done():
return nil, ctx.Err()
// Close() can be used if this isn't necessarily a TCP connection
case err := <-errChan:
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
go c.readPacket(recvDone, errChan)
continue
}
return nil, err
case packet := <-recvDone:
return packet, nil
}
}
}
func (c *Conn) readPacket(recvDone chan *Packet, errChan chan error) {
//c.lock()
//defer c.unlock()
err := c.netConn.SetReadDeadline(time.Now().Add(5 * time.Second))
if err != nil {
errChan <- err
return
}
typeBuffer := make([]byte, 1)
_, err = c.netConn.Read(typeBuffer)
if err != nil {
if c.onError != nil {
c.onError(IOError{c.netConn, err})
}
errChan <- err
return
}
size, err := decodeSize(c.netConn)
if err != nil {
if c.onError != nil {
c.onError(IOError{c.netConn, err})
}
errChan <- err
return
}
data := make([]byte, size)
readLength := 0
n := 0
for readLength < len(data) {
n, err = c.netConn.Read(data[readLength:])
readLength += n
if err != nil {
if c.onError != nil {
c.onError(IOError{c.netConn, err})
}
errChan <- err
return
}
}
recvDone <- NewPacket(typeBuffer[0], data)
}
func decodeSize(r io.Reader) (int, error) {
ff := 0xFFFFFF80
b1, err := readByte(r)
if err != nil {
return 0, err
}
cur := int(b1 & 0xFF)
size := cur & 0x7F
for shift := 7; (cur & ff) != 0x0; {
b1, err = readByte(r)
if err != nil {
return 0, err
}
cur = int(b1 & 0xFF)
size += (cur & 0x7F) << shift
shift += 7
}
return size, nil
}
func readByte(r io.Reader) (byte, error) {
byteBuffer := make([]byte, 1)
_, err := r.Read(byteBuffer)
return byteBuffer[0], err
}

View File

@ -1,287 +0,0 @@
package pool
import (
"bytes"
"context"
"encoding/base64"
"errors"
"fmt"
"github.com/khorevaa/ras-client/messages"
"github.com/khorevaa/ras-client/protocol/codec"
"github.com/khorevaa/ras-client/serialize/esig"
"io"
"strings"
"sync/atomic"
"time"
)
func NewEndpoint(endpoint EndpointInfo) *Endpoint {
return &Endpoint{
id: endpoint.ID(),
version: endpoint.Version(),
format: endpoint.Format(),
serviceID: endpoint.ServiceID(),
codec: endpoint.Codec(),
}
}
type EndpointInfo interface {
ID() int
Version() int
Format() int16
ServiceID() string
Codec() codec.Codec
}
type Endpoint struct {
id int
version int
format int16
serviceID string
codec codec.Codec
conn *Conn
createdAt time.Time
usedAt uint32 // atomic
pooled bool
Inited bool
sig esig.ESIG
clusterHash string
infobaseHash string
onRequest func(ctx context.Context, endpoint *Endpoint, req messages.EndpointRequestMessage) error
}
func calcHash(in string) string {
str := base64.StdEncoding.EncodeToString([]byte(in))
return str
}
func checkHash(val1, val2 string) bool {
return strings.EqualFold(val1, val2)
}
func (e *Endpoint) Sig() esig.ESIG {
return e.sig
}
func (e *Endpoint) SetSig(sig esig.ESIG) {
e.sig = sig
}
func (e *Endpoint) UsedAt() time.Time {
unix := atomic.LoadUint32(&e.usedAt)
return time.Unix(int64(unix), 0)
}
func (e *Endpoint) SetUsedAt(tm time.Time) {
atomic.StoreUint32(&e.usedAt, uint32(tm.Unix()))
}
func (e *Endpoint) ID() int {
return e.id
}
func (e *Endpoint) Version() int {
return e.version
}
func (e *Endpoint) Format() int16 {
return e.format
}
func (e *Endpoint) ServiceID() string {
return e.serviceID
}
func (e *Endpoint) Codec() codec.Codec {
return e.codec
}
func (e *Endpoint) CheckClusterAuth(user, pwd string) bool {
return checkHash(e.clusterHash, calcHash(fmt.Sprintf("%s:%s", user, pwd)))
}
func (e *Endpoint) SetClusterAuth(user, pwd string) {
e.clusterHash = calcHash(fmt.Sprintf("%s:%s", user, pwd))
}
func (e *Endpoint) CheckInfobaseAuth(user, pwd string) bool {
return checkHash(e.infobaseHash, calcHash(fmt.Sprintf("%s:%s", user, pwd)))
}
func (e *Endpoint) SetInfobaseAuth(user, pwd string) {
e.infobaseHash = calcHash(fmt.Sprintf("%s:%s", user, pwd))
}
func (e *Endpoint) sendRequest(ctx context.Context, message *messages.EndpointMessage) (*messages.EndpointMessage, error) {
e.SetUsedAt(time.Now())
body := bytes.NewBuffer([]byte{})
message.Format(e.codec.Encoder(), e.version, body)
packet := NewPacket(messages.ENDPOINT_MESSAGE, body.Bytes())
err := e.conn.SendPacket(packet)
if err != nil {
return nil, err
}
answer, err := e.conn.GetPacket(ctx)
if err != nil {
return nil, err
}
return e.tryParseMessage(answer)
}
func (e *Endpoint) sendVoidRequest(_ context.Context, conn *Conn, m messages.EndpointMessage) error {
body := bytes.NewBuffer([]byte{})
m.Format(e.codec.Encoder(), e.version, body)
packet := NewPacket(byte(m.Type), body.Bytes())
err := conn.SendPacket(packet)
if err != nil {
return err
}
return nil
}
func (e *Endpoint) tryParseMessage(packet *Packet) (message *messages.EndpointMessage, err error) {
defer func() {
if e := recover(); e != nil {
switch val := e.(type) {
case string:
err = errors.New(val)
case error:
err = val
default:
panic(e)
}
}
}()
switch packet.Type {
case messages.ENDPOINT_MESSAGE:
decoder := e.codec.Decoder()
endpointID := decoder.EndpointId(packet)
format := decoder.Short(packet)
message = &messages.EndpointMessage{
EndpointID: endpointID,
EndpointFormat: format,
}
message.Parse(decoder, e.version, packet)
case messages.ENDPOINT_FAILURE:
decoder := e.codec.Decoder()
err := &messages.EndpointFailure{}
err.Parse(decoder, packet)
return nil, err
default:
return nil, &messages.UnknownMessageError{
Type: packet.Type,
Data: packet.Data,
EndpointID: e.id,
ServiceID: e.serviceID,
Err: ErrUnknownMessage}
}
return
}
func (e *Endpoint) tryFormatMessage(message *messages.EndpointMessage, writer io.Writer) (err error) {
defer func() {
if e := recover(); e != nil {
switch val := e.(type) {
case string:
err = errors.New(val)
case error:
err = val
default:
panic(e)
}
}
}()
encoder := e.codec.Encoder()
message.Format(encoder, e.version, writer)
return
}
func (e *Endpoint) SendRequest(ctx context.Context, req messages.EndpointRequestMessage) (*messages.EndpointMessage, error) {
if e.onRequest != nil {
err := e.onRequest(ctx, e, req)
if err != nil {
return nil, err
}
}
message := e.newEndpointMessage(req)
answer, err := e.sendRequest(ctx, message)
if err != nil {
return nil, err
}
switch err := answer.Message.(type) {
case *messages.EndpointMessageFailure:
return nil, err
case *messages.EndpointFailure:
return nil, err
}
return answer, err
}
func (e *Endpoint) newEndpointMessage(req messages.EndpointRequestMessage) *messages.EndpointMessage {
message := &messages.EndpointMessage{
EndpointID: e.id,
EndpointFormat: e.format,
Message: req,
Type: req.Type(),
Kind: messages.MESSAGE_KIND,
}
return message
}

View File

@ -1,122 +0,0 @@
package pool
import (
"github.com/khorevaa/ras-client/serialize/esig"
uuid "github.com/satori/go.uuid"
"sort"
)
type IdleConns []*Conn
func (c *IdleConns) Pop(sig esig.ESIG, maxOpenEndpoints int) *Endpoint {
type finder struct {
connIdx int
endpointIdx int
order int
cap int
usedAt int64
}
var finders []finder
var findConnIdx int
var findEndpoint *Endpoint
conns := *c
for idx, conn := range conns {
if len(conn.endpoints) == 0 {
finders = append(finders, finder{idx, -1, 0, 0, conn.UsedAt().Unix()})
continue
}
capEnd := len(conn.endpoints)
for i, endpoint := range conn.endpoints {
if esig.Equal(endpoint.sig, sig) {
findEndpoint = endpoint
findConnIdx = idx
break
}
orderByte := 2
if esig.HighBoundEqual(endpoint.sig, sig.High()) && uuid.Equal(endpoint.sig.Low(), uuid.Nil) {
orderByte = 1
}
finders = append(finders, finder{idx, i, orderByte, capEnd, endpoint.UsedAt().Unix()})
}
if findEndpoint != nil {
break
}
}
if findEndpoint != nil {
c.remove(findConnIdx)
return findEndpoint
}
if len(finders) == 0 {
return nil
}
sort.Slice(finders, func(i, j int) bool {
if finders[i].order < finders[j].order {
return true
}
if finders[i].order > finders[j].order {
return false
}
if finders[i].cap < finders[j].cap {
return true
}
if finders[i].cap > finders[j].cap {
return false
}
return finders[i].usedAt < finders[j].usedAt
})
f := finders[0]
conn := conns[f.connIdx]
switch f.order {
case 0:
findEndpoint = &Endpoint{
conn: conn,
}
case 1:
findEndpoint = conn.endpoints[f.endpointIdx]
case 2:
if len(conn.endpoints) < maxOpenEndpoints {
findEndpoint = &Endpoint{
conn: conn,
}
} else {
findEndpoint = conn.endpoints[f.endpointIdx]
}
}
c.remove(f.connIdx)
return findEndpoint
}
func (c *IdleConns) remove(i int) {
conns := *c
conns[i] = conns[len(conns)-1]
// We do not need to put conns[i] at the end, as it will be discarded anyway
*c = conns[:len(conns)-1]
}

View File

@ -1,54 +0,0 @@
package pool
import "context"
type SingleConnPool struct {
pool Pooler
cn *Conn
stickyErr error
}
var _ Pooler = (*SingleConnPool)(nil)
func NewSingleConnPool(pool Pooler, cn *Conn) *SingleConnPool {
return &SingleConnPool{
pool: pool,
cn: cn,
}
}
func (p *SingleConnPool) NewConn(ctx context.Context) (*Conn, error) {
return p.pool.NewConn(ctx)
}
func (p *SingleConnPool) CloseConn(cn *Conn) error {
return p.pool.CloseConn(cn)
}
func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) {
if p.stickyErr != nil {
return nil, p.stickyErr
}
return p.cn, nil
}
func (p *SingleConnPool) Put(ctx context.Context, cn *Conn) {}
func (p *SingleConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
p.cn = nil
p.stickyErr = reason
}
func (p *SingleConnPool) Close() error {
p.cn = nil
p.stickyErr = ErrClosed
return nil
}
func (p *SingleConnPool) Len() int {
return 0
}
func (p *SingleConnPool) IdleLen() int {
return 0
}

View File

@ -1,23 +0,0 @@
package pool
import (
"context"
"net"
"time"
)
type Options struct {
Dialer func(ctx context.Context) (net.Conn, error)
OnClose func(conn *Conn) error
OpenEndpoint func(ctx context.Context, conn *Conn) (EndpointInfo, error)
CloseEndpoint func(ctx context.Context, conn *Conn, endpoint *Endpoint) error
InitConnection func(ctx context.Context, conn *Conn) error
PoolSize int
MinIdleConns int
MaxConnAge time.Duration
PoolTimeout time.Duration
IdleTimeout time.Duration
IdleCheckFrequency time.Duration
MaxOpenEndpoints int
}

View File

@ -1,118 +0,0 @@
package pool
import (
"bytes"
"io"
)
const MaxShift = 7
// Send buffer size determines how many bytes we send in a single TCP write call.
// This can be anything from 1 to 65495.
// A good default value for this can be read from /proc/sys/net/ipv4/tcp_wmem
const sendBufferSize = 16384
// Packet represents a single network message.
// It has a byte code indicating the type of the message
// and a data payload in the form of a byte slice.
type Packet struct {
Type byte
Length int
Data []byte
reader *bytes.Reader
}
// NewPacket creates a new packet.
// It expects a byteCode for the type of message and
// a data parameter in the form of a byte slice.
func NewPacket(byteCode byte, data []byte) *Packet {
return &Packet{
Type: byteCode,
Length: len(data),
Data: data,
reader: bytes.NewReader(data),
}
}
// Read reads the packet data
func (packet *Packet) Read(p []byte) (n int, err error) {
return packet.reader.Read(p)
}
// Write writes the packet to the IO device.
func (packet *Packet) Write(writer io.Writer) error {
// For type 0 (NEGOTIATE) only the body is written
if packet.Type != 0 {
buf := bytes.NewBuffer([]byte{packet.Type})
encodeSize(packet.Length, buf)
_, err := buf.WriteTo(writer)
if err != nil {
return err
}
}
bytesWritten := 0
writeUntil := 0
for bytesWritten < len(packet.Data) {
writeUntil = bytesWritten + sendBufferSize
if writeUntil > len(packet.Data) {
writeUntil = len(packet.Data)
}
n, err := writer.Write(packet.Data[bytesWritten:writeUntil])
if err != nil {
return err
}
bytesWritten += n
}
return nil
}
// Bytes returns the raw byte slice serialization of the packet.
func (packet *Packet) Bytes() []byte {
result := []byte{packet.Type}
size := bytes.NewBuffer([]byte{})
encodeSize(packet.Length, size)
result = append(result, size.Bytes()...)
result = append(result, packet.Data...)
return result
}
func encodeSize(val int, buf *bytes.Buffer) {
var b1 int
msb := val >> MaxShift
if msb != 0 {
b1 = -128
} else {
b1 = 0
}
buf.Write([]byte{byte(b1 | (val & 0x7F))})
for val = msb; val > 0; val = msb {
msb >>= MaxShift
if msb != 0 {
b1 = -128
} else {
b1 = 0
}
buf.Write([]byte{byte(b1 | (val & 0x7F))})
}
}
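// A minimal round-trip sketch for the 7-bit size encoding above (kept as a
// comment; both helpers are unexported, so such a check would live in a test
// within this package):
//
//	buf := bytes.NewBuffer(nil)
//	encodeSize(300, buf)         // writes 0xAC 0x02 (7-bit groups, low bits first)
//	size, err := decodeSize(buf) // size == 300, err == nil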

View File

@ -1,817 +0,0 @@
package pool
import (
"context"
"errors"
"github.com/khorevaa/ras-client/messages"
"github.com/khorevaa/ras-client/serialize/esig"
uuid "github.com/satori/go.uuid"
"sync"
"sync/atomic"
"time"
)
// What we have:
// 1. A client connection to the service.
// 2. A client holds several endpoints:
// - they require authentication on the cluster
// - they require infobase authentication (once per infobase;
// if you switch infobases or a different access password is used,
// re-authentication is required)
// 3. Endpoints are split by:
// - general information (messages that concern the cluster as a whole)
// - infobase information (messages with an infobase key)
//
// We need to organize a pool of clients
// and a pool of endpoints to switch infobase authentication,
// where the client reads messages for an endpoint and sends them back as the response.
/*
Connection handling algorithm
1. A connection client is created (the "client" below).
2. One probe connection is created.
2.1. The initial commands are executed:
- NewNegotiateMessage(protocolVersion, c.codec.Version())
- &ConnectMessage{params: map[string]interface{}{
"connect.timeout": int64(2000)}}
2.2. If there is no error, the connection goes idle. On error the client is not created.
3. Main work cycle
Data request -> answer to the user
3.1. An endpoint is opened.
3.2. Authentication on the cluster -> a permissions/authentication error is possible -> return the error
3.3. If required, authentication on the infobase -> a permissions/authentication error is possible -> return the error
3.4. The request is executed -> a parsing error is possible -> return the error
3.5. Waiting for the answer. For VOID_MESSAGE requests no answer is awaited; go straight to step 3.8.
3.6. The answer is parsed -> a request error is possible
3.7. The answer is sent to the user
3.8. The endpoint is moved back to idle, based on two criteria:
- the request touched only cluster data (reuse for similar requests)
- there was infobase authentication (reuse for requests to that infobase)
After n minutes the endpoint may be reused for other infobases, with re-authentication.
4. Endpoint life cycle
4.1. Open
4.2. Send a message:
acquire the connection lock -> write the data to the connection
4.3. Read data from the connection -> receive the message,
release the connection lock
4.4. Wait for the lifetime / repeat the cycle from step 4.2.
4.5. Close the endpoint
4.6. Terminate when the connection is closed
5. Working with an open connection
5.1. Lock it against use by other endpoints
5.2. Write the data
5.3. Wait for the answer -> read the data. With an open endpoint an answer always
arrives for a sent request, even if no explicit answer is required,
e.g. authentication on the cluster or in an infobase.
5.4. Unlock on timeout or when the answer is received
*/
var (
ErrClosed = errors.New("protocol: pool is closed")
ErrUnknownMessage = errors.New("protocol: unknown message packet")
ErrPoolTimeout = errors.New("protocol: endpoint pool timeout")
)
var timers = sync.Pool{
New: func() interface{} {
t := time.NewTimer(time.Hour)
t.Stop()
return t
},
}
var _ EndpointPool = (*endpointPool)(nil)
func NewEndpointPool(opt *Options) EndpointPool {
p := &endpointPool{
opt: opt,
queue: make(chan struct{}, opt.PoolSize),
conns: make([]*Conn, 0, opt.PoolSize),
idleConns: make([]*Conn, 0, opt.PoolSize),
authInfobaseIdx: make(map[uuid.UUID]struct{ user, password string }),
authClusterIdx: make(map[uuid.UUID]struct{ user, password string }),
}
p.connsMu.Lock()
p.checkMinIdleConns()
p.connsMu.Unlock()
if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 {
go p.reaper(opt.IdleCheckFrequency)
}
return p
}
type EndpointPool interface {
NewEndpoint(ctx context.Context) (*Endpoint, error)
CloseEndpoint(endpoint *Endpoint) error
Get(ctx context.Context, sig esig.ESIG) (*Endpoint, error)
Put(ctx context.Context, endpoint *Endpoint)
Remove(ctx context.Context, endpoint *Endpoint, err error)
Len() int
IdleLen() int
Close() error
SetAgentAuth(user, password string)
SetClusterAuth(id uuid.UUID, user, password string)
SetInfobaseAuth(id uuid.UUID, user, password string)
GetClusterAuth(id uuid.UUID) (user, password string)
GetInfobaseAuth(id uuid.UUID) (user, password string)
}
type Pooler interface {
NewConn(context.Context) (*Conn, error)
CloseConn(*Conn) error
Get(context.Context) (*Conn, error)
Put(context.Context, *Conn)
Remove(context.Context, *Conn, error)
Len() int
IdleLen() int
Close() error
}
type endpointPool struct {
opt *Options
dialErrorsNum uint32 // atomic
_closed uint32 // atomic
lastDialErrorMu sync.RWMutex
lastDialError error
queue chan struct{}
poolSize int
idleConnsLen int
connsMu sync.Mutex
conns []*Conn
idleConns IdleConns
authClusterIdx map[uuid.UUID]struct{ user, password string }
authInfobaseIdx map[uuid.UUID]struct{ user, password string }
authAgent struct{ user, password string }
}
func (p *endpointPool) NewEndpoint(ctx context.Context) (*Endpoint, error) {
if p.closed() {
return nil, ErrClosed
}
err := p.waitTurn(ctx)
if err != nil {
return nil, err
}
for {
p.connsMu.Lock()
endpoint := p.popIdle(esig.ESIG{})
p.connsMu.Unlock()
if endpoint == nil {
break
}
if p.isStaleConn(endpoint.conn) {
_ = p.CloseConn(endpoint.conn)
continue
}
if !endpoint.Inited {
endpoint, err = p.openEndpoint(ctx, endpoint.conn)
if err != nil {
return nil, err
}
}
return endpoint, nil
}
newConn, err := p.newConn(ctx, true)
if err != nil {
p.freeTurn()
return nil, err
}
endpoint, err := p.openEndpoint(ctx, newConn)
return endpoint, err
}
func (p *endpointPool) Put(ctx context.Context, cn *Endpoint) {
if !cn.conn.pooled {
p.Remove(ctx, cn, nil)
return
}
p.connsMu.Lock()
p.idleConns = append(p.idleConns, cn.conn)
p.idleConnsLen++
p.connsMu.Unlock()
p.freeTurn()
}
// Get returns existed connection from the pool or creates a new one.
func (p *endpointPool) Get(ctx context.Context, sig esig.ESIG) (*Endpoint, error) {
if p.closed() {
return nil, ErrClosed
}
err := p.waitTurn(ctx)
if err != nil {
return nil, err
}
for {
p.connsMu.Lock()
endpoint := p.popIdle(sig)
p.connsMu.Unlock()
if endpoint == nil {
break
}
if p.isStaleConn(endpoint.conn) {
_ = p.CloseConn(endpoint.conn)
continue
}
if !endpoint.Inited {
endpoint, err = p.openEndpoint(ctx, endpoint.conn)
if err != nil {
return nil, err
}
}
return endpoint, nil
}
newConn, err := p.newConn(ctx, true)
if err != nil {
p.freeTurn()
return nil, err
}
endpoint, err := p.openEndpoint(ctx, newConn)
return endpoint, err
}
func (p *endpointPool) Remove(_ context.Context, cn *Endpoint, _ error) {
p.removeConnWithLock(cn.conn)
p.freeTurn()
_ = p.closeConn(cn.conn)
}
func (p *endpointPool) CloseConn(cn *Conn) error {
p.removeConnWithLock(cn)
return p.closeConn(cn)
}
func (p *endpointPool) SetAgentAuth(user, password string) {
p.authAgent = struct{ user, password string }{user: user, password: password}
}
func (p *endpointPool) SetClusterAuth(id uuid.UUID, user, password string) {
p.authClusterIdx[id] = struct{ user, password string }{user: user, password: password}
}
func (p *endpointPool) SetInfobaseAuth(id uuid.UUID, user, password string) {
p.authInfobaseIdx[id] = struct{ user, password string }{user: user, password: password}
}
func (p *endpointPool) GetClusterAuth(id uuid.UUID) (user, password string) {
return p.getAuth(p.authClusterIdx, id)
}
func (p *endpointPool) GetInfobaseAuth(id uuid.UUID) (user, password string) {
return p.getAuth(p.authInfobaseIdx, id)
}
// Len returns total number of connections.
func (p *endpointPool) Len() int {
p.connsMu.Lock()
n := len(p.conns)
p.connsMu.Unlock()
return n
}
// IdleLen returns number of idle connections.
func (p *endpointPool) IdleLen() int {
p.connsMu.Lock()
n := p.idleConnsLen
p.connsMu.Unlock()
return n
}
func (p *endpointPool) Close() error {
if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) {
return ErrClosed
}
var firstErr error
p.connsMu.Lock()
for _, cn := range p.conns {
if err := p.closeConn(cn); err != nil && firstErr == nil {
firstErr = err
}
}
p.conns = nil
p.poolSize = 0
p.idleConns = nil
p.idleConnsLen = 0
p.connsMu.Unlock()
return firstErr
}
func (p *endpointPool) CloseEndpoint(*Endpoint) error {
panic("implement me")
}
func (p *endpointPool) ReapStaleConns() (int, error) {
var n int
for {
p.getTurn()
p.connsMu.Lock()
cn := p.reapStaleConn()
p.connsMu.Unlock()
p.freeTurn()
if cn != nil {
_ = p.closeConn(cn)
n++
} else {
break
}
}
return n, nil
}
func (p *endpointPool) openEndpoint(ctx context.Context, conn *Conn) (*Endpoint, error) {
if p.closed() {
return nil, ErrClosed
}
if !conn.Inited {
err := p.opt.InitConnection(ctx, conn)
if err != nil {
return nil, err
}
conn.Inited = true
}
openAck, err := p.opt.OpenEndpoint(ctx, conn)
if err != nil {
return nil, err
}
endpoint := NewEndpoint(openAck)
endpoint.Inited = true
endpoint.onRequest = p.onRequest
endpoint.conn = conn
conn.endpoints = append(conn.endpoints, endpoint)
return endpoint, nil
}
func needAgentAuth(req messages.EndpointRequestMessage) bool {
switch req.(type) {
case *messages.GetAgentAdminsRequest, *messages.RegAgentAdminRequest, *messages.UnregAgentAdminRequest,
*messages.RegClusterRequest, *messages.UnregClusterRequest:
return true
}
return false
}
// Get returns existed connection from the pool or creates a new one.
func (p *endpointPool) onRequest(ctx context.Context, endpoint *Endpoint, req messages.EndpointRequestMessage) error {
if needAgentAuth(req) {
err := p.setAgentAuth(ctx, endpoint)
if err != nil {
return err
}
}
sig := req.Sig()
if esig.IsNul(sig) {
return nil
}
if esig.Equal(endpoint.sig, sig) {
return p.updateAuthIfNeed(ctx, endpoint, sig.High(), sig.Low())
}
err := p.updateAuthIfNeed(ctx, endpoint, sig.High(), sig.Low())
if err != nil {
return err
}
endpoint.sig = sig
return nil
}
func (p *endpointPool) updateAuthIfNeed(ctx context.Context, endpoint *Endpoint, clusterID, infobaseID uuid.UUID) error {
if user, password := p.GetClusterAuth(clusterID); !endpoint.CheckClusterAuth(user, password) {
err := p.updateClusterAuth(ctx, endpoint, clusterID, user, password)
if err != nil {
return err
}
}
if user, password := p.GetInfobaseAuth(infobaseID); !endpoint.CheckInfobaseAuth(user, password) {
err := p.updateInfobaseAuth(ctx, endpoint, clusterID, user, password)
if err != nil {
return err
}
}
return nil
}
func (p *endpointPool) updateClusterAuth(ctx context.Context, endpoint *Endpoint, clusterID uuid.UUID, user, password string) error {
authMessage := endpoint.newEndpointMessage(messages.ClusterAuthenticateRequest{
ClusterID: clusterID,
User: user,
Password: password,
})
message, err := endpoint.sendRequest(ctx, authMessage)
if err != nil {
return err
}
switch err := message.Message.(type) {
case *messages.EndpointMessageFailure:
return err
}
endpoint.SetClusterAuth(user, password)
return nil
}
func (p *endpointPool) setAgentAuth(ctx context.Context, endpoint *Endpoint) error {
authMessage := endpoint.newEndpointMessage(messages.AuthenticateAgentRequest{
User: p.authAgent.user,
Password: p.authAgent.password,
})
message, err := endpoint.sendRequest(ctx, authMessage)
if err != nil {
return err
}
switch err := message.Message.(type) {
case *messages.EndpointMessageFailure:
return err
}
return nil
}
func (p *endpointPool) updateInfobaseAuth(ctx context.Context, endpoint *Endpoint, clusterID uuid.UUID, user, password string) error {
authMessage := endpoint.newEndpointMessage(messages.AuthenticateInfobaseRequest{
ClusterID: clusterID,
User: user,
Password: password,
})
message, err := endpoint.sendRequest(ctx, authMessage)
if err != nil {
return err
}
switch err := message.Message.(type) {
case *messages.EndpointMessageFailure:
return err
}
endpoint.SetInfobaseAuth(user, password)
return nil
}
func (p *endpointPool) getAuth(idx map[uuid.UUID]struct{ user, password string }, id uuid.UUID) (user, password string) {
if auth, ok := idx[id]; ok {
user, password = auth.user, auth.password
return
}
if auth, ok := idx[uuid.Nil]; ok {
user, password = auth.user, auth.password
}
return
}
func (p *endpointPool) checkMinIdleConns() {
if p.opt.MinIdleConns == 0 {
return
}
for p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns {
p.poolSize++
p.idleConnsLen++
go func() {
err := p.addIdleConn()
if err != nil {
p.connsMu.Lock()
p.poolSize--
p.idleConnsLen--
p.connsMu.Unlock()
}
}()
}
}
func (p *endpointPool) addIdleConn() error {
cn, err := p.dialConn(context.TODO(), true)
if err != nil {
return err
}
p.connsMu.Lock()
p.conns = append(p.conns, cn)
p.idleConns = append(p.idleConns, cn)
p.connsMu.Unlock()
return nil
}
func (p *endpointPool) newConn(c context.Context, pooled bool) (*Conn, error) {
cn, err := p.dialConn(c, pooled)
if err != nil {
return nil, err
}
cn.closer = p.opt.CloseEndpoint
p.connsMu.Lock()
p.conns = append(p.conns, cn)
if pooled {
// If pool is full remove the cn on next Put.
if p.poolSize >= p.opt.PoolSize {
cn.pooled = false
} else {
p.poolSize++
}
}
p.connsMu.Unlock()
return cn, nil
}
func (p *endpointPool) dialConn(c context.Context, pooled bool) (*Conn, error) {
if p.closed() {
return nil, ErrClosed
}
if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) {
return nil, p.getLastDialError()
}
netConn, err := p.opt.Dialer(c)
if err != nil {
p.setLastDialError(err)
if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) {
go p.tryDial()
}
return nil, err
}
cn := NewConn(netConn)
cn.pooled = pooled
return cn, nil
}
func (p *endpointPool) tryDial() {
for {
if p.closed() {
return
}
conn, err := p.opt.Dialer(context.TODO())
if err != nil {
p.setLastDialError(err)
time.Sleep(time.Second)
continue
}
atomic.StoreUint32(&p.dialErrorsNum, 0)
_ = conn.Close()
return
}
}
func (p *endpointPool) setLastDialError(err error) {
p.lastDialErrorMu.Lock()
p.lastDialError = err
p.lastDialErrorMu.Unlock()
}
func (p *endpointPool) getLastDialError() error {
p.lastDialErrorMu.RLock()
err := p.lastDialError
p.lastDialErrorMu.RUnlock()
return err
}
func (p *endpointPool) getTurn() {
p.queue <- struct{}{}
}
func (p *endpointPool) waitTurn(c context.Context) error {
select {
case <-c.Done():
return c.Err()
default:
}
select {
case p.queue <- struct{}{}:
return nil
default:
}
timer := timers.Get().(*time.Timer)
timer.Reset(p.opt.PoolTimeout)
select {
case <-c.Done():
if !timer.Stop() {
<-timer.C
}
timers.Put(timer)
return c.Err()
case p.queue <- struct{}{}:
if !timer.Stop() {
<-timer.C
}
timers.Put(timer)
return nil
case <-timer.C:
timers.Put(timer)
//atomic.AddUint32(&p.stats.Timeouts, 1)
return ErrPoolTimeout
}
}
func (p *endpointPool) freeTurn() {
<-p.queue
}
func (p *endpointPool) popIdle(sig esig.ESIG) *Endpoint {
if len(p.idleConns) == 0 {
return nil
}
endpoint := p.idleConns.Pop(sig, p.opt.MaxOpenEndpoints)
if endpoint == nil {
return nil
}
p.idleConnsLen--
p.checkMinIdleConns()
return endpoint
}
func (p *endpointPool) removeConnWithLock(cn *Conn) {
p.connsMu.Lock()
p.removeConn(cn)
p.connsMu.Unlock()
}
func (p *endpointPool) removeConn(cn *Conn) {
for i, c := range p.conns {
if c == cn {
p.conns = append(p.conns[:i], p.conns[i+1:]...)
if cn.pooled {
p.poolSize--
p.checkMinIdleConns()
}
return
}
}
}
func (p *endpointPool) closeConn(cn *Conn) error {
if p.opt.OnClose != nil {
_ = p.opt.OnClose(cn)
}
return cn.Close()
}
func (p *endpointPool) closed() bool {
return atomic.LoadUint32(&p._closed) == 1
}
func (p *endpointPool) reaper(frequency time.Duration) {
ticker := time.NewTicker(frequency)
defer ticker.Stop()
for range ticker.C {
if p.closed() {
break
}
_, err := p.ReapStaleConns()
if err != nil {
continue
}
}
}
func (p *endpointPool) reapStaleConn() *Conn {
if len(p.idleConns) == 0 {
return nil
}
cn := p.idleConns[0]
if !p.isStaleConn(cn) {
return nil
}
p.idleConns = append(p.idleConns[:0], p.idleConns[1:]...)
p.idleConnsLen--
p.removeConn(cn)
return cn
}
func (p *endpointPool) isStaleConn(cn *Conn) bool {
if cn.closed() {
return true
}
if p.opt.IdleTimeout == 0 && p.opt.MaxConnAge == 0 {
return false
}
now := time.Now()
if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt()) >= p.opt.IdleTimeout {
return true
}
if p.opt.MaxConnAge > 0 && now.Sub(cn.createdAt) >= p.opt.MaxConnAge {
return true
}
return false
}

View File

@ -1,99 +0,0 @@
package rclient
import (
"context"
"github.com/khorevaa/ras-client/messages"
"github.com/khorevaa/ras-client/serialize"
uuid "github.com/satori/go.uuid"
)
var _ locksApi = (*Client)(nil)
func (c *Client) GetClusterLocks(ctx context.Context, cluster uuid.UUID) (serialize.LocksList, error) {
req := &messages.GetLocksRequest{
ClusterID: cluster,
}
resp, err := c.sendEndpointRequest(ctx, req)
if err != nil {
return nil, err
}
response := resp.(*messages.GetLocksResponse)
response.List.Each(func(info *serialize.LockInfo) {
info.ClusterID = cluster
})
return response.List, err
}
func (c *Client) GetInfobaseLocks(ctx context.Context, cluster uuid.UUID, infobase uuid.UUID) (serialize.LocksList, error) {
req := &messages.GetInfobaseLockRequest{
ClusterID: cluster,
InfobaseID: infobase,
}
resp, err := c.sendEndpointRequest(ctx, req)
if err != nil {
return nil, err
}
response := resp.(*messages.GetInfobaseLockResponse)
response.List.Each(func(info *serialize.LockInfo) {
info.ClusterID = cluster
info.InfobaseID = infobase
})
return response.List, err
}
func (c *Client) GetSessionLocks(ctx context.Context, cluster uuid.UUID, infobase uuid.UUID, session uuid.UUID) (serialize.LocksList, error) {
req := &messages.GetSessionLockRequest{
ClusterID: cluster,
InfobaseID: infobase,
SessionID: session,
}
resp, err := c.sendEndpointRequest(ctx, req)
if err != nil {
return nil, err
}
response := resp.(*messages.GetSessionLockResponse)
response.List.Each(func(info *serialize.LockInfo) {
info.ClusterID = cluster
info.InfobaseID = infobase
})
return response.List, err
}
func (c *Client) GetConnectionLocks(ctx context.Context, cluster uuid.UUID, connection uuid.UUID) (serialize.LocksList, error) {
req := &messages.GetConnectionLockRequest{
ClusterID: cluster,
ConnectionID: connection,
}
resp, err := c.sendEndpointRequest(ctx, req)
if err != nil {
return nil, err
}
response := resp.(*messages.GetConnectionLockResponse)
response.List.Each(func(info *serialize.LockInfo) {
info.ClusterID = cluster
})
return response.List, err
}

View File

@ -1,120 +0,0 @@
package messages
import (
"github.com/khorevaa/ras-client/protocol/codec"
"github.com/khorevaa/ras-client/serialize"
"github.com/khorevaa/ras-client/serialize/esig"
"io"
)
// GetAgentVersionRequest requests the cluster agent version
//
// type GET_AGENT_VERSION_REQUEST
// respond GetAgentVersionResponse
type GetAgentVersionRequest struct{}
func (r *GetAgentVersionRequest) Sig() esig.ESIG {
return esig.Nil
}
func (r *GetAgentVersionRequest) Format(_ codec.Encoder, _ int, _ io.Writer) {}
func (_ *GetAgentVersionRequest) Type() EndpointMessageType {
return GET_AGENT_VERSION_REQUEST
}
// GetAgentVersionResponse is the response with the cluster agent version
//
// type GET_AGENT_VERSION_RESPONSE
// Version string
type GetAgentVersionResponse struct {
Version string
}
func (res *GetAgentVersionResponse) Parse(decoder codec.Decoder, _ int, r io.Reader) {
decoder.StringPtr(&res.Version, r)
}
func (_ *GetAgentVersionResponse) Type() EndpointMessageType {
return GET_AGENT_VERSION_RESPONSE
}
// GetAgentAdminsRequest requests the list of agent administrators
//
// type GET_AGENT_ADMINS_REQUEST
// respond GetAgentAdminsResponse
type GetAgentAdminsRequest struct{}
func (r *GetAgentAdminsRequest) Sig() esig.ESIG {
return esig.Nil
}
func (r *GetAgentAdminsRequest) Format(_ codec.Encoder, _ int, _ io.Writer) {}
func (_ *GetAgentAdminsRequest) Type() EndpointMessageType {
return GET_AGENT_ADMINS_REQUEST
}
// GetAgentAdminsResponse is the response with the list of cluster agent administrators
//
// type GET_AGENT_ADMINS_RESPONSE
// Users serialize.UsersList
type GetAgentAdminsResponse struct {
Users serialize.UsersList
}
func (res *GetAgentAdminsResponse) Parse(decoder codec.Decoder, version int, r io.Reader) {
list := serialize.UsersList{}
list.Parse(decoder, version, r)
res.Users = list
}
func (_ *GetAgentAdminsResponse) Type() EndpointMessageType {
return GET_AGENT_ADMINS_RESPONSE
}
// RegAgentAdminRequest registers an agent administrator
//
// type REG_AGENT_ADMIN_REQUEST
type RegAgentAdminRequest struct {
User serialize.UserInfo
}
func (r *RegAgentAdminRequest) Sig() esig.ESIG {
return esig.Nil
}
func (r *RegAgentAdminRequest) Format(e codec.Encoder, v int, w io.Writer) {
r.User.Format(e, v, w)
}
func (_ *RegAgentAdminRequest) Type() EndpointMessageType {
return REG_AGENT_ADMIN_REQUEST
}
// UnregAgentAdminRequest removes an agent administrator
//
// type UNREG_AGENT_ADMIN_REQUEST
type UnregAgentAdminRequest struct {
User string
}
func (r *UnregAgentAdminRequest) Sig() esig.ESIG {
return esig.Nil
}
func (r *UnregAgentAdminRequest) Format(e codec.Encoder, v int, w io.Writer) {
e.String(r.User, w)
}
func (_ *UnregAgentAdminRequest) Type() EndpointMessageType {
return UNREG_AGENT_ADMIN_REQUEST
}

View File

@ -1,11 +0,0 @@
package messages
//
//APPLY_ASSIGNMENT_RULES_REQUEST EndpointMessageType = 81 + iota
//REG_ASSIGNMENT_RULE_REQUEST
//REG_ASSIGNMENT_RULE_RESPONSE
//UNREG_ASSIGNMENT_RULE_REQUEST
//GET_ASSIGNMENT_RULES_REQUEST
//GET_ASSIGNMENT_RULES_RESPONSE
//GET_ASSIGNMENT_RULE_INFO_REQUEST
//GET_ASSIGNMENT_RULE_INFO_RESPONSE

View File

@ -1,82 +0,0 @@
package messages
import (
"github.com/khorevaa/ras-client/protocol/codec"
"github.com/khorevaa/ras-client/serialize/esig"
uuid "github.com/satori/go.uuid"
"io"
)
// ClusterAuthenticateRequest sets the authentication on a cluster
//
// type AUTHENTICATE_REQUEST = 10
// kind MESSAGE_KIND = 1
// respond nothing
type ClusterAuthenticateRequest struct {
ClusterID uuid.UUID
User, Password string
}
func (r ClusterAuthenticateRequest) Sig() esig.ESIG {
return esig.FromUuid(r.ClusterID)
}
func (r ClusterAuthenticateRequest) Format(encoder codec.Encoder, _ int, w io.Writer) {
encoder.Uuid(r.ClusterID, w)
encoder.String(r.User, w)
encoder.String(r.Password, w)
}
func (_ ClusterAuthenticateRequest) Type() EndpointMessageType {
return AUTHENTICATE_REQUEST
}
// AuthenticateAgentRequest sets the authentication on the agent
//
// type AUTHENTICATE_AGENT_REQUEST = 9
// kind MESSAGE_KIND = 1
// respond nothing
type AuthenticateAgentRequest struct {
User, Password string
}
func (_ AuthenticateAgentRequest) Sig() esig.ESIG {
return esig.Nil
}
func (_ AuthenticateAgentRequest) Type() EndpointMessageType {
return AUTHENTICATE_AGENT_REQUEST
}
func (r AuthenticateAgentRequest) Format(encoder codec.Encoder, _ int, w io.Writer) {
encoder.String(r.User, w)
encoder.String(r.Password, w)
}
// AuthenticateInfobaseRequest sets the authentication on an infobase
//
// type ADD_AUTHENTICATION_REQUEST = 11
// kind MESSAGE_KIND = 1
// respond nothing
type AuthenticateInfobaseRequest struct {
ClusterID uuid.UUID
User, Password string
}
func (r AuthenticateInfobaseRequest) Sig() esig.ESIG {
return esig.FromUuid(r.ClusterID)
}
func (_ AuthenticateInfobaseRequest) Type() EndpointMessageType {
return ADD_AUTHENTICATION_REQUEST
}
func (r AuthenticateInfobaseRequest) Format(encoder codec.Encoder, _ int, w io.Writer) {
encoder.Uuid(r.ClusterID, w)
encoder.String(r.User, w)
encoder.String(r.Password, w)
}

View File

@ -1,236 +0,0 @@
package messages
import (
"github.com/khorevaa/ras-client/protocol/codec"
"github.com/khorevaa/ras-client/serialize"
"github.com/khorevaa/ras-client/serialize/esig"
uuid "github.com/satori/go.uuid"
"io"
)
var _ EndpointRequestMessage = (*GetClustersRequest)(nil)
// GetClustersRequest requests the list of clusters
//
// type GET_CLUSTERS_REQUEST = 11
// kind MESSAGE_KIND = 1
// respond GetClustersResponse
type GetClustersRequest struct{}
func (_ *GetClustersRequest) Sig() esig.ESIG {
return esig.Nil
}
func (_ *GetClustersRequest) Format(_ codec.Encoder, _ int, _ io.Writer) {}
func (_ GetClustersRequest) Type() EndpointMessageType {
return GET_CLUSTERS_REQUEST
}
// GetClustersResponse is the response with the list of clusters
//
// type GET_CLUSTERS_RESPONSE = 12
// kind MESSAGE_KIND = 1
// Clusters []*serialize.ClusterInfo
type GetClustersResponse struct {
Clusters []*serialize.ClusterInfo
}
func (res *GetClustersResponse) Parse(decoder codec.Decoder, version int, r io.Reader) {
count := decoder.Size(r)
for i := 0; i < count; i++ {
info := &serialize.ClusterInfo{}
info.Parse(decoder, version, r)
res.Clusters = append(res.Clusters, info)
}
}
func (_ *GetClustersResponse) Type() EndpointMessageType {
return GET_CLUSTERS_RESPONSE
}
var _ EndpointRequestMessage = (*GetClusterInfoRequest)(nil)
// GetClusterInfoRequest requests information about a cluster
//
// type GET_CLUSTER_INFO_REQUEST = 13
// kind MESSAGE_KIND = 1
// respond GetClusterInfoResponse
type GetClusterInfoRequest struct {
ClusterID uuid.UUID
response *GetClusterInfoResponse
}
func (r *GetClusterInfoRequest) Sig() esig.ESIG {
return esig.FromUuid(r.ClusterID)
}
func (r *GetClusterInfoRequest) Format(encoder codec.Encoder, _ int, w io.Writer) {
encoder.Uuid(r.ClusterID, w)
}
func (_ *GetClusterInfoRequest) Type() EndpointMessageType {
return GET_CLUSTER_INFO_REQUEST
}
// GetClusterInfoResponse is the response with the cluster information
//
// type GET_CLUSTER_INFO_RESPONSE = 14
// Info serialize.ClusterInfo
type GetClusterInfoResponse struct {
Info serialize.ClusterInfo
}
func (res *GetClusterInfoResponse) Parse(decoder codec.Decoder, version int, r io.Reader) {
info := serialize.ClusterInfo{}
info.Parse(decoder, version, r)
res.Info = info
}
func (_ *GetClusterInfoResponse) Type() EndpointMessageType {
return GET_CLUSTER_INFO_RESPONSE
}
// RegClusterRequest registers a new cluster
//
// type REG_CLUSTER_REQUEST
// respond RegClusterResponse
type RegClusterRequest struct {
Info serialize.ClusterInfo
}
func (r *RegClusterRequest) Sig() esig.ESIG {
return esig.Nil
}
func (r *RegClusterRequest) Format(encoder codec.Encoder, version int, w io.Writer) {
r.Info.Format(encoder, version, w)
}
func (_ *RegClusterRequest) Type() EndpointMessageType {
return REG_CLUSTER_REQUEST
}
// RegClusterResponse is the response with the ID of the registered cluster
//
// type REG_CLUSTER_RESPONSE
// ClusterID uuid.UUID
type RegClusterResponse struct {
ClusterID uuid.UUID
}
func (res *RegClusterResponse) Parse(decoder codec.Decoder, r io.Reader) {
decoder.UuidPtr(&res.ClusterID, r)
}
func (_ *RegClusterResponse) Type() EndpointMessageType {
return REG_CLUSTER_RESPONSE
}
// UnregClusterRequest removes a registered cluster
//
// type UNREG_CLUSTER_REQUEST
// respond nothing
type UnregClusterRequest struct {
ClusterID uuid.UUID
}
func (r *UnregClusterRequest) Sig() esig.ESIG {
return esig.Nil
}
func (r *UnregClusterRequest) Format(encoder codec.Encoder, _ int, w io.Writer) {
encoder.Uuid(r.ClusterID, w)
}
func (_ *UnregClusterRequest) Type() EndpointMessageType {
return UNREG_CLUSTER_REQUEST
}
// GetClusterAdminsRequest requests the list of cluster administrators
//
// type GET_CLUSTER_ADMINS_REQUEST
// respond GetClusterAdminsResponse
type GetClusterAdminsRequest struct {
ClusterID uuid.UUID
}
func (r *GetClusterAdminsRequest) Sig() esig.ESIG {
return esig.FromUuid(r.ClusterID)
}
func (r *GetClusterAdminsRequest) Format(e codec.Encoder, _ int, w io.Writer) {
e.Uuid(r.ClusterID, w)
}
func (_ *GetClusterAdminsRequest) Type() EndpointMessageType {
return GET_CLUSTER_ADMINS_REQUEST
}
// GetClusterAdminsResponse is the response with the list of cluster administrators
//
// type GET_CLUSTER_ADMINS_RESPONSE
// Users serialize.UsersList
type GetClusterAdminsResponse struct {
Users serialize.UsersList
}
func (res *GetClusterAdminsResponse) Parse(decoder codec.Decoder, version int, r io.Reader) {
list := serialize.UsersList{}
list.Parse(decoder, version, r)
res.Users = list
}
func (_ *GetClusterAdminsResponse) Type() EndpointMessageType {
return GET_CLUSTER_ADMINS_RESPONSE
}
// RegClusterAdminRequest registers a cluster administrator
//
// type REG_CLUSTER_ADMIN_REQUEST
type RegClusterAdminRequest struct {
ClusterID uuid.UUID
User serialize.UserInfo
}
func (r *RegClusterAdminRequest) Sig() esig.ESIG {
return esig.Nil
}
func (r *RegClusterAdminRequest) Format(e codec.Encoder, v int, w io.Writer) {
e.Uuid(r.ClusterID, w)
r.User.Format(e, v, w)
}
func (_ *RegClusterAdminRequest) Type() EndpointMessageType {
return REG_CLUSTER_ADMIN_REQUEST
}
// UnregClusterAdminRequest removes a cluster administrator
//
// type UNREG_CLUSTER_ADMIN_REQUEST
type UnregClusterAdminRequest struct {
ClusterID uuid.UUID
User string
}
func (r *UnregClusterAdminRequest) Sig() esig.ESIG {
return esig.Nil
}
func (r *UnregClusterAdminRequest) Format(e codec.Encoder, v int, w io.Writer) {
e.Uuid(r.ClusterID, w)
e.String(r.User, w)
}
func (_ *UnregClusterAdminRequest) Type() EndpointMessageType {
return UNREG_CLUSTER_ADMIN_REQUEST
}

View File

@ -1,128 +0,0 @@
package messages
import (
"github.com/khorevaa/ras-client/protocol/codec"
"github.com/khorevaa/ras-client/serialize"
"github.com/khorevaa/ras-client/serialize/esig"
uuid "github.com/satori/go.uuid"
"io"
)
var _ EndpointRequestMessage = (*GetConnectionsShortRequest)(nil)
// GetConnectionsShortRequest requests the short list of cluster connections
//
// type GET_CONNECTIONS_SHORT_REQUEST = 51
// kind MESSAGE_KIND = 1
// respond GetConnectionsShortResponse
type GetConnectionsShortRequest struct {
ClusterID uuid.UUID
}
func (r *GetConnectionsShortRequest) Sig() esig.ESIG {
return esig.FromUuid(r.ClusterID)
}
func (_ *GetConnectionsShortRequest) Type() EndpointMessageType {
return GET_CONNECTIONS_SHORT_REQUEST
}
func (r *GetConnectionsShortRequest) Format(encoder codec.Encoder, _ int, w io.Writer) {
encoder.Uuid(r.ClusterID, w)
}
// GetConnectionsShortResponse returns the short list of cluster connections
//
// type GET_CONNECTIONS_SHORT_RESPONSE = 52
// kind MESSAGE_KIND = 1
// Connections serialize.ConnectionShortInfoList
type GetConnectionsShortResponse struct {
Connections serialize.ConnectionShortInfoList
}
func (_ *GetConnectionsShortResponse) Type() EndpointMessageType {
return GET_CONNECTIONS_SHORT_RESPONSE
}
func (res *GetConnectionsShortResponse) Parse(decoder codec.Decoder, version int, r io.Reader) {
list := serialize.ConnectionShortInfoList{}
list.Parse(decoder, version, r)
res.Connections = list
}
var _ EndpointRequestMessage = (*DisconnectConnectionRequest)(nil)
// DisconnectConnectionRequest forcibly closes a connection
//
// type DISCONNECT_REQUEST = 59
// respond nothing
type DisconnectConnectionRequest struct {
ClusterID uuid.UUID
ProcessID uuid.UUID
InfobaseID uuid.UUID
ConnectionID uuid.UUID
}
func (r *DisconnectConnectionRequest) Sig() esig.ESIG {
return esig.From2Uuid(r.ClusterID, r.InfobaseID)
}
func (_ *DisconnectConnectionRequest) Type() EndpointMessageType {
return DISCONNECT_REQUEST
}
func (r *DisconnectConnectionRequest) Format(encoder codec.Encoder, _ int, w io.Writer) {
encoder.Uuid(r.ClusterID, w)
encoder.Uuid(r.ProcessID, w)
encoder.Uuid(r.ConnectionID, w)
}
var _ EndpointRequestMessage = (*GetInfobaseConnectionsShortRequest)(nil)
// GetInfobaseConnectionsShortRequest requests the short list of connections for an infobase
//
// type GET_INFOBASE_CONNECTIONS_SHORT_REQUEST = 52
// kind MESSAGE_KIND = 1
// respond GetInfobaseConnectionsShortResponse
type GetInfobaseConnectionsShortRequest struct {
ClusterID uuid.UUID
InfobaseID uuid.UUID
}
func (r *GetInfobaseConnectionsShortRequest) Sig() esig.ESIG {
return esig.From2Uuid(r.ClusterID, r.InfobaseID)
}
func (_ *GetInfobaseConnectionsShortRequest) Type() EndpointMessageType {
return GET_INFOBASE_CONNECTIONS_SHORT_REQUEST
}
func (r *GetInfobaseConnectionsShortRequest) Format(encoder codec.Encoder, _ int, w io.Writer) {
encoder.Uuid(r.ClusterID, w)
encoder.Uuid(r.InfobaseID, w)
}
// GetInfobaseConnectionsShortResponse returns the short list of connections for an infobase
//
// type GET_INFOBASE_CONNECTIONS_SHORT_RESPONSE = 53
// kind MESSAGE_KIND = 1
// Connections serialize.ConnectionShortInfoList
type GetInfobaseConnectionsShortResponse struct {
Connections serialize.ConnectionShortInfoList
}
func (_ *GetInfobaseConnectionsShortResponse) Type() EndpointMessageType {
return GET_INFOBASE_CONNECTIONS_SHORT_RESPONSE
}
func (res *GetInfobaseConnectionsShortResponse) Parse(decoder codec.Decoder, version int, r io.Reader) {
list := serialize.ConnectionShortInfoList{}
list.Parse(decoder, version, r)
res.Connections = list
}
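Note that every request also reports an esig.ESIG scope: cluster-wide requests sign with the cluster UUID alone, infobase-scoped ones with the (cluster, infobase) pair, which lets a client reuse an authenticated endpoint per scope. The toy sketch below mirrors that idea with plain byte arrays; it illustrates the scoping concept only and is not the actual esig implementation.
package main
import "fmt"
// sig is a toy stand-in for esig.ESIG: a scope key built from one or two UUIDs.
type sig [32]byte
func fromUUID(a [16]byte) sig {
	var s sig
	copy(s[:16], a[:])
	return s
}
func from2UUID(a, b [16]byte) sig {
	s := fromUUID(a)
	copy(s[16:], b[:])
	return s
}
func main() {
	var cluster, infobase [16]byte
	cluster[15], infobase[15] = 1, 2
	// Cluster-wide request (e.g. GetConnectionsShortRequest) vs.
	// infobase-scoped request (e.g. GetInfobaseConnectionsShortRequest).
	clusterScope := fromUUID(cluster)
	infobaseScope := from2UUID(cluster, infobase)
	fmt.Println(clusterScope == infobaseScope) // false: different auth scopes
}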


@ -1,220 +0,0 @@
package messages
import "fmt"
const (
NEGOTIATE byte = iota
CONNECT
CONNECT_ACK
START_TLS // Deprecated: not implemented in the API
DISCONNECT
SASL_NEGOTIATE // Deprecated: not implemented in the API
SASL_AUTH // Deprecated: not implemented in the API
SASL_CHALLENGE // Deprecated: not implemented in the API
SASL_SUCCESS // Deprecated: not implemented in the API
SASL_FAILURE // Deprecated: not implemented in the API
SASL_ABORT // Deprecated: not implemented in the API
ENDPOINT_OPEN
ENDPOINT_OPEN_ACK
ENDPOINT_CLOSE
ENDPOINT_MESSAGE
ENDPOINT_FAILURE
KEEP_ALIVE
NULL_TYPE = 127
)
const (
GET_AGENT_ADMINS_REQUEST EndpointMessageType = iota
GET_AGENT_ADMINS_RESPONSE
GET_CLUSTER_ADMINS_REQUEST
GET_CLUSTER_ADMINS_RESPONSE
REG_AGENT_ADMIN_REQUEST
REG_CLUSTER_ADMIN_REQUEST
UNREG_AGENT_ADMIN_REQUEST
UNREG_CLUSTER_ADMIN_REQUEST
AUTHENTICATE_AGENT_REQUEST
AUTHENTICATE_REQUEST
ADD_AUTHENTICATION_REQUEST
GET_CLUSTERS_REQUEST
GET_CLUSTERS_RESPONSE
GET_CLUSTER_INFO_REQUEST
GET_CLUSTER_INFO_RESPONSE
REG_CLUSTER_REQUEST
REG_CLUSTER_RESPONSE
UNREG_CLUSTER_REQUEST
GET_CLUSTER_MANAGERS_REQUEST
GET_CLUSTER_MANAGERS_RESPONSE
GET_CLUSTER_MANAGER_INFO_REQUEST
GET_CLUSTER_MANAGER_INFO_RESPONSE
GET_WORKING_SERVERS_REQUEST
GET_WORKING_SERVERS_RESPONSE
GET_WORKING_SERVER_INFO_REQUEST
GET_WORKING_SERVER_INFO_RESPONSE
REG_WORKING_SERVER_REQUEST
REG_WORKING_SERVER_RESPONSE
UNREG_WORKING_SERVER_REQUEST
GET_WORKING_PROCESSES_REQUEST
GET_WORKING_PROCESSES_RESPONSE
GET_WORKING_PROCESS_INFO_REQUEST
GET_WORKING_PROCESS_INFO_RESPONSE
GET_SERVER_WORKING_PROCESSES_REQUEST
GET_SERVER_WORKING_PROCESSES_RESPONSE
GET_CLUSTER_SERVICES_REQUEST
GET_CLUSTER_SERVICES_RESPONSE
CREATE_INFOBASE_REQUEST
CREATE_INFOBASE_RESPONSE
UPDATE_INFOBASE_SHORT_REQUEST
UPDATE_INFOBASE_REQUEST
DROP_INFOBASE_REQUEST
GET_INFOBASES_SHORT_REQUEST
GET_INFOBASES_SHORT_RESPONSE
GET_INFOBASES_REQUEST // Deprecated: Bug: the service does not answer this request
GET_INFOBASES_RESPONSE // Deprecated: Bug: see GET_INFOBASES_REQUEST
GET_INFOBASE_SHORT_INFO_REQUEST
GET_INFOBASE_SHORT_INFO_RESPONSE
GET_INFOBASE_INFO_REQUEST
GET_INFOBASE_INFO_RESPONSE
GET_CONNECTIONS_SHORT_REQUEST
GET_CONNECTIONS_SHORT_RESPONSE
GET_INFOBASE_CONNECTIONS_SHORT_REQUEST
GET_INFOBASE_CONNECTIONS_SHORT_RESPONSE
GET_CONNECTION_INFO_SHORT_REQUEST
GET_CONNECTION_INFO_SHORT_RESPONSE
GET_INFOBASE_CONNECTIONS_REQUEST
GET_INFOBASE_CONNECTIONS_RESPONSE
)
const (
DISCONNECT_REQUEST EndpointMessageType = 64 + iota // 64
GET_SESSIONS_REQUEST
GET_SESSIONS_RESPONSE
GET_INFOBASE_SESSIONS_REQUEST
GET_INFOBASE_SESSIONS_RESPONSE
GET_SESSION_INFO_REQUEST
GET_SESSION_INFO_RESPONSE
TERMINATE_SESSION_REQUEST
GET_LOCKS_REQUEST
GET_LOCKS_RESPONSE
GET_INFOBASE_LOCKS_REQUEST
GET_INFOBASE_LOCKS_RESPONSE
GET_CONNECTION_LOCKS_REQUEST
GET_CONNECTION_LOCKS_RESPONSE
GET_SESSION_LOCKS_REQUEST
GET_SESSION_LOCKS_RESPONSE
)
const (
APPLY_ASSIGNMENT_RULES_REQUEST EndpointMessageType = 81 + iota
REG_ASSIGNMENT_RULE_REQUEST
REG_ASSIGNMENT_RULE_RESPONSE
UNREG_ASSIGNMENT_RULE_REQUEST
GET_ASSIGNMENT_RULES_REQUEST
GET_ASSIGNMENT_RULES_RESPONSE
GET_ASSIGNMENT_RULE_INFO_REQUEST
GET_ASSIGNMENT_RULE_INFO_RESPONSE
GET_SECURITY_PROFILES_REQUEST
GET_SECURITY_PROFILES_RESPONSE
CREATE_SECURITY_PROFILE_REQUEST
DROP_SECURITY_PROFILE_REQUEST
GET_VIRTUAL_DIRECTORIES_REQUEST
GET_VIRTUAL_DIRECTORIES_RESPONSE
CREATE_VIRTUAL_DIRECTORY_REQUEST
DROP_VIRTUAL_DIRECTORY_REQUEST
GET_COM_CLASSES_REQUEST
GET_COM_CLASSES_RESPONSE
CREATE_COM_CLASS_REQUEST
DROP_COM_CLASS_REQUEST
GET_ALLOWED_ADDINS_REQUEST
GET_ALLOWED_ADDINS_RESPONSE
CREATE_ALLOWED_ADDIN_REQUEST
DROP_ALLOWED_ADDIN_REQUEST
GET_EXTERNAL_MODULES_REQUEST
GET_EXTERNAL_MODULES_RESPONSE
CREATE_EXTERNAL_MODULE_REQUEST
DROP_EXTERNAL_MODULE_REQUEST
GET_ALLOWED_APPLICATIONS_REQUEST
GET_ALLOWED_APPLICATIONS_RESPONSE
CREATE_ALLOWED_APPLICATION_REQUEST
DROP_ALLOWED_APPLICATION_REQUEST
GET_INTERNET_RESOURCES_REQUEST
GET_INTERNET_RESOURCES_RESPONSE
CREATE_INTERNET_RESOURCE_REQUEST
DROP_INTERNET_RESOURCE_REQUEST
INTERRUPT_SESSION_CURRENT_SERVER_CALL_REQUEST
GET_RESOURCE_COUNTERS_REQUEST
GET_RESOURCE_COUNTERS_RESPONSE
GET_RESOURCE_COUNTER_INFO_REQUEST
GET_RESOURCE_COUNTER_INFO_RESPONSE
REG_RESOURCE_COUNTER_REQUEST
UNREG_RESOURCE_COUNTER_REQUEST
GET_RESOURCE_LIMITS_REQUEST
GET_RESOURCE_LIMITS_RESPONSE
GET_RESOURCE_LIMIT_INFO_REQUEST
GET_RESOURCE_LIMIT_INFO_RESPONSE
REG_RESOURCE_LIMIT_REQUEST
UNREG_RESOURCE_LIMIT_REQUEST
GET_COUNTER_VALUES_REQUEST
GET_COUNTER_VALUES_RESPONSE
CLEAR_COUNTER_VALUE_REQUEST
GET_COUNTER_ACCUMULATED_VALUES_REQUEST
GET_COUNTER_ACCUMULATED_VALUES_RESPONSE
GET_AGENT_VERSION_REQUEST
GET_AGENT_VERSION_RESPONSE
)
type EndpointMessageType byte
func (t EndpointMessageType) Parser() interface{} {
switch t {
case GET_CLUSTERS_RESPONSE:
return &GetClustersResponse{}
case GET_CLUSTER_INFO_RESPONSE:
return &GetClusterInfoResponse{}
case GET_CLUSTER_MANAGERS_RESPONSE:
return &GetClusterManagersResponse{}
case GET_CLUSTER_SERVICES_RESPONSE:
return &GetClusterServicesResponse{}
case CREATE_INFOBASE_RESPONSE:
return &CreateInfobaseResponse{}
case GET_INFOBASES_SHORT_RESPONSE:
return &GetInfobasesShortResponse{}
case GET_INFOBASE_INFO_RESPONSE:
return &GetInfobaseInfoResponse{}
case GET_CONNECTIONS_SHORT_RESPONSE:
return &GetConnectionsShortResponse{}
case GET_INFOBASE_CONNECTIONS_SHORT_RESPONSE:
return &GetInfobaseConnectionsShortResponse{}
case GET_SESSIONS_RESPONSE:
return &GetSessionsResponse{}
case GET_INFOBASE_SESSIONS_RESPONSE:
return &GetInfobaseSessionsResponse{}
case GET_LOCKS_RESPONSE:
return &GetLocksResponse{}
case GET_INFOBASE_LOCKS_RESPONSE:
return &GetInfobaseLockResponse{}
case GET_CONNECTION_LOCKS_RESPONSE:
return &GetConnectionLockResponse{}
case GET_SESSION_LOCKS_RESPONSE:
return &GetSessionLockResponse{}
case GET_WORKING_PROCESS_INFO_RESPONSE:
return &GetWorkingProcessInfoResponse{}
case GET_WORKING_PROCESSES_RESPONSE:
return &GetWorkingProcessesResponse{}
case GET_WORKING_SERVERS_RESPONSE:
return &GetWorkingServersResponse{}
case GET_WORKING_SERVER_INFO_RESPONSE:
return &GetWorkingServerInfoResponse{}
case REG_WORKING_SERVER_RESPONSE:
return &RegWorkingServerResponse{}
case REG_CLUSTER_RESPONSE:
return &RegClusterResponse{}
case GET_AGENT_ADMINS_RESPONSE:
return &GetAgentAdminsResponse{}
case GET_CLUSTER_ADMINS_RESPONSE:
return &GetClusterAdminsResponse{}
case GET_AGENT_VERSION_RESPONSE:
return &GetAgentVersionResponse{}
default:
panic("unknown typed parser id: " + string(t))
}
}
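The Parser switch above is effectively a registry mapping a response type byte to an empty message that is then filled by Parse. The same dispatch can be expressed as a map, sketched below with toy types; the numeric value 12 for GET_CLUSTERS_RESPONSE follows from the iota block above, and nothing here is part of ras-client itself.
package main
import "fmt"
type messageType byte
// parser mirrors the role of codec.BinaryParser in ras-client.
type parser interface {
	name() string
}
type getClustersResponse struct{}
func (getClustersResponse) name() string { return "GetClustersResponse" }
// registry replaces the big switch: type byte -> factory for the response struct.
var registry = map[messageType]func() parser{
	12: func() parser { return getClustersResponse{} }, // GET_CLUSTERS_RESPONSE
}
func lookup(t messageType) (parser, error) {
	factory, ok := registry[t]
	if !ok {
		return nil, fmt.Errorf("unknown typed parser id: %d", t)
	}
	return factory(), nil
}
func main() {
	p, err := lookup(12)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(p.name())
}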


@ -1,111 +0,0 @@
package messages
import (
"fmt"
"github.com/k0kubun/pp"
"github.com/khorevaa/ras-client/protocol/codec"
"io"
"strings"
)
//goland:noinspection ALL
const (
VOID_MESSAGE_KIND EndpointMessageKind = 0
MESSAGE_KIND EndpointMessageKind = 1
EXCEPTION_KIND EndpointMessageKind = 0xff
)
type EndpointMessageKind int
func (e EndpointMessageKind) Type() byte {
return byte(e)
}
type EndpointMessageFailure struct {
ServiceID string `json:"service_id"`
ErrorType string `json:"type"`
EndpointID int `json:"endpoint_id,omitempty"`
Message string `json:"message"`
}
func (m *EndpointMessageFailure) Parse(c codec.Decoder, r io.Reader) {
c.StringPtr(&m.ServiceID, r)
c.StringPtr(&m.Message, r)
msg := strings.Split(m.ServiceID, "#")
if len(msg) == 2 {
m.ServiceID = msg[0]
m.ErrorType = msg[1]
}
}
func (m *EndpointMessageFailure) String() string {
return pp.Sprintln(m)
}
func (m *EndpointMessageFailure) Type() EndpointMessageKind {
return EXCEPTION_KIND
}
func (m *EndpointMessageFailure) Error() string {
return fmt.Sprintf("endpoint: %d service: %s msg: %s", m.EndpointID, m.ServiceID, m.Message)
}
type EndpointMessage struct {
EndpointID int
EndpointFormat int16
Message interface{}
Type EndpointMessageType
Kind EndpointMessageKind
}
func (m *EndpointMessage) Parse(decoder codec.Decoder, version int, reader io.Reader) {
m.Kind = EndpointMessageKind(decoder.Byte(reader))
switch m.Kind {
case VOID_MESSAGE_KIND:
return
case EXCEPTION_KIND:
fail := &EndpointMessageFailure{EndpointID: m.EndpointID}
fail.Parse(decoder, reader)
m.Message = fail
case MESSAGE_KIND:
respondType := decoder.Byte(reader)
m.Type = EndpointMessageType(respondType)
respond := m.Type.Parser()
parser := respond.(codec.BinaryParser)
// TODO: select the response parser by message type
parser.Parse(decoder, version, reader)
m.Message = parser
default:
panic("unknown message kind")
}
}
func (m *EndpointMessage) Format(encoder codec.Encoder, version int, w io.Writer) {
encoder.EndpointId(m.EndpointID, w)
encoder.Short(m.EndpointFormat, w)
encoder.Byte(byte(m.Kind), w)
encoder.Byte(byte(m.Type), w) // MAGIC: without this byte the central cluster demands authorization
formatter := m.Message.(codec.BinaryWriter)
formatter.Format(encoder, version, w) // write the message body
}
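EndpointMessage.Format above fixes the envelope layout: endpoint id, endpoint format, kind byte, type byte, then the message body. The standalone sketch below frames a payload in the same order using assumed fixed-width integer encodings (the real codec package uses its own scheme), purely to visualize the framing.
package main
import (
	"bytes"
	"encoding/binary"
	"fmt"
)
// frame mirrors the field order of EndpointMessage.Format; the integer widths
// here are assumptions, the real codec package encodes them differently.
func frame(endpointID int, format int16, kind, msgType byte, body []byte) []byte {
	buf := &bytes.Buffer{}
	_ = binary.Write(buf, binary.BigEndian, uint16(endpointID))
	_ = binary.Write(buf, binary.BigEndian, format)
	buf.WriteByte(kind)    // MESSAGE_KIND = 1
	buf.WriteByte(msgType) // e.g. GET_CLUSTERS_REQUEST
	buf.Write(body)
	return buf.Bytes()
}
func main() {
	payload := frame(1, 0, 1, 11, nil) // 11 = GET_CLUSTERS_REQUEST per the iota block
	fmt.Printf("% x\n", payload)
}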


@ -1,15 +0,0 @@
package messages
type UnknownMessageError struct {
Type byte
Data []byte
EndpointID int
Err error
ServiceID string
}
func (m *UnknownMessageError) Error() string {
return m.Err.Error()
}
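UnknownMessageError carries the raw payload alongside the underlying error, so a caller can branch on it with errors.As. A minimal, standalone sketch of that handling pattern follows; the error type is redeclared locally for the example and only mirrors the fields above.
package main
import (
	"errors"
	"fmt"
)
// unknownMessageError is a local copy of messages.UnknownMessageError for the sketch.
type unknownMessageError struct {
	Type byte
	Data []byte
	Err  error
}
func (m *unknownMessageError) Error() string { return m.Err.Error() }
func main() {
	err := fmt.Errorf("read endpoint: %w", &unknownMessageError{
		Type: 127, // NULL_TYPE
		Err:  errors.New("no parser registered"),
	})
	var unknown *unknownMessageError
	if errors.As(err, &unknown) {
		fmt.Printf("dropping message type %d (%d bytes)\n", unknown.Type, len(unknown.Data))
	}
}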


@ -1,222 +0,0 @@
package messages
import (
"github.com/khorevaa/ras-client/protocol/codec"
"github.com/khorevaa/ras-client/serialize"
"github.com/khorevaa/ras-client/serialize/esig"
uuid "github.com/satori/go.uuid"
"io"
)
var _ EndpointRequestMessage = (*GetInfobasesShortRequest)(nil)
// GetInfobasesShortRequest requests the summary list of the cluster's infobases
//
// type GET_INFOBASES_SHORT_REQUEST = 43
// kind MESSAGE_KIND = 1
// respond GetInfobasesShortResponse
type GetInfobasesShortRequest struct {
ClusterID uuid.UUID
}
func (r GetInfobasesShortRequest) Sig() esig.ESIG {
return esig.FromUuid(r.ClusterID)
}
func (r GetInfobasesShortRequest) Format(encoder codec.Encoder, _ int, w io.Writer) {
encoder.Uuid(r.ClusterID, w)
}
func (_ GetInfobasesShortRequest) Type() EndpointMessageType {
return GET_INFOBASES_SHORT_REQUEST
}
// GetInfobasesShortResponse returns the summary list of infobases
// type GET_INFOBASES_SHORT_RESPONSE = 44
type GetInfobasesShortResponse struct {
Infobases serialize.InfobaseSummaryList
}
func (res *GetInfobasesShortResponse) Parse(decoder codec.Decoder, version int, r io.Reader) {
list := serialize.InfobaseSummaryList{}
list.Parse(decoder, version, r)
res.Infobases = list
}
func (_ *GetInfobasesShortResponse) Type() EndpointMessageType {
return GET_INFOBASES_SHORT_RESPONSE
}
var _ EndpointRequestMessage = (*CreateInfobaseRequest)(nil)
// CreateInfobaseRequest requests creation of a new infobase
//
// type CREATE_INFOBASE_REQUEST = 38
// kind MESSAGE_KIND = 1
// respond CreateInfobaseResponse
type CreateInfobaseRequest struct {
ClusterID uuid.UUID
Infobase *serialize.InfobaseInfo
Mode int // Mode: 1 - create the database on the server, 0 - do not create it
}
func (r *CreateInfobaseRequest) Sig() esig.ESIG {
return esig.FromUuid(r.ClusterID)
}
func (_ *CreateInfobaseRequest) Type() EndpointMessageType {
return CREATE_INFOBASE_REQUEST
}
func (r *CreateInfobaseRequest) Format(encoder codec.Encoder, version int, w io.Writer) {
encoder.Uuid(r.ClusterID, w)
r.Infobase.Format(encoder, version, w)
encoder.Int(r.Mode, w)
}
// CreateInfobaseResponse is the response to creating a new infobase
// type CREATE_INFOBASE_RESPONSE = 39
// return uuid.UUID of the created infobase
type CreateInfobaseResponse struct {
InfobaseID uuid.UUID
}
func (_ *CreateInfobaseResponse) Type() EndpointMessageType {
return CREATE_INFOBASE_RESPONSE
}
func (res *CreateInfobaseResponse) Parse(decoder codec.Decoder, _ int, r io.Reader) {
decoder.UuidPtr(&res.InfobaseID, r)
}
var _ EndpointRequestMessage = (*GetInfobaseInfoRequest)(nil)
// GetInfobaseInfoRequest requests detailed information about an infobase
//
// type GET_INFOBASE_INFO_REQUEST = 49
// kind MESSAGE_KIND = 1
// respond GetInfobaseInfoResponse
type GetInfobaseInfoRequest struct {
ClusterID uuid.UUID
InfobaseID uuid.UUID
}
func (r *GetInfobaseInfoRequest) Sig() esig.ESIG {
return esig.From2Uuid(r.ClusterID, r.InfobaseID)
}
func (_ *GetInfobaseInfoRequest) Type() EndpointMessageType {
return GET_INFOBASE_INFO_REQUEST
}
func (r *GetInfobaseInfoRequest) Format(encoder codec.Encoder, _ int, w io.Writer) {
encoder.Uuid(r.ClusterID, w)
encoder.Uuid(r.InfobaseID, w)
}
// GetInfobaseInfoResponse returns detailed information about the infobase
// type GET_INFOBASE_INFO_RESPONSE = 50
// return serialize.InfobaseInfo
type GetInfobaseInfoResponse struct {
Infobase serialize.InfobaseInfo
}
func (_ *GetInfobaseInfoResponse) Type() EndpointMessageType {
return GET_INFOBASE_INFO_RESPONSE
}
func (res *GetInfobaseInfoResponse) Parse(decoder codec.Decoder, version int, r io.Reader) {
info := &serialize.InfobaseInfo{}
info.Parse(decoder, version, r)
res.Infobase = *info
}
var _ EndpointRequestMessage = (*DropInfobaseRequest)(nil)
// DropInfobaseRequest requests deletion of an infobase
//
// type DROP_INFOBASE_REQUEST = 42
// kind MESSAGE_KIND = 1
// respond nothing
type DropInfobaseRequest struct {
ClusterID uuid.UUID
InfobaseID uuid.UUID
Mode int
}
func (r *DropInfobaseRequest) Sig() esig.ESIG {
return esig.From2Uuid(r.ClusterID, r.InfobaseID)
}
func (_ *DropInfobaseRequest) Type() EndpointMessageType {
return DROP_INFOBASE_REQUEST
}
func (r *DropInfobaseRequest) Format(encoder codec.Encoder, _ int, w io.Writer) {
encoder.Uuid(r.ClusterID, w)
encoder.Uuid(r.InfobaseID, w)
encoder.Int(r.Mode, w)
}
var _ EndpointRequestMessage = (*UpdateInfobaseRequest)(nil)
// UpdateInfobaseRequest requests an update of the full infobase description
//
// type UPDATE_INFOBASE_REQUEST = 40
// kind MESSAGE_KIND = 1
// respond nothing
type UpdateInfobaseRequest struct {
ClusterID uuid.UUID
Infobase serialize.InfobaseInfo
}
func (r *UpdateInfobaseRequest) Sig() esig.ESIG {
return esig.From2Uuid(r.ClusterID, r.Infobase.UUID)
}
func (_ *UpdateInfobaseRequest) Type() EndpointMessageType {
return UPDATE_INFOBASE_REQUEST
}
func (r *UpdateInfobaseRequest) Format(encoder codec.Encoder, version int, w io.Writer) {
encoder.Uuid(r.ClusterID, w)
r.Infobase.Format(encoder, version, w)
}
var _ EndpointRequestMessage = (*UpdateInfobaseShortRequest)(nil)
// UpdateInfobaseShortRequest requests an update of the short infobase description
//
// type UPDATE_INFOBASE_SHORT_REQUEST
// kind MESSAGE_KIND = 1
// respond nothing
type UpdateInfobaseShortRequest struct {
ClusterID uuid.UUID
Infobase serialize.InfobaseSummaryInfo
}
func (r *UpdateInfobaseShortRequest) Sig() esig.ESIG {
return esig.From2Uuid(r.ClusterID, r.Infobase.UUID)
}
func (_ *UpdateInfobaseShortRequest) Type() EndpointMessageType {
return UPDATE_INFOBASE_SHORT_REQUEST
}
func (r *UpdateInfobaseShortRequest) Format(encoder codec.Encoder, version int, w io.Writer) {
encoder.Uuid(r.ClusterID, w)
r.Infobase.Format(encoder, version, w)
}
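CreateInfobaseRequest returns a CreateInfobaseResponse whose InfobaseID then scopes the follow-up calls (GetInfobaseInfoRequest, UpdateInfobaseRequest, DropInfobaseRequest), and both create and drop end with an integer Mode flag. The sketch below shows that sequencing with hypothetical helper functions; only the field ordering and the meaning of Mode = 1 for create are taken from the code above.
package main
import "fmt"
// uuid is a toy stand-in for uuid.UUID from github.com/satori/go.uuid.
type uuid [16]byte
// createInfobase and dropInfobase are hypothetical helpers that stand in for
// sending CREATE_INFOBASE_REQUEST / DROP_INFOBASE_REQUEST through an endpoint.
func createInfobase(cluster uuid, name string, mode int) uuid {
	fmt.Printf("create %q on cluster %x (mode=%d)\n", name, cluster[:4], mode)
	return uuid{0xAA} // would come from CreateInfobaseResponse.InfobaseID
}
func dropInfobase(cluster, infobase uuid, mode int) {
	fmt.Printf("drop %x from cluster %x (mode=%d)\n", infobase[:4], cluster[:4], mode)
}
func main() {
	var cluster uuid
	// Mode 1 asks the cluster to create the server-side database as well.
	ib := createInfobase(cluster, "accounting", 1)
	// The returned InfobaseID scopes the follow-up requests
	// (GetInfobaseInfoRequest, DropInfobaseRequest, ...).
	dropInfobase(cluster, ib, 0)
}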


@ -1,197 +0,0 @@
package messages
import (
"github.com/khorevaa/ras-client/protocol/codec"
"github.com/khorevaa/ras-client/serialize"
"github.com/khorevaa/ras-client/serialize/esig"
uuid "github.com/satori/go.uuid"
"io"
)
var _ EndpointRequestMessage = (*GetLocksRequest)(nil)
// GetLocksRequest requests the list of cluster locks
//
// type GET_LOCKS_REQUEST = 66
// kind MESSAGE_KIND = 1
// respond GetLocksResponse
type GetLocksRequest struct {
ClusterID uuid.UUID
}
func (r *GetLocksRequest) Sig() esig.ESIG {
return esig.FromUuid(r.ClusterID)
}
func (_ *GetLocksRequest) Type() EndpointMessageType {
return GET_LOCKS_REQUEST
}
func (r *GetLocksRequest) Format(encoder codec.Encoder, _ int, w io.Writer) {
encoder.Uuid(r.ClusterID, w)
}
// GetLocksResponse returns the list of cluster locks
//
// type GET_LOCKS_RESPONSE = 67
// kind MESSAGE_KIND = 1
// List serialize.LocksList
type GetLocksResponse struct {
List serialize.LocksList
}
func (_ *GetLocksResponse) Type() EndpointMessageType {
return GET_LOCKS_RESPONSE
}
func (res *GetLocksResponse) Parse(decoder codec.Decoder, version int, r io.Reader) {
list := serialize.LocksList{}
list.Parse(decoder, version, r)
res.List = list
}
var _ EndpointRequestMessage = (*GetInfobaseLockRequest)(nil)
// GetInfobaseLockRequest requests the list of locks for an infobase
//
// type GET_INFOBASE_LOCKS_REQUEST = 68
// kind MESSAGE_KIND = 1
// respond GetInfobaseLockResponse
type GetInfobaseLockRequest struct {
ClusterID uuid.UUID
InfobaseID uuid.UUID
}
func (r *GetInfobaseLockRequest) Sig() esig.ESIG {
return esig.From2Uuid(r.ClusterID, r.InfobaseID)
}
func (_ *GetInfobaseLockRequest) Type() EndpointMessageType {
return GET_INFOBASE_LOCKS_REQUEST
}
func (r *GetInfobaseLockRequest) Format(encoder codec.Encoder, _ int, w io.Writer) {
encoder.Uuid(r.ClusterID, w)
encoder.Uuid(r.InfobaseID, w)
}
// GetInfobaseLockResponse returns the list of locks for the infobase
//
// type GET_INFOBASE_LOCKS_RESPONSE = 69
// kind MESSAGE_KIND = 1
// List serialize.LocksList
type GetInfobaseLockResponse struct {
List serialize.LocksList
}
func (_ *GetInfobaseLockResponse) Type() EndpointMessageType {
return GET_INFOBASE_LOCKS_RESPONSE
}
func (res *GetInfobaseLockResponse) Parse(decoder codec.Decoder, version int, r io.Reader) {
list := serialize.LocksList{}
list.Parse(decoder, version, r)
res.List = list
}
var _ EndpointRequestMessage = (*GetSessionLockRequest)(nil)
// GetSessionLockRequest requests the list of locks held by a session of an infobase
//
// type GET_SESSION_LOCKS_REQUEST = 72
// kind MESSAGE_KIND = 1
// respond GetSessionLockResponse
type GetSessionLockRequest struct {
ClusterID uuid.UUID
InfobaseID uuid.UUID
SessionID uuid.UUID
}
func (r *GetSessionLockRequest) Sig() esig.ESIG {
return esig.From2Uuid(r.ClusterID, r.InfobaseID)
}
func (_ *GetSessionLockRequest) Type() EndpointMessageType {
return GET_SESSION_LOCKS_REQUEST
}
func (r *GetSessionLockRequest) Format(encoder codec.Encoder, _ int, w io.Writer) {
encoder.Uuid(r.ClusterID, w)
encoder.Uuid(r.InfobaseID, w)
encoder.Uuid(r.SessionID, w)
}
// GetSessionLockResponse returns the list of locks held by the session
//
// type GET_SESSION_LOCKS_RESPONSE = 73
// kind MESSAGE_KIND = 1
// List serialize.LocksList
type GetSessionLockResponse struct {
List serialize.LocksList
}
func (_ *GetSessionLockResponse) Type() EndpointMessageType {
return GET_SESSION_LOCKS_RESPONSE
}
func (res *GetSessionLockResponse) Parse(decoder codec.Decoder, version int, r io.Reader) {
list := serialize.LocksList{}
list.Parse(decoder, version, r)
res.List = list
}
var _ EndpointRequestMessage = (*GetConnectionLockRequest)(nil)
// GetConnectionLockRequest requests the list of locks held by a connection
//
// type GET_CONNECTION_LOCKS_REQUEST = 70
// kind MESSAGE_KIND = 1
// respond GetConnectionLockResponse
type GetConnectionLockRequest struct {
ClusterID uuid.UUID
ConnectionID uuid.UUID
}
func (r *GetConnectionLockRequest) Sig() esig.ESIG {
return esig.FromUuid(r.ClusterID)
}
func (_ *GetConnectionLockRequest) Type() EndpointMessageType {
return GET_CONNECTION_LOCKS_REQUEST
}
func (r *GetConnectionLockRequest) Format(encoder codec.Encoder, _ int, w io.Writer) {
encoder.Uuid(r.ClusterID, w)
encoder.Uuid(r.ConnectionID, w)
}
// GetConnectionLockResponse returns the list of locks held by the connection
//
// type GET_CONNECTION_LOCKS_RESPONSE = 71
// kind MESSAGE_KIND = 1
// List serialize.LocksList
type GetConnectionLockResponse struct {
List serialize.LocksList
}
func (_ *GetConnectionLockResponse) Type() EndpointMessageType {
return GET_CONNECTION_LOCKS_RESPONSE
}
func (res *GetConnectionLockResponse) Parse(decoder codec.Decoder, version int, r io.Reader) {
list := serialize.LocksList{}
list.Parse(decoder, version, r)
res.List = list
}
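The four lock queries above differ only in scope (cluster, infobase, connection, session) and all return a serialize.LocksList. For a monitoring exporter the natural next step is to aggregate that list into counts; the sketch below does this for toy lock records, since the exact LocksList fields are not shown here and the infobase field name is an assumption.
package main
import "fmt"
// lockInfo is a toy record; the real serialize.LocksList entries expose more
// fields and possibly different names.
type lockInfo struct {
	InfobaseID string
}
func countLocksByInfobase(locks []lockInfo) map[string]int {
	counts := make(map[string]int)
	for _, l := range locks {
		counts[l.InfobaseID]++
	}
	return counts
}
func main() {
	locks := []lockInfo{{"ib-1"}, {"ib-1"}, {"ib-2"}}
	fmt.Println(countLocksByInfobase(locks)) // map[ib-1:2 ib-2:1]
}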


@ -1,58 +0,0 @@
package messages
import (
"github.com/khorevaa/ras-client/protocol/codec"
"github.com/khorevaa/ras-client/serialize"
"github.com/khorevaa/ras-client/serialize/esig"
uuid "github.com/satori/go.uuid"
"io"
)
var _ EndpointRequestMessage = (*GetClusterManagersRequest)(nil)
// GetClusterManagersRequest requests the list of cluster managers
//
// type GET_CLUSTER_MANAGERS_REQUEST = 19
// kind MESSAGE_KIND = 1
// respond GetClusterManagersResponse
type GetClusterManagersRequest struct {
ClusterID uuid.UUID
}
func (r *GetClusterManagersRequest) Sig() esig.ESIG {
return esig.FromUuid(r.ClusterID)
}
func (_ *GetClusterManagersRequest) Type() EndpointMessageType {
return GET_CLUSTER_MANAGERS_REQUEST
}
func (r *GetClusterManagersRequest) Format(encoder codec.Encoder, _ int, w io.Writer) {
encoder.Uuid(r.ClusterID, w)
}
// GetClusterManagersResponse contains the list of cluster managers
// type GET_CLUSTER_MANAGERS_RESPONSE = 20
// Managers serialize.ManagerInfo
type GetClusterManagersResponse struct {
Managers []*serialize.ManagerInfo
}
func (res *GetClusterManagersResponse) Parse(decoder codec.Decoder, version int, r io.Reader) {
count := decoder.Size(r)
for i := 0; i < count; i++ {
info := &serialize.ManagerInfo{}
info.Parse(decoder, version, r)
res.Managers = append(res.Managers, info)
}
}
func (_ *GetClusterManagersResponse) Type() EndpointMessageType {
return GET_CLUSTER_MANAGERS_RESPONSE
}
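GetClusterManagersResponse is just a slice of *serialize.ManagerInfo, so exposing it as a metric boils down to setting a gauge to len(res.Managers) per cluster. A minimal sketch with prometheus/client_golang follows; the metric name, label, and hard-coded count are illustrative assumptions, not the exporter's actual collector code.
package main
import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)
var clusterManagers = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Name: "v8_cluster_managers_total", // assumed metric name
		Help: "Number of cluster managers reported by GET_CLUSTER_MANAGERS_RESPONSE.",
	},
	[]string{"cluster"},
)
func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(clusterManagers)
	// In the real collector the count would be len(response.Managers)
	// after parsing GetClusterManagersResponse.
	clusterManagers.WithLabelValues("main-cluster").Set(2)
	families, _ := reg.Gather()
	for _, f := range families {
		fmt.Println(f.GetName(), f.GetMetric()[0].GetGauge().GetValue())
	}
}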

Some files were not shown because too many files have changed in this diff.