diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 000000000..ffbed81e6 --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,3 @@ +# These are supported funding model platforms + +github: [tonybase] diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml deleted file mode 100644 index 704643d43..000000000 --- a/.github/workflows/go.yml +++ /dev/null @@ -1,131 +0,0 @@ -name: Go - -on: - push: - branches: [ master ] - pull_request: - branches: [ master ] - -jobs: - - build: - name: Build on ${{ matrix.os }} - Go${{ matrix.go_version }} - runs-on: ${{ matrix.os }} - strategy: - matrix: - go_version: - - 1.13 - os: - - ubuntu-latest - - steps: - - - name: Set up Go ${{ matrix.go_version }} - uses: actions/setup-go@v1 - with: - go-version: ${{ matrix.go_version }} - id: go - - - name: Set up Env - run: | - echo "GOPATH=$(go env GOPATH)" >> $GITHUB_ENV - echo "$(go env GOPATH)/bin" >> $GITHUB_PATH - - - name: Check out code into the Go module directory - uses: actions/checkout@v2 - - - name: Cache dependencies - uses: actions/cache@v2 - with: - # Cache - path: ~/go/pkg/mod - # Cache key - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - # An ordered list of keys to use for restoring the cache if no cache hit occurred for key - restore-keys: | - ${{ runner.os }}-go- - - - name: Get dependencies - run: | - go get -v -t -d ./... - if [ -f Gopkg.toml ]; then - curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh - dep ensure - fi - - - name: Build - run: go build ./... - - - name: Golangci - run: | - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.33.0 - golangci-lint run --out-format=github-actions - - - name: Test - run: go test ./... -coverprofile=coverage.txt -covermode=atomic - - - name: Coverage - run: bash <(curl -s https://codecov.io/bash) - - scaffold: - - name: Scaffold Test on ${{ matrix.os }} - Go${{ matrix.go_version }} - runs-on: ${{ matrix.os }} - strategy: - matrix: - go_version: - - 1.13 - os: - - ubuntu-latest - - steps: - - - name: Set up Go ${{ matrix.go_version }} - uses: actions/setup-go@v1 - with: - go-version: ${{ matrix.go_version }} - id: go - - - name: Set up Env - run: | - echo "GOPATH=$(go env GOPATH)" >> $GITHUB_ENV - echo "$(go env GOPATH)/bin" >> $GITHUB_PATH - - - name: Check out code into the Go module directory - uses: actions/checkout@v2 - - - name: Cache dependencies - uses: actions/cache@v2 - with: - # Cache - path: ~/go/pkg/mod - # Cache key - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - # An ordered list of keys to use for restoring the cache if no cache hit occurred for key - restore-keys: | - ${{ runner.os }}-go- - - - name: Get dependencies - run: | - go get -v -t -d ./... - if [ -f Gopkg.toml ]; then - curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh - dep ensure - fi - wget https://github.com/google/protobuf/releases/download/v3.11.4/protoc-3.11.4-linux-x86_64.zip - unzip protoc-3.11.4-linux-x86_64.zip - chmod +x bin/protoc - sudo mv bin/protoc /usr/local/bin - sudo mv include /usr/local/bin - go get -u github.com/golang/protobuf/protoc-gen-go - go get -u github.com/gogo/protobuf/protoc-gen-gofast - - - name: Tool - run: | - go install ./... - mkdir -p $GOPATH/src - cp -R ../kratos $GOPATH/src - cd $GOPATH/src - kratos new kratos-demo - cd kratos-demo - go build ./... 
diff --git a/.gitignore b/.gitignore index f9f02aeb8..c29907463 100644 --- a/.gitignore +++ b/.gitignore @@ -1,32 +1,36 @@ -# idea ignore -.idea/ -*.ipr -*.iml -*.iws -.vscode/ - -# temp ignore -*.log -*.cache -*.diff +# Reference https://github.com/github/gitignore/blob/master/Go.gitignore +# Binaries for programs and plugins *.exe *.exe~ -*.patch -*.swp -*.tmp +*.dll +*.so +*.dylib -# system ignore -.DS_Store +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +vendor/ + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# OS General Thumbs.db +.DS_Store # project *.cert *.key -tool/kratos/kratos -tool/kratos-protoc/kratos-protoc -tool/kratos-gen-bts/kratos-gen-bts -tool/kratos-gen-mc/kratos-gen-mc -tool/kratos/kratos-protoc/kratos-protoc -tool/kratos/protobuf/protoc-gen-bm/protoc-gen-bm -tool/kratos/protobuf/protoc-gen-ecode/protoc-gen-ecode -tool/kratos/protobuf/protoc-gen-bswagger/protoc-gen-bswagger +*.log +bin/ + +# Develop tools +.vscode/ +.idea/ +*.swp diff --git a/.golangci.yml b/.golangci.yml deleted file mode 100644 index 3ce2dbc0a..000000000 --- a/.golangci.yml +++ /dev/null @@ -1,138 +0,0 @@ -# [index] https://github.com/golangci/golangci-lint -# [example] https://github.com/golangci/golangci-lint/blob/master/.golangci.example.yml - -run: - tests: true #是否包含测试文件 - issues-exit-code: 0 - -linters-settings: - # govet: - # check-shadowing: true #启用了对同名变量名在函数中被隐藏的警告 - gofmt: - simplify: true - goimports: - local-prefixes: "github.com/go-kratos/kratos" # 格式化代码时,本地代码单独块 - gocritic: - enabled-tags: - - diagnostic - # - style - # - performance - disabled-checks: - #- wrapperFunc - #- dupImport # https://github.com/go-critic/go-critic/issues/845 - - commentedOutCode - - ifElseChain - - elseif - settings: # settings passed to gocritic - captLocal: # must be valid enabled check name - paramsOnly: true - # rangeValCopy: - # sizeThreshold: 32 - lll: - line-length: 500 - funlen: - lines: 500 - statements: 500 - gocyclo: - min-complexity: 100 - -linters: - disable-all: true - enable: - # https://golangci-lint.run/usage/configuration/ - - bodyclose # http.resp.body 内存泄露检查 - - deadcode # 无用的变量声明检查 - - depguard # 自定义依赖包白、黑名单 控制导包 - - dogsled # 空白标识符的赋值检查 默认为2 - #- dupl # 重复代码检查 - - errcheck # 未判断的error返回值检查 - - funlen # 接口最大行数检查 - #- gochecknoinits # 包中定义init()函数检查 - #- goconst # 常量字符串检查 - - gocritic # - - gocyclo # 代码复杂度检查 - - gofmt # 优化代码 - - goimports # 自动增加和删除包 - - golint # 代码风格检查 - #- gomnd # 参数、赋值、用例、条件、操作和返回语句检查 - - goprintffuncname # - - gosec # 源代码安全检查 - - gosimple # 可以优化的代码检查 注:该工具已整合到staticcheck中 - - govet # 代码正确性检查 - - ineffassign # 无效赋值检查 - - interfacer # 建议接口的使用方式 - - lll # 行最大字符 - - misspell # 拼写错误检查 - - nakedret # 大于指定函数长度的函数的无约束返回值检查 - - nolintlint # - - rowserrcheck # sql.Rows.Err检查 - - scopelint # 循环变量引用检查,排除test文件 - - staticcheck # 静态检查 - - structcheck # 结构体字段的约束条件检查 - - stylecheck # 代码风格检查 - - typecheck # 类型检查 - - unconvert # 类型转换检查 - - unparam # 未使用参数检查 - #- unused # 未使用变量、函数检查 - - varcheck # 报告exported变量和常量 - - whitespace # 空行检查 - -severity: - # Default value is empty string. - # Set the default severity for issues. If severity rules are defined and the issues - # do not match or no severity is provided to the rule this will be the default - # severity applied. Severities should match the supported severity names of the - # selected out format. 
- # - Code climate: https://docs.codeclimate.com/docs/issues#issue-severity - # - Checkstyle: https://checkstyle.sourceforge.io/property_types.html#severity - # - Github: https://help.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-error-message - default-severity: error - # The default value is false. - # If set to true severity-rules regular expressions become case sensitive. - case-sensitive: false - # Default value is empty list. - # When a list of severity rules are provided, severity information will be added to lint - # issues. Severity rules have the same filtering capability as exclude rules except you - # are allowed to specify one matcher per severity rule. - # Only affects out formats that support setting severity information. - rules: - - linters: - - dupl - - nakedret - - lll - - misspell - - goprintffuncname - - stylecheck - - deadcode - - whitespace - - unparam - - golint - - gosec - - staticcheck - - structcheck - - gocritic - - errcheck - - rowserrcheck - - unconvert - - gosimple - - rowserrcheck - - ineffassign - severity: warning - -issues: - # Excluding configuration per-path, per-linter, per-text and per-source - exclude-rules: - - path: _test\.go - linters: - - gomnd - - gocyclo - - errcheck - - dupl - - gosec - - scopelint - - interfacer - - govet - # https://github.com/go-critic/go-critic/issues/926 - - linters: - - gocritic - text: "unnecessaryDefer:" \ No newline at end of file diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 3d041a9b6..000000000 --- a/.travis.yml +++ /dev/null @@ -1,59 +0,0 @@ -language: go - -go: - - 1.12.x - - 1.13.x - -services: - - docker - -# Only clone the most recent commit. -git: - depth: 1 - -# Force-enable Go modules. This will be unnecessary when Go 1.12 lands. -env: - global: - - GO111MODULE=on - - REGION=sh - - ZONE=sh001 - - DEPLOY_ENV=dev - - DISCOVERY_NODES=127.0.0.1:7171 - - HTTP_PERF=tcp://0.0.0.0:0 - - DOCKER_COMPOSE_VERSION=1.24.1 - - ZK_VERSION=3.5.6 - -before_install: - # docker-compose - - sudo rm /usr/local/bin/docker-compose - - curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose - - chmod +x docker-compose - - sudo mv docker-compose /usr/local/bin - # zookeeper - - wget "http://apache.cs.utah.edu/zookeeper/zookeeper-${ZK_VERSION}/apache-zookeeper-${ZK_VERSION}-bin.tar.gz" - - tar -xvf "apache-zookeeper-${ZK_VERSION}-bin.tar.gz" - - mv apache-zookeeper-${ZK_VERSION}-bin zk - - chmod +x ./zk/bin/zkServer.sh - -# Skip the install step. Don't `go get` dependencies. Only build with the code -# in vendor/ -install: true - -# Anything in before_script that returns a nonzero exit code will flunk the -# build and immediately stop. It's sorta like having set -e enabled in bash. -# Make sure golangci-lint is vendored. -before_script: - - curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s -- -b $GOPATH/bin - # discovery - - curl -sfL https://raw.githubusercontent.com/bilibili/discovery/master/install.sh | sh -s -- -b $GOPATH/bin - - curl -sfL https://raw.githubusercontent.com/bilibili/discovery/master/cmd/discovery/discovery-example.toml -o $GOPATH/bin/discovery.toml - - nohup bash -c "$GOPATH/bin/discovery -conf $GOPATH/bin/discovery.toml &" - # zookeeper - - sudo ./zk/bin/zkServer.sh start ./zk/conf/zoo_sample.cfg 1> /dev/null - -script: - - go build ./... - - go test ./... 
- -after_success: - - golangci-lint run # run a bunch of code checkers/linters in parallel diff --git a/LICENSE b/LICENSE index 3ee19b4a1..684318c72 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2018 bilibili +Copyright (c) 2020 go-kratos Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/README.md b/README.md index 268c924a9..55270151c 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -![kratos](docs/img/kratos3.png) +![kratos](docs/images/kratos.png) [![Language](https://img.shields.io/badge/Language-Go-blue.svg)](https://golang.org/) [![Build Status](https://github.com/go-kratos/kratos/workflows/Go/badge.svg)](https://github.com/go-kratos/kratos/actions) @@ -8,69 +8,86 @@ # Kratos -Kratos是[bilibili](https://www.bilibili.com)开源的一套Go微服务框架,包含大量微服务相关框架及工具。 +Kratos 一套轻量级 Go 微服务框架,包含大量微服务相关框架及工具。 > 名字来源于:《战神》游戏以希腊神话为背景,讲述由凡人成为战神的奎托斯(Kratos)成为战神并展开弑神屠杀的冒险历程。 ## Goals -我们致力于提供完整的微服务研发体验,整合相关框架及工具后,微服务治理相关部分可对整体业务开发周期无感,从而更加聚焦于业务交付。对每位开发者而言,整套Kratos框架也是不错的学习仓库,可以了解和参考到[bilibili](https://www.bilibili.com)在微服务方面的技术积累和经验。 +我们致力于提供完整的微服务研发体验,整合相关框架及工具后,微服务治理相关部分可对整体业务开发周期无感,从而更加聚焦于业务交付。对每位开发者而言,整套Kratos框架也是不错的学习仓库,可以了解和参考到微服务方面的技术积累和经验。 + +### Principles + +* 简单:不过度设计,代码平实简单; +* 通用:通用业务开发所需要的基础库的功能; +* 高效:提高业务迭代的效率; +* 稳定:基础库可测试性高,覆盖率高,有线上实践安全可靠; +* 健壮:通过良好的基础库设计,减少错用; +* 高性能:性能高,但不特定为了性能做hack优化,引入unsafe; +* 扩展性:良好的接口设计,来扩展实现,或者通过新增基础库目录来扩展功能; +* 容错性:为失败设计,大量引入对SRE的理解,鲁棒性高; +* 工具链:包含大量工具链,比如cache代码生成,lint工具等等; ## Features -* HTTP Blademaster:核心基于[gin](https://github.com/gin-gonic/gin)进行模块化设计,简单易用、核心足够轻量; -* GRPC Warden:基于官方gRPC开发,集成[discovery](https://github.com/bilibili/discovery)服务发现,并融合P2C负载均衡; -* Cache:优雅的接口化设计,非常方便的缓存序列化,推荐结合代理模式[overlord](https://github.com/bilibili/overlord); -* Database:集成MySQL/HBase/TiDB,添加熔断保护和统计支持,可快速发现数据层压力; -* Config:方便易用的[paladin sdk](https://go-kratos.github.io/kratos/#/config),可配合远程配置中心,实现配置版本管理和更新; -* Log:类似[zap](https://github.com/uber-go/zap)的field实现高性能日志库,并结合log-agent实现远程日志管理; -* Trace:基于opentracing,集成了全链路trace支持(gRPC/HTTP/MySQL/Redis/Memcached); -* Kratos Tool:工具链,可快速生成标准项目,或者通过Protobuf生成代码,非常便捷使用gRPC、HTTP、swagger文档; +* APIs:协议通信以 HTTP/gRPC 为基础,通过 Protobuf 进行定义; +* Errors:通过 Protobuf 的 Enum 作为错误码定义,以及工具生成判定接口; +* Metadata:在协议通信 HTTP/gRPC 中,通过 Middleware 规范化服务元信息传递; +* Config:通过KeyValue方式实现,对多种配置源进行铺平,以Atomic方式支持动态配置; +* Logger:标准日志接口,可方便集成三方 log 库,并可通过 fluentd 收集日志; +* Metrics:统一指标接口,可以实现各种指标系统,默认集成 Prometheus; +* Tracing:遵循 OpenTracing 规范定义,以实现微服务链路追踪; +* Encoding:支持Accept和Content-Type进行自动选择内容编码; +* Transport:通用的 HTTP/gRPC 传输层,实现统一的 Middleware 插件支持; +* Server:进行基础的 Server 层封装,统一以 Options 方式配置使用; -## Quick start +## Getting Started +### Required +- [go](https://golang.org/dl/) +- [protoc](https://github.com/protocolbuffers/protobuf) +- [protoc-gen-go](https://github.com/protocolbuffers/protobuf-go) -### Requirments +### Install Kratos +``` +# 安装生成工具 +go get github.com/go-kratos/kratos/cmd/kratos +go get github.com/go-kratos/kratos/cmd/protoc-gen-go-http +go get github.com/go-kratos/kratos/cmd/protoc-gen-go-errors -Go version>=1.13 +# 或者通过 Source 安装 +cd cmd/kratos && go install +cd cmd/protoc-gen-go-http && go install +cd cmd/protoc-gen-go-errors && go install +``` +### Create a service +``` +# 创建项目模板 +kratos new helloworld -### Installation -```shell -# Linux/macOS -GO111MODULE=on && go get -u github.com/go-kratos/kratos/tool/kratos +cd helloworld +# 生成proto模板 +kratos proto add api/helloworld/helloworld.proto 
+# 生成service模板 +kratos proto service api/helloworld/helloworld.proto -t internal/service -# Windows (Powershell) -go env -w GO111MODULE=on ; go get -u github.com/go-kratos/kratos/tool/kratos - -# Windows (CMD) -go env -w GO111MODULE=on && go get -u github.com/go-kratos/kratos/tool/kratos - -cd $GOPATH/src -kratos new kratos-demo +# 生成api下所有proto文件 +make proto +# 编码cmd下所有main文件 +make build +# 进行单元测试 +make test ``` -通过 `kratos new` 会快速生成基于kratos库的脚手架代码,如生成 [kratos-demo](https://github.com/bilibili/kratos-demo) +## Service Layout +* [Service Layout](https://github.com/go-kratos/kratos-layout) -### Build & Run - -```shell -cd kratos-demo/cmd -go build -./cmd -conf ../configs -``` - -打开浏览器访问:[http://localhost:8000/kratos-demo/start](http://localhost:8000/kratos-demo/start),你会看到输出了`Golang 大法好 !!!` - -[快速开始](https://go-kratos.github.io/kratos/#/quickstart) [kratos工具](https://go-kratos.github.io/kratos/#/kratos-tool) - -## Documentation - -> [简体中文](https://go-kratos.github.io/kratos) -> [简体中文(国内镜像)](https://go-kratos.gitee.io/kratos/) -> [FAQ](https://go-kratos.github.io/kratos/#/FAQ) - -## 社区 -* [官方微信群](https://github.com/go-kratos/kratos/issues/682) (推荐) +## Community +* [Wechat Group](https://github.com/go-kratos/kratos/issues/682) * [Discord Group](https://discord.gg/BWzJsUJ) +* QQ Group: 716486124 + +## Sponsors and Backers + +![kratos](docs/images/alipay.png) ## License -Kratos is under the MIT license. See the [LICENSE](./LICENSE) file for details. - +Kratos is MIT licensed. See the [LICENSE](./LICENSE) file for details. diff --git a/api/README.md b/api/README.md new file mode 100644 index 000000000..159e965c4 --- /dev/null +++ b/api/README.md @@ -0,0 +1 @@ +# API proto diff --git a/api/kratos/api/annotations.pb.go b/api/kratos/api/annotations.pb.go new file mode 100644 index 000000000..a6743b84c --- /dev/null +++ b/api/kratos/api/annotations.pb.go @@ -0,0 +1,100 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.13.0 +// source: kratos/api/annotations.proto + +package api + +import ( + proto "github.com/golang/protobuf/proto" + descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +var file_kratos_api_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 1000, + Name: "kratos.api.errors", + Tag: "varint,1000,opt,name=errors", + Filename: "kratos/api/annotations.proto", + }, +} + +// Extension fields to descriptor.EnumOptions. 
+var ( + // optional bool errors = 1000; + E_Errors = &file_kratos_api_annotations_proto_extTypes[0] +) + +var File_kratos_api_annotations_proto protoreflect.FileDescriptor + +var file_kratos_api_annotations_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x6b, 0x72, 0x61, 0x74, 0x6f, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, + 0x6b, 0x72, 0x61, 0x74, 0x6f, 0x73, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x35, 0x0a, 0x06, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x73, 0x42, 0x58, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x6b, 0x72, 0x61, 0x74, 0x6f, 0x73, 0x2e, 0x61, 0x70, 0x69, 0x50, 0x01, 0x5a, 0x31, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2d, 0x6b, 0x72, + 0x61, 0x74, 0x6f, 0x73, 0x2f, 0x6b, 0x72, 0x61, 0x74, 0x6f, 0x73, 0x2f, 0x76, 0x32, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x6b, 0x72, 0x61, 0x74, 0x6f, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x61, 0x70, + 0x69, 0xa2, 0x02, 0x09, 0x4b, 0x72, 0x61, 0x74, 0x6f, 0x73, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_kratos_api_annotations_proto_goTypes = []interface{}{ + (*descriptor.EnumOptions)(nil), // 0: google.protobuf.EnumOptions +} +var file_kratos_api_annotations_proto_depIdxs = []int32{ + 0, // 0: kratos.api.errors:extendee -> google.protobuf.EnumOptions + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_kratos_api_annotations_proto_init() } +func file_kratos_api_annotations_proto_init() { + if File_kratos_api_annotations_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_kratos_api_annotations_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_kratos_api_annotations_proto_goTypes, + DependencyIndexes: file_kratos_api_annotations_proto_depIdxs, + ExtensionInfos: file_kratos_api_annotations_proto_extTypes, + }.Build() + File_kratos_api_annotations_proto = out.File + file_kratos_api_annotations_proto_rawDesc = nil + file_kratos_api_annotations_proto_goTypes = nil + file_kratos_api_annotations_proto_depIdxs = nil +} diff --git a/api/kratos/api/annotations.proto b/api/kratos/api/annotations.proto new file mode 100644 index 000000000..34c4b17e6 --- /dev/null +++ b/api/kratos/api/annotations.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; +package kratos.api; + +option go_package = "github.com/go-kratos/kratos/v2/api/kratos/api;api"; +option java_multiple_files = true; +option java_package = "com.github.kratos.api"; +option objc_class_prefix = "KratosAPI"; + +import "google/protobuf/descriptor.proto"; + +extend google.protobuf.EnumOptions { + bool errors = 1000; +} diff 
--git a/app.go b/app.go new file mode 100644 index 000000000..ebf119176 --- /dev/null +++ b/app.go @@ -0,0 +1,134 @@ +package kratos + +import ( + "context" + "errors" + "os" + "os/signal" + "syscall" + + "github.com/go-kratos/kratos/v2/log" + "github.com/go-kratos/kratos/v2/registry" + "github.com/go-kratos/kratos/v2/transport" + + "github.com/google/uuid" + "golang.org/x/sync/errgroup" +) + +// App is an application components lifecycle manager +type App struct { + opts options + ctx context.Context + cancel func() + instance *registry.ServiceInstance + log *log.Helper +} + +// New create an application lifecycle manager. +func New(opts ...Option) *App { + options := options{ + ctx: context.Background(), + logger: log.DefaultLogger, + sigs: []os.Signal{syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGINT}, + } + if id, err := uuid.NewUUID(); err == nil { + options.id = id.String() + } + for _, o := range opts { + o(&options) + } + ctx, cancel := context.WithCancel(options.ctx) + return &App{ + opts: options, + ctx: ctx, + cancel: cancel, + instance: serviceInstance(options), + log: log.NewHelper("app", options.logger), + } +} + +// Logger returns logger. +func (a *App) Logger() log.Logger { + return a.opts.logger +} + +// Server returns transport servers. +func (a *App) Server() []transport.Server { + return a.opts.servers +} + +// Registry returns registry. +func (a *App) Registry() registry.Registry { + return a.opts.registry +} + +// Run executes all OnStart hooks registered with the application's Lifecycle. +func (a *App) Run() error { + a.log.Infow( + "service_id", a.opts.id, + "service_name", a.opts.name, + "version", a.opts.version, + ) + g, ctx := errgroup.WithContext(a.ctx) + for _, srv := range a.opts.servers { + srv := srv + g.Go(func() error { + <-ctx.Done() // wait for stop signal + return srv.Stop() + }) + g.Go(func() error { + return srv.Start() + }) + } + if a.opts.registry != nil { + if err := a.opts.registry.Register(a.instance); err != nil { + return err + } + } + c := make(chan os.Signal, 1) + signal.Notify(c, a.opts.sigs...) + g.Go(func() error { + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-c: + a.Stop() + } + } + }) + if err := g.Wait(); err != nil && !errors.Is(err, context.Canceled) { + return err + } + return nil +} + +// Stop gracefully stops the application. 
+func (a *App) Stop() error { + if a.opts.registry != nil { + if err := a.opts.registry.Deregister(a.instance); err != nil { + return err + } + } + if a.cancel != nil { + a.cancel() + } + return nil +} + +func serviceInstance(o options) *registry.ServiceInstance { + if len(o.endpoints) == 0 { + for _, srv := range o.servers { + if e, err := srv.Endpoint(); err == nil { + o.endpoints = append(o.endpoints, e) + } + } + } + return ®istry.ServiceInstance{ + ID: o.id, + Name: o.name, + Version: o.version, + Metadata: o.metadata, + Endpoints: o.endpoints, + } +} diff --git a/app_test.go b/app_test.go new file mode 100644 index 000000000..bea39401f --- /dev/null +++ b/app_test.go @@ -0,0 +1,25 @@ +package kratos + +import ( + "testing" + "time" + + "github.com/go-kratos/kratos/v2/transport/grpc" + "github.com/go-kratos/kratos/v2/transport/http" +) + +func TestApp(t *testing.T) { + hs := http.NewServer() + gs := grpc.NewServer() + app := New( + Name("kratos"), + Version("v1.0.0"), + Server(hs, gs), + ) + time.AfterFunc(time.Second, func() { + app.Stop() + }) + if err := app.Run(); err != nil { + t.Fatal(err) + } +} diff --git a/cmd/kratos/go.mod b/cmd/kratos/go.mod new file mode 100644 index 000000000..4807cef88 --- /dev/null +++ b/cmd/kratos/go.mod @@ -0,0 +1,10 @@ +module github.com/go-kratos/kratos/cmd/kratos + +go 1.15 + +require ( + github.com/emicklei/proto v1.9.0 + github.com/go-git/go-git/v5 v5.2.0 + github.com/spf13/cobra v1.1.1 + golang.org/x/mod v0.4.0 +) diff --git a/cmd/kratos/go.sum b/cmd/kratos/go.sum new file mode 100644 index 000000000..53bdfb6e9 --- /dev/null +++ b/cmd/kratos/go.sum @@ -0,0 +1,354 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= +github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod 
h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/emicklei/proto v1.9.0 h1:l0QiNT6Qs7Yj0Mb4X6dnWBQer4ebei2BFcgQLbGqUDc= +github.com/emicklei/proto v1.9.0/go.mod h1:rn1FgRS/FANiZdD2djyH7TMA9jdRDcYQ9IEN9yvjX0A= +github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= +github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= 
+github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= +github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= +github.com/go-git/go-billy/v5 v5.0.0 h1:7NQHvd9FVid8VL4qVUMm8XifBK+2xCoZ2lSk0agRrHM= +github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12 h1:PbKy9zOy4aAKrJ5pibIRpVO2BXnK1Tlcg+caKI7Ox5M= +github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= +github.com/go-git/go-git/v5 v5.2.0 h1:YPBLG/3UK1we1ohRkncLjaXWLW+HKp5QNM/jTli2JgI= +github.com/go-git/go-git/v5 v5.2.0/go.mod h1:kh02eMX+wdqqxgNMEyq8YgwlIOsDOa9homkUq1PoTMs= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= 
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= +github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY= +github.com/kevinburke/ssh_config 
v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= +github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 h1:xMPOj6Pz6UipU1wXLkrtqpHbR0AVFnyPEQq/wRWz9lM= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0 h1:sfUMP1Gu8qASkorDVjnMuvgJzwFbTZSeXFiGBYAVdl4= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.4.0 h1:8pl+sMODzuvGJkmj2W4kZihvVb5mKm8pB/X44PIQHv8= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f 
h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/cmd/kratos/internal/base/mod.go b/cmd/kratos/internal/base/mod.go new file mode 100644 index 000000000..4df815018 --- /dev/null +++ b/cmd/kratos/internal/base/mod.go @@ -0,0 +1,16 @@ +package base + +import ( + "io/ioutil" + + "golang.org/x/mod/modfile" +) + +// ModulePath returns go module path. 
+func ModulePath(filename string) (string, error) { + modBytes, err := ioutil.ReadFile(filename) + if err != nil { + return "", err + } + return modfile.ModulePath(modBytes), nil +} diff --git a/cmd/kratos/internal/base/path.go b/cmd/kratos/internal/base/path.go new file mode 100644 index 000000000..32c5f118d --- /dev/null +++ b/cmd/kratos/internal/base/path.go @@ -0,0 +1,100 @@ +package base + +import ( + "bytes" + "io/ioutil" + "log" + "os" + "path" +) + +func kratosHome() string { + dir, err := os.UserHomeDir() + if err != nil { + log.Fatal(err) + } + home := path.Join(dir, ".kratos") + if _, err := os.Stat(home); os.IsNotExist(err) { + if err := os.MkdirAll(home, 0700); err != nil { + log.Fatal(err) + } + } + return home +} + +func kratosHomeWithDir(dir string) string { + home := path.Join(kratosHome(), dir) + if _, err := os.Stat(home); os.IsNotExist(err) { + if err := os.MkdirAll(home, 0700); err != nil { + log.Fatal(err) + } + } + return home +} + +func copyFile(src, dst string, replaces []string) error { + var err error + srcinfo, err := os.Stat(src) + if err != nil { + return err + } + buf, err := ioutil.ReadFile(src) + if err != nil { + return err + } + var old string + for i, next := range replaces { + if i%2 == 0 { + old = next + continue + } + buf = bytes.ReplaceAll(buf, []byte(old), []byte(next)) + } + return ioutil.WriteFile(dst, buf, srcinfo.Mode()) +} + +func copyDir(src, dst string, replaces, ignores []string) error { + var err error + var fds []os.FileInfo + var srcinfo os.FileInfo + + if srcinfo, err = os.Stat(src); err != nil { + return err + } + + if err = os.MkdirAll(dst, srcinfo.Mode()); err != nil { + return err + } + + if fds, err = ioutil.ReadDir(src); err != nil { + return err + } + for _, fd := range fds { + if hasSets(fd.Name(), ignores) { + continue + } + + srcfp := path.Join(src, fd.Name()) + dstfp := path.Join(dst, fd.Name()) + + if fd.IsDir() { + if err = copyDir(srcfp, dstfp, replaces, ignores); err != nil { + return err + } + } else { + if err = copyFile(srcfp, dstfp, replaces); err != nil { + return err + } + } + } + return nil +} + +func hasSets(name string, sets []string) bool { + for _, ig := range sets { + if ig == name { + return true + } + } + return false +} diff --git a/cmd/kratos/internal/base/repo.go b/cmd/kratos/internal/base/repo.go new file mode 100644 index 000000000..8414cca13 --- /dev/null +++ b/cmd/kratos/internal/base/repo.go @@ -0,0 +1,71 @@ +package base + +import ( + "context" + "errors" + "os" + "path" + "strings" + + "github.com/go-git/go-git/v5" +) + +// Repo is git repository manager. +type Repo struct { + url string + home string +} + +// NewRepo new a repository manager. 
+func NewRepo(url string) *Repo { + return &Repo{ + url: url, + home: kratosHomeWithDir("repo"), + } +} + +func (r *Repo) Path() string { + start := strings.LastIndex(r.url, "/") + end := strings.LastIndex(r.url, ".git") + return path.Join(r.home, r.url[start+1:end]) +} + +func (r *Repo) Pull(ctx context.Context, url string) error { + repo, err := git.PlainOpen(r.Path()) + if err != nil { + return err + } + w, err := repo.Worktree() + if err != nil { + return err + } + if err = w.PullContext(ctx, &git.PullOptions{ + RemoteName: "origin", + Progress: os.Stdout, + }); errors.Is(err, git.NoErrAlreadyUpToDate) { + return nil + } + return err +} + +func (r *Repo) Clone(ctx context.Context) error { + if _, err := os.Stat(r.Path()); !os.IsNotExist(err) { + return r.Pull(ctx, r.url) + } + _, err := git.PlainCloneContext(ctx, r.Path(), false, &git.CloneOptions{ + URL: r.url, + Progress: os.Stdout, + }) + return err +} + +func (r *Repo) CopyTo(ctx context.Context, to string, modPath string, ignores []string) error { + if err := r.Clone(ctx); err != nil { + return err + } + mod, err := ModulePath(path.Join(r.Path(), "go.mod")) + if err != nil { + return err + } + return copyDir(r.Path(), to, []string{mod, modPath}, ignores) +} diff --git a/cmd/kratos/internal/base/repo_test.go b/cmd/kratos/internal/base/repo_test.go new file mode 100644 index 000000000..69fa17c35 --- /dev/null +++ b/cmd/kratos/internal/base/repo_test.go @@ -0,0 +1,16 @@ +package base + +import ( + "context" + "testing" +) + +func TestRepo(t *testing.T) { + r := NewRepo(RepoURL("https://github.com/go-kratos/service-layout.git")) + if err := r.Clone(context.Background()); err != nil { + t.Fatal(err) + } + if err := r.CopyTo(context.Background(), "/tmp/test_repo"); err != nil { + t.Fatal(err) + } +} diff --git a/cmd/kratos/internal/new/new.go b/cmd/kratos/internal/new/new.go new file mode 100644 index 000000000..3893c40b1 --- /dev/null +++ b/cmd/kratos/internal/new/new.go @@ -0,0 +1,36 @@ +package new + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/spf13/cobra" +) + +// CmdNew represents the new command. +var CmdNew = &cobra.Command{ + Use: "new", + Short: "Create a service template", + Long: "Create a service project using the repository template. Example: kratos new helloworld", + Run: run, +} + +func run(cmd *cobra.Command, args []string) { + wd, err := os.Getwd() + if err != nil { + panic(err) + } + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + if len(args) == 0 { + fmt.Fprintf(os.Stderr, "\033[31mERROR: project name is required.\033[m Example: kratos new helloworld\n") + return + } + p := &Project{Name: args[0]} + if err := p.Generate(ctx, wd); err != nil { + fmt.Fprintf(os.Stderr, "\033[31mERROR: %s\033[m\n", err) + return + } +} diff --git a/cmd/kratos/internal/new/project.go b/cmd/kratos/internal/new/project.go new file mode 100644 index 000000000..b95d869be --- /dev/null +++ b/cmd/kratos/internal/new/project.go @@ -0,0 +1,38 @@ +package new + +import ( + "context" + "fmt" + "os" + "path" + + "github.com/go-kratos/kratos/cmd/kratos/internal/base" +) + +const ( + serviceLayoutURL = "https://github.com/go-kratos/kratos-layout.git" +) + +// Project is a project template. +type Project struct { + Name string +} + +// Generate generate template project. 
+func (p *Project) Generate(ctx context.Context, dir string) error { + to := path.Join(dir, p.Name) + if _, err := os.Stat(to); !os.IsNotExist(err) { + return fmt.Errorf("%s already exists", p.Name) + } + fmt.Printf("Creating service %s\n", p.Name) + repo := base.NewRepo(serviceLayoutURL) + + if err := repo.CopyTo(ctx, to, p.Name, []string{".git", ".github"}); err != nil { + return err + } + os.Rename( + path.Join(to, "cmd", "server"), + path.Join(to, "cmd", p.Name), + ) + return nil +} diff --git a/cmd/kratos/internal/proto/add/add.go b/cmd/kratos/internal/proto/add/add.go new file mode 100644 index 000000000..e8fa29f34 --- /dev/null +++ b/cmd/kratos/internal/proto/add/add.go @@ -0,0 +1,65 @@ +package add + +import ( + "fmt" + "io/ioutil" + "strings" + + "github.com/spf13/cobra" + "golang.org/x/mod/modfile" +) + +// CmdAdd represents the add command. +var CmdAdd = &cobra.Command{ + Use: "add", + Short: "Add a proto API template", + Long: "Add a proto API template. Example: kratos add helloworld/v1/hello.proto", + Run: run, +} + +func run(cmd *cobra.Command, args []string) { + // kratos add helloworld/v1/helloworld.proto + input := args[0] + n := strings.LastIndex(input, "/") + path := input[:n] + fileName := input[n+1:] + pkgName := strings.ReplaceAll(path, "/", ".") + + p := &Proto{ + Name: fileName, + Path: path, + Package: pkgName, + GoPackage: goPackage(path), + JavaPackage: javaPackage(pkgName), + Service: serviceName(fileName), + } + if err := p.Generate(); err != nil { + fmt.Println(err) + return + } +} + +func modName() string { + modBytes, err := ioutil.ReadFile("go.mod") + if err != nil { + if modBytes, err = ioutil.ReadFile("../go.mod"); err != nil { + return "" + } + } + return modfile.ModulePath(modBytes) +} + +func goPackage(path string) string { + s := strings.Split(path, "/") + return modName() + "/" + path + ";" + s[len(s)-1] +} + +func javaPackage(name string) string { + return name +} + +func serviceName(name string) string { + return unexport(strings.Split(name, ".")[0]) +} + +func unexport(s string) string { return strings.ToUpper(s[:1]) + s[1:] } diff --git a/cmd/kratos/internal/proto/add/proto.go b/cmd/kratos/internal/proto/add/proto.go new file mode 100644 index 000000000..dbc082eb0 --- /dev/null +++ b/cmd/kratos/internal/proto/add/proto.go @@ -0,0 +1,41 @@ +package add + +import ( + "fmt" + "io/ioutil" + "os" + "path" +) + +// Proto is a proto generator. +type Proto struct { + Name string + Path string + Service string + Package string + GoPackage string + JavaPackage string +} + +// Generate generate a proto template. 
+func (p *Proto) Generate() error { + body, err := p.execute() + if err != nil { + return err + } + wd, err := os.Getwd() + if err != nil { + panic(err) + } + to := path.Join(wd, p.Path) + if _, err := os.Stat(to); os.IsNotExist(err) { + if err := os.MkdirAll(to, 0700); err != nil { + return err + } + } + name := path.Join(to, p.Name) + if _, err := os.Stat(name); !os.IsNotExist(err) { + return fmt.Errorf("%s already exists", p.Name) + } + return ioutil.WriteFile(name, []byte(body), 0644) +} diff --git a/cmd/kratos/internal/proto/add/template.go b/cmd/kratos/internal/proto/add/template.go new file mode 100644 index 000000000..805596777 --- /dev/null +++ b/cmd/kratos/internal/proto/add/template.go @@ -0,0 +1,52 @@ +package add + +import ( + "bytes" + "strings" + "text/template" +) + +const protoTemplate = ` +syntax = "proto3"; + +package {{.Package}}; + +option go_package = "{{.GoPackage}}"; +option java_multiple_files = true; +option java_package = "{{.JavaPackage}}"; + +service {{.Service}} { + rpc Create{{.Service}} (Create{{.Service}}Request) returns (Create{{.Service}}Reply); + rpc Update{{.Service}} (Update{{.Service}}Request) returns (Update{{.Service}}Reply); + rpc Delete{{.Service}} (Delete{{.Service}}Request) returns (Delete{{.Service}}Reply); + rpc Get{{.Service}} (Get{{.Service}}Request) returns (Get{{.Service}}Reply); + rpc List{{.Service}} (List{{.Service}}Request) returns (List{{.Service}}Reply); +} + +message Create{{.Service}}Request {} +message Create{{.Service}}Reply {} + +message Update{{.Service}}Request {} +message Update{{.Service}}Reply {} + +message Delete{{.Service}}Request {} +message Delete{{.Service}}Reply {} + +message Get{{.Service}}Request {} +message Get{{.Service}}Reply {} + +message List{{.Service}}Request {} +message List{{.Service}}Reply {} +` + +func (p *Proto) execute() ([]byte, error) { + buf := new(bytes.Buffer) + tmpl, err := template.New("proto").Parse(strings.TrimSpace(protoTemplate)) + if err != nil { + return nil, err + } + if err := tmpl.Execute(buf, p); err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/cmd/kratos/internal/proto/proto.go b/cmd/kratos/internal/proto/proto.go new file mode 100644 index 000000000..77d2ec161 --- /dev/null +++ b/cmd/kratos/internal/proto/proto.go @@ -0,0 +1,27 @@ +package proto + +import ( + "github.com/go-kratos/kratos/cmd/kratos/internal/proto/add" + "github.com/go-kratos/kratos/cmd/kratos/internal/proto/service" + "github.com/go-kratos/kratos/cmd/kratos/internal/proto/source" + + "github.com/spf13/cobra" +) + +// CmdProto represents the proto command. +var CmdProto = &cobra.Command{ + Use: "proto", + Short: "Generate the proto files", + Long: "Generate the proto files.", + Run: run, +} + +func init() { + CmdProto.AddCommand(add.CmdAdd) + CmdProto.AddCommand(source.CmdSource) + CmdProto.AddCommand(service.CmdService) +} + +func run(cmd *cobra.Command, args []string) { + +} diff --git a/cmd/kratos/internal/proto/service/service.go b/cmd/kratos/internal/proto/service/service.go new file mode 100644 index 000000000..a2dca6276 --- /dev/null +++ b/cmd/kratos/internal/proto/service/service.go @@ -0,0 +1,86 @@ +package service + +import ( + "fmt" + "io/ioutil" + "log" + "os" + "path" + "strings" + + "github.com/emicklei/proto" + "github.com/spf13/cobra" +) + +// CmdService represents the service command. +var CmdService = &cobra.Command{ + Use: "service", + Short: "Generate the proto Service implementations", + Long: "Generate the proto Service implementations. 
Example: kratos proto service api/xxx.proto -target-dir=internal/service", + Run: run, +} +var targetDir string + +func init() { + CmdService.Flags().StringVarP(&targetDir, "-target-dir", "t", "internal/service", "generate target directory") +} + +func run(cmd *cobra.Command, args []string) { + if len(args) == 0 { + fmt.Fprintln(os.Stderr, "Please specify the proto file. Example: kratos proto service api/xxx.proto") + return + } + reader, err := os.Open(args[0]) + if err != nil { + log.Fatal(err) + } + defer reader.Close() + + parser := proto.NewParser(reader) + definition, err := parser.Parse() + if err != nil { + log.Fatal(err) + } + + var ( + pkg string + res []*Service + ) + proto.Walk(definition, + proto.WithOption(func(o *proto.Option) { + if o.Name == "go_package" { + pkg = strings.Split(o.Constant.Source, ";")[0] + } + }), + proto.WithService(func(s *proto.Service) { + cs := &Service{ + Package: pkg, + Service: s.Name, + } + for _, e := range s.Elements { + r, ok := e.(*proto.RPC) + if ok { + cs.Methods = append(cs.Methods, &Method{Service: s.Name, Name: r.Name, Request: r.RequestType, Reply: r.ReturnsType}) + } + } + res = append(res, cs) + }), + ) + for _, s := range res { + to := path.Join(targetDir, strings.ToLower(s.Service)+".go") + _, err := os.Stat(to) + if !os.IsNotExist(err) { + fmt.Fprintf(os.Stderr, "%s already exists\n", s.Service) + continue + } + if err = os.MkdirAll(targetDir, os.ModeDir); err != nil { + fmt.Fprintf(os.Stderr, "Failed to create file directory: %s\n", targetDir) + continue + } + b, err := s.execute() + if err != nil { + log.Fatal(err) + } + ioutil.WriteFile(to, b, 0644) + } +} diff --git a/cmd/kratos/internal/proto/service/template.go b/cmd/kratos/internal/proto/service/template.go new file mode 100644 index 000000000..b2f255187 --- /dev/null +++ b/cmd/kratos/internal/proto/service/template.go @@ -0,0 +1,56 @@ +package service + +import ( + "bytes" + "html/template" +) + +var serviceTemplate = ` +package service + +import( + "context" + + pb "{{.Package}}" +) + +type {{.Service}}Service struct { + pb.Unimplemented{{.Service}}Server +} + +func New{{.Service}}Service() pb.{{.Service}}Server { + return &{{.Service}}Service{} +} +{{ range .Methods }} +func (s *{{.Service}}Service) {{.Name}}(ctx context.Context, req *pb.{{.Request}}) (*pb.{{.Reply}}, error) { + return &pb.{{.Reply}}{}, nil +} +{{- end }} +` + +// Service is a proto service. +type Service struct { + Package string + Service string + Methods []*Method +} + +// Method is a proto method. +type Method struct { + Service string + Name string + Request string + Reply string +} + +func (s *Service) execute() ([]byte, error) { + buf := new(bytes.Buffer) + tmpl, err := template.New("service").Parse(serviceTemplate) + if err != nil { + return nil, err + } + if err := tmpl.Execute(buf, s); err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/cmd/kratos/internal/proto/source/source.go b/cmd/kratos/internal/proto/source/source.go new file mode 100644 index 000000000..799056de5 --- /dev/null +++ b/cmd/kratos/internal/proto/source/source.go @@ -0,0 +1,28 @@ +package source + +import ( + "fmt" + "log" + "os/exec" + + "github.com/spf13/cobra" +) + +// CmdSource represents the source command. +var CmdSource = &cobra.Command{ + Use: "source", + Short: "Generate the proto source code", + Long: "Generate the proto source code. 
Example: kratos proto source ./**/*.proto", + Run: run, +} + +func run(cmd *cobra.Command, args []string) { + input := []string{"--go_out=paths=source_relative:.", "--go-grpc_out=paths=source_relative:."} + input = append(input, args...) + do := exec.Command("protoc", input...) + out, err := do.CombinedOutput() + if err != nil { + log.Fatalf("failed to execute: %s\n", err) + } + fmt.Println(string(out)) +} diff --git a/cmd/kratos/main.go b/cmd/kratos/main.go new file mode 100644 index 000000000..77c3eec6d --- /dev/null +++ b/cmd/kratos/main.go @@ -0,0 +1,27 @@ +package main + +import ( + "log" + + "github.com/go-kratos/kratos/cmd/kratos/internal/new" + "github.com/go-kratos/kratos/cmd/kratos/internal/proto" + "github.com/spf13/cobra" +) + +var rootCmd = &cobra.Command{ + Use: "kratos", + Short: "Kratos: An elegant toolkit for Go microservices.", + Long: `Kratos: An elegant toolkit for Go microservices.`, + Version: Version, +} + +func init() { + rootCmd.AddCommand(new.CmdNew) + rootCmd.AddCommand(proto.CmdProto) +} + +func main() { + if err := rootCmd.Execute(); err != nil { + log.Fatal(err) + } +} diff --git a/cmd/kratos/version.go b/cmd/kratos/version.go new file mode 100644 index 000000000..8d657404f --- /dev/null +++ b/cmd/kratos/version.go @@ -0,0 +1,13 @@ +package main + +// go build -ldflags "-X main.Version=x.y.yz" +var ( + // Version is the version of the compiled software. + Version string = "v2.0.0" + // Branch is current branch name the code is built off + Branch string + // Revision is the short commit hash of source tree + Revision string + // BuildDate is the date when the binary was built. + BuildDate string +) diff --git a/cmd/protoc-gen-go-errors/errors.go b/cmd/protoc-gen-go-errors/errors.go new file mode 100644 index 000000000..081b132eb --- /dev/null +++ b/cmd/protoc-gen-go-errors/errors.go @@ -0,0 +1,58 @@ +package main + +import ( + pb "github.com/go-kratos/kratos/v2/api/kratos/api" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/proto" +) + +const ( + errorsPackage = protogen.GoImportPath("github.com/go-kratos/kratos/v2/errors") +) + +// generateFile generates a _http.pb.go file containing kratos errors definitions. +func generateFile(gen *protogen.Plugin, file *protogen.File) *protogen.GeneratedFile { + if len(file.Enums) == 0 { + return nil + } + filename := file.GeneratedFilenamePrefix + "_errors.pb.go" + g := gen.NewGeneratedFile(filename, file.GoImportPath) + g.P("// Code generated by protoc-gen-go-errors. DO NOT EDIT.") + g.P() + g.P("package ", file.GoPackageName) + g.P() + generateFileContent(gen, file, g) + return g +} + +// generateFileContent generates the kratos errors definitions, excluding the package statement. 
+func generateFileContent(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile) { + if len(file.Enums) == 0 { + return + } + + g.P("// This is a compile-time assertion to ensure that this generated file") + g.P("// is compatible with the kratos package it is being compiled against.") + g.P("const _ = ", errorsPackage.Ident("SupportPackageIsVersion1")) + g.P() + for _, enum := range file.Enums { + genErrorsReason(gen, file, g, enum) + } +} + +func genErrorsReason(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, enum *protogen.Enum) { + err := proto.GetExtension(enum.Desc.Options(), pb.E_Errors) + if ok := err.(bool); !ok { + return + } + var ew errorWrapper + for _, v := range enum.Values { + err := &errorInfo{ + Name: string(enum.Desc.Name()), + Value: string(v.Desc.Name()), + } + ew.Errors = append(ew.Errors, err) + } + g.P(ew.execute()) +} diff --git a/cmd/protoc-gen-go-errors/go.mod b/cmd/protoc-gen-go-errors/go.mod new file mode 100644 index 000000000..bb42ca06f --- /dev/null +++ b/cmd/protoc-gen-go-errors/go.mod @@ -0,0 +1,9 @@ +module github.com/go-kratos/kratos/cmd/protoc-gen-go-errors + +go 1.15 + +require ( + github.com/go-kratos/kratos/v2 v2.0.0-20210217083752-d86d233d93ce + github.com/golang/protobuf v1.4.3 + google.golang.org/protobuf v1.25.0 +) diff --git a/cmd/protoc-gen-go-errors/go.sum b/cmd/protoc-gen-go-errors/go.sum new file mode 100644 index 000000000..c1d1b7f04 --- /dev/null +++ b/cmd/protoc-gen-go-errors/go.sum @@ -0,0 +1,101 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/go-kratos/kratos v0.6.0 h1:aGuIQQoj1EiWtBCIaPHvhPBcDx3WfL/Mw6q+5C5ehgg= +github.com/go-kratos/kratos/v2 v2.0.0-20201205165131-10618a745c96 h1:Qyiy167FmsHZucnc4vZ+hv938YyNTkodXVgb5PC2Y+U= +github.com/go-kratos/kratos/v2 v2.0.0-20201205165131-10618a745c96/go.mod h1:YwgE84UUomqngCzthva1VOYcuIZqXtYo5nTu22i1nbo= +github.com/go-kratos/kratos/v2 v2.0.0-20201205170920-f2b7f99a678c h1:V4dxjxZX4QEp8vAtumiA/6ibbqU9XZUUTA81piwk8nc= +github.com/go-kratos/kratos/v2 v2.0.0-20201205170920-f2b7f99a678c/go.mod h1:YwgE84UUomqngCzthva1VOYcuIZqXtYo5nTu22i1nbo= +github.com/go-kratos/kratos/v2 v2.0.0-20210207074933-00633b4860e1 h1:/blwKtlboosqLB9p+oICb8LkS13Ph0aP8V4rTil8pxE= 
+github.com/go-kratos/kratos/v2 v2.0.0-20210214123813-9afce08b3687 h1:3CVMCLzegnkCewvFoPBKqaYgmXG2zPCeV7Nro/JVyTk= +github.com/go-kratos/kratos/v2 v2.0.0-20210214172044-a42fa7820493 h1:39X3q7mGWL9yYZl9jwbHjLHzlsZlYajKQdewdignTVU= +github.com/go-kratos/kratos/v2 v2.0.0-20210214172044-a42fa7820493/go.mod h1:MIIjRu+ZXyn7IYlM1TBHbr75RLZOZM/CBBjT6luWL+Q= +github.com/go-kratos/kratos/v2 v2.0.0-20210217083752-d86d233d93ce h1:LfOsLN9s8tAxR8xIZGWQvEVWxHfipTnBSE0dvG4h3k8= +github.com/go-kratos/kratos/v2 v2.0.0-20210217083752-d86d233d93ce/go.mod h1:oLvFyDBJkkWN8TPqb+NmpvRrSy9uM/K+XQubVRc11a8= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20210114201628-6edceaf6022f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod 
h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/cmd/protoc-gen-go-errors/main.go b/cmd/protoc-gen-go-errors/main.go new file mode 100644 index 000000000..e590c0bc5 --- /dev/null +++ b/cmd/protoc-gen-go-errors/main.go @@ -0,0 +1,35 @@ +package main + +import ( + "flag" + "fmt" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/types/pluginpb" +) + +const version = "0.0.1" + +func main() { + showVersion := flag.Bool("version", false, "print the version and exit") + flag.Parse() + if *showVersion { + fmt.Printf("protoc-gen-go-errors %v\n", version) + return + } + + var flags flag.FlagSet + + protogen.Options{ + ParamFunc: flags.Set, + }.Run(func(gen *protogen.Plugin) error { + gen.SupportedFeatures = uint64(pluginpb.CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL) + for _, f := range gen.Files { + if !f.Generate { + continue + } + generateFile(gen, f) + } + return nil + }) +} diff --git a/cmd/protoc-gen-go-errors/template.go b/cmd/protoc-gen-go-errors/template.go new file mode 100644 index 000000000..1d86bb1d2 --- /dev/null +++ b/cmd/protoc-gen-go-errors/template.go @@ -0,0 +1,40 @@ +package main + +import ( + "bytes" + "text/template" +) + +var errorsTemplate = `const ( +{{ range .Errors }} + Errors_{{.Value}} = "{{.Name}}_{{.Value}}" +{{- end }} +) + +{{ range .Errors }} + +func Is{{.Value}}(err error) bool { + return errors.Reason(err) == Errors_{{.Value}} +} +{{- end }} +` + +type errorInfo struct { + Name string + Value string +} +type errorWrapper struct { + Errors []*errorInfo +} + +func (e *errorWrapper) execute() string { + buf := new(bytes.Buffer) + tmpl, err := template.New("errors").Parse(errorsTemplate) + if err != nil { + panic(err) + } + if err := tmpl.Execute(buf, e); err != nil { + panic(err) + } + return string(buf.Bytes()) +} diff --git a/cmd/protoc-gen-go-http/go.mod b/cmd/protoc-gen-go-http/go.mod new file mode 100644 index 000000000..e7cfe961e --- /dev/null +++ b/cmd/protoc-gen-go-http/go.mod @@ -0,0 +1,11 @@ +module github.com/go-kratos/kratos/cmd/protoc-gen-go-http + +go 1.15 + +require ( + github.com/go-kratos/kratos/v2 v2.0.0-20210217083752-d86d233d93ce + github.com/golang/protobuf v1.4.3 + google.golang.org/genproto v0.0.0-20210202153253-cf70463f6119 + google.golang.org/grpc v1.35.0 + google.golang.org/protobuf v1.25.0 +) diff --git 
a/cmd/protoc-gen-go-http/go.sum b/cmd/protoc-gen-go-http/go.sum new file mode 100644 index 000000000..1ae024583 --- /dev/null +++ b/cmd/protoc-gen-go-http/go.sum @@ -0,0 +1,99 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/go-kratos/kratos/v2 v2.0.0-20210210104528-47c8db1163db h1:p5pejan0Lbn7e43dQCUTzqOswWJqLIE5lMU5TUbwCt4= +github.com/go-kratos/kratos/v2 v2.0.0-20210210104528-47c8db1163db/go.mod h1:MIIjRu+ZXyn7IYlM1TBHbr75RLZOZM/CBBjT6luWL+Q= +github.com/go-kratos/kratos/v2 v2.0.0-20210211132943-131c3975fc3d/go.mod h1:MIIjRu+ZXyn7IYlM1TBHbr75RLZOZM/CBBjT6luWL+Q= +github.com/go-kratos/kratos/v2 v2.0.0-20210217083752-d86d233d93ce/go.mod h1:oLvFyDBJkkWN8TPqb+NmpvRrSy9uM/K+XQubVRc11a8= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.5.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20210114201628-6edceaf6022f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210202153253-cf70463f6119 h1:m9+RjTMas6brUP8DBxSAa/WIPFy7FIhKpvk+9Ppce8E= +google.golang.org/genproto v0.0.0-20210202153253-cf70463f6119/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.35.0 h1:TwIQcH3es+MojMVojxxfQ3l3OF2KzlRxML2xZq0kRo8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/cmd/protoc-gen-go-http/http.go b/cmd/protoc-gen-go-http/http.go new file mode 100644 index 000000000..d03f75306 --- /dev/null +++ b/cmd/protoc-gen-go-http/http.go @@ -0,0 +1,210 @@ +package main + +import ( + "fmt" + "strings" + + "google.golang.org/genproto/googleapis/api/annotations" + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/proto" + 
"google.golang.org/protobuf/types/descriptorpb" +) + +const ( + contextPackage = protogen.GoImportPath("context") + httpPackage = protogen.GoImportPath("net/http") + transportPackage = protogen.GoImportPath("github.com/go-kratos/kratos/v2/transport/http") + middlewarePackage = protogen.GoImportPath("github.com/go-kratos/kratos/v2/middleware") +) + +var methodSets = make(map[string]int) + +// generateFile generates a _http.pb.go file containing kratos errors definitions. +func generateFile(gen *protogen.Plugin, file *protogen.File) *protogen.GeneratedFile { + if len(file.Services) == 0 { + return nil + } + filename := file.GeneratedFilenamePrefix + "_http.pb.go" + g := gen.NewGeneratedFile(filename, file.GoImportPath) + g.P("// Code generated by protoc-gen-go-http. DO NOT EDIT.") + g.P() + g.P("package ", file.GoPackageName) + g.P() + generateFileContent(gen, file, g) + return g +} + +// generateFileContent generates the kratos errors definitions, excluding the package statement. +func generateFileContent(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile) { + if len(file.Services) == 0 { + return + } + g.P("// This is a compile-time assertion to ensure that this generated file") + g.P("// is compatible with the kratos package it is being compiled against.") + g.P("// ", contextPackage.Ident(""), "/", httpPackage.Ident(""), "/", middlewarePackage.Ident("")) + g.P("const _ = ", transportPackage.Ident("SupportPackageIsVersion1")) + g.P() + + for _, service := range file.Services { + genService(gen, file, g, service) + } +} + +func genService(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, service *protogen.Service) { + if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() { + g.P("//") + g.P(deprecationComment) + } + // HTTP Server. + sd := &serviceDesc{ + ServiceType: service.GoName, + ServiceName: string(service.Desc.FullName()), + Metadata: file.Desc.Path(), + } + for _, method := range service.Methods { + rule, ok := proto.GetExtension(method.Desc.Options(), annotations.E_Http).(*annotations.HttpRule) + if rule != nil && ok { + for _, bind := range rule.AdditionalBindings { + sd.Methods = append(sd.Methods, buildHTTPRule(method, bind)) + } + sd.Methods = append(sd.Methods, buildHTTPRule(method, rule)) + } else { + path := fmt.Sprintf("/%s/%s", service.Desc.FullName(), method.Desc.Name()) + sd.Methods = append(sd.Methods, buildMethodDesc(method, "POST", path)) + + } + } + g.P(sd.execute()) +} + +func buildHTTPRule(m *protogen.Method, rule *annotations.HttpRule) *methodDesc { + var ( + path string + method string + body string + responseBody string + ) + switch pattern := rule.Pattern.(type) { + case *annotations.HttpRule_Get: + path = pattern.Get + method = "GET" + case *annotations.HttpRule_Put: + path = pattern.Put + method = "PUT" + case *annotations.HttpRule_Post: + path = pattern.Post + method = "POST" + case *annotations.HttpRule_Delete: + path = pattern.Delete + method = "DELETE" + case *annotations.HttpRule_Patch: + path = pattern.Patch + method = "PATCH" + case *annotations.HttpRule_Custom: + path = pattern.Custom.Path + method = pattern.Custom.Kind + } + body = rule.Body + responseBody = rule.ResponseBody + md := buildMethodDesc(m, method, path) + if body != "" { + md.Body = "." + camelCaseVars(body) + } + if responseBody != "" { + md.ResponseBody = "." 
+ camelCaseVars(responseBody) + } + return md +} + +func buildMethodDesc(m *protogen.Method, method, path string) *methodDesc { + defer func() { methodSets[m.GoName]++ }() + return &methodDesc{ + Name: m.GoName, + Num: methodSets[m.GoName], + Request: m.Input.GoIdent.GoName, + Reply: m.Output.GoIdent.GoName, + Path: path, + Method: method, + Vars: buildPathVars(m, path), + } +} + +func buildPathVars(method *protogen.Method, path string) (res []string) { + for _, v := range strings.Split(path, "/") { + if strings.HasPrefix(v, "{") && strings.HasSuffix(v, "}") { + name := strings.TrimRight(strings.TrimLeft(v, "{"), "}") + res = append(res, name) + } + } + return +} + +func camelCaseVars(s string) string { + var ( + vars []string + subs = strings.Split(s, ".") + ) + for _, sub := range subs { + vars = append(vars, camelCase(sub)) + } + return strings.Join(vars, ".") +} + +// camelCase returns the CamelCased name. +// If there is an interior underscore followed by a lower case letter, +// drop the underscore and convert the letter to upper case. +// There is a remote possibility of this rewrite causing a name collision, +// but it's so remote we're prepared to pretend it's nonexistent - since the +// C++ generator lowercases names, it's extremely unlikely to have two fields +// with different capitalizations. +// In short, _my_field_name_2 becomes XMyFieldName_2. +func camelCase(s string) string { + if s == "" { + return "" + } + t := make([]byte, 0, 32) + i := 0 + if s[0] == '_' { + // Need a capital letter; drop the '_'. + t = append(t, 'X') + i++ + } + // Invariant: if the next letter is lower case, it must be converted + // to upper case. + // That is, we process a word at a time, where words are marked by _ or + // upper case letter. Digits are treated as words. + for ; i < len(s); i++ { + c := s[i] + if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) { + continue // Skip the underscore in s. + } + if isASCIIDigit(c) { + t = append(t, c) + continue + } + // Assume we have a letter now - if not, it's a bogus identifier. + // The next word is a sequence of characters that must start upper case. + if isASCIILower(c) { + c ^= ' ' // Make it a capital letter. + } + t = append(t, c) // Guaranteed not lower case. + // Accept lower case sequence that follows. + for i+1 < len(s) && isASCIILower(s[i+1]) { + i++ + t = append(t, s[i]) + } + } + return string(t) +} + +// Is c an ASCII lower-case letter? +func isASCIILower(c byte) bool { + return 'a' <= c && c <= 'z' +} + +// Is c an ASCII digit? +func isASCIIDigit(c byte) bool { + return '0' <= c && c <= '9' +} + +const deprecationComment = "// Deprecated: Do not use." diff --git a/cmd/protoc-gen-go-http/internal/testproto/echo_service.pb.go b/cmd/protoc-gen-go-http/internal/testproto/echo_service.pb.go new file mode 100644 index 000000000..5820c3787 --- /dev/null +++ b/cmd/protoc-gen-go-http/internal/testproto/echo_service.pb.go @@ -0,0 +1,669 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.25.0 +// protoc v3.13.0 +// source: echo_service.proto + +package testproto + +import ( + proto "github.com/golang/protobuf/proto" + _struct "github.com/golang/protobuf/ptypes/struct" + _ "google.golang.org/genproto/googleapis/api/annotations" + field_mask "google.golang.org/genproto/protobuf/field_mask" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +type Corpus int32 + +const ( + Corpus_UNIVERSAL Corpus = 0 + Corpus_WEB Corpus = 1 + Corpus_IMAGES Corpus = 2 + Corpus_LOCAL Corpus = 3 + Corpus_NEWS Corpus = 4 + Corpus_PRODUCTS Corpus = 5 + Corpus_VIDEO Corpus = 6 +) + +// Enum value maps for Corpus. +var ( + Corpus_name = map[int32]string{ + 0: "UNIVERSAL", + 1: "WEB", + 2: "IMAGES", + 3: "LOCAL", + 4: "NEWS", + 5: "PRODUCTS", + 6: "VIDEO", + } + Corpus_value = map[string]int32{ + "UNIVERSAL": 0, + "WEB": 1, + "IMAGES": 2, + "LOCAL": 3, + "NEWS": 4, + "PRODUCTS": 5, + "VIDEO": 6, + } +) + +func (x Corpus) Enum() *Corpus { + p := new(Corpus) + *p = x + return p +} + +func (x Corpus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Corpus) Descriptor() protoreflect.EnumDescriptor { + return file_echo_service_proto_enumTypes[0].Descriptor() +} + +func (Corpus) Type() protoreflect.EnumType { + return &file_echo_service_proto_enumTypes[0] +} + +func (x Corpus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Corpus.Descriptor instead. +func (Corpus) EnumDescriptor() ([]byte, []int) { + return file_echo_service_proto_rawDescGZIP(), []int{0} +} + +// Embedded represents a message embedded in SimpleMessage. +type Embedded struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Mark: + // *Embedded_Progress + // *Embedded_Note + Mark isEmbedded_Mark `protobuf_oneof:"mark"` +} + +func (x *Embedded) Reset() { + *x = Embedded{} + if protoimpl.UnsafeEnabled { + mi := &file_echo_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Embedded) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Embedded) ProtoMessage() {} + +func (x *Embedded) ProtoReflect() protoreflect.Message { + mi := &file_echo_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Embedded.ProtoReflect.Descriptor instead. 
+func (*Embedded) Descriptor() ([]byte, []int) { + return file_echo_service_proto_rawDescGZIP(), []int{0} +} + +func (m *Embedded) GetMark() isEmbedded_Mark { + if m != nil { + return m.Mark + } + return nil +} + +func (x *Embedded) GetProgress() int64 { + if x, ok := x.GetMark().(*Embedded_Progress); ok { + return x.Progress + } + return 0 +} + +func (x *Embedded) GetNote() string { + if x, ok := x.GetMark().(*Embedded_Note); ok { + return x.Note + } + return "" +} + +type isEmbedded_Mark interface { + isEmbedded_Mark() +} + +type Embedded_Progress struct { + Progress int64 `protobuf:"varint,1,opt,name=progress,proto3,oneof"` +} + +type Embedded_Note struct { + Note string `protobuf:"bytes,2,opt,name=note,proto3,oneof"` +} + +func (*Embedded_Progress) isEmbedded_Mark() {} + +func (*Embedded_Note) isEmbedded_Mark() {} + +// SimpleMessage represents a simple message sent to the Echo service. +type SimpleMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Id represents the message identifier. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Num int64 `protobuf:"varint,2,opt,name=num,proto3" json:"num,omitempty"` + // Types that are assignable to Code: + // *SimpleMessage_LineNum + // *SimpleMessage_Lang + Code isSimpleMessage_Code `protobuf_oneof:"code"` + Status *Embedded `protobuf:"bytes,5,opt,name=status,proto3" json:"status,omitempty"` + // Types that are assignable to Ext: + // *SimpleMessage_En + // *SimpleMessage_No + Ext isSimpleMessage_Ext `protobuf_oneof:"ext"` + Corpus Corpus `protobuf:"varint,8,opt,name=corpus,proto3,enum=testproto.Corpus" json:"corpus,omitempty"` +} + +func (x *SimpleMessage) Reset() { + *x = SimpleMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_echo_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SimpleMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SimpleMessage) ProtoMessage() {} + +func (x *SimpleMessage) ProtoReflect() protoreflect.Message { + mi := &file_echo_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SimpleMessage.ProtoReflect.Descriptor instead. 
+func (*SimpleMessage) Descriptor() ([]byte, []int) { + return file_echo_service_proto_rawDescGZIP(), []int{1} +} + +func (x *SimpleMessage) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *SimpleMessage) GetNum() int64 { + if x != nil { + return x.Num + } + return 0 +} + +func (m *SimpleMessage) GetCode() isSimpleMessage_Code { + if m != nil { + return m.Code + } + return nil +} + +func (x *SimpleMessage) GetLineNum() int64 { + if x, ok := x.GetCode().(*SimpleMessage_LineNum); ok { + return x.LineNum + } + return 0 +} + +func (x *SimpleMessage) GetLang() string { + if x, ok := x.GetCode().(*SimpleMessage_Lang); ok { + return x.Lang + } + return "" +} + +func (x *SimpleMessage) GetStatus() *Embedded { + if x != nil { + return x.Status + } + return nil +} + +func (m *SimpleMessage) GetExt() isSimpleMessage_Ext { + if m != nil { + return m.Ext + } + return nil +} + +func (x *SimpleMessage) GetEn() int64 { + if x, ok := x.GetExt().(*SimpleMessage_En); ok { + return x.En + } + return 0 +} + +func (x *SimpleMessage) GetNo() *Embedded { + if x, ok := x.GetExt().(*SimpleMessage_No); ok { + return x.No + } + return nil +} + +func (x *SimpleMessage) GetCorpus() Corpus { + if x != nil { + return x.Corpus + } + return Corpus_UNIVERSAL +} + +type isSimpleMessage_Code interface { + isSimpleMessage_Code() +} + +type SimpleMessage_LineNum struct { + LineNum int64 `protobuf:"varint,3,opt,name=line_num,json=lineNum,proto3,oneof"` +} + +type SimpleMessage_Lang struct { + Lang string `protobuf:"bytes,4,opt,name=lang,proto3,oneof"` +} + +func (*SimpleMessage_LineNum) isSimpleMessage_Code() {} + +func (*SimpleMessage_Lang) isSimpleMessage_Code() {} + +type isSimpleMessage_Ext interface { + isSimpleMessage_Ext() +} + +type SimpleMessage_En struct { + En int64 `protobuf:"varint,6,opt,name=en,proto3,oneof"` +} + +type SimpleMessage_No struct { + No *Embedded `protobuf:"bytes,7,opt,name=no,proto3,oneof"` +} + +func (*SimpleMessage_En) isSimpleMessage_Ext() {} + +func (*SimpleMessage_No) isSimpleMessage_Ext() {} + +// DynamicMessage represents a message which can have its structure +// built dynamically using Struct and Values. +type DynamicMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StructField *_struct.Struct `protobuf:"bytes,1,opt,name=struct_field,json=structField,proto3" json:"struct_field,omitempty"` + ValueField *_struct.Value `protobuf:"bytes,2,opt,name=value_field,json=valueField,proto3" json:"value_field,omitempty"` +} + +func (x *DynamicMessage) Reset() { + *x = DynamicMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_echo_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DynamicMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DynamicMessage) ProtoMessage() {} + +func (x *DynamicMessage) ProtoReflect() protoreflect.Message { + mi := &file_echo_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DynamicMessage.ProtoReflect.Descriptor instead. 
+func (*DynamicMessage) Descriptor() ([]byte, []int) { + return file_echo_service_proto_rawDescGZIP(), []int{2} +} + +func (x *DynamicMessage) GetStructField() *_struct.Struct { + if x != nil { + return x.StructField + } + return nil +} + +func (x *DynamicMessage) GetValueField() *_struct.Value { + if x != nil { + return x.ValueField + } + return nil +} + +type DynamicMessageUpdate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Body *DynamicMessage `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *DynamicMessageUpdate) Reset() { + *x = DynamicMessageUpdate{} + if protoimpl.UnsafeEnabled { + mi := &file_echo_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DynamicMessageUpdate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DynamicMessageUpdate) ProtoMessage() {} + +func (x *DynamicMessageUpdate) ProtoReflect() protoreflect.Message { + mi := &file_echo_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DynamicMessageUpdate.ProtoReflect.Descriptor instead. +func (*DynamicMessageUpdate) Descriptor() ([]byte, []int) { + return file_echo_service_proto_rawDescGZIP(), []int{3} +} + +func (x *DynamicMessageUpdate) GetBody() *DynamicMessage { + if x != nil { + return x.Body + } + return nil +} + +func (x *DynamicMessageUpdate) GetUpdateMask() *field_mask.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +var File_echo_service_proto protoreflect.FileDescriptor + +var file_echo_service_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x65, 0x63, 0x68, 0x6f, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x74, 0x65, 0x73, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x46, 0x0a, + 0x08, 0x45, 0x6d, 0x62, 0x65, 0x64, 0x64, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x70, 0x72, 0x6f, + 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x08, 0x70, + 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x04, 0x6e, 0x6f, 0x74, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x6f, 0x74, 0x65, 0x42, 0x06, 0x0a, + 0x04, 0x6d, 0x61, 0x72, 0x6b, 0x22, 0x84, 0x02, 0x0a, 0x0d, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6e, 0x75, 0x6d, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6e, 0x75, 0x6d, 0x12, 0x1b, 0x0a, 0x08, 0x6c, 0x69, 0x6e, + 
0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x07, 0x6c, + 0x69, 0x6e, 0x65, 0x4e, 0x75, 0x6d, 0x12, 0x14, 0x0a, 0x04, 0x6c, 0x61, 0x6e, 0x67, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x04, 0x6c, 0x61, 0x6e, 0x67, 0x12, 0x2b, 0x0a, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, + 0x65, 0x73, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x62, 0x65, 0x64, 0x64, 0x65, + 0x64, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x10, 0x0a, 0x02, 0x65, 0x6e, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x02, 0x65, 0x6e, 0x12, 0x25, 0x0a, 0x02, 0x6e, + 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x62, 0x65, 0x64, 0x64, 0x65, 0x64, 0x48, 0x01, 0x52, 0x02, + 0x6e, 0x6f, 0x12, 0x29, 0x0a, 0x06, 0x63, 0x6f, 0x72, 0x70, 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, + 0x6f, 0x72, 0x70, 0x75, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x72, 0x70, 0x75, 0x73, 0x42, 0x06, 0x0a, + 0x04, 0x63, 0x6f, 0x64, 0x65, 0x42, 0x05, 0x0a, 0x03, 0x65, 0x78, 0x74, 0x22, 0x85, 0x01, 0x0a, + 0x0e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x3a, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0b, + 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x37, 0x0a, 0x0b, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x22, 0x82, 0x01, 0x0a, 0x14, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x2d, 0x0a, + 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x74, 0x65, + 0x73, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x3b, 0x0a, 0x0b, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x2a, 0x5a, 0x0a, 0x06, 0x43, 0x6f, 0x72, + 0x70, 0x75, 0x73, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x49, 0x56, 0x45, 0x52, 0x53, 0x41, 0x4c, + 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x57, 0x45, 0x42, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x49, + 0x4d, 0x41, 0x47, 0x45, 0x53, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x4c, 0x4f, 0x43, 0x41, 0x4c, + 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x45, 0x57, 0x53, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, + 0x50, 0x52, 0x4f, 0x44, 0x55, 0x43, 0x54, 0x53, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x49, + 0x44, 0x45, 0x4f, 0x10, 0x06, 0x32, 0xbc, 0x04, 0x0a, 0x0b, 0x45, 0x63, 0x68, 0x6f, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 
0x65, 0x12, 0xf2, 0x01, 0x0a, 0x04, 0x45, 0x63, 0x68, 0x6f, 0x12, 0x18, + 0x2e, 0x74, 0x65, 0x73, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, + 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x18, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x22, 0xb5, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0xae, 0x01, 0x22, 0x15, 0x2f, 0x76, + 0x31, 0x2f, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2f, 0x65, 0x63, 0x68, 0x6f, 0x2f, 0x7b, + 0x69, 0x64, 0x7d, 0x5a, 0x1d, 0x12, 0x1b, 0x2f, 0x76, 0x31, 0x2f, 0x65, 0x78, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x2f, 0x65, 0x63, 0x68, 0x6f, 0x2f, 0x7b, 0x69, 0x64, 0x7d, 0x2f, 0x7b, 0x6e, 0x75, + 0x6d, 0x7d, 0x5a, 0x24, 0x12, 0x22, 0x2f, 0x76, 0x31, 0x2f, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, + 0x65, 0x2f, 0x65, 0x63, 0x68, 0x6f, 0x2f, 0x7b, 0x69, 0x64, 0x7d, 0x2f, 0x7b, 0x6e, 0x75, 0x6d, + 0x7d, 0x2f, 0x7b, 0x6c, 0x61, 0x6e, 0x67, 0x7d, 0x5a, 0x31, 0x12, 0x2f, 0x2f, 0x76, 0x31, 0x2f, + 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2f, 0x65, 0x63, 0x68, 0x6f, 0x31, 0x2f, 0x7b, 0x69, + 0x64, 0x7d, 0x2f, 0x7b, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x7d, 0x2f, 0x7b, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x6e, 0x6f, 0x74, 0x65, 0x7d, 0x5a, 0x1d, 0x12, 0x1b, 0x2f, + 0x76, 0x31, 0x2f, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2f, 0x65, 0x63, 0x68, 0x6f, 0x32, + 0x2f, 0x7b, 0x6e, 0x6f, 0x2e, 0x6e, 0x6f, 0x74, 0x65, 0x7d, 0x12, 0x60, 0x0a, 0x08, 0x45, 0x63, + 0x68, 0x6f, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x18, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x1a, 0x18, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x69, 0x6d, + 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x1a, 0x22, 0x15, 0x2f, 0x76, 0x31, 0x2f, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2f, + 0x65, 0x63, 0x68, 0x6f, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x61, 0x0a, 0x0a, + 0x45, 0x63, 0x68, 0x6f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x18, 0x2e, 0x74, 0x65, 0x73, + 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x1a, 0x18, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x1f, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x2a, 0x17, 0x2f, 0x76, 0x31, 0x2f, 0x65, 0x78, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x2f, 0x65, 0x63, 0x68, 0x6f, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, + 0x73, 0x0a, 0x09, 0x45, 0x63, 0x68, 0x6f, 0x50, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1f, 0x2e, 0x74, + 0x65, 0x73, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x1a, 0x1f, 0x2e, + 0x74, 0x65, 0x73, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, 0x24, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x32, 0x16, 0x2f, 0x76, 0x31, 0x2f, 0x65, 0x78, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x2f, 0x65, 0x63, 0x68, 0x6f, 0x5f, 0x70, 0x61, 0x74, 0x63, 0x68, 0x3a, 0x04, + 0x62, 0x6f, 0x64, 0x79, 0x42, 0x51, 0x5a, 0x4f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2d, 0x6b, 0x72, 
0x61, 0x74, 0x6f, 0x73, 0x2f, 0x6b, 0x72, 0x61, + 0x74, 0x6f, 0x73, 0x2f, 0x63, 0x6d, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, + 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2d, 0x68, 0x74, 0x74, 0x70, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3b, 0x74, 0x65, + 0x73, 0x74, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_echo_service_proto_rawDescOnce sync.Once + file_echo_service_proto_rawDescData = file_echo_service_proto_rawDesc +) + +func file_echo_service_proto_rawDescGZIP() []byte { + file_echo_service_proto_rawDescOnce.Do(func() { + file_echo_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_echo_service_proto_rawDescData) + }) + return file_echo_service_proto_rawDescData +} + +var file_echo_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_echo_service_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_echo_service_proto_goTypes = []interface{}{ + (Corpus)(0), // 0: testproto.Corpus + (*Embedded)(nil), // 1: testproto.Embedded + (*SimpleMessage)(nil), // 2: testproto.SimpleMessage + (*DynamicMessage)(nil), // 3: testproto.DynamicMessage + (*DynamicMessageUpdate)(nil), // 4: testproto.DynamicMessageUpdate + (*_struct.Struct)(nil), // 5: google.protobuf.Struct + (*_struct.Value)(nil), // 6: google.protobuf.Value + (*field_mask.FieldMask)(nil), // 7: google.protobuf.FieldMask +} +var file_echo_service_proto_depIdxs = []int32{ + 1, // 0: testproto.SimpleMessage.status:type_name -> testproto.Embedded + 1, // 1: testproto.SimpleMessage.no:type_name -> testproto.Embedded + 0, // 2: testproto.SimpleMessage.corpus:type_name -> testproto.Corpus + 5, // 3: testproto.DynamicMessage.struct_field:type_name -> google.protobuf.Struct + 6, // 4: testproto.DynamicMessage.value_field:type_name -> google.protobuf.Value + 3, // 5: testproto.DynamicMessageUpdate.body:type_name -> testproto.DynamicMessage + 7, // 6: testproto.DynamicMessageUpdate.update_mask:type_name -> google.protobuf.FieldMask + 2, // 7: testproto.EchoService.Echo:input_type -> testproto.SimpleMessage + 2, // 8: testproto.EchoService.EchoBody:input_type -> testproto.SimpleMessage + 2, // 9: testproto.EchoService.EchoDelete:input_type -> testproto.SimpleMessage + 4, // 10: testproto.EchoService.EchoPatch:input_type -> testproto.DynamicMessageUpdate + 2, // 11: testproto.EchoService.Echo:output_type -> testproto.SimpleMessage + 2, // 12: testproto.EchoService.EchoBody:output_type -> testproto.SimpleMessage + 2, // 13: testproto.EchoService.EchoDelete:output_type -> testproto.SimpleMessage + 4, // 14: testproto.EchoService.EchoPatch:output_type -> testproto.DynamicMessageUpdate + 11, // [11:15] is the sub-list for method output_type + 7, // [7:11] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_echo_service_proto_init() } +func file_echo_service_proto_init() { + if File_echo_service_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_echo_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Embedded); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_echo_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SimpleMessage); 
i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_echo_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DynamicMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_echo_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DynamicMessageUpdate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_echo_service_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Embedded_Progress)(nil), + (*Embedded_Note)(nil), + } + file_echo_service_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*SimpleMessage_LineNum)(nil), + (*SimpleMessage_Lang)(nil), + (*SimpleMessage_En)(nil), + (*SimpleMessage_No)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_echo_service_proto_rawDesc, + NumEnums: 1, + NumMessages: 4, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_echo_service_proto_goTypes, + DependencyIndexes: file_echo_service_proto_depIdxs, + EnumInfos: file_echo_service_proto_enumTypes, + MessageInfos: file_echo_service_proto_msgTypes, + }.Build() + File_echo_service_proto = out.File + file_echo_service_proto_rawDesc = nil + file_echo_service_proto_goTypes = nil + file_echo_service_proto_depIdxs = nil +} diff --git a/cmd/protoc-gen-go-http/internal/testproto/echo_service.proto b/cmd/protoc-gen-go-http/internal/testproto/echo_service.proto new file mode 100644 index 000000000..adee8c6b3 --- /dev/null +++ b/cmd/protoc-gen-go-http/internal/testproto/echo_service.proto @@ -0,0 +1,101 @@ +syntax = "proto3"; + +option go_package = "github.com/go-kratos/kratos/cmd/protoc-gen-go-http/internal/testproto;testproto"; + +package testproto; + +import "google/api/annotations.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/struct.proto"; + +enum Corpus { + UNIVERSAL = 0; + WEB = 1; + IMAGES = 2; + LOCAL = 3; + NEWS = 4; + PRODUCTS = 5; + VIDEO = 6; +} + +// Embedded represents a message embedded in SimpleMessage. +message Embedded { + oneof mark { + int64 progress = 1; + string note = 2; + } +} + +// SimpleMessage represents a simple message sent to the Echo service. +message SimpleMessage { + // Id represents the message identifier. + string id = 1; + int64 num = 2; + oneof code { + int64 line_num = 3; + string lang = 4; + } + Embedded status = 5; + oneof ext { + int64 en = 6; + Embedded no = 7; + } + Corpus corpus = 8; +} + +// DynamicMessage represents a message which can have its structure +// built dynamically using Struct and Values. +message DynamicMessage { + google.protobuf.Struct struct_field = 1; + google.protobuf.Value value_field = 2; +} + +message DynamicMessageUpdate { + DynamicMessage body = 1; + google.protobuf.FieldMask update_mask = 2; +} + +// Echo service responds to incoming echo requests. +service EchoService { + // Echo method receives a simple message and returns it. + // + // The message posted as the id parameter will also be + // returned. 
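+  //
+  // In addition to the POST binding on /v1/example/echo/{id}, the
+  // additional_bindings below expose the same RPC over several GET routes
+  // with different combinations of path parameters.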
+ rpc Echo(SimpleMessage) returns (SimpleMessage) { + option (google.api.http) = { + post: "/v1/example/echo/{id}" + additional_bindings { + get: "/v1/example/echo/{id}/{num}" + } + additional_bindings { + get: "/v1/example/echo/{id}/{num}/{lang}" + } + additional_bindings { + get: "/v1/example/echo1/{id}/{line_num}/{status.note}" + } + additional_bindings { + get: "/v1/example/echo2/{no.note}" + } + }; + } + // EchoBody method receives a simple message and returns it. + rpc EchoBody(SimpleMessage) returns (SimpleMessage) { + option (google.api.http) = { + post: "/v1/example/echo_body" + body: "*" + }; + } + // EchoDelete method receives a simple message and returns it. + rpc EchoDelete(SimpleMessage) returns (SimpleMessage) { + option (google.api.http) = { + delete: "/v1/example/echo_delete" + }; + } + // EchoPatch method receives a NonStandardUpdateRequest and returns it. + rpc EchoPatch(DynamicMessageUpdate) returns (DynamicMessageUpdate) { + option (google.api.http) = { + patch: "/v1/example/echo_patch" + body: "body" + }; + } +} diff --git a/cmd/protoc-gen-go-http/internal/testproto/echo_service_grpc.pb.go b/cmd/protoc-gen-go-http/internal/testproto/echo_service_grpc.pb.go new file mode 100644 index 000000000..51c231a60 --- /dev/null +++ b/cmd/protoc-gen-go-http/internal/testproto/echo_service_grpc.pb.go @@ -0,0 +1,219 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package testproto + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion7 + +// EchoServiceClient is the client API for EchoService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type EchoServiceClient interface { + // Echo method receives a simple message and returns it. + // + // The message posted as the id parameter will also be + // returned. + Echo(ctx context.Context, in *SimpleMessage, opts ...grpc.CallOption) (*SimpleMessage, error) + // EchoBody method receives a simple message and returns it. + EchoBody(ctx context.Context, in *SimpleMessage, opts ...grpc.CallOption) (*SimpleMessage, error) + // EchoDelete method receives a simple message and returns it. + EchoDelete(ctx context.Context, in *SimpleMessage, opts ...grpc.CallOption) (*SimpleMessage, error) + // EchoPatch method receives a NonStandardUpdateRequest and returns it. + EchoPatch(ctx context.Context, in *DynamicMessageUpdate, opts ...grpc.CallOption) (*DynamicMessageUpdate, error) +} + +type echoServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewEchoServiceClient(cc grpc.ClientConnInterface) EchoServiceClient { + return &echoServiceClient{cc} +} + +func (c *echoServiceClient) Echo(ctx context.Context, in *SimpleMessage, opts ...grpc.CallOption) (*SimpleMessage, error) { + out := new(SimpleMessage) + err := c.cc.Invoke(ctx, "/testproto.EchoService/Echo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *echoServiceClient) EchoBody(ctx context.Context, in *SimpleMessage, opts ...grpc.CallOption) (*SimpleMessage, error) { + out := new(SimpleMessage) + err := c.cc.Invoke(ctx, "/testproto.EchoService/EchoBody", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *echoServiceClient) EchoDelete(ctx context.Context, in *SimpleMessage, opts ...grpc.CallOption) (*SimpleMessage, error) { + out := new(SimpleMessage) + err := c.cc.Invoke(ctx, "/testproto.EchoService/EchoDelete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *echoServiceClient) EchoPatch(ctx context.Context, in *DynamicMessageUpdate, opts ...grpc.CallOption) (*DynamicMessageUpdate, error) { + out := new(DynamicMessageUpdate) + err := c.cc.Invoke(ctx, "/testproto.EchoService/EchoPatch", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// EchoServiceServer is the server API for EchoService service. +// All implementations must embed UnimplementedEchoServiceServer +// for forward compatibility +type EchoServiceServer interface { + // Echo method receives a simple message and returns it. + // + // The message posted as the id parameter will also be + // returned. + Echo(context.Context, *SimpleMessage) (*SimpleMessage, error) + // EchoBody method receives a simple message and returns it. + EchoBody(context.Context, *SimpleMessage) (*SimpleMessage, error) + // EchoDelete method receives a simple message and returns it. + EchoDelete(context.Context, *SimpleMessage) (*SimpleMessage, error) + // EchoPatch method receives a NonStandardUpdateRequest and returns it. + EchoPatch(context.Context, *DynamicMessageUpdate) (*DynamicMessageUpdate, error) + mustEmbedUnimplementedEchoServiceServer() +} + +// UnimplementedEchoServiceServer must be embedded to have forward compatible implementations. +type UnimplementedEchoServiceServer struct { +} + +func (UnimplementedEchoServiceServer) Echo(context.Context, *SimpleMessage) (*SimpleMessage, error) { + return nil, status.Errorf(codes.Unimplemented, "method Echo not implemented") +} +func (UnimplementedEchoServiceServer) EchoBody(context.Context, *SimpleMessage) (*SimpleMessage, error) { + return nil, status.Errorf(codes.Unimplemented, "method EchoBody not implemented") +} +func (UnimplementedEchoServiceServer) EchoDelete(context.Context, *SimpleMessage) (*SimpleMessage, error) { + return nil, status.Errorf(codes.Unimplemented, "method EchoDelete not implemented") +} +func (UnimplementedEchoServiceServer) EchoPatch(context.Context, *DynamicMessageUpdate) (*DynamicMessageUpdate, error) { + return nil, status.Errorf(codes.Unimplemented, "method EchoPatch not implemented") +} +func (UnimplementedEchoServiceServer) mustEmbedUnimplementedEchoServiceServer() {} + +// UnsafeEchoServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to EchoServiceServer will +// result in compilation errors. 
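+// Implementations that want to stay forward compatible should embed
+// UnimplementedEchoServiceServer instead, so they keep compiling when new
+// methods are added to the service.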
+type UnsafeEchoServiceServer interface { + mustEmbedUnimplementedEchoServiceServer() +} + +func RegisterEchoServiceServer(s grpc.ServiceRegistrar, srv EchoServiceServer) { + s.RegisterService(&_EchoService_serviceDesc, srv) +} + +func _EchoService_Echo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SimpleMessage) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EchoServiceServer).Echo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/testproto.EchoService/Echo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EchoServiceServer).Echo(ctx, req.(*SimpleMessage)) + } + return interceptor(ctx, in, info, handler) +} + +func _EchoService_EchoBody_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SimpleMessage) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EchoServiceServer).EchoBody(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/testproto.EchoService/EchoBody", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EchoServiceServer).EchoBody(ctx, req.(*SimpleMessage)) + } + return interceptor(ctx, in, info, handler) +} + +func _EchoService_EchoDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SimpleMessage) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EchoServiceServer).EchoDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/testproto.EchoService/EchoDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EchoServiceServer).EchoDelete(ctx, req.(*SimpleMessage)) + } + return interceptor(ctx, in, info, handler) +} + +func _EchoService_EchoPatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DynamicMessageUpdate) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EchoServiceServer).EchoPatch(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/testproto.EchoService/EchoPatch", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EchoServiceServer).EchoPatch(ctx, req.(*DynamicMessageUpdate)) + } + return interceptor(ctx, in, info, handler) +} + +var _EchoService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "testproto.EchoService", + HandlerType: (*EchoServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Echo", + Handler: _EchoService_Echo_Handler, + }, + { + MethodName: "EchoBody", + Handler: _EchoService_EchoBody_Handler, + }, + { + MethodName: "EchoDelete", + Handler: _EchoService_EchoDelete_Handler, + }, + { + MethodName: "EchoPatch", + Handler: _EchoService_EchoPatch_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "echo_service.proto", +} diff --git a/cmd/protoc-gen-go-http/internal/testproto/echo_service_http.pb.go b/cmd/protoc-gen-go-http/internal/testproto/echo_service_http.pb.go new file mode 100644 index 000000000..dfb5932f1 --- /dev/null +++ 
b/cmd/protoc-gen-go-http/internal/testproto/echo_service_http.pb.go @@ -0,0 +1,240 @@ +// Code generated by protoc-gen-go-http. DO NOT EDIT. + +package testproto + +import ( + context "context" + middleware "github.com/go-kratos/kratos/v2/middleware" + http1 "github.com/go-kratos/kratos/v2/transport/http" + http "net/http" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the kratos package it is being compiled against. +// context./http./middleware. +const _ = http1.SupportPackageIsVersion1 + +type EchoServiceHTTPServer interface { + Echo(context.Context, *SimpleMessage) (*SimpleMessage, error) + + EchoBody(context.Context, *SimpleMessage) (*SimpleMessage, error) + + EchoDelete(context.Context, *SimpleMessage) (*SimpleMessage, error) + + EchoPatch(context.Context, *DynamicMessageUpdate) (*DynamicMessageUpdate, error) +} + +func RegisterEchoServiceHTTPServer(s http1.ServiceRegistrar, srv EchoServiceHTTPServer) { + s.RegisterService(&_HTTP_EchoService_serviceDesc, srv) +} + +func _HTTP_EchoService_Echo_0(srv interface{}, ctx context.Context, req *http.Request, dec func(interface{}) error, m middleware.Middleware) (interface{}, error) { + var in SimpleMessage + + if err := http1.BindVars(req, &in); err != nil { + return nil, err + } + + if err := http1.BindForm(req, &in); err != nil { + return nil, err + } + + h := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EchoServiceServer).Echo(ctx, &in) + } + out, err := m(h)(ctx, &in) + if err != nil { + return nil, err + } + return out, nil +} + +func _HTTP_EchoService_Echo_1(srv interface{}, ctx context.Context, req *http.Request, dec func(interface{}) error, m middleware.Middleware) (interface{}, error) { + var in SimpleMessage + + if err := http1.BindVars(req, &in); err != nil { + return nil, err + } + + if err := http1.BindForm(req, &in); err != nil { + return nil, err + } + + h := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EchoServiceServer).Echo(ctx, &in) + } + out, err := m(h)(ctx, &in) + if err != nil { + return nil, err + } + return out, nil +} + +func _HTTP_EchoService_Echo_2(srv interface{}, ctx context.Context, req *http.Request, dec func(interface{}) error, m middleware.Middleware) (interface{}, error) { + var in SimpleMessage + + if err := http1.BindVars(req, &in); err != nil { + return nil, err + } + + if err := http1.BindForm(req, &in); err != nil { + return nil, err + } + + h := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EchoServiceServer).Echo(ctx, &in) + } + out, err := m(h)(ctx, &in) + if err != nil { + return nil, err + } + return out, nil +} + +func _HTTP_EchoService_Echo_3(srv interface{}, ctx context.Context, req *http.Request, dec func(interface{}) error, m middleware.Middleware) (interface{}, error) { + var in SimpleMessage + + if err := http1.BindVars(req, &in); err != nil { + return nil, err + } + + if err := http1.BindForm(req, &in); err != nil { + return nil, err + } + + h := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EchoServiceServer).Echo(ctx, &in) + } + out, err := m(h)(ctx, &in) + if err != nil { + return nil, err + } + return out, nil +} + +func _HTTP_EchoService_Echo_4(srv interface{}, ctx context.Context, req *http.Request, dec func(interface{}) error, m middleware.Middleware) (interface{}, error) { + var in SimpleMessage + + if err := http1.BindVars(req, &in); err != nil { + return nil, err + } + + if err := 
http1.BindForm(req, &in); err != nil { + return nil, err + } + + h := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EchoServiceServer).Echo(ctx, &in) + } + out, err := m(h)(ctx, &in) + if err != nil { + return nil, err + } + return out, nil +} + +func _HTTP_EchoService_EchoBody_0(srv interface{}, ctx context.Context, req *http.Request, dec func(interface{}) error, m middleware.Middleware) (interface{}, error) { + var in SimpleMessage + + if err := dec(&in); err != nil { + return nil, err + } + + h := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EchoServiceServer).EchoBody(ctx, &in) + } + out, err := m(h)(ctx, &in) + if err != nil { + return nil, err + } + return out, nil +} + +func _HTTP_EchoService_EchoDelete_0(srv interface{}, ctx context.Context, req *http.Request, dec func(interface{}) error, m middleware.Middleware) (interface{}, error) { + var in SimpleMessage + + if err := http1.BindForm(req, &in); err != nil { + return nil, err + } + + h := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EchoServiceServer).EchoDelete(ctx, &in) + } + out, err := m(h)(ctx, &in) + if err != nil { + return nil, err + } + return out, nil +} + +func _HTTP_EchoService_EchoPatch_0(srv interface{}, ctx context.Context, req *http.Request, dec func(interface{}) error, m middleware.Middleware) (interface{}, error) { + var in DynamicMessageUpdate + + if err := dec(in.Body); err != nil { + return nil, err + } + + h := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EchoServiceServer).EchoPatch(ctx, &in) + } + out, err := m(h)(ctx, &in) + if err != nil { + return nil, err + } + return out, nil +} + +var _HTTP_EchoService_serviceDesc = http1.ServiceDesc{ + ServiceName: "testproto.EchoService", + Methods: []http1.MethodDesc{ + + { + Path: "/v1/example/echo/{id}/{num}", + Method: "GET", + Handler: _HTTP_EchoService_Echo_0, + }, + + { + Path: "/v1/example/echo/{id}/{num}/{lang}", + Method: "GET", + Handler: _HTTP_EchoService_Echo_1, + }, + + { + Path: "/v1/example/echo1/{id}/{line_num}/{status.note}", + Method: "GET", + Handler: _HTTP_EchoService_Echo_2, + }, + + { + Path: "/v1/example/echo2/{no.note}", + Method: "GET", + Handler: _HTTP_EchoService_Echo_3, + }, + + { + Path: "/v1/example/echo/{id}", + Method: "POST", + Handler: _HTTP_EchoService_Echo_4, + }, + + { + Path: "/v1/example/echo_body", + Method: "POST", + Handler: _HTTP_EchoService_EchoBody_0, + }, + + { + Path: "/v1/example/echo_delete", + Method: "DELETE", + Handler: _HTTP_EchoService_EchoDelete_0, + }, + + { + Path: "/v1/example/echo_patch", + Method: "PATCH", + Handler: _HTTP_EchoService_EchoPatch_0, + }, + }, + Metadata: "echo_service.proto", +} diff --git a/cmd/protoc-gen-go-http/main.go b/cmd/protoc-gen-go-http/main.go new file mode 100644 index 000000000..86c436ef4 --- /dev/null +++ b/cmd/protoc-gen-go-http/main.go @@ -0,0 +1,35 @@ +package main + +import ( + "flag" + "fmt" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/types/pluginpb" +) + +const version = "0.0.1" + +func main() { + showVersion := flag.Bool("version", false, "print the version and exit") + flag.Parse() + if *showVersion { + fmt.Printf("protoc-gen-go-http %v\n", version) + return + } + + var flags flag.FlagSet + + protogen.Options{ + ParamFunc: flags.Set, + }.Run(func(gen *protogen.Plugin) error { + gen.SupportedFeatures = uint64(pluginpb.CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL) + for _, f := range gen.Files { + 
if !f.Generate { + continue + } + generateFile(gen, f) + } + return nil + }) +} diff --git a/cmd/protoc-gen-go-http/template.go b/cmd/protoc-gen-go-http/template.go new file mode 100644 index 000000000..3537df4f9 --- /dev/null +++ b/cmd/protoc-gen-go-http/template.go @@ -0,0 +1,101 @@ +package main + +import ( + "bytes" + "html/template" + "strings" +) + +var httpTemplate = ` +type {{.ServiceType}}HTTPServer interface { +{{range .MethodSets}} + {{.Name}}(context.Context, *{{.Request}}) (*{{.Reply}}, error) +{{end}} +} +func Register{{.ServiceType}}HTTPServer(s http1.ServiceRegistrar, srv {{.ServiceType}}HTTPServer) { + s.RegisterService(&_HTTP_{{.ServiceType}}_serviceDesc, srv) +} +{{range .Methods}} +func _HTTP_{{$.ServiceType}}_{{.Name}}_{{.Num}}(srv interface{}, ctx context.Context, req *http.Request, dec func(interface{}) error, m middleware.Middleware) (interface{}, error) { + var in {{.Request}} +{{if ne (len .Vars) 0}} + if err := http1.BindVars(req, &in); err != nil { + return nil, err + } +{{end}} +{{if eq .Body ""}} + if err := http1.BindForm(req, &in); err != nil { + return nil, err + } +{{else if eq .Body ".*"}} + if err := dec(&in); err != nil { + return nil, err + } +{{else}} + if err := dec(in{{.Body}}); err != nil { + return nil, err + } +{{end}} + h := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.({{$.ServiceType}}Server).{{.Name}}(ctx, &in) + } + out, err := m(h)(ctx, &in) + if err != nil { + return nil, err + } + return out{{.ResponseBody}}, nil +} +{{end}} +var _HTTP_{{.ServiceType}}_serviceDesc = http1.ServiceDesc{ + ServiceName: "{{.ServiceName}}", + Methods: []http1.MethodDesc{ +{{range .Methods}} + { + Path: "{{.Path}}", + Method: "{{.Method}}", + Handler: _HTTP_{{$.ServiceType}}_{{.Name}}_{{.Num}}, + }, +{{end}} + }, + Metadata: "{{.Metadata}}", +} +` + +type serviceDesc struct { + ServiceType string // Greeter + ServiceName string // helloworld.Greeter + Metadata string // api/helloworld/helloworld.proto + Methods []*methodDesc + MethodSets map[string]*methodDesc +} + +type methodDesc struct { + // method + Name string + Num int + Vars []string + Forms []string + Request string + Reply string + // http_rule + Path string + Method string + Body string + ResponseBody string +} + +func (s *serviceDesc) execute() string { + s.MethodSets = make(map[string]*methodDesc) + for _, m := range s.Methods { + s.MethodSets[m.Name] = m + } + buf := new(bytes.Buffer) + tmpl, err := template.New("http").Parse(strings.TrimSpace(httpTemplate)) + if err != nil { + panic(err) + } + if err := tmpl.Execute(buf, s); err != nil { + panic(err) + } + return string(buf.Bytes()) +} diff --git a/config/README.md b/config/README.md new file mode 100644 index 000000000..ea01387d0 --- /dev/null +++ b/config/README.md @@ -0,0 +1,37 @@ +# config + +可以指定多个配置源,config 会进行合并成 map[string]interface{},然后通过 Scan 或者 Value 获取值内容; + +``` +c := config.New( + config.WithSource( + file.NewSource(path), + ), + config.WithDecoder(func(kv *config.KeyValue, v map[string]interface{}) error { + // kv.Key + // kv.Value + // kv.Metadata + // 自定义实现对应的数据源解析,如果是配置中心数据源也可以指定metadata进行识别配置类型 + return yaml.Unmarshal(kv.Value, v) + }), +) +// 加载配置源: +if err := c.Load(); err != nil { + panic(err) +} +// 获取对应的值内容: +name, err := c.Value("service").String() +// 解析到结构体(由于已经合并到map[string]interface{},所以需要指定 jsonName 进行解析): +var v struct { + Service string `json:"service"` + Version string `json:"version"` +} +if err := c.Scan(&v); err != nil { + panic(err) +} +// 监听值内容变更 +c.Watch("service.name", 
func(key string, value config.Value) { + // 值内容变更 +}) +``` + diff --git a/config/config.go b/config/config.go new file mode 100644 index 000000000..15ef8e678 --- /dev/null +++ b/config/config.go @@ -0,0 +1,141 @@ +package config + +import ( + "encoding/json" + "errors" + "reflect" + "sync" + "time" + + "github.com/go-kratos/kratos/v2/log" +) + +var ( + // ErrNotFound is key not found. + ErrNotFound = errors.New("key not found") + // ErrTypeAssert is type assert error. + ErrTypeAssert = errors.New("type assert error") + + _ Config = (*config)(nil) +) + +// Observer is config observer. +type Observer func(string, Value) + +// Config is a config interface. +type Config interface { + Load() error + Scan(v interface{}) error + Value(key string) Value + Watch(key string, o Observer) error + Close() error +} + +type config struct { + opts options + reader Reader + cached sync.Map + observers sync.Map + watchers []Watcher + log *log.Helper +} + +// New new a config with options. +func New(opts ...Option) Config { + options := options{ + logger: log.DefaultLogger, + decoder: func(kv *KeyValue, v map[string]interface{}) error { + return json.Unmarshal(kv.Value, &v) + }, + } + for _, o := range opts { + o(&options) + } + return &config{ + opts: options, + reader: newReader(options), + log: log.NewHelper("config", options.logger), + } +} + +func (c *config) watch(w Watcher) { + for { + kvs, err := w.Next() + if err != nil { + time.Sleep(time.Second) + c.log.Errorf("Failed to watch next config: %v", err) + continue + } + if err := c.reader.Merge(kvs...); err != nil { + c.log.Errorf("Failed to merge next config: %v", err) + continue + } + c.cached.Range(func(key, value interface{}) bool { + k := key.(string) + v := value.(Value) + if n, ok := c.reader.Value(k); ok && !reflect.DeepEqual(n.Load(), v.Load()) { + v.Store(n.Load()) + if o, ok := c.observers.Load(k); ok { + o.(Observer)(k, v) + } + } + return true + }) + } +} + +func (c *config) Load() error { + for _, src := range c.opts.sources { + kvs, err := src.Load() + if err != nil { + return err + } + if err := c.reader.Merge(kvs...); err != nil { + c.log.Errorf("Failed to merge config source: %v", err) + return err + } + w, err := src.Watch() + if err != nil { + c.log.Errorf("Failed to watch config source: %v", err) + return err + } + go c.watch(w) + } + return nil +} + +func (c *config) Value(key string) Value { + if v, ok := c.cached.Load(key); ok { + return v.(Value) + } + if v, ok := c.reader.Value(key); ok { + c.cached.Store(key, v) + return v + } + return &errValue{err: ErrNotFound} +} + +func (c *config) Scan(v interface{}) error { + data, err := c.reader.Source() + if err != nil { + return err + } + return json.Unmarshal(data, v) +} + +func (c *config) Watch(key string, o Observer) error { + if v := c.Value(key); v.Load() == nil { + return ErrNotFound + } + c.observers.Store(key, o) + return nil +} + +func (c *config) Close() error { + for _, w := range c.watchers { + if err := w.Close(); err != nil { + return err + } + } + return nil +} diff --git a/config/file/file.go b/config/file/file.go new file mode 100644 index 000000000..5959666f0 --- /dev/null +++ b/config/file/file.go @@ -0,0 +1,79 @@ +package file + +import ( + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/go-kratos/kratos/v2/config" +) + +var _ config.Source = (*file)(nil) + +type file struct { + path string +} + +// NewSource new a file source. 
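+// The path may point at a single file or at a directory; for a directory every
+// non-hidden regular file in it is loaded as one KeyValue. A minimal use,
+// assuming a ./configs directory:
+//
+//	c := config.New(config.WithSource(NewSource("./configs")))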
+func NewSource(path string) config.Source { + return &file{path: path} +} + +func (f *file) loadFile(path string) (*config.KeyValue, error) { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + data, err := ioutil.ReadAll(file) + if err != nil { + return nil, err + } + info, err := file.Stat() + if err != nil { + return nil, err + } + return &config.KeyValue{ + Key: info.Name(), + Value: data, + }, nil +} + +func (f *file) loadDir(path string) (kvs []*config.KeyValue, err error) { + files, err := ioutil.ReadDir(f.path) + if err != nil { + return nil, err + } + for _, file := range files { + // ignore hidden files + if file.IsDir() || strings.HasPrefix(file.Name(), ".") { + continue + } + kv, err := f.loadFile(filepath.Join(f.path, file.Name())) + if err != nil { + return nil, err + } + kvs = append(kvs, kv) + } + return +} + +func (f *file) Load() (kvs []*config.KeyValue, err error) { + fi, err := os.Stat(f.path) + if err != nil { + return nil, err + } + if fi.IsDir() { + return f.loadDir(f.path) + } + kv, err := f.loadFile(f.path) + if err != nil { + return nil, err + } + return []*config.KeyValue{kv}, nil +} + +func (f *file) Watch() (config.Watcher, error) { + return newWatcher(f) +} diff --git a/config/file/file_test.go b/config/file/file_test.go new file mode 100644 index 000000000..8bfe465b7 --- /dev/null +++ b/config/file/file_test.go @@ -0,0 +1,142 @@ +package file + +import ( + "errors" + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "github.com/go-kratos/kratos/v2/config" +) + +const ( + _testJSON = ` +{ + "test": { + "settings" : { + "int_key": 1000, + "float_key": 1000.1, + "duration_key": 10000, + "string_key": "string_value" + }, + "server": { + "addr": "127.0.0.1", + "port": 8000 + } + } +}` +) + +func TestFile(t *testing.T) { + var ( + path = filepath.Join(os.TempDir(), "test_config") + file = filepath.Join(path, "test.json") + data = []byte(_testJSON) + ) + defer os.Remove(path) + if err := os.MkdirAll(path, 0700); err != nil { + t.Error(err) + } + if err := ioutil.WriteFile(file, data, 0666); err != nil { + t.Error(err) + } + testSource(t, file, data) + testSource(t, path, data) +} + +func testSource(t *testing.T, path string, data []byte) { + t.Log(path) + + s := NewSource(path) + kvs, err := s.Load() + if err != nil { + t.Error(err) + } + if string(kvs[0].Value) != string(data) { + t.Errorf("no expected: %s, but got: %s", kvs[0].Value, data) + } +} + +func TestConfig(t *testing.T) { + path := filepath.Join(os.TempDir(), "test_config.json") + defer os.Remove(path) + if err := ioutil.WriteFile(path, []byte(_testJSON), 0666); err != nil { + t.Error(err) + } + c := config.New(config.WithSource( + NewSource(path), + )) + testConfig(t, c) +} + +func testConfig(t *testing.T, c config.Config) { + var expected = map[string]interface{}{ + "test.settings.int_key": int64(1000), + "test.settings.float_key": float64(1000.1), + "test.settings.string_key": "string_value", + "test.settings.duration_key": time.Duration(10000), + "test.server.addr": "127.0.0.1", + "test.server.port": int64(8000), + } + if err := c.Load(); err != nil { + t.Error(err) + } + for key, value := range expected { + switch value.(type) { + case int64: + if v, err := c.Value(key).Int(); err != nil { + t.Error(key, value, err) + } else if v != value { + t.Errorf("no expect key: %s value: %v, but got: %v", key, value, v) + } + case float64: + if v, err := c.Value(key).Float(); err != nil { + t.Error(key, value, err) + } else if v != value { + t.Errorf("no 
expect key: %s value: %v, but got: %v", key, value, v) + } + case string: + if v, err := c.Value(key).String(); err != nil { + t.Error(key, value, err) + } else if v != value { + t.Errorf("no expect key: %s value: %v, but got: %v", key, value, v) + } + case time.Duration: + if v, err := c.Value(key).Duration(); err != nil { + t.Error(key, value, err) + } else if v != value { + t.Errorf("no expect key: %s value: %v, but got: %v", key, value, v) + } + } + } + // scan + var settings struct { + IntKey int64 `json:"int_key"` + FloatKey float64 `json:"float_key"` + StringKey string `json:"string_key"` + DurationKey time.Duration `json:"duration_key"` + } + if err := c.Value("test.settings").Scan(&settings); err != nil { + t.Error(err) + } + if v := expected["test.settings.int_key"]; settings.IntKey != v { + t.Errorf("no expect int_key value: %v, but got: %v", settings.IntKey, v) + } + if v := expected["test.settings.float_key"]; settings.FloatKey != v { + t.Errorf("no expect float_key value: %v, but got: %v", settings.FloatKey, v) + } + if v := expected["test.settings.string_key"]; settings.StringKey != v { + t.Errorf("no expect string_key value: %v, but got: %v", settings.StringKey, v) + } + if v := expected["test.settings.duration_key"]; settings.DurationKey != v { + t.Errorf("no expect duration_key value: %v, but got: %v", settings.DurationKey, v) + } + + // not found + if _, err := c.Value("not_found_key").Bool(); errors.Is(err, config.ErrNotFound) { + t.Logf("not_found_key not match: %v", err) + } + +} diff --git a/config/file/watcher.go b/config/file/watcher.go new file mode 100644 index 000000000..7b53351e3 --- /dev/null +++ b/config/file/watcher.go @@ -0,0 +1,53 @@ +package file + +import ( + "os" + "path/filepath" + + "github.com/fsnotify/fsnotify" + "github.com/go-kratos/kratos/v2/config" +) + +type watcher struct { + f *file + fw *fsnotify.Watcher +} + +func newWatcher(f *file) (config.Watcher, error) { + fw, err := fsnotify.NewWatcher() + if err != nil { + return nil, err + } + fw.Add(f.path) + return &watcher{f: f, fw: fw}, nil +} + +func (w *watcher) Next() ([]*config.KeyValue, error) { + select { + case event := <-w.fw.Events: + if event.Op == fsnotify.Rename { + if _, err := os.Stat(event.Name); err == nil || os.IsExist(err) { + w.fw.Add(event.Name) + } + } + fi, err := os.Stat(w.f.path) + if err != nil { + return nil, err + } + path := w.f.path + if fi.IsDir() { + path = filepath.Join(w.f.path, event.Name) + } + kv, err := w.f.loadFile(path) + if err != nil { + return nil, err + } + return []*config.KeyValue{kv}, nil + case err := <-w.fw.Errors: + return nil, err + } +} + +func (w *watcher) Close() error { + return w.fw.Close() +} diff --git a/config/options.go b/config/options.go new file mode 100644 index 000000000..ec583e3fe --- /dev/null +++ b/config/options.go @@ -0,0 +1,38 @@ +package config + +import ( + "github.com/go-kratos/kratos/v2/log" +) + +// Decoder is config decoder. +type Decoder func(*KeyValue, map[string]interface{}) error + +// Option is config option. +type Option func(*options) + +type options struct { + sources []Source + decoder Decoder + logger log.Logger +} + +// WithSource with config source. +func WithSource(s ...Source) Option { + return func(o *options) { + o.sources = s + } +} + +// WithDecoder with config decoder. +func WithDecoder(d Decoder) Option { + return func(o *options) { + o.decoder = d + } +} + +// WithLogger with config loogger. 
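+// If this option is not supplied, config.New falls back to log.DefaultLogger.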
+func WithLogger(l log.Logger) Option { + return func(o *options) { + o.logger = l + } +} diff --git a/config/reader.go b/config/reader.go new file mode 100644 index 000000000..de6c24bc5 --- /dev/null +++ b/config/reader.go @@ -0,0 +1,107 @@ +package config + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/imdario/mergo" +) + +// Reader is config reader. +type Reader interface { + Merge(...*KeyValue) error + Value(string) (Value, bool) + Source() ([]byte, error) +} + +type reader struct { + opts options + values map[string]interface{} +} + +func newReader(opts options) Reader { + return &reader{ + opts: opts, + values: make(map[string]interface{}), + } +} + +func (r *reader) Merge(kvs ...*KeyValue) error { + merged, err := cloneMap(r.values) + if err != nil { + return err + } + for _, kv := range kvs { + next := make(map[string]interface{}) + if err := r.opts.decoder(kv, next); err != nil { + return err + } + if err := mergo.Map(&merged, convertMap(next), mergo.WithOverride); err != nil { + return err + } + } + r.values = merged + return nil +} + +func (r *reader) Value(path string) (Value, bool) { + var ( + next = r.values + keys = strings.Split(path, ".") + last = len(keys) - 1 + ) + for idx, key := range keys { + value, ok := next[key] + if !ok { + return nil, false + } + if idx == last { + av := &atomicValue{} + av.Store(value) + return av, true + } + switch vm := value.(type) { + case map[string]interface{}: + next = vm + default: + return nil, false + } + } + return nil, false +} + +func (r *reader) Source() ([]byte, error) { + return json.Marshal(r.values) +} + +func cloneMap(src map[string]interface{}) (map[string]interface{}, error) { + data, err := json.Marshal(src) + if err != nil { + return nil, err + } + dst := make(map[string]interface{}) + if err = json.Unmarshal(data, &dst); err != nil { + return nil, err + } + return dst, nil +} + +func convertMap(src interface{}) interface{} { + switch m := src.(type) { + case map[string]interface{}: + dst := make(map[string]interface{}, len(m)) + for k, v := range m { + dst[k] = convertMap(v) + } + return dst + case map[interface{}]interface{}: + dst := make(map[string]interface{}, len(m)) + for k, v := range m { + dst[fmt.Sprint(k)] = convertMap(v) + } + return dst + default: + return src + } +} diff --git a/config/source.go b/config/source.go new file mode 100644 index 000000000..2c15ce8cd --- /dev/null +++ b/config/source.go @@ -0,0 +1,20 @@ +package config + +// KeyValue is config key value. +type KeyValue struct { + Key string + Value []byte + Metadata map[string]string +} + +// Source is config source. +type Source interface { + Load() ([]*KeyValue, error) + Watch() (Watcher, error) +} + +// Watcher watches a source for changes. +type Watcher interface { + Next() ([]*KeyValue, error) + Close() error +} diff --git a/config/value.go b/config/value.go new file mode 100644 index 000000000..52ed86b4b --- /dev/null +++ b/config/value.go @@ -0,0 +1,105 @@ +package config + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + "sync/atomic" + "time" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) + +var ( + _ Value = (*atomicValue)(nil) + _ Value = (*errValue)(nil) +) + +// Value is config value interface. 
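+// It is returned by Config.Value and Reader.Value; a typical lookup, assuming
+// a loaded Config c, looks like:
+//
+//	addr, err := c.Value("test.server.addr").String()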
+type Value interface { + Bool() (bool, error) + Int() (int64, error) + Float() (float64, error) + String() (string, error) + Duration() (time.Duration, error) + Scan(interface{}) error + Load() interface{} + Store(interface{}) +} + +type atomicValue struct { + atomic.Value +} + +func (v *atomicValue) Bool() (bool, error) { + switch val := v.Load().(type) { + case bool: + return val, nil + case int64, float64, string: + return strconv.ParseBool(fmt.Sprint(val)) + } + return false, fmt.Errorf("type assert to %v failed", reflect.TypeOf(v.Load())) +} +func (v *atomicValue) Int() (int64, error) { + switch val := v.Load().(type) { + case int64: + return int64(val), nil + case float64: + return int64(val), nil + case string: + return strconv.ParseInt(val, 10, 64) + } + return 0, fmt.Errorf("type assert to %v failed", reflect.TypeOf(v.Load())) +} +func (v *atomicValue) Float() (float64, error) { + switch val := v.Load().(type) { + case float64: + return float64(val), nil + case int64: + return float64(val), nil + case string: + return strconv.ParseFloat(val, 10) + } + return 0.0, fmt.Errorf("type assert to %v failed", reflect.TypeOf(v.Load())) +} +func (v *atomicValue) String() (string, error) { + switch val := v.Load().(type) { + case string: + return val, nil + case bool, int64, float64: + return fmt.Sprint(val), nil + } + return "", fmt.Errorf("type assert to %v failed", reflect.TypeOf(v.Load())) +} +func (v *atomicValue) Duration() (time.Duration, error) { + val, err := v.Int() + if err != nil { + return 0, err + } + return time.Duration(val), nil +} +func (v *atomicValue) Scan(obj interface{}) error { + data, err := json.Marshal(v.Load()) + if err != nil { + return err + } + if pb, ok := obj.(proto.Message); ok { + return protojson.Unmarshal(data, pb) + } + return json.Unmarshal(data, obj) +} + +type errValue struct { + err error +} + +func (v errValue) Bool() (bool, error) { return false, v.err } +func (v errValue) Int() (int64, error) { return 0, v.err } +func (v errValue) Float() (float64, error) { return 0.0, v.err } +func (v errValue) Duration() (time.Duration, error) { return 0, v.err } +func (v errValue) String() (string, error) { return "", v.err } +func (v errValue) Scan(interface{}) error { return v.err } +func (v errValue) Load() interface{} { return nil } +func (v errValue) Store(interface{}) {} diff --git a/docs/.nojekyll b/docs/.nojekyll deleted file mode 100644 index e69de29bb..000000000 diff --git a/docs/CNAME b/docs/CNAME deleted file mode 100644 index 203bf8deb..000000000 --- a/docs/CNAME +++ /dev/null @@ -1 +0,0 @@ -v1.go-kratos.dev \ No newline at end of file diff --git a/docs/FAQ.md b/docs/FAQ.md deleted file mode 100644 index 768e19318..000000000 --- a/docs/FAQ.md +++ /dev/null @@ -1,21 +0,0 @@ -# 安装失败,提示go mod 错误 - -执行 -```shell -go get -u github.com/go-kratos/kratos/tool/kratos -``` -出现以下错误时 -```shell -go: github.com/prometheus/client_model@v0.0.0-20190220174349-fd36f4220a90: parsing go.mod: missing module line -go: github.com/remyoudompheng/bigfft@v0.0.0-20190806203942-babf20351dd7e3ac320adedbbe5eb311aec8763c: parsing go.mod: missing module line -``` -如果你使用了https://goproxy.io/ 代理,那你要使用其他代理来替换它,然后删除GOPATH目录下的mod缓存文件夹(`go clean --modcache`),然后重新执行安装命令 - -代理列表 - -``` -export GOPROXY=https://mirrors.aliyun.com/goproxy/ -export GOPROXY=https://goproxy.cn/ -export GOPROXY=https://goproxy.io/ -``` - diff --git a/docs/README.md b/docs/README.md index 13c1d0a72..e69de29bb 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,36 +0,0 @@ -![kratos](img/kratos3.png) -# Kratos - 
-Kratos是bilibili开源的一套Go微服务框架,包含大量微服务相关框架及工具。 - -### Goals - -我们致力于提供完整的微服务研发体验,整合相关框架及工具后,微服务治理相关部分可对整体业务开发周期无感,从而更加聚焦于业务交付。对每位开发者而言,整套Kratos框架也是不错的学习仓库,可以了解和参考到bilibili在微服务方面的技术积累和经验。 - -### Principles - -* 简单:不过度设计,代码平实简单 -* 通用:通用业务开发所需要的基础库的功能 -* 高效:提高业务迭代的效率 -* 稳定:基础库可测试性高,覆盖率高,有线上实践安全可靠 -* 健壮:通过良好的基础库设计,减少错用 -* 高性能:性能高,但不特定为了性能做hack优化,引入unsafe -* 扩展性:良好的接口设计,来扩展实现,或者通过新增基础库目录来扩展功能 -* 容错性:为失败设计,大量引入对SRE的理解,鲁棒性高 -* 工具链:包含大量工具链,比如cache代码生成,lint工具等等 - -### Features -* HTTP Blademaster:核心基于[gin](https://github.com/gin-gonic/gin)进行模块化设计,简单易用、核心足够轻量; -* GRPC Warden:基于官方gRPC开发,集成[discovery](https://github.com/bilibili/discovery)服务发现,并融合P2C负载均衡; -* Cache:优雅的接口化设计,非常方便的缓存序列化,推荐结合代理模式[overlord](https://github.com/bilibili/overlord); -* Database:集成MySQL/HBase/TiDB,添加熔断保护和统计支持,可快速发现数据层压力; -* Config:方便易用的[paladin sdk](config-paladin.md),可配合远程配置中心,实现配置版本管理和更新; -* Log:类似[zap](https://github.com/uber-go/zap)的field实现高性能日志库,并结合log-agent实现远程日志管理; -* Trace:基于opentracing,集成了全链路trace支持(gRPC/HTTP/MySQL/Redis/Memcached); -* Kratos Tool:工具链,可快速生成标准项目,或者通过Protobuf生成代码,非常便捷使用gRPC、HTTP、swagger文档; - - -------------- - -> 名字来源于:《战神》游戏以希腊神话为背景,讲述由凡人成为战神的奎托斯(Kratos)成为战神并展开弑神屠杀的冒险历程。 - diff --git a/docs/_sidebar.md b/docs/_sidebar.md deleted file mode 100644 index 84477b09d..000000000 --- a/docs/_sidebar.md +++ /dev/null @@ -1,39 +0,0 @@ -* [介绍](README.md) - * [快速开始 - 项目初始化](quickstart.md) -* [FAQ](FAQ.md) -* [http blademaster](blademaster.md) - * [bm quickstart](blademaster-quickstart.md) - * [bm module](blademaster-mod.md) - * [bm middleware](blademaster-mid.md) - * [bm protobuf](blademaster-pb.md) -* [grpc warden](warden.md) - * [warden quickstart](warden-quickstart.md) - * [warden interceptor](warden-mid.md) - * [warden resolver](warden-resolver.md) - * [warden balancer](warden-balancer.md) - * [warden protobuf](warden-pb.md) -* [config](config.md) - * [paladin](config-paladin.md) -* [ecode](ecode.md) -* [trace](trace.md) -* [log](logger.md) - * [log-agent](log-agent.md) -* [database](database.md) - * [mysql](database-mysql.md) - * [mysql-orm](database-mysql-orm.md) - * [hbase](database-hbase.md) - * [tidb](database-tidb.md) -* [cache](cache.md) - * [memcache](cache-mc.md) - * [redis](cache-redis.md) -* [kratos工具](kratos-tool.md) - * [protoc](kratos-protoc.md) - * [swagger](kratos-swagger.md) - * [genmc](kratos-genmc.md) - * [genbts](kratos-genbts.md) -* [限流bbr](ratelimit.md) -* [熔断breaker](breaker.md) -* [UT单元测试](ut.md) - * [testcli UT运行环境构建工具](ut-testcli.md) - * [testgen UT代码自动生成器](ut-testgen.md) - * [support UT周边辅助工具](ut-support.md) \ No newline at end of file diff --git a/docs/blademaster-mid.md b/docs/blademaster-mid.md deleted file mode 100644 index e379b97dc..000000000 --- a/docs/blademaster-mid.md +++ /dev/null @@ -1,177 +0,0 @@ -# 背景 - -基于bm的handler机制,可以自定义很多middleware(中间件)进行通用的业务处理,比如用户登录鉴权。接下来就以鉴权为例,说明middleware的写法和用法。 - -# 写自己的中间件 - -middleware本质上就是一个handler,接口和方法声明如下代码: - -```go -// Handler responds to an HTTP request. -type Handler interface { - ServeHTTP(c *Context) -} - -// HandlerFunc http request handler function. -type HandlerFunc func(*Context) - -// ServeHTTP calls f(ctx). -func (f HandlerFunc) ServeHTTP(c *Context) { - f(c) -} -``` - -1. 实现了`Handler`接口,可以作为engine的全局中间件使用:`engine.Use(YourHandler)` -2. 
声明为`HandlerFunc`方法,可以作为engine的全局中间件使用:`engine.UseFunc(YourHandlerFunc)`,也可以作为router的局部中间件使用:`e.GET("/path", YourHandlerFunc)` - -简单示例代码如下: - -```go -type Demo struct { - Key string - Value string -} -// ServeHTTP implements from Handler interface -func (d *Demo) ServeHTTP(ctx *bm.Context) { - ctx.Set(d.Key, d.Value) -} - -e := bm.DefaultServer(nil) -d := &Demo{} - -// Handler使用如下: -e.Use(d) - -// HandlerFunc使用如下: -e.UseFunc(d.ServeHTTP) -e.GET("/path", d.ServeHTTP) - -// 或者只有方法 -myHandler := func(ctx *bm.Context) { - // some code -} -e.UseFunc(myHandler) -e.GET("/path", myHandler) -``` - -# 全局中间件 - -在blademaster的`server.go`代码中,有以下代码: - -```go -func DefaultServer(conf *ServerConfig) *Engine { - engine := NewServer(conf) - engine.Use(Recovery(), Trace(), Logger()) - return engine -} -``` - -会默认创建一个`bm engine`,并注册`Recovery(), Trace(), Logger()`三个middlerware用于全局handler处理,优先级从前到后。如果想要将自定义的middleware注册进全局,可以继续调用Use方法如下: - -```go -engine.Use(YourMiddleware()) -``` - -此方法会将`YourMiddleware`追加到已有的全局middleware后执行。如果需要全部自定义全局执行的middleware,可以使用`NewServer`方法创建一个无middleware的engine对象,然后使用`engine.Use/UseFunc`进行注册。 - -# 局部中间件 - -先来看一段鉴权伪代码示例([auth示例代码位置](https://github.com/go-kratos/kratos/tree/master/example/blademaster/middleware/auth)): - -```go -func Example() { - myHandler := func(ctx *bm.Context) { - mid := metadata.Int64(ctx, metadata.Mid) - ctx.JSON(fmt.Sprintf("%d", mid), nil) - } - - authn := auth.New(&auth.Config{DisableCSRF: false}) - - e := bm.DefaultServer(nil) - - // "/user"接口必须保证登录用户才能访问,那么我们加入"auth.User"来确保用户鉴权通过,才能进入myHandler进行业务逻辑处理 - e.GET("/user", authn.User, myHandler) - // "/guest"接口访客用户就可以访问,但如果登录用户我们需要知道mid,那么我们加入"auth.Guest"来尝试鉴权获取mid,但肯定会继续执行myHandler进行业务逻辑处理 - e.GET("/guest", authn.Guest, myHandler) - - // "/owner"开头的所有接口,都需要进行登录鉴权才可以被访问,那可以创建一个group并加入"authn.User" - o := e.Group("/owner", authn.User) - o.GET("/info", myHandler) // 该group创建的router不需要再显示的加入"authn.User" - o.POST("/modify", myHandler) // 该group创建的router不需要再显示的加入"authn.User" - - e.Start() -} -``` - -# 内置中间件 - -## Recovery - -代码位于`pkg/net/http/blademaster/recovery.go`内,用于recovery panic。会被`DefaultServer`默认注册,建议使用`NewServer`的话也将其作为首个中间件注册。 - -## Trace - -代码位于`pkg/net/http/blademaster/trace.go`内,用于trace设置,并且实现了`net/http/httptrace`的接口,能够收集官方库内的调用栈详情。会被`DefaultServer`默认注册,建议使用`NewServer`的话也将其作为第二个中间件注册。 - -## Logger - -代码位于`pkg/net/http/blademaster/logger.go`内,用于请求日志记录。会被`DefaultServer`默认注册,建议使用`NewServer`的话也将其作为第三个中间件注册。 - -## CSRF - -代码位于`pkg/net/http/blademaster/csrf.go`内,用于防跨站请求。如要使用如下: - -```go -e := bm.DefaultServer(nil) -// 挂载自适应限流中间件到 bm engine,使用默认配置 -csrf := bm.CSRF([]string{"bilibili.com"}, []string{"/a/api"}) -e.Use(csrf) -// 或者 -e.GET("/api", csrf, myHandler) -``` - -## CORS - -代码位于`pkg/net/http/blademaster/cors.go`内,用于跨域允许请求。请注意该: -1. 使用该中间件进行全局注册后,可"省略"单独为`OPTIONS`请求注册路由,如示例一。 -2. 
使用该中间单独为某路由注册,需要为该路由再注册一个`OPTIONS`方法的同路径路由,如示例二。 - -示例一: -```go -e := bm.DefaultServer(nil) -// 挂载自适应限流中间件到 bm engine,使用默认配置 -cors := bm.CORS([]string{"github.com"}) -e.Use(cors) -// 该路由可以默认针对 OPTIONS /api 的跨域请求支持 -e.POST("/api", myHandler) -``` - -示例二: -```go -e := bm.DefaultServer(nil) -// 挂载自适应限流中间件到 bm engine,使用默认配置 -cors := bm.CORS([]string{"github.com"}) -// e.Use(cors) 不进行全局注册 -e.OPTIONS("/api", cors, myHandler) // 需要单独为/api进行OPTIONS方法注册 -e.POST("/api", cors, myHandler) -``` - -## 自适应限流 - -更多关于自适应限流的信息可参考:[kratos 自适应限流](ratelimit.md)。如要使用如下: - -```go -e := bm.DefaultServer(nil) -// 挂载自适应限流中间件到 bm engine,使用默认配置 -limiter := bm.NewRateLimiter(nil) -e.Use(limiter.Limit()) -// 或者 -e.GET("/api", csrf, myHandler) -``` - -# 扩展阅读 - -[bm快速开始](blademaster-quickstart.md) -[bm模块说明](blademaster-mod.md) -[bm基于pb生成](blademaster-pb.md) - diff --git a/docs/blademaster-mod.md b/docs/blademaster-mod.md deleted file mode 100644 index e8c81d704..000000000 --- a/docs/blademaster-mod.md +++ /dev/null @@ -1,88 +0,0 @@ -# Context - -以下是 blademaster 中 Context 对象结构体声明的代码片段: -```go -// Context is the most important part. It allows us to pass variables between -// middleware, manage the flow, validate the JSON of a request and render a -// JSON response for example. -type Context struct { - context.Context - - Request *http.Request - Writer http.ResponseWriter - - // flow control - index int8 - handlers []HandlerFunc - - // Keys is a key/value pair exclusively for the context of each request. - Keys map[string]interface{} - - Error error - - method string - engine *Engine -} -``` - -* 首先可以看到 blademaster 的 Context 结构体中会 embed 一个标准库中的 Context 实例,bm 中的 Context 也是直接通过该实例来实现标准库中的 Context 接口。 -* blademaster 会使用配置的 server timeout (默认1s) 作为一次请求整个过程中的超时时间,使用该context调用dao做数据库、缓存操作查询时均会将该超时时间传递下去,一旦抵达deadline,后续相关操作均会返回`context deadline exceeded`。 -* Request 和 Writer 字段用于获取当前请求的与输出响应。 -* index 和 handlers 用于 handler 的流程控制;handlers 中存储了当前请求需要执行的所有 handler,index 用于标记当前正在执行的 handler 的索引位。 -* Keys 用于在 handler 之间传递一些额外的信息。 -* Error 用于存储整个请求处理过程中的错误。 -* method 用于检查当前请求的 Method 是否与预定义的相匹配。 -* engine 字段指向当前 blademaster 的 Engine 实例。 - -以下为 Context 中所有的公开的方法: -```go -// 用于 Handler 的流程控制 -func (c *Context) Abort() -func (c *Context) AbortWithStatus(code int) -func (c *Context) Bytes(code int, contentType string, data ...[]byte) -func (c *Context) IsAborted() bool -func (c *Context) Next() - -// 用户获取或者传递请求的额外信息 -func (c *Context) RemoteIP() (cip string) -func (c *Context) Set(key string, value interface{}) -func (c *Context) Get(key string) (value interface{}, exists bool) - -// 用于校验请求的 payload -func (c *Context) Bind(obj interface{}) error -func (c *Context) BindWith(obj interface{}, b binding.Binding) error - -// 用于输出响应 -func (c *Context) Render(code int, r render.Render) -func (c *Context) Redirect(code int, location string) -func (c *Context) Status(code int) -func (c *Context) String(code int, format string, values ...interface{}) -func (c *Context) XML(data interface{}, err error) -func (c *Context) JSON(data interface{}, err error) -func (c *Context) JSONMap(data map[string]interface{}, err error) -func (c *Context) Protobuf(data proto.Message, err error) -``` - -所有方法基本上可以分为三类: - -* 流程控制 -* 额外信息传递 -* 请求处理 -* 响应处理 - -# Handler - -![handler](img/bm-handlers.png) - -初次接触`blademaster`的用户可能会对其`Handler`的流程处理产生不小的疑惑,实际上`bm`对`Handler`对处理非常简单: - -* 将`Router`模块中预先注册的`middleware`与其他`Handler`合并,放入`Context`的`handlers`字段,并将`index`字段置`0` -* 然后通过`Next()`方法一个个执行下去,部分`middleware`可能想要在过程中中断整个流程,此时可以使用`Abort()`方法提前结束处理 -* 
有些`middleware`还想在所有`Handler`执行完后再执行部分逻辑,此时可以在自身`Handler`中显式调用`Next()`方法,并将这些逻辑放在调用了`Next()`方法之后 - -# 扩展阅读 - -[bm快速开始](blademaster-quickstart.md) -[bm中间件](blademaster-mid.md) -[bm基于pb生成](blademaster-pb.md) - diff --git a/docs/blademaster-pb.md b/docs/blademaster-pb.md deleted file mode 100644 index 0b693af13..000000000 --- a/docs/blademaster-pb.md +++ /dev/null @@ -1,83 +0,0 @@ -# 介绍 - -基于proto文件可以快速生成`bm`框架对应的代码,提前需要准备以下工作: - -* 安装`kratos tool protoc`工具,请看[kratos工具](kratos-tool.md) -* 编写`proto`文件,示例可参考[kratos-demo内proto文件](https://github.com/go-kratos/kratos-demo/blob/master/api/api.proto) - -### kratos工具说明 - -`kratos tool protoc`工具可以生成`warden` `bm` `swagger`对应的代码和文档,想要单独生成`bm`代码只需加上`--bm`如: - -```shell -# generate BM HTTP -kratos tool protoc --bm api.proto -``` - -### proto文件说明 - -请注意想要生成`bm`代码,需要特别在`proto`的`service`内指定`google.api.http`配置,如下: - -```go -service Demo { - rpc SayHello (HelloReq) returns (.google.protobuf.Empty); - rpc SayHelloURL(HelloReq) returns (HelloResp) { - option (google.api.http) = { // 该配置指定SayHelloURL方法对应的url - get:"/kratos-demo/say_hello" // 指定url和请求方式为GET - }; - }; -} -``` - -# 使用 - -建议在项目`api`目录下编写`proto`文件及生成对应的代码,可参考[kratos-demo内的api目录](https://github.com/go-kratos/kratos-demo/tree/master/api)。 - -执行命令后生成的`api.bm.go`代码,注意其中的`type DemoBMServer interface`和`RegisterDemoBMServer`,其中: - -* `DemoBMServer`接口,包含`proto`文件内配置了`google.api.http`选项的所有方法 -* `RegisterDemoBMServer`方法提供注册`DemoBMServer`接口的实现对象,和`bm`的`Engine`用于注册路由 -* `DemoBMServer`接口的实现,一般为`internal/service`内的业务逻辑代码,需要实现`DemoBMServer`接口 - -使用`RegisterDemoBMServer`示例代码请参考[kratos-demo内的http](https://github.com/go-kratos/kratos-demo/blob/master/internal/server/http/server.go)内的如下代码: - -```go -engine = bm.DefaultServer(hc.Server) -pb.RegisterDemoBMServer(engine, svc) -initRouter(engine) -``` - -`internal/service`内的`Service`结构实现了`DemoBMServer`接口可参考[kratos-demo内的service](https://github.com/go-kratos/kratos-demo/blob/master/internal/service/service.go)内的如下代码: - -```go -// SayHelloURL bm demo func. 
-func (s *Service) SayHelloURL(ctx context.Context, req *pb.HelloReq) (reply *pb.HelloResp, err error) { - reply = &pb.HelloResp{ - Content: "hello " + req.Name, - } - fmt.Printf("hello url %s", req.Name) - return -} -``` - -# 文档 - -基于同一份`proto`文件还可以生成对应的`swagger`文档,运行命令如下: - -```shell -# generate swagger -kratos tool protoc --swagger api.proto -``` - -该命令将生成对应的`swagger.json`文件,可用于`swagger`工具通过WEBUI的方式打开使用,可运行命令如下: - -```shell -kratos tool swagger serve api/api.swagger.json -``` - -# 扩展阅读 - -[bm快速开始](blademaster-quickstart.md) -[bm模块说明](blademaster-mod.md) -[bm中间件](blademaster-mid.md) - diff --git a/docs/blademaster-quickstart.md b/docs/blademaster-quickstart.md deleted file mode 100644 index d49064b50..000000000 --- a/docs/blademaster-quickstart.md +++ /dev/null @@ -1,142 +0,0 @@ -# 路由 - -进入`internal/server/http`目录下,打开`http.go`文件,其中有默认生成的`blademaster`模板。其中: - -```go -engine = bm.DefaultServer(hc.Server) -initRouter(engine) -if err := engine.Start(); err != nil { - panic(err) -} -``` - -是bm默认创建的`engine`及启动代码,我们看`initRouter`初始化路由方法,默认实现了: - -```go -func initRouter(e *bm.Engine) { - e.Ping(ping) // engine自带的"/ping"接口,用于负载均衡检测服务健康状态 - g := e.Group("/kratos-demo") // e.Group 创建一组 "/kratos-demo" 起始的路由组 - { - g.GET("/start", howToStart) // g.GET 创建一个 "kratos-demo/start" 的路由,使用GET方式请求,默认处理Handler为howToStart方法 - g.POST("start", howToStart) // g.POST 创建一个 "kratos-demo/start" 的路由,使用POST方式请求,默认处理Handler为howToStart方法 - } -} -``` - -bm的handler方法,结构如下: - -```go -func howToStart(c *bm.Context) // handler方法默认传入bm的Context对象 -``` - -### Ping - -engine自带Ping方法,用于设置`/ping`路由的handler,该路由统一提供于负载均衡服务做健康检测。服务是否健康,可自定义`ping handler`进行逻辑判断,如检测DB是否正常等。 - -```go -func ping(c *bm.Context) { - if some DB check not ok { - c.AbortWithStatus(503) - } -} -``` - -# 默认路由 - -默认路由有: - -* /metrics 用于prometheus信息采集 -* /metadata 可以查看所有注册的路由信息 - -查看加载的所有路由信息: - -```shell -curl 'http://127.0.0.1:8000/metadata' -``` - -输出: - -```json -{ - "code": 0, - "message": "0", - "ttl": 1, - "data": { - "/kratos-demo/start": { - "method": "GET" - }, - "/metadata": { - "method": "GET" - }, - "/metrics": { - "method": "GET" - }, - "/ping": { - "method": "GET" - } - } -} -``` - -# 路径参数 - -使用方式如下: - -```go -func initRouter(e *bm.Engine) { - e.Ping(ping) - g := e.Group("/kratos-demo") - { - g.GET("/start", howToStart) - - // 路径参数有两个特殊符号":"和"*" - // ":" 跟在"/"后面为参数的key,匹配两个/中间的值 或 一个/到结尾(其中不再包含/)的值 - // "*" 跟在"/"后面为参数的key,匹配从 /*开始到结尾的所有值,所有*必须写在最后且无法多个 - - // NOTE:这是不被允许的,会和 /start 冲突 - // g.GET("/:xxx") - - // NOTE: 可以拿到一个key为name的参数。注意只能匹配到/param1/felix,无法匹配/param1/felix/hao(该路径会404) - g.GET("/param1/:name", pathParam) - // NOTE: 可以拿到多个key参数。注意只能匹配到/param2/felix/hao/love,无法匹配/param2/felix或/param2/felix/hao - g.GET("/param2/:name/:value/:felid", pathParam) - // NOTE: 可以拿到一个key为name的参数 和 一个key为action的路径。 - // NOTE: 如/params3/felix/hello,action的值为"/hello" - // NOTE: 如/params3/felix/hello/hi,action的值为"/hello/hi" - // NOTE: 如/params3/felix/hello/hi/,action的值为"/hello/hi/" - g.GET("/param3/:name/*action", pathParam) - } -} - -func pathParam(c *bm.Context) { - name, _ := c.Params.Get("name") - value, _ := c.Params.Get("value") - felid, _ := c.Params.Get("felid") - action, _ := c.Params.Get("action") - path := c.RoutePath // NOTE: 获取注册的路由原始地址,如: /kratos-demo/param1/:name - c.JSONMap(map[string]interface{}{ - "name": name, - "value": value, - "felid": felid, - "action": action, - "path": path, - }, nil) -} -``` - -# 性能分析 - -启动时默认监听了`2333`端口用于`pprof`信息采集,如: - -```shell -go tool pprof http://127.0.0.1:8000/debug/pprof/profile -``` - 
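As an aside before the pprof port note below: the `ping` handler earlier in this quickstart is pseudo-code ("if some DB check not ok"). A runnable version might look like the following sketch. The `pinger` interface and the package-level `svc` variable are illustrative stand-ins for however the generated template injects its service or dao; they are not part of the framework, and the import paths simply follow the `pkg/` layout used elsewhere in these docs.

```go
package http

import (
	"context"

	"github.com/go-kratos/kratos/pkg/log"
	bm "github.com/go-kratos/kratos/pkg/net/http/blademaster"
)

// pinger is the minimal capability the health check needs; the generated
// dao/service types already expose a Ping(ctx) method with this shape.
type pinger interface {
	Ping(ctx context.Context) error
}

// svc is assumed to be injected when the server is constructed, as in the
// generated internal/server/http template.
var svc pinger

// ping reports 503 to the load balancer when a backing resource is unhealthy,
// making the "if some DB check not ok" pseudo-code concrete.
func ping(ctx *bm.Context) {
	if err := svc.Ping(ctx); err != nil {
		log.Error("ping check failed: %v", err)
		ctx.AbortWithStatus(503)
	}
}
```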
-改变端口可以使用flag,如:`-http.perf=tcp://0.0.0.0:12333` - -# 扩展阅读 - -[bm模块说明](blademaster-mod.md) -[bm中间件](blademaster-mid.md) -[bm基于pb生成](blademaster-pb.md) - diff --git a/docs/blademaster.md b/docs/blademaster.md deleted file mode 100644 index 1915c3ed6..000000000 --- a/docs/blademaster.md +++ /dev/null @@ -1,43 +0,0 @@ -# 背景 - -在像微服务这样的分布式架构中,经常会有一些需求需要你调用多个服务,但是还需要确保服务的安全性、统一化每次的请求日志或者追踪用户完整的行为等等。要实现这些功能,你可能需要在所有服务中都设置一些相同的属性,虽然这个可以通过一些明确的接入文档来描述或者准入规范来界定,但是这么做的话还是有可能会有一些问题: - -1. 你很难让每一个服务都实现上述功能。因为对于开发者而言,他们应当注重的是实现功能。很多项目的开发者经常在一些日常开发中遗漏了这些关键点,经常有人会忘记去打日志或者去记录调用链。但是对于一些大流量的互联网服务而言,一个线上服务一旦发生故障时,即使故障时间很小,其影响面会非常大。一旦有人在关键路径上忘记路记录日志,那么故障的排除成本会非常高,那样会导致影响面进一步扩大。 -2. 事实上实现之前叙述的这些功能的成本也非常高。比如说对于鉴权(Identify)这个功能,你要是去一个服务一个服务地去实现,那样的成本也是非常高的。如果说把这个确保认证的责任分担在每个开发者身上,那样其实也会增加大家遗忘或者忽略的概率。 - -为了解决这样的问题,你可能需要一个框架来帮助你实现这些功能。比如说帮你在一些关键路径的请求上配置必要的鉴权或超时策略。那样服务间的调用会被多层中间件所过滤并检查,确保整体服务的稳定性。 - -# 设计目标 - -* 性能优异,不应该掺杂太多业务逻辑的成分 -* 方便开发使用,开发对接的成本应该尽可能地小 -* 后续鉴权、认证等业务逻辑的模块应该可以通过业务模块的开发接入该框架内 -* 默认配置已经是 production ready 的配置,减少开发与线上环境的差异性 - -# 概览 - -* 参考`gin`设计整套HTTP框架,去除`gin`中不需要的部分逻辑 -* 内置一些必要的中间件,便于业务方可以直接上手使用 - -# blademaster架构 - -![bm-arch](img/bm-arch-2-2.png) - -`blademaster`由几个非常精简的内部模块组成。其中`Router`用于根据请求的路径分发请求,`Context`包含了一个完整的请求信息,`Handler`则负责处理传入的`Context`,`Handlers`为一个列表,一个串一个地执行。 -所有的`middlerware`均以`Handler`的形式存在,这样可以保证`blademaster`自身足够精简且扩展性足够强。 - -![bm-arch](img/bm-arch-2-3.png) - -`blademaster`处理请求的模式非常简单,大部分的逻辑都被封装在了各种`Handler`中。一般而言,业务逻辑作为最后一个`Handler`。 - -正常情况下每个`Handler`按照顺序一个一个串行地执行下去,但是`Handler`中也可以中断整个处理流程,直接输出`Response`。这种模式常被用于校验登陆的`middleware`中:一旦发现请求不合法,直接响应拒绝。 - -请求处理的流程中也可以使用`Render`来辅助渲染`Response`,比如对于不同的请求需要响应不同的数据格式`JSON`、`XML`,此时可以使用不同的`Render`来简化逻辑。 - -# 扩展阅读 - -[bm快速开始](blademaster-quickstart.md) -[bm模块说明](blademaster-mod.md) -[bm中间件](blademaster-mid.md) -[bm基于pb生成](blademaster-pb.md) - diff --git a/docs/breaker.md b/docs/breaker.md deleted file mode 100644 index 2a4ff474c..000000000 --- a/docs/breaker.md +++ /dev/null @@ -1,49 +0,0 @@ -## 熔断器/Breaker -熔断器是为了当依赖的服务已经出现故障时,主动阻止对依赖服务的请求。保证自身服务的正常运行不受依赖服务影响,防止雪崩效应。 - -## kratos内置breaker的组件 -一般情况下直接使用kratos的组件时都自带了熔断逻辑,并且在提供了对应的breaker配置项。 -目前在kratos内集成熔断器的组件有: -- RPC client: pkg/net/rpc/warden/client -- Mysql client:pkg/database/sql -- Tidb client:pkg/database/tidb -- Http client:pkg/net/http/blademaster - -## 使用说明 -```go - //初始化熔断器组 - //一组熔断器公用同一个配置项,可从分组内取出单个熔断器使用。可用在比如mysql主从分离等场景。 - brkGroup := breaker.NewGroup(&breaker.Config{}) - //为每一个连接指定一个breaker - //此处假设一个客户端连接对象实例为conn - //breakName定义熔断器名称 一般可以使用连接地址 - breakName = conn.Addr - conn.breaker = brkGroup.Get(breakName) - - //在连接发出请求前判断熔断器状态 - if err = conn.breaker.Allow(); err != nil { - return - } - - //连接执行成功或失败将结果告知breaker - if(respErr != nil){ - conn.breaker.MarkFailed() - }else{ - conn.breaker.MarkSuccess() - } - -``` - -## 配置说明 -```go -type Config struct { - SwitchOff bool // 熔断器开关,默认关 false. 
- - K float64 //触发熔断的错误率(K = 1 - 1/错误率) - - Window xtime.Duration //统计桶窗口时间 - Bucket int //统计桶大小 - Request int64 //触发熔断的最少请求数量(请求少于该值时不会触发熔断) -} -``` - diff --git a/docs/cache-mc.md b/docs/cache-mc.md deleted file mode 100644 index 04f60f967..000000000 --- a/docs/cache-mc.md +++ /dev/null @@ -1,195 +0,0 @@ -# 开始使用 - -## 配置 - -进入项目中的configs目录,打开memcache.toml,我们可以看到: - -```toml -[Client] - name = "demo" - proto = "tcp" - addr = "127.0.0.1:11211" - active = 50 - idle = 10 - dialTimeout = "100ms" - readTimeout = "200ms" - writeTimeout = "300ms" - idleTimeout = "80s" -``` -在该配置文件中我们可以配置memcache的连接方式proto、连接地址addr、连接池的闲置连接数idle、最大连接数active以及各类超时。 - -## 初始化 - -进入项目的internal/dao目录,打开mc.go,其中: - -```go -var cfg struct { - Client *memcache.Config -} -checkErr(paladin.Get("memcache.toml").UnmarshalTOML(&mc)) -``` -使用paladin配置管理工具将上文中的memcache.toml中的配置解析为我们需要使用的配置。 - -```go -// dao dao. -type dao struct { - mc *memcache.Memcache - mcExpire int32 -} -``` - -在dao的主结构提中定义了memcache的连接池对象和过期时间。 - -```go -d = &dao{ - // memcache - mc: memcache.New(mc.Demo), - mcExpire: int32(time.Duration(mc.DemoExpire) / time.Second), -} -``` - -使用kratos/pkg/cache/memcache包的New方法进行连接池对象的初始化,需要传入上文解析的配置。 - -## Ping - -```go -// Ping ping the resource. -func (d *dao) Ping(ctx context.Context) (err error) { - return d.pingMC(ctx) -} - -func (d *dao) pingMC(ctx context.Context) (err error) { - if err = d.mc.Set(ctx, &memcache.Item{Key: "ping", Value: []byte("pong"), Expiration: 0}); err != nil { - log.Error("conn.Set(PING) error(%v)", err) - } - return -} -``` - -生成的dao层模板中自带了memcache相关的ping方法,用于为负载均衡服务的健康监测提供依据,详见[blademaster](blademaster-quickstart.md)。 - -## 关闭 - -```go -// Close close the resource. -func (d *Dao) Close() { - d.mc.Close() -} -``` - -在关闭dao层时,通过调用memcache连接池对象的Close方法,我们可以关闭该连接池,从而释放相关资源。 - -# 常用方法 - -推荐使用[memcache代码生成器](kratos-genmc.md)帮助我们生成memcache操作的相关代码。 - -以下我们来逐一解析以下kratos/pkg/cache/memcache包中提供的常用方法。 - -## 单个查询 - -```go -// CacheDemo get data from mc -func (d *Dao) CacheDemo(c context.Context, id int64) (res *Demo, err error) { - key := demoKey(id) - res = &Demo{} - if err = d.mc.Get(c, key).Scan(res); err != nil { - res = nil - if err == memcache.ErrNotFound { - err = nil - } - } - if err != nil { - prom.BusinessErrCount.Incr("mc:CacheDemo") - log.Errorv(c, log.KV("CacheDemo", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - return -} -``` - -如上为代码生成器生成的进行单个查询的代码,使用到mc.Get(c,key)方法获得返回值,再使用scan方法将memcache的返回值转换为golang中的类型(如string,bool, 结构体等)。 - -## 批量查询使用 - -```go -replies, err := d.mc.GetMulti(c, keys) -for _, key := range replies.Keys() { - v := &Demo{} - err = replies.Scan(key, v) -} -``` - -如上为代码生成器生成的进行批量查询的代码片段,这里使用到mc.GetMulti(c,keys)方法获得返回值,与单个查询类似地,我们需要再使用scan方法将memcache的返回值转换为我们定义的结构体。 - -## 设置KV - -```go -// AddCacheDemo Set data to mc -func (d *Dao) AddCacheDemo(c context.Context, id int64, val *Demo) (err error) { - if val == nil { - return - } - key := demoKey(id) - item := &memcache.Item{Key: key, Object: val, Expiration: d.demoExpire, Flags: memcache.FlagJSON | memcache.FlagGzip} - if err = d.mc.Set(c, item); err != nil { - prom.BusinessErrCount.Incr("mc:AddCacheDemo") - log.Errorv(c, log.KV("AddCacheDemo", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - return -} -``` - -如上为代码生成器生成的添加结构体进入memcache的代码,这里需要使用到的是mc.Set方法进行设置。 -这里使用的item为memcache.Item结构体,包含key, value, 超时时间(秒), Flags。 - -### Flags - - -上文添加结构体进入memcache中,使用到的flags为:memcache.FlagJSON | memcache.FlagGzip代表着:使用json作为编码方式,gzip作为压缩方式。 - -Flags的相关常量在kratos/pkg/cache/memcache包中进行定义,包含编码方式如gob, json, 
protobuf,和压缩方式gzip。 - -```go -const( - // Flag, 15(encoding) bit+ 17(compress) bit - - // FlagRAW default flag. - FlagRAW = uint32(0) - // FlagGOB gob encoding. - FlagGOB = uint32(1) << 0 - // FlagJSON json encoding. - FlagJSON = uint32(1) << 1 - // FlagProtobuf protobuf - FlagProtobuf = uint32(1) << 2 - // FlagGzip gzip compress. - FlagGzip = uint32(1) << 15 -) -``` - -## 删除KV - -```go -// DelCacheDemo delete data from mc -func (d *Dao) DelCacheDemo(c context.Context, id int64) (err error) { - key := demoKey(id) - if err = d.mc.Delete(c, key); err != nil { - if err == memcache.ErrNotFound { - err = nil - return - } - prom.BusinessErrCount.Incr("mc:DelCacheDemo") - log.Errorv(c, log.KV("DelCacheDemo", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - return -} -``` -如上为代码生成器生成的从memcache中删除KV的代码,这里需要使用到的是mc.Delete方法。 -和查询时类似地,当memcache中不存在参数中的key时,会返回error为memcache.ErrNotFound。如果不需要处理这种error,可以参考上述代码将返回出去的error置为nil。 - -# 扩展阅读 - -[memcache代码生成器](kratos-genmc.md) -[redis模块说明](cache-redis.md) - diff --git a/docs/cache-redis.md b/docs/cache-redis.md deleted file mode 100644 index 898e419df..000000000 --- a/docs/cache-redis.md +++ /dev/null @@ -1,181 +0,0 @@ -# 开始使用 - -## 配置 - -进入项目中的configs目录,打开redis.toml,我们可以看到: - -```toml -[Client] - name = "kratos-demo" - proto = "tcp" - addr = "127.0.0.1:6389" - idle = 10 - active = 10 - dialTimeout = "1s" - readTimeout = "1s" - writeTimeout = "1s" - idleTimeout = "10s" -``` - -在该配置文件中我们可以配置redis的连接方式proto、连接地址addr、连接池的闲置连接数idle、最大连接数active以及各类超时。 - -## 初始化 - -进入项目的internal/dao目录,打开redis.go,其中: - -```go -var cfg struct { - Client *memcache.Config -} -checkErr(paladin.Get("redis.toml").UnmarshalTOML(&rc)) -``` -使用paladin配置管理工具将上文中的redis.toml中的配置解析为我们需要使用的配置。 - -```go -// Dao dao. -type Dao struct { - redis *redis.Pool - redisExpire int32 -} -``` - -在dao的主结构提中定义了redis的连接池对象和过期时间。 - -```go -d = &dao{ - // redis - redis: redis.NewPool(rc.Demo), - redisExpire: int32(time.Duration(rc.DemoExpire) / time.Second), -} -``` - -使用kratos/pkg/cache/redis包的NewPool方法进行连接池对象的初始化,需要传入上文解析的配置。 - -## Ping - -```go -// Ping ping the resource. -func (d *dao) Ping(ctx context.Context) (err error) { - return d.pingRedis(ctx) -} - -func (d *dao) pingRedis(ctx context.Context) (err error) { - conn := d.redis.Get(ctx) - defer conn.Close() - if _, err = conn.Do("SET", "ping", "pong"); err != nil { - log.Error("conn.Set(PING) error(%v)", err) - } - return -} -``` - -生成的dao层模板中自带了redis相关的ping方法,用于为负载均衡服务的健康监测提供依据,详见[blademaster](blademaster-quickstart.md)。 - -## 关闭 - -```go -// Close close the resource. -func (d *Dao) Close() { - d.redis.Close() -} -``` - -在关闭dao层时,通过调用redis连接池对象的Close方法,我们可以关闭该连接池,从而释放相关资源。 - -# 常用方法 - -## 发送单个命令 Do - -```go -// DemoIncrby . -func (d *dao) DemoIncrby(c context.Context, pid int) (err error) { - cacheKey := keyDemo(pid) - conn := d.redis.Get(c) - defer conn.Close() - if _, err = conn.Do("INCRBY", cacheKey, 1); err != nil { - log.Error("DemoIncrby conn.Do(INCRBY) key(%s) error(%v)", cacheKey, err) - } - return -} -``` -如上为向redis server发送单个命令的用法示意。这里需要使用redis连接池的Get方法获取一个redis连接conn,再使用conn.Do方法即可发送一条指令。 -注意,在使用该连接完毕后,需要使用conn.Close方法将该连接关闭。 - -## 批量发送命令 Pipeline - -kratos/pkg/cache/redis包除了支持发送单个命令,也支持批量发送命令(redis pipeline),比如: - -```go -// DemoIncrbys . 
-func (d *dao) DemoIncrbys(c context.Context, pid int) (err error) { - cacheKey := keyDemo(pid) - conn := d.redis.Get(c) - defer conn.Close() - if err = conn.Send("INCRBY", cacheKey, 1); err != nil { - return - } - if err = conn.Send("EXPIRE", cacheKey, d.redisExpire); err != nil { - return - } - if err = conn.Flush(); err != nil { - log.Error("conn.Flush error(%v)", err) - return - } - for i := 0; i < 2; i++ { - if _, err = conn.Receive(); err != nil { - log.Error("conn.Receive error(%v)", err) - return - } - } - return -} -``` - -和发送单个命令类似地,这里需要使用redis连接池的Get方法获取一个redis连接conn,在使用该连接完毕后,需要使用conn.Close方法将该连接关闭。 - -这里使用conn.Send方法将命令写入客户端的buffer(缓冲区)中,使用conn.Flush将客户端的缓冲区内的命令打包发送到redis server。redis server按顺序返回的reply可以使用conn.Receive方法进行接收和处理。 - - -## 返回值转换 - -kratos/pkg/cache/redis包中也提供了Scan方法将redis server的返回值转换为golang类型。 - -除此之外,kratos/pkg/cache/redis包提供了大量返回值转换的快捷方式: - -### 单个查询 - -单个查询可以使用redis.Uint64/Int64/Float64/Int/String/Bool/Bytes进行返回值的转换,比如: - -```go -// GetDemo get -func (d *Dao) GetDemo(ctx context.Context, key string) (string, error) { - conn := d.redis.Get(ctx) - defer conn.Close() - return redis.String(conn.Do("GET", key)) -} -``` - -### 批量查询 - -批量查询时候,可以使用redis.Int64s,Ints,Strings,ByteSlices方法转换如MGET,HMGET,ZRANGE,SMEMBERS等命令的返回值。 -还可以使用StringMap, IntMap, Int64Map方法转换HGETALL命令的返回值,比如: - -```go -// HGETALLDemo get -func (d *Dao) HGETALLDemo(c context.Context, pid int64) (res map[string]int64, err error) { - var ( - key = keyDemo(pid) - conn = d.redis.Get(c) - ) - defer conn.Close() - if res, err = redis.Int64Map(conn.Do("HGETALL", key)); err != nil { - log.Error("HGETALL %v failed error(%v)", key, err) - } - return -} -``` - -# 扩展阅读 - -[memcache模块说明](cache-mc.md) - diff --git a/docs/cache.md b/docs/cache.md deleted file mode 100644 index a1e484035..000000000 --- a/docs/cache.md +++ /dev/null @@ -1,20 +0,0 @@ -# 背景 - -我们需要统一的cache包,用于进行各类缓存操作。 - -# 概览 - -* 缓存操作均使用连接池,保证较快的数据读写速度且提高系统的安全可靠性。 - -# Memcache - -提供protobuf,gob,json序列化方式,gzip的memcache接口 - -[memcache模块说明](cache-mc.md) - -# Redis - -提供redis操作的各类接口以及各类将redis server返回值转换为golang类型的快捷方法。 - -[redis模块说明](cache-redis.md) - diff --git a/docs/config-paladin.md b/docs/config-paladin.md deleted file mode 100644 index 28d8c1cc9..000000000 --- a/docs/config-paladin.md +++ /dev/null @@ -1,118 +0,0 @@ -# Paladin SDK - -## 配置模块化 -进行配置的模块化是为了更好地管理配置,尽可能避免由修改配置带来的失误。 -在配置种类里,可以看到其实 环境配置 和 应用配置 已经由平台进行管理化。 -我们通常业务里只用配置 业务配置 和 在线配置 就可以了,之前我们大部分都是单个文件配置,而为了更好管理我们需要按类型进行拆分配置文件。 - -例如: - -| 名称 | 说明 | -|:------|:------| -| application.toml | 在线配置 | -| mysql.toml | 业务db配置 | -| hbase.toml | 业务hbase配置 | -| memcache.toml | 业务mc配置 | -| redis.toml | 业务redis配置 | -| http.toml | 业务http client/server/auth配置 | -| grpc.toml | 业务grpc client/server配置 | - -## 使用方式 - -paladin 是一个config SDK客户端,包括了remote、file、mock几个抽象功能,方便使用本地文件或者远程配置中心,并且集成了对象自动reload功能。 - -### 远程配置中心 -可以通过环境变量注入,例如:APP_ID/DEPLOY_ENV/ZONE/HOSTNAME,然后通过paladin实现远程配置中心SDK进行配合使用。 - -### 指定本地文件: -```shell -./cmd -conf=/data/conf/app/demo.toml -# or multi file -./cmd -conf=/data/conf/app/ -``` - -### mock配置文件 -```go -func TestMain(t *testing.M) { - mock := make(map[string]string]) - mock["application.toml"] = ` - demoSwitch = false - demoNum = 100 - demoAPI = "xxx" - ` - paladin.DefaultClient = paladin.NewMock(mock) -} -``` - -### example main -```go -// main.go -func main() { - flag.Parse() - // 初始化paladin - if err := paladin.Init(); err != nil { - panic(err) - } - log.Init(nil) // debug flag: log.dir={path} - defer log.Close() -} -``` - -### example HTTP/gRPC -```toml -# http.toml -[server] - 
addr = "0.0.0.0:9000" - timeout = "1s" - -``` - -```go -// server.go -func NewServer() { - // 默认配置用nil,这时读取HTTP/gRPC构架中的flag或者环境变量(可能是docker注入的环境变量,默认端口:8000/9000) - engine := bm.DefaultServer(nil) - - // 除非自己要替换了配置,用http.toml - var bc struct { - Server *bm.ServerConfig - } - if err := paladin.Get("http.toml").UnmarshalTOML(&bc); err != nil { - // 不存在时,将会为nil使用默认配置 - if err != paladin.ErrNotExist { - panic(err) - } - } - engine := bm.DefaultServer(bc.Server) -} -``` - -### example Service(在线配置热加载配置) -```go -# service.go -type Service struct { - ac *paladin.Map -} - -func New() *Service { - // paladin.Map 通过atomic.Value支持自动热加载 - var ac = new(paladin.TOML) - if err := paladin.Watch("application.toml", ac); err != nil { - panic(err) - } - s := &Service{ - ac: ac, - } - return s -} - -func (s *Service) Test() { - sw, err := s.ac.Get("switch").Bool() - if err != nil { - // TODO - } - - // or use default value - sw := paladin.Bool(s.ac.Get("switch"), false) -} -``` diff --git a/docs/config.md b/docs/config.md deleted file mode 100644 index f27244ab9..000000000 --- a/docs/config.md +++ /dev/null @@ -1,48 +0,0 @@ -# config - -## 介绍 -初看起来,配置管理可能很简单,但是这其实是不稳定的一个重要来源。 -即变更管理导致的故障,我们目前基于配置中心(config-service)的部署方式,二进制文件的发布与配置文件的修改是异步进行的,每次变更配置,需要重新构建发布版。 -由此,我们整体对配置文件进行梳理,对配置进行模块化,以及方便易用的paladin config sdk。 - -## 环境配置 - -| flag | env | remark | -|:----------|:----------|:------| -| region | REGION | 部署地区,sh-上海、gz-广州、bj-北京 | -| zone | ZONE | 分布区域,sh001-上海核心、sh004-上海嘉定 | -| deploy.env | DEPLOY_ENV | dev-开发、fat1-功能、uat-集成、pre-预发、prod-生产 | -| deploy.color | DEPLOY_COLOR | 服务颜色,blue(测试feature染色请求) | -| - | HOSTNAME | 主机名,xxx-hostname | - -全局公用环境变量,通常为部署环境配置,由系统、发布系统或supervisor进行环境变量注入,并不用进行例外配置,如果是开发过程中则可以通过flag注入进行运行测试。 - -## 应用配置 - -| flag | env | default | remark | -|:----------|:----------|:-------------|:------| -| appid | APP_ID | - | 应用ID | -| http | HTTP | tcp://0.0.0.0:8000/?timeout=1s | http 监听端口 | -| http.perf | HTTP_PERF | tcp://0.0.0.0:2233/?timeout=1s | http perf 监听端口 | -| grpc | GRPC | tcp://0.0.0.0:9000/?timeout=1s&idle_timeout=60s | grpc 监听端口 | -| grpc.target | - | - | 指定服务运行:
-grpc.target=demo.service=127.0.0.1:9000
-grpc.target=demo.service=127.0.0.2:9000 | -| discovery.nodes | DISCOVERY_NODES | - | 服务发现节点:127.0.0.1:7171,127.0.0.2:7171 | -| log.v | LOG_V | 0 | 日志级别:
DEBUG:0 INFO:1 WARN:2 ERROR:3 FATAL:4 | -| log.stdout | LOG_STDOUT | false | 是否标准输出:true、false| -| log.dir | LOG_DIR | - | 日志文件目录,如果配置会输出日志到文件,否则不输出日志文件 | -| log.agent | LOG_AGENT | - | 日志采集agent:
unixpacket:///var/run/lancer/collector_tcp.sock?timeout=100ms&chan=1024 | -| log.module | LOG_MODULE | - | 指定field信息 format: file=1,file2=2. | -| log.filter | LOG_FILTER | - | 过虑敏感信息 format: field1,field2. | - -基本为一些应用相关的配置信息,通常发布系统和supervisor都有对应的部署环境进行配置注入,并不用进行例外配置,如果开发过程中可以通过flag进行注入运行测试。 - -## 业务配置 -Redis、MySQL等业务组件,可以使用静态的配置文件来初始化,根据应用业务集群进行配置。 - -## 在线配置 -需要在线读取、变更的配置信息,比如某个业务开关,可以实现配置reload实时更新。 - -## 扩展阅读 - -[paladin配置sdk](config-paladin.md) - diff --git a/docs/database-hbase.md b/docs/database-hbase.md deleted file mode 100644 index 327092b7f..000000000 --- a/docs/database-hbase.md +++ /dev/null @@ -1,51 +0,0 @@ -# database/hbase - -## 说明 -Hbase Client,进行封装加入了链路追踪和统计。 - -## 配置 -需要指定hbase集群的zookeeper地址。 -``` -config := &hbase.Config{Zookeeper: &hbase.ZKConfig{Addrs: []string{"localhost"}}} -client := hbase.NewClient(config) -``` - -## 使用方式 -``` -package main - -import ( - "context" - "fmt" - - "github.com/go-kratos/kratos/pkg/database/hbase" -) - -func main() { - config := &hbase.Config{Zookeeper: &hbase.ZKConfig{Addrs: []string{"localhost"}}} - client := hbase.NewClient(config) - - // - values := map[string]map[string][]byte{"name": {"firstname": []byte("hello"), "lastname": []byte("world")}} - ctx := context.Background() - - // 写入信息 - // table: user - // rowkey: user1 - // values["family"] = columns - _, err := client.PutStr(ctx, "user", "user1", values) - if err != nil { - panic(err) - } - - // 读取信息 - // table: user - // rowkey: user1 - result, err := client.GetStr(ctx, "user", "user1") - if err != nil { - panic(err) - } - fmt.Printf("%v", result) -} -``` - diff --git a/docs/database-mysql-orm.md b/docs/database-mysql-orm.md deleted file mode 100644 index e29d5b1b5..000000000 --- a/docs/database-mysql-orm.md +++ /dev/null @@ -1,42 +0,0 @@ -# 开始使用 - -## 配置 - -进入项目中的configs目录,mysql.toml,我们可以看到: - -```toml -[demo] - addr = "127.0.0.1:3306" - dsn = "{user}:{password}@tcp(127.0.0.1:3306)/{database}?timeout=1s&readTimeout=1s&writeTimeout=1s&parseTime=true&loc=Local&charset=utf8mb4,utf8" - readDSN = ["{user}:{password}@tcp(127.0.0.2:3306)/{database}?timeout=1s&readTimeout=1s&writeTimeout=1s&parseTime=true&loc=Local&charset=utf8mb4,utf8","{user}:{password}@tcp(127.0.0.3:3306)/{database}?timeout=1s&readTimeout=1s&writeTimeout=1s&parseTime=true&loc=Local&charset=utf8,utf8mb4"] - active = 20 - idle = 10 - idleTimeout ="4h" - queryTimeout = "200ms" - execTimeout = "300ms" - tranTimeout = "400ms" -``` - -在该配置文件中我们可以配置mysql的读和写的dsn、连接地址addr、连接池的闲置连接数idle、最大连接数active以及各类超时。 - -如果配置了readDSN,在进行读操作的时候会优先使用readDSN的连接。 - -## 初始化 - -进入项目的internal/dao目录,打开db.go,其中: - -```go -var cfg struct { - Client *sql.Config -} -checkErr(paladin.Get("db.toml").UnmarshalTOML(&dc)) -``` -使用paladin配置管理工具将上文中的db.toml中的配置解析为我们需要使用db的相关配置。 - -# TODO:补充常用方法 - -# 扩展阅读 - -[tidb模块说明](database-tidb.md) -[hbase模块说明](database-hbase.md) - diff --git a/docs/database-mysql.md b/docs/database-mysql.md deleted file mode 100644 index ee289f60c..000000000 --- a/docs/database-mysql.md +++ /dev/null @@ -1,195 +0,0 @@ -# 开始使用 - -## 配置 - -进入项目中的configs目录,mysql.toml,我们可以看到: - -```toml -[demo] - addr = "127.0.0.1:3306" - dsn = "{user}:{password}@tcp(127.0.0.1:3306)/{database}?timeout=1s&readTimeout=1s&writeTimeout=1s&parseTime=true&loc=Local&charset=utf8mb4,utf8" - readDSN = 
["{user}:{password}@tcp(127.0.0.2:3306)/{database}?timeout=1s&readTimeout=1s&writeTimeout=1s&parseTime=true&loc=Local&charset=utf8mb4,utf8","{user}:{password}@tcp(127.0.0.3:3306)/{database}?timeout=1s&readTimeout=1s&writeTimeout=1s&parseTime=true&loc=Local&charset=utf8,utf8mb4"] - active = 20 - idle = 10 - idleTimeout ="4h" - queryTimeout = "200ms" - execTimeout = "300ms" - tranTimeout = "400ms" -``` - -在该配置文件中我们可以配置mysql的读和写的dsn、连接地址addr、连接池的闲置连接数idle、最大连接数active以及各类超时。 - -如果配置了readDSN,在进行读操作的时候会优先使用readDSN的连接。 - -## 初始化 - -进入项目的internal/dao目录,打开db.go,其中: - -```go -var cfg struct { - Client *sql.Config -} -checkErr(paladin.Get("db.toml").UnmarshalTOML(&dc)) -``` -使用paladin配置管理工具将上文中的db.toml中的配置解析为我们需要使用db的相关配置。 - -```go -// Dao dao. -type Dao struct { - db *sql.DB -} -``` - -在dao的主结构提中定义了mysql的连接池对象。 - -```go -d = &dao{ - db: sql.NewMySQL(dc.Demo), -} -``` - -使用kratos/pkg/database/sql包的NewMySQL方法进行连接池对象的初始化,需要传入上文解析的配置。 - -## Ping - -```go -// Ping ping the resource. -func (d *dao) Ping(ctx context.Context) (err error) { - return d.db.Ping(ctx) -} -``` - -生成的dao层模板中自带了mysql相关的ping方法,用于为负载均衡服务的健康监测提供依据,详见[blademaster](blademaster-quickstart.md)。 - -## 关闭 - -```go -// Close close the resource. -func (d *dao) Close() { - d.db.Close() -} -``` - -在关闭dao层时,通过调用mysql连接池对象的Close方法,我们可以关闭该连接池,从而释放相关资源。 - -# 常用方法 - -## 单个查询 - -```go -// GetDemo 用户角色 -func (d *dao) GetDemo(c context.Context, did int64) (demo int8, err error) { - err = d.db.QueryRow(c, _getDemoSQL, did).Scan(&demo) - if err != nil && err != sql.ErrNoRows { - log.Error("d.GetDemo.Query error(%v)", err) - return - } - return demo, nil -} -``` - -db.QueryRow方法用于返回最多一条记录的查询,在QueryRow方法后使用Scan方法即可将mysql的返回值转换为Golang的数据类型。 - -当mysql查询不到对应数据时,会返回sql.ErrNoRows,如果不需处理,可以参考如上代码忽略此error。 - -## 批量查询 - -```go -// ResourceLogs ResourceLogs. -func (d *dao) GetDemos(c context.Context, dids []int64) (demos []int8, err error) { - rows, err := d.db.Query(c, _getDemosSQL, dids) - if err != nil { - log.Error("query error(%v)", err) - return - } - defer rows.Close() - for rows.Next() { - var tmpD int8 - if err = rows.Scan(&tmpD); err != nil { - log.Error("scan demo log error(%v)", err) - return - } - demos = append(demos, tmpD) - } - return -} -``` - -db.Query方法一般用于批量查询的场景,返回*sql.Rows和error信息。 -我们可以使用rows.Next()方法获得下一行的返回结果,并且配合使用rows.Scan()方法将该结果转换为Golang的数据类型。当没有下一行时,rows.Next方法将返回false,此时循环结束。 - -注意,在使用完毕rows对象后,需要调用rows.Close方法关闭连接,释放相关资源。 - -## 执行语句 - -```go -// DemoExec exec -func (d *Dao) DemoExec(c context.Context, id int64) (rows int64, err error) { - res, err := d.db.Exec(c, _demoUpdateSQL, id) - if err != nil { - log.Error("db.DemoExec.Exec(%s) error(%v)", _demoUpdateSQL, err) - return - } - return res.RowsAffected() -} -``` - -执行UPDATE/DELETE/INSERT语句时,使用db.Exec方法进行语句执行,返回*sql.Result和error信息: - -```go - -// A Result summarizes an executed SQL command. 
-type Result interface { - LastInsertId() (int64, error) - RowsAffected() (int64, error) -} -``` - -Result接口支持获取影响行数和LastInsertId(一般用于获取Insert语句插入数据库后的主键ID) - - -## 事务 - -kratos/pkg/database/sql包支持事务操作,具体操作示例如下: - -开启一个事务: - -```go -tx := d.db.Begin() -if err = tx.Error; err != nil { - log.Error("db begin transcation failed, err=%+v", err) - return -} -``` - -在事务中执行语句: - -```go -res, err := tx.Exec(_demoSQL, did) -if err != nil { - return -} -rows := res.RowsAffected() -``` - -提交事务: - -```go -if err = tx.Commit().Error; err!=nil{ - log.Error("db commit transcation failed, err=%+v", err) -} -``` - -回滚事务: - -```go -if err = tx.Rollback().Error; err!=nil{ - log.Error("db rollback failed, err=%+v", rollbackErr) -} -``` - -# 扩展阅读 - -- [tidb模块说明](database-tidb.md) -- [hbase模块说明](database-hbase.md) - diff --git a/docs/database-tidb.md b/docs/database-tidb.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/docs/database.md b/docs/database.md deleted file mode 100644 index f8cb3ebf2..000000000 --- a/docs/database.md +++ /dev/null @@ -1,19 +0,0 @@ -# database/sql - -## 背景 -数据库驱动,进行封装加入了熔断、链路追踪和统计,以及链路超时。 -通常数据模块都写在`internal/dao`目录中,并提供对应的数据访问接口。 - -## MySQL -MySQL数据库驱动,支持读写分离、context、timeout、trace和统计功能,以及错误熔断防止数据库雪崩。 -[mysql client](database-mysql.md) -[mysql client orm](database-mysql-orm.md) - -## HBase -HBase客户端,支持trace、slowlog和统计功能。 -[hbase client](database-hbase.md) - -## TiDB -TiDB客户端,支持服务发现和熔断功能。 -[tidb client](database-tidb.md) - diff --git a/docs/design/kratos-v2.md b/docs/design/kratos-v2.md new file mode 100644 index 000000000..6aa6293f9 --- /dev/null +++ b/docs/design/kratos-v2.md @@ -0,0 +1,14 @@ +# Kratos v2 Kit Design + +MaoJian + +Last updated: December 25, 2020 + +## Abstract +kratos v1 基础库主要专注在各类功能的细节实现,比如 gRPC 的负载均衡,熔断器等一系列微服务需要的功能。 + +## Background + +## Proposal + +## Implementation diff --git a/docs/ecode.md b/docs/ecode.md deleted file mode 100644 index 10d8f0e8b..000000000 --- a/docs/ecode.md +++ /dev/null @@ -1,102 +0,0 @@ -# ecode - -## 背景 -错误码一般被用来进行异常传递,且需要具有携带`message`文案信息的能力。 - -## 错误码之Codes - -在`kratos`里,错误码被设计成`Codes`接口,声明如下[代码位置](https://github.com/go-kratos/kratos/blob/master/pkg/ecode/ecode.go): - -```go -// Codes ecode error interface which has a code & message. -type Codes interface { - // sometimes Error return Code in string form - // NOTE: don't use Error in monitor report even it also work for now - Error() string - // Code get error code. - Code() int - // Message get code message. - Message() string - //Detail get error detail,it may be nil. - Details() []interface{} -} - -// A Code is an int error code spec. -type Code int -``` - -可以看到该接口一共有四个方法,且`type Code int`结构体实现了该接口。 - -### 注册message - -一个`Code`错误码可以对应一个`message`,默认实现会从全局变量`_messages`中获取,业务可以将自定义`Code`对应的`message`通过调用`Register`方法的方式传递进去,如: - -```go -cms := map[int]string{ - 0: "很好很强大!", - -304: "啥都没变啊~", - -404: "啥都没有啊~", -} -ecode.Register(cms) - -fmt.Println(ecode.OK.Message()) // 输出:很好很强大! -``` - -注意:`map[int]string`类型并不是绝对,比如有业务要支持多语言的场景就可以扩展为类似`map[int]LangStruct`的结构,因为全局变量`_messages`是`atomic.Value`类型,只需要修改对应的`Message`方法实现即可。 - -### Details - -`Details`接口为`gRPC`预留,`gRPC`传递异常会将服务端的错误码pb序列化之后赋值给`Details`,客户端拿到之后反序列化得到,具体可阅读`status`的实现: -1. `ecode`包内的`Status`结构体实现了`Codes`接口[代码位置](https://github.com/go-kratos/kratos/blob/master/pkg/ecode/status.go) -2. `warden/internal/status`包内包装了`ecode.Status`和`grpc.Status`进行互相转换的方法[代码位置](https://github.com/go-kratos/kratos/blob/master/pkg/net/rpc/warden/internal/status/status.go) -3. 
`warden`的`client`和`server`则使用转换方法将`gRPC`底层返回的`error`最终转换为`ecode.Status` [代码位置](https://github.com/go-kratos/kratos/blob/master/pkg/net/rpc/warden/client.go#L162) - -## 转换为ecode - -错误码转换有以下两种情况: -1. 因为框架传递错误是靠`ecode`错误码,比如bm框架返回的`code`字段默认就是数字,那么客户端接收到如`{"code":-404}`的话,可以使用`ec := ecode.Int(-404)`或`ec := ecode.String("-404")`来进行转换。 -2. 在项目中`dao`层返回一个错误码,往往返回参数类型建议为`error`而不是`ecode.Codes`,因为`error`更通用,那么上层`service`就可以使用`ec := ecode.Cause(err)`进行转换。 - -## 判断 - -错误码判断是否相等: -1. `ecode`与`ecode`判断使用:`ecode.Equal(ec1, ec2)` -2. `ecode`与`error`判断使用:`ecode.EqualError(ec, err)` - -## 使用工具生成 - -使用proto协议定义错误码,格式如下: - -```proto -// user.proto -syntax = "proto3"; - -package ecode; - -enum UserErrCode { - UserUndefined = 0; // 因protobuf协议限制必须存在!!!无意义的0,工具生成代码时会忽略该参数 - UserNotLogin = 123; // 正式错误码 -} -``` - -需要注意以下几点: - -1. 必须是enum类型,且名字规范必须以"ErrCode"结尾,如:UserErrCode -2. 因为protobuf协议限制,第一个enum值必须为无意义的0 - -使用`kratos tool protoc --ecode user.proto`进行生成,生成如下代码: - -```go -package ecode - -import ( - "github.com/go-kratos/kratos/pkg/ecode" -) - -var _ ecode.Codes - -// UserErrCode -var ( - UserNotLogin = ecode.New(123); -) -``` diff --git a/docs/images/alipay.png b/docs/images/alipay.png new file mode 100644 index 000000000..4497460fc Binary files /dev/null and b/docs/images/alipay.png differ diff --git a/docs/img/kratos3.png b/docs/images/kratos.png similarity index 100% rename from docs/img/kratos3.png rename to docs/images/kratos.png diff --git a/docs/img/bm-arch-2-2.png b/docs/img/bm-arch-2-2.png deleted file mode 100644 index ba2163f85..000000000 Binary files a/docs/img/bm-arch-2-2.png and /dev/null differ diff --git a/docs/img/bm-arch-2-3.png b/docs/img/bm-arch-2-3.png deleted file mode 100644 index 4cf0dfde3..000000000 Binary files a/docs/img/bm-arch-2-3.png and /dev/null differ diff --git a/docs/img/bm-handlers.png b/docs/img/bm-handlers.png deleted file mode 100644 index 5d9b5e8c8..000000000 Binary files a/docs/img/bm-handlers.png and /dev/null differ diff --git a/docs/img/kratos-log.jpg b/docs/img/kratos-log.jpg deleted file mode 100644 index b9615297d..000000000 Binary files a/docs/img/kratos-log.jpg and /dev/null differ diff --git a/docs/img/kratos.png b/docs/img/kratos.png deleted file mode 100644 index aa4d1cd91..000000000 Binary files a/docs/img/kratos.png and /dev/null differ diff --git a/docs/img/kratos2.png b/docs/img/kratos2.png deleted file mode 100644 index 612f83b34..000000000 Binary files a/docs/img/kratos2.png and /dev/null differ diff --git a/docs/img/ratelimit-benchmark-up-1.png b/docs/img/ratelimit-benchmark-up-1.png deleted file mode 100644 index 7f8d3a866..000000000 Binary files a/docs/img/ratelimit-benchmark-up-1.png and /dev/null differ diff --git a/docs/img/ratelimit-rolling-window.png b/docs/img/ratelimit-rolling-window.png deleted file mode 100644 index 168937d39..000000000 Binary files a/docs/img/ratelimit-rolling-window.png and /dev/null differ diff --git a/docs/img/zipkin.jpg b/docs/img/zipkin.jpg deleted file mode 100644 index 8fca3794d..000000000 Binary files a/docs/img/zipkin.jpg and /dev/null differ diff --git a/docs/index.html b/docs/index.html deleted file mode 100644 index 94fa7b465..000000000 --- a/docs/index.html +++ /dev/null @@ -1,30 +0,0 @@ - - - - - Kratos Documentation - - - - - - -
- - - - - - diff --git a/docs/install.md b/docs/install.md deleted file mode 100644 index 4ff3e2057..000000000 --- a/docs/install.md +++ /dev/null @@ -1,66 +0,0 @@ -# 安装 - -1.安装protoc二进制文件 - -``` -下载地址:https://github.com/google/protobuf/releases -mv bin/protoc /usr/local/bin/ -mv -r include/google /usr/local/include/ -``` - -2.安装protobuf库文件 - -``` -go get -u github.com/golang/protobuf/proto -``` - -3.安装goprotobuf插件 - -``` -go get github.com/golang/protobuf/protoc-gen-go -``` - -4.安装gogoprotobuf插件和依赖 - -``` -//gogo -go get github.com/gogo/protobuf/protoc-gen-gogo - -//gofast -go get github.com/gogo/protobuf/protoc-gen-gofast - -//依赖 -go get github.com/gogo/protobuf/proto -go get github.com/gogo/protobuf/gogoproto -``` - -5.安装框架依赖 - -``` -# grpc (或者git clone https://github.com/grpc/grpc-go 然后复制到google.golang.org/grpc) -go get -u google.golang.org/grpc - -# genproto (或者git clone https://github.com/google/go-genproto 然后复制到google.golang.org/genproto) -go get google.golang.org/genproto/... -``` - -6.安装kratos tool - -``` -go get -u github.com/go-kratos/kratos/tool/kratos -cd $GOPATH/src -kratos new kratos-demo --proto -``` - -7.运行 - -``` -cd kratos-demo/cmd -go build -./cmd -conf ../configs -``` - -打开浏览器访问:[http://localhost:8000/kratos-demo/start](http://localhost:8000/kratos-demo/start),你会看到输出了`Golang 大法好 !!!` - -[kratos工具](kratos-tool.md) - diff --git a/docs/kratos-genbts.md b/docs/kratos-genbts.md deleted file mode 100644 index ca3e74430..000000000 --- a/docs/kratos-genbts.md +++ /dev/null @@ -1,27 +0,0 @@ -### kratos tool genbts - -> 缓存回源代码生成 - -在internal/dao/dao.go中添加mc缓存interface定义,可以指定对应的[注解参数](../../tool/kratos-gen-bts/README.md); -并且在接口前面添加`go:generate kratos tool genbts`; -然后在当前目录执行`go generate`,可以看到自动生成的dao.bts.go代码。 - -### 回源模板 -```go -//go:generate kratos tool genbts -type _bts interface { - // bts: -batch=2 -max_group=20 -batch_err=break -nullcache=&Demo{ID:-1} -check_null_code=$.ID==-1 - Demos(c context.Context, keys []int64) (map[int64]*Demo, error) - // bts: -sync=true -nullcache=&Demo{ID:-1} -check_null_code=$.ID==-1 - Demo(c context.Context, key int64) (*Demo, error) - // bts: -paging=true - Demo1(c context.Context, key int64, pn int, ps int) (*Demo, error) - // bts: -nullcache=&Demo{ID:-1} -check_null_code=$.ID==-1 - None(c context.Context) (*Demo, error) -} -``` - -### 参考 - -也可以参考完整的testdata例子:kratos/tool/kratos-gen-bts/testdata - diff --git a/docs/kratos-genmc.md b/docs/kratos-genmc.md deleted file mode 100644 index d83378c4d..000000000 --- a/docs/kratos-genmc.md +++ /dev/null @@ -1,68 +0,0 @@ -### kratos tool genmc - -> 缓存代码生成 - -在internal/dao/dao.go中添加mc缓存interface定义,可以指定对应的[注解参数](../../tool/kratos-gen-mc/README.md); -并且在接口前面添加`go:generate kratos tool genmc`; -然后在当前目录执行`go generate`,可以看到自动生成的mc.cache.go代码。 - -### 缓存模板 -```go -//go:generate kratos tool genmc -type _mc interface { - // mc: -key=demoKey - CacheDemos(c context.Context, keys []int64) (map[int64]*Demo, error) - // mc: -key=demoKey - CacheDemo(c context.Context, key int64) (*Demo, error) - // mc: -key=keyMid - CacheDemo1(c context.Context, key int64, mid int64) (*Demo, error) - // mc: -key=noneKey - CacheNone(c context.Context) (*Demo, error) - // mc: -key=demoKey - CacheString(c context.Context, key int64) (string, error) - - // mc: -key=demoKey -expire=d.demoExpire -encode=json - AddCacheDemos(c context.Context, values map[int64]*Demo) error - // mc: -key=demo2Key -expire=d.demoExpire -encode=json - AddCacheDemos2(c context.Context, values map[int64]*Demo, tp int64) error - // 这里也支持自定义注释 会替换默认的注释 - // mc: 
-key=demoKey -expire=d.demoExpire -encode=json|gzip - AddCacheDemo(c context.Context, key int64, value *Demo) error - // mc: -key=keyMid -expire=d.demoExpire -encode=gob - AddCacheDemo1(c context.Context, key int64, value *Demo, mid int64) error - // mc: -key=noneKey - AddCacheNone(c context.Context, value *Demo) error - // mc: -key=demoKey -expire=d.demoExpire - AddCacheString(c context.Context, key int64, value string) error - - // mc: -key=demoKey - DelCacheDemos(c context.Context, keys []int64) error - // mc: -key=demoKey - DelCacheDemo(c context.Context, key int64) error - // mc: -key=keyMid - DelCacheDemo1(c context.Context, key int64, mid int64) error - // mc: -key=noneKey - DelCacheNone(c context.Context) error -} - -func demoKey(id int64) string { - return fmt.Sprintf("art_%d", id) -} - -func demo2Key(id, tp int64) string { - return fmt.Sprintf("art_%d_%d", id, tp) -} - -func keyMid(id, mid int64) string { - return fmt.Sprintf("art_%d_%d", id, mid) -} - -func noneKey() string { - return "none" -} -``` - -### 参考 - -也可以参考完整的testdata例子:kratos/tool/kratos-gen-mc/testdata - diff --git a/docs/kratos-protoc.md b/docs/kratos-protoc.md deleted file mode 100644 index 78273fa3e..000000000 --- a/docs/kratos-protoc.md +++ /dev/null @@ -1,28 +0,0 @@ -### kratos tool protoc - -```shell -# generate all -kratos tool protoc api.proto -# generate gRPC -kratos tool protoc --grpc api.proto -# generate BM HTTP -kratos tool protoc --bm api.proto -# generate ecode -kratos tool protoc --ecode api.proto -# generate swagger -kratos tool protoc --swagger api.proto -``` - -执行生成如 `api.pb.go/api.bm.go/api.swagger.json/api.ecode.go` 的对应文件,需要注意的是:`ecode`生成有固定规则,需要首先是`enum`类型,且`enum`名字要以`ErrCode`结尾,如`enum UserErrCode`。详情可见:[example](https://github.com/go-kratos/kratos/tree/master/example/protobuf) - -> 该工具在Windows/Linux下运行,需提前安装好 [protobuf](https://github.com/google/protobuf) 工具 - -`kratos tool protoc`本质上是拼接好了`protoc`命令然后进行执行,在执行时会打印出对应执行的`protoc`命令,如下可见: - -```shell -protoc --proto_path=$GOPATH --proto_path=$GOPATH/github.com/go-kratos/kratos/third_party --proto_path=. --bm_out=:. api.proto -protoc --proto_path=$GOPATH --proto_path=$GOPATH/github.com/go-kratos/kratos/third_party --proto_path=. --gofast_out=plugins=grpc:. api.proto -protoc --proto_path=$GOPATH --proto_path=$GOPATH/github.com/go-kratos/kratos/third_party --proto_path=. --bswagger_out=:. api.proto -protoc --proto_path=$GOPATH --proto_path=$GOPATH/github.com/go-kratos/kratos/third_party --proto_path=. --ecode_out=:. api.proto -``` - diff --git a/docs/kratos-swagger.md b/docs/kratos-swagger.md deleted file mode 100644 index 6d28b3f2d..000000000 --- a/docs/kratos-swagger.md +++ /dev/null @@ -1,8 +0,0 @@ -### kratos tool swagger -```shell -kratos tool swagger serve api/api.swagger.json -``` -执行命令后,浏览器会自动打开swagger文档地址。 -同时也可以查看更多的 [go-swagger](https://github.com/go-swagger/go-swagger) 官方参数进行使用。 - - diff --git a/docs/kratos-tool.md b/docs/kratos-tool.md deleted file mode 100644 index 3bb45a4f2..000000000 --- a/docs/kratos-tool.md +++ /dev/null @@ -1,106 +0,0 @@ -# 介绍 - -kratos包含了一批好用的工具集,比如项目一键生成、基于proto生成http&grpc代码,生成缓存回源代码,生成memcache执行代码,生成swagger文档等。 - -# 获取工具 - -执行以下命令,即可快速安装好`kratos`工具 -```shell -go get -u github.com/go-kratos/kratos/tool/kratos -``` - -那么接下来让我们快速开始熟悉工具的用法~ - -# kratos本体 - -`kratos`是所有工具集的本体,就像`go`一样,拥有执行各种子工具的能力,如`go build`和`go tool`。先让我们看看`-h`的输出: - -``` -NAME: - kratos - kratos tool - -USAGE: - kratos [global options] command [command options] [arguments...] 
- -VERSION: - 0.0.1 - -COMMANDS: - new, n create new project - build, b kratos build - run, r kratos run - tool, t kratos tool - version, v kratos version - self-upgrade kratos self-upgrade - help, h Shows a list of commands or help for one command - -GLOBAL OPTIONS: - --help, -h show help - --version, -v print the version -``` - -可以看到`kratos`有如:`new` `build` `run` `tool`等在内的COMMANDS,那么接下来一一演示如何使用。 - -# kratos new - -`kratos new`是快速创建一个项目的命令,执行如下: - -```shell -kratos new kratos-demo -``` - -即可快速在当前目录生成一个叫`kratos-demo`的项目。此外还支持指定owner和path,如下: - -```shell -kratos new kratos-demo -o YourName -d YourPath -``` - -注意,`kratos new`默认会生成通过 protobuf 定义的`grpc`和`bm`示例代码的,如只生成bm请加`--http`,如下: - -```shell -kratos new kratos-demo -o YourName -d YourPath --http -``` - -如只生成grpc请加`--grpc`,如下: - -```shell -kratos new kratos-demo -o YourName -d YourPath --grpc -``` - -> 特别注意,如果不是MacOS系统,需要自己进行手动安装protoc,用于生成的示例项目`api`目录下的`proto`文件并不会自动生成对应的`.pb.go`和`.bm.go`文件。 - -> 也可以参考以下说明进行生成:[protoc说明](protoc.md) - -# kratos build & run - -`kratos build`和`kratos run`是`go build`和`go run`的封装,可以在当前项目任意目录进行快速运行进行调试,并无特别用途。 - -# kratos tool - -`kratos tool`是基于proto生成http&grpc代码,生成缓存回源代码,生成memcache执行代码,生成swagger文档等工具集,先看下的执行效果: - -``` -kratos tool - -protoc(已安装): 快速方便生成pb.go的protoc封装,windows、Linux请先安装protoc工具 Author(kratos) [2019/10/31] -genbts(已安装): 缓存回源逻辑代码生成器 Author(kratos) [2019/10/31] -testcli(已安装): 测试代码生成 Author(kratos) [2019/09/09] -genmc(已安装): mc缓存代码生成 Author(kratos) [2019/07/23] -swagger(已安装): swagger api文档 Author(goswagger.io) [2019/05/05] - -安装工具: kratos tool install demo -执行工具: kratos tool demo -安装全部工具: kratos tool install all -全部升级: kratos tool upgrade all -``` - -> 小小说明:如未安装工具,第一次运行也可自动安装,不需要特别执行install - -目前已经集成的工具有: - -* [kratos](kratos-tool.md) 为本体工具,只用于安装更新使用; -* [protoc](kratos-protoc.md) 用于快速生成gRPC、HTTP、Swagger文件,该命令Windows,Linux用户需要手动安装 protobuf 工具; -* [swagger](kratos-swagger.md) 用于显示自动生成的HTTP API接口文档,通过 `kratos tool swagger serve api/api.swagger.json` 可以查看文档; -* [genmc](kratos-genmc.md) 用于自动生成memcached缓存代码; -* [genbts](kratos-genbts.md) 用于生成缓存回源代码生成,如果miss则调用回源函数从数据源获取,然后塞入缓存; - diff --git a/docs/log-agent.md b/docs/log-agent.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/docs/logger.md b/docs/logger.md deleted file mode 100644 index 6c30bb1c1..000000000 --- a/docs/logger.md +++ /dev/null @@ -1,34 +0,0 @@ -# 日志基础库 - -## 概览 -基于[zap](https://github.com/uber-go/zap)的field方式实现的高性能log库,提供Info、Warn、Error日志级别; -并提供了context支持,方便打印环境信息以及日志的链路追踪,在框架中都通过field方式实现,避免format日志带来的性能消耗。 - -## 配置选项 - -| flag | env | type | remark | -|:----------|:----------|:-------------:|:------| -| log.v | LOG_V | int | 日志级别:DEBUG:0 INFO:1 WARN:2 ERROR:3 FATAL:4 | -| log.stdout | LOG_STDOUT | bool | 是否标准输出:true、false| -| log.dir | LOG_DIR | string | 日志文件目录,如果配置会输出日志到文件,否则不输出日志文件 | -| log.agent | LOG_AGENT | string | 日志采集agent:unixpacket:///var/run/lancer/collector_tcp.sock?timeout=100ms&chan=1024 | -| log.module | LOG_MODULE | string | 指定field信息 format: file=1,file2=2. | -| log.filter | LOG_FILTER | string | 过虑敏感信息 format: field1,field2. 
| - -## 使用方式 -```go -func main() { - // 解析flag - flag.Parse() - // 初始化日志模块 - log.Init(nil) - // 打印日志 - log.Info("hi:%s", "kratos") - log.Infoc(Context.TODO(), "hi:%s", "kratos") - log.Infov(Context.TODO(), log.KVInt("key1", 100), log.KVString("key2", "test value") -} -``` - -## 扩展阅读 -* [log-agent](log-agent.md) - diff --git a/docs/protoc.md b/docs/protoc.md deleted file mode 100644 index 4676154c0..000000000 --- a/docs/protoc.md +++ /dev/null @@ -1,26 +0,0 @@ -# protoc - -`protobuf`是Google官方出品的一种轻便高效的结构化数据存储格式,可以用于结构化数据串行化,或者说序列化。它很适合做数据存储或 RPC 数据交换格式。可用于通讯协议、数据存储等领域的语言无关、平台无关、可扩展的序列化结构数据格式。 - -使用`protobuf`,需要先书写`.proto`文件,然后编译该文件。编译`proto`文件则需要使用到官方的`protoc`工具,安装文档请参看:[google官方protoc工具](https://github.com/protocolbuffers/protobuf#protocol-compiler-installation)。 - -注意:`protoc`是用于编辑`proto`文件的工具,它并不具备生成对应语言代码的能力,所以正常都是`protoc`配合对应语言的代码生成工具来使用,如Go语言的[gogo protobuf](https://github.com/gogo/protobuf),请先点击按文档说明安装。 - -安装好对应工具后,我们可以进入`api`目录,执行如下命令: - -```shell -export $KRATOS_HOME = kratos路径 -export $KRATOS_DEMO = 项目路径 - -// 生成:api.pb.go -protoc -I$GOPATH/src:$KRATOS_HOME/tool/protobuf/pkg/extensions:$KRATOS_DEMO/api --gogofast_out=plugins=grpc:$KRATOS_DEMO/api $KRATOS_DEMO/api/api.proto - -// 生成:api.bm.go -protoc -I$GOPATH/src:$KRATOS_HOME/tool/protobuf/pkg/extensions:$KRATOS_DEMO/api --bm_out=$KRATOS_DEMO/api $KRATOS_DEMO/api/api.proto - -// 生成:api.swagger.json -protoc -I$GOPATH/src:$KRATOS_HOME/tool/protobuf/pkg/extensions:$KRATOS_DEMO/api --bswagger_out=$KRATOS_DEMO/api $KRATOS_DEMO/api/api.proto -``` - -请注意替换`/Users/felix/work/go/src`目录为你本地开发环境对应GOPATH目录,其中`--gogofast_out`意味着告诉`protoc`工具需要使用`gogo protobuf`的工具生成代码。 - diff --git a/docs/quickstart.md b/docs/quickstart.md deleted file mode 100644 index 0cf5c0b4d..000000000 --- a/docs/quickstart.md +++ /dev/null @@ -1,71 +0,0 @@ -# 快速开始 - -**在安装之前,请确认您的开发环境中正确安装了[golang](https://golang.org/), [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git), 和[protoc](https://grpc.io/docs/protoc-installation/)** - -创建kratos项目,可以使用`kratos`工具,如下: - -```shell -go get -u github.com/go-kratos/kratos/tool/kratos -cd $GOPATH/src -kratos new kratos-demo -``` - -根据提示可以快速创建项目,如[kratos-demo](https://github.com/go-kratos/kratos-demo)就是通过工具创建生成。目录结构如下: - -``` -├── CHANGELOG.md -├── OWNERS -├── README.md -├── api # api目录为对外保留的proto文件及生成的pb.go文件 -│   ├── api.bm.go -│   ├── api.pb.go # 通过go generate生成的pb.go文件 -│   ├── api.proto -│   └── client.go -├── cmd -│   └── main.go # cmd目录为main所在 -├── configs # configs为配置文件目录 -│   ├── application.toml # 应用的自定义配置文件,可能是一些业务开关如:useABtest = true -│   ├── db.toml # db相关配置 -│   ├── grpc.toml # grpc相关配置 -│   ├── http.toml # http相关配置 -│   ├── memcache.toml # memcache相关配置 -│   └── redis.toml # redis相关配置 -├── go.mod -├── go.sum -└── internal # internal为项目内部包,包括以下目录: -│   ├── dao # dao层,用于数据库、cache、MQ、依赖某业务grpc|http等资源访问 -│   │   ├── dao.bts.go -│   │   ├── dao.go -│   │   ├── db.go -│   │   ├── mc.cache.go -│   │   ├── mc.go -│   │   └── redis.go -│   ├── di # 依赖注入层 采用wire静态分析依赖 -│   │   ├── app.go -│   │   ├── wire.go # wire 声明 -│   │   └── wire_gen.go # go generate 生成的代码 -│   ├── model # model层,用于声明业务结构体 -│   │   └── model.go -│   ├── server # server层,用于初始化grpc和http server -│   │   ├── grpc # grpc层,用于初始化grpc server和定义method -│   │   │   └── server.go -│   │   └── http # http层,用于初始化http server和声明handler -│   │   └── server.go -│   └── service # service层,用于业务逻辑处理,且为方便http和grpc共用方法,建议入参和出参保持grpc风格,且使用pb文件生成代码 -│   └── service.go -└── test # 测试资源层 用于存放测试相关资源数据 如docker-compose配置 数据库初始化语句等 - └── docker-compose.yaml -``` - 
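Before building and running the project (next step), it may help to see how these directories are typically wired together in `cmd/main.go`. The sketch below is assembled only from calls shown elsewhere in these docs (paladin, log, blademaster) and is not the literal generated file; the import paths, the no-op ping handler and the trailing `select {}` are assumptions, and the real template additionally does dependency injection via wire and waits on OS signals for graceful shutdown.

```go
package main

import (
	"flag"

	"github.com/go-kratos/kratos/pkg/conf/paladin"
	"github.com/go-kratos/kratos/pkg/log"
	bm "github.com/go-kratos/kratos/pkg/net/http/blademaster"
)

func main() {
	flag.Parse()
	// Load configs from -conf (or the remote config center), as described
	// in the paladin section of these docs.
	if err := paladin.Init(); err != nil {
		panic(err)
	}
	log.Init(nil) // flags/env such as log.dir control file output
	defer log.Close()

	// Read the HTTP server config; fall back to defaults when http.toml is absent.
	var cfg struct {
		Server *bm.ServerConfig
	}
	if err := paladin.Get("http.toml").UnmarshalTOML(&cfg); err != nil && err != paladin.ErrNotExist {
		panic(err)
	}

	engine := bm.DefaultServer(cfg.Server)
	engine.Ping(func(_ *bm.Context) {}) // trivially healthy; replace with a real check
	if err := engine.Start(); err != nil {
		panic(err)
	}

	// Block forever; the generated template waits on signals instead.
	select {}
}
```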
-生成后可直接运行如下: - -```shell -cd kratos-demo/cmd -go build -./cmd -conf ../configs -``` - -打开浏览器访问:[http://localhost:8000/kratos-demo/start](http://localhost:8000/kratos-demo/start),你会看到输出了`Golang 大法好 !!!` - -[kratos工具](kratos-tool.md) - diff --git a/docs/ratelimit.md b/docs/ratelimit.md deleted file mode 100644 index 17f58c6aa..000000000 --- a/docs/ratelimit.md +++ /dev/null @@ -1,57 +0,0 @@ -# 自适应限流保护 - -kratos 借鉴了 Sentinel 项目的自适应限流系统,通过综合分析服务的 cpu 使用率、请求成功的 qps 和请求成功的 rt 来做自适应限流保护。 - - -## 核心目标 - -* 自动嗅探负载和 qps,减少人工配置 -* 削顶,保证超载时系统不被拖垮,并能以高水位 qps 继续运行 - - -## 限流规则 - -### 指标介绍 - -| 指标名称 | 指标含义 | -| -------- | ------------------------------------------------------------- | -| cpu | 最近 1s 的 CPU 使用率均值,使用滑动平均计算,采样周期是 250ms | -| inflight | 当前处理中正在处理的请求数量 | -| pass | 请求处理成功的量 | -| rt | 请求成功的响应耗时 | - - -### 滑动窗口 - -在自适应限流保护中,采集到的指标的时效性非常强,系统只需要采集最近一小段时间内的 qps、rt 即可,对于较老的数据,会自动丢弃。为了实现这个效果,kratos 使用了滑动窗口来保存采样数据。 - -![ratelimit-rolling-window](img/ratelimit-rolling-window.png) - -如上图,展示了一个具有两个桶(bucket)的滑动窗口(rolling window)。整个滑动窗口用来保存最近 1s 的采样数据,每个小的桶用来保存 500ms 的采样数据。 -当时间流动之后,过期的桶会自动被新桶的数据覆盖掉,在图中,在 1000-1500ms 时,bucket 1 的数据因为过期而被丢弃,之后 bucket 3 的数据填到了窗口的头部。 - - -### 限流公式 - -判断是否丢弃当前请求的算法如下: - -`cpu > 800 AND (Now - PrevDrop) < 1s AND (MaxPass * MinRt * windows / 1000) < InFlight` - -MaxPass 表示最近 5s 内,单个采样窗口中最大的请求数。 -MinRt 表示最近 5s 内,单个采样窗口中最小的响应时间。 -windows 表示一秒内采样窗口的数量,默认配置中是 5s 50 个采样,那么 windows 的值为 10。 - -## 压测报告 - -场景1,请求以每秒增加1个的速度不停上升,压测效果如下: - -![ratelimit-benchmark-up-1](img/ratelimit-benchmark-up-1.png) - -左测是没有限流的压测效果,右侧是带限流的压测效果。 -可以看到,没有限流的场景里,系统在 700qps 时开始抖动,在 1k qps 时被拖垮,几乎没有新的请求能被放行,然而在使用限流之后,系统请求能够稳定在 600 qps 左右,rt 没有暴增,服务也没有被打垮,可见,限流有效的保护了服务。 - - -## 参考资料 - -[Sentinel 系统自适应限流](https://github.com/alibaba/Sentinel/wiki/%E7%B3%BB%E7%BB%9F%E8%87%AA%E9%80%82%E5%BA%94%E9%99%90%E6%B5%81) - diff --git a/docs/trace.md b/docs/trace.md deleted file mode 100644 index 34cd5e4a9..000000000 --- a/docs/trace.md +++ /dev/null @@ -1,42 +0,0 @@ -# 背景 - -当代的互联网的服务,通常都是用复杂的、大规模分布式集群来实现的。互联网应用构建在不同的软件模块集上,这些软件模块,有可能是由不同的团队开发、可能使用不同的编程语言来实现、有可能布在了几千台服务器,横跨多个不同的数据中心。因此,就需要一些可以帮助理解系统行为、用于分析性能问题的工具。 - -# 概览 - -* kratos内部的trace基于opentracing语义 -* 使用protobuf协议描述trace结构 -* 全链路支持(gRPC/HTTP/MySQL/Redis/Memcached等) - -## 参考文档 - -[opentracing](https://github.com/opentracing-contrib/opentracing-specification-zh/blob/master/specification.md) -[dapper](https://bigbully.github.io/Dapper-translation/) - -# 使用 - -kratos本身不提供整套`trace`数据方案,但在`net/trace/report.go`内声明了`repoter`接口,可以简单的集成现有开源系统,比如:`zipkin`和`jaeger`。 - -### zipkin使用 - -可以看[zipkin](https://github.com/go-kratos/kratos/tree/master/pkg/net/trace/zipkin)的协议上报实现,具体使用方式如下: - -1. 前提是需要有一套自己搭建的`zipkin`集群 -2. 在业务代码的`main`函数内进行初始化,代码如下: - -```go -// 忽略其他代码 -import "github.com/go-kratos/kratos/pkg/net/trace/zipkin" -// 忽略其他代码 -func main(){ - // 忽略其他代码 - zipkin.Init(&zipkin.Config{ - Endpoint: "http://localhost:9411/api/v2/spans", - }) - // 忽略其他代码 -} -``` - -### zipkin效果图 - -![zipkin](img/zipkin.jpg) diff --git a/docs/ut-support.md b/docs/ut-support.md deleted file mode 100644 index 789d25f48..000000000 --- a/docs/ut-support.md +++ /dev/null @@ -1,500 +0,0 @@ -## 单元测试辅助工具 -在单元测试中,我们希望每个测试用例都是独立的。这时候就需要Stub, Mock, Fakes等工具来帮助我们进行用例和依赖之间的隔离。 - -同时通过对错误情况的 Mock 也可以帮我们检查代码多个分支结果,从而提高覆盖率。 - -以下工具已加入到 Kratos 框架 go modules,可以借助 testgen 代码生成器自动生成部分工具代码,请放心食用。更多使用方法还欢迎大家多多探索。 - -### GoConvey -GoConvey是一套针对golang语言的BDD类型的测试框架。提供了良好的管理和执行测试用例的方式,包含丰富的断言函数,而且同时有测试执行和报告Web界面的支持。 - -#### 使用特性 -为了更好的使用 GoConvey 来编写和组织测试用例,需要注意以下几点特性: - -1. 
Convey方法和So方法的使用 -> - Convey方法声明了一种规格的组织,每个组织内包含一句描述和一个方法。在方法内也可以嵌套其他Convey语句和So语句。 -```Go -// 顶层Convey方法,需引入*testing.T对象 -Convey(description string, t *testing.T, action func()) - -// 其他嵌套Convey方法,无需引入*testing.T对象 -Convey(description string, action func()) -``` -注:同一Scope下的Convey语句描述不可以相同! -> - So方法是断言方法,用于对执行结果进行比对。GoConvey官方提供了大量断言,同时也可以自定义自己的断言([戳这里了解官方文档](https://github.com/smartystreets/goconvey/wiki/Assertions)) -```Go -// A=B断言 -So(A, ShouldEqual, B) - -// A不为空断言 -So(A, ShouldNotBeNil) -``` - -2. 执行次序 -> 假设有以下Convey伪代码,执行次序将为A1B2A1C3。将Convey方法类比树的结点的话,整体执行类似树的遍历操作。 -> 所以Convey A部分可在组织测试用例时,充当“Setup”的方法。用于初始化等一些操作。 -```Go -Convey伪代码 -Convey A - So 1 - Convey B - So 2 - Convey C - So 3 -``` - -3. Reset方法 -> GoConvey提供了Reset方法来进行“Teardown”的操作。用于执行完测试用例后一些状态的回收,连接关闭等操作。Reset方法不可与顶层Convey语句在同层。 -```Go -// Reset -Reset func(action func()) -``` -假设有以下带有Reset方法的伪代码,同层Convey语句执行完后均会执行同层的Reset方法。执行次序为A1B2C3EA1D4E。 -```Go -Convey A - So 1 - Convey B - So 2 - Convey C - So 3 - Convey D - So 4 - Reset E -``` - -4. 自然语言逻辑到测试用例的转换 -> 在了解了Convey方法的特性和执行次序后,我们可以通过这些性质把对一个方法的测试用例按照日常逻辑组织起来。尤其建议使用Given-When-Then的形式来组织 -> - 比较直观的组织示例 -```Go -Convey("Top-level", t, func() { - - // Setup 工作,在本层内每个Convey方法执行前都会执行的部分: - db.Open() - db.Initialize() - - Convey("Test a query", func() { - db.Query() - // TODO: assertions here - }) - - Convey("Test inserts", func() { - db.Insert() - // TODO: assertions here - }) - - Reset(func() { - // Teardown工作,在本层内每个Convey方法执行完后都会执行的部分: - db.Close() - }) - -}) -``` -> - 定义单独的包含Setup和Teardown的帮助方法 -```Go -package main - -import ( - "database/sql" - "testing" - - _ "github.com/lib/pq" - . "github.com/smartystreets/goconvey/convey" -) - -// 帮助方法,将原先所需的处理方法以参数(f)形式传入 -func WithTransaction(db *sql.DB, f func(tx *sql.Tx)) func() { - return func() { - // Setup工作 - tx, err := db.Begin() - So(err, ShouldBeNil) - - Reset(func() { - // Teardown工作 - /* Verify that the transaction is alive by executing a command */ - _, err := tx.Exec("SELECT 1") - So(err, ShouldBeNil) - - tx.Rollback() - }) - - // 调用传入的闭包做实际的事务处理 - f(tx) - } -} - -func TestUsers(t *testing.T) { - db, err := sql.Open("postgres", "postgres://localhost?sslmode=disable") - if err != nil { - panic(err) - } - - Convey("Given a user in the database", t, WithTransaction(db, func(tx *sql.Tx) { - _, err := tx.Exec(`INSERT INTO "Users" ("id", "name") VALUES (1, 'Test User')`) - So(err, ShouldBeNil) - - Convey("Attempting to retrieve the user should return the user", func() { - var name string - - data := tx.QueryRow(`SELECT "name" FROM "Users" WHERE "id" = 1`) - err = data.Scan(&name) - - So(err, ShouldBeNil) - So(name, ShouldEqual, "Test User") - }) - })) -} -``` - -#### 使用建议 -强烈建议使用 [testgen](ut-testgen.md) 进行测试用例的生成,生成后每个方法将包含一个符合以下规范的正向用例。 - -用例规范: -1. 每个方法至少包含一个测试方法(命名为Test[PackageName][FunctionName]) -2. 每个测试方法包含一个顶层Convey语句,仅在此引入admin *testing.T类型的对象,在该层进行变量声明。 -3. 每个测试方法不同的用例用Convey方法组织 -4. 每个测试用例的一组断言用一个Convey方法组织 -5. 使用convey.C保持上下文一致 - -### MonkeyPatching - -#### 特性和使用条件 -1. Patch()对任何无接收者的方法均有效 -2. PatchInstanceMethod()对有接收者的包内/私有方法无法工作(因使用到了反射机制)。可以采用给私有方法的下一级打补丁,或改为无接收者的方法,或将方法转为公有 - -#### 适用场景(建议) -项目代码中上层对下层包依赖时,下层包方法Mock(例如service层对dao层方法依赖时) -基础库(MySql, Memcache, Redis)错误Mock -其他标准库,基础库以及第三方包方法Mock - -#### 使用示例 -1. 上层包对下层包依赖示例 -Service层对Dao层依赖: - -```GO -// 原方法 -func (s *Service) realnameAlipayApply(c context.Context, mid int64) (info *model.RealnameAlipayApply, err error) { - if info, err = s.mbDao.RealnameAlipayApply(c, mid); err != nil { - return - } - ... 
- return -} - -// 测试方法 -func TestServicerealnameAlipayApply(t *testing.T) { - convey.Convey("realnameAlipayApply", t, func(ctx convey.C) { - ... - ctx.Convey("When everything goes positive", func(ctx convey.C) { - guard := monkey.PatchInstanceMethod(reflect.TypeOf(s.mbDao), "RealnameAlipayApply", func(_ *dao.Dao, _ context.Context, _ int64) (*model.RealnameAlipayApply, error) { - return nil, nil - }) - defer guard.Unpatch() - info, err := s.realnameAlipayApply(c, mid) - ctx.Convey("Then err should be nil,info should not be nil", func(ctx convey.C) { - ctx.So(info, convey.ShouldNotBeNil) - ctx.So(err, convey.ShouldBeNil) - }) - }) - }) -} -``` -2. 基础库错误Mock示例 - -```Go -// 原方法(部分) -func (d *Dao) BaseInfoCache(c context.Context, mid int64) (info *model.BaseInfo, err error) { - ... - conn := d.mc.Get(c) - defer conn.Close() - item, err := conn.Get(key) - if err != nil { - log.Error("conn.Get(%s) error(%v)", key, err) - return - } - ... - return -} - -// 测试方法(错误Mock部分) -func TestDaoBaseInfoCache(t *testing.T) { - convey.Convey("BaseInfoCache", t, func(ctx convey.C) { - ... - Convey("When conn.Get gets error", func(ctx convey.C) { - guard := monkey.PatchInstanceMethod(reflect.TypeOf(d.mc), "Get", func(_ *memcache.Pool, _ context.Context) memcache.Conn { - return memcache.MockWith(memcache.ErrItemObject) - }) - defer guard.Unpatch() - _, err := d.BaseInfoCache(c, mid) - ctx.Convey("Error should be equal to memcache.ErrItemObject", func(ctx convey.C) { - ctx.So(err, convey.ShouldEqual, memcache.ErrItemObject) - }) - }) - }) -} -``` - -#### 注意事项 -- Monkey非线程安全 -- Monkey无法针对Inline方法打补丁,在测试时可以使用go test -gcflags=-l来关闭inline编译的模式(一些简单的go inline介绍戳这里) -- Monkey在一些面向安全不允许内存页写和执行同时进行的操作系统上无法工作 -- 更多详情请戳:https://github.com/bouk/monkey - - - -### Gock——HTTP请求Mock工具 - -#### 特性和使用条件 - -#### 工作原理 -1. 截获任意通过 http.DefaultTransport或者自定义http.Transport对外的http.Client请求 -2. 以“先进先出”原则将对外需求和预定义好的HTTP Mock池中进行匹配 -3. 如果至少一个Mock被匹配,将按照2中顺序原则组成Mock的HTTP返回 -4. 如果没有Mock被匹配,若实际的网络可用,将进行实际的HTTP请求。否则将返回错误 - -#### 特性 -- 内建帮助工具实现JSON/XML简单Mock -- 支持持久的、易失的和TTL限制的Mock -- 支持HTTP Mock请求完整的正则表达式匹配 -- 可通过HTTP方法,URL参数,请求头和请求体匹配 -- 可扩展和可插件化的HTTP匹配规则 -- 具备在Mock和实际网络模式之间切换的能力 -- 具备过滤和映射HTTP请求到正确的Mock匹配的能力 -- 支持映射和过滤可以更简单的掌控Mock -- 通过使用http.RoundTripper接口广泛兼容HTTP拦截器 -- 可以在任意net/http兼容的Client上工作 -- 网络延迟模拟(beta版本) -- 无其他依赖 - -#### 适用场景(建议) -任何需要进行HTTP请求的操作,建议全部用Gock进行Mock,以减少对环境的依赖。 - -使用示例: -1. net/http 标准库 HTTP 请求Mock - -```Go -import gock "gopkg.in/h2non/gock.v1" - -// 原方法 - func (d *Dao) Upload(c context.Context, fileName, fileType string, expire int64, body io.Reader) (location string, err error) { - ... - resp, err = d.bfsClient.Do(req) //d.bfsClient类型为*http.client - ... - if resp.StatusCode != http.StatusOK { - ... - } - header = resp.Header - code = header.Get("Code") - if code != strconv.Itoa(http.StatusOK) { - ... - } - ... - return -} - -// 测试方法 -func TestDaoUpload(t *testing.T) { - convey.Convey("Upload", t, func(ctx convey.C) { - ... 
- // d.client 类型为 *http.client 根据Gock包描述需要设置http.Client的Transport情况。也可在TestMain中全局设置,则所有的HTTP请求均通过Gock来解决 - d.client.Transport = gock.DefaultTransport // !注意:进行httpMock前需要对http 请求进行拦截,否则Mock失败 - // HTTP请求状态和Header都正确的Mock - ctx.Convey("When everything is correct", func(ctx convey.C) { - httpMock("PUT", url).Reply(200).SetHeaders(map[string]string{ - "Code": "200", - "Location": "SomePlace", - }) - location, err := d.Upload(c, fileName, fileType, expire, body) - ctx.Convey("Then err should be nil.location should not be nil.", func(ctx convey.C) { - ctx.So(err, convey.ShouldBeNil) - ctx.So(location, convey.ShouldNotBeNil) - }) - }) - ... - // HTTP请求状态错误Mock - ctx.Convey("When http request status != 200", func(ctx convey.C) { - d.client.Transport = gock.DefaultTransport - httpMock("PUT", url).Reply(404) - _, err := d.Upload(c, fileName, fileType, expire, body) - ctx.Convey("Then err should not be nil", func(ctx convey.C) { - ctx.So(err, convey.ShouldNotBeNil) - }) - }) - // HTTP请求Header中Code值错误Mock - ctx.Convey("When http request Code in header != 200", func(ctx convey.C) { - d.client.Transport = gock.DefaultTransport - httpMock("PUT", url).Reply(404).SetHeaders(map[string]string{ - "Code": "404", - "Location": "SomePlace", - }) - _, err := d.Upload(c, fileName, fileType, expire, body) - ctx.Convey("Then err should not be nil", func(ctx convey.C) { - ctx.So(err, convey.ShouldNotBeNil) - }) - }) - - // 由于同包内有其他进行实际HTTP请求的测试。所以再每次用例结束后,进行现场恢复(关闭Gock设置默认的Transport) - ctx.Reset(func() { - gock.OffAll() - d.client.Transport = http.DefaultClient.Transport - }) - - - }) -} - -func httpMock(method, url string) *gock.Request { - r := gock.New(url) - r.Method = strings.ToUpper(method) - return r -} -``` -2. blademaster库HTTP请求Mock - -```Go -// 原方法 -func (d *Dao) SendWechatToGroup(c context.Context, chatid, msg string) (err error) { - ... - if err = d.client.Do(c, req, &res); err != nil { - ... - } - if res.Code != 0 { - ... - } - return -} - -// 测试方法 -func TestDaoSendWechatToGroup(t *testing.T) { - convey.Convey("SendWechatToGroup", t, func(ctx convey.C) { - ... - // 根据Gock包描述需要设置bm.Client的Transport情况。也可在TestMain中全局设置,则所有的HTTP请求均通过Gock来解决。 - // d.client 类型为 *bm.client - d.client.SetTransport(gock.DefaultTransport) // !注意:进行httpMock前需要对http 请求进行拦截,否则Mock失败 - // HTTP请求状态和返回内容正常Mock - ctx.Convey("When everything gose postive", func(ctx convey.C) { - httpMock("POST", _sagaWechatURL+"/appchat/send").Reply(200).JSON(`{"code":0,"message":"0"}`) - err := d.SendWechatToGroup(c, d.c.WeChat.ChatID, msg) - ... - }) - // HTTP请求状态错误Mock - ctx.Convey("When http status != 200", func(ctx convey.C) { - httpMock("POST", _sagaWechatURL+"/appchat/send").Reply(404) - err := d.SendWechatToGroup(c, d.c.WeChat.ChatID, msg) - ... - }) - // HTTP请求返回值错误Mock - ctx.Convey("When http response code != 0", func(ctx convey.C) { - httpMock("POST", _sagaWechatURL+"/appchat/send").Reply(200).JSON(`{"code":-401,"message":"0"}`) - err := d.SendWechatToGroup(c, d.c.WeChat.ChatID, msg) - ... 
- }) - // 由于同包内有其他进行实际HTTP请求的测试。所以再每次用例结束后,进行现场恢复(关闭Gock设置默认的Transport)。 - ctx.Reset(func() { - gock.OffAll() - d.client.SetTransport(http.DefaultClient.Transport) - }) - }) -} - -func httpMock(method, url string) *gock.Request { - r := gock.New(url) - r.Method = strings.ToUpper(method) - return r -} -``` - -#### 注意事项 -- Gock不是完全线程安全的 -- 如果执行并发代码,在配置Gock和解释定制的HTTP clients时,要确保Mock已经事先声明好了来避免不需要的竞争机制 -- 更多详情请戳:https://github.com/h2non/gock - - -### GoMock - -#### 使用条件 -只能对公有接口(interface)定义的代码进行Mock,并仅能在测试过程中进行 - -#### 使用方法 -- 官方安装使用步骤 -```shell -## 获取GoMock包和自动生成Mock代码工具mockgen -go get github.com/golang/mock/gomock -go install github.com/golang/mock/mockgen - -## 生成mock文件 -## 方法1:生成对应文件下所有interface -mockgen -source=path/to/your/interface/file.go - -## 方法2:生成对应包内指定多个interface,并用逗号隔开 -mockgen database/sql/driver Conn,Driver - -## 示例: -mockgen -destination=$GOPATH/kratos/app/xxx/dao/dao_mock.go -package=dao kratos/app/xxx/dao DaoInterface -``` -- testgen 使用步骤(GoMock生成功能已集成在Creater工具中,无需额外安装步骤即可直接使用) -```shell -## 直接给出含有接口类型定义的包路径,生成Mock文件将放在包目录下一级mock/pkgName_mock.go中 -./creater --m mock absolute/path/to/your/pkg -``` -- 测试代码内使用方法 - -```Go -// 测试用例内直接使用 -// 需引入的包 -import ( - ... - "github.com/otokaze/mock/gomock" - ... -) - -func TestPkgFoo(t *testing.T) { - convey.Convey("Foo", t, func(ctx convey.C) { - ... - ctx.Convey("Mock Interface to test", func(ctx convey.C) { - // 1. 使用gomock.NewController新增一个控制器 - mockCtrl := gomock.NewController(t) - // 2. 测试完成后关闭控制器 - defer mockCtrl.Finish() - // 3. 以控制器为参数生成Mock对象 - yourMock := mock.NewMockYourClient(mockCtrl) - // 4. 使用Mock对象替代原代码中的对象 - yourClient = yourMock - // 5. 使用EXPECT().方法名(方法参数).Return(返回值)来构造所需输入/输出 - yourMock.EXPECT().YourMethod(gomock.Any()).Return(nil) - res:= Foo(params) - ... - }) - ... - }) -} - -// 可以利用Convey执行顺序方式适当调整以简化代码 -func TestPkgFoo(t *testing.T) { - convey.Convey("Foo", t, func(ctx convey.C) { - ... - mockCtrl := gomock.NewController(t) - yourMock := mock.NewMockYourClient(mockCtrl) - ctx.Convey("Mock Interface to test1", func(ctx convey.C) { - yourMock.EXPECT().YourMethod(gomock.Any()).Return(nil) - ... - }) - ctx.Convey("Mock Interface to test2", func(ctx convey.C) { - yourMock.EXPECT().YourMethod(args).Return(res) - ... - }) - ... - ctx.Reset(func(){ - mockCtrl.Finish() - }) - }) -} -``` - -#### 适用场景(建议) -1. gRPC中的Client接口 -2. 也可改造现有代码构造Interface后使用(具体可配合Creater的功能进行Interface和Mock的生成) -3. 任何对接口中定义方法依赖的场景 - -#### 注意事项 -- 如有Mock文件在包内,在执行单元测试时Mock代码会被识别进行测试。请注意Mock文件的放置。 -- 更多详情请戳:https://github.com/golang/mock \ No newline at end of file diff --git a/docs/ut-testcli.md b/docs/ut-testcli.md deleted file mode 100644 index 65490e110..000000000 --- a/docs/ut-testcli.md +++ /dev/null @@ -1,152 +0,0 @@ -## testcli UT运行环境构建工具 -基于 docker-compose 实现跨平台跨语言环境的容器依赖管理方案,以解决运行ut场景下的 (mysql, redis, mc)容器依赖问题。 - -*这个是testing/lich的二进制工具版本(Go请直接使用库版本:github.com/go-kratos/kratos/pkg/testing/lich)* - -### 功能和特性 -- 自动读取 test 目录下的 yaml 并启动依赖 -- 自动导入 test 目录下的 DB 初始化 SQL -- 提供特定容器内的 healthcheck (mysql, mc, redis) -- 提供一站式解决 UT 服务依赖的工具版本 (testcli) - -### 编译安装 -*使用本工具/库需要前置安装好 docker & docker-compose@v1.24.1^* - -#### Method 1. With go get -```shell -go get -u github.com/go-kratos/kratos/tool/testcli -$GOPATH/bin/testcli -h -``` -#### Method 2. Build with Go -```shell -cd github.com/go-kratos/kratos/tool/testcli -go build -o $GOPATH/bin/testcli -$GOPATH/bin/testcli -h -``` -#### Method 3. Import with Kratos pkg -```Go -import "github.com/go-kratos/kratos/pkg/testing/lich" -``` - -### 构建数据 -#### Step 1. 
create docker-compose.yml -创建依赖服务的 docker-compose.yml,并把它放在项目路径下的 test 文件夹下面。例如: -```shell -mkdir -p $YOUR_PROJECT/test -``` -```yaml -version: "3.7" - -services: - db: - image: mysql:5.6 - ports: - - 3306:3306 - environment: - - MYSQL_ROOT_PASSWORD=root - volumes: - - .:/docker-entrypoint-initdb.d - command: [ - '--character-set-server=utf8', - '--collation-server=utf8_unicode_ci' - ] - - redis: - image: redis - ports: - - 6379:6379 -``` -一般来讲,我们推荐在项目根目录创建 test 目录,里面存放描述服务的yml,以及需要初始化的数据(database.sql等)。 - -同时也需要注意,正确的对容器内服务进行健康检测,testcli会在容器的health状态执行UT,其实我们也内置了针对几个较为通用镜像(mysql mariadb mc redis)的健康检测,也就是不写也没事(^^;; - -#### Step 2. export database.sql -构造初始化的数据(database.sql等),当然也把它也在 test 文件夹里。 -```sql -CREATE DATABASE IF NOT EXISTS `YOUR_DATABASE_NAME`; - -SET NAMES 'utf8'; -USE `YOUR_DATABASE_NAME`; - -CREATE TABLE IF NOT EXISTS `YOUR_TABLE_NAME` ( - `id` int(11) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键', - PRIMARY KEY (`id`), -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='YOUR_TABLE_NAME'; -``` -这里需要注意,在创建库/表的时候尽量加上 IF NOT EXISTS,以给予一定程度的容错,以及 SET NAMES 'utf8'; 用于解决客户端连接乱码问题。 - -#### Step 3. change your project mysql config -```toml -[mysql] - addr = "127.0.0.1:3306" - dsn = "root:root@tcp(127.0.0.1:3306)/YOUR_DATABASE?timeout=1s&readTimeout=1s&writeTimeout=1s&parseTime=true&loc=Local&charset=utf8mb4,utf8" - active = 20 - idle = 10 - idleTimeout ="1s" - queryTimeout = "1s" - execTimeout = "1s" - tranTimeout = "1s" -``` -在 *Step 1* 我们已经指定了服务对外暴露的端口为3306(这当然也可以是你指定的任何值),那理所应当的我们也要修改项目连接数据库的配置~ - -Great! 至此你已经完成了运行所需要用到的数据配置,接下来就来运行它。 - -### 运行 -开头也说过本工具支持两种运行方式:testcli 二进制工具版本和 go package 源码包,业务方可以根据需求场景进行选择。 -#### Method 1. With testcli tool -*已支持的 flag: -f,--nodown,down,run* -- -f,指定 docker-compose.yaml 文件路径,默认为当前目录下。 -- --nodown,指定是否在UT执行完成后保留容器,以供下次复用。 -- down,teardown 销毁当前项目下这个 compose 文件产生的容器。 -- run,运行你当前语言的单测执行命令(如:golang为 go test -v ./) - -example: -```shell -testcli -f ../../test/docker-compose.yaml run go test -v ./ -``` -#### Method 2. Import with Kratos pkg -- Step1. 在 Dao|Service 层中的 TestMain 单测主入口中,import "github.com/go-kratos/kratos/pkg/testing/lich" 引入testcli工具的go库版本。 -- Step2. 使用 flag.Set("f", "../../test/docker-compose.yaml") 指定 docker-compose.yaml 文件的路径。 -- Step3. 在 flag.Parse() 后即可使用 lich.Setup() 安装依赖&初始化数据(注意测试用例执行结束后 lich.Teardown() 回收下~) -- Step4. 运行 `go test -v ./ `看看效果吧~ - -example: -```Go -package dao - - -import ( - "flag" - "os" - "strings" - "testing" - - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/testing/lich" - ) - -var ( - d *Dao -) - -func TestMain(m *testing.M) { - flag.Set("conf", "../../configs") - flag.Set("f", "../../test/docker-compose.yaml") - flag.Parse() - if err := paladin.Init(); err != nil { - panic(err) - } - if err := lich.Setup(); err != nil { - panic(err) - } - defer lich.Teardown() - d = New() - if code := m.Run(); code != 0 { - panic(code) - } -} - ``` -## 注意 -因为启动mysql容器较为缓慢,健康检测的机制会重试3次,每次暂留5秒钟,基本在10s内mysql就能从creating到服务正常启动! -当然你也可以在使用 testcli 时加上 --nodown,使其不用每次跑都新建容器,只在第一次跑的时候会初始化容器,后面都进行复用,这样速度会快很多。 - diff --git a/docs/ut-testgen.md b/docs/ut-testgen.md deleted file mode 100644 index c13f90a3a..000000000 --- a/docs/ut-testgen.md +++ /dev/null @@ -1,45 +0,0 @@ -## testgen UT代码自动生成器 -解放你的双手,让你的UT一步到位! - -### 功能和特性 -- 支持生成 Dao|Service 层UT代码功能(每个方法包含一个正向用例) -- 支持生成 Dao|Service 层测试入口文件dao_test.go, service_test.go(用于控制初始化,控制测试流程等) -- 支持生成Mock代码(使用GoMock框架) -- 支持选择不同模式生成不同代码(使用"–m mode"指定) -- 生成单元测试代码时,同时支持传入目录或文件 -- 支持指定方法追加生成测试用例(使用"–func funcName"指定) - -### 编译安装 -#### Method 1. 
With go get -```shell -go get -u github.com/go-kratos/kratos/tool/testgen -$GOPATH/bin/testgen -h -``` -#### Method 2. Build with Go -```shell -cd github.com/go-kratos/kratos/tool/testgen -go build -o $GOPATH/bin/testgen -$GOPATH/bin/testgen -h -``` -### 运行 -#### 生成Dao/Service层单元UT -```shell -$GOPATH/bin/testgen YOUR_PROJECT/dao # default mode -$GOPATH/bin/testgen --m test path/to/your/pkg -$GOPATH/bin/testgen --func functionName path/to/your/pkg -``` - -#### 生成接口类型 -```shell -$GOPATH/bin/testgen --m interface YOUR_PROJECT/dao #当前仅支持传目录,如目录包含子目录也会做处理 -``` - -#### 生成Mock代码 - ```shell -$GOPATH/bin/testgen --m mock YOUR_PROJECT/dao #仅传入包路径即可 -``` - -#### 生成Monkey代码 -```shell -$GOPATH/bin/testgen --m monkey yourCodeDirPath #仅传入包路径即可 -``` diff --git a/docs/ut.md b/docs/ut.md deleted file mode 100644 index 8907f0517..000000000 --- a/docs/ut.md +++ /dev/null @@ -1,32 +0,0 @@ -# 背景 -单元测试即对最小可测试单元进行检查和验证,它可以很好的让你的代码在上测试环境之前自己就能前置的发现问题,解决问题。当然每个语言都有原生支持的 UT 框架,不过在 Kratos 里面我们需要有一些配套设施以及周边工具来辅助我们构筑整个 UT 生态。 - -# 工具链 -- testgen UT代码自动生成器(README: tool/testgen/README.md) -- testcli UT运行环境构建工具(README: tool/testcli/README.md) - -# 测试框架选型 -golang 的单元测试,既可以用官方自带的 testing 包,也有开源的如 testify、goconvey 业内知名,使用非常多也很好用的框架。 - -根据一番调研和内部使用经验,我们确定: -> - testing 作为基础库测试框架(非常精简不过够用) -> - goconvey 作为业务程序的单元测试框架(因为涉及比较多的业务场景和流程控制判断,比如更丰富的res值判断、上下文嵌套支持、还有webUI等) - -# 单元测试标准 -1. 覆盖率,当前标准:60%(所有包均需达到) -尽量达到70%以上。当然覆盖率并不能完全说明单元测试的质量,开发者需要考虑关键的条件判断和预期的结果。复杂的代码是需要好好设计测试用例的。 -2. 通过率,当前标准:100%(所有用例中的断言必须通过) - -# 书写建议 -1. 结果验证 -> - 校验err是否为nil. err是go函数的标配了,也是最基础的判断,如果err不为nil,基本上函数返回值或者处理肯定是有问题了。 -> - 检验res值是否正确。res值的校验是非常重要的,也是很容易忽略的地方。比如返回结构体对象,要对结构体的成员进行判断,而有可能里面是0值。goconvey对res值的判断支持是非常友好的。 - -2. 逻辑验证 -> 业务代码经常是流程比较复杂的,而函数的执行结果也是有上下文的,比如有不同条件分支。goconvey就非常优雅的支持了这种情况,可以嵌套执行。单元测试要结合业务代码逻辑,才能尽量的减少线上bug。 - -3. 如何mock -主要分以下3块: -> - 基础组件,如mc、redis、mysql等,由 testcli(testing/lich) 起基础镜像支持(需要提供建表、INSERT语句)与本地开发环境一致,也保证了结果的一致性。 -> - rpc server,如 xxxx-service 需要定义 interface 供业务依赖方使用。所有rpc server 都必须要提供一个interface+mock代码(gomock)。 -> - http server则直接写mock代码gock。 diff --git a/docs/warden-balancer.md b/docs/warden-balancer.md deleted file mode 100644 index 5ffc9ee68..000000000 --- a/docs/warden-balancer.md +++ /dev/null @@ -1,39 +0,0 @@ -# Warden Balancer - -## 介绍 -grpc-go内置了round-robin轮询,但由于自带的轮询算法不支持权重,也不支持color筛选等需求,故需要重新实现一个负载均衡算法。 - -## WRR (Weighted Round Robin) -该算法在加权轮询法基础上增加了动态调节权重值,用户可以在为每一个节点先配置一个初始的权重分,之后算法会根据节点cpu、延迟、服务端错误率、客户端错误率动态打分,在将打分乘用户自定义的初始权重分得到最后的权重值。 - -## P2C (Pick of two choices) -本算法通过随机选择两个node选择优胜者来避免羊群效应,并通过ewma尽量获取服务端的实时状态。 - -服务端: -服务端获取最近500ms内的CPU使用率(需要将cgroup设置的限制考虑进去,并除于CPU核心数),并将CPU使用率乘与1000后塞入每次grpc请求中的的Trailer中夹带返回: -cpu_usage -uint64 encoded with string -cpu_usage : 1000 - -客户端: -主要参数: -* server_cpu:通过每次请求中服务端塞在trailer中的cpu_usage拿到服务端最近500ms内的cpu使用率 -* inflight:当前客户端正在发送并等待response的请求数(pending request) -* latency: 加权移动平均算法计算出的接口延迟 -* client_success:加权移动平均算法计算出的请求成功率(只记录grpc内部错误,比如context deadline) - -目前客户端,已经默认使用p2c负载均衡算法`grpc.WithBalancerName(p2c.Name)`: -```go -// NewClient returns a new blank Client instance with a default client interceptor. -// opt can be used to add grpc dial options. -func NewClient(conf *ClientConfig, opt ...grpc.DialOption) *Client { - c := new(Client) - if err := c.SetConfig(conf); err != nil { - panic(err) - } - c.UseOpt(grpc.WithBalancerName(p2c.Name)) - c.UseOpt(opt...) 
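-	// 除负载均衡外,同时挂载 recovery、clientLogging 等默认的客户端拦截器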
- c.Use(c.recovery(), clientLogging(), c.handle()) - return c -} -``` diff --git a/docs/warden-mid.md b/docs/warden-mid.md deleted file mode 100644 index 1fbd2ad89..000000000 --- a/docs/warden-mid.md +++ /dev/null @@ -1,374 +0,0 @@ -# 说明 - -gRPC暴露了两个拦截器接口,分别是: - -* `grpc.UnaryServerInterceptor`服务端拦截器 -* `grpc.UnaryClientInterceptor`客户端拦截器 - -基于两个拦截器可以针对性的定制公共模块的封装代码,比如`warden/logging.go`是通用日志逻辑。 - -# 分析 - -## 服务端拦截器 - -让我们先看一下`grpc.UnaryServerInterceptor`的声明,[官方代码位置](https://github.com/grpc/grpc-go/blob/master/interceptor.go): - -```go -// UnaryServerInfo consists of various information about a unary RPC on -// server side. All per-rpc information may be mutated by the interceptor. -type UnaryServerInfo struct { - // Server is the service implementation the user provides. This is read-only. - Server interface{} - // FullMethod is the full RPC method string, i.e., /package.service/method. - FullMethod string -} - -// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal -// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the -// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as -// the status message of the RPC. -type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) - -// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info -// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper -// of the service method implementation. It is the responsibility of the interceptor to invoke handler -// to complete the RPC. -type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) -``` - -看起来很简单包括: - -* 一个`UnaryServerInfo`结构体用于`Server`和`FullMethod`字段传递,`Server`为`gRPC server`的对象实例,`FullMethod`为请求方法的全名 -* 一个`UnaryHandler`方法用于传递`Handler`,就是基于`proto`文件`service`内声明而生成的方法 -* 一个`UnaryServerInterceptor`用于拦截`Handler`方法,可在`Handler`执行前后插入拦截代码 - -为了更形象的说明拦截器的执行过程,请看基于`proto`生成的以下代码[代码位置](https://github.com/go-kratos/kratos-demo/blob/master/api/api.pb.go): - -```go -func _Demo_SayHello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HelloReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DemoServer).SayHello(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/demo.service.v1.Demo/SayHello", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DemoServer).SayHello(ctx, req.(*HelloReq)) - } - return interceptor(ctx, in, info, handler) -} -``` - -这个`_Demo_SayHello_Handler`方法是关键,该方法会被包装为`grpc.ServiceDesc`结构,被注册到gRPC内部,具体可在生成的`pb.go`代码内查找`s.RegisterService(&_Demo_serviceDesc, srv)`。 - -* 当`gRPC server`收到一次请求时,首先根据请求方法从注册到`server`内的`grpc.ServiceDesc`找到该方法对应的`Handler`如:`_Demo_SayHello_Handler`并执行 -* `_Demo_SayHello_Handler`执行过程请看上面具体代码,当`interceptor`不为`nil`时,会将`SayHello`包装为`grpc.UnaryHandler`结构传递给`interceptor` - -这样就完成了`UnaryServerInterceptor`的执行过程。那么`_Demo_SayHello_Handler`内的`interceptor`是如何注入到`gRPC server`内,则看下面这段代码[官方代码位置](https://github.com/grpc/grpc-go/blob/master/server.go): - -```go -// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the -// server. Only one unary interceptor can be installed. 
The construction of multiple -// interceptors (e.g., chaining) can be implemented at the caller. -func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { - return func(o *options) { - if o.unaryInt != nil { - panic("The unary server interceptor was already set and may not be reset.") - } - o.unaryInt = i - } -} -``` - -请一定注意这方法的注释!!! - -> Only one unary interceptor can be installed. The construction of multiple interceptors (e.g., chaining) can be implemented at the caller. - -`gRPC`本身只支持一个`interceptor`,想要多`interceptors`需要自己实现~~所以`warden`基于`grpc.UnaryClientInterceptor`实现了`interceptor chain`,请看下面代码[代码位置](https://github.com/go-kratos/kratos/blob/master/pkg/net/rpc/warden/server.go): - -```go -// Use attachs a global inteceptor to the server. -// For example, this is the right place for a rate limiter or error management inteceptor. -func (s *Server) Use(handlers ...grpc.UnaryServerInterceptor) *Server { - finalSize := len(s.handlers) + len(handlers) - if finalSize >= int(_abortIndex) { - panic("warden: server use too many handlers") - } - mergedHandlers := make([]grpc.UnaryServerInterceptor, finalSize) - copy(mergedHandlers, s.handlers) - copy(mergedHandlers[len(s.handlers):], handlers) - s.handlers = mergedHandlers - return s -} - -// interceptor is a single interceptor out of a chain of many interceptors. -// Execution is done in left-to-right order, including passing of context. -// For example ChainUnaryServer(one, two, three) will execute one before two before three, and three -// will see context changes of one and two. -func (s *Server) interceptor(ctx context.Context, req interface{}, args *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - var ( - i int - chain grpc.UnaryHandler - ) - - n := len(s.handlers) - if n == 0 { - return handler(ctx, req) - } - - chain = func(ic context.Context, ir interface{}) (interface{}, error) { - if i == n-1 { - return handler(ic, ir) - } - i++ - return s.handlers[i](ic, ir, args, chain) - } - - return s.handlers[0](ctx, req, args, chain) -} -``` - -很简单的逻辑: - -* `warden server`使用`Use`方法进行`grpc.UnaryServerInterceptor`的注入,而`func (s *Server) interceptor`本身就实现了`grpc.UnaryServerInterceptor` -* `func (s *Server) interceptor`可以根据注册的`grpc.UnaryServerInterceptor`顺序从前到后依次执行 - -而`warden`在初始化的时候将该方法本身注册到了`gRPC server`,在`NewServer`方法内可以看到下面代码: - -```go -opt = append(opt, keepParam, grpc.UnaryInterceptor(s.interceptor)) -s.server = grpc.NewServer(opt...) -``` - -如此完整的服务端拦截器逻辑就串联完成。 - -## 客户端拦截器 - - -让我们先看一下`grpc.UnaryClientInterceptor`的声明,[官方代码位置](https://github.com/grpc/grpc-go/blob/master/interceptor.go): - -```go -// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. -type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error - -// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC -// and it is the responsibility of the interceptor to call it. -// This is an EXPERIMENTAL API. 
-type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error -``` - -看起来和服务端拦截器并没有什么太大的区别,比较简单包括: - -* 一个`UnaryInvoker`表示客户端具体要发出的执行方法 -* 一个`UnaryClientInterceptor`用于拦截`Invoker`方法,可在`Invoker`执行前后插入拦截代码 - -具体执行过程,请看基于`proto`生成的下面代码[代码位置](https://github.com/go-kratos/kratos-demo/blob/master/api/api.pb.go): - -```go -func (c *demoClient) SayHello(ctx context.Context, in *HelloReq, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) { - out := new(google_protobuf1.Empty) - err := grpc.Invoke(ctx, "/demo.service.v1.Demo/SayHello", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} -``` - -当客户端调用`SayHello`时可以看到执行了`grpc.Invoke`方法,并且将`fullMethod`和其他参数传入,最终会执行下面代码[官方代码位置](https://github.com/grpc/grpc-go/blob/master/call.go): - -```go -// Invoke sends the RPC request on the wire and returns after response is -// received. This is typically called by generated code. -// -// All errors returned by Invoke are compatible with the status package. -func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { - // allow interceptor to see all applicable call options, which means those - // configured as defaults from dial option as well as per-call options - opts = combine(cc.dopts.callOptions, opts) - - if cc.dopts.unaryInt != nil { - return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) - } - return invoke(ctx, method, args, reply, cc, opts...) -} -``` - -其中的`unaryInt`即为客户端连接创建时注册的拦截器,使用下面代码注册[官方代码位置](https://github.com/grpc/grpc-go/blob/master/dialoptions.go): - -```go -// WithUnaryInterceptor returns a DialOption that specifies the interceptor for -// unary RPCs. -func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.unaryInt = f - }) -} -``` - -需要注意的是客户端的拦截器在官方`gRPC`内也只能支持注册一个,与服务端拦截器`interceptor chain`逻辑类似`warden`在客户端拦截器也做了相同处理,并且在客户端连接时进行注册,请看下面代码[代码位置](https://github.com/go-kratos/kratos/blob/master/pkg/net/rpc/warden/client.go): - -```go -// Use attachs a global inteceptor to the Client. -// For example, this is the right place for a circuit breaker or error management inteceptor. -func (c *Client) Use(handlers ...grpc.UnaryClientInterceptor) *Client { - finalSize := len(c.handlers) + len(handlers) - if finalSize >= int(_abortIndex) { - panic("warden: client use too many handlers") - } - mergedHandlers := make([]grpc.UnaryClientInterceptor, finalSize) - copy(mergedHandlers, c.handlers) - copy(mergedHandlers[len(c.handlers):], handlers) - c.handlers = mergedHandlers - return c -} - -// chainUnaryClient creates a single interceptor out of a chain of many interceptors. -// -// Execution is done in left-to-right order, including passing of context. -// For example ChainUnaryClient(one, two, three) will execute one before two before three. -func (c *Client) chainUnaryClient() grpc.UnaryClientInterceptor { - n := len(c.handlers) - if n == 0 { - return func(ctx context.Context, method string, req, reply interface{}, - cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - return invoker(ctx, method, req, reply, cc, opts...) 
- } - } - - return func(ctx context.Context, method string, req, reply interface{}, - cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - var ( - i int - chainHandler grpc.UnaryInvoker - ) - chainHandler = func(ictx context.Context, imethod string, ireq, ireply interface{}, ic *grpc.ClientConn, iopts ...grpc.CallOption) error { - if i == n-1 { - return invoker(ictx, imethod, ireq, ireply, ic, iopts...) - } - i++ - return c.handlers[i](ictx, imethod, ireq, ireply, ic, chainHandler, iopts...) - } - - return c.handlers[0](ctx, method, req, reply, cc, chainHandler, opts...) - } -} -``` - -如此完整的客户端拦截器逻辑就串联完成。 - -# 实现自己的拦截器 - -以服务端拦截器`logging`为例: - -```go -// serverLogging warden grpc logging -func serverLogging() grpc.UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - // NOTE: handler执行之前的拦截代码:主要获取一些关键参数,如耗时计时、ip等 - // 如果自定义的拦截器只需要在handler执行后,那么可以直接执行handler - - startTime := time.Now() - caller := metadata.String(ctx, metadata.Caller) - if caller == "" { - caller = "no_user" - } - var remoteIP string - if peerInfo, ok := peer.FromContext(ctx); ok { - remoteIP = peerInfo.Addr.String() - } - var quota float64 - if deadline, ok := ctx.Deadline(); ok { - quota = time.Until(deadline).Seconds() - } - - // call server handler - resp, err := handler(ctx, req) // NOTE: 以具体执行的handler为分界线!!! - - // NOTE: handler执行之后的拦截代码:主要进行耗时计算、日志记录 - // 如果自定义的拦截器在handler执行后不需要逻辑,这可直接返回 - - // after server response - code := ecode.Cause(err).Code() - duration := time.Since(startTime) - - // monitor - statsServer.Timing(caller, int64(duration/time.Millisecond), info.FullMethod) - statsServer.Incr(caller, info.FullMethod, strconv.Itoa(code)) - logFields := []log.D{ - log.KVString("user", caller), - log.KVString("ip", remoteIP), - log.KVString("path", info.FullMethod), - log.KVInt("ret", code), - // TODO: it will panic if someone remove String method from protobuf message struct that auto generate from protoc. - log.KVString("args", req.(fmt.Stringer).String()), - log.KVFloat64("ts", duration.Seconds()), - log.KVFloat64("timeout_quota", quota), - log.KVString("source", "grpc-access-log"), - } - if err != nil { - logFields = append(logFields, log.KV("error", err.Error()), log.KV("stack", fmt.Sprintf("%+v", err))) - } - logFn(code, duration)(ctx, logFields...) - return resp, err - } -} -``` - -# 内置拦截器 - -## 自适应限流拦截器 - -更多关于自适应限流的信息,请参考:[kratos 自适应限流](ratelimit.md) - -```go -package grpc - -import ( - pb "kratos-demo/api" - "kratos-demo/internal/service" - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/net/rpc/warden" - "github.com/go-kratos/kratos/pkg/net/rpc/warden/ratelimiter" -) - -// New new a grpc server. 
-func New(svc *service.Service) *warden.Server { - var rc struct { - Server *warden.ServerConfig - } - if err := paladin.Get("grpc.toml").UnmarshalTOML(&rc); err != nil { - if err != paladin.ErrNotExist { - panic(err) - } - } - ws := warden.NewServer(rc.Server) - - // 挂载自适应限流拦截器到 warden server,使用默认配置 - limiter := ratelimiter.New(nil) - ws.Use(limiter.Limit()) - - // 注意替换这里: - // RegisterDemoServer方法是在"api"目录下代码生成的 - // 对应proto文件内自定义的service名字,请使用正确方法名替换 - pb.RegisterDemoServer(ws.Server(), svc) - - ws, err := ws.Start() - if err != nil { - panic(err) - } - return ws -} -``` - -# 扩展阅读 - -[warden快速开始](warden-quickstart.md) -[warden基于pb生成](warden-pb.md) -[warden负载均衡](warden-balancer.md) -[warden服务发现](warden-resolver.md) diff --git a/docs/warden-pb.md b/docs/warden-pb.md deleted file mode 100644 index a02cb1d04..000000000 --- a/docs/warden-pb.md +++ /dev/null @@ -1,48 +0,0 @@ -# 介绍 - -基于proto文件可以快速生成`warden`框架对应的代码,提前需要准备以下工作: - -* 安装`kratos tool protoc`工具,请看[kratos工具](kratos-tool.md) -* 编写`proto`文件,示例可参考[kratos-demo内proto文件](https://github.com/go-kratos/kratos-demo/blob/master/api/api.proto) - -### kratos工具说明 - -`kratos tool protoc`工具可以生成`warden` `bm` `swagger`对应的代码和文档,想要单独生成`warden`代码只需加上`--grpc`如: - -```shell -# generate gRPC -kratos tool protoc --grpc api.proto -``` - -# 使用 - -建议在项目`api`目录下编写`proto`文件及生成对应的代码,可参考[kratos-demo内的api目录](https://github.com/go-kratos/kratos-demo/tree/master/api)。 - -执行命令后生成的`api.pb.go`代码,注意其中的`DemoClient`和`DemoServer`,其中: - -* `DemoClient`接口为客户端调用接口,相对应的有`demoClient`结构体为其实现 -* `DemoServer`接口为服务端接口声明,需要业务自己实现该接口的所有方法,`kratos`建议在`internal/service`目录下使用`Service`结构体实现 - -`internal/service`内的`Service`结构实现了`DemoServer`接口可参考[kratos-demo内的service](https://github.com/go-kratos/kratos-demo/blob/master/internal/service/service.go)内的如下代码: - -```go -// SayHelloURL bm demo func. -func (s *Service) SayHelloURL(ctx context.Context, req *pb.HelloReq) (reply *pb.HelloResp, err error) { - reply = &pb.HelloResp{ - Content: "hello " + req.Name, - } - fmt.Printf("hello url %s", req.Name) - return -} -``` - -更详细的客户端和服务端使用请看[warden快速开始](warden-quickstart.md) - -# 扩展阅读 - -[warden快速开始](warden-quickstart.md) -[warden拦截器](warden-mid.md) -[warden负载均衡](warden-balancer.md) -[warden服务发现](warden-resolver.md) - - diff --git a/docs/warden-quickstart.md b/docs/warden-quickstart.md deleted file mode 100644 index 6173ebd55..000000000 --- a/docs/warden-quickstart.md +++ /dev/null @@ -1,171 +0,0 @@ -# 准备工作 - -推荐使用[kratos工具](kratos-tool.md)快速生成带`grpc`的项目,如我们生成一个叫`kratos-demo`的项目。 - -``` -kratos new kratos-demo --proto -``` - -# pb文件 - -创建项目成功后,进入`api`目录下可以看到`api.proto`和`api.pb.go`和`generate.go`文件,其中: - -* `api.proto`是gRPC server的描述文件 -* `api.pb.go`是基于`api.proto`生成的代码文件 -* `generate.go`是用于`kratos tool protoc`执行`go generate`进行代码生成的临时文件 - -接下来可以将以上三个文件全部删除或者保留`generate.go`,之后编写自己的proto文件,确认proto无误后,进行代码生成: - -* 可直接执行`kratos tool protoc`,该命令会调用protoc工具生成`.pb.go`文件 -* 如`generate.go`没删除,也可以执行`go generate`命令,将调用`kratos tool protoc`工具进行代码生成 - -[kratos工具请看](kratos-tool.md) - -### 如没看kprotoc文档,请看下面这段话 - -`kratos tool protoc`用于快速生成`pb.go`文件,但目前windows和Linux需要先自己安装`protoc`工具,具体请看[protoc说明](protoc.md)。 - -# 注册server - -进入`internal/server/grpc`目录打开`server.go`文件,可以看到以下代码,只需要替换以下注释内容就可以启动一个gRPC服务。 - -```go -package grpc - -import ( - pb "kratos-demo/api" - "kratos-demo/internal/service" - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/net/rpc/warden" -) - -// New new a grpc server. 
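-// 初始化流程:先用 paladin 读取 grpc.toml 中的 ServerConfig(配置不存在时回退默认配置),再创建 warden server、注册 proto 生成的服务并启动。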
-func New(svc *service.Service) *warden.Server { - var rc struct { - Server *warden.ServerConfig - } - if err := paladin.Get("grpc.toml").UnmarshalTOML(&rc); err != nil { - if err != paladin.ErrNotExist { - panic(err) - } - } - ws := warden.NewServer(rc.Server) - // 注意替换这里: - // RegisterDemoServer方法是在"api"目录下代码生成的 - // 对应proto文件内自定义的service名字,请使用正确方法名替换 - pb.RegisterDemoServer(ws.Server(), svc) - ws, err := ws.Start() - if err != nil { - panic(err) - } - return ws -} -``` - -### 注册注意 - -```go -// SayHello grpc demo func. -func (s *Service) SayHello(ctx context.Context, req *pb.HelloReq) (reply *empty.Empty, err error) { - reply = new(empty.Empty) - fmt.Printf("hello %s", req.Name) - return -} -``` - -请进入`internal/service`内找到`SayHello`方法,注意方法的入参和出参,都是按照gRPC的方法声明对应的: - -* 第一个参数必须是`context.Context`,第二个必须是proto内定义的`message`对应生成的结构体 -* 第一个返回值必须是proto内定义的`message`对应生成的结构体,第二个参数必须是`error` -* 在http框架bm中,如果共用proto文件生成bm代码,那么也可以直接使用该service方法 - -建议service严格按照此格式声明方法使其能够在bm和warden内共用。 - -# client调用 - -请进入`internal/dao`方法内,一般对资源的处理都会在这一层封装。 -对于`client`端,前提必须有对应`proto`文件生成的代码,那么有两种选择: - -* 拷贝proto文件到自己项目下并且执行代码生成 -* 直接import服务端的api package - -> 这也是业务代码我们加了一层`internal`的关系,服务对外暴露的只有接口 - -不管哪一种方式,以下初始化gRPC client的代码建议伴随生成的代码存放在统一目录下: - -```go -package dao - -import ( - "context" - - "github.com/go-kratos/kratos/pkg/net/rpc/warden" - - "google.golang.org/grpc" -) - -// target server addrs. -const target = "direct://default/127.0.0.1:9000,127.0.0.1:9091" // NOTE: example - -// NewClient new member grpc client -func NewClient(cfg *warden.ClientConfig, opts ...grpc.DialOption) (DemoClient, error) { - client := warden.NewClient(cfg, opts...) - conn, err := client.Dial(context.Background(), target) - if err != nil { - return nil, err - } - // 注意替换这里: - // NewDemoClient方法是在"api"目录下代码生成的 - // 对应proto文件内自定义的service名字,请使用正确方法名替换 - return NewDemoClient(conn), nil -} -``` - -其中,`target`为gRPC用于服务发现的目标,使用标准url资源格式提供给resolver用于服务发现。`warden`默认使用`direct`直连方式,直接与`server`端进行连接。如果在使用其他服务发现组件请看[warden服务发现](warden-resolver.md)。 - -有了初始化`Client`的代码,我们的`Dao`对象即可进行初始化和使用,以下以直接import服务端api包为例: - -```go -package dao - -import( - demoapi "kratos-demo/api" - grpcempty "github.com/golang/protobuf/ptypes/empty" - "github.com/go-kratos/kratos/pkg/net/rpc/warden" - - "github.com/pkg/errors" -) - -type Dao struct{ - demoClient demoapi.DemoClient -} - -// New account dao. -func New() (d *Dao) { - cfg := &warden.ClientConfig{} - paladin.Get("grpc.toml").UnmarshalTOML(cfg) - d = &Dao{} - var err error - if d.demoClient, err = demoapi.NewClient(cfg); err != nil { - panic(err) - } - return -} - -// SayHello say hello. 
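-// SayHello 直接透传 gRPC 客户端调用,出错时通过 errors.Wrapf 包装上下文信息,便于上层定位问题。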
-func (d *Dao) SayHello(c context.Context, req *demoapi.HelloReq) (resp *grpcempty.Empty, err error) { - if resp, err = d.demoClient.SayHello(c, req); err != nil { - err = errors.Wrapf(err, "%v", arg) - } - return -} -``` - -如此在`internal/service`层就可以进行资源的方法调用。 - -# 扩展阅读 - -[warden拦截器](warden-mid.md) -[warden基于pb生成](warden-pb.md) -[warden服务发现](warden-resolver.md) -[warden负载均衡](warden-balancer.md) diff --git a/docs/warden-resolver.md b/docs/warden-resolver.md deleted file mode 100644 index 38232743f..000000000 --- a/docs/warden-resolver.md +++ /dev/null @@ -1,254 +0,0 @@ -# 前提 - -服务注册与发现最简单的就是`direct`固定服务端地址的直连方式。也就是服务端正常监听端口启动不进行额外操作,客户端使用如下`target`: - -```url -direct://default/127.0.0.1:9000,127.0.0.1:9091 -``` - -> `target`就是标准的`URL`资源定位符[查看WIKI](https://zh.wikipedia.org/wiki/%E7%BB%9F%E4%B8%80%E8%B5%84%E6%BA%90%E5%AE%9A%E4%BD%8D%E7%AC%A6) - -其中`direct`为协议类型,此处表示直接使用该`URL`内提供的地址`127.0.0.1:9000,127.0.0.1:9091`进行连接,而`default`在此处无意义仅当做占位符。 - -# gRPC Resolver - -gRPC暴露了服务发现的接口`resolver.Builder`和`resolver.ClientConn`和`resolver.Resolver`,[官方代码位置](https://github.com/grpc/grpc-go/blob/master/resolver/resolver.go): - -```go -// Builder creates a resolver that will be used to watch name resolution updates. -type Builder interface { - // Build creates a new resolver for the given target. - // - // gRPC dial calls Build synchronously, and fails if the returned error is - // not nil. - Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error) - // Scheme returns the scheme supported by this resolver. - // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. - Scheme() string -} - -// ClientConn contains the callbacks for resolver to notify any updates -// to the gRPC ClientConn. -// -// This interface is to be implemented by gRPC. Users should not need a -// brand new implementation of this interface. For the situations like -// testing, the new implementation should embed this interface. This allows -// gRPC to add new methods to this interface. -type ClientConn interface { - // UpdateState updates the state of the ClientConn appropriately. - UpdateState(State) - // NewAddress is called by resolver to notify ClientConn a new list - // of resolved addresses. - // The address list should be the complete list of resolved addresses. - // - // Deprecated: Use UpdateState instead. - NewAddress(addresses []Address) - // NewServiceConfig is called by resolver to notify ClientConn a new - // service config. The service config should be provided as a json string. - // - // Deprecated: Use UpdateState instead. - NewServiceConfig(serviceConfig string) -} - -// Resolver watches for the updates on the specified target. -// Updates include address updates and service config updates. -type Resolver interface { - // ResolveNow will be called by gRPC to try to resolve the target name - // again. It's just a hint, resolver can ignore this if it's not necessary. - // - // It could be called multiple times concurrently. - ResolveNow(ResolveNowOption) - // Close closes the resolver. 
- Close() -} -``` - -下面依次分析这三个接口的作用: - -* `Builder`用于gRPC内部创建`Resolver`接口的实现,但注意声明的`Build`方法将接口`ClientConn`作为参数传入了 -* `ClientConn`接口有两个废弃方法不用管,看`UpdateState`方法需要传入`State`结构,看代码可以发现其中包含了`Addresses []Address // Resolved addresses for the target`,可以看出是需要将服务发现得到的`Address`对象列表告诉`ClientConn`的对象 -* `Resolver`提供了`ResolveNow`用于被gRPC尝试重新进行服务发现 - -看完这三个接口就可以明白gRPC的服务发现实现逻辑,通过`Builder`进行`Reslover`的创建,在`Build`的过程中将服务发现的地址信息丢给`ClientConn`用于内部连接创建等逻辑。主要逻辑可以按下面顺序来看源码理解: - -* 当`client`在`Dial`时会根据`target`解析的`scheme`获取对应的`Builder`,[官方代码位置](https://github.com/grpc/grpc-go/blob/master/clientconn.go#L242) -* 当`Dial`成功会创建出结构体`ClientConn`的对象[官方代码位置](https://github.com/grpc/grpc-go/blob/master/clientconn.go#L447)(注意不是上面的`ClientConn`接口),可以看到结构体`ClientConn`内的成员`resolverWrapper`又实现了接口`ClientConn`的方法[官方代码位置](https://github.com/grpc/grpc-go/blob/master/resolver_conn_wrapper.go) -* 当`resolverWrapper`被初始化时就会调用`Build`方法[官方代码位置](https://github.com/grpc/grpc-go/blob/master/resolver_conn_wrapper.go#L89),其中参数为接口`ClientConn`传入的是`ccResolverWrapper` -* 当用户基于`Builder`的实现进行`UpdateState`调用时,则会触发结构体`ClientConn`的`updateResolverState`方法[官方代码位置](https://github.com/grpc/grpc-go/blob/master/resolver_conn_wrapper.go#L109),`updateResolverState`则会对传入的`Address`进行初始化等逻辑[官方代码位置](https://github.com/grpc/grpc-go/blob/master/clientconn.go#L553) - -如此整个服务发现过程就结束了。从中也可以看出gRPC官方提供的三个接口还是很灵活的,但也正因为灵活要实现稍微麻烦一些,而`Address`[官方代码位置](https://github.com/grpc/grpc-go/blob/master/resolver/resolver.go#L79)如果直接被业务拿来用于服务节点信息的描述结构则显得有些过于简单。 - -所以`warden`包装了gRPC的整个服务发现实现逻辑,代码分别位于`pkg/naming/naming.go`和`warden/resolver/resolver.go`,其中: - -* `naming.go`内定义了用于描述业务实例的`Instance`结构、用于服务注册的`Registry`接口、用于服务发现的`Resolver`接口 -* `resolver.go`内实现了gRPC官方的`resolver.Builder`和`resolver.Resolver`接口,但也暴露了`naming.go`内的`naming.Builder`和`naming.Resolver`接口 - -# warden Resolver - -接下来看`naming`内的接口如下: - -```go -// Resolver resolve naming service -type Resolver interface { - Fetch(context.Context) (*InstancesInfo, bool) - Watch() <-chan struct{} - Close() error -} - -// Builder resolver builder. -type Builder interface { - Build(id string) Resolver - Scheme() string -} -``` - -可以看到封装方式与gRPC官方的方法一样,通过`Builder`进行`Resolver`的初始化。不同的是通过封装将参数进行了简化: - -* `Build`只需要传对应的服务`id`即可:`warden/resolver/resolver.go`在gRPC进行调用后,会根据`Scheme`方法查询对应的`naming.Builder`实现并调用`Build`将`id`传入,而`naming.Resolver`的实现即可通过`id`去对应的服务发现中间件进行实例信息的查询 -* 而`Resolver`则对方法进行了扩展,除了简单进行`Fetch`操作外还多了`Watch`方法,用于监听服务发现中间件的节点变化情况,从而能够实时的进行服务实例信息的更新 - -在`naming/discovery`内实现了基于[discovery](https://github.com/bilibili/discovery)为中间件的服务注册与发现逻辑。如果要实现其他中间件如`etcd`|`zookeeper`等的逻辑,参考`naming/discovery/discovery.go`内的逻辑,将与`discovery`的交互逻辑替换掉即可(后续会默认将etcd/zk等实现,敬请期待)。 - -# 使用discovery - -因为`warden`内默认使用`direct`的方式,所以要使用[discovery](https://github.com/bilibili/discovery)需要在业务的`NewClient`前进行注册,代码如下: - -```go -package dao - -import ( - "context" - - "github.com/go-kratos/kratos/pkg/naming/discovery" - "github.com/go-kratos/kratos/pkg/net/rpc/warden" - "github.com/go-kratos/kratos/pkg/net/rpc/warden/resolver" - - "google.golang.org/grpc" -) - -// AppID your appid, ensure unique. -const AppID = "demo.service" // NOTE: example - -func init(){ - // NOTE: 注意这段代码,表示要使用discovery进行服务发现 - // NOTE: 还需注意的是,resolver.Register是全局生效的,所以建议该代码放在进程初始化的时候执行 - // NOTE: !!!切记不要在一个进程内进行多个不同中间件的Register!!! 
- // NOTE: 在启动应用时,可以通过flag(-discovery.nodes) 或者 环境配置(DISCOVERY_NODES)指定discovery节点 - resolver.Register(discovery.Builder()) -} - -// NewClient new member grpc client -func NewClient(cfg *warden.ClientConfig, opts ...grpc.DialOption) (DemoClient, error) { - client := warden.NewClient(cfg, opts...) - conn, err := client.Dial(context.Background(), "discovery://default/"+AppID) - if err != nil { - return nil, err - } - // 注意替换这里: - // NewDemoClient方法是在"api"目录下代码生成的 - // 对应proto文件内自定义的service名字,请使用正确方法名替换 - return NewDemoClient(conn), nil -} -``` - -> 注意:`resolver.Register`是全局行为,建议放在包加载阶段或main方法开始时执行,该方法执行后会在gRPC内注册构造方法 - -`target`是`discovery://default/${appid}`,当gRPC内进行解析后会得到`scheme`=`discovery`和`appid`,然后进行以下逻辑: - -1. `warden/resolver.Builder`会通过`scheme`获取到`naming/discovery.Builder`对象(靠`resolver.Register`注册过的) -2. 拿到`naming/discovery.Builder`后执行`Build(appid)`构造`naming/discovery.Discovery` -3. `naming/discovery.Discovery`对象基于`appid`就知道要获取哪个服务的实例信息 - -# 服务注册 - -客户端既然使用了[discovery](https://github.com/bilibili/discovery)进行服务发现,也就意味着服务端启动后必须将自己注册给[discovery](https://github.com/bilibili/discovery)知道。 - -相对服务发现来讲,服务注册则简单很多,看`naming/discovery/discovery.go`内的代码实现了`naming/naming.go`内的`Registry`接口,服务端启动时可以参考下面代码进行注册: - -```go -// 该代码可放在main.go,当warden server进行初始化之后 -// 省略... - -ip := "" // NOTE: 必须拿到您实例节点的真实IP, -port := "" // NOTE: 必须拿到您实例grpc监听的真实端口,warden默认监听9000 -hn, _ := os.Hostname() -dis := discovery.New(nil) -ins := &naming.Instance{ - Zone: env.Zone, - Env: env.DeployEnv, - AppID: "your app id", - Hostname: hn, - Addrs: []string{ - "grpc://" + ip + ":" + port, - }, -} -cancel, err := dis.Register(context.Background(), ins) -if err != nil { - panic(err) -} - -// 省略... - -// 特别注意!!! -// cancel必须在进程退出时执行!!! -cancel() -``` - - - -# 使用ETCD - -和使用discovery类似,只需要在注册时使用etcd naming即可。 - -```go -package dao - -import ( - "context" - - "github.com/go-kratos/kratos/pkg/naming/etcd" - "github.com/go-kratos/kratos/pkg/net/rpc/warden" - "github.com/go-kratos/kratos/pkg/net/rpc/warden/resolver" - - "google.golang.org/grpc" -) - -// AppID your appid, ensure unique. -const AppID = "demo.service" // NOTE: example - -func init(){ - // NOTE: 注意这段代码,表示要使用etcd进行服务发现 ,其他事项参考discovery的说明 - // NOTE: 在启动应用时,可以通过flag(-etcd.endpoints) 或者 环境配置(ETCD_ENDPOINTS)指定etcd节点 - // NOTE: 如果需要自己指定配置时 需要同时设置DialTimeout 与 DialOptions: []grpc.DialOption{grpc.WithBlock()} - resolver.Register(etcd.Builder(nil)) -} - -// NewClient new member grpc client -func NewClient(cfg *warden.ClientConfig, opts ...grpc.DialOption) (DemoClient, error) { - client := warden.NewClient(cfg, opts...) 
- // 这里使用etcd scheme - conn, err := client.Dial(context.Background(), "etcd://default/"+AppID) - if err != nil { - return nil, err - } - // 注意替换这里: - // NewDemoClient方法是在"api"目录下代码生成的 - // 对应proto文件内自定义的service名字,请使用正确方法名替换 - return NewDemoClient(conn), nil -} -``` - -etcd的服务注册与discovery基本相同,可以传入详细的etcd配置项, 或者传入nil后通过flag(-etcd.endpoints)/环境配置(ETCD_ENDPOINTS)来指定etcd节点。 - -### 其他配置项 - -etcd默认的全局keyPrefix为kratos_etcd,当该keyPrefix与项目中其他keyPrefix冲突时可以通过flag(-etcd.prefix)或者环境配置(ETCD_PREFIX)来指定keyPrefix。 - - - -# 扩展阅读 - -[warden快速开始](warden-quickstart.md) -[warden拦截器](warden-mid.md) -[warden基于pb生成](warden-pb.md) -[warden负载均衡](warden-balancer.md) diff --git a/docs/warden.md b/docs/warden.md deleted file mode 100644 index 996a0c81d..000000000 --- a/docs/warden.md +++ /dev/null @@ -1,41 +0,0 @@ -# 背景 - -我们需要统一的rpc服务,经过选型讨论决定直接使用成熟的、跨语言的gRPC。 - -# 概览 - -* 不改gRPC源码,基于接口进行包装集成trace、log、prom等组件 -* 打通自有服务注册发现系统[discovery](https://github.com/bilibili/discovery) -* 实现更平滑可靠的负载均衡算法 - -# 拦截器 - -gRPC暴露了两个拦截器接口,分别是: - -* `grpc.UnaryServerInterceptor`服务端拦截器 -* `grpc.UnaryClientInterceptor`客户端拦截器 - -基于两个拦截器可以针对性的定制公共模块的封装代码,比如`warden/logging.go`是通用日志逻辑。 - -[warden拦截器](warden-mid.md) - -# 服务发现 - -`warden`默认使用`direct`方式直连,正常线上都会使用第三方服务注册与发现中间件,`warden`内包含了[discovery](https://github.com/bilibili/discovery)的逻辑实现,想使用如`etcd`、`zookeeper`等也可以,都请看下面文档。 - -[warden服务发现](warden-resolver.md) - -# 负载均衡 - -实现了`wrr`和`p2c`两种算法,默认使用`p2c`。 - -[warden负载均衡](warden-balancer.md) - -# 扩展阅读 - -- [warden快速开始](warden-quickstart.md) -- [warden拦截器](warden-mid.md) -- [warden负载均衡](warden-balancer.md) -- [warden基于pb生成](warden-pb.md) -- [warden服务发现](warden-resolver.md) - diff --git a/encoding/encoding.go b/encoding/encoding.go new file mode 100644 index 000000000..0e9141a7a --- /dev/null +++ b/encoding/encoding.go @@ -0,0 +1,40 @@ +package encoding + +import "strings" + +// Codec defines the interface Transport uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a Codec's +// methods can be called from concurrent goroutines. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v interface{}) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v interface{}) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. + Name() string +} + +var registeredCodecs = make(map[string]Codec) + +// RegisterCodec registers the provided Codec for use with all Transport clients and +// servers. +func RegisterCodec(codec Codec) { + if codec == nil { + panic("cannot register a nil Codec") + } + if codec.Name() == "" { + panic("cannot register Codec with empty string result for Name()") + } + contentSubtype := strings.ToLower(codec.Name()) + registeredCodecs[contentSubtype] = codec +} + +// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. 
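+//
+// Illustrative usage (assumes a codec named "json" has been registered,
+// e.g. by importing the json codec subpackage):
+//
+//	codec := encoding.GetCodec("json")
+//	data, err := codec.Marshal(v)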
+func GetCodec(contentSubtype string) Codec { + return registeredCodecs[contentSubtype] +} diff --git a/encoding/json/json.go b/encoding/json/json.go new file mode 100644 index 000000000..8c88f4ceb --- /dev/null +++ b/encoding/json/json.go @@ -0,0 +1,49 @@ +package json + +import ( + "encoding/json" + + "github.com/go-kratos/kratos/v2/encoding" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) + +// Name is the name registered for the json codec. +const Name = "json" + +var ( + // MarshalOptions is a configurable JSON format marshaler. + MarshalOptions = protojson.MarshalOptions{ + EmitUnpopulated: true, + } + // UnmarshalOptions is a configurable JSON format parser. + UnmarshalOptions = protojson.UnmarshalOptions{ + DiscardUnknown: true, + } +) + +func init() { + encoding.RegisterCodec(codec{}) +} + +// codec is a Codec implementation with json. +type codec struct{} + +func (codec) Marshal(v interface{}) ([]byte, error) { + if m, ok := v.(proto.Message); ok { + return MarshalOptions.Marshal(m) + } + return json.Marshal(v) +} + +func (codec) Unmarshal(data []byte, v interface{}) error { + if m, ok := v.(proto.Message); ok { + return UnmarshalOptions.Unmarshal(data, m) + } + return json.Unmarshal(data, v) +} + +func (codec) Name() string { + return Name +} diff --git a/encoding/proto/proto.go b/encoding/proto/proto.go new file mode 100644 index 000000000..26a291441 --- /dev/null +++ b/encoding/proto/proto.go @@ -0,0 +1,30 @@ +// Package proto defines the protobuf codec. Importing this package will +// register the codec. +package proto + +import ( + "google.golang.org/grpc/encoding" + "google.golang.org/protobuf/proto" +) + +// Name is the name registered for the proto compressor. +const Name = "proto" + +func init() { + encoding.RegisterCodec(codec{}) +} + +// codec is a Codec implementation with protobuf. It is the default codec for Transport. +type codec struct{} + +func (codec) Marshal(v interface{}) ([]byte, error) { + return proto.Marshal(v.(proto.Message)) +} + +func (codec) Unmarshal(data []byte, v interface{}) error { + return proto.Unmarshal(data, v.(proto.Message)) +} + +func (codec) Name() string { + return Name +} diff --git a/errors/codes.go b/errors/codes.go new file mode 100644 index 000000000..acd1c4a71 --- /dev/null +++ b/errors/codes.go @@ -0,0 +1,285 @@ +package errors + +import ( + "errors" + "fmt" +) + +// Cancelled The operation was cancelled, typically by the caller. +// HTTP Mapping: 499 Client Closed Request +func Cancelled(reason, format string, a ...interface{}) error { + return &StatusError{ + Code: 1, + Reason: reason, + Message: fmt.Sprintf(format, a...), + } +} + +func IsCancelled(err error) bool { + if se := new(StatusError); errors.As(err, &se) { + return se.Code == 1 + } + return false +} + +// Unknown error. +// HTTP Mapping: 500 Internal Server Error +func Unknown(reason, format string, a ...interface{}) error { + return &StatusError{ + Code: 2, + Reason: reason, + Message: fmt.Sprintf(format, a...), + } +} + +func IsUnknown(err error) bool { + if se := new(StatusError); errors.As(err, &se) { + return se.Code == 2 + } + return false +} + +// InvalidArgument The client specified an invalid argument. 
+// HTTP Mapping: 400 Bad Request +func InvalidArgument(reason, format string, a ...interface{}) error { + return &StatusError{ + Code: 3, + Reason: reason, + Message: fmt.Sprintf(format, a...), + } +} + +func IsInvalidArgument(err error) bool { + if se := new(StatusError); errors.As(err, &se) { + return se.Code == 3 + } + return false +} + +// DeadlineExceeded The deadline expired before the operation could complete. +// HTTP Mapping: 504 Gateway Timeout +func DeadlineExceeded(reason, format string, a ...interface{}) error { + return &StatusError{ + Code: 4, + Reason: reason, + Message: fmt.Sprintf(format, a...), + } +} + +func IsDeadlineExceeded(err error) bool { + if se := new(StatusError); errors.As(err, &se) { + return se.Code == 4 + } + return false +} + +// NotFound Some requested entity (e.g., file or directory) was not found. +// HTTP Mapping: 404 Not Found +func NotFound(reason, format string, a ...interface{}) error { + return &StatusError{ + Code: 5, + Reason: reason, + Message: fmt.Sprintf(format, a...), + } +} + +func IsNotFound(err error) bool { + if se := new(StatusError); errors.As(err, &se) { + return se.Code == 5 + } + return false +} + +// AlreadyExists The entity that a client attempted to create (e.g., file or directory) already exists. +// HTTP Mapping: 409 Conflict +func AlreadyExists(reason, format string, a ...interface{}) error { + return &StatusError{ + Code: 6, + Reason: reason, + Message: fmt.Sprintf(format, a...), + } +} + +func IsAlreadyExists(err error) bool { + if se := new(StatusError); errors.As(err, &se) { + return se.Code == 6 + } + return false +} + +// PermissionDenied The caller does not have permission to execute the specified operation. +// HTTP Mapping: 403 Forbidden +func PermissionDenied(reason, format string, a ...interface{}) error { + return &StatusError{ + Code: 7, + Reason: reason, + Message: fmt.Sprintf(format, a...), + } +} + +func IsPermissionDenied(err error) bool { + if se := new(StatusError); errors.As(err, &se) { + return se.Code == 7 + } + return false +} + +// ResourceExhausted Some resource has been exhausted, perhaps a per-user quota, or +// perhaps the entire file system is out of space. +// HTTP Mapping: 429 Too Many Requests +func ResourceExhausted(reason, format string, a ...interface{}) error { + return &StatusError{ + Code: 8, + Reason: reason, + Message: fmt.Sprintf(format, a...), + } +} + +func IsResourceExhausted(err error) bool { + if se := new(StatusError); errors.As(err, &se) { + return se.Code == 8 + } + return false +} + +// FailedPrecondition The operation was rejected because the system is not in a state +// required for the operation's execution. +// HTTP Mapping: 400 Bad Request +func FailedPrecondition(reason, format string, a ...interface{}) error { + return &StatusError{ + Code: 9, + Reason: reason, + Message: fmt.Sprintf(format, a...), + } +} + +func IsFailedPrecondition(err error) bool { + if se := new(StatusError); errors.As(err, &se) { + return se.Code == 9 + } + return false +} + +// Aborted The operation was aborted, typically due to a concurrency issue such as +// a sequencer check failure or transaction abort. +// HTTP Mapping: 409 Conflict +func Aborted(reason, format string, a ...interface{}) error { + return &StatusError{ + Code: 10, + Reason: reason, + Message: fmt.Sprintf(format, a...), + } +} + +func IsAborted(err error) bool { + if se := new(StatusError); errors.As(err, &se) { + return se.Code == 10 + } + return false +} + +// OutOfRange The operation was attempted past the valid range. 
E.g., seeking or +// reading past end-of-file. +// HTTP Mapping: 400 Bad Request +func OutOfRange(reason, format string, a ...interface{}) error { + return &StatusError{ + Code: 11, + Reason: reason, + Message: fmt.Sprintf(format, a...), + } +} + +func IsOutOfRange(err error) bool { + if se := new(StatusError); errors.As(err, &se) { + return se.Code == 11 + } + return false +} + +// Unimplemented The operation is not implemented or is not supported/enabled in this service. +// HTTP Mapping: 501 Not Implemented +func Unimplemented(reason, format string, a ...interface{}) error { + return &StatusError{ + Code: 12, + Reason: reason, + Message: fmt.Sprintf(format, a...), + } +} + +func IsUnimplemented(err error) bool { + if se := new(StatusError); errors.As(err, &se) { + return se.Code == 12 + } + return false +} + +// Internal This means that some invariants expected by the +// underlying system have been broken. This error code is reserved +// for serious errors. +// +// HTTP Mapping: 500 Internal Server Error +func Internal(reason, format string, a ...interface{}) error { + return &StatusError{ + Code: 13, + Reason: reason, + Message: fmt.Sprintf(format, a...), + } +} + +func IsInternal(err error) bool { + if se := new(StatusError); errors.As(err, &se) { + return se.Code == 13 + } + return false +} + +// Unavailable The service is currently unavailable. +// HTTP Mapping: 503 Service Unavailable +func Unavailable(reason, format string, a ...interface{}) error { + return &StatusError{ + Code: 14, + Reason: reason, + Message: fmt.Sprintf(format, a...), + } +} + +func IsUnavailable(err error) bool { + if se := new(StatusError); errors.As(err, &se) { + return se.Code == 14 + } + return false +} + +// DataLoss Unrecoverable data loss or corruption. +// HTTP Mapping: 500 Internal Server Error +func DataLoss(reason, format string, a ...interface{}) error { + return &StatusError{ + Code: 15, + Reason: reason, + Message: fmt.Sprintf(format, a...), + } +} + +func IsDataLoss(err error) bool { + if se := new(StatusError); errors.As(err, &se) { + return se.Code == 15 + } + return false +} + +// Unauthorized The request does not have valid authentication credentials for the operation. +// HTTP Mapping: 401 Unauthorized +func Unauthorized(reason, format string, a ...interface{}) error { + return &StatusError{ + Code: 16, + Reason: reason, + Message: fmt.Sprintf(format, a...), + } +} + +func IsUnauthorized(err error) bool { + if se := new(StatusError); errors.As(err, &se) { + return se.Code == 16 + } + return false +} diff --git a/errors/errors.go b/errors/errors.go new file mode 100644 index 000000000..69f7b8a64 --- /dev/null +++ b/errors/errors.go @@ -0,0 +1,73 @@ +package errors + +import ( + "errors" + "fmt" +) + +const ( + // UnknownReason is unknown reason for error info. + UnknownReason = "" + // SupportPackageIsVersion1 this constant should not be referenced by any other code. + SupportPackageIsVersion1 = true +) + +var _ error = (*StatusError)(nil) + +// StatusError contains an error response from the server. +type StatusError = Status + +// Is matches each error in the chain with the target value. +func (e *StatusError) Is(target error) bool { + err, ok := target.(*StatusError) + if ok { + return e.Code == err.Code + } + return false +} + +func (e *StatusError) Error() string { + return fmt.Sprintf("error: code = %d reason = %s message = %s details = %+v", e.Code, e.Reason, e.Message, e.Details) +} + +// Error returns a Status representing c and msg. 
+func Error(code int32, reason, message string) error { + return &StatusError{ + Code: code, + Reason: reason, + Message: message, + } +} + +// Errorf returns New(c, fmt.Sprintf(format, a...)). +func Errorf(code int32, reason, format string, a ...interface{}) error { + return Error(code, reason, fmt.Sprintf(format, a...)) +} + +// Code returns the status code. +func Code(err error) int32 { + if err == nil { + return 0 // ok + } + if se := new(StatusError); errors.As(err, &se) { + return se.Code + } + return 2 // unknown +} + +// Reason returns the status for a particular error. +// It supports wrapped errors. +func Reason(err error) string { + if se := new(StatusError); errors.As(err, &se) { + return se.Reason + } + return UnknownReason +} + +// FromError returns status error. +func FromError(err error) (*StatusError, bool) { + if se := new(StatusError); errors.As(err, &se) { + return se, true + } + return nil, false +} diff --git a/errors/errors.pb.go b/errors/errors.pb.go new file mode 100644 index 000000000..3b142ba34 --- /dev/null +++ b/errors/errors.pb.go @@ -0,0 +1,187 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.13.0 +// source: errors.proto + +package errors + +import ( + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +type Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Reason string `protobuf:"bytes,2,opt,name=reason,proto3" json:"reason,omitempty"` + Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` + Details []*any.Any `protobuf:"bytes,4,rep,name=details,proto3" json:"details,omitempty"` +} + +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_errors_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_errors_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. 
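A small usage sketch of the new `errors` package, illustrative only and not part of the diff: `findUser` and the "USER_NOT_FOUND" reason string are hypothetical, while `NotFound`, `IsNotFound`, `Code`, `Reason`, and `FromError` are the helpers defined above.

```go
package main

import (
	"fmt"

	"github.com/go-kratos/kratos/v2/errors"
)

// findUser is a placeholder that always fails with a typed NotFound error.
func findUser(id int64) error {
	// The reason is a machine-readable identifier; the message is for humans.
	return errors.NotFound("USER_NOT_FOUND", "user %d does not exist", id)
}

func main() {
	// Wrapping with %w keeps the StatusError reachable via errors.As.
	err := fmt.Errorf("query failed: %w", findUser(1))

	fmt.Println(errors.IsNotFound(err)) // true
	fmt.Println(errors.Code(err))       // 5
	fmt.Println(errors.Reason(err))     // USER_NOT_FOUND

	if se, ok := errors.FromError(err); ok {
		fmt.Println(se.Message) // user 1 does not exist
	}
}
```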
+func (*Status) Descriptor() ([]byte, []int) { + return file_errors_proto_rawDescGZIP(), []int{0} +} + +func (x *Status) GetCode() int32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *Status) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Status) GetDetails() []*any.Any { + if x != nil { + return x.Details + } + return nil +} + +var File_errors_proto protoreflect.FileDescriptor + +var file_errors_proto_rawDesc = []byte{ + 0x0a, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, + 0x6b, 0x72, 0x61, 0x74, 0x6f, 0x73, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x1a, 0x19, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, + 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7e, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x18, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x62, 0x0a, 0x11, 0x64, 0x65, 0x76, 0x2e, + 0x6b, 0x72, 0x61, 0x74, 0x6f, 0x73, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x42, 0x0b, 0x45, + 0x72, 0x72, 0x6f, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2d, 0x6b, 0x72, 0x61, 0x74, + 0x6f, 0x73, 0x2f, 0x6b, 0x72, 0x61, 0x74, 0x6f, 0x73, 0x2f, 0x76, 0x32, 0x2f, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x73, 0x3b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x0c, + 0x4b, 0x72, 0x61, 0x74, 0x6f, 0x73, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_errors_proto_rawDescOnce sync.Once + file_errors_proto_rawDescData = file_errors_proto_rawDesc +) + +func file_errors_proto_rawDescGZIP() []byte { + file_errors_proto_rawDescOnce.Do(func() { + file_errors_proto_rawDescData = protoimpl.X.CompressGZIP(file_errors_proto_rawDescData) + }) + return file_errors_proto_rawDescData +} + +var file_errors_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_errors_proto_goTypes = []interface{}{ + (*Status)(nil), // 0: kratos.errors.Status + (*any.Any)(nil), // 1: google.protobuf.Any +} +var file_errors_proto_depIdxs = []int32{ + 1, // 0: kratos.errors.Status.details:type_name -> google.protobuf.Any + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_errors_proto_init() } +func file_errors_proto_init() { + if File_errors_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_errors_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*Status); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_errors_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_errors_proto_goTypes,
+ DependencyIndexes: file_errors_proto_depIdxs,
+ MessageInfos: file_errors_proto_msgTypes,
+ }.Build()
+ File_errors_proto = out.File
+ file_errors_proto_rawDesc = nil
+ file_errors_proto_goTypes = nil
+ file_errors_proto_depIdxs = nil
+}
diff --git a/errors/errors.proto b/errors/errors.proto
new file mode 100644
index 000000000..653bbf961
--- /dev/null
+++ b/errors/errors.proto
@@ -0,0 +1,19 @@
+syntax = "proto3";
+
+package kratos.errors;
+
+import "google/protobuf/any.proto";
+
+option cc_enable_arenas = true;
+option go_package = "github.com/go-kratos/kratos/v2/errors;errors";
+option java_multiple_files = true;
+option java_outer_classname = "ErrorsProto";
+option java_package = "com.github.kratos.errors";
+option objc_class_prefix = "KratosErrors";
+
+message Status {
+ int32 code = 1;
+ string reason = 2;
+ string message = 3;
+ repeated google.protobuf.Any details = 4;
+}
diff --git a/errors/errors_test.go b/errors/errors_test.go
new file mode 100644
index 000000000..4c66b2934
--- /dev/null
+++ b/errors/errors_test.go
@@ -0,0 +1,54 @@
+package errors
+
+import (
+ "errors"
+ "fmt"
+ "testing"
+)
+
+func TestErrorsMatch(t *testing.T) {
+ s := &StatusError{Code: 1}
+ st := &StatusError{Code: 2}
+
+ if errors.Is(s, st) {
+ t.Errorf("error is not match: %+v -> %+v", s, st)
+ }
+
+ s.Code = 1
+ st.Code = 1
+ if !errors.Is(s, st) {
+ t.Errorf("error is not match: %+v -> %+v", s, st)
+ }
+
+ s.Reason = "test_reason"
+ st.Reason = "test_reason"
+
+ if !errors.Is(s, st) {
+ t.Errorf("error is not match: %+v -> %+v", s, st)
+ }
+
+ if Reason(s) != "test_reason" {
+ t.Errorf("error is not match: %+v -> %+v", s, st)
+ }
+}
+
+func TestErrorIs(t *testing.T) {
+ err1 := &StatusError{Code: 1}
+ t.Log(err1)
+ err2 := fmt.Errorf("wrap : %w", err1)
+ t.Log(err2)
+
+ if !(errors.Is(err2, err1)) {
+ t.Errorf("error is not match: a: %v b: %v ", err2, err1)
+ }
+}
+
+func TestErrorAs(t *testing.T) {
+ err1 := &StatusError{Code: 1}
+ err2 := fmt.Errorf("wrap : %w", err1)
+
+ err3 := new(StatusError)
+ if !errors.As(err2, &err3) {
+ t.Errorf("error is not match: %v", err2)
+ }
+}
diff --git a/example/blademaster/middleware/auth/README.md b/example/blademaster/middleware/auth/README.md
deleted file mode 100644
index 22f7554eb..000000000
--- a/example/blademaster/middleware/auth/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-#### blademaster/middleware/auth
-
-##### Overview
-
-The authorization middleware for blademaster, mainly used to configure the authentication policy of routes
-
-Note: this is only a demo; implement your own authentication logic according to your business needs
diff --git a/example/blademaster/middleware/auth/auth.go b/example/blademaster/middleware/auth/auth.go
deleted file mode 100644
index cb3accf66..000000000
--- a/example/blademaster/middleware/auth/auth.go
+++ /dev/null
@@ -1,153 +0,0 @@
-package auth
-
-import (
- "github.com/go-kratos/kratos/pkg/ecode"
- bm "github.com/go-kratos/kratos/pkg/net/http/blademaster"
- "github.com/go-kratos/kratos/pkg/net/metadata"
-)
-
-// Config is the identify config model.
-type Config struct {
- // csrf switch.
- DisableCSRF bool
-}
-
-// Auth is the authorization middleware
-type Auth struct {
- conf *Config
-}
-
-// authFunc returns the mid and an error for the given context
-type authFunc func(*bm.Context) (int64, error)
-
-var _defaultConf = &Config{
- DisableCSRF: false,
-}
-
-// New is used to create an authorization middleware
-func New(conf *Config) *Auth {
- if conf == nil {
- conf = _defaultConf
- }
- auth := &Auth{
- conf: conf,
- }
- return auth
-}
-
-// User is used to mark path as access required.
-// If `access_token` is present in the request form, the mobile access policy is used.
-// Otherwise the web access policy is used.
-func (a *Auth) User(ctx *bm.Context) {
- req := ctx.Request
- if req.Form.Get("access_token") == "" {
- a.UserWeb(ctx)
- return
- }
- a.UserMobile(ctx)
-}
-
-// UserWeb is used to mark path as web access required.
-func (a *Auth) UserWeb(ctx *bm.Context) {
- a.midAuth(ctx, a.authCookie)
-}
-
-// UserMobile is used to mark path as mobile access required.
-func (a *Auth) UserMobile(ctx *bm.Context) {
- a.midAuth(ctx, a.authToken)
-}
-
-// Guest is used to mark path as guest policy.
-// If `access_token` is present in the request form, the mobile access policy is used.
-// Otherwise the web access policy is used.
-func (a *Auth) Guest(ctx *bm.Context) {
- req := ctx.Request
- if req.Form.Get("access_token") == "" {
- a.GuestWeb(ctx)
- return
- }
- a.GuestMobile(ctx)
-}
-
-// GuestWeb is used to mark path as web guest policy.
-func (a *Auth) GuestWeb(ctx *bm.Context) {
- a.guestAuth(ctx, a.authCookie)
-}
-
-// GuestMobile is used to mark path as mobile guest policy.
-func (a *Auth) GuestMobile(ctx *bm.Context) {
- a.guestAuth(ctx, a.authToken)
-}
-
-// authToken is used to authorize request by token
-func (a *Auth) authToken(ctx *bm.Context) (int64, error) {
- req := ctx.Request
- key := req.Form.Get("access_token")
- if key == "" {
- return 0, ecode.Unauthorized
- }
- // NOTE: call the login/authentication service to get the corresponding user id
- var mid int64
- // TODO: get mid from some code
- return mid, nil
-}
-
-// authCookie is used to authorize request by cookie
-func (a *Auth) authCookie(ctx *bm.Context) (int64, error) {
- req := ctx.Request
- session, _ := req.Cookie("SESSION")
- if session == nil {
- return 0, ecode.Unauthorized
- }
- // NOTE: call the login/authentication service to get the corresponding user id
- var mid int64
- // TODO: get mid from some code
-
- // check csrf
- clientCsrf := req.FormValue("csrf")
- if a.conf != nil && !a.conf.DisableCSRF && req.Method == "POST" {
- // NOTE: if CSRF validation is enabled, fetch the csrf associated with this user from the CSRF service
- var csrf string // TODO: get csrf from some code
- if clientCsrf != csrf {
- return 0, ecode.Unauthorized
- }
- }
-
- return mid, nil
-}
-
-func (a *Auth) midAuth(ctx *bm.Context, auth authFunc) {
- mid, err := auth(ctx)
- if err != nil {
- ctx.JSON(nil, err)
- ctx.Abort()
- return
- }
- setMid(ctx, mid)
-}
-
-func (a *Auth) guestAuth(ctx *bm.Context, auth authFunc) {
- mid, err := auth(ctx)
- // no error happened and mid is valid
- if err == nil && mid > 0 {
- setMid(ctx, mid)
- return
- }
-
- ec := ecode.Cause(err)
- if ecode.Equal(ec, ecode.Unauthorized) {
- ctx.JSON(nil, ec)
- ctx.Abort()
- return
- }
-}
-
-// set mid into context
-// NOTE: This method is not thread safe.
-func setMid(ctx *bm.Context, mid int64) { - ctx.Set(metadata.Mid, mid) - if md, ok := metadata.FromContext(ctx); ok { - md[metadata.Mid] = mid - return - } -} diff --git a/example/blademaster/middleware/auth/example_test.go b/example/blademaster/middleware/auth/example_test.go deleted file mode 100644 index d77d34494..000000000 --- a/example/blademaster/middleware/auth/example_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package auth_test - -import ( - "fmt" - - "github.com/go-kratos/kratos/example/blademaster/middleware/auth" - bm "github.com/go-kratos/kratos/pkg/net/http/blademaster" - "github.com/go-kratos/kratos/pkg/net/metadata" -) - -// This example create a identify middleware instance and attach to several path, -// it will validate request by specified policy and put extra information into context. e.g., `mid`. -// It provides additional handler functions to provide the identification for your business handler. -func Example() { - myHandler := func(ctx *bm.Context) { - mid := metadata.Int64(ctx, metadata.Mid) - ctx.JSON(fmt.Sprintf("%d", mid), nil) - } - - authn := auth.New(&auth.Config{ - DisableCSRF: false, - }) - - e := bm.DefaultServer(nil) - - // mark `/user` path as User policy - e.GET("/user", authn.User, myHandler) - // mark `/mobile` path as UserMobile policy - e.GET("/mobile", authn.UserMobile, myHandler) - // mark `/web` path as UserWeb policy - e.GET("/web", authn.UserWeb, myHandler) - // mark `/guest` path as Guest policy - e.GET("/guest", authn.Guest, myHandler) - - o := e.Group("/owner", authn.User) - o.GET("/info", myHandler) - o.POST("/modify", myHandler) - - e.Start() -} diff --git a/example/protobuf/api.bm.go b/example/protobuf/api.bm.go deleted file mode 100644 index ebe6252ec..000000000 --- a/example/protobuf/api.bm.go +++ /dev/null @@ -1,54 +0,0 @@ -// Code generated by protoc-gen-bm v0.1, DO NOT EDIT. -// source: api.proto - -package api - -import ( - "context" - - bm "github.com/go-kratos/kratos/pkg/net/http/blademaster" - "github.com/go-kratos/kratos/pkg/net/http/blademaster/binding" -) -import google_protobuf1 "github.com/golang/protobuf/ptypes/empty" - -// to suppressed 'imported but not used warning' -var _ *bm.Context -var _ context.Context -var _ binding.StructValidator - -var PathUserInfo = "/user.api.User/Info" -var PathUserCard = "/user.api.User/Card" - -// UserBMServer is the server API for User service. 
-type UserBMServer interface { - Info(ctx context.Context, req *UserReq) (resp *InfoReply, err error) - - Card(ctx context.Context, req *UserReq) (resp *google_protobuf1.Empty, err error) -} - -var UserSvc UserBMServer - -func userInfo(c *bm.Context) { - p := new(UserReq) - if err := c.BindWith(p, binding.Default(c.Request.Method, c.Request.Header.Get("Content-Type"))); err != nil { - return - } - resp, err := UserSvc.Info(c, p) - c.JSON(resp, err) -} - -func userCard(c *bm.Context) { - p := new(UserReq) - if err := c.BindWith(p, binding.Default(c.Request.Method, c.Request.Header.Get("Content-Type"))); err != nil { - return - } - resp, err := UserSvc.Card(c, p) - c.JSON(resp, err) -} - -// RegisterUserBMServer Register the blademaster route -func RegisterUserBMServer(e *bm.Engine, server UserBMServer) { - UserSvc = server - e.GET("/user.api.User/Info", userInfo) - e.GET("/user.api.User/Card", userCard) -} diff --git a/example/protobuf/api.ecode.go b/example/protobuf/api.ecode.go deleted file mode 100644 index 7563f36ab..000000000 --- a/example/protobuf/api.ecode.go +++ /dev/null @@ -1,17 +0,0 @@ -// Code generated by protoc-gen-ecode v0.1, DO NOT EDIT. -// source: api.proto - -package api - -import ( - "github.com/go-kratos/kratos/pkg/ecode" -) - -// to suppressed 'imported but not used warning' -var _ ecode.Codes - -// UserErrCode ecode -var ( - UserNotExist = ecode.New(-404) - UserUpdateNameFailed = ecode.New(10000) -) diff --git a/example/protobuf/api.pb.go b/example/protobuf/api.pb.go deleted file mode 100644 index 3cdf09b00..000000000 --- a/example/protobuf/api.pb.go +++ /dev/null @@ -1,1000 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: api.proto - -package api - -import ( - context "context" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - io "io" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type UserErrCode int32 - -const ( - UserErrCode_OK UserErrCode = 0 - UserErrCode_UserNotExist UserErrCode = -404 - UserErrCode_UserUpdateNameFailed UserErrCode = 10000 -) - -var UserErrCode_name = map[int32]string{ - 0: "OK", - -404: "UserNotExist", - 10000: "UserUpdateNameFailed", -} - -var UserErrCode_value = map[string]int32{ - "OK": 0, - "UserNotExist": -404, - "UserUpdateNameFailed": 10000, -} - -func (x UserErrCode) String() string { - return proto.EnumName(UserErrCode_name, int32(x)) -} - -func (UserErrCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{0} -} - -type Info struct { - Mid int64 `protobuf:"varint,1,opt,name=mid,proto3" json:"mid"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name"` - Sex string `protobuf:"bytes,3,opt,name=sex,proto3" json:"sex"` - Face string `protobuf:"bytes,4,opt,name=face,proto3" json:"face"` - Sign string `protobuf:"bytes,5,opt,name=sign,proto3" json:"sign"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Info) Reset() { *m = Info{} } -func (m *Info) String() string { return proto.CompactTextString(m) } -func (*Info) ProtoMessage() {} -func (*Info) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{0} -} -func (m *Info) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Info.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Info) XXX_Merge(src proto.Message) { - xxx_messageInfo_Info.Merge(m, src) -} -func (m *Info) XXX_Size() int { - return m.Size() -} -func (m *Info) XXX_DiscardUnknown() { - xxx_messageInfo_Info.DiscardUnknown(m) -} - -var xxx_messageInfo_Info proto.InternalMessageInfo - -func (m *Info) GetMid() int64 { - if m != nil { - return m.Mid - } - return 0 -} - -func (m *Info) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Info) GetSex() string { - if m != nil { - return m.Sex - } - return "" -} - -func (m *Info) GetFace() string { - if m != nil { - return m.Face - } - return "" -} - -func (m *Info) GetSign() string { - if m != nil { - return m.Sign - } - return "" -} - -type UserReq struct { - Mid int64 `protobuf:"varint,1,opt,name=mid,proto3" json:"mid,omitempty" validate:"gt=0,required"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UserReq) Reset() { *m = UserReq{} } -func (m *UserReq) String() string { return proto.CompactTextString(m) } -func (*UserReq) ProtoMessage() {} -func (*UserReq) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{1} -} -func (m *UserReq) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UserReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UserReq.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UserReq) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserReq.Merge(m, src) -} -func (m *UserReq) XXX_Size() int { - return m.Size() -} -func (m *UserReq) XXX_DiscardUnknown() { - 
xxx_messageInfo_UserReq.DiscardUnknown(m) -} - -var xxx_messageInfo_UserReq proto.InternalMessageInfo - -func (m *UserReq) GetMid() int64 { - if m != nil { - return m.Mid - } - return 0 -} - -type InfoReply struct { - Info *Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InfoReply) Reset() { *m = InfoReply{} } -func (m *InfoReply) String() string { return proto.CompactTextString(m) } -func (*InfoReply) ProtoMessage() {} -func (*InfoReply) Descriptor() ([]byte, []int) { - return fileDescriptor_00212fb1f9d3bf1c, []int{2} -} -func (m *InfoReply) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *InfoReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_InfoReply.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *InfoReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_InfoReply.Merge(m, src) -} -func (m *InfoReply) XXX_Size() int { - return m.Size() -} -func (m *InfoReply) XXX_DiscardUnknown() { - xxx_messageInfo_InfoReply.DiscardUnknown(m) -} - -var xxx_messageInfo_InfoReply proto.InternalMessageInfo - -func (m *InfoReply) GetInfo() *Info { - if m != nil { - return m.Info - } - return nil -} - -func init() { - proto.RegisterEnum("user.api.UserErrCode", UserErrCode_name, UserErrCode_value) - proto.RegisterType((*Info)(nil), "user.api.Info") - proto.RegisterType((*UserReq)(nil), "user.api.UserReq") - proto.RegisterType((*InfoReply)(nil), "user.api.InfoReply") -} - -func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) } - -var fileDescriptor_00212fb1f9d3bf1c = []byte{ - // 366 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x51, 0x4d, 0x4b, 0xeb, 0x40, - 0x14, 0xed, 0x34, 0x79, 0xfd, 0x98, 0x3e, 0x1e, 0x7d, 0xf3, 0x9e, 0x90, 0x96, 0x92, 0x96, 0xac, - 0x8a, 0x68, 0x2a, 0x15, 0x04, 0x05, 0x37, 0x95, 0x0a, 0x52, 0xa8, 0x10, 0xe8, 0xc6, 0xdd, 0xb4, - 0x99, 0xc4, 0x81, 0x26, 0x93, 0xe6, 0x43, 0xea, 0xbf, 0x70, 0x25, 0xfe, 0x24, 0x97, 0xfe, 0x82, - 0x22, 0x75, 0x57, 0x5c, 0xb9, 0x16, 0x94, 0x7b, 0x5b, 0xa9, 0x38, 0x8b, 0xc3, 0x9c, 0x39, 0xf7, - 0x70, 0xcf, 0xbd, 0x43, 0xcb, 0x3c, 0x92, 0x76, 0x14, 0xab, 0x54, 0xb1, 0x52, 0x96, 0x88, 0xd8, - 0xe6, 0x91, 0xac, 0xef, 0xfb, 0x32, 0xbd, 0xce, 0xc6, 0xf6, 0x44, 0x05, 0x1d, 0x5f, 0xf9, 0xaa, - 0x83, 0x05, 0xe3, 0xcc, 0x43, 0x86, 0x04, 0x6f, 0x6b, 0xa3, 0x75, 0x4f, 0xa8, 0x7e, 0x11, 0x7a, - 0x8a, 0xd5, 0xa8, 0x16, 0x48, 0xd7, 0x20, 0x2d, 0xd2, 0xd6, 0x7a, 0xc5, 0xd5, 0xa2, 0x09, 0xd4, - 0x01, 0x60, 0x0d, 0xaa, 0x87, 0x3c, 0x10, 0x46, 0xbe, 0x45, 0xda, 0xe5, 0x5e, 0x69, 0xb5, 0x68, - 0x22, 0x77, 0x10, 0xc1, 0x98, 0x88, 0xb9, 0xa1, 0xa1, 0x88, 0xc6, 0x44, 0xcc, 0x1d, 0x00, 0x30, - 0x7a, 0x7c, 0x22, 0x0c, 0x7d, 0x6b, 0x04, 0xee, 0x20, 0x82, 0x9a, 0x48, 0x3f, 0x34, 0x7e, 0x6d, - 0x55, 0xe0, 0x0e, 0xa2, 0x75, 0x4c, 0x8b, 0xa3, 0x44, 0xc4, 0x8e, 0x98, 0x31, 0xfb, 0x7b, 0xb4, - 0xc6, 0xdb, 0xa2, 0x69, 0xdc, 0xf0, 0xa9, 0x74, 0x79, 0x2a, 0x4e, 0x2c, 0x3f, 0x3d, 0x3d, 0xd8, - 0x8b, 0xc5, 0x2c, 0x93, 0xb1, 0x70, 0x2d, 0xcc, 0x6b, 0x75, 0x68, 0x19, 0x46, 0x72, 0x44, 0x34, - 0xbd, 0x65, 0x16, 0xd5, 0x65, 0xe8, 0x29, 0x74, 0x57, 0xba, 0x7f, 0xec, 0xaf, 0x45, 0xd9, 0x58, - 0x82, 0xda, 0xee, 0x80, 0x56, 0xa0, 0x57, 0x3f, 0x8e, 0xcf, 0x94, 0x2b, 0x58, 0x81, 
0xe6, 0x2f, - 0x07, 0xd5, 0x1c, 0xab, 0xd1, 0xdf, 0xf0, 0x3c, 0x54, 0x69, 0x7f, 0x2e, 0x93, 0xb4, 0xfa, 0xfa, - 0xfe, 0xb1, 0x3e, 0x84, 0xd5, 0xe8, 0x7f, 0x90, 0x46, 0x11, 0xa4, 0x18, 0xf2, 0x40, 0x9c, 0x73, - 0x39, 0x15, 0x6e, 0xf5, 0x6e, 0xd8, 0x3d, 0xa2, 0x3a, 0x48, 0xcc, 0xde, 0x2c, 0xf6, 0xef, 0xb6, - 0xe5, 0x66, 0xa0, 0xfa, 0xbf, 0x1f, 0x29, 0x20, 0x68, 0x6f, 0xe7, 0x71, 0x69, 0x92, 0xa7, 0xa5, - 0x49, 0x9e, 0x97, 0x26, 0x79, 0x78, 0x31, 0x73, 0x57, 0x1a, 0x8f, 0xe4, 0xb8, 0x80, 0xff, 0x74, - 0xf8, 0x19, 0x00, 0x00, 0xff, 0xff, 0x9d, 0xa7, 0xb7, 0x22, 0xed, 0x01, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// UserClient is the client API for User service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type UserClient interface { - Info(ctx context.Context, in *UserReq, opts ...grpc.CallOption) (*InfoReply, error) -} - -type userClient struct { - cc *grpc.ClientConn -} - -func NewUserClient(cc *grpc.ClientConn) UserClient { - return &userClient{cc} -} - -func (c *userClient) Info(ctx context.Context, in *UserReq, opts ...grpc.CallOption) (*InfoReply, error) { - out := new(InfoReply) - err := c.cc.Invoke(ctx, "/user.api.User/Info", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// UserServer is the server API for User service. -type UserServer interface { - Info(context.Context, *UserReq) (*InfoReply, error) -} - -func RegisterUserServer(s *grpc.Server, srv UserServer) { - s.RegisterService(&_User_serviceDesc, srv) -} - -func _User_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UserReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(UserServer).Info(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/user.api.User/Info", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(UserServer).Info(ctx, req.(*UserReq)) - } - return interceptor(ctx, in, info, handler) -} - -var _User_serviceDesc = grpc.ServiceDesc{ - ServiceName: "user.api.User", - HandlerType: (*UserServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Info", - Handler: _User_Info_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "api.proto", -} - -func (m *Info) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Info) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Mid != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintApi(dAtA, i, uint64(m.Mid)) - } - if len(m.Name) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Sex) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintApi(dAtA, i, uint64(len(m.Sex))) - i += copy(dAtA[i:], m.Sex) - } - if len(m.Face) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintApi(dAtA, i, uint64(len(m.Face))) - i += copy(dAtA[i:], m.Face) - } - if len(m.Sign) > 0 { - dAtA[i] = 0x2a - i++ - i = 
encodeVarintApi(dAtA, i, uint64(len(m.Sign))) - i += copy(dAtA[i:], m.Sign) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *UserReq) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UserReq) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Mid != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintApi(dAtA, i, uint64(m.Mid)) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *InfoReply) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InfoReply) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Info != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintApi(dAtA, i, uint64(m.Info.Size())) - n1, err := m.Info.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func encodeVarintApi(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Info) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Mid != 0 { - n += 1 + sovApi(uint64(m.Mid)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - l = len(m.Sex) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - l = len(m.Face) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - l = len(m.Sign) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UserReq) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Mid != 0 { - n += 1 + sovApi(uint64(m.Mid)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *InfoReply) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Info != nil { - l = m.Info.Size() - n += 1 + l + sovApi(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovApi(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozApi(x uint64) (n int) { - return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Info) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Info: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Info: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mid", wireType) - } - m.Mid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - m.Mid |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sex", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Sex = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Face", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Face = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sign", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Sign = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UserReq) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UserReq: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UserReq: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mid", wireType) - } - m.Mid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Mid |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InfoReply) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InfoReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InfoReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Info == nil { - m.Info = &Info{} - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipApi(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthApi - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthApi - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipApi(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthApi - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") -) diff --git a/example/protobuf/api.proto b/example/protobuf/api.proto deleted file mode 100644 index 23f65d47d..000000000 --- a/example/protobuf/api.proto +++ /dev/null @@ -1,35 +0,0 @@ -syntax = "proto3"; - -package user.api; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -import "google/protobuf/empty.proto"; - -option go_package = "api"; - -enum UserErrCode { - OK = 0; - UserNotExist = -404; - UserUpdateNameFailed = 10000; -} - -message Info { - int64 mid = 1 [(gogoproto.jsontag) = "mid"]; - string name = 2 [(gogoproto.jsontag) = "name"]; - string sex = 3 [(gogoproto.jsontag) = "sex"]; - string face = 4 [(gogoproto.jsontag) = "face"]; - string sign = 5 [(gogoproto.jsontag) = "sign"]; -} - -message UserReq { - int64 mid = 1 [(gogoproto.moretags) = "validate:\"gt=0,required\""]; -} - -message InfoReply { - Info info = 1; -} - -service User { - rpc Info(UserReq) returns (InfoReply); - rpc Card(UserReq) returns (google.protobuf.Empty); -} diff --git a/example/protobuf/api.swagger.json b/example/protobuf/api.swagger.json deleted file mode 100644 index 32266a239..000000000 --- a/example/protobuf/api.swagger.json +++ /dev/null @@ -1,96 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "title": "api.proto", - "version": "" - }, - "schemes": [ - "http", - "https" - ], - "consumes": [ - "application/json", - "multipart/form-data" - ], - "produces": [ - "application/json" - ], - "paths": { - "/user.api.User/Info": { - "get": { - 
"summary": "/user.api.User/Info", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "type": "object", - "properties": { - "code": { - "type": "integer" - }, - "message": { - "type": "string" - }, - "data": { - "$ref": "#/definitions/.user.api.InfoReply" - } - } - } - } - }, - "parameters": [ - { - "name": "mid", - "in": "query", - "required": true, - "type": "integer" - } - ], - "tags": [ - "user.api.User" - ] - } - } - }, - "definitions": { - ".user.api.Info": { - "type": "object", - "properties": { - "mid": { - "type": "integer" - }, - "name": { - "type": "string" - }, - "sex": { - "type": "string" - }, - "face": { - "type": "string" - }, - "sign": { - "type": "string" - } - } - }, - ".user.api.InfoReply": { - "type": "object", - "properties": { - "info": { - "$ref": "#/definitions/.user.api.Info" - } - } - }, - ".user.api.UserReq": { - "type": "object", - "properties": { - "mid": { - "type": "integer" - } - }, - "required": [ - "mid" - ] - } - } -} \ No newline at end of file diff --git a/example/protobuf/gen.sh b/example/protobuf/gen.sh deleted file mode 100644 index 96984776c..000000000 --- a/example/protobuf/gen.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -kratos tool protoc api.proto diff --git a/go.mod b/go.mod index ce4196b74..9880ce6c1 100644 --- a/go.mod +++ b/go.mod @@ -1,70 +1,16 @@ -module github.com/go-kratos/kratos +module github.com/go-kratos/kratos/v2 -go 1.13 +go 1.15 require ( - github.com/BurntSushi/toml v0.3.1 - github.com/HdrHistogram/hdrhistogram-go v1.0.1 // indirect - github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect - github.com/aristanetworks/goarista v0.0.0-20190912214011-b54698eaaca6 // indirect - github.com/coreos/go-semver v0.3.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect - github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d // indirect - github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect - github.com/cznic/strutil v0.0.0-20181122101858-275e90344537 // indirect - github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 - github.com/dustin/go-humanize v1.0.0 // indirect - github.com/fatih/color v1.10.0 - github.com/fsnotify/fsnotify v1.4.7 - github.com/go-ole/go-ole v1.2.4 // indirect - github.com/go-playground/locales v0.12.1 // indirect - github.com/go-playground/universal-translator v0.16.0 // indirect - github.com/go-sql-driver/mysql v1.4.1 - github.com/go-zookeeper/zk v1.0.1 - github.com/gobuffalo/packr/v2 v2.7.1 - github.com/gogo/protobuf v1.3.1 - github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect - github.com/golang/mock v1.3.1 // indirect - github.com/golang/protobuf v1.3.5 - github.com/google/uuid v1.1.1 // indirect - github.com/gorilla/websocket v1.4.2 // indirect - github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway v1.14.3 // indirect - github.com/leodido/go-urn v1.1.0 // indirect - github.com/montanaflynn/stats v0.5.0 - github.com/opentracing/opentracing-go v1.1.0 - github.com/openzipkin/zipkin-go v0.2.1 - github.com/otokaze/mock v0.0.0-20190125081256-8282b7a7c7c3 - github.com/philchia/agollo/v4 v4.1.1 - github.com/pkg/errors v0.8.1 - github.com/prometheus/client_golang v1.5.1 - github.com/prometheus/procfs v0.0.11 // indirect - github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 // indirect - github.com/rogpeppe/go-internal v1.5.0 // indirect - github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/samuel/go-zookeeper 
v0.0.0-20180130194729-c4fab1ac1bec // indirect - github.com/shirou/gopsutil v2.19.11+incompatible - github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 - github.com/sirupsen/logrus v1.5.0 - github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/testify v1.6.1 - github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc // indirect - github.com/tsuna/gohbase v0.0.0-20190502052937-24ffed0537aa - github.com/uber/jaeger-client-go v2.25.0+incompatible - github.com/uber/jaeger-lib v2.4.0+incompatible // indirect - github.com/urfave/cli/v2 v2.3.0 - go.etcd.io/etcd v0.0.0-20200402134248-51bdeb39e698 - go.uber.org/atomic v1.6.0 - golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59 // indirect - golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e - golang.org/x/sys v0.0.0-20201202213521-69691e467435 // indirect - golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect - golang.org/x/tools v0.0.0-20191105231337-689d0f08e67a - google.golang.org/appengine v1.6.1 // indirect - google.golang.org/genproto v0.0.0-20200402124713-8ff61da6d932 - google.golang.org/grpc v1.29.1 - gopkg.in/go-playground/assert.v1 v1.2.1 // indirect - gopkg.in/go-playground/validator.v9 v9.29.1 - gopkg.in/yaml.v2 v2.4.0 - sigs.k8s.io/yaml v1.2.0 // indirect + github.com/fsnotify/fsnotify v1.4.9 + github.com/golang/protobuf v1.4.3 + github.com/google/uuid v1.1.2 + github.com/gorilla/mux v1.8.0 + github.com/imdario/mergo v0.3.6 + github.com/opentracing/opentracing-go v1.2.0 + golang.org/x/sync v0.0.0-20190423024810-112230192c58 + google.golang.org/genproto v0.0.0-20210114201628-6edceaf6022f + google.golang.org/grpc v1.35.0 + google.golang.org/protobuf v1.25.0 ) diff --git a/go.sum b/go.sum index c720fc096..f4c905468 100644 --- a/go.sum +++ b/go.sum @@ -1,535 +1,108 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/HdrHistogram/hdrhistogram-go v1.0.1 h1:GX8GAYDuhlFQnI2fRDHQhTlkHMz8bEn0jTI6LJU0mpw= -github.com/HdrHistogram/hdrhistogram-go v1.0.1/go.mod h1:BWJ+nMSHY3L41Zj7CA3uXnloDp7xxV0YvstAE7nKTaM= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/sarama v1.23.1/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= -github.com/aristanetworks/fsnotify v1.4.2/go.mod 
h1:D/rtu7LpjYM8tRJphJ0hUBYpjai8SfX+aSNsWDTq/Ks= -github.com/aristanetworks/glog v0.0.0-20180419172825-c15b03b3054f/go.mod h1:KASm+qXFKs/xjSoWn30NrWBBvdTTQq+UjkhjEJHfSFA= -github.com/aristanetworks/goarista v0.0.0-20190912214011-b54698eaaca6 h1:6bZNnQcA2fkzH9AhZXbp2nDqbWa4bBqFeUb70Zq1HBM= -github.com/aristanetworks/goarista v0.0.0-20190912214011-b54698eaaca6/go.mod h1:Z4RTxGAuYhPzcq8+EdRM+R8M48Ssle2TsWtwRKa+vns= -github.com/aristanetworks/splunk-hec-go v0.3.3/go.mod h1:1VHO9r17b0K7WmOlLb9nTk/2YanvOEnLMUgsFrxBROc= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28= -github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d h1:SwD98825d6bdB+pEuTxWOXiSjBrHdOl/UVp75eI7JT8= -github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d/go.mod 
h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= -github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso= -github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= -github.com/cznic/strutil v0.0.0-20181122101858-275e90344537 h1:MZRmHqDBd0vxNwenEbKSQqRVT24d3C05ft8kduSwlqM= -github.com/cznic/strutil v0.0.0-20181122101858-275e90344537/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= -github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4 h1:qk/FSDDxo05wdJH28W+p5yivv7LuLYLRXPPD8KQCtZs= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg= -github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod 
h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= -github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= -github.com/go-playground/locales v0.12.1 h1:2FITxuFt/xuCNP1Acdhv62OzaCiviiE4kotfhkmOqEc= -github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= -github.com/go-playground/universal-translator v0.16.0 h1:X++omBR/4cE2MNg91AoC3rmGrCjJ8eAeUP/K/EKx4DM= -github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= -github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-zookeeper/zk v1.0.1 h1:LmXNmSnkNsNKai+aDu6sHRr8ZJzIrHJo8z8Z4sm8cT8= -github.com/go-zookeeper/zk v1.0.1/go.mod h1:gpJdHazfkmlg4V0rt0vYeHYJHSL8hHFwV0qOd+HRTJE= -github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.1 h1:OQl5ys5MBea7OGCdvPbBJWRgnhC/fGona6QKfvFeau8= -github.com/gobuffalo/envy v1.7.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w= -github.com/gobuffalo/logger v1.0.1 h1:ZEgyRGgAm4ZAhAO45YXMs5Fp+bzGLESFewzAVBMKuTg= -github.com/gobuffalo/logger v1.0.1/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs= -github.com/gobuffalo/packd v0.3.0 h1:eMwymTkA1uXsqxS0Tpoop3Lc0u3kTfiMBE6nKtQU4g4= -github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q= -github.com/gobuffalo/packr/v2 v2.7.1 h1:n3CIW5T17T8v4GGK5sWXLVWJhCz7b5aNLSxW6gYim4o= -github.com/gobuffalo/packr/v2 v2.7.1/go.mod h1:qYEvAazPaVxy7Y7KR0W8qYEE+RymX74kETFqjFoFlOc= -github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux 
v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 h1:0IKlLyQ3Hs9nDaiK5cSHAGmcQEIC8l2Ts1u6x5Dfrqg= -github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.14.3 h1:OCJlWkOUoTnl0neNGlf4fUm3TmbEtguw7vR+nGtnDjY= -github.com/grpc-ecosystem/grpc-gateway v1.14.3/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb1-client v0.0.0-20190809212627-fc22c7df067e/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= -github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/reedsolomon v1.9.2/go.mod h1:CwCi+NUr9pqSVktrkN+Ondf06rkhYZ/pcNv7fu+8Un4= -github.com/konsorten/go-windows-terminal-sequences 
v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/leodido/go-urn v1.1.0 h1:Sm1gr51B1kKyfD2BlRcLSiEkffoG96g6TPv6eRoEiB8= -github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/montanaflynn/stats v0.5.0 h1:2EkzeTSqBB4V4bJwWrt5gIIrZmpJBcoIRGS2kWLgzmk= -github.com/montanaflynn/stats v0.5.0/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty 
v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/openconfig/gnmi v0.0.0-20190823184014-89b2bf29312c/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= -github.com/openconfig/reference v0.0.0-20190727015836-8dfd928c9696/go.mod h1:ym2A+zigScwkSEb/cVQB0/ZMpU3rqiH6X7WRRsxgOGw= -github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin/zipkin-go v0.2.1 h1:noL5/5Uf1HpVl3wNsfkZhIKbSWCVi5jgqkONNx8PXcA= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/otokaze/mock v0.0.0-20190125081256-8282b7a7c7c3 h1:zjmNboC3QFuMdJSaZJ7Qvi3HUxWXPdj7wb3rc4jH5HI= -github.com/otokaze/mock v0.0.0-20190125081256-8282b7a7c7c3/go.mod h1:pLR8n2aimFxvvDJ6n8JuQWthMGezCYMjuhlaTjPTZf0= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/philchia/agollo/v4 v4.1.1 h1:6AFFJ8+J1Ru5SPclUyMWLUmd62Ak/1fqZrEthiL6UeE= -github.com/philchia/agollo/v4 v4.1.1/go.mod h1:SBdQmfqqu/XCWJ1MDzYcCL3X+p3VJ+uQBy0nxxqjexg= -github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod 
h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= -github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= -github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= -github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= -github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI= -github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 h1:HQagqIiBmr8YXawX/le3+O26N+vPPC1PtjaF3mwnook= -github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.4.0 h1:LUa41nrWTQNGhzdsZ5lTnkwbNjj6rXTdazA1cSdjkOY= -github.com/rogpeppe/go-internal v1.4.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.5.0 h1:Usqs0/lDK/NqTkvrmKSwA/3XkZAs7ZAW/eLeQ2MVBTw= -github.com/rogpeppe/go-internal v1.5.0/go.mod 
h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec h1:6ncX5ko6B9LntYM0YBRXkiSaZMmLYeZ/NWcmeB43mMY= -github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/shirou/gopsutil v2.19.11+incompatible h1:lJHR0foqAjI4exXqWsU3DbH7bX1xvdhGdnXTIARA9W4= -github.com/shirou/gopsutil v2.19.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 h1:xT+JlYxNGqyT+XcU8iUrN18JYed2TvG9yN5ULG2jATM= -github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q= -github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= -github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 
h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/templexxx/cpufeat v0.0.0-20180724012125-cef66df7f161/go.mod h1:wM7WEvslTq+iOEAMDLSzhVuOt5BRZ05WirO+b09GHQU= -github.com/templexxx/xor v0.0.0-20181023030647-4e92f724b73b/go.mod h1:5XA7W9S6mni3h5uvOC75dA3m9CCCaS83lltmc0ukdi4= -github.com/tjfoc/gmsm v1.0.1/go.mod h1:XxO4hdhhrzAd+G4CjDqaOkd0hUzmtPR/d3EiBBMn/wc= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc h1:yUaosFVTJwnltaHbSNC3i82I92quFs+OFPRl8kNMVwo= -github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tsuna/gohbase v0.0.0-20190502052937-24ffed0537aa h1:V/ABqiqsgqmpoIcLDSpJ1KqPfbxRmkcfET5+BRy9ctM= -github.com/tsuna/gohbase v0.0.0-20190502052937-24ffed0537aa/go.mod h1:3HfLQly3YNLGxNv/2YOfmz30vcjG9hbuME1GpxoLlGs= -github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U= -github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-lib v2.4.0+incompatible h1:fY7QsGQWiCt8pajv4r7JEvmATdCVaWxXbjwyYwsNaLQ= -github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M= -github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/xtaci/kcp-go v5.4.5+incompatible/go.mod h1:bN6vIwHQbfHaHtFpEssmWsN45a+AZwO7eyRCmEIbtvE= -github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae/go.mod h1:gXtu8J62kEgmN++bm9BVICuT/e8yiLI2KFobd/TRFsE= -go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg= -go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/etcd v0.0.0-20200402134248-51bdeb39e698 
h1:jWtjCJX1qxhHISBMLRztWwR+EXkI7MJAF2HjHAE/x/I= -go.etcd.io/etcd v0.0.0-20200402134248-51bdeb39e698/go.mod h1:YoUyTScD3Vcv2RBm3eGVOq7i1ULiz3OuXoQFWOirmAM= -go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= -go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59 h1:3zb4D3T4G8jdExgVU/95+vQXfpEPiMdCaZgmGVxjNHM= -golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190912160710-24e19bdeb0f2 h1:4dVFTC832rPn4pomLSz1vA+are2+dU19w1H8OngV7nc= -golang.org/x/net v0.0.0-20190912160710-24e19bdeb0f2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190912141932-bc967efca4b8 h1:41hwlulw1prEMBxLQSlMSux1zxJf07B3WPsdjJlKZxE= -golang.org/x/sys v0.0.0-20190912141932-bc967efca4b8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201202213521-69691e467435 h1:25AvDqqB9PrNqj1FLf2/70I4W0L19qqoaFq3gjNwbKk= -golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools 
v0.0.0-20190606124116-d0a3d012864b h1:mSUCVIwDx4hfXJfWsOPfdzEHxzb2Xjl6BQ8YgPnazQA= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190912185636-87d9f09c5d89 h1:WiVZGyzQN7gPNLRkkpsNX3jC0Jx5j9GxadCZW/8eXw0= -golang.org/x/tools v0.0.0-20190912185636-87d9f09c5d89/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191004055002-72853e10c5a3 h1:2AmBLzhAfXj+2HCW09VCkJtHIYgHTIPcTeYqgP7Bwt0= -golang.org/x/tools v0.0.0-20191004055002-72853e10c5a3/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191105231337-689d0f08e67a h1:RzzIfXstYPS78k0QViPGpDcTlV+QuYrbxVmsxDHdxTs= -golang.org/x/tools v0.0.0-20191105231337-689d0f08e67a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20200402124713-8ff61da6d932 h1:aw1IXx+GKsPxp8MaZuDaKwNdOno9liI4TElk87LJFAo= -google.golang.org/genproto v0.0.0-20200402124713-8ff61da6d932/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20210114201628-6edceaf6022f h1:izedQ6yVIc5mZsRuXzmSreCOlzI0lCU1HpG8yEdMiKw= +google.golang.org/genproto v0.0.0-20210114201628-6edceaf6022f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1 h1:q4XQuHFC6I28BKZpo6IYyb3mNO+l7lSOxRuYTCiDfXk= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc 
v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/bsm/ratelimit.v1 v1.0.0-20160220154919-db14e161995a/go.mod h1:KF9sEfUPAXdG8Oev9e99iLGnl2uJMjc5B+4y3O7x610= +google.golang.org/grpc v1.35.0 h1:TwIQcH3es+MojMVojxxfQ3l3OF2KzlRxML2xZq0kRo8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= -gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= -gopkg.in/go-playground/validator.v9 v9.29.1 h1:SvGtYmN60a5CVKTOzMSyfzWDeZRxRuGvRQyEAKbw1xc= -gopkg.in/go-playground/validator.v9 v9.29.1/go.mod 
h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= -gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= -gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= -gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= -gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= -gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= -gopkg.in/redis.v4 v4.2.4/go.mod h1:8KREHdypkCEojGKQcjMqAODMICIVwZAONWq8RowTITA= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/internal/README.md b/internal/README.md new file mode 100644 index 000000000..a57dcfad0 --- /dev/null +++ b/internal/README.md @@ -0,0 +1 @@ +# internal diff --git a/internal/host/host.go b/internal/host/host.go new file mode 100644 index 000000000..3ec498e05 --- /dev/null +++ b/internal/host/host.go @@ -0,0 +1,72 @@ +package host + +import ( + "fmt" + "net" + "strconv" +) + +var ( + privateAddrs = []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "100.64.0.0/10", "fd00::/8"} +) + +func isPrivateIP(addr string) bool { + ipAddr := net.ParseIP(addr) + for _, privateAddr := range privateAddrs { + if _, priv, err := net.ParseCIDR(privateAddr); err == nil { + if priv.Contains(ipAddr) { + return true + } + } + } + return false +} + +// Port return a real port. +func Port(lis net.Listener) (int, bool) { + if addr, ok := lis.Addr().(*net.TCPAddr); ok { + return addr.Port, true + } + return 0, false +} + +// Extract returns a private addr and port. 
+func Extract(hostport string, lis net.Listener) (string, error) { + addr, port, err := net.SplitHostPort(hostport) + if err != nil { + return "", err + } + if lis != nil { + if p, ok := Port(lis); ok { + port = strconv.Itoa(p) + } + } + if len(addr) > 0 && (addr != "0.0.0.0" && addr != "[::]" && addr != "::") { + return net.JoinHostPort(addr, port), nil + } + ifaces, err := net.Interfaces() + if err != nil { + return "", fmt.Errorf("Failed to get net interfaces: %v", err) + } + for _, iface := range ifaces { + addrs, err := iface.Addrs() + if err != nil { + continue + } + for _, rawAddr := range addrs { + var ip net.IP + switch addr := rawAddr.(type) { + case *net.IPAddr: + ip = addr.IP + case *net.IPNet: + ip = addr.IP + default: + continue + } + if isPrivateIP(ip.String()) { + return net.JoinHostPort(ip.String(), port), nil + } + } + } + return "", nil +} diff --git a/internal/host/host_test.go b/internal/host/host_test.go new file mode 100644 index 000000000..900f3ad0d --- /dev/null +++ b/internal/host/host_test.go @@ -0,0 +1,63 @@ +package host + +import ( + "net" + "testing" +) + +func TestPrivateIP(t *testing.T) { + tests := []struct { + addr string + expect bool + }{ + {"10.1.0.1", true}, + {"172.16.0.1", true}, + {"192.168.1.1", true}, + {"8.8.8.8", false}, + } + for _, test := range tests { + t.Run(test.addr, func(t *testing.T) { + res := isPrivateIP(test.addr) + if res != test.expect { + t.Fatalf("expected %t got %t", test.expect, res) + } + }) + } +} + +func TestExtract(t *testing.T) { + tests := []struct { + addr string + expect string + }{ + {"127.0.0.1:80", "127.0.0.1:80"}, + {"10.0.0.1:80", "10.0.0.1:80"}, + {"172.16.0.1:80", "172.16.0.1:80"}, + {"192.168.1.1:80", "192.168.1.1:80"}, + {"0.0.0.0:80", ""}, + {"[::]:80", ""}, + {":80", ""}, + } + for _, test := range tests { + t.Run(test.addr, func(t *testing.T) { + res, err := Extract(test.addr, nil) + if err != nil { + t.Fatal(err) + } + if res != test.expect && (test.expect == "" && test.addr == test.expect) { + t.Fatalf("expected %s got %s", test.expect, res) + } + }) + } +} + +func TestPort(t *testing.T) { + lis, err := net.Listen("tcp", ":0") + if err != nil { + t.Fatal(err) + } + port, ok := Port(lis) + if !ok || port == 0 { + t.Fatalf("expected: %s got %d", lis.Addr().String(), port) + } +} diff --git a/log/README.md b/log/README.md new file mode 100644 index 000000000..80758192e --- /dev/null +++ b/log/README.md @@ -0,0 +1,15 @@ +# Log + +## Usage + +### Structured logging + +``` +logger := stdlog.NewLogger(stdlog.Writer(os.Stdout)) +log := log.NewHelper("module_name", logger) +// Levels +log.Info("some log") +log.Infof("format %s", "some log") +log.Infow("field_name", "some log") +``` + diff --git a/log/helper.go b/log/helper.go new file mode 100644 index 000000000..24aa16cba --- /dev/null +++ b/log/helper.go @@ -0,0 +1,86 @@ +package log + +import ( + "fmt" +) + +var nop Logger = new(nopLogger) + +// Helper is a logger helper. +type Helper struct { + debug Logger + info Logger + warn Logger + err Logger +} + +// NewHelper new a logger helper. +func NewHelper(name string, logger Logger) *Helper { + log := With(logger, "module", name) + return &Helper{ + debug: Debug(log), + info: Info(log), + warn: Warn(log), + err: Error(log), + } +} + +// Debug logs a message at debug level. +func (h *Helper) Debug(a ...interface{}) { + h.debug.Print("message", fmt.Sprint(a...)) +} + +// Debugf logs a message at debug level. 
+func (h *Helper) Debugf(format string, a ...interface{}) { + h.debug.Print("message", fmt.Sprintf(format, a...)) +} + +// Debugw logs a message at debug level. +func (h *Helper) Debugw(kvpair ...interface{}) { + h.debug.Print(kvpair...) +} + +// Info logs a message at info level. +func (h *Helper) Info(a ...interface{}) { + h.info.Print("message", fmt.Sprint(a...)) +} + +// Infof logs a message at info level. +func (h *Helper) Infof(format string, a ...interface{}) { + h.info.Print("message", fmt.Sprintf(format, a...)) +} + +// Infow logs a message at info level. +func (h *Helper) Infow(kvpair ...interface{}) { + h.info.Print(kvpair...) +} + +// Warn logs a message at warn level. +func (h *Helper) Warn(a ...interface{}) { + h.warn.Print("message", fmt.Sprint(a...)) +} + +// Warnf logs a message at warnf level. +func (h *Helper) Warnf(format string, a ...interface{}) { + h.warn.Print("message", fmt.Sprintf(format, a...)) +} + +// Warnw logs a message at warnf level. +func (h *Helper) Warnw(kvpair ...interface{}) { + h.warn.Print(kvpair...) +} + +// Error logs a message at error level. +func (h *Helper) Error(a ...interface{}) { + h.err.Print("message", fmt.Sprint(a...)) +} + +// Errorf logs a message at error level. +func (h *Helper) Errorf(format string, a ...interface{}) { + h.err.Print("message", fmt.Sprintf(format, a...)) +} + +// Errorw logs a message at error level. +func (h *Helper) Errorw(kvpair ...interface{}) { + h.err.Print(kvpair...) +} diff --git a/log/helper_test.go b/log/helper_test.go new file mode 100644 index 000000000..51c45ad41 --- /dev/null +++ b/log/helper_test.go @@ -0,0 +1,20 @@ +package log + +import ( + "testing" +) + +func TestHelper(t *testing.T) { + log := NewHelper("test", DefaultLogger) + log.Debug("test debug") + log.Debugf("test %s", "debug") + log.Debugw("log", "test debug") +} + +func TestHelperLevel(t *testing.T) { + log := NewHelper("test", DefaultLogger) + log.Debug("test debug") + log.Info("test info") + log.Warn("test warn") + log.Error("test error") +} diff --git a/log/level.go b/log/level.go new file mode 100644 index 000000000..dcd26fef9 --- /dev/null +++ b/log/level.go @@ -0,0 +1,40 @@ +package log + +// Level is a logger level. +type Level int8 + +const ( + // LevelDebug is logger debug level. + LevelDebug Level = iota + // LevelInfo is logger info level. + LevelInfo + // LevelWarn is logger warn level. + LevelWarn + // LevelError is logger error level. + LevelError +) + +const ( + // LevelKey is logger level key. + LevelKey = "level" +) + +// Enabled compare whether the logging level is enabled. +func (l Level) Enabled(lv Level) bool { + return lv >= l +} + +func (l Level) String() string { + switch l { + case LevelDebug: + return "DEBUG" + case LevelInfo: + return "INFO" + case LevelWarn: + return "WARN" + case LevelError: + return "ERROR" + default: + return "" + } +} diff --git a/log/log.go b/log/log.go new file mode 100644 index 000000000..3884b6b54 --- /dev/null +++ b/log/log.go @@ -0,0 +1,48 @@ +package log + +var ( + // DefaultLogger is default logger. + DefaultLogger Logger = NewStdLogger() +) + +// Logger is a logger interface. +type Logger interface { + Print(pairs ...interface{}) +} + +type logger struct { + log Logger + pairs []interface{} +} + +func (l *logger) Print(pairs ...interface{}) { + l.log.Print(append(pairs, l.pairs...)...) +} + +// With with logger kv pairs. 
+func With(log Logger, pairs ...interface{}) Logger { + if len(pairs) == 0 { + return log + } + return &logger{log: log, pairs: pairs} +} + +// Debug returns a debug logger. +func Debug(log Logger) Logger { + return With(log, LevelKey, LevelDebug) +} + +// Info returns a info logger. +func Info(log Logger) Logger { + return With(log, LevelKey, LevelInfo) +} + +// Warn return a warn logger. +func Warn(log Logger) Logger { + return With(log, LevelKey, LevelWarn) +} + +// Error returns a error logger. +func Error(log Logger) Logger { + return With(log, LevelKey, LevelError) +} diff --git a/log/log_test.go b/log/log_test.go new file mode 100644 index 000000000..d99fc121c --- /dev/null +++ b/log/log_test.go @@ -0,0 +1,13 @@ +package log + +import ( + "testing" +) + +func TestLogger(t *testing.T) { + logger := NewStdLogger() + Debug(logger).Print("log", "test debug") + Info(logger).Print("log", "test info") + Warn(logger).Print("log", "test warn") + Error(logger).Print("log", "test error") +} diff --git a/log/nop.go b/log/nop.go new file mode 100644 index 000000000..bca1f0a53 --- /dev/null +++ b/log/nop.go @@ -0,0 +1,5 @@ +package log + +type nopLogger struct{} + +func (l *nopLogger) Print(kvpair ...interface{}) {} diff --git a/log/std.go b/log/std.go new file mode 100644 index 000000000..e5eaf6ffb --- /dev/null +++ b/log/std.go @@ -0,0 +1,45 @@ +package log + +import ( + "bytes" + "fmt" + "log" + "os" + "sync" +) + +var _ Logger = (*stdLogger)(nil) + +type stdLogger struct { + log *log.Logger + pool *sync.Pool +} + +// NewStdLogger new a std logger with options. +func NewStdLogger() Logger { + return &stdLogger{ + log: log.New(os.Stderr, "", log.LstdFlags), + pool: &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, + } +} + +// Print print the kv pairs log. +func (s *stdLogger) Print(kvpair ...interface{}) { + if len(kvpair) == 0 { + return + } + if len(kvpair)%2 != 0 { + kvpair = append(kvpair, "") + } + buf := s.pool.Get().(*bytes.Buffer) + for i := 0; i < len(kvpair); i += 2 { + fmt.Fprintf(buf, "%s=%v ", kvpair[i], kvpair[i+1]) + } + s.log.Println(buf.String()) + buf.Reset() + s.pool.Put(buf) +} diff --git a/log/std_test.go b/log/std_test.go new file mode 100644 index 000000000..60975856a --- /dev/null +++ b/log/std_test.go @@ -0,0 +1,12 @@ +package log + +import "testing" + +func TestFmtLogger(t *testing.T) { + logger := NewStdLogger() + + Debug(logger).Print("log", "test debug") + Info(logger).Print("log", "test info") + Warn(logger).Print("log", "test warn") + Error(logger).Print("log", "test error") +} diff --git a/metrics/README.md b/metrics/README.md new file mode 100644 index 000000000..f36beb4ed --- /dev/null +++ b/metrics/README.md @@ -0,0 +1,2 @@ +# Metrics +* [Prometheus](https://github.com/go-kratos/prometheus) diff --git a/metrics/metrics.go b/metrics/metrics.go new file mode 100644 index 000000000..4e221eb7a --- /dev/null +++ b/metrics/metrics.go @@ -0,0 +1,22 @@ +package metrics + +// Counter is metrics counter. +type Counter interface { + With(lvs ...string) Counter + Inc() + Add(delta float64) +} + +// Gauge is metrics gauge. +type Gauge interface { + With(lvs ...string) Gauge + Set(value float64) + Add(delta float64) + Sub(delta float64) +} + +// Observer is metrics observer. 
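Because `Logger` above is a single-method interface and levels are just another key/value pair, a custom sink only has to implement `Print`. A minimal sketch of a hypothetical level-filtering wrapper around the std logger (`levelFilter` is not part of this change):

```go
package main

import (
	"github.com/go-kratos/kratos/v2/log"
)

// levelFilter is a hypothetical wrapper that drops records below a minimum
// level by inspecting the LevelKey pair attached by Debug/Info/Warn/Error.
type levelFilter struct {
	next log.Logger
	min  log.Level
}

func (f *levelFilter) Print(kvpair ...interface{}) {
	for i := 0; i+1 < len(kvpair); i += 2 {
		if kvpair[i] == log.LevelKey {
			if lv, ok := kvpair[i+1].(log.Level); ok && !f.min.Enabled(lv) {
				return // record level is below the threshold, drop it
			}
		}
	}
	f.next.Print(kvpair...)
}

func main() {
	logger := &levelFilter{next: log.NewStdLogger(), min: log.LevelInfo}
	h := log.NewHelper("example", logger)
	h.Debug("dropped")  // filtered out by levelFilter
	h.Info("delivered") // reaches the std logger
}
```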
+type Observer interface { + With(lvs ...string) Observer + Observe(float64) +} diff --git a/middleware/logging/logging.go b/middleware/logging/logging.go new file mode 100644 index 000000000..2486b0603 --- /dev/null +++ b/middleware/logging/logging.go @@ -0,0 +1,118 @@ +package logging + +import ( + "context" + + "github.com/go-kratos/kratos/v2/errors" + "github.com/go-kratos/kratos/v2/log" + "github.com/go-kratos/kratos/v2/middleware" + "github.com/go-kratos/kratos/v2/transport/grpc" + "github.com/go-kratos/kratos/v2/transport/http" +) + +// Option is HTTP logging option. +type Option func(*options) + +type options struct { + logger log.Logger +} + +// WithLogger with middleware logger. +func WithLogger(logger log.Logger) Option { + return func(o *options) { + o.logger = logger + } +} + +// Server is an server logging middleware. +func Server(opts ...Option) middleware.Middleware { + options := options{ + logger: log.DefaultLogger, + } + for _, o := range opts { + o(&options) + } + log := log.NewHelper("middleware/logging", options.logger) + return func(handler middleware.Handler) middleware.Handler { + return func(ctx context.Context, req interface{}) (interface{}, error) { + var ( + path string + method string + ) + if info, ok := http.FromServerContext(ctx); ok { + path = info.Request.RequestURI + method = info.Request.Method + } else if info, ok := grpc.FromServerContext(ctx); ok { + path = info.FullMethod + method = "POST" + } + reply, err := handler(ctx, req) + if err != nil { + log.Errorw( + "kind", "server", + "path", path, + "method", method, + "code", errors.Code(err), + "error", err.Error(), + ) + return nil, err + } + log.Infow( + "kind", "server", + "path", path, + "method", method, + "code", 0, + ) + return reply, nil + } + } +} + +// Client is an client logging middleware. +func Client(opts ...Option) middleware.Middleware { + options := options{ + logger: log.DefaultLogger, + } + for _, o := range opts { + o(&options) + } + log := log.NewHelper("middleware/logging", options.logger) + return func(handler middleware.Handler) middleware.Handler { + return func(ctx context.Context, req interface{}) (interface{}, error) { + var ( + component string + path string + method string + ) + if info, ok := http.FromClientContext(ctx); ok { + component = "HTTP" + path = info.Request.RequestURI + method = info.Request.Method + } else if info, ok := grpc.FromClientContext(ctx); ok { + component = "gRPC" + path = info.FullMethod + method = "POST" + } + reply, err := handler(ctx, req) + if err != nil { + log.Errorw( + "kind", "client", + "component", component, + "path", path, + "method", method, + "code", errors.Code(err), + "error", err.Error(), + ) + return nil, err + } + log.Infow( + "kind", "client", + "component", component, + "path", path, + "method", method, + "code", 0, + ) + return reply, nil + } + } +} diff --git a/middleware/middleware.go b/middleware/middleware.go new file mode 100644 index 000000000..8d687c0f7 --- /dev/null +++ b/middleware/middleware.go @@ -0,0 +1,21 @@ +package middleware + +import ( + "context" +) + +// Handler defines the handler invoked by Middleware. +type Handler func(ctx context.Context, req interface{}) (interface{}, error) + +// Middleware is HTTP/gRPC transport middleware. +type Middleware func(Handler) Handler + +// Chain returns a Middleware that specifies the chained handler for endpoint. 
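The metrics interfaces above (Counter, Gauge, Observer) are all the framework itself depends on; concrete collectors live in separate modules such as the go-kratos/prometheus adapter linked in the README. As a sketch, a hypothetical in-memory Counter, handy in unit tests, only needs the three methods:

```go
package main

import (
	"fmt"

	"github.com/go-kratos/kratos/v2/metrics"
)

// memCounter is a hypothetical in-memory metrics.Counter for tests.
type memCounter struct {
	labels []string
	value  float64
}

func (c *memCounter) With(lvs ...string) metrics.Counter {
	// return a child counter carrying the extra label values
	return &memCounter{labels: append(append([]string{}, c.labels...), lvs...)}
}

func (c *memCounter) Inc()              { c.value++ }
func (c *memCounter) Add(delta float64) { c.value += delta }

func main() {
	var requests metrics.Counter = &memCounter{}
	requests.With("path", "/helloworld").Inc()
	fmt.Println("counter wired up")
}
```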
+func Chain(outer Middleware, others ...Middleware) Middleware {
+	return func(next Handler) Handler {
+		for i := len(others) - 1; i >= 0; i-- {
+			next = others[i](next)
+		}
+		return outer(next)
+	}
+}
diff --git a/middleware/recovery/recovery.go b/middleware/recovery/recovery.go
new file mode 100644
index 000000000..940d2fc5d
--- /dev/null
+++ b/middleware/recovery/recovery.go
@@ -0,0 +1,64 @@
+package recovery
+
+import (
+	"context"
+	"runtime"
+
+	"github.com/go-kratos/kratos/v2/errors"
+	"github.com/go-kratos/kratos/v2/log"
+	"github.com/go-kratos/kratos/v2/middleware"
+)
+
+// HandlerFunc is recovery handler func.
+type HandlerFunc func(ctx context.Context, req, err interface{}) error
+
+// Option is recovery option.
+type Option func(*options)
+
+type options struct {
+	handler HandlerFunc
+	logger  log.Logger
+}
+
+// WithHandler with recovery handler.
+func WithHandler(h HandlerFunc) Option {
+	return func(o *options) {
+		o.handler = h
+	}
+}
+
+// WithLogger with recovery logger.
+func WithLogger(logger log.Logger) Option {
+	return func(o *options) {
+		o.logger = logger
+	}
+}
+
+// Recovery is a server middleware that recovers from any panics.
+func Recovery(opts ...Option) middleware.Middleware {
+	options := options{
+		logger: log.DefaultLogger,
+		handler: func(ctx context.Context, req, err interface{}) error {
+			return errors.Unknown("Unknown", "panic triggered: %v", err)
+		},
+	}
+	for _, o := range opts {
+		o(&options)
+	}
+	log := log.NewHelper("middleware/recovery", options.logger)
+	return func(handler middleware.Handler) middleware.Handler {
+		return func(ctx context.Context, req interface{}) (reply interface{}, err error) {
+			defer func() {
+				if rerr := recover(); rerr != nil {
+					buf := make([]byte, 64<<10)
+					n := runtime.Stack(buf, false)
+					buf = buf[:n]
+					log.Errorf("%v: %+v\n%s\n", rerr, req, buf)
+
+					err = options.handler(ctx, req, rerr)
+				}
+			}()
+			return handler(ctx, req)
+		}
+	}
+}
diff --git a/middleware/status/status.go b/middleware/status/status.go
new file mode 100644
index 000000000..29fb67e99
--- /dev/null
+++ b/middleware/status/status.go
@@ -0,0 +1,113 @@
+package status
+
+import (
+	"context"
+
+	"github.com/go-kratos/kratos/v2/errors"
+	"github.com/go-kratos/kratos/v2/middleware"
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	"google.golang.org/genproto/googleapis/rpc/errdetails"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// HandlerFunc is middleware error handler.
+type HandlerFunc func(error) error
+
+// Option is status option.
+type Option func(*options)
+
+type options struct {
+	handler HandlerFunc
+}
+
+// WithHandler with status handler.
+func WithHandler(h HandlerFunc) Option {
+	return func(o *options) {
+		o.handler = h
+	}
+}
+
+// Server is an error middleware.
+func Server(opts ...Option) middleware.Middleware {
+	options := options{
+		handler: errorEncode,
+	}
+	for _, o := range opts {
+		o(&options)
+	}
+	return func(handler middleware.Handler) middleware.Handler {
+		return func(ctx context.Context, req interface{}) (interface{}, error) {
+			reply, err := handler(ctx, req)
+			if err != nil {
+				return nil, options.handler(err)
+			}
+			return reply, nil
+		}
+	}
+}
+
+// Client is an error middleware.
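Chain, recovery and logging compose directly. A minimal sketch of wiring them around a handler, using only the APIs added in this change (the handler itself is made up for illustration):

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-kratos/kratos/v2/middleware"
	"github.com/go-kratos/kratos/v2/middleware/logging"
	"github.com/go-kratos/kratos/v2/middleware/recovery"
)

func main() {
	// The first argument to Chain is the outermost middleware: recovery
	// wraps logging, which wraps the final handler.
	m := middleware.Chain(recovery.Recovery(), logging.Server())
	h := m(func(ctx context.Context, req interface{}) (interface{}, error) {
		return "hello", nil
	})
	reply, err := h(context.Background(), "req")
	fmt.Println(reply, err)
}
```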
+func Client(opts ...Option) middleware.Middleware { + options := options{ + handler: errorDecode, + } + for _, o := range opts { + o(&options) + } + return func(handler middleware.Handler) middleware.Handler { + return func(ctx context.Context, req interface{}) (interface{}, error) { + reply, err := handler(ctx, req) + if err != nil { + return nil, options.handler(err) + } + return reply, nil + } + } +} + +func errorEncode(err error) error { + se, ok := errors.FromError(err) + if !ok { + se = &errors.StatusError{ + Code: 2, + } + } + gs := status.Newf(codes.Code(se.Code), "%s: %s", se.Reason, se.Message) + details := []proto.Message{ + &errdetails.ErrorInfo{ + Reason: se.Reason, + Metadata: map[string]string{"message": se.Message}, + }, + } + for _, any := range se.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { + continue + } + details = append(details, detail.Message) + } + gs, err = gs.WithDetails(details...) + if err != nil { + return err + } + return gs.Err() +} + +func errorDecode(err error) error { + gs := status.Convert(err) + se := &errors.StatusError{ + Code: int32(gs.Code()), + Details: gs.Proto().Details, + } + for _, detail := range gs.Details() { + switch d := detail.(type) { + case *errdetails.ErrorInfo: + se.Reason = d.Reason + se.Message = d.Metadata["message"] + return se + } + } + return se +} diff --git a/middleware/status/status_test.go b/middleware/status/status_test.go new file mode 100644 index 000000000..f6636bba9 --- /dev/null +++ b/middleware/status/status_test.go @@ -0,0 +1,21 @@ +package status + +import ( + "testing" + + "github.com/go-kratos/kratos/v2/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestErrEncoder(t *testing.T) { + err := errors.InvalidArgument("InvalidArgument", "format") + en := errorEncode(err) + if code := status.Code(en); code != codes.InvalidArgument { + t.Errorf("expected %d got %d", codes.InvalidArgument, code) + } + de := errorDecode(en) + if !errors.IsInvalidArgument(de) { + t.Errorf("expected %v got %v", err, de) + } +} diff --git a/middleware/tracing/tracing.go b/middleware/tracing/tracing.go new file mode 100644 index 000000000..bf4779ae6 --- /dev/null +++ b/middleware/tracing/tracing.go @@ -0,0 +1,128 @@ +package tracing + +import ( + "context" + + "github.com/go-kratos/kratos/v2/middleware" + "github.com/go-kratos/kratos/v2/transport/grpc" + "github.com/go-kratos/kratos/v2/transport/http" + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/opentracing/opentracing-go/log" + "google.golang.org/grpc/metadata" +) + +// Option is tracing option. +type Option func(*options) + +type options struct { + tracer opentracing.Tracer +} + +// WithTracer sets a custom tracer to be used for this middleware, otherwise the opentracing.GlobalTracer is used. +func WithTracer(tracer opentracing.Tracer) Option { + return func(o *options) { + o.tracer = tracer + } +} + +// Server returns a new server middleware for OpenTracing. 
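errorEncode and errorDecode above give the status middleware a symmetric mapping between the framework's StatusError and a gRPC status carrying an ErrorInfo detail. A sketch of the server side, using only APIs that appear in this change (the failing handler is made up):

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-kratos/kratos/v2/errors"
	"github.com/go-kratos/kratos/v2/middleware/status"
	grpcstatus "google.golang.org/grpc/status"
)

func main() {
	// status.Server converts the returned kratos error into a gRPC status
	// with an errdetails.ErrorInfo holding the reason and message.
	h := status.Server()(func(ctx context.Context, req interface{}) (interface{}, error) {
		return nil, errors.InvalidArgument("InvalidArgument", "bad request")
	})
	_, err := h(context.Background(), "req")
	fmt.Println(grpcstatus.Code(err)) // InvalidArgument
}
```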
+func Server(opts ...Option) middleware.Middleware {
+	options := options{
+		tracer: opentracing.GlobalTracer(),
+	}
+	for _, o := range opts {
+		o(&options)
+	}
+	return func(handler middleware.Handler) middleware.Handler {
+		return func(ctx context.Context, req interface{}) (reply interface{}, err error) {
+			var (
+				component   string
+				operation   string
+				spanContext opentracing.SpanContext
+			)
+			if info, ok := http.FromServerContext(ctx); ok {
+				// HTTP span
+				component = "HTTP"
+				operation = info.Request.RequestURI
+				spanContext, _ = options.tracer.Extract(
+					opentracing.HTTPHeaders,
+					opentracing.HTTPHeadersCarrier(info.Request.Header),
+				)
+			} else if info, ok := grpc.FromServerContext(ctx); ok {
+				// gRPC span
+				component = "gRPC"
+				operation = info.FullMethod
+				if md, ok := metadata.FromIncomingContext(ctx); ok {
+					spanContext, _ = options.tracer.Extract(
+						opentracing.HTTPHeaders,
+						opentracing.HTTPHeadersCarrier(md),
+					)
+				}
+			}
+			span := options.tracer.StartSpan(
+				operation,
+				ext.RPCServerOption(spanContext),
+				opentracing.Tag{Key: string(ext.Component), Value: component},
+			)
+			defer span.Finish()
+			if reply, err = handler(ctx, req); err != nil {
+				ext.Error.Set(span, true)
+				span.LogFields(
+					log.String("event", "error"),
+					log.String("message", err.Error()),
+				)
+			}
+			return
+		}
+	}
+}
+
+// Client returns a new client middleware for OpenTracing.
+func Client(opts ...Option) middleware.Middleware {
+	options := options{
+		tracer: opentracing.GlobalTracer(),
+	}
+	for _, o := range opts {
+		o(&options)
+	}
+	return func(handler middleware.Handler) middleware.Handler {
+		return func(ctx context.Context, req interface{}) (reply interface{}, err error) {
+			var (
+				operation string
+				parent    opentracing.SpanContext
+				carrier   opentracing.HTTPHeadersCarrier
+			)
+			if span := opentracing.SpanFromContext(ctx); span != nil {
+				parent = span.Context()
+			}
+			if info, ok := http.FromClientContext(ctx); ok {
+				// HTTP span
+				operation = info.Request.RequestURI
+				carrier = opentracing.HTTPHeadersCarrier(info.Request.Header)
+			} else if info, ok := grpc.FromClientContext(ctx); ok {
+				// gRPC span
+				operation = info.FullMethod
+				if md, ok := metadata.FromOutgoingContext(ctx); ok {
+					carrier = opentracing.HTTPHeadersCarrier(md)
+					ctx = metadata.NewOutgoingContext(ctx, md)
+				}
+			}
+			span := options.tracer.StartSpan(
+				operation,
+				opentracing.ChildOf(parent),
+				ext.SpanKindRPCClient,
+			)
+			defer span.Finish()
+			options.tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier)
+			ctx = opentracing.ContextWithSpan(ctx, span)
+			// send handler
+			if reply, err = handler(ctx, req); err != nil {
+				ext.Error.Set(span, true)
+				span.LogFields(
+					log.String("event", "error"),
+					log.String("message", err.Error()),
+				)
+			}
+			return
+		}
+	}
+}
diff --git a/misc/stat/dashboard/README.md b/misc/stat/dashboard/README.md
deleted file mode 100644
index f6826f55b..000000000
--- a/misc/stat/dashboard/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-#### dashboard
-
-> 监控模版,针对服务框架内的监控指标的UI展示。
-
-##### Requirments
-
-- [Grafana](https://grafana.com) >= v6.1.4
-- [Prometheus](https://prometheus.io) >= 2.x
-
-##### Quick start
-
-1. 搭建grafana
-2. 导入`prometheus.json`文件
-3. 修改对应的`Data source`
-4. 
保存 diff --git a/misc/stat/dashboard/prometheus.json b/misc/stat/dashboard/prometheus.json deleted file mode 100644 index d1529a8a7..000000000 --- a/misc/stat/dashboard/prometheus.json +++ /dev/null @@ -1,5917 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "id": 632, - "iteration": 1564558933838, - "links": [], - "panels": [ - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": true, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(40, 237, 51, 0.89)", - "rgba(242, 18, 18, 0.97)" - ], - "datasource": "$datasource", - "decimals": 1, - "format": "s", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 4, - "x": 0, - "y": 0 - }, - "hideTimeOverride": false, - "id": 78, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "dsType": "influxdb", - "expr": "min(time() - process_start_time_seconds{instance=~\"$instance\"})", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 20, - "tags": [] - } - ], - "thresholds": "", - "timeFrom": null, - "title": "启动时间", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": true, - "colors": [ - "rgba(50, 172, 45, 0.97)", - "rgba(237, 129, 40, 0.89)", - "rgba(245, 54, 54, 0.9)" - ], - "datasource": "$datasource", - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 4, - "x": 4, - "y": 0 - }, - "id": 79, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "dsType": "influxdb", - "expr": "max(process_open_fds{instance=~\"$instance\"})", - "format": "time_series", - "groupBy": [ - { - "params": [ - 
"$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 20, - "tags": [] - } - ], - "thresholds": "5000", - "title": "最大FDs", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "avg" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": true, - "colors": [ - "rgba(50, 172, 45, 0.97)", - "rgba(237, 129, 40, 0.89)", - "rgba(245, 54, 54, 0.9)" - ], - "datasource": "$datasource", - "decimals": 2, - "format": "decbytes", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 4, - "x": 8, - "y": 0 - }, - "id": 81, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "dsType": "influxdb", - "expr": "max(process_virtual_memory_bytes{instance=~\"$instance\"})", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 20, - "tags": [] - } - ], - "thresholds": "10240000000", - "title": "VM", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "avg" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": true, - "colors": [ - "rgba(50, 172, 45, 0.97)", - "rgba(237, 129, 40, 0.89)", - "rgba(245, 54, 54, 0.9)" - ], - "datasource": "$datasource", - "decimals": 2, - "format": "decbytes", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 4, - "x": 12, - "y": 0 - }, - "id": 82, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "dsType": "influxdb", - "expr": "max(process_resident_memory_bytes{instance=~\"$instance\"})", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": 
"time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 20, - "tags": [] - } - ], - "thresholds": "1024000000", - "title": "RM", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "avg" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": true, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "format": "percentunit", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 4, - "x": 16, - "y": 0 - }, - "id": 16, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "dsType": "influxdb", - "expr": "avg(irate(process_cpu_seconds_total{instance=~\"$instance\"}[5m]))", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 60, - "tags": [] - } - ], - "thresholds": "1,10", - "timeFrom": null, - "timeShift": null, - "title": "系统 CPU", - "type": "singlestat", - "valueFontSize": "100%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": true, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "format": "short", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 4, - "x": 20, - "y": 0 - }, - "id": 15, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "dsType": "influxdb", - "expr": "avg(max(go_memstats_alloc_bytes{instance=~\"$instance\"}) by (instance))", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - 
"params": [ - "null" - ], - "type": "fill" - } - ], - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 20, - "tags": [] - } - ], - "thresholds": "100000000", - "timeFrom": null, - "timeShift": null, - "title": "系统MEM", - "type": "singlestat", - "valueFontSize": "100%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "decimals": null, - "description": "网络的每秒包数量", - "editable": true, - "error": false, - "fill": 6, - "grid": {}, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 3 - }, - "id": 264, - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "hideEmpty": false, - "max": true, - "min": true, - "rightSide": false, - "show": true, - "sort": null, - "sortDesc": null, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "Outbound pps", - "transform": "negative-Y" - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "calculatedInterval": "2s", - "datasourceErrors": {}, - "errors": {}, - "expr": "irate(node_network_receive_packets_total{instance=\"$instance\", device!~\"^(lo|bond).*\"}[5m]) or irate(node_network_receive_packets{instance=\"$instance\", device!~\"^(lo|bond).*\"}[5m]) ", - "format": "time_series", - "hide": false, - "interval": "1m", - "intervalFactor": 1, - "legendFormat": "Inbound pps", - "metric": "", - "refId": "A", - "step": 120, - "target": "" - }, - { - "calculatedInterval": "2s", - "datasourceErrors": {}, - "errors": {}, - "expr": "irate(node_network_transmit_packets_total{instance_name=\"$instance\", device!~\"^(lo|bond).*\"}[5m]) or irate(node_network_transmit_packets{instance_name=\"$instance\", device!~\"^(lo|bond).*\"}[5m]) ", - "format": "time_series", - "hide": false, - "interval": "1m", - "intervalFactor": 1, - "legendFormat": "Outbound pps", - "metric": "", - "refId": "B", - "step": 120, - "target": "" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Network Packet Per Sec", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": "", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "decimals": null, - "description": "系统网络流量速率", - "editable": true, - "error": false, - "fill": 2, - "grid": {}, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 3 - }, - "id": 266, - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "hideEmpty": false, - "max": true, - "min": true, - "rightSide": false, - "show": true, - "sort": null, - "sortDesc": null, - "total": false, - 
"values": true - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "Outbound", - "transform": "negative-Y" - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "calculatedInterval": "2s", - "datasourceErrors": {}, - "errors": {}, - "expr": "irate(node_network_receive_bytes{instance=\"$instance\", device!~\"^(lo|bond).*\"}[5m]) * 8 or irate(node_network_receive_bytes_total{instance=\"$instance\", device!~\"^(lo|bond).*\"}[5m]) * 8", - "format": "time_series", - "interval": "$interval", - "intervalFactor": 1, - "legendFormat": "Inbound", - "metric": "", - "refId": "B", - "step": 300, - "target": "" - }, - { - "calculatedInterval": "2s", - "datasourceErrors": {}, - "errors": {}, - "expr": "irate(node_network_transmit_bytes{instance=\"$instance\", device!~\"^(lo|bond).*\"}[5m]) * 8 or irate(node_network_transmit_bytes_total{instance=\"$instance\", device!~\"^(lo|bond).*\"}[5m]) * 8", - "format": "time_series", - "interval": "$interval", - "intervalFactor": 1, - "legendFormat": "Outbound", - "metric": "", - "refId": "A", - "step": 300, - "target": "" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Network Traffic", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "bps", - "label": "", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "bps", - "logBase": 1, - "max": null, - "min": 0, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 10 - }, - "id": 231, - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 5, - "w": 8, - "x": 0, - "y": 4 - }, - "id": 208, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "avg(go_goroutines{instance=~\"$instance\"}) by (instance)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "interval": "10s", - "intervalFactor": 2, - "legendFormat": "{{instance}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 20, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Rounine数量", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": 
"short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 5, - "w": 8, - "x": 8, - "y": 4 - }, - "id": 210, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "avg(go_memstats_heap_objects{instance=~\"$instance\"}) by (instance)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "hide": false, - "interval": "10s", - "intervalFactor": 2, - "legendFormat": "{{instance}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 20, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "堆对象数", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 5, - "w": 8, - "x": 16, - "y": 4 - }, - "id": 212, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "go_gc_duration_seconds{quantile=\"1\", instance=~\"$instance\" }", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{instance}} ", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "GC耗时", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "s", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": 
null - } - } - ], - "title": "Runtime", - "type": "row" - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 11 - }, - "id": 200, - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 10 - }, - "id": 178, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": "total", - "sortDesc": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "sum(rate(http_server_requests_duration_ms_count{instance=~\"$instance\",path!~\"register|.*ping\"}[2m])) by (path)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "hide": false, - "interval": "10s", - "intervalFactor": 2, - "legendFormat": "{{path}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 20, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "HTTP服务QPS", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 10 - }, - "id": 41, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": "current", - "sortDesc": false, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "avg(increase(http_server_requests_duration_ms_sum{instance=~\"$instance\",path!~\"register|.*ping\"}[5m])/ increase(http_server_requests_duration_ms_count{instance=~\"$instance\",path!~\"register|.*ping\"}[5m]) >0) by (path)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "legendFormat": "{{path}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 2, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": 
"HTTP服务平均耗时", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "breakPoint": "50%", - "cacheTimeout": null, - "combine": { - "label": "Others", - "threshold": 0 - }, - "datasource": "$datasource", - "fontSize": "80%", - "format": "short", - "gridPos": { - "h": 8, - "w": 24, - "x": 0, - "y": 18 - }, - "id": 248, - "interval": null, - "legend": { - "percentage": true, - "show": true, - "values": true - }, - "legendType": "Right side", - "links": [], - "maxDataPoints": 3, - "nullPointMode": "connected", - "pieType": "pie", - "strokeWidth": 1, - "targets": [ - { - "expr": "sum(irate(http_server_requests_duration_ms_count{instance=~\"$instance\",caller=~\"$http_user\",path=~\"$http_method\",path!~\"register|.*ping\"}[5m])) by (caller,path)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 1, - "legendFormat": "{{caller}} => {{path}}", - "orderByTime": "ASC", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "tags": [] - } - ], - "title": "HTTP caller 占比", - "type": "grafana-piechart-panel", - "valueName": "current" - }, - { - "aliasColors": {}, - "bars": true, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 26 - }, - "id": 250, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": "max", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": false, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "sum(irate(http_server_requests_code_total{app=\"$app\",code!~\"0\",path=~\"$http_method\",caller=~\"$http_user\",env=\"$env\"}[5m])) BY (app, job, path, code) / IGNORING(code) GROUP_LEFT() sum(irate(http_server_requests_code_total{app=\"$app\",path=~\"$http_method\",caller=~\"$http_user\",env=\"$env\"}[5m])) BY (app, job, path) * 100", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "interval": "30s", - "intervalFactor": 2, - "legendFormat": "{{path}} : {{code}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 60, - "tags": [] - }, - { - "expr": "sum(irate(http_server_requests_code_total{instance=~\"$instance\",code!~\"0\",path=~\"$http_method\",caller=~\"$http_user\",path!~\"register|.*ping\"}[5m])) BY (path, code)", - "format": "time_series", - "hide": true, - "intervalFactor": 1, - 
"refId": "B" - }, - { - "expr": "sum(irate(http_server_requests_code_total{instance=~\"$instance\",path=~\"$http_method\",caller=~\"$http_user\",path!~\"register|.*ping\"}[5m])) BY ( path)", - "format": "time_series", - "hide": true, - "intervalFactor": 1, - "refId": "C" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": " HTTP服务错误率", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "percent", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 26 - }, - "id": 42, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": "current", - "sortDesc": false, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "sum(rate(http_server_requests_code_total{instance=~\"$instance\", code!=\"0\",path!~\"register|.*ping\"}[5m])) by (path, code)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "instant": false, - "interval": "60s", - "intervalFactor": 1, - "legendFormat": "{{path}} : {{code}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 20, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "HTTP服务错误返回码情况", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 35 - }, - "id": 252, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": "current", - "sortDesc": false, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "histogram_quantile(0.99, 
sum(irate(http_server_requests_duration_ms_bucket{instance=~\"$instance\",path=~\"$http_method\",caller=~\"$http_user\",path!~\"register|.*ping\"}[5m])) by (path,le))", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "pt99: {{path}}", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "[$app] HTTP服务 99分位响应时间", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 35 - }, - "id": 254, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": "current", - "sortDesc": false, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "histogram_quantile(0.90, sum(irate(http_server_requests_duration_ms_bucket{instance=~\"$instance\",path=~\"$http_method\",caller=~\"$http_user\",path!~\"register|.*ping\"}[5m])) by (path,le))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "pt90: {{path}}", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "[$app] HTTP服务 90分位响应时间", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "title": "HTTP", - "type": "row" - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 12 - }, - "id": 220, - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 11 - }, - "id": 45, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "sum(rate(grpc_server_requests_duration_ms_count{instance=~\"$instance\",method!~\".*Ping\"}[2m])) by (method)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - 
"type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "legendFormat": "grpc {{method}}", - "policy": "default", - "refId": "B", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 2, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "GRPC服务QPS", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 11 - }, - "id": 46, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "avg(increase(grpc_server_requests_duration_ms_sum{instance=~\"$instance\",method!~\".*Ping\"}[5m])/ increase(grpc_server_requests_duration_ms_count{instance=~\"$instance\"}[5m])>0 ) by (method)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "legendFormat": "{{method}}", - "policy": "default", - "refId": "B", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 2, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "GRPC服务平均耗时", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "breakPoint": "50%", - "cacheTimeout": null, - "combine": { - "label": "Others", - "threshold": 0 - }, - "datasource": "$datasource", - "fontSize": "80%", - "format": "short", - "gridPos": { - "h": 8, - "w": 24, - "x": 0, - "y": 18 - }, - "id": 256, - "interval": null, - "legend": { - "percentage": true, - "show": true, - "values": true - }, - "legendType": "Right side", - "links": [], - "maxDataPoints": 3, - "nullPointMode": "connected", - "pieType": "pie", - "strokeWidth": 1, - "targets": [ - { - "expr": 
"sum(irate(grpc_server_requests_duration_ms_count{instance=~\"$instance\",caller=~\"$grpc_caller\",method!~\".*Ping\"}[5m])) by (caller,method)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 1, - "legendFormat": "grpc {{caller}} => {{method}}", - "orderByTime": "ASC", - "policy": "default", - "refId": "B", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "tags": [] - } - ], - "title": "RPC caller 占比", - "type": "grafana-piechart-panel", - "valueName": "current" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 26 - }, - "id": 47, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(grpc_server_requests_code_total{instance=~\"$instance\", code!=\"0\",method!~\".*Ping\"}[5m])) by (method, code)", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{method}} {{code}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "GRPC错误码返回情况", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": true, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 26 - }, - "id": 258, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": false, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "sum(irate(grpc_server_requests_code_total{instance=~\"$instance\",code!~\"0\",caller=~\"$grpc_caller\",method!~\".*Ping\"}[5m])) BY ( method, code) / IGNORING(code) GROUP_LEFT() sum(irate(grpc_server_requests_code_total{instance=~\"$instance\",caller=~\"$grpc_caller\",method!~\".*Ping\"}[5m])) BY ( method) * 100", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "legendFormat": "grpc {{method}} : {{code}}", - "policy": "default", - "refId": "B", - "resultFormat": 
"time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 2, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "RPC服务错误率", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "percent", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 35 - }, - "id": 260, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": "max", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "histogram_quantile(0.99, sum(irate(grpc_server_requests_duration_ms_bucket{instance=~\"$instance\",method!~\".*Ping\",caller=~\"$grpc_caller\"}[5m])) by (le, method))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "grpc: pt99: {{method}}", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "[$app] RPC 99分位响应时间", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 35 - }, - "id": 262, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "histogram_quantile(0.90, sum(irate(grpc_server_requests_duration_ms_bucket{instance=~\"$instance\",method!~\".*Ping\",caller=~\"$grpc_caller\"}[5m])) by (le, method))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "grpc: pt90: {{method}}", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "[$app] RPC 90分位响应时间", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": 
null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "title": "gRPC", - "type": "row" - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 13 - }, - "id": 114, - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 0, - "y": 12 - }, - "id": 244, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "sum(rate(grpc_client_requests_duration_ms_count{instance=~\"$instance\",method!~\".*Ping\"}[5m])) by (method)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 1, - "legendFormat": "{{method}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 2, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "gRPC依赖组件QPS", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "transparent": true, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 8, - "y": 12 - }, - "id": 245, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": null, - "sortDesc": null, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "avg(increase(grpc_client_requests_duration_ms_sum{instance=~\"$instance\",method!~\".*Ping\"}[5m])/ increase(grpc_client_requests_duration_ms_count{instance=~\"$instance\",method!~\".*Ping\"}[5m]) > 0 ) by (method)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "hide": false, - "intervalFactor": 1, - "legendFormat": 
"{{method}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 240, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "gRPC依赖组件平均响应时间", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 16, - "y": 12 - }, - "id": 232, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": null, - "sortDesc": null, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "sum(irate(grpc_client_requests_code_total{instance=~\"$instance\",method!~\".*Ping\",code!=\"0\"}[5m])) by (method,code)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "hide": false, - "intervalFactor": 1, - "legendFormat": "{{method}}: {{code}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 240, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "gRPC依赖组件错误量", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 0, - "y": 19 - }, - "id": 238, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "sum(rate(http_client_requests_duration_ms_count{instance=~\"$instance\",path!~\".*/discovery/.*\"}[5m])) 
by (path)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "legendFormat": "{{path}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 2, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "HTTP依赖组件QPS", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 8, - "y": 19 - }, - "id": 239, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": null, - "sortDesc": null, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "avg(increase(http_client_requests_duration_ms_sum{instance=~\"$instance\",path!~\".*/discovery/.*\"}[5m])/ increase(http_client_requests_duration_ms_count{instance=~\"$instance\",path!~\".*/discovery/.*\"}[5m]) > 0 ) by (path)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{path}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 240, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "HTTP依赖组件平均响应时间", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 16, - "y": 19 - }, - "id": 243, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": null, - "sortDesc": null, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - 
"pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "sum(irate(http_client_requests_code_total{instance=~\"$instance\",path!~\".*/discovery/.*\",code!=\"0\"}[5m])) by (path,code)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "hide": false, - "intervalFactor": 1, - "legendFormat": "{{path}}: {{code}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 240, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "HTTP依赖组件错误量", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 0, - "y": 26 - }, - "id": 59, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "sum(rate(mysql_client_requests_duration_ms_count{instance=~\"$instance\"}[5m])) by (command)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{command}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 2, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "mysql组件QPS", - "tooltip": { - "shared": true, - "sort": 1, - "value_type": "individual" - }, - "transparent": true, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 8, - "y": 26 - }, - "id": 62, - "legend": { - "alignAsTable": true, - "avg": 
false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": null, - "sortDesc": null, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "avg(increase(mysql_client_requests_duration_ms_sum{instance=~\"$instance\"}[5m])/ increase(mysql_client_requests_duration_ms_count{instance=~\"$instance\"}[5m]) > 0 ) by (command)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{command}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 240, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "mysql组件平均响应时间", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 16, - "y": 26 - }, - "id": 240, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": null, - "sortDesc": null, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "sum(irate(mysql_client_requests_error_total{instance=~\"$instance\"}[5m])) by (error)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "hide": false, - "intervalFactor": 1, - "legendFormat": "{{error}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 240, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "mysql组件错误量", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - 
{ - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 0, - "y": 33 - }, - "id": 193, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "sum(rate(redis_client_requests_duration_ms_count{instance=~\"$instance\"}[5m])) by (command)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "legendFormat": "redis {{command}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 2, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "redis组件QPS", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 8, - "y": 33 - }, - "id": 194, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": null, - "sortDesc": null, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "avg(increase(redis_client_requests_duration_ms_sum{instance=~\"$instance\"}[5m])/ increase(redis_client_requests_duration_ms_count{instance=~\"$instance\"}[5m]) > 0 ) by (command)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{command}}", - "policy": "default", - "refId": "B", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 240, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "redis组件平均响应时间", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ms", - "label": 
null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 16, - "y": 33 - }, - "id": 233, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": null, - "sortDesc": null, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "sum(irate(redis_client_requests_error_total{instance=~\"$instance\"}[5m])) by (error)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{error}}", - "policy": "default", - "refId": "B", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 240, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "redis组件错误量", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 0, - "y": 40 - }, - "id": 191, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "sum(rate(memcache_client_requests_duration_ms_count{instance=~\"$instance\"}[5m])) by (command)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "legendFormat": "{{command}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 2, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "memcache组件QPS", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - 
"xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 8, - "y": 40 - }, - "id": 196, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": null, - "sortDesc": null, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "avg(increase(memcache_client_requests_duration_ms_sum{instance=~\"$instance\"}[5m])/ increase(memcache_client_requests_duration_ms_count{instance=~\"$instance\"}[5m]) >0) by (command)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{command}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 240, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "memcache组件平均响应时间", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 16, - "y": 40 - }, - "id": 234, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": null, - "sortDesc": null, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "sum(irate(memcache_client_requests_error_total{instance=~\"$instance\"}[5m])) by (error)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{error}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } 
- ] - ], - "step": 240, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "memcache组件错误量", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 0, - "y": 47 - }, - "id": 192, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "sum(rate(tidb_client_requests_duration_ms_count{instance=~\"$instance\"}[5m])) by (command)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "legendFormat": "{{command}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 2, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "tidb组件QPS", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 8, - "y": 47 - }, - "id": 197, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": null, - "sortDesc": null, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "avg(increase(tidb_client_requests_duration_ms_sum{instance=~\"$instance\"}[5m])/ increase(tidb_client_requests_duration_ms_count{instance=~\"$instance\"}[5m]) > 0 ) by (command)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "hide": false, - 
"intervalFactor": 2, - "legendFormat": "{{command}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 240, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "tidb组件平均响应时间", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 16, - "y": 47 - }, - "id": 235, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": null, - "sortDesc": null, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "sum(irate(tidb_client_requests_error_total{instance=~\"$instance\"}[5m])) by (error)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{error}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 240, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "tidb组件错误量", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 0, - "y": 54 - }, - "id": 190, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "sum(rate(hbase_client_requests_duration_ms_count{instance=~\"$instance\"}[5m])) by (command)", - "format": 
"time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "legendFormat": "{{command}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 2, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "hbase组件QPS", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 8, - "y": 54 - }, - "id": 198, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": null, - "sortDesc": null, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "avg(increase(hbase_client_requests_duration_ms_sum{instance=~\"$instance\"}[5m])/ increase(hbase_client_requests_duration_ms_count{instance=~\"$instance\"}[5m]) > 0 ) by (command)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{command}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 240, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "hbase组件平均响应时间", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 16, - "y": 54 - }, - "id": 236, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": null, - "sortDesc": null, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - 
"seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "sum(irate(hbase_client_requests_error_total{instance=~\"$instance\"}[5m])) by (error)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{error}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 240, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "hbase组件平错误量", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 0, - "y": 61 - }, - "id": 189, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": "current", - "sortDesc": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "sum(rate(databus_client_requests_duration_ms_count{instance=~\"$instance\"}[5m])) by (command)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "intervalFactor": 2, - "legendFormat": "{{command}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 2, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "databus组件QPS", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 8, - "y": 61 - }, - "id": 195, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": null, - "sortDesc": null, - "total": false, - "values": true - }, - "lines": true, 
- "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "avg(increase(databus_client_requests_duration_ms_sum{instance=~\"$instance\"}[5m])/ increase(databus_client_requests_duration_ms_count{instance=~\"$instance\"}[5m]) > 0 ) by (command)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{command}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 240, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "databus组件平均响应时间", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 16, - "y": 61 - }, - "id": 237, - "legend": { - "alignAsTable": true, - "avg": false, - "current": true, - "max": true, - "min": true, - "show": true, - "sort": null, - "sortDesc": null, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "expr": "sum(irate(databus_client_requests_error_total{instance=~\"$instance\"}[5m])) by (error)", - "format": "time_series", - "groupBy": [ - { - "params": [ - "$__interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{error}}", - "policy": "default", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "step": 240, - "tags": [] - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "databus组件错误量", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "repeat": null, - "title": "Component", - "type": "row" - } - ], - "refresh": false, - "schemaVersion": 18, - "style": "dark", - "tags": [ - "Go", - 
"业务监控" - ], - "templating": { - "list": [ - { - "hide": 2, - "includeAll": false, - "label": null, - "multi": true, - "name": "datasource", - "options": [], - "query": "prometheus", - "refresh": 1, - "skipUrlSync": false, - "type": "datasource" - }, - { - "allValue": ".*", - "current": { - "text": "All", - "value": [ - "$__all" - ] - }, - "datasource": "$datasource", - "definition": "label_values(go_goroutines,instance)", - "hide": 0, - "includeAll": true, - "label": "实例", - "multi": true, - "name": "instance", - "options": [], - "query": "label_values(go_goroutines,instance)", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 1, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "text": "All", - "value": [ - "$__all" - ] - }, - "datasource": "$datasource", - "definition": "label_values(http_server_requests_duration_ms_count{path!=\"\",instance=~\"$instance\"}, path)", - "hide": 0, - "includeAll": true, - "label": "http接口", - "multi": true, - "name": "http_method", - "options": [], - "query": "label_values(http_server_requests_duration_ms_count{path!=\"\",instance=~\"$instance\"}, path)", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 1, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "text": "All", - "value": [ - "$__all" - ] - }, - "datasource": "$datasource", - "definition": "label_values(http_server_requests_duration_ms_count{caller!=\"\",instance=~\"$instance\"}, caller)", - "hide": 0, - "includeAll": true, - "label": "http调用者", - "multi": true, - "name": "http_user", - "options": [], - "query": "label_values(http_server_requests_duration_ms_count{caller!=\"\",instance=~\"$instance\"}, caller)", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "text": "All", - "value": [ - "$__all" - ] - }, - "datasource": "$datasource", - "definition": "label_values(http_client_requests_duration_ms_count{path!=\"\",instance=~\"$instance\"}, path)", - "hide": 0, - "includeAll": true, - "label": "http依赖接口", - "multi": true, - "name": "http_client_method", - "options": [], - "query": "label_values(http_client_requests_duration_ms_count{path!=\"\",instance=~\"$instance\"}, path)", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 1, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "text": "All", - "value": [ - "$__all" - ] - }, - "datasource": "$datasource", - "definition": "label_values(grpc_server_requests_duration_ms_bucket{instance=~\"$instance\"},caller)", - "hide": 0, - "includeAll": true, - "label": "grpc依赖接口", - "multi": true, - "name": "grpc_caller", - "options": [], - "query": "label_values(grpc_server_requests_duration_ms_bucket{instance=~\"$instance\"},caller)", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-30m", - "to": "now" - }, - "timepicker": { - "hidden": false, - "refresh_intervals": [ - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": 
"browser", - "title": "Go业务监控", - "uid": "FitP_nDZz", - "version": 10 -} diff --git a/options.go b/options.go new file mode 100644 index 000000000..d5aa4c804 --- /dev/null +++ b/options.go @@ -0,0 +1,79 @@ +package kratos + +import ( + "context" + "os" + + "github.com/go-kratos/kratos/v2/log" + "github.com/go-kratos/kratos/v2/registry" + "github.com/go-kratos/kratos/v2/transport" +) + +// Option is an application option. +type Option func(o *options) + +// options is an application options. +type options struct { + id string + name string + version string + metadata map[string]string + endpoints []string + + ctx context.Context + sigs []os.Signal + + logger log.Logger + registry registry.Registry + servers []transport.Server +} + +// ID with service id. +func ID(id string) Option { + return func(o *options) { o.id = id } +} + +// Name with service name. +func Name(name string) Option { + return func(o *options) { o.name = name } +} + +// Version with service version. +func Version(version string) Option { + return func(o *options) { o.version = version } +} + +// Metadata with service metadata. +func Metadata(md map[string]string) Option { + return func(o *options) { o.metadata = md } +} + +// Endpoint with service endpoint. +func Endpoint(endpoints ...string) Option { + return func(o *options) { o.endpoints = endpoints } +} + +// Context with service context. +func Context(ctx context.Context) Option { + return func(o *options) { o.ctx = ctx } +} + +// Signal with exit signals. +func Signal(sigs ...os.Signal) Option { + return func(o *options) { o.sigs = sigs } +} + +// Logger with service logger. +func Logger(logger log.Logger) Option { + return func(o *options) { o.logger = logger } +} + +// Registry with service registry. +func Registry(r registry.Registry) Option { + return func(o *options) { o.registry = r } +} + +// Server with transport servers. 
+func Server(srv ...transport.Server) Option { + return func(o *options) { o.servers = srv } +} diff --git a/pkg/cache/memcache/ascii_conn.go b/pkg/cache/memcache/ascii_conn.go deleted file mode 100644 index a09b7ed3a..000000000 --- a/pkg/cache/memcache/ascii_conn.go +++ /dev/null @@ -1,262 +0,0 @@ -package memcache - -import ( - "bufio" - "bytes" - "context" - "fmt" - "io" - "net" - "strconv" - "strings" - "time" - - pkgerr "github.com/pkg/errors" -) - -var ( - crlf = []byte("\r\n") - space = []byte(" ") - replyOK = []byte("OK\r\n") - replyStored = []byte("STORED\r\n") - replyNotStored = []byte("NOT_STORED\r\n") - replyExists = []byte("EXISTS\r\n") - replyNotFound = []byte("NOT_FOUND\r\n") - replyDeleted = []byte("DELETED\r\n") - replyEnd = []byte("END\r\n") - replyTouched = []byte("TOUCHED\r\n") - replyClientErrorPrefix = []byte("CLIENT_ERROR ") - replyServerErrorPrefix = []byte("SERVER_ERROR ") -) - -var _ protocolConn = &asiiConn{} - -// asiiConn is the low-level implementation of Conn -type asiiConn struct { - err error - conn net.Conn - // Read & Write - readTimeout time.Duration - writeTimeout time.Duration - rw *bufio.ReadWriter -} - -func replyToError(line []byte) error { - switch { - case bytes.Equal(line, replyStored): - return nil - case bytes.Equal(line, replyOK): - return nil - case bytes.Equal(line, replyDeleted): - return nil - case bytes.Equal(line, replyTouched): - return nil - case bytes.Equal(line, replyNotStored): - return ErrNotStored - case bytes.Equal(line, replyExists): - return ErrCASConflict - case bytes.Equal(line, replyNotFound): - return ErrNotFound - case bytes.Equal(line, replyNotStored): - return ErrNotStored - case bytes.Equal(line, replyExists): - return ErrCASConflict - } - return pkgerr.WithStack(protocolError(string(line))) -} - -func (c *asiiConn) Populate(ctx context.Context, cmd string, key string, flags uint32, expiration int32, cas uint64, data []byte) error { - var err error - c.conn.SetWriteDeadline(shrinkDeadline(ctx, c.writeTimeout)) - // [noreply]\r\n - if cmd == "cas" { - _, err = fmt.Fprintf(c.rw, "%s %s %d %d %d %d\r\n", cmd, key, flags, expiration, len(data), cas) - } else { - _, err = fmt.Fprintf(c.rw, "%s %s %d %d %d\r\n", cmd, key, flags, expiration, len(data)) - } - if err != nil { - return c.fatal(err) - } - c.rw.Write(data) - c.rw.Write(crlf) - if err = c.rw.Flush(); err != nil { - return c.fatal(err) - } - c.conn.SetReadDeadline(shrinkDeadline(ctx, c.readTimeout)) - line, err := c.rw.ReadSlice('\n') - if err != nil { - return c.fatal(err) - } - return replyToError(line) -} - -// newConn returns a new memcache connection for the given net connection. -func newASCIIConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) (protocolConn, error) { - if writeTimeout <= 0 || readTimeout <= 0 { - return nil, pkgerr.Errorf("readTimeout writeTimeout can't be zero") - } - c := &asiiConn{ - conn: netConn, - rw: bufio.NewReadWriter(bufio.NewReader(netConn), - bufio.NewWriter(netConn)), - readTimeout: readTimeout, - writeTimeout: writeTimeout, - } - return c, nil -} - -func (c *asiiConn) Close() error { - if c.err == nil { - c.err = pkgerr.New("memcache: closed") - } - return c.conn.Close() -} - -func (c *asiiConn) fatal(err error) error { - if c.err == nil { - c.err = pkgerr.WithStack(err) - // Close connection to force errors on subsequent calls and to unblock - // other reader or writer. 
- c.conn.Close() - } - return c.err -} - -func (c *asiiConn) Err() error { - return c.err -} - -func (c *asiiConn) Get(ctx context.Context, key string) (result *Item, err error) { - c.conn.SetWriteDeadline(shrinkDeadline(ctx, c.writeTimeout)) - if _, err = fmt.Fprintf(c.rw, "gets %s\r\n", key); err != nil { - return nil, c.fatal(err) - } - if err = c.rw.Flush(); err != nil { - return nil, c.fatal(err) - } - if err = c.parseGetReply(ctx, func(it *Item) { - result = it - }); err != nil { - return - } - if result == nil { - return nil, ErrNotFound - } - return -} - -func (c *asiiConn) GetMulti(ctx context.Context, keys ...string) (map[string]*Item, error) { - var err error - c.conn.SetWriteDeadline(shrinkDeadline(ctx, c.writeTimeout)) - if _, err = fmt.Fprintf(c.rw, "gets %s\r\n", strings.Join(keys, " ")); err != nil { - return nil, c.fatal(err) - } - if err = c.rw.Flush(); err != nil { - return nil, c.fatal(err) - } - results := make(map[string]*Item, len(keys)) - if err = c.parseGetReply(ctx, func(it *Item) { - results[it.Key] = it - }); err != nil { - return nil, err - } - return results, nil -} - -func (c *asiiConn) parseGetReply(ctx context.Context, f func(*Item)) error { - c.conn.SetReadDeadline(shrinkDeadline(ctx, c.readTimeout)) - for { - line, err := c.rw.ReadSlice('\n') - if err != nil { - return c.fatal(err) - } - if bytes.Equal(line, replyEnd) { - return nil - } - if bytes.HasPrefix(line, replyServerErrorPrefix) { - errMsg := line[len(replyServerErrorPrefix):] - return c.fatal(protocolError(errMsg)) - } - it := new(Item) - size, err := scanGetReply(line, it) - if err != nil { - return c.fatal(err) - } - it.Value = make([]byte, size+2) - if _, err = io.ReadFull(c.rw, it.Value); err != nil { - return c.fatal(err) - } - if !bytes.HasSuffix(it.Value, crlf) { - return c.fatal(protocolError("corrupt get reply, no except CRLF")) - } - it.Value = it.Value[:size] - f(it) - } -} - -func scanGetReply(line []byte, item *Item) (size int, err error) { - pattern := "VALUE %s %d %d %d\r\n" - dest := []interface{}{&item.Key, &item.Flags, &size, &item.cas} - if bytes.Count(line, space) == 3 { - pattern = "VALUE %s %d %d\r\n" - dest = dest[:3] - } - n, err := fmt.Sscanf(string(line), pattern, dest...) 
- if err != nil || n != len(dest) { - return -1, fmt.Errorf("memcache: unexpected line in get response: %q", line) - } - return size, nil -} - -func (c *asiiConn) Touch(ctx context.Context, key string, expire int32) error { - line, err := c.writeReadLine(ctx, "touch %s %d\r\n", key, expire) - if err != nil { - return err - } - return replyToError(line) -} - -func (c *asiiConn) IncrDecr(ctx context.Context, cmd, key string, delta uint64) (uint64, error) { - line, err := c.writeReadLine(ctx, "%s %s %d\r\n", cmd, key, delta) - if err != nil { - return 0, err - } - switch { - case bytes.Equal(line, replyNotFound): - return 0, ErrNotFound - case bytes.HasPrefix(line, replyClientErrorPrefix): - errMsg := line[len(replyClientErrorPrefix):] - return 0, pkgerr.WithStack(protocolError(errMsg)) - } - val, err := strconv.ParseUint(string(line[:len(line)-2]), 10, 64) - if err != nil { - return 0, err - } - return val, nil -} - -func (c *asiiConn) Delete(ctx context.Context, key string) error { - line, err := c.writeReadLine(ctx, "delete %s\r\n", key) - if err != nil { - return err - } - return replyToError(line) -} - -func (c *asiiConn) writeReadLine(ctx context.Context, format string, args ...interface{}) ([]byte, error) { - var err error - c.conn.SetWriteDeadline(shrinkDeadline(ctx, c.writeTimeout)) - _, err = fmt.Fprintf(c.rw, format, args...) - if err != nil { - return nil, c.fatal(pkgerr.WithStack(err)) - } - if err = c.rw.Flush(); err != nil { - return nil, c.fatal(pkgerr.WithStack(err)) - } - c.conn.SetReadDeadline(shrinkDeadline(ctx, c.readTimeout)) - line, err := c.rw.ReadSlice('\n') - if err != nil { - return line, c.fatal(pkgerr.WithStack(err)) - } - return line, nil -} diff --git a/pkg/cache/memcache/ascii_conn_test.go b/pkg/cache/memcache/ascii_conn_test.go deleted file mode 100644 index 32efa5c4b..000000000 --- a/pkg/cache/memcache/ascii_conn_test.go +++ /dev/null @@ -1,567 +0,0 @@ -package memcache - -import ( - "bytes" - "strconv" - "strings" - - "testing" -) - -func TestASCIIConnAdd(t *testing.T) { - tests := []struct { - name string - a *Item - e error - }{ - { - "Add", - &Item{ - Key: "test_add", - Value: []byte("0"), - Flags: 0, - Expiration: 60, - }, - nil, - }, - { - "Add_Large", - &Item{ - Key: "test_add_large", - Value: bytes.Repeat(space, _largeValue+1), - Flags: 0, - Expiration: 60, - }, - nil, - }, - { - "Add_Exist", - &Item{ - Key: "test_add", - Value: []byte("0"), - Flags: 0, - Expiration: 60, - }, - ErrNotStored, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - if err := testConnASCII.Add(test.a); err != test.e { - t.Fatal(err) - } - if b, err := testConnASCII.Get(test.a.Key); err != nil { - t.Fatal(err) - } else { - compareItem(t, test.a, b) - } - }) - } -} - -func TestASCIIConnGet(t *testing.T) { - tests := []struct { - name string - a *Item - k string - e error - }{ - { - "Get", - &Item{ - Key: "test_get", - Value: []byte("0"), - Flags: 0, - Expiration: 60, - }, - "test_get", - nil, - }, - { - "Get_NotExist", - &Item{ - Key: "test_get_not_exist", - Value: []byte("0"), - Flags: 0, - Expiration: 60, - }, - "test_get_not_exist!", - ErrNotFound, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - if err := testConnASCII.Add(test.a); err != nil { - t.Fatal(err) - } - if b, err := testConnASCII.Get(test.a.Key); err != nil { - t.Fatal(err) - } else { - compareItem(t, test.a, b) - } - }) - } -} - -//func TestGetHasErr(t *testing.T) { -// prepareEnv(t) -// -// st := &TestItem{Name: "json", Age: 10} -// itemx := 
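For readers skimming the removed pkg/cache/memcache/ascii_conn.go above: it spoke the memcached ASCII protocol directly, framing storage commands as `<cmd> <key> <flags> <exptime> <bytes>[ <cas>]\r\n<data>\r\n` and parsing `VALUE <key> <flags> <bytes>[ <cas>]\r\n` reply headers. A standalone sketch of that framing and parsing follows; buildSet and parseValueLine are hypothetical helper names used purely for illustration.

```go
package main

import (
	"bytes"
	"fmt"
)

// buildSet frames a memcached ASCII "set" command the way the removed
// asiiConn.Populate did: a header line, the payload, then a trailing CRLF.
func buildSet(key string, flags uint32, exp int32, data []byte) []byte {
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "set %s %d %d %d\r\n", key, flags, exp, len(data))
	buf.Write(data)
	buf.WriteString("\r\n")
	return buf.Bytes()
}

// parseValueLine mirrors the removed scanGetReply: a "gets" reply header is
// "VALUE <key> <flags> <bytes> <cas>\r\n"; a plain "get" omits the cas field.
func parseValueLine(line []byte) (key string, flags uint32, size int, cas uint64, err error) {
	pattern := "VALUE %s %d %d %d\r\n"
	dest := []interface{}{&key, &flags, &size, &cas}
	if bytes.Count(line, []byte(" ")) == 3 { // only three spaces: no cas field
		pattern = "VALUE %s %d %d\r\n"
		dest = dest[:3]
	}
	n, err := fmt.Sscanf(string(line), pattern, dest...)
	if err != nil || n != len(dest) {
		return "", 0, 0, 0, fmt.Errorf("unexpected get reply: %q", line)
	}
	return key, flags, size, cas, nil
}

func main() {
	fmt.Printf("%q\n", buildSet("k", 0, 60, []byte("v")))

	key, flags, size, cas, err := parseValueLine([]byte("VALUE k 0 1 42\r\n"))
	fmt.Println(key, flags, size, cas, err)
}
```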
&Item{Key: "test", Object: st, Flags: FlagJSON} -// c.Set(itemx) -// -// expected := errors.New("some error") -// monkey.Patch(scanGetReply, func(line []byte, item *Item) (size int, err error) { -// return 0, expected -// }) -// -// if _, err := c.Get("test"); err.Error() != expected.Error() { -// t.Errorf("conn.Get() unexpected error(%v)", err) -// } -// if err := c.(*asciiConn).err; err.Error() != expected.Error() { -// t.Errorf("unexpected error(%v)", err) -// } -//} - -func TestASCIIConnGetMulti(t *testing.T) { - tests := []struct { - name string - a []*Item - k []string - e error - }{ - {"getMulti_Add", - []*Item{ - { - Key: "get_multi_1", - Value: []byte("test"), - Flags: FlagRAW, - Expiration: 60, - cas: 0, - }, - { - Key: "get_multi_2", - Value: []byte("test2"), - Flags: FlagRAW, - Expiration: 60, - cas: 0, - }, - }, - []string{"get_multi_1", "get_multi_2"}, - nil, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - for _, i := range test.a { - if err := testConnASCII.Set(i); err != nil { - t.Fatal(err) - } - } - if r, err := testConnASCII.GetMulti(test.k); err != nil { - t.Fatal(err) - } else { - reply := r["get_multi_1"] - compareItem(t, reply, test.a[0]) - reply = r["get_multi_2"] - compareItem(t, reply, test.a[1]) - } - }) - } -} - -func TestASCIIConnSet(t *testing.T) { - tests := []struct { - name string - a *Item - e error - }{ - { - "SetLowerBound", - &Item{ - Key: strings.Repeat("a", 1), - Value: []byte("4"), - Flags: 0, - Expiration: 60, - }, - nil, - }, - { - "SetUpperBound", - &Item{ - Key: strings.Repeat("a", 250), - Value: []byte("3"), - Flags: 0, - Expiration: 60, - }, - nil, - }, - { - "SetIllegalKeyZeroLength", - &Item{ - Key: "", - Value: []byte("2"), - Flags: 0, - Expiration: 60, - }, - ErrMalformedKey, - }, - { - "SetIllegalKeyLengthExceededLimit", - &Item{ - Key: " ", - Value: []byte("1"), - Flags: 0, - Expiration: 60, - }, - ErrMalformedKey, - }, - { - "SeJsonItem", - &Item{ - Key: "set_obj", - Object: &struct { - Name string - Age int - }{"json", 10}, - Expiration: 60, - Flags: FlagJSON, - }, - nil, - }, - { - "SeErrItemJSONGzip", - &Item{ - Key: "set_err_item", - Expiration: 60, - Flags: FlagJSON | FlagGzip, - }, - ErrItem, - }, - { - "SeErrItemBytesValueWrongFlag", - &Item{ - Key: "set_err_item", - Value: []byte("2"), - Expiration: 60, - Flags: FlagJSON, - }, - ErrItem, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - if err := testConnASCII.Set(test.a); err != test.e { - t.Fatal(err) - } - }) - } -} - -func TestASCIIConnCompareAndSwap(t *testing.T) { - tests := []struct { - name string - a *Item - b *Item - c *Item - k string - e error - }{ - { - "CompareAndSwap", - &Item{ - Key: "test_cas", - Value: []byte("2"), - Flags: 0, - Expiration: 60, - }, - nil, - &Item{ - Key: "test_cas", - Value: []byte("3"), - Flags: 0, - Expiration: 60, - }, - "test_cas", - nil, - }, - { - "CompareAndSwapErrCASConflict", - &Item{ - Key: "test_cas_conflict", - Value: []byte("2"), - Flags: 0, - Expiration: 60, - }, - &Item{ - Key: "test_cas_conflict", - Value: []byte("1"), - Flags: 0, - Expiration: 60, - }, - &Item{ - Key: "test_cas_conflict", - Value: []byte("3"), - Flags: 0, - Expiration: 60, - }, - "test_cas_conflict", - ErrCASConflict, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - if err := testConnASCII.Set(test.a); err != nil { - t.Fatal(err) - } - r, err := testConnASCII.Get(test.k) - if err != nil { - t.Fatal(err) - } - - if test.b != nil { - if err := 
testConnASCII.Set(test.b); err != nil { - t.Fatal(err) - } - } - - r.Value = test.c.Value - if err := testConnASCII.CompareAndSwap(r); err != nil { - if err != test.e { - t.Fatal(err) - } - } else { - if fr, err := testConnASCII.Get(test.k); err != nil { - t.Fatal(err) - } else { - compareItem(t, fr, test.c) - } - } - }) - } - - t.Run("TestCompareAndSwapErrNotFound", func(t *testing.T) { - ti := &Item{ - Key: "test_cas_notfound", - Value: []byte("2"), - Flags: 0, - Expiration: 60, - } - if err := testConnASCII.Set(ti); err != nil { - t.Fatal(err) - } - r, err := testConnASCII.Get(ti.Key) - if err != nil { - t.Fatal(err) - } - - r.Key = "test_cas_notfound_boom" - r.Value = []byte("3") - if err := testConnASCII.CompareAndSwap(r); err != nil { - if err != ErrNotFound { - t.Fatal(err) - } - } - }) -} - -func TestASCIIConnReplace(t *testing.T) { - tests := []struct { - name string - a *Item - b *Item - e error - }{ - { - "TestReplace", - &Item{ - Key: "test_replace", - Value: []byte("2"), - Flags: 0, - Expiration: 60, - }, - &Item{ - Key: "test_replace", - Value: []byte("3"), - Flags: 0, - Expiration: 60, - }, - nil, - }, - { - "TestReplaceErrNotStored", - &Item{ - Key: "test_replace_not_stored", - Value: []byte("2"), - Flags: 0, - Expiration: 60, - }, - &Item{ - Key: "test_replace_not_stored_boom", - Value: []byte("3"), - Flags: 0, - Expiration: 60, - }, - ErrNotStored, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - if err := testConnASCII.Set(test.a); err != nil { - t.Fatal(err) - } - if err := testConnASCII.Replace(test.b); err != nil { - if err == test.e { - return - } - t.Fatal(err) - } - if r, err := testConnASCII.Get(test.b.Key); err != nil { - t.Fatal(err) - } else { - compareItem(t, r, test.b) - } - }) - } -} - -func TestASCIIConnIncrDecr(t *testing.T) { - tests := []struct { - fn func(key string, delta uint64) (uint64, error) - name string - k string - v uint64 - w uint64 - }{ - { - testConnASCII.Increment, - "Incr_10", - "test_incr", - 10, - 10, - }, - { - testConnASCII.Increment, - "Incr_10(2)", - "test_incr", - 10, - 20, - }, - { - testConnASCII.Decrement, - "Decr_10", - "test_incr", - 10, - 10, - }, - } - if err := testConnASCII.Add(&Item{ - Key: "test_incr", - Value: []byte("0"), - }); err != nil { - t.Fatal(err) - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - if a, err := test.fn(test.k, test.v); err != nil { - t.Fatal(err) - } else { - if a != test.w { - t.Fatalf("want %d, got %d", test.w, a) - } - } - if b, err := testConnASCII.Get(test.k); err != nil { - t.Fatal(err) - } else { - if string(b.Value) != strconv.FormatUint(test.w, 10) { - t.Fatalf("want %s, got %d", b.Value, test.w) - } - } - }) - } -} - -func TestASCIIConnTouch(t *testing.T) { - tests := []struct { - name string - k string - a *Item - e error - }{ - { - "Touch", - "test_touch", - &Item{ - Key: "test_touch", - Value: []byte("0"), - Expiration: 60, - }, - nil, - }, - { - "Touch_NotExist", - "test_touch_not_exist", - nil, - ErrNotFound, - }, - } - for _, test := range tests { - if test.a != nil { - if err := testConnASCII.Add(test.a); err != nil { - t.Fatal(err) - } - if err := testConnASCII.Touch(test.k, 1); err != test.e { - t.Fatal(err) - } - } - } -} - -func TestASCIIConnDelete(t *testing.T) { - tests := []struct { - name string - k string - a *Item - e error - }{ - { - "Delete", - "test_delete", - &Item{ - Key: "test_delete", - Value: []byte("0"), - Expiration: 60, - }, - nil, - }, - { - "Delete_NotExist", - "test_delete_not_exist", - nil, - 
ErrNotFound, - }, - } - for _, test := range tests { - if test.a != nil { - if err := testConnASCII.Add(test.a); err != nil { - t.Fatal(err) - } - if err := testConnASCII.Delete(test.k); err != test.e { - t.Fatal(err) - } - if _, err := testConnASCII.Get(test.k); err != ErrNotFound { - t.Fatal(err) - } - } - } -} - -func compareItem(t *testing.T, a, b *Item) { - if a.Key != b.Key || !bytes.Equal(a.Value, b.Value) || a.Flags != b.Flags { - t.Fatalf("compareItem: a(%s, %d, %d) : b(%s, %d, %d)", a.Key, len(a.Value), a.Flags, b.Key, len(b.Value), b.Flags) - } -} diff --git a/pkg/cache/memcache/conn.go b/pkg/cache/memcache/conn.go deleted file mode 100644 index 4637e0815..000000000 --- a/pkg/cache/memcache/conn.go +++ /dev/null @@ -1,287 +0,0 @@ -package memcache - -import ( - "context" - "fmt" - "net" - "strconv" - "time" - - pkgerr "github.com/pkg/errors" -) - -const ( - // 1024*1024 - 1, set error??? - _largeValue = 1000 * 1000 // 1MB -) - -// low level connection that implement memcache protocol provide basic operation. -type protocolConn interface { - Populate(ctx context.Context, cmd string, key string, flags uint32, expiration int32, cas uint64, data []byte) error - Get(ctx context.Context, key string) (*Item, error) - GetMulti(ctx context.Context, keys ...string) (map[string]*Item, error) - Touch(ctx context.Context, key string, expire int32) error - IncrDecr(ctx context.Context, cmd, key string, delta uint64) (uint64, error) - Delete(ctx context.Context, key string) error - Close() error - Err() error -} - -// DialOption specifies an option for dialing a Memcache server. -type DialOption struct { - f func(*dialOptions) -} - -type dialOptions struct { - readTimeout time.Duration - writeTimeout time.Duration - protocol string - dial func(network, addr string) (net.Conn, error) -} - -// DialReadTimeout specifies the timeout for reading a single command reply. -func DialReadTimeout(d time.Duration) DialOption { - return DialOption{func(do *dialOptions) { - do.readTimeout = d - }} -} - -// DialWriteTimeout specifies the timeout for writing a single command. -func DialWriteTimeout(d time.Duration) DialOption { - return DialOption{func(do *dialOptions) { - do.writeTimeout = d - }} -} - -// DialConnectTimeout specifies the timeout for connecting to the Memcache server. -func DialConnectTimeout(d time.Duration) DialOption { - return DialOption{func(do *dialOptions) { - dialer := net.Dialer{Timeout: d} - do.dial = dialer.Dial - }} -} - -// DialNetDial specifies a custom dial function for creating TCP -// connections. If this option is left out, then net.Dial is -// used. DialNetDial overrides DialConnectTimeout. -func DialNetDial(dial func(network, addr string) (net.Conn, error)) DialOption { - return DialOption{func(do *dialOptions) { - do.dial = dial - }} -} - -// Dial connects to the Memcache server at the given network and -// address using the specified options. -func Dial(network, address string, options ...DialOption) (Conn, error) { - do := dialOptions{ - dial: net.Dial, - } - for _, option := range options { - option.f(&do) - } - netConn, err := do.dial(network, address) - if err != nil { - return nil, pkgerr.WithStack(err) - } - pconn, err := newASCIIConn(netConn, do.readTimeout, do.writeTimeout) - return &conn{pconn: pconn, ed: newEncodeDecoder()}, err -} - -type conn struct { - // low level connection. 
- pconn protocolConn - ed *encodeDecode -} - -func (c *conn) Close() error { - return c.pconn.Close() -} - -func (c *conn) Err() error { - return c.pconn.Err() -} - -func (c *conn) AddContext(ctx context.Context, item *Item) error { - return c.populate(ctx, "add", item) -} - -func (c *conn) SetContext(ctx context.Context, item *Item) error { - return c.populate(ctx, "set", item) -} - -func (c *conn) ReplaceContext(ctx context.Context, item *Item) error { - return c.populate(ctx, "replace", item) -} - -func (c *conn) CompareAndSwapContext(ctx context.Context, item *Item) error { - return c.populate(ctx, "cas", item) -} - -func (c *conn) populate(ctx context.Context, cmd string, item *Item) error { - if !legalKey(item.Key) { - return ErrMalformedKey - } - data, err := c.ed.encode(item) - if err != nil { - return err - } - length := len(data) - if length < _largeValue { - return c.pconn.Populate(ctx, cmd, item.Key, item.Flags, item.Expiration, item.cas, data) - } - count := length/_largeValue + 1 - if err = c.pconn.Populate(ctx, cmd, item.Key, item.Flags|flagLargeValue, item.Expiration, item.cas, []byte(strconv.Itoa(length))); err != nil { - return err - } - var chunk []byte - for i := 1; i <= count; i++ { - if i == count { - chunk = data[_largeValue*(count-1):] - } else { - chunk = data[_largeValue*(i-1) : _largeValue*i] - } - key := fmt.Sprintf("%s%d", item.Key, i) - if err = c.pconn.Populate(ctx, cmd, key, item.Flags, item.Expiration, item.cas, chunk); err != nil { - return err - } - } - return nil -} - -func (c *conn) GetContext(ctx context.Context, key string) (*Item, error) { - if !legalKey(key) { - return nil, ErrMalformedKey - } - result, err := c.pconn.Get(ctx, key) - if err != nil { - return nil, err - } - if result.Flags&flagLargeValue != flagLargeValue { - return result, err - } - return c.getLargeItem(ctx, result) -} - -func (c *conn) getLargeItem(ctx context.Context, result *Item) (*Item, error) { - length, err := strconv.Atoi(string(result.Value)) - if err != nil { - return nil, err - } - count := length/_largeValue + 1 - keys := make([]string, 0, count) - for i := 1; i <= count; i++ { - keys = append(keys, fmt.Sprintf("%s%d", result.Key, i)) - } - var results map[string]*Item - if results, err = c.pconn.GetMulti(ctx, keys...); err != nil { - return nil, err - } - if len(results) < count { - return nil, ErrNotFound - } - result.Value = make([]byte, 0, length) - for _, k := range keys { - ti := results[k] - if ti == nil || ti.Value == nil { - return nil, ErrNotFound - } - result.Value = append(result.Value, ti.Value...) - } - result.Flags = result.Flags ^ flagLargeValue - return result, nil -} - -func (c *conn) GetMultiContext(ctx context.Context, keys []string) (map[string]*Item, error) { - // TODO: move to protocolConn? - for _, key := range keys { - if !legalKey(key) { - return nil, ErrMalformedKey - } - } - results, err := c.pconn.GetMulti(ctx, keys...) 
- if err != nil { - return results, err - } - for k, v := range results { - if v.Flags&flagLargeValue != flagLargeValue { - continue - } - if v, err = c.getLargeItem(ctx, v); err != nil { - return results, err - } - results[k] = v - } - return results, nil -} - -func (c *conn) DeleteContext(ctx context.Context, key string) error { - if !legalKey(key) { - return ErrMalformedKey - } - return c.pconn.Delete(ctx, key) -} - -func (c *conn) IncrementContext(ctx context.Context, key string, delta uint64) (uint64, error) { - if !legalKey(key) { - return 0, ErrMalformedKey - } - return c.pconn.IncrDecr(ctx, "incr", key, delta) -} - -func (c *conn) DecrementContext(ctx context.Context, key string, delta uint64) (uint64, error) { - if !legalKey(key) { - return 0, ErrMalformedKey - } - return c.pconn.IncrDecr(ctx, "decr", key, delta) -} - -func (c *conn) TouchContext(ctx context.Context, key string, seconds int32) error { - if !legalKey(key) { - return ErrMalformedKey - } - return c.pconn.Touch(ctx, key, seconds) -} - -func (c *conn) Add(item *Item) error { - return c.AddContext(context.TODO(), item) -} - -func (c *conn) Set(item *Item) error { - return c.SetContext(context.TODO(), item) -} - -func (c *conn) Replace(item *Item) error { - return c.ReplaceContext(context.TODO(), item) -} - -func (c *conn) Get(key string) (*Item, error) { - return c.GetContext(context.TODO(), key) -} - -func (c *conn) GetMulti(keys []string) (map[string]*Item, error) { - return c.GetMultiContext(context.TODO(), keys) -} - -func (c *conn) Delete(key string) error { - return c.DeleteContext(context.TODO(), key) -} - -func (c *conn) Increment(key string, delta uint64) (newValue uint64, err error) { - return c.IncrementContext(context.TODO(), key, delta) -} - -func (c *conn) Decrement(key string, delta uint64) (newValue uint64, err error) { - return c.DecrementContext(context.TODO(), key, delta) -} - -func (c *conn) CompareAndSwap(item *Item) error { - return c.CompareAndSwapContext(context.TODO(), item) -} - -func (c *conn) Touch(key string, seconds int32) (err error) { - return c.TouchContext(context.TODO(), key, seconds) -} - -func (c *conn) Scan(item *Item, v interface{}) (err error) { - return pkgerr.WithStack(c.ed.decode(item, v)) -} diff --git a/pkg/cache/memcache/conn_test.go b/pkg/cache/memcache/conn_test.go deleted file mode 100644 index 789e52cfc..000000000 --- a/pkg/cache/memcache/conn_test.go +++ /dev/null @@ -1,185 +0,0 @@ -package memcache - -import ( - "bytes" - "encoding/json" - "testing" - - "github.com/gogo/protobuf/proto" - - test "github.com/go-kratos/kratos/pkg/cache/memcache/test" -) - -func TestConnRaw(t *testing.T) { - item := &Item{ - Key: "test", - Value: []byte("test"), - Flags: FlagRAW, - Expiration: 60, - cas: 0, - } - if err := testConnASCII.Set(item); err != nil { - t.Errorf("conn.Store() error(%v)", err) - } -} - -func TestConnSerialization(t *testing.T) { - type TestObj struct { - Name string - Age int32 - } - - tests := []struct { - name string - a *Item - e error - }{ - - { - "JSON", - &Item{ - Key: "test_json", - Object: &TestObj{"json", 1}, - Expiration: 60, - Flags: FlagJSON, - }, - nil, - }, - { - "JSONGzip", - &Item{ - Key: "test_json_gzip", - Object: &TestObj{"jsongzip", 2}, - Expiration: 60, - Flags: FlagJSON | FlagGzip, - }, - nil, - }, - { - "GOB", - &Item{ - Key: "test_gob", - Object: &TestObj{"gob", 3}, - Expiration: 60, - Flags: FlagGOB, - }, - nil, - }, - { - "GOBGzip", - &Item{ - Key: "test_gob_gzip", - Object: &TestObj{"gobgzip", 4}, - Expiration: 60, - Flags: FlagGOB | 
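One non-obvious behavior in the removed conn.go above is the large-value path: once an encoded value reaches _largeValue (1,000,000 bytes), populate stores only the total length under the original key with flagLargeValue set, writes the payload as numbered chunk keys (key1, key2, ...), and getLargeItem later reassembles them. A self-contained sketch of that chunk layout; splitLarge is a hypothetical helper name.

```go
package main

import (
	"fmt"
	"strconv"
)

const largeValue = 1000 * 1000 // same threshold as the removed _largeValue

// splitLarge reproduces the chunk layout used by the removed conn.populate:
// the parent key stores only the total length, chunks go to key1, key2, ...
func splitLarge(key string, data []byte) (parentValue string, chunks map[string][]byte) {
	chunks = make(map[string][]byte)
	count := len(data)/largeValue + 1
	for i := 1; i <= count; i++ {
		end := largeValue * i
		if i == count {
			end = len(data) // last chunk takes the remainder
		}
		chunks[fmt.Sprintf("%s%d", key, i)] = data[largeValue*(i-1) : end]
	}
	return strconv.Itoa(len(data)), chunks
}

func main() {
	parent, chunks := splitLarge("big", make([]byte, 2500000))
	fmt.Println("parent value:", parent, "chunks:", len(chunks)) // parent value: 2500000 chunks: 3
}
```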
FlagGzip, - }, - nil, - }, - { - "Protobuf", - &Item{ - Key: "test_protobuf", - Object: &test.TestItem{Name: "protobuf", Age: 6}, - Expiration: 60, - Flags: FlagProtobuf, - }, - nil, - }, - { - "ProtobufGzip", - &Item{ - Key: "test_protobuf_gzip", - Object: &test.TestItem{Name: "protobufgzip", Age: 7}, - Expiration: 60, - Flags: FlagProtobuf | FlagGzip, - }, - nil, - }, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - if err := testConnASCII.Set(tc.a); err != nil { - t.Fatal(err) - } - if r, err := testConnASCII.Get(tc.a.Key); err != tc.e { - t.Fatal(err) - } else { - if (tc.a.Flags & FlagProtobuf) > 0 { - var no test.TestItem - if err := testConnASCII.Scan(r, &no); err != nil { - t.Fatal(err) - } - if (tc.a.Object.(*test.TestItem).Name != no.Name) || (tc.a.Object.(*test.TestItem).Age != no.Age) { - t.Fatalf("compare failed error, %v %v", tc.a.Object.(*test.TestItem), no) - } - } else { - var no TestObj - if err := testConnASCII.Scan(r, &no); err != nil { - t.Fatal(err) - } - if (tc.a.Object.(*TestObj).Name != no.Name) || (tc.a.Object.(*TestObj).Age != no.Age) { - t.Fatalf("compare failed error, %v %v", tc.a.Object.(*TestObj), no) - } - } - } - }) - } -} - -func BenchmarkConnJSON(b *testing.B) { - st := &struct { - Name string - Age int - }{"json", 10} - itemx := &Item{Key: "json", Object: st, Flags: FlagJSON} - var ( - eb bytes.Buffer - je *json.Encoder - ir bytes.Reader - jd *json.Decoder - jr reader - nst test.TestItem - ) - jd = json.NewDecoder(&jr) - je = json.NewEncoder(&eb) - eb.Grow(_encodeBuf) - // NOTE reuse bytes.Buffer internal buf - // DON'T concurrency call Scan - b.ResetTimer() - for i := 0; i < b.N; i++ { - eb.Reset() - if err := je.Encode(itemx.Object); err != nil { - return - } - data := eb.Bytes() - ir.Reset(data) - jr.Reset(&ir) - jd.Decode(&nst) - } -} - -func BenchmarkConnProtobuf(b *testing.B) { - st := &test.TestItem{Name: "protobuf", Age: 10} - itemx := &Item{Key: "protobuf", Object: st, Flags: FlagJSON} - var ( - eb bytes.Buffer - nst test.TestItem - ped *proto.Buffer - ) - ped = proto.NewBuffer(eb.Bytes()) - eb.Grow(_encodeBuf) - b.ResetTimer() - for i := 0; i < b.N; i++ { - ped.Reset() - pb, ok := itemx.Object.(proto.Message) - if !ok { - return - } - if err := ped.Marshal(pb); err != nil { - return - } - data := ped.Bytes() - ped.SetBuf(data) - ped.Unmarshal(&nst) - } -} diff --git a/pkg/cache/memcache/encoding.go b/pkg/cache/memcache/encoding.go deleted file mode 100644 index 1a386af9b..000000000 --- a/pkg/cache/memcache/encoding.go +++ /dev/null @@ -1,162 +0,0 @@ -package memcache - -import ( - "bytes" - "compress/gzip" - "encoding/gob" - "encoding/json" - "io" - - "github.com/gogo/protobuf/proto" -) - -type reader struct { - io.Reader -} - -func (r *reader) Reset(rd io.Reader) { - r.Reader = rd -} - -const _encodeBuf = 4096 // 4kb - -type encodeDecode struct { - // Item Reader - ir bytes.Reader - // Compress - gr gzip.Reader - gw *gzip.Writer - cb bytes.Buffer - // Encoding - edb bytes.Buffer - // json - jr reader - jd *json.Decoder - je *json.Encoder - // protobuffer - ped *proto.Buffer -} - -func newEncodeDecoder() *encodeDecode { - ed := &encodeDecode{} - ed.jd = json.NewDecoder(&ed.jr) - ed.je = json.NewEncoder(&ed.edb) - ed.gw = gzip.NewWriter(&ed.cb) - ed.edb.Grow(_encodeBuf) - // NOTE reuse bytes.Buffer internal buf - // DON'T concurrency call Scan - ed.ped = proto.NewBuffer(ed.edb.Bytes()) - return ed -} - -func (ed *encodeDecode) encode(item *Item) (data []byte, err error) { - if (item.Flags | _flagEncoding) == _flagEncoding 
{ - if item.Value == nil { - return nil, ErrItem - } - } else if item.Object == nil { - return nil, ErrItem - } - // encoding - switch { - case item.Flags&FlagGOB == FlagGOB: - ed.edb.Reset() - if err = gob.NewEncoder(&ed.edb).Encode(item.Object); err != nil { - return - } - data = ed.edb.Bytes() - case item.Flags&FlagProtobuf == FlagProtobuf: - ed.edb.Reset() - ed.ped.SetBuf(ed.edb.Bytes()) - pb, ok := item.Object.(proto.Message) - if !ok { - err = ErrItemObject - return - } - if err = ed.ped.Marshal(pb); err != nil { - return - } - data = ed.ped.Bytes() - case item.Flags&FlagJSON == FlagJSON: - ed.edb.Reset() - if err = ed.je.Encode(item.Object); err != nil { - return - } - data = ed.edb.Bytes() - default: - data = item.Value - } - // compress - if item.Flags&FlagGzip == FlagGzip { - ed.cb.Reset() - ed.gw.Reset(&ed.cb) - if _, err = ed.gw.Write(data); err != nil { - return - } - if err = ed.gw.Close(); err != nil { - return - } - data = ed.cb.Bytes() - } - if len(data) > 8000000 { - err = ErrValueSize - } - return -} - -func (ed *encodeDecode) decode(item *Item, v interface{}) (err error) { - var ( - data []byte - rd io.Reader - ) - ed.ir.Reset(item.Value) - rd = &ed.ir - if item.Flags&FlagGzip == FlagGzip { - rd = &ed.gr - if err = ed.gr.Reset(&ed.ir); err != nil { - return - } - defer func() { - if e := ed.gr.Close(); e != nil { - err = e - } - }() - } - switch { - case item.Flags&FlagGOB == FlagGOB: - err = gob.NewDecoder(rd).Decode(v) - case item.Flags&FlagJSON == FlagJSON: - ed.jr.Reset(rd) - err = ed.jd.Decode(v) - default: - data = item.Value - if item.Flags&FlagGzip == FlagGzip { - ed.edb.Reset() - if _, err = io.Copy(&ed.edb, rd); err != nil { - return - } - data = ed.edb.Bytes() - } - if item.Flags&FlagProtobuf == FlagProtobuf { - m, ok := v.(proto.Message) - if !ok { - err = ErrItemObject - return - } - ed.ped.SetBuf(data) - err = ed.ped.Unmarshal(m) - } else { - switch v.(type) { - case *[]byte: - d := v.(*[]byte) - *d = data - case *string: - d := v.(*string) - *d = string(data) - case interface{}: - err = json.Unmarshal(data, v) - } - } - } - return -} diff --git a/pkg/cache/memcache/encoding_test.go b/pkg/cache/memcache/encoding_test.go deleted file mode 100644 index c00a45c95..000000000 --- a/pkg/cache/memcache/encoding_test.go +++ /dev/null @@ -1,220 +0,0 @@ -package memcache - -import ( - "bytes" - "testing" - - mt "github.com/go-kratos/kratos/pkg/cache/memcache/test" -) - -func TestEncode(t *testing.T) { - type TestObj struct { - Name string - Age int32 - } - testObj := TestObj{"abc", 1} - - ed := newEncodeDecoder() - tests := []struct { - name string - a *Item - r []byte - e error - }{ - { - "EncodeRawFlagErrItem", - &Item{ - Object: &TestObj{"abc", 1}, - Flags: FlagRAW, - }, - []byte{}, - ErrItem, - }, - { - "EncodeEncodeFlagErrItem", - &Item{ - Value: []byte("test"), - Flags: FlagJSON, - }, - []byte{}, - ErrItem, - }, - { - "EncodeEmpty", - &Item{ - Value: []byte(""), - Flags: FlagRAW, - }, - []byte(""), - nil, - }, - { - "EncodeMaxSize", - &Item{ - Value: bytes.Repeat([]byte("A"), 8000000), - Flags: FlagRAW, - }, - bytes.Repeat([]byte("A"), 8000000), - nil, - }, - { - "EncodeExceededMaxSize", - &Item{ - Value: bytes.Repeat([]byte("A"), 8000000+1), - Flags: FlagRAW, - }, - nil, - ErrValueSize, - }, - { - "EncodeGOB", - &Item{ - Object: testObj, - Flags: FlagGOB, - }, - []byte{38, 255, 131, 3, 1, 1, 7, 84, 101, 115, 116, 79, 98, 106, 1, 255, 132, 0, 1, 2, 1, 4, 78, 97, 109, 101, 1, 12, 0, 1, 3, 65, 103, 101, 1, 4, 0, 0, 0, 10, 255, 132, 1, 3, 97, 98, 99, 1, 2, 0}, - 
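The removed encoding.go above composes two orthogonal steps controlled by flag bits: an encoding flag (FlagGOB, FlagJSON, FlagProtobuf) selects the serializer, and FlagGzip optionally compresses the serialized bytes afterwards; decode applies the same steps in reverse. A minimal sketch of the FlagJSON|FlagGzip round trip; the flag constants copy the bit layout from the removed memcache.go, while encode and decode here are simplified stand-ins, not the pooled, buffer-reusing implementation that was deleted.

```go
package main

import (
	"bytes"
	"compress/gzip"
	"encoding/json"
	"fmt"
	"io"
)

const (
	flagJSON = uint32(1) << 1  // same bit layout as the removed memcache flags
	flagGzip = uint32(1) << 15
)

type obj struct {
	Name string
	Age  int32
}

// encode sketches the FlagJSON|FlagGzip path of the removed encodeDecode.encode.
func encode(v interface{}, flags uint32) ([]byte, error) {
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(v); err != nil { // serialize (FlagJSON)
		return nil, err
	}
	data := buf.Bytes()
	if flags&flagGzip == flagGzip { // then compress (FlagGzip)
		var cb bytes.Buffer
		gw := gzip.NewWriter(&cb)
		if _, err := gw.Write(data); err != nil {
			return nil, err
		}
		if err := gw.Close(); err != nil {
			return nil, err
		}
		data = cb.Bytes()
	}
	return data, nil
}

// decode reverses the same two steps: gunzip first, then JSON-decode.
func decode(data []byte, flags uint32, v interface{}) error {
	var rd io.Reader = bytes.NewReader(data)
	if flags&flagGzip == flagGzip {
		gr, err := gzip.NewReader(rd)
		if err != nil {
			return err
		}
		defer gr.Close()
		rd = gr
	}
	return json.NewDecoder(rd).Decode(v)
}

func main() {
	data, _ := encode(obj{Name: "abc", Age: 1}, flagJSON|flagGzip)
	var out obj
	_ = decode(data, flagJSON|flagGzip, &out)
	fmt.Printf("%d bytes on the wire -> %+v\n", len(data), out)
}
```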
nil, - }, - { - "EncodeJSON", - &Item{ - Object: testObj, - Flags: FlagJSON, - }, - []byte{123, 34, 78, 97, 109, 101, 34, 58, 34, 97, 98, 99, 34, 44, 34, 65, 103, 101, 34, 58, 49, 125, 10}, - nil, - }, - { - "EncodeProtobuf", - &Item{ - Object: &mt.TestItem{Name: "abc", Age: 1}, - Flags: FlagProtobuf, - }, - []byte{10, 3, 97, 98, 99, 16, 1}, - nil, - }, - { - "EncodeGzip", - &Item{ - Value: bytes.Repeat([]byte("B"), 50), - Flags: FlagGzip, - }, - []byte{31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 114, 34, 25, 0, 2, 0, 0, 255, 255, 252, 253, 67, 209, 50, 0, 0, 0}, - nil, - }, - { - "EncodeGOBGzip", - &Item{ - Object: testObj, - Flags: FlagGOB | FlagGzip, - }, - []byte{31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 82, 251, 223, 204, 204, 200, 200, 30, 146, 90, 92, 226, 159, 148, 197, 248, 191, 133, 129, 145, 137, 145, 197, 47, 49, 55, 149, 145, 135, 129, 145, 217, 49, 61, 149, 145, 133, 129, 129, 129, 235, 127, 11, 35, 115, 98, 82, 50, 35, 19, 3, 32, 0, 0, 255, 255, 211, 249, 1, 154, 50, 0, 0, 0}, - nil, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - if r, err := ed.encode(test.a); err != test.e { - t.Fatal(err) - } else { - if err == nil { - if !bytes.Equal(r, test.r) { - t.Fatalf("not equal, expect %v\n got %v", test.r, r) - } - } - } - }) - } -} - -func TestDecode(t *testing.T) { - type TestObj struct { - Name string - Age int32 - } - testObj := &TestObj{"abc", 1} - - ed := newEncodeDecoder() - tests := []struct { - name string - a *Item - r interface{} - e error - }{ - { - "DecodeGOB", - &Item{ - Flags: FlagGOB, - Value: []byte{38, 255, 131, 3, 1, 1, 7, 84, 101, 115, 116, 79, 98, 106, 1, 255, 132, 0, 1, 2, 1, 4, 78, 97, 109, 101, 1, 12, 0, 1, 3, 65, 103, 101, 1, 4, 0, 0, 0, 10, 255, 132, 1, 3, 97, 98, 99, 1, 2, 0}, - }, - testObj, - nil, - }, - { - "DecodeJSON", - &Item{ - Value: []byte{123, 34, 78, 97, 109, 101, 34, 58, 34, 97, 98, 99, 34, 44, 34, 65, 103, 101, 34, 58, 49, 125, 10}, - Flags: FlagJSON, - }, - testObj, - nil, - }, - { - "DecodeProtobuf", - &Item{ - Value: []byte{10, 3, 97, 98, 99, 16, 1}, - - Flags: FlagProtobuf, - }, - &mt.TestItem{Name: "abc", Age: 1}, - nil, - }, - { - "DecodeGzip", - &Item{ - Value: []byte{31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 114, 34, 25, 0, 2, 0, 0, 255, 255, 252, 253, 67, 209, 50, 0, 0, 0}, - Flags: FlagGzip, - }, - bytes.Repeat([]byte("B"), 50), - nil, - }, - { - "DecodeGOBGzip", - &Item{ - Value: []byte{31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 82, 251, 223, 204, 204, 200, 200, 30, 146, 90, 92, 226, 159, 148, 197, 248, 191, 133, 129, 145, 137, 145, 197, 47, 49, 55, 149, 145, 135, 129, 145, 217, 49, 61, 149, 145, 133, 129, 129, 129, 235, 127, 11, 35, 115, 98, 82, 50, 35, 19, 3, 32, 0, 0, 255, 255, 211, 249, 1, 154, 50, 0, 0, 0}, - Flags: FlagGOB | FlagGzip, - }, - testObj, - nil, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - if (test.a.Flags & FlagProtobuf) > 0 { - var dd mt.TestItem - if err := ed.decode(test.a, &dd); err != nil { - t.Fatal(err) - } - if (test.r.(*mt.TestItem).Name != dd.Name) || (test.r.(*mt.TestItem).Age != dd.Age) { - t.Fatalf("compare failed error, expect %v\n got %v", test.r.(*mt.TestItem), dd) - } - } else if test.a.Flags == FlagGzip { - var dd []byte - if err := ed.decode(test.a, &dd); err != nil { - t.Fatal(err) - } - if !bytes.Equal(dd, test.r.([]byte)) { - t.Fatalf("compare failed error, expect %v\n got %v", test.r, dd) - } - } else { - var dd TestObj - if err := ed.decode(test.a, &dd); err != nil { - t.Fatal(err) - } - if (test.r.(*TestObj).Name != dd.Name) || 
(test.r.(*TestObj).Age != dd.Age) { - t.Fatalf("compare failed error, expect %v\n got %v", test.r.(*TestObj), dd) - } - } - }) - } -} diff --git a/pkg/cache/memcache/errors.go b/pkg/cache/memcache/errors.go deleted file mode 100644 index db43bcfe4..000000000 --- a/pkg/cache/memcache/errors.go +++ /dev/null @@ -1,79 +0,0 @@ -package memcache - -import ( - "errors" - "fmt" - "strings" - - pkgerr "github.com/pkg/errors" -) - -var ( - // ErrNotFound not found - ErrNotFound = errors.New("memcache: key not found") - // ErrExists exists - ErrExists = errors.New("memcache: key exists") - // ErrNotStored not stored - ErrNotStored = errors.New("memcache: key not stored") - // ErrCASConflict means that a CompareAndSwap call failed due to the - // cached value being modified between the Get and the CompareAndSwap. - // If the cached value was simply evicted rather than replaced, - // ErrNotStored will be returned instead. - ErrCASConflict = errors.New("memcache: compare-and-swap conflict") - - // ErrPoolExhausted is returned from a pool connection method (Store, Get, - // Delete, IncrDecr, Err) when the maximum number of database connections - // in the pool has been reached. - ErrPoolExhausted = errors.New("memcache: connection pool exhausted") - // ErrPoolClosed pool closed - ErrPoolClosed = errors.New("memcache: connection pool closed") - // ErrConnClosed conn closed - ErrConnClosed = errors.New("memcache: connection closed") - // ErrMalformedKey is returned when an invalid key is used. - // Keys must be at maximum 250 bytes long and not - // contain whitespace or control characters. - ErrMalformedKey = errors.New("memcache: malformed key is too long or contains invalid characters") - // ErrValueSize item value size must less than 1mb - ErrValueSize = errors.New("memcache: item value size must not greater than 1mb") - // ErrStat stat error for monitor - ErrStat = errors.New("memcache unexpected errors") - // ErrItem item nil. 
- ErrItem = errors.New("memcache: item object nil") - // ErrItemObject object type Assertion failed - ErrItemObject = errors.New("memcache: item object protobuf type assertion failed") -) - -type protocolError string - -func (pe protocolError) Error() string { - return fmt.Sprintf("memcache: %s (possible server error or unsupported concurrent read by application)", string(pe)) -} - -func (pc *poolConn) formatErr(err error) string { - e := pkgerr.Cause(err) - switch e { - case ErrNotFound, ErrExists, ErrNotStored, nil: - if e == ErrNotFound { - _metricMisses.Inc(pc.p.c.Name, pc.p.c.Addr) - } - return "" - default: - es := e.Error() - switch { - case strings.HasPrefix(es, "read"): - return "read timeout" - case strings.HasPrefix(es, "dial"): - return "dial timeout" - case strings.HasPrefix(es, "write"): - return "write timeout" - case strings.Contains(es, "EOF"): - return "eof" - case strings.Contains(es, "reset"): - return "reset" - case strings.Contains(es, "broken"): - return "broken pipe" - default: - return "unexpected err" - } - } -} diff --git a/pkg/cache/memcache/example_test.go b/pkg/cache/memcache/example_test.go deleted file mode 100644 index bab5d1f00..000000000 --- a/pkg/cache/memcache/example_test.go +++ /dev/null @@ -1,177 +0,0 @@ -package memcache - -import ( - "encoding/json" - "fmt" - "time" -) - -var testExampleAddr string - -func ExampleConn_set() { - var ( - err error - value []byte - conn Conn - expire int32 = 100 - p = struct { - Name string - Age int64 - }{"golang", 10} - ) - cnop := DialConnectTimeout(time.Duration(time.Second)) - rdop := DialReadTimeout(time.Duration(time.Second)) - wrop := DialWriteTimeout(time.Duration(time.Second)) - if value, err = json.Marshal(p); err != nil { - fmt.Println(err) - return - } - if conn, err = Dial("tcp", testExampleAddr, cnop, rdop, wrop); err != nil { - fmt.Println(err) - return - } - // FlagRAW test - itemRaw := &Item{ - Key: "test_raw", - Value: value, - Expiration: expire, - } - if err = conn.Set(itemRaw); err != nil { - fmt.Println(err) - return - } - // FlagGzip - itemGZip := &Item{ - Key: "test_gzip", - Value: value, - Flags: FlagGzip, - Expiration: expire, - } - if err = conn.Set(itemGZip); err != nil { - fmt.Println(err) - return - } - // FlagGOB - itemGOB := &Item{ - Key: "test_gob", - Object: p, - Flags: FlagGOB, - Expiration: expire, - } - if err = conn.Set(itemGOB); err != nil { - fmt.Println(err) - return - } - // FlagJSON - itemJSON := &Item{ - Key: "test_json", - Object: p, - Flags: FlagJSON, - Expiration: expire, - } - if err = conn.Set(itemJSON); err != nil { - fmt.Println(err) - return - } - // FlagJSON | FlagGzip - itemJSONGzip := &Item{ - Key: "test_jsonGzip", - Object: p, - Flags: FlagJSON | FlagGzip, - Expiration: expire, - } - if err = conn.Set(itemJSONGzip); err != nil { - fmt.Println(err) - return - } - // Output: -} - -func ExampleConn_get() { - var ( - err error - item2 *Item - conn Conn - p struct { - Name string - Age int64 - } - ) - cnop := DialConnectTimeout(time.Duration(time.Second)) - rdop := DialReadTimeout(time.Duration(time.Second)) - wrop := DialWriteTimeout(time.Duration(time.Second)) - if conn, err = Dial("tcp", testExampleAddr, cnop, rdop, wrop); err != nil { - fmt.Println(err) - return - } - if item2, err = conn.Get("test_raw"); err != nil { - fmt.Println(err) - } else { - if err = conn.Scan(item2, &p); err != nil { - fmt.Printf("FlagRAW conn.Scan error(%v)\n", err) - return - } - } - // FlagGZip - if item2, err = conn.Get("test_gzip"); err != nil { - fmt.Println(err) - } else { - if 
err = conn.Scan(item2, &p); err != nil { - fmt.Printf("FlagGZip conn.Scan error(%v)\n", err) - return - } - } - // FlagGOB - if item2, err = conn.Get("test_gob"); err != nil { - fmt.Println(err) - } else { - if err = conn.Scan(item2, &p); err != nil { - fmt.Printf("FlagGOB conn.Scan error(%v)\n", err) - return - } - } - // FlagJSON - if item2, err = conn.Get("test_json"); err != nil { - fmt.Println(err) - } else { - if err = conn.Scan(item2, &p); err != nil { - fmt.Printf("FlagJSON conn.Scan error(%v)\n", err) - return - } - } - // Output: -} - -func ExampleConn_getMulti() { - var ( - err error - conn Conn - res map[string]*Item - keys = []string{"test_raw", "test_gzip"} - p struct { - Name string - Age int64 - } - ) - cnop := DialConnectTimeout(time.Duration(time.Second)) - rdop := DialReadTimeout(time.Duration(time.Second)) - wrop := DialWriteTimeout(time.Duration(time.Second)) - if conn, err = Dial("tcp", testExampleAddr, cnop, rdop, wrop); err != nil { - fmt.Println(err) - return - } - if res, err = conn.GetMulti(keys); err != nil { - fmt.Printf("conn.GetMulti(%v) error(%v)", keys, err) - return - } - for _, v := range res { - if err = conn.Scan(v, &p); err != nil { - fmt.Printf("conn.Scan error(%v)\n", err) - return - } - fmt.Println(p) - } - // Output: - //{golang 10} - //{golang 10} -} diff --git a/pkg/cache/memcache/main_test.go b/pkg/cache/memcache/main_test.go deleted file mode 100644 index 46e8b5689..000000000 --- a/pkg/cache/memcache/main_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package memcache - -import ( - "log" - "os" - "testing" - "time" - - "github.com/go-kratos/kratos/pkg/container/pool" - xtime "github.com/go-kratos/kratos/pkg/time" -) - -var testConnASCII Conn -var testMemcache *Memcache -var testPool *Pool -var testMemcacheAddr string - -func setupTestConnASCII(addr string) { - var err error - cnop := DialConnectTimeout(time.Duration(2 * time.Second)) - rdop := DialReadTimeout(time.Duration(2 * time.Second)) - wrop := DialWriteTimeout(time.Duration(2 * time.Second)) - testConnASCII, err = Dial("tcp", addr, cnop, rdop, wrop) - if err != nil { - log.Fatal(err) - } - testConnASCII.Delete("test") - testConnASCII.Delete("test1") - testConnASCII.Delete("test2") - if err != nil { - log.Fatal(err) - } -} - -func setupTestMemcache(addr string) { - testConfig := &Config{ - Config: &pool.Config{ - Active: 10, - Idle: 10, - IdleTimeout: xtime.Duration(time.Second), - WaitTimeout: xtime.Duration(time.Second), - Wait: false, - }, - Addr: addr, - Proto: "tcp", - DialTimeout: xtime.Duration(time.Second), - ReadTimeout: xtime.Duration(time.Second), - WriteTimeout: xtime.Duration(time.Second), - } - testMemcache = New(testConfig) -} - -func setupTestPool(addr string) { - config := &Config{ - Name: "test", - Proto: "tcp", - Addr: addr, - DialTimeout: xtime.Duration(time.Second), - ReadTimeout: xtime.Duration(time.Second), - WriteTimeout: xtime.Duration(time.Second), - } - config.Config = &pool.Config{ - Active: 10, - Idle: 5, - IdleTimeout: xtime.Duration(90 * time.Second), - } - testPool = NewPool(config) -} - -func TestMain(m *testing.M) { - testMemcacheAddr = os.Getenv("TEST_MEMCACHE_ADDR") - if testExampleAddr == "" { - log.Print("TEST_MEMCACHE_ADDR not provide skip test.") - // ignored test. - os.Exit(0) - } - setupTestConnASCII(testMemcacheAddr) - setupTestMemcache(testMemcacheAddr) - setupTestPool(testMemcacheAddr) - // TODO: add setupexample? 
- testExampleAddr = testMemcacheAddr - - ret := m.Run() - os.Exit(ret) -} diff --git a/pkg/cache/memcache/memcache.go b/pkg/cache/memcache/memcache.go deleted file mode 100644 index ce51a0720..000000000 --- a/pkg/cache/memcache/memcache.go +++ /dev/null @@ -1,377 +0,0 @@ -package memcache - -import ( - "context" - - "github.com/go-kratos/kratos/pkg/container/pool" - xtime "github.com/go-kratos/kratos/pkg/time" -) - -const ( - // Flag, 15(encoding) bit+ 17(compress) bit - - // FlagRAW default flag. - FlagRAW = uint32(0) - // FlagGOB gob encoding. - FlagGOB = uint32(1) << 0 - // FlagJSON json encoding. - FlagJSON = uint32(1) << 1 - // FlagProtobuf protobuf - FlagProtobuf = uint32(1) << 2 - - _flagEncoding = uint32(0xFFFF8000) - - // FlagGzip gzip compress. - FlagGzip = uint32(1) << 15 - - // left mv 31??? not work!!! - flagLargeValue = uint32(1) << 30 -) - -// Item is an reply to be got or stored in a memcached server. -type Item struct { - // Key is the Item's key (250 bytes maximum). - Key string - - // Value is the Item's value. - Value []byte - - // Object is the Item's object for use codec. - Object interface{} - - // Flags are server-opaque flags whose semantics are entirely - // up to the app. - Flags uint32 - - // Expiration is the cache expiration time, in seconds: either a relative - // time from now (up to 1 month), or an absolute Unix epoch time. - // Zero means the Item has no expiration time. - Expiration int32 - - // Compare and swap ID. - cas uint64 -} - -// Conn represents a connection to a Memcache server. -// Command Reference: https://github.com/memcached/memcached/wiki/Commands -type Conn interface { - // Close closes the connection. - Close() error - - // Err returns a non-nil value if the connection is broken. The returned - // value is either the first non-nil value returned from the underlying - // network connection or a protocol parsing error. Applications should - // close broken connections. - Err() error - - // Add writes the given item, if no value already exists for its key. - // ErrNotStored is returned if that condition is not met. - Add(item *Item) error - - // Set writes the given item, unconditionally. - Set(item *Item) error - - // Replace writes the given item, but only if the server *does* already - // hold data for this key. - Replace(item *Item) error - - // Get sends a command to the server for gets data. - Get(key string) (*Item, error) - - // GetMulti is a batch version of Get. The returned map from keys to items - // may have fewer elements than the input slice, due to memcache cache - // misses. Each key must be at most 250 bytes in length. - // If no error is returned, the returned map will also be non-nil. - GetMulti(keys []string) (map[string]*Item, error) - - // Delete deletes the item with the provided key. - // The error ErrNotFound is returned if the item didn't already exist in - // the cache. - Delete(key string) error - - // Increment atomically increments key by delta. The return value is the - // new value after being incremented or an error. If the value didn't exist - // in memcached the error is ErrNotFound. The value in memcached must be - // an decimal number, or an error will be returned. - // On 64-bit overflow, the new value wraps around. - Increment(key string, delta uint64) (newValue uint64, err error) - - // Decrement atomically decrements key by delta. The return value is the - // new value after being decremented or an error. If the value didn't exist - // in memcached the error is ErrNotFound. 
The value in memcached must be - // an decimal number, or an error will be returned. On underflow, the new - // value is capped at zero and does not wrap around. - Decrement(key string, delta uint64) (newValue uint64, err error) - - // CompareAndSwap writes the given item that was previously returned by - // Get, if the value was neither modified or evicted between the Get and - // the CompareAndSwap calls. The item's Key should not change between calls - // but all other item fields may differ. ErrCASConflict is returned if the - // value was modified in between the calls. - // ErrNotStored is returned if the value was evicted in between the calls. - CompareAndSwap(item *Item) error - - // Touch updates the expiry for the given key. The seconds parameter is - // either a Unix timestamp or, if seconds is less than 1 month, the number - // of seconds into the future at which time the item will expire. - // ErrNotFound is returned if the key is not in the cache. The key must be - // at most 250 bytes in length. - Touch(key string, seconds int32) (err error) - - // Scan converts value read from the memcache into the following - // common Go types and special types: - // - // *string - // *[]byte - // *interface{} - // - Scan(item *Item, v interface{}) (err error) - - // Add writes the given item, if no value already exists for its key. - // ErrNotStored is returned if that condition is not met. - AddContext(ctx context.Context, item *Item) error - - // Set writes the given item, unconditionally. - SetContext(ctx context.Context, item *Item) error - - // Replace writes the given item, but only if the server *does* already - // hold data for this key. - ReplaceContext(ctx context.Context, item *Item) error - - // Get sends a command to the server for gets data. - GetContext(ctx context.Context, key string) (*Item, error) - - // GetMulti is a batch version of Get. The returned map from keys to items - // may have fewer elements than the input slice, due to memcache cache - // misses. Each key must be at most 250 bytes in length. - // If no error is returned, the returned map will also be non-nil. - GetMultiContext(ctx context.Context, keys []string) (map[string]*Item, error) - - // Delete deletes the item with the provided key. - // The error ErrNotFound is returned if the item didn't already exist in - // the cache. - DeleteContext(ctx context.Context, key string) error - - // Increment atomically increments key by delta. The return value is the - // new value after being incremented or an error. If the value didn't exist - // in memcached the error is ErrNotFound. The value in memcached must be - // an decimal number, or an error will be returned. - // On 64-bit overflow, the new value wraps around. - IncrementContext(ctx context.Context, key string, delta uint64) (newValue uint64, err error) - - // Decrement atomically decrements key by delta. The return value is the - // new value after being decremented or an error. If the value didn't exist - // in memcached the error is ErrNotFound. The value in memcached must be - // an decimal number, or an error will be returned. On underflow, the new - // value is capped at zero and does not wrap around. - DecrementContext(ctx context.Context, key string, delta uint64) (newValue uint64, err error) - - // CompareAndSwap writes the given item that was previously returned by - // Get, if the value was neither modified or evicted between the Get and - // the CompareAndSwap calls. 
The item's Key should not change between calls - // but all other item fields may differ. ErrCASConflict is returned if the - // value was modified in between the calls. - // ErrNotStored is returned if the value was evicted in between the calls. - CompareAndSwapContext(ctx context.Context, item *Item) error - - // Touch updates the expiry for the given key. The seconds parameter is - // either a Unix timestamp or, if seconds is less than 1 month, the number - // of seconds into the future at which time the item will expire. - // ErrNotFound is returned if the key is not in the cache. The key must be - // at most 250 bytes in length. - TouchContext(ctx context.Context, key string, seconds int32) (err error) -} - -// Config memcache config. -type Config struct { - *pool.Config - - Name string // memcache name, for trace - Proto string - Addr string - DialTimeout xtime.Duration - ReadTimeout xtime.Duration - WriteTimeout xtime.Duration -} - -// Memcache memcache client -type Memcache struct { - pool *Pool -} - -// Reply is the result of Get -type Reply struct { - err error - item *Item - conn Conn - closed bool -} - -// Replies is the result of GetMulti -type Replies struct { - err error - items map[string]*Item - usedItems map[string]struct{} - conn Conn - closed bool -} - -// New get a memcache client -func New(cfg *Config) *Memcache { - return &Memcache{pool: NewPool(cfg)} -} - -// Close close connection pool -func (mc *Memcache) Close() error { - return mc.pool.Close() -} - -// Conn direct get a connection -func (mc *Memcache) Conn(ctx context.Context) Conn { - return mc.pool.Get(ctx) -} - -// Set writes the given item, unconditionally. -func (mc *Memcache) Set(ctx context.Context, item *Item) (err error) { - conn := mc.pool.Get(ctx) - err = conn.SetContext(ctx, item) - conn.Close() - return -} - -// Add writes the given item, if no value already exists for its key. -// ErrNotStored is returned if that condition is not met. -func (mc *Memcache) Add(ctx context.Context, item *Item) (err error) { - conn := mc.pool.Get(ctx) - err = conn.AddContext(ctx, item) - conn.Close() - return -} - -// Replace writes the given item, but only if the server *does* already hold data for this key. -func (mc *Memcache) Replace(ctx context.Context, item *Item) (err error) { - conn := mc.pool.Get(ctx) - err = conn.ReplaceContext(ctx, item) - conn.Close() - return -} - -// CompareAndSwap writes the given item that was previously returned by Get -func (mc *Memcache) CompareAndSwap(ctx context.Context, item *Item) (err error) { - conn := mc.pool.Get(ctx) - err = conn.CompareAndSwapContext(ctx, item) - conn.Close() - return -} - -// Get sends a command to the server for gets data. 
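The Conn interface comments above spell out the compare-and-swap contract: Get returns an item carrying a hidden cas token, and CompareAndSwap fails with ErrCASConflict if the value changed in between (or ErrNotStored if it was evicted). The usual way to consume that contract is a read-modify-write retry loop. Below is a self-contained sketch against a toy in-memory store; the store type and the casIncrement helper are hypothetical stand-ins for the removed memcache client, shown only to illustrate the retry pattern.

```go
package main

import (
	"errors"
	"fmt"
	"strconv"
	"sync"
)

var errCASConflict = errors.New("memcache: compare-and-swap conflict")

// store is a toy stand-in for memcached: every successful write bumps a cas token.
type store struct {
	mu  sync.Mutex
	val string
	cas uint64
}

func (s *store) get() (string, uint64) {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.val, s.cas
}

func (s *store) compareAndSwap(val string, cas uint64) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if cas != s.cas {
		return errCASConflict // someone modified the value since our get
	}
	s.val, s.cas = val, s.cas+1
	return nil
}

// casIncrement shows the read-modify-write loop the CAS contract implies:
// retry whenever another writer won the race.
func casIncrement(s *store) error {
	for {
		v, cas := s.get()
		n, err := strconv.Atoi(v)
		if err != nil {
			return err
		}
		err = s.compareAndSwap(strconv.Itoa(n+1), cas)
		if err == nil {
			return nil
		}
		if !errors.Is(err, errCASConflict) {
			return err
		}
		// conflict: fall through and retry with a fresh get
	}
}

func main() {
	s := &store{val: "41"}
	if err := casIncrement(s); err != nil {
		fmt.Println("err:", err)
		return
	}
	v, _ := s.get()
	fmt.Println(v) // 42
}
```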
-func (mc *Memcache) Get(ctx context.Context, key string) *Reply { - conn := mc.pool.Get(ctx) - item, err := conn.GetContext(ctx, key) - if err != nil { - conn.Close() - } - return &Reply{err: err, item: item, conn: conn} -} - -// Item get raw Item -func (r *Reply) Item() *Item { - return r.item -} - -// Scan converts value, read from the memcache -func (r *Reply) Scan(v interface{}) (err error) { - if r.err != nil { - return r.err - } - err = r.conn.Scan(r.item, v) - if !r.closed { - r.conn.Close() - r.closed = true - } - return -} - -// GetMulti is a batch version of Get -func (mc *Memcache) GetMulti(ctx context.Context, keys []string) (*Replies, error) { - conn := mc.pool.Get(ctx) - items, err := conn.GetMultiContext(ctx, keys) - rs := &Replies{err: err, items: items, conn: conn, usedItems: make(map[string]struct{}, len(keys))} - if (err != nil) || (len(items) == 0) { - rs.Close() - } - return rs, err -} - -// Close close rows. -func (rs *Replies) Close() (err error) { - if !rs.closed { - err = rs.conn.Close() - rs.closed = true - } - return -} - -// Item get Item from rows -func (rs *Replies) Item(key string) *Item { - return rs.items[key] -} - -// Scan converts value, read from key in rows -func (rs *Replies) Scan(key string, v interface{}) (err error) { - if rs.err != nil { - return rs.err - } - item, ok := rs.items[key] - if !ok { - rs.Close() - return ErrNotFound - } - rs.usedItems[key] = struct{}{} - err = rs.conn.Scan(item, v) - if (err != nil) || (len(rs.items) == len(rs.usedItems)) { - rs.Close() - } - return -} - -// Keys keys of result -func (rs *Replies) Keys() (keys []string) { - keys = make([]string, 0, len(rs.items)) - for key := range rs.items { - keys = append(keys, key) - } - return -} - -// Touch updates the expiry for the given key. -func (mc *Memcache) Touch(ctx context.Context, key string, timeout int32) (err error) { - conn := mc.pool.Get(ctx) - err = conn.TouchContext(ctx, key, timeout) - conn.Close() - return -} - -// Delete deletes the item with the provided key. -func (mc *Memcache) Delete(ctx context.Context, key string) (err error) { - conn := mc.pool.Get(ctx) - err = conn.DeleteContext(ctx, key) - conn.Close() - return -} - -// Increment atomically increments key by delta. -func (mc *Memcache) Increment(ctx context.Context, key string, delta uint64) (newValue uint64, err error) { - conn := mc.pool.Get(ctx) - newValue, err = conn.IncrementContext(ctx, key, delta) - conn.Close() - return -} - -// Decrement atomically decrements key by delta. 
-func (mc *Memcache) Decrement(ctx context.Context, key string, delta uint64) (newValue uint64, err error) { - conn := mc.pool.Get(ctx) - newValue, err = conn.DecrementContext(ctx, key, delta) - conn.Close() - return -} diff --git a/pkg/cache/memcache/memcache_test.go b/pkg/cache/memcache/memcache_test.go deleted file mode 100644 index 878841c6a..000000000 --- a/pkg/cache/memcache/memcache_test.go +++ /dev/null @@ -1,300 +0,0 @@ -package memcache - -import ( - "context" - "fmt" - "reflect" - "testing" - "time" -) - -func Test_client_Set(t *testing.T) { - type args struct { - c context.Context - item *Item - } - tests := []struct { - name string - args args - wantErr bool - }{ - {name: "set value", args: args{c: context.Background(), item: &Item{Key: "Test_client_Set", Value: []byte("abc")}}, wantErr: false}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := testMemcache.Set(tt.args.c, tt.args.item); (err != nil) != tt.wantErr { - t.Errorf("client.Set() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func Test_client_Add(t *testing.T) { - type args struct { - c context.Context - item *Item - } - key := fmt.Sprintf("Test_client_Add_%d", time.Now().Unix()) - tests := []struct { - name string - args args - wantErr bool - }{ - {name: "add not exist value", args: args{c: context.Background(), item: &Item{Key: key, Value: []byte("abc")}}, wantErr: false}, - {name: "add exist value", args: args{c: context.Background(), item: &Item{Key: key, Value: []byte("abc")}}, wantErr: true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := testMemcache.Add(tt.args.c, tt.args.item); (err != nil) != tt.wantErr { - t.Errorf("client.Add() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func Test_client_Replace(t *testing.T) { - key := fmt.Sprintf("Test_client_Replace_%d", time.Now().Unix()) - ekey := "Test_client_Replace_exist" - testMemcache.Set(context.Background(), &Item{Key: ekey, Value: []byte("ok")}) - type args struct { - c context.Context - item *Item - } - tests := []struct { - name string - args args - wantErr bool - }{ - {name: "not exist value", args: args{c: context.Background(), item: &Item{Key: key, Value: []byte("abc")}}, wantErr: true}, - {name: "exist value", args: args{c: context.Background(), item: &Item{Key: ekey, Value: []byte("abc")}}, wantErr: false}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := testMemcache.Replace(tt.args.c, tt.args.item); (err != nil) != tt.wantErr { - t.Errorf("client.Replace() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func Test_client_CompareAndSwap(t *testing.T) { - key := fmt.Sprintf("Test_client_CompareAndSwap_%d", time.Now().Unix()) - ekey := "Test_client_CompareAndSwap_k" - testMemcache.Set(context.Background(), &Item{Key: ekey, Value: []byte("old")}) - cas := testMemcache.Get(context.Background(), ekey).Item().cas - type args struct { - c context.Context - item *Item - } - tests := []struct { - name string - args args - wantErr bool - }{ - {name: "not exist value", args: args{c: context.Background(), item: &Item{Key: key, Value: []byte("abc")}}, wantErr: true}, - {name: "exist value", args: args{c: context.Background(), item: &Item{Key: ekey, cas: cas, Value: []byte("abc")}}, wantErr: false}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := testMemcache.CompareAndSwap(tt.args.c, tt.args.item); (err != nil) != tt.wantErr { - t.Errorf("client.CompareAndSwap() error = %v, wantErr %v", 
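The removed memcache.go above wraps every pooled operation the same way: Get borrows a connection from the pool and returns a Reply that carries the item, the connection, and any error, and Reply.Scan both decodes the value and releases the connection exactly once. A self-contained sketch of that handle pattern with stand-in types; conn, get and Reply here are illustrative only, not the deleted implementation.

```go
package main

import (
	"errors"
	"fmt"
)

// conn stands in for the pooled memcache connection of the removed code.
type conn struct{ released bool }

func (c *conn) get(key string) ([]byte, error) {
	if key == "missing" {
		return nil, errors.New("memcache: key not found")
	}
	return []byte("value-of-" + key), nil
}

func (c *conn) close() { c.released = true }

// Reply mirrors the removed Memcache.Get design: it carries the item, the
// connection it came from, and any error; Scan both decodes and releases.
type Reply struct {
	err    error
	value  []byte
	conn   *conn
	closed bool
}

func get(c *conn, key string) *Reply {
	v, err := c.get(key)
	if err != nil {
		c.close() // on error the connection goes back to the pool immediately
	}
	return &Reply{err: err, value: v, conn: c}
}

// Scan propagates any earlier error, copies the value out, and releases the
// connection exactly once, like the removed Reply.Scan.
func (r *Reply) Scan(dst *string) error {
	if r.err != nil {
		return r.err
	}
	*dst = string(r.value)
	if !r.closed {
		r.conn.close()
		r.closed = true
	}
	return nil
}

func main() {
	c := &conn{}
	var s string
	if err := get(c, "k").Scan(&s); err != nil {
		fmt.Println("err:", err)
		return
	}
	fmt.Println(s, "conn released:", c.released) // value-of-k conn released: true
}
```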
err, tt.wantErr) - } - }) - } -} - -func Test_client_Get(t *testing.T) { - key := fmt.Sprintf("Test_client_Get_%d", time.Now().Unix()) - ekey := "Test_client_Get_k" - testMemcache.Set(context.Background(), &Item{Key: ekey, Value: []byte("old")}) - type args struct { - c context.Context - key string - } - tests := []struct { - name string - args args - want string - wantErr bool - }{ - {name: "not exist value", args: args{c: context.Background(), key: key}, wantErr: true}, - {name: "exist value", args: args{c: context.Background(), key: ekey}, wantErr: false, want: "old"}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - var res string - if err := testMemcache.Get(tt.args.c, tt.args.key).Scan(&res); (err != nil) != tt.wantErr || res != tt.want { - t.Errorf("client.Get() = %v, want %v, got err: %v, want err: %v", err, tt.want, err, tt.wantErr) - } - }) - } -} - -func Test_client_Touch(t *testing.T) { - key := fmt.Sprintf("Test_client_Touch_%d", time.Now().Unix()) - ekey := "Test_client_Touch_k" - testMemcache.Set(context.Background(), &Item{Key: ekey, Value: []byte("old")}) - type args struct { - c context.Context - key string - timeout int32 - } - tests := []struct { - name string - args args - wantErr bool - }{ - {name: "not exist value", args: args{c: context.Background(), key: key, timeout: 100000}, wantErr: true}, - {name: "exist value", args: args{c: context.Background(), key: ekey, timeout: 100000}, wantErr: false}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := testMemcache.Touch(tt.args.c, tt.args.key, tt.args.timeout); (err != nil) != tt.wantErr { - t.Errorf("client.Touch() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func Test_client_Delete(t *testing.T) { - key := fmt.Sprintf("Test_client_Delete_%d", time.Now().Unix()) - ekey := "Test_client_Delete_k" - testMemcache.Set(context.Background(), &Item{Key: ekey, Value: []byte("old")}) - type args struct { - c context.Context - key string - } - tests := []struct { - name string - args args - wantErr bool - }{ - {name: "not exist value", args: args{c: context.Background(), key: key}, wantErr: true}, - {name: "exist value", args: args{c: context.Background(), key: ekey}, wantErr: false}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := testMemcache.Delete(tt.args.c, tt.args.key); (err != nil) != tt.wantErr { - t.Errorf("client.Delete() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func Test_client_Increment(t *testing.T) { - key := fmt.Sprintf("Test_client_Increment_%d", time.Now().Unix()) - ekey := "Test_client_Increment_k" - testMemcache.Set(context.Background(), &Item{Key: ekey, Value: []byte("1")}) - type args struct { - c context.Context - key string - delta uint64 - } - tests := []struct { - name string - args args - wantNewValue uint64 - wantErr bool - }{ - {name: "not exist value", args: args{c: context.Background(), key: key, delta: 10}, wantErr: true}, - {name: "exist value", args: args{c: context.Background(), key: ekey, delta: 10}, wantErr: false, wantNewValue: 11}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotNewValue, err := testMemcache.Increment(tt.args.c, tt.args.key, tt.args.delta) - if (err != nil) != tt.wantErr { - t.Errorf("client.Increment() error = %v, wantErr %v", err, tt.wantErr) - return - } - if gotNewValue != tt.wantNewValue { - t.Errorf("client.Increment() = %v, want %v", gotNewValue, tt.wantNewValue) - } - }) - } -} - -func Test_client_Decrement(t 
*testing.T) { - key := fmt.Sprintf("Test_client_Decrement_%d", time.Now().Unix()) - ekey := "Test_client_Decrement_k" - testMemcache.Set(context.Background(), &Item{Key: ekey, Value: []byte("100")}) - type args struct { - c context.Context - key string - delta uint64 - } - tests := []struct { - name string - args args - wantNewValue uint64 - wantErr bool - }{ - {name: "not exist value", args: args{c: context.Background(), key: key, delta: 10}, wantErr: true}, - {name: "exist value", args: args{c: context.Background(), key: ekey, delta: 10}, wantErr: false, wantNewValue: 90}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotNewValue, err := testMemcache.Decrement(tt.args.c, tt.args.key, tt.args.delta) - if (err != nil) != tt.wantErr { - t.Errorf("client.Decrement() error = %v, wantErr %v", err, tt.wantErr) - return - } - if gotNewValue != tt.wantNewValue { - t.Errorf("client.Decrement() = %v, want %v", gotNewValue, tt.wantNewValue) - } - }) - } -} - -func Test_client_GetMulti(t *testing.T) { - key := fmt.Sprintf("Test_client_GetMulti_%d", time.Now().Unix()) - ekey1 := "Test_client_GetMulti_k1" - ekey2 := "Test_client_GetMulti_k2" - testMemcache.Set(context.Background(), &Item{Key: ekey1, Value: []byte("1")}) - testMemcache.Set(context.Background(), &Item{Key: ekey2, Value: []byte("2")}) - keys := []string{key, ekey1, ekey2} - rows, err := testMemcache.GetMulti(context.Background(), keys) - if err != nil { - t.Errorf("client.GetMulti() error = %v, wantErr %v", err, nil) - } - tests := []struct { - key string - wantNewValue string - wantErr bool - nilItem bool - }{ - {key: ekey1, wantErr: false, wantNewValue: "1", nilItem: false}, - {key: ekey2, wantErr: false, wantNewValue: "2", nilItem: false}, - {key: key, wantErr: true, nilItem: true}, - } - if reflect.DeepEqual(keys, rows.Keys()) { - t.Errorf("got %v, expect: %v", rows.Keys(), keys) - } - for _, tt := range tests { - t.Run(tt.key, func(t *testing.T) { - var gotNewValue string - err = rows.Scan(tt.key, &gotNewValue) - if (err != nil) != tt.wantErr { - t.Errorf("rows.Scan() error = %v, wantErr %v", err, tt.wantErr) - return - } - if gotNewValue != tt.wantNewValue { - t.Errorf("rows.Value() = %v, want %v", gotNewValue, tt.wantNewValue) - } - if (rows.Item(tt.key) == nil) != tt.nilItem { - t.Errorf("rows.Item() = %v, want %v", rows.Item(tt.key) == nil, tt.nilItem) - } - }) - } - err = rows.Close() - if err != nil { - t.Errorf("client.Replies.Close() error = %v, wantErr %v", err, nil) - } -} - -func Test_client_Conn(t *testing.T) { - conn := testMemcache.Conn(context.Background()) - defer conn.Close() - if conn == nil { - t.Errorf("expect get conn, get nil") - } -} diff --git a/pkg/cache/memcache/metrics.go b/pkg/cache/memcache/metrics.go deleted file mode 100644 index 0eeb60573..000000000 --- a/pkg/cache/memcache/metrics.go +++ /dev/null @@ -1,51 +0,0 @@ -package memcache - -import "github.com/go-kratos/kratos/pkg/stat/metric" - -const namespace = "memcache_client" - -var ( - _metricReqDur = metric.NewHistogramVec(&metric.HistogramVecOpts{ - Namespace: namespace, - Subsystem: "requests", - Name: "duration_ms", - Help: "memcache client requests duration(ms).", - Labels: []string{"name", "addr", "command"}, - Buckets: []float64{5, 10, 25, 50, 100, 250, 500, 1000, 2500}, - }) - _metricReqErr = metric.NewCounterVec(&metric.CounterVecOpts{ - Namespace: namespace, - Subsystem: "requests", - Name: "error_total", - Help: "memcache client requests error count.", - Labels: []string{"name", "addr", "command", "error"}, - }) - 
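The GetMulti tests above pin down a contract that is easy to miss: a missing key does not fail the batch call itself, it only shows up later when Scan is called for that key or Item(key) returns nil, and the returned replies must always be closed. A minimal sketch of that pattern, assuming a *Memcache client constructed elsewhere (its constructor is not part of this diff); the package name, key names and helper are placeholders:

package memcacheexample

import (
	"context"

	"github.com/go-kratos/kratos/pkg/cache/memcache"
)

// readBatch sketches GetMulti handling; mc is assumed to be a client
// built elsewhere (e.g. via the New constructor the deprecation notes
// in this diff point at).
func readBatch(ctx context.Context, mc *memcache.Memcache, keys []string) (map[string]string, error) {
	rows, err := mc.GetMulti(ctx, keys)
	if err != nil {
		return nil, err // only transport-level failures end up here
	}
	defer rows.Close() // always release the underlying connection

	out := make(map[string]string, len(keys))
	for _, k := range rows.Keys() { // Keys() lists only the keys that were found
		var v string
		if err := rows.Scan(k, &v); err != nil {
			return nil, err
		}
		out[k] = v
	}
	return out, nil
}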
_metricConnTotal = metric.NewCounterVec(&metric.CounterVecOpts{ - Namespace: namespace, - Subsystem: "connections", - Name: "total", - Help: "memcache client connections total count.", - Labels: []string{"name", "addr", "state"}, - }) - _metricConnCurrent = metric.NewGaugeVec(&metric.GaugeVecOpts{ - Namespace: namespace, - Subsystem: "connections", - Name: "current", - Help: "memcache client connections current.", - Labels: []string{"name", "addr", "state"}, - }) - _metricHits = metric.NewCounterVec(&metric.CounterVecOpts{ - Namespace: namespace, - Subsystem: "", - Name: "hits_total", - Help: "memcache client hits total.", - Labels: []string{"name", "addr"}, - }) - _metricMisses = metric.NewCounterVec(&metric.CounterVecOpts{ - Namespace: namespace, - Subsystem: "", - Name: "misses_total", - Help: "memcache client misses total.", - Labels: []string{"name", "addr"}, - }) -) diff --git a/pkg/cache/memcache/pool_conn.go b/pkg/cache/memcache/pool_conn.go deleted file mode 100644 index 6de7a31cd..000000000 --- a/pkg/cache/memcache/pool_conn.go +++ /dev/null @@ -1,203 +0,0 @@ -package memcache - -import ( - "context" - "fmt" - "io" - "time" - - "github.com/go-kratos/kratos/pkg/container/pool" -) - -// Pool memcache connection pool struct. -// Deprecated: Use Memcache instead -type Pool struct { - p pool.Pool - c *Config -} - -// NewPool new a memcache conn pool. -// Deprecated: Use New instead -func NewPool(cfg *Config) (p *Pool) { - if cfg.DialTimeout <= 0 || cfg.ReadTimeout <= 0 || cfg.WriteTimeout <= 0 { - panic("must config memcache timeout") - } - p1 := pool.NewList(cfg.Config) - cnop := DialConnectTimeout(time.Duration(cfg.DialTimeout)) - rdop := DialReadTimeout(time.Duration(cfg.ReadTimeout)) - wrop := DialWriteTimeout(time.Duration(cfg.WriteTimeout)) - p1.New = func(ctx context.Context) (io.Closer, error) { - conn, err := Dial(cfg.Proto, cfg.Addr, cnop, rdop, wrop) - return newTraceConn(conn, fmt.Sprintf("%s://%s", cfg.Proto, cfg.Addr)), err - } - p = &Pool{p: p1, c: cfg} - return -} - -// Get gets a connection. The application must close the returned connection. -// This method always returns a valid connection so that applications can defer -// error handling to the first use of the connection. If there is an error -// getting an underlying connection, then the connection Err, Do, Send, Flush -// and Receive methods return that error. -func (p *Pool) Get(ctx context.Context) Conn { - c, err := p.p.Get(ctx) - if err != nil { - return errConn{err} - } - c1, _ := c.(Conn) - return &poolConn{p: p, c: c1, ctx: ctx} -} - -// Close release the resources used by the pool. 
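As the Get comment above spells out, the pool always hands back a usable Conn and defers any dial error to the first command, so callers do not check an error on Get itself. A short sketch of that lifecycle against the deprecated Pool API shown here; the config values are placeholders modelled on the tests below:

package main

import (
	"context"
	"time"

	"github.com/go-kratos/kratos/pkg/cache/memcache"
	"github.com/go-kratos/kratos/pkg/container/pool"
	xtime "github.com/go-kratos/kratos/pkg/time"
)

func main() {
	// Placeholder config; NewPool panics unless all three timeouts are set.
	p := memcache.NewPool(&memcache.Config{
		Config:       &pool.Config{Active: 10, Idle: 5, IdleTimeout: xtime.Duration(90 * time.Second)},
		Name:         "example",
		Proto:        "tcp",
		Addr:         "127.0.0.1:11211",
		DialTimeout:  xtime.Duration(time.Second),
		ReadTimeout:  xtime.Duration(time.Second),
		WriteTimeout: xtime.Duration(time.Second),
	})
	defer p.Close()

	// Get never returns an error; a broken pool hands back a conn whose
	// first command reports the failure.
	conn := p.Get(context.Background())
	defer conn.Close() // returns the connection to the pool

	if err := conn.Set(&memcache.Item{Key: "greeting", Value: []byte("hello"), Expiration: 60}); err != nil {
		panic(err)
	}
	if item, err := conn.Get("greeting"); err == nil {
		_ = item.Value
	}
}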
-func (p *Pool) Close() error { - return p.p.Close() -} - -type poolConn struct { - c Conn - p *Pool - ctx context.Context -} - -func (pc *poolConn) pstat(key string, t time.Time, err error) { - _metricReqDur.Observe(int64(time.Since(t)/time.Millisecond), pc.p.c.Name, pc.p.c.Addr, key) - if err != nil { - if msg := pc.formatErr(err); msg != "" { - _metricReqErr.Inc(pc.p.c.Name, pc.p.c.Addr, key, msg) - } - return - } - _metricHits.Inc(pc.p.c.Name, pc.p.c.Addr) -} - -func (pc *poolConn) Close() error { - c := pc.c - if _, ok := c.(errConn); ok { - return nil - } - pc.c = errConn{ErrConnClosed} - pc.p.p.Put(context.Background(), c, c.Err() != nil) - return nil -} - -func (pc *poolConn) Err() error { - return pc.c.Err() -} - -func (pc *poolConn) Set(item *Item) (err error) { - return pc.SetContext(pc.ctx, item) -} - -func (pc *poolConn) Add(item *Item) (err error) { - return pc.AddContext(pc.ctx, item) -} - -func (pc *poolConn) Replace(item *Item) (err error) { - return pc.ReplaceContext(pc.ctx, item) -} - -func (pc *poolConn) CompareAndSwap(item *Item) (err error) { - return pc.CompareAndSwapContext(pc.ctx, item) -} - -func (pc *poolConn) Get(key string) (r *Item, err error) { - return pc.GetContext(pc.ctx, key) -} - -func (pc *poolConn) GetMulti(keys []string) (res map[string]*Item, err error) { - return pc.GetMultiContext(pc.ctx, keys) -} - -func (pc *poolConn) Touch(key string, timeout int32) (err error) { - return pc.TouchContext(pc.ctx, key, timeout) -} - -func (pc *poolConn) Scan(item *Item, v interface{}) error { - return pc.c.Scan(item, v) -} - -func (pc *poolConn) Delete(key string) (err error) { - return pc.DeleteContext(pc.ctx, key) -} - -func (pc *poolConn) Increment(key string, delta uint64) (newValue uint64, err error) { - return pc.IncrementContext(pc.ctx, key, delta) -} - -func (pc *poolConn) Decrement(key string, delta uint64) (newValue uint64, err error) { - return pc.DecrementContext(pc.ctx, key, delta) -} - -func (pc *poolConn) AddContext(ctx context.Context, item *Item) error { - now := time.Now() - err := pc.c.AddContext(ctx, item) - pc.pstat("add", now, err) - return err -} - -func (pc *poolConn) SetContext(ctx context.Context, item *Item) error { - now := time.Now() - err := pc.c.SetContext(ctx, item) - pc.pstat("set", now, err) - return err -} - -func (pc *poolConn) ReplaceContext(ctx context.Context, item *Item) error { - now := time.Now() - err := pc.c.ReplaceContext(ctx, item) - pc.pstat("replace", now, err) - return err -} - -func (pc *poolConn) GetContext(ctx context.Context, key string) (*Item, error) { - now := time.Now() - item, err := pc.c.Get(key) - pc.pstat("get", now, err) - return item, err -} - -func (pc *poolConn) GetMultiContext(ctx context.Context, keys []string) (map[string]*Item, error) { - // if keys is empty slice returns empty map direct - if len(keys) == 0 { - return make(map[string]*Item), nil - } - now := time.Now() - items, err := pc.c.GetMulti(keys) - pc.pstat("gets", now, err) - return items, err -} - -func (pc *poolConn) DeleteContext(ctx context.Context, key string) error { - now := time.Now() - err := pc.c.Delete(key) - pc.pstat("delete", now, err) - return err -} - -func (pc *poolConn) IncrementContext(ctx context.Context, key string, delta uint64) (uint64, error) { - now := time.Now() - newValue, err := pc.c.IncrementContext(ctx, key, delta) - pc.pstat("increment", now, err) - return newValue, err -} - -func (pc *poolConn) DecrementContext(ctx context.Context, key string, delta uint64) (uint64, error) { - now := time.Now() - newValue, 
err := pc.c.DecrementContext(ctx, key, delta) - pc.pstat("decrement", now, err) - return newValue, err -} - -func (pc *poolConn) CompareAndSwapContext(ctx context.Context, item *Item) error { - now := time.Now() - err := pc.c.CompareAndSwap(item) - pc.pstat("cas", now, err) - return err -} - -func (pc *poolConn) TouchContext(ctx context.Context, key string, seconds int32) error { - now := time.Now() - err := pc.c.Touch(key, seconds) - pc.pstat("touch", now, err) - return err -} diff --git a/pkg/cache/memcache/pool_conn_test.go b/pkg/cache/memcache/pool_conn_test.go deleted file mode 100644 index 35505945f..000000000 --- a/pkg/cache/memcache/pool_conn_test.go +++ /dev/null @@ -1,543 +0,0 @@ -package memcache - -import ( - "bytes" - "context" - "reflect" - "testing" - "time" - - "github.com/go-kratos/kratos/pkg/container/pool" - xtime "github.com/go-kratos/kratos/pkg/time" -) - -var itempool = &Item{ - Key: "testpool", - Value: []byte("testpool"), - Flags: 0, - Expiration: 60, - cas: 0, -} -var itempool2 = &Item{ - Key: "test_count", - Value: []byte("0"), - Flags: 0, - Expiration: 1000, - cas: 0, -} - -type testObject struct { - Mid int64 - Value []byte -} - -var largeValue = &Item{ - Key: "large_value", - Flags: FlagGOB | FlagGzip, - Expiration: 1000, - cas: 0, -} - -var largeValueBoundary = &Item{ - Key: "large_value", - Flags: FlagGOB | FlagGzip, - Expiration: 1000, - cas: 0, -} - -func TestPoolSet(t *testing.T) { - conn := testPool.Get(context.Background()) - defer conn.Close() - // set - if err := conn.Set(itempool); err != nil { - t.Errorf("memcache: set error(%v)", err) - } else { - t.Logf("memcache: set value: %s", itempool.Value) - } - if err := conn.Close(); err != nil { - t.Errorf("memcache: close error(%v)", err) - } -} - -func TestPoolGet(t *testing.T) { - key := "testpool" - conn := testPool.Get(context.Background()) - defer conn.Close() - // get - if res, err := conn.Get(key); err != nil { - t.Errorf("memcache: get error(%v)", err) - } else { - t.Logf("memcache: get value: %s", res.Value) - } - if _, err := conn.Get("not_found"); err != ErrNotFound { - t.Errorf("memcache: expceted err is not found but got: %v", err) - } - if err := conn.Close(); err != nil { - t.Errorf("memcache: close error(%v)", err) - } -} - -func TestPoolGetMulti(t *testing.T) { - conn := testPool.Get(context.Background()) - defer conn.Close() - s := []string{"testpool", "test1"} - // get - if res, err := conn.GetMulti(s); err != nil { - t.Errorf("memcache: gets error(%v)", err) - } else { - t.Logf("memcache: gets value: %d", len(res)) - } - if err := conn.Close(); err != nil { - t.Errorf("memcache: close error(%v)", err) - } -} - -func TestPoolTouch(t *testing.T) { - key := "testpool" - conn := testPool.Get(context.Background()) - defer conn.Close() - // touch - if err := conn.Touch(key, 10); err != nil { - t.Errorf("memcache: touch error(%v)", err) - } - if err := conn.Close(); err != nil { - t.Errorf("memcache: close error(%v)", err) - } -} - -func TestPoolIncrement(t *testing.T) { - key := "test_count" - conn := testPool.Get(context.Background()) - defer conn.Close() - // set - if err := conn.Set(itempool2); err != nil { - t.Errorf("memcache: set error(%v)", err) - } else { - t.Logf("memcache: set value: 0") - } - // incr - if res, err := conn.Increment(key, 1); err != nil { - t.Errorf("memcache: incr error(%v)", err) - } else { - t.Logf("memcache: incr n: %d", res) - if res != 1 { - t.Errorf("memcache: expected res=1 but got %d", res) - } - } - // decr - if res, err := conn.Decrement(key, 1); err != 
nil { - t.Errorf("memcache: decr error(%v)", err) - } else { - t.Logf("memcache: decr n: %d", res) - if res != 0 { - t.Errorf("memcache: expected res=0 but got %d", res) - } - } - if err := conn.Close(); err != nil { - t.Errorf("memcache: close error(%v)", err) - } -} - -func TestPoolErr(t *testing.T) { - conn := testPool.Get(context.Background()) - defer conn.Close() - if err := conn.Close(); err != nil { - t.Errorf("memcache: close error(%v)", err) - } - if err := conn.Err(); err == nil { - t.Errorf("memcache: err not nil") - } else { - t.Logf("memcache: err: %v", err) - } -} - -func TestPoolCompareAndSwap(t *testing.T) { - conn := testPool.Get(context.Background()) - defer conn.Close() - key := "testpool" - //cas - if r, err := conn.Get(key); err != nil { - t.Errorf("conn.Get() error(%v)", err) - } else { - r.Value = []byte("shit") - if err := conn.CompareAndSwap(r); err != nil { - t.Errorf("conn.Get() error(%v)", err) - } - r, _ := conn.Get("testpool") - if r.Key != "testpool" || !bytes.Equal(r.Value, []byte("shit")) || r.Flags != 0 { - t.Error("conn.Get() error, value") - } - if err := conn.Close(); err != nil { - t.Errorf("memcache: close error(%v)", err) - } - } -} - -func TestPoolDel(t *testing.T) { - key := "testpool" - conn := testPool.Get(context.Background()) - defer conn.Close() - // delete - if err := conn.Delete(key); err != nil { - t.Errorf("memcache: delete error(%v)", err) - } else { - t.Logf("memcache: delete key: %s", key) - } - if err := conn.Close(); err != nil { - t.Errorf("memcache: close error(%v)", err) - } -} - -func BenchmarkMemcache(b *testing.B) { - c := &Config{ - Name: "test", - Proto: "tcp", - Addr: testMemcacheAddr, - DialTimeout: xtime.Duration(time.Second), - ReadTimeout: xtime.Duration(time.Second), - WriteTimeout: xtime.Duration(time.Second), - } - c.Config = &pool.Config{ - Active: 10, - Idle: 5, - IdleTimeout: xtime.Duration(90 * time.Second), - } - testPool = NewPool(c) - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - conn := testPool.Get(context.Background()) - if err := conn.Close(); err != nil { - b.Errorf("memcache: close error(%v)", err) - } - } - }) - if err := testPool.Close(); err != nil { - b.Errorf("memcache: close error(%v)", err) - } -} - -func TestPoolSetLargeValue(t *testing.T) { - var b bytes.Buffer - for i := 0; i < 4000000; i++ { - b.WriteByte(1) - } - obj := &testObject{} - obj.Mid = 1000 - obj.Value = b.Bytes() - largeValue.Object = obj - conn := testPool.Get(context.Background()) - defer conn.Close() - // set - if err := conn.Set(largeValue); err != nil { - t.Errorf("memcache: set error(%v)", err) - } - if err := conn.Close(); err != nil { - t.Errorf("memcache: close error(%v)", err) - } -} - -func TestPoolGetLargeValue(t *testing.T) { - key := largeValue.Key - conn := testPool.Get(context.Background()) - defer conn.Close() - // get - var err error - if _, err = conn.Get(key); err != nil { - t.Errorf("memcache: large get error(%+v)", err) - } -} - -func TestPoolGetMultiLargeValue(t *testing.T) { - conn := testPool.Get(context.Background()) - defer conn.Close() - s := []string{largeValue.Key, largeValue.Key} - // get - if res, err := conn.GetMulti(s); err != nil { - t.Errorf("memcache: gets error(%v)", err) - } else { - t.Logf("memcache: gets value: %d", len(res)) - } - if err := conn.Close(); err != nil { - t.Errorf("memcache: close error(%v)", err) - } -} - -func TestPoolSetLargeValueBoundary(t *testing.T) { - var b bytes.Buffer - for i := 0; i < _largeValue; i++ { - b.WriteByte(1) - } - obj := 
&testObject{} - obj.Mid = 1000 - obj.Value = b.Bytes() - largeValueBoundary.Object = obj - conn := testPool.Get(context.Background()) - defer conn.Close() - // set - if err := conn.Set(largeValueBoundary); err != nil { - t.Errorf("memcache: set error(%v)", err) - } - if err := conn.Close(); err != nil { - t.Errorf("memcache: close error(%v)", err) - } -} - -func TestPoolGetLargeValueBoundary(t *testing.T) { - key := largeValueBoundary.Key - conn := testPool.Get(context.Background()) - defer conn.Close() - // get - var err error - if _, err = conn.Get(key); err != nil { - t.Errorf("memcache: large get error(%v)", err) - } -} - -func TestPoolAdd(t *testing.T) { - var ( - key = "test_add" - item = &Item{ - Key: key, - Value: []byte("0"), - Flags: 0, - Expiration: 60, - cas: 0, - } - conn = testPool.Get(context.Background()) - ) - defer conn.Close() - conn.Delete(key) - if err := conn.Add(item); err != nil { - t.Errorf("memcache: add error(%v)", err) - } - if err := conn.Add(item); err != ErrNotStored { - t.Errorf("memcache: add error(%v)", err) - } -} - -func TestNewPool(t *testing.T) { - type args struct { - cfg *Config - } - tests := []struct { - name string - args args - wantErr error - wantPanic bool - }{ - { - "NewPoolIllegalDialTimeout", - args{ - &Config{ - Name: "test_illegal_dial_timeout", - Proto: "tcp", - Addr: testMemcacheAddr, - DialTimeout: xtime.Duration(-time.Second), - ReadTimeout: xtime.Duration(time.Second), - WriteTimeout: xtime.Duration(time.Second), - }, - }, - nil, - true, - }, - { - "NewPoolIllegalReadTimeout", - args{ - &Config{ - Name: "test_illegal_read_timeout", - Proto: "tcp", - Addr: testMemcacheAddr, - DialTimeout: xtime.Duration(time.Second), - ReadTimeout: xtime.Duration(-time.Second), - WriteTimeout: xtime.Duration(time.Second), - }, - }, - nil, - true, - }, - { - "NewPoolIllegalWriteTimeout", - args{ - &Config{ - Name: "test_illegal_write_timeout", - Proto: "tcp", - Addr: testMemcacheAddr, - DialTimeout: xtime.Duration(time.Second), - ReadTimeout: xtime.Duration(time.Second), - WriteTimeout: xtime.Duration(-time.Second), - }, - }, - nil, - true, - }, - { - "NewPool", - args{ - &Config{ - Name: "test_new", - Proto: "tcp", - Addr: testMemcacheAddr, - DialTimeout: xtime.Duration(time.Second), - ReadTimeout: xtime.Duration(time.Second), - WriteTimeout: xtime.Duration(time.Second), - }, - }, - nil, - true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - defer func() { - r := recover() - if (r != nil) != tt.wantPanic { - t.Errorf("wantPanic recover = %v, wantPanic = %v", r, tt.wantPanic) - } - }() - - if gotP := NewPool(tt.args.cfg); gotP == nil { - t.Error("NewPool() failed, got nil") - } - }) - } -} - -func TestPool_Get(t *testing.T) { - type args struct { - ctx context.Context - } - tests := []struct { - name string - p *Pool - args args - wantErr bool - n int - }{ - { - "Get", - NewPool(&Config{ - Config: &pool.Config{ - Active: 3, - Idle: 2, - }, - Name: "test_get", - Proto: "tcp", - Addr: testMemcacheAddr, - DialTimeout: xtime.Duration(time.Second), - ReadTimeout: xtime.Duration(time.Second), - WriteTimeout: xtime.Duration(time.Second), - }), - args{context.TODO()}, - false, - 3, - }, - { - "GetExceededPoolSize", - NewPool(&Config{ - Config: &pool.Config{ - Active: 3, - Idle: 2, - }, - Name: "test_get_out", - Proto: "tcp", - Addr: testMemcacheAddr, - DialTimeout: xtime.Duration(time.Second), - ReadTimeout: xtime.Duration(time.Second), - WriteTimeout: xtime.Duration(time.Second), - }), - args{context.TODO()}, - true, - 6, - }, - 
} - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - for i := 1; i <= tt.n; i++ { - got := tt.p.Get(tt.args.ctx) - if reflect.TypeOf(got) == reflect.TypeOf(errConn{}) { - if !tt.wantErr { - t.Errorf("got errConn, export Conn") - } - return - } else { - if tt.wantErr { - if i > tt.p.c.Active { - t.Errorf("got Conn, export errConn") - } - } - } - } - }) - } -} - -func TestPool_Close(t *testing.T) { - type args struct { - ctx context.Context - } - tests := []struct { - name string - p *Pool - args args - wantErr bool - g int - c int - }{ - { - "Close", - NewPool(&Config{ - Config: &pool.Config{ - Active: 1, - Idle: 1, - }, - Name: "test_get", - Proto: "tcp", - Addr: testMemcacheAddr, - DialTimeout: xtime.Duration(time.Second), - ReadTimeout: xtime.Duration(time.Second), - WriteTimeout: xtime.Duration(time.Second), - }), - args{context.TODO()}, - false, - 3, - 3, - }, - { - "CloseExceededPoolSize", - NewPool(&Config{ - Config: &pool.Config{ - Active: 1, - Idle: 1, - }, - Name: "test_get_out", - Proto: "tcp", - Addr: testMemcacheAddr, - DialTimeout: xtime.Duration(time.Second), - ReadTimeout: xtime.Duration(time.Second), - WriteTimeout: xtime.Duration(time.Second), - }), - args{context.TODO()}, - true, - 5, - 3, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - for i := 1; i <= tt.g; i++ { - got := tt.p.Get(tt.args.ctx) - if err := got.Close(); err != nil { - if !tt.wantErr { - t.Error(err) - } - } - if i <= tt.c { - if err := got.Close(); err != nil { - t.Error(err) - } - } - } - }) - } -} diff --git a/pkg/cache/memcache/test/docker-compose.yaml b/pkg/cache/memcache/test/docker-compose.yaml deleted file mode 100755 index ace3ebedf..000000000 --- a/pkg/cache/memcache/test/docker-compose.yaml +++ /dev/null @@ -1,9 +0,0 @@ -version: "3.7" - -services: - mc: - image: memcached:1 - ports: - - 11211:11211 - - diff --git a/pkg/cache/memcache/test/test.pb.go b/pkg/cache/memcache/test/test.pb.go deleted file mode 100644 index 1dc41aa00..000000000 --- a/pkg/cache/memcache/test/test.pb.go +++ /dev/null @@ -1,375 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: test.proto - -/* - Package proto is a generated protocol buffer package. - - It is generated from these files: - test.proto - - It has these top-level messages: - TestItem -*/ -package proto - -import proto1 "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto1.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto1.ProtoPackageIsVersion2 // please upgrade the proto package - -type FOO int32 - -const ( - FOO_X FOO = 0 -) - -var FOO_name = map[int32]string{ - 0: "X", -} -var FOO_value = map[string]int32{ - "X": 0, -} - -func (x FOO) String() string { - return proto1.EnumName(FOO_name, int32(x)) -} -func (FOO) EnumDescriptor() ([]byte, []int) { return fileDescriptorTest, []int{0} } - -type TestItem struct { - Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` - Age int32 `protobuf:"varint,2,opt,name=Age,proto3" json:"Age,omitempty"` -} - -func (m *TestItem) Reset() { *m = TestItem{} } -func (m *TestItem) String() string { return proto1.CompactTextString(m) } -func (*TestItem) ProtoMessage() {} -func (*TestItem) Descriptor() ([]byte, []int) { return fileDescriptorTest, []int{0} } - -func (m *TestItem) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *TestItem) GetAge() int32 { - if m != nil { - return m.Age - } - return 0 -} - -func init() { - proto1.RegisterType((*TestItem)(nil), "proto.TestItem") - proto1.RegisterEnum("proto.FOO", FOO_name, FOO_value) -} -func (m *TestItem) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TestItem) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintTest(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if m.Age != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintTest(dAtA, i, uint64(m.Age)) - } - return i, nil -} - -func encodeFixed64Test(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Test(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintTest(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *TestItem) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovTest(uint64(l)) - } - if m.Age != 0 { - n += 1 + sovTest(uint64(m.Age)) - } - return n -} - -func sovTest(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozTest(x uint64) (n int) { - return sovTest(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *TestItem) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TestItem: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TestItem: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTest - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Age", wireType) - } - m.Age = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Age |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTest(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthTest - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTest(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTest - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTest - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTest - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthTest - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTest - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipTest(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthTest = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTest = fmt.Errorf("proto: integer overflow") -) - -func init() { proto1.RegisterFile("test.proto", fileDescriptorTest) } - -var fileDescriptorTest = []byte{ - // 122 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0x49, 0x2d, 0x2e, - 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x05, 0x53, 0x4a, 0x06, 0x5c, 0x1c, 0x21, 0xa9, - 0xc5, 0x25, 0x9e, 0x25, 0xa9, 0xb9, 
0x42, 0x42, 0x5c, 0x2c, 0x7e, 0x89, 0xb9, 0xa9, 0x12, 0x8c, - 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x60, 0xb6, 0x90, 0x00, 0x17, 0xb3, 0x63, 0x7a, 0xaa, 0x04, 0x93, - 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x88, 0xa9, 0xc5, 0xc3, 0xc5, 0xec, 0xe6, 0xef, 0x2f, 0xc4, 0xca, - 0xc5, 0x18, 0x21, 0xc0, 0xe0, 0x24, 0x70, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, - 0x1e, 0xc9, 0x31, 0xce, 0x78, 0x2c, 0xc7, 0x90, 0xc4, 0x06, 0x36, 0xd8, 0x18, 0x10, 0x00, 0x00, - 0xff, 0xff, 0x16, 0x80, 0x60, 0x15, 0x6d, 0x00, 0x00, 0x00, -} diff --git a/pkg/cache/memcache/test/test.proto b/pkg/cache/memcache/test/test.proto deleted file mode 100644 index adad15bea..000000000 --- a/pkg/cache/memcache/test/test.proto +++ /dev/null @@ -1,12 +0,0 @@ -syntax = "proto3"; -package proto; - -enum FOO -{ - X = 0; -}; - -message TestItem{ - string Name = 1; - int32 Age = 2; -} \ No newline at end of file diff --git a/pkg/cache/memcache/trace_conn.go b/pkg/cache/memcache/trace_conn.go deleted file mode 100644 index 776a2488c..000000000 --- a/pkg/cache/memcache/trace_conn.go +++ /dev/null @@ -1,103 +0,0 @@ -package memcache - -import ( - "context" - "strconv" - "strings" - "time" - - "github.com/go-kratos/kratos/pkg/log" - "github.com/go-kratos/kratos/pkg/net/trace" -) - -const ( - _slowLogDuration = time.Millisecond * 250 -) - -func newTraceConn(conn Conn, address string) Conn { - tags := []trace.Tag{ - trace.String(trace.TagSpanKind, "client"), - trace.String(trace.TagComponent, "cache/memcache"), - trace.String(trace.TagPeerService, "memcache"), - trace.String(trace.TagPeerAddress, address), - } - return &traceConn{Conn: conn, tags: tags} -} - -type traceConn struct { - Conn - tags []trace.Tag -} - -func (t *traceConn) setTrace(ctx context.Context, action, statement string) func(error) error { - now := time.Now() - parent, ok := trace.FromContext(ctx) - if !ok { - return func(err error) error { return err } - } - span := parent.Fork("", "Memcache:"+action) - span.SetTag(t.tags...) 
- span.SetTag(trace.String(trace.TagDBStatement, action+" "+statement)) - return func(err error) error { - span.Finish(&err) - t := time.Since(now) - if t > _slowLogDuration { - log.Warn("memcache slow log action: %s key: %s time: %v", action, statement, t) - } - return err - } -} - -func (t *traceConn) AddContext(ctx context.Context, item *Item) error { - finishFn := t.setTrace(ctx, "Add", item.Key) - return finishFn(t.Conn.Add(item)) -} - -func (t *traceConn) SetContext(ctx context.Context, item *Item) error { - finishFn := t.setTrace(ctx, "Set", item.Key) - return finishFn(t.Conn.Set(item)) -} - -func (t *traceConn) ReplaceContext(ctx context.Context, item *Item) error { - finishFn := t.setTrace(ctx, "Replace", item.Key) - return finishFn(t.Conn.Replace(item)) -} - -func (t *traceConn) GetContext(ctx context.Context, key string) (*Item, error) { - finishFn := t.setTrace(ctx, "Get", key) - item, err := t.Conn.Get(key) - return item, finishFn(err) -} - -func (t *traceConn) GetMultiContext(ctx context.Context, keys []string) (map[string]*Item, error) { - finishFn := t.setTrace(ctx, "GetMulti", strings.Join(keys, " ")) - items, err := t.Conn.GetMulti(keys) - return items, finishFn(err) -} - -func (t *traceConn) DeleteContext(ctx context.Context, key string) error { - finishFn := t.setTrace(ctx, "Delete", key) - return finishFn(t.Conn.Delete(key)) -} - -func (t *traceConn) IncrementContext(ctx context.Context, key string, delta uint64) (newValue uint64, err error) { - finishFn := t.setTrace(ctx, "Increment", key+" "+strconv.FormatUint(delta, 10)) - newValue, err = t.Conn.Increment(key, delta) - return newValue, finishFn(err) -} - -func (t *traceConn) DecrementContext(ctx context.Context, key string, delta uint64) (newValue uint64, err error) { - finishFn := t.setTrace(ctx, "Decrement", key+" "+strconv.FormatUint(delta, 10)) - newValue, err = t.Conn.Decrement(key, delta) - return newValue, finishFn(err) -} - -func (t *traceConn) CompareAndSwapContext(ctx context.Context, item *Item) error { - finishFn := t.setTrace(ctx, "CompareAndSwap", item.Key) - return finishFn(t.Conn.CompareAndSwap(item)) -} - -func (t *traceConn) TouchContext(ctx context.Context, key string, seconds int32) (err error) { - finishFn := t.setTrace(ctx, "Touch", key+" "+strconv.Itoa(int(seconds))) - return finishFn(t.Conn.Touch(key, seconds)) -} diff --git a/pkg/cache/memcache/util.go b/pkg/cache/memcache/util.go deleted file mode 100644 index e42d49910..000000000 --- a/pkg/cache/memcache/util.go +++ /dev/null @@ -1,88 +0,0 @@ -package memcache - -import ( - "context" - "time" - - "github.com/gogo/protobuf/proto" -) - -func legalKey(key string) bool { - if len(key) > 250 || len(key) == 0 { - return false - } - for i := 0; i < len(key); i++ { - if key[i] <= ' ' || key[i] == 0x7f { - return false - } - } - return true -} - -// MockWith error -func MockWith(err error) Conn { - return errConn{err} -} - -type errConn struct{ err error } - -func (c errConn) Err() error { return c.err } -func (c errConn) Close() error { return c.err } -func (c errConn) Add(*Item) error { return c.err } -func (c errConn) Set(*Item) error { return c.err } -func (c errConn) Replace(*Item) error { return c.err } -func (c errConn) CompareAndSwap(*Item) error { return c.err } -func (c errConn) Get(string) (*Item, error) { return nil, c.err } -func (c errConn) GetMulti([]string) (map[string]*Item, error) { return nil, c.err } -func (c errConn) Touch(string, int32) error { return c.err } -func (c errConn) Delete(string) error { return c.err } -func (c 
errConn) Increment(string, uint64) (uint64, error) { return 0, c.err } -func (c errConn) Decrement(string, uint64) (uint64, error) { return 0, c.err } -func (c errConn) Scan(*Item, interface{}) error { return c.err } -func (c errConn) AddContext(context.Context, *Item) error { return c.err } -func (c errConn) SetContext(context.Context, *Item) error { return c.err } -func (c errConn) ReplaceContext(context.Context, *Item) error { return c.err } -func (c errConn) GetContext(context.Context, string) (*Item, error) { return nil, c.err } -func (c errConn) DecrementContext(context.Context, string, uint64) (uint64, error) { return 0, c.err } -func (c errConn) CompareAndSwapContext(context.Context, *Item) error { return c.err } -func (c errConn) TouchContext(context.Context, string, int32) error { return c.err } -func (c errConn) DeleteContext(context.Context, string) error { return c.err } -func (c errConn) IncrementContext(context.Context, string, uint64) (uint64, error) { return 0, c.err } -func (c errConn) GetMultiContext(context.Context, []string) (map[string]*Item, error) { - return nil, c.err -} - -// RawItem item with FlagRAW flag. -// -// Expiration is the cache expiration time, in seconds: either a relative -// time from now (up to 1 month), or an absolute Unix epoch time. -// Zero means the Item has no expiration time. -func RawItem(key string, data []byte, flags uint32, expiration int32) *Item { - return &Item{Key: key, Flags: flags | FlagRAW, Value: data, Expiration: expiration} -} - -// JSONItem item with FlagJSON flag. -// -// Expiration is the cache expiration time, in seconds: either a relative -// time from now (up to 1 month), or an absolute Unix epoch time. -// Zero means the Item has no expiration time. -func JSONItem(key string, v interface{}, flags uint32, expiration int32) *Item { - return &Item{Key: key, Flags: flags | FlagJSON, Object: v, Expiration: expiration} -} - -// ProtobufItem item with FlagProtobuf flag. -// -// Expiration is the cache expiration time, in seconds: either a relative -// time from now (up to 1 month), or an absolute Unix epoch time. -// Zero means the Item has no expiration time. 
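These item helpers only differ in the encoding flag they OR into Flags; expiration follows the memcached rule quoted in the comments (relative seconds up to one month, otherwise an absolute Unix time, zero for no expiry). A hedged sketch using a hypothetical user struct, plus MockWith for error injection in tests; key names and values are illustrative only:

package memcacheexample

import (
	"github.com/go-kratos/kratos/pkg/cache/memcache"
)

// user is a hypothetical payload type, used only for illustration.
type user struct {
	ID   int64
	Name string
}

func buildItems() {
	// Raw bytes stored as-is, expiring after 60 seconds.
	avatar := memcache.RawItem("user:1:avatar", []byte{0xff, 0xd8}, 0, 60)

	// JSON-encoded object; expiration 0 means the item never expires.
	profile := memcache.JSONItem("user:1", &user{ID: 1, Name: "kratos"}, 0, 0)

	// MockWith yields a Conn whose every method returns the supplied error,
	// which is convenient for exercising error paths in unit tests.
	conn := memcache.MockWith(memcache.ErrNotFound)
	_ = conn.Set(avatar)  // always memcache.ErrNotFound
	_ = conn.Set(profile) // always memcache.ErrNotFound
}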
-func ProtobufItem(key string, message proto.Message, flags uint32, expiration int32) *Item { - return &Item{Key: key, Flags: flags | FlagProtobuf, Object: message, Expiration: expiration} -} - -func shrinkDeadline(ctx context.Context, timeout time.Duration) time.Time { - timeoutTime := time.Now().Add(timeout) - if deadline, ok := ctx.Deadline(); ok && timeoutTime.After(deadline) { - return deadline - } - return timeoutTime -} diff --git a/pkg/cache/memcache/util_test.go b/pkg/cache/memcache/util_test.go deleted file mode 100644 index f14a888f6..000000000 --- a/pkg/cache/memcache/util_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package memcache - -import ( - "context" - "testing" - "time" - - pb "github.com/go-kratos/kratos/pkg/cache/memcache/test" - - "github.com/stretchr/testify/assert" -) - -func TestItemUtil(t *testing.T) { - item1 := RawItem("test", []byte("hh"), 0, 0) - assert.Equal(t, "test", item1.Key) - assert.Equal(t, []byte("hh"), item1.Value) - assert.Equal(t, FlagRAW, FlagRAW&item1.Flags) - - item1 = JSONItem("test", &Item{}, 0, 0) - assert.Equal(t, "test", item1.Key) - assert.NotNil(t, item1.Object) - assert.Equal(t, FlagJSON, FlagJSON&item1.Flags) - - item1 = ProtobufItem("test", &pb.TestItem{}, 0, 0) - assert.Equal(t, "test", item1.Key) - assert.NotNil(t, item1.Object) - assert.Equal(t, FlagProtobuf, FlagProtobuf&item1.Flags) -} - -func TestLegalKey(t *testing.T) { - type args struct { - key string - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "test empty key", - want: false, - }, - { - name: "test too large key", - args: args{func() string { - var data []byte - for i := 0; i < 255; i++ { - data = append(data, 'k') - } - return string(data) - }()}, - want: false, - }, - { - name: "test invalid char", - args: args{"hello world"}, - want: false, - }, - { - name: "test invalid char", - args: args{string([]byte{0x7f})}, - want: false, - }, - { - name: "test normal key", - args: args{"hello"}, - want: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := legalKey(tt.args.key); got != tt.want { - t.Errorf("legalKey() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestShrinkDeadline(t *testing.T) { - t.Run("test not deadline", func(t *testing.T) { - timeout := time.Second - timeoutTime := time.Now().Add(timeout) - tm := shrinkDeadline(context.Background(), timeout) - assert.True(t, tm.After(timeoutTime)) - }) - t.Run("test big deadline", func(t *testing.T) { - timeout := time.Second - timeoutTime := time.Now().Add(timeout) - deadlineTime := time.Now().Add(2 * time.Second) - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - - tm := shrinkDeadline(ctx, timeout) - assert.True(t, tm.After(timeoutTime) && tm.Before(deadlineTime)) - }) - t.Run("test small deadline", func(t *testing.T) { - timeout := time.Second - deadlineTime := time.Now().Add(500 * time.Millisecond) - ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) - defer cancel() - - tm := shrinkDeadline(ctx, timeout) - assert.True(t, tm.After(deadlineTime) && tm.Before(time.Now().Add(timeout))) - }) -} diff --git a/pkg/cache/metrics.go b/pkg/cache/metrics.go deleted file mode 100644 index 4c41b429c..000000000 --- a/pkg/cache/metrics.go +++ /dev/null @@ -1,23 +0,0 @@ -package cache - -import "github.com/go-kratos/kratos/pkg/stat/metric" - -const _metricNamespace = "cache" - -// be used in tool/kratos-gen-bts -var ( - MetricHits = metric.NewCounterVec(&metric.CounterVecOpts{ - 
Namespace: _metricNamespace, - Subsystem: "", - Name: "hits_total", - Help: "cache hits total.", - Labels: []string{"name"}, - }) - MetricMisses = metric.NewCounterVec(&metric.CounterVecOpts{ - Namespace: _metricNamespace, - Subsystem: "", - Name: "misses_total", - Help: "cache misses total.", - Labels: []string{"name"}, - }) -) diff --git a/pkg/cache/redis/README.md b/pkg/cache/redis/README.md deleted file mode 100644 index 588ca73a0..000000000 --- a/pkg/cache/redis/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# cache/redis - -##### Project overview -1. Provides a Redis interface - -#### Usage -Please refer to doc.go diff --git a/pkg/cache/redis/commandinfo.go b/pkg/cache/redis/commandinfo.go deleted file mode 100644 index f424eb438..000000000 --- a/pkg/cache/redis/commandinfo.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2014 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "strings" -) - -// redis state -const ( - WatchState = 1 << iota - MultiState - SubscribeState - MonitorState -) - -// CommandInfo command info. -type CommandInfo struct { - Set, Clear int -} - -var commandInfos = map[string]CommandInfo{ - "WATCH": {Set: WatchState}, - "UNWATCH": {Clear: WatchState}, - "MULTI": {Set: MultiState}, - "EXEC": {Clear: WatchState | MultiState}, - "DISCARD": {Clear: WatchState | MultiState}, - "PSUBSCRIBE": {Set: SubscribeState}, - "SUBSCRIBE": {Set: SubscribeState}, - "MONITOR": {Set: MonitorState}, -} - -func init() { - for n, ci := range commandInfos { - commandInfos[strings.ToLower(n)] = ci - } -} - -// LookupCommandInfo get command info. -func LookupCommandInfo(commandName string) CommandInfo { - if ci, ok := commandInfos[commandName]; ok { - return ci - } - return commandInfos[strings.ToUpper(commandName)] -} diff --git a/pkg/cache/redis/commandinfo_test.go b/pkg/cache/redis/commandinfo_test.go deleted file mode 100644 index d8f4e5214..000000000 --- a/pkg/cache/redis/commandinfo_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package redis - -import "testing" - -func TestLookupCommandInfo(t *testing.T) { - for _, n := range []string{"watch", "WATCH", "wAtch"} { - if LookupCommandInfo(n) == (CommandInfo{}) { - t.Errorf("LookupCommandInfo(%q) = CommandInfo{}, expected non-zero value", n) - } - } -} - -func benchmarkLookupCommandInfo(b *testing.B, names ...string) { - for i := 0; i < b.N; i++ { - for _, c := range names { - LookupCommandInfo(c) - } - } -} - -func BenchmarkLookupCommandInfoCorrectCase(b *testing.B) { - benchmarkLookupCommandInfo(b, "watch", "WATCH", "monitor", "MONITOR") -} - -func BenchmarkLookupCommandInfoMixedCase(b *testing.B) { - benchmarkLookupCommandInfo(b, "wAtch", "WeTCH", "monItor", "MONiTOR") -} diff --git a/pkg/cache/redis/conn.go b/pkg/cache/redis/conn.go deleted file mode 100644 index 949a4bd55..000000000 --- a/pkg/cache/redis/conn.go +++ /dev/null @@ -1,609 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License.
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "bufio" - "bytes" - "context" - "fmt" - "io" - "net" - "net/url" - "regexp" - "strconv" - "sync" - "time" - - "github.com/pkg/errors" -) - -// Conn represents a connection to a Redis server. -type Conn interface { - // Close closes the connection. - Close() error - - // Err returns a non-nil value if the connection is broken. The returned - // value is either the first non-nil value returned from the underlying - // network connection or a protocol parsing error. Applications should - // close broken connections. - Err() error - - // Do sends a command to the server and returns the received reply. - Do(commandName string, args ...interface{}) (reply interface{}, err error) - - // Send writes the command to the client's output buffer. - Send(commandName string, args ...interface{}) error - - // Flush flushes the output buffer to the Redis server. - Flush() error - - // Receive receives a single reply from the Redis server - Receive() (reply interface{}, err error) - - // WithContext returns Conn with the input ctx. - WithContext(ctx context.Context) Conn -} - -// conn is the low-level implementation of Conn -type conn struct { - // Shared - mu sync.Mutex - pending int - err error - conn net.Conn - - ctx context.Context - - // Read - readTimeout time.Duration - br *bufio.Reader - - // Write - writeTimeout time.Duration - bw *bufio.Writer - - // Scratch space for formatting argument length. - // '*' or '$', length, "\r\n" - lenScratch [32]byte - - // Scratch space for formatting integers and floats. - numScratch [40]byte -} - -// DialTimeout acts like Dial but takes timeouts for establishing the -// connection to the server, writing a command and reading a reply. -// -// Deprecated: Use Dial with options instead. -func DialTimeout(network, address string, connectTimeout, readTimeout, writeTimeout time.Duration) (Conn, error) { - return Dial(network, address, - DialConnectTimeout(connectTimeout), - DialReadTimeout(readTimeout), - DialWriteTimeout(writeTimeout)) -} - -// DialOption specifies an option for dialing a Redis server. -type DialOption struct { - f func(*dialOptions) -} - -type dialOptions struct { - readTimeout time.Duration - writeTimeout time.Duration - dial func(network, addr string) (net.Conn, error) - db int - password string -} - -// DialReadTimeout specifies the timeout for reading a single command reply. -func DialReadTimeout(d time.Duration) DialOption { - return DialOption{func(do *dialOptions) { - do.readTimeout = d - }} -} - -// DialWriteTimeout specifies the timeout for writing a single command. -func DialWriteTimeout(d time.Duration) DialOption { - return DialOption{func(do *dialOptions) { - do.writeTimeout = d - }} -} - -// DialConnectTimeout specifies the timeout for connecting to the Redis server. -func DialConnectTimeout(d time.Duration) DialOption { - return DialOption{func(do *dialOptions) { - dialer := net.Dialer{Timeout: d} - do.dial = dialer.Dial - }} -} - -// DialNetDial specifies a custom dial function for creating TCP -// connections. If this option is left out, then net.Dial is -// used. 
DialNetDial overrides DialConnectTimeout. -func DialNetDial(dial func(network, addr string) (net.Conn, error)) DialOption { - return DialOption{func(do *dialOptions) { - do.dial = dial - }} -} - -// DialDatabase specifies the database to select when dialing a connection. -func DialDatabase(db int) DialOption { - return DialOption{func(do *dialOptions) { - do.db = db - }} -} - -// DialPassword specifies the password to use when connecting to -// the Redis server. -func DialPassword(password string) DialOption { - return DialOption{func(do *dialOptions) { - do.password = password - }} -} - -// Dial connects to the Redis server at the given network and -// address using the specified options. -func Dial(network, address string, options ...DialOption) (Conn, error) { - do := dialOptions{ - dial: net.Dial, - } - for _, option := range options { - option.f(&do) - } - - netConn, err := do.dial(network, address) - if err != nil { - return nil, errors.WithStack(err) - } - c := &conn{ - conn: netConn, - bw: bufio.NewWriter(netConn), - br: bufio.NewReader(netConn), - readTimeout: do.readTimeout, - writeTimeout: do.writeTimeout, - } - - if do.password != "" { - if _, err := c.Do("AUTH", do.password); err != nil { - netConn.Close() - return nil, errors.WithStack(err) - } - } - - if do.db != 0 { - if _, err := c.Do("SELECT", do.db); err != nil { - netConn.Close() - return nil, errors.WithStack(err) - } - } - return c, nil -} - -var pathDBRegexp = regexp.MustCompile(`/(\d+)\z`) - -// DialURL connects to a Redis server at the given URL using the Redis -// URI scheme. URLs should follow the draft IANA specification for the -// scheme (https://www.iana.org/assignments/uri-schemes/prov/redis). -func DialURL(rawurl string, options ...DialOption) (Conn, error) { - u, err := url.Parse(rawurl) - if err != nil { - return nil, errors.WithStack(err) - } - - if u.Scheme != "redis" { - return nil, fmt.Errorf("invalid redis URL scheme: %s", u.Scheme) - } - - // As per the IANA draft spec, the host defaults to localhost and - // the port defaults to 6379. - host, port, err := net.SplitHostPort(u.Host) - if err != nil { - // assume port is missing - host = u.Host - port = "6379" - } - if host == "" { - host = "localhost" - } - address := net.JoinHostPort(host, port) - - if u.User != nil { - password, isSet := u.User.Password() - if isSet { - options = append(options, DialPassword(password)) - } - } - - match := pathDBRegexp.FindStringSubmatch(u.Path) - if len(match) == 2 { - db, err := strconv.Atoi(match[1]) - if err != nil { - return nil, errors.Errorf("invalid database: %s", u.Path[1:]) - } - if db != 0 { - options = append(options, DialDatabase(db)) - } - } else if u.Path != "" { - return nil, errors.Errorf("invalid database: %s", u.Path[1:]) - } - - return Dial("tcp", address, options...) -} - -// NewConn new a redis conn. 
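Dial assembles the connection from functional options (AUTH only runs when a password is set, SELECT only when the database is non-zero), and DialURL parses a redis:// URL into the same options, defaulting to localhost:6379. A minimal sketch of both entry points; the address, password and database number are placeholders:

package main

import (
	"time"

	"github.com/go-kratos/kratos/pkg/cache/redis"
)

func main() {
	// Explicit options; omitting DialPassword skips AUTH, database 0 skips SELECT.
	c, err := redis.Dial("tcp", "127.0.0.1:6379",
		redis.DialConnectTimeout(time.Second),
		redis.DialReadTimeout(time.Second),
		redis.DialWriteTimeout(time.Second),
		redis.DialDatabase(1),
	)
	if err != nil {
		panic(err)
	}
	defer c.Close()

	if _, err := c.Do("SET", "greeting", "hello"); err != nil {
		panic(err)
	}
	if v, err := c.Do("GET", "greeting"); err == nil {
		_ = v.([]byte) // bulk string replies come back as []byte
	}

	// Equivalent URL form: host defaults to localhost, port to 6379, the
	// password comes from the userinfo part, the database from the path.
	c2, err := redis.DialURL("redis://:secret@127.0.0.1:6379/1")
	if err != nil {
		panic(err)
	}
	defer c2.Close()
}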
-func NewConn(c *Config) (cn Conn, err error) { - cnop := DialConnectTimeout(time.Duration(c.DialTimeout)) - rdop := DialReadTimeout(time.Duration(c.ReadTimeout)) - wrop := DialWriteTimeout(time.Duration(c.WriteTimeout)) - auop := DialPassword(c.Auth) - // new conn - cn, err = Dial(c.Proto, c.Addr, cnop, rdop, wrop, auop) - return -} - -func (c *conn) Close() error { - c.mu.Lock() - c.ctx = nil - err := c.err - if c.err == nil { - c.err = errors.New("redigo: closed") - err = c.conn.Close() - } - c.mu.Unlock() - return err -} - -func (c *conn) fatal(err error) error { - c.mu.Lock() - if c.err == nil { - c.err = err - // Close connection to force errors on subsequent calls and to unblock - // other reader or writer. - c.conn.Close() - } - c.mu.Unlock() - return errors.WithStack(c.err) -} - -func (c *conn) Err() error { - c.mu.Lock() - err := c.err - c.mu.Unlock() - return err -} - -func (c *conn) writeLen(prefix byte, n int) error { - c.lenScratch[len(c.lenScratch)-1] = '\n' - c.lenScratch[len(c.lenScratch)-2] = '\r' - i := len(c.lenScratch) - 3 - for { - c.lenScratch[i] = byte('0' + n%10) - i-- - n = n / 10 - if n == 0 { - break - } - } - c.lenScratch[i] = prefix - _, err := c.bw.Write(c.lenScratch[i:]) - return errors.WithStack(err) -} - -func (c *conn) writeString(s string) error { - c.writeLen('$', len(s)) - c.bw.WriteString(s) - _, err := c.bw.WriteString("\r\n") - return errors.WithStack(err) -} - -func (c *conn) writeBytes(p []byte) error { - c.writeLen('$', len(p)) - c.bw.Write(p) - _, err := c.bw.WriteString("\r\n") - return errors.WithStack(err) -} - -func (c *conn) writeInt64(n int64) error { - return errors.WithStack(c.writeBytes(strconv.AppendInt(c.numScratch[:0], n, 10))) -} - -func (c *conn) writeFloat64(n float64) error { - return errors.WithStack(c.writeBytes(strconv.AppendFloat(c.numScratch[:0], n, 'g', -1, 64))) -} - -func (c *conn) writeCommand(cmd string, args []interface{}) (err error) { - if c.writeTimeout != 0 { - c.conn.SetWriteDeadline(shrinkDeadline(c.ctx, c.writeTimeout)) - } - c.writeLen('*', 1+len(args)) - err = c.writeString(cmd) - for _, arg := range args { - if err != nil { - break - } - switch arg := arg.(type) { - case string: - err = c.writeString(arg) - case []byte: - err = c.writeBytes(arg) - case int: - err = c.writeInt64(int64(arg)) - case int64: - err = c.writeInt64(arg) - case float64: - err = c.writeFloat64(arg) - case bool: - if arg { - err = c.writeString("1") - } else { - err = c.writeString("0") - } - case nil: - err = c.writeString("") - default: - var buf bytes.Buffer - fmt.Fprint(&buf, arg) - err = errors.WithStack(c.writeBytes(buf.Bytes())) - } - } - return err -} - -type protocolError string - -func (pe protocolError) Error() string { - return fmt.Sprintf("redigo: %s (possible server error or unsupported concurrent read by application)", string(pe)) -} - -func (c *conn) readLine() ([]byte, error) { - p, err := c.br.ReadSlice('\n') - if err == bufio.ErrBufferFull { - return nil, errors.WithStack(protocolError("long response line")) - } - if err != nil { - return nil, err - } - i := len(p) - 2 - if i < 0 || p[i] != '\r' { - return nil, errors.WithStack(protocolError("bad response line terminator")) - } - return p[:i], nil -} - -// parseLen parses bulk string and array lengths. -func parseLen(p []byte) (int, error) { - if len(p) == 0 { - return -1, errors.WithStack(protocolError("malformed length")) - } - - if p[0] == '-' && len(p) == 2 && p[1] == '1' { - // handle $-1 and $-1 null replies. 
- return -1, nil - } - - var n int - for _, b := range p { - n *= 10 - if b < '0' || b > '9' { - return -1, errors.WithStack(protocolError("illegal bytes in length")) - } - n += int(b - '0') - } - - return n, nil -} - -// parseInt parses an integer reply. -func parseInt(p []byte) (interface{}, error) { - if len(p) == 0 { - return 0, errors.WithStack(protocolError("malformed integer")) - } - - var negate bool - if p[0] == '-' { - negate = true - p = p[1:] - if len(p) == 0 { - return 0, errors.WithStack(protocolError("malformed integer")) - } - } - - var n int64 - for _, b := range p { - n *= 10 - if b < '0' || b > '9' { - return 0, errors.WithStack(protocolError("illegal bytes in length")) - } - n += int64(b - '0') - } - - if negate { - n = -n - } - return n, nil -} - -var ( - okReply interface{} = "OK" - pongReply interface{} = "PONG" -) - -func (c *conn) readReply() (interface{}, error) { - line, err := c.readLine() - if err != nil { - return nil, err - } - if len(line) == 0 { - return nil, errors.WithStack(protocolError("short response line")) - } - switch line[0] { - case '+': - switch { - case len(line) == 3 && line[1] == 'O' && line[2] == 'K': - // Avoid allocation for frequent "+OK" response. - return okReply, nil - case len(line) == 5 && line[1] == 'P' && line[2] == 'O' && line[3] == 'N' && line[4] == 'G': - // Avoid allocation in PING command benchmarks :) - return pongReply, nil - default: - return string(line[1:]), nil - } - case '-': - return Error(string(line[1:])), nil - case ':': - return parseInt(line[1:]) - case '$': - n, err := parseLen(line[1:]) - if n < 0 || err != nil { - return nil, err - } - p := make([]byte, n) - _, err = io.ReadFull(c.br, p) - if err != nil { - return nil, errors.WithStack(err) - } - if line1, err := c.readLine(); err != nil { - return nil, err - } else if len(line1) != 0 { - return nil, errors.WithStack(protocolError("bad bulk string format")) - } - return p, nil - case '*': - n, err := parseLen(line[1:]) - if n < 0 || err != nil { - return nil, err - } - r := make([]interface{}, n) - for i := range r { - r[i], err = c.readReply() - if err != nil { - return nil, err - } - } - return r, nil - } - return nil, errors.WithStack(protocolError("unexpected response line")) -} -func (c *conn) Send(cmd string, args ...interface{}) (err error) { - c.mu.Lock() - c.pending++ - c.mu.Unlock() - if err = c.writeCommand(cmd, args); err != nil { - c.fatal(err) - } - return err -} - -func (c *conn) Flush() (err error) { - if c.writeTimeout != 0 { - c.conn.SetWriteDeadline(shrinkDeadline(c.ctx, c.writeTimeout)) - } - if err = c.bw.Flush(); err != nil { - c.fatal(err) - } - return err -} - -func (c *conn) Receive() (reply interface{}, err error) { - if c.readTimeout != 0 { - c.conn.SetReadDeadline(shrinkDeadline(c.ctx, c.readTimeout)) - } - if reply, err = c.readReply(); err != nil { - return nil, c.fatal(err) - } - // When using pub/sub, the number of receives can be greater than the - // number of sends. To enable normal use of the connection after - // unsubscribing from all channels, we do not decrement pending to a - // negative value. - // - // The pending field is decremented after the reply is read to handle the - // case where Receive is called before Send. 
- c.mu.Lock() - if c.pending > 0 { - c.pending-- - } - c.mu.Unlock() - if err, ok := reply.(Error); ok { - return nil, err - } - return -} - -func (c *conn) Do(cmd string, args ...interface{}) (reply interface{}, err error) { - c.mu.Lock() - pending := c.pending - c.pending = 0 - c.mu.Unlock() - if cmd == "" && pending == 0 { - return nil, nil - } - - if cmd != "" { - err = c.writeCommand(cmd, args) - } - if err == nil { - err = errors.WithStack(c.bw.Flush()) - } - if err != nil { - return nil, c.fatal(err) - } - if c.readTimeout != 0 { - c.conn.SetReadDeadline(shrinkDeadline(c.ctx, c.readTimeout)) - } - if cmd == "" { - reply := make([]interface{}, pending) - for i := range reply { - var r interface{} - r, err = c.readReply() - if err != nil { - break - } - reply[i] = r - } - if err != nil { - return nil, c.fatal(err) - } - return reply, nil - } - - for i := 0; i <= pending; i++ { - var e error - if reply, e = c.readReply(); e != nil { - return nil, c.fatal(e) - } - if e, ok := reply.(Error); ok && err == nil { - err = e - } - } - return reply, err -} - -func (c *conn) copy() *conn { - return &conn{ - pending: c.pending, - err: c.err, - conn: c.conn, - bw: c.bw, - br: c.br, - readTimeout: c.readTimeout, - writeTimeout: c.writeTimeout, - } -} - -func (c *conn) WithContext(ctx context.Context) Conn { - c2 := c.copy() - c2.ctx = ctx - return c2 -} diff --git a/pkg/cache/redis/conn_test.go b/pkg/cache/redis/conn_test.go deleted file mode 100644 index 3e37e882c..000000000 --- a/pkg/cache/redis/conn_test.go +++ /dev/null @@ -1,670 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package redis - -import ( - "bytes" - "context" - "io" - "math" - "net" - "os" - "reflect" - "strings" - "testing" - "time" -) - -type tConn struct { - io.Reader - io.Writer -} - -func (*tConn) Close() error { return nil } -func (*tConn) LocalAddr() net.Addr { return nil } -func (*tConn) RemoteAddr() net.Addr { return nil } -func (*tConn) SetDeadline(t time.Time) error { return nil } -func (*tConn) SetReadDeadline(t time.Time) error { return nil } -func (*tConn) SetWriteDeadline(t time.Time) error { return nil } - -func dialTestConn(r io.Reader, w io.Writer) DialOption { - return DialNetDial(func(net, addr string) (net.Conn, error) { - return &tConn{Reader: r, Writer: w}, nil - }) -} - -var writeTests = []struct { - args []interface{} - expected string -}{ - { - []interface{}{"SET", "key", "value"}, - "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n", - }, - { - []interface{}{"SET", "key", "value"}, - "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n", - }, - { - []interface{}{"SET", "key", byte(100)}, - "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$3\r\n100\r\n", - }, - { - []interface{}{"SET", "key", 100}, - "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$3\r\n100\r\n", - }, - { - []interface{}{"SET", "key", int64(math.MinInt64)}, - "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$20\r\n-9223372036854775808\r\n", - }, - { - []interface{}{"SET", "key", float64(1349673917.939762)}, - "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$21\r\n1.349673917939762e+09\r\n", - }, - { - []interface{}{"SET", "key", ""}, - "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$0\r\n\r\n", - }, - { - []interface{}{"SET", "key", nil}, - "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$0\r\n\r\n", - }, - { - []interface{}{"ECHO", true, false}, - "*3\r\n$4\r\nECHO\r\n$1\r\n1\r\n$1\r\n0\r\n", - }, -} - -func TestWrite(t *testing.T) { - for _, tt := range writeTests { - var buf bytes.Buffer - c, _ := Dial("", "", dialTestConn(nil, &buf)) - err := c.Send(tt.args[0].(string), tt.args[1:]...) 
- if err != nil { - t.Errorf("Send(%v) returned error %v", tt.args, err) - continue - } - c.Flush() - actual := buf.String() - if actual != tt.expected { - t.Errorf("Send(%v) = %q, want %q", tt.args, actual, tt.expected) - } - } -} - -var errorSentinel = &struct{}{} - -var readTests = []struct { - reply string - expected interface{} -}{ - { - "+OK\r\n", - "OK", - }, - { - "+PONG\r\n", - "PONG", - }, - { - "@OK\r\n", - errorSentinel, - }, - { - "$6\r\nfoobar\r\n", - []byte("foobar"), - }, - { - "$-1\r\n", - nil, - }, - { - ":1\r\n", - int64(1), - }, - { - ":-2\r\n", - int64(-2), - }, - { - "*0\r\n", - []interface{}{}, - }, - { - "*-1\r\n", - nil, - }, - { - "*4\r\n$3\r\nfoo\r\n$3\r\nbar\r\n$5\r\nHello\r\n$5\r\nWorld\r\n", - []interface{}{[]byte("foo"), []byte("bar"), []byte("Hello"), []byte("World")}, - }, - { - "*3\r\n$3\r\nfoo\r\n$-1\r\n$3\r\nbar\r\n", - []interface{}{[]byte("foo"), nil, []byte("bar")}, - }, - - { - // "x" is not a valid length - "$x\r\nfoobar\r\n", - errorSentinel, - }, - { - // -2 is not a valid length - "$-2\r\n", - errorSentinel, - }, - { - // "x" is not a valid integer - ":x\r\n", - errorSentinel, - }, - { - // missing \r\n following value - "$6\r\nfoobar", - errorSentinel, - }, - { - // short value - "$6\r\nxx", - errorSentinel, - }, - { - // long value - "$6\r\nfoobarx\r\n", - errorSentinel, - }, -} - -func TestRead(t *testing.T) { - for _, tt := range readTests { - c, _ := Dial("", "", dialTestConn(strings.NewReader(tt.reply), nil)) - actual, err := c.Receive() - if tt.expected == errorSentinel { - if err == nil { - t.Errorf("Receive(%q) did not return expected error", tt.reply) - } - } else { - if err != nil { - t.Errorf("Receive(%q) returned error %v", tt.reply, err) - continue - } - if !reflect.DeepEqual(actual, tt.expected) { - t.Errorf("Receive(%q) = %v, want %v", tt.reply, actual, tt.expected) - } - } - } -} - -var testCommands = []struct { - args []interface{} - expected interface{} -}{ - { - []interface{}{"PING"}, - "PONG", - }, - { - []interface{}{"SET", "foo", "bar"}, - "OK", - }, - { - []interface{}{"GET", "foo"}, - []byte("bar"), - }, - { - []interface{}{"GET", "nokey"}, - nil, - }, - { - []interface{}{"MGET", "nokey", "foo"}, - []interface{}{nil, []byte("bar")}, - }, - { - []interface{}{"INCR", "mycounter"}, - int64(1), - }, - { - []interface{}{"LPUSH", "mylist", "foo"}, - int64(1), - }, - { - []interface{}{"LPUSH", "mylist", "bar"}, - int64(2), - }, - { - []interface{}{"LRANGE", "mylist", 0, -1}, - []interface{}{[]byte("bar"), []byte("foo")}, - }, - { - []interface{}{"MULTI"}, - "OK", - }, - { - []interface{}{"LRANGE", "mylist", 0, -1}, - "QUEUED", - }, - { - []interface{}{"PING"}, - "QUEUED", - }, - { - []interface{}{"EXEC"}, - []interface{}{ - []interface{}{[]byte("bar"), []byte("foo")}, - "PONG", - }, - }, -} - -func TestDoCommands(t *testing.T) { - c, err := DialDefaultServer() - if err != nil { - t.Fatalf("error connection to database, %v", err) - } - defer c.Close() - - for _, cmd := range testCommands { - actual, err := c.Do(cmd.args[0].(string), cmd.args[1:]...) 
- if err != nil { - t.Errorf("Do(%v) returned error %v", cmd.args, err) - continue - } - if !reflect.DeepEqual(actual, cmd.expected) { - t.Errorf("Do(%v) = %v, want %v", cmd.args, actual, cmd.expected) - } - } -} - -func TestPipelineCommands(t *testing.T) { - c, err := DialDefaultServer() - if err != nil { - t.Fatalf("error connection to database, %v", err) - } - defer c.Close() - - for _, cmd := range testCommands { - if err := c.Send(cmd.args[0].(string), cmd.args[1:]...); err != nil { - t.Fatalf("Send(%v) returned error %v", cmd.args, err) - } - } - if err := c.Flush(); err != nil { - t.Errorf("Flush() returned error %v", err) - } - for _, cmd := range testCommands { - actual, err := c.Receive() - if err != nil { - t.Fatalf("Receive(%v) returned error %v", cmd.args, err) - } - if !reflect.DeepEqual(actual, cmd.expected) { - t.Errorf("Receive(%v) = %v, want %v", cmd.args, actual, cmd.expected) - } - } -} - -func TestBlankCommmand(t *testing.T) { - c, err := DialDefaultServer() - if err != nil { - t.Fatalf("error connection to database, %v", err) - } - defer c.Close() - - for _, cmd := range testCommands { - if err = c.Send(cmd.args[0].(string), cmd.args[1:]...); err != nil { - t.Fatalf("Send(%v) returned error %v", cmd.args, err) - } - } - reply, err := Values(c.Do("")) - if err != nil { - t.Fatalf("Do() returned error %v", err) - } - if len(reply) != len(testCommands) { - t.Fatalf("len(reply)=%d, want %d", len(reply), len(testCommands)) - } - for i, cmd := range testCommands { - actual := reply[i] - if !reflect.DeepEqual(actual, cmd.expected) { - t.Errorf("Receive(%v) = %v, want %v", cmd.args, actual, cmd.expected) - } - } -} - -func TestRecvBeforeSend(t *testing.T) { - c, err := DialDefaultServer() - if err != nil { - t.Fatalf("error connection to database, %v", err) - } - defer c.Close() - done := make(chan struct{}) - go func() { - c.Receive() - close(done) - }() - time.Sleep(time.Millisecond) - c.Send("PING") - c.Flush() - <-done - _, err = c.Do("") - if err != nil { - t.Fatalf("error=%v", err) - } -} - -func TestError(t *testing.T) { - c, err := DialDefaultServer() - if err != nil { - t.Fatalf("error connection to database, %v", err) - } - defer c.Close() - - c.Do("SET", "key", "val") - _, err = c.Do("HSET", "key", "fld", "val") - if err == nil { - t.Errorf("Expected err for HSET on string key.") - } - if c.Err() != nil { - t.Errorf("Conn has Err()=%v, expect nil", c.Err()) - } - _, err = c.Do("SET", "key", "val") - if err != nil { - t.Errorf("Do(SET, key, val) returned error %v, expected nil.", err) - } -} - -func TestReadTimeout(t *testing.T) { - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("net.Listen returned %v", err) - } - defer l.Close() - - go func() { - for { - c, err1 := l.Accept() - if err1 != nil { - return - } - go func() { - time.Sleep(time.Second) - c.Write([]byte("+OK\r\n")) - c.Close() - }() - } - }() - - // Do - - c1, err := Dial(l.Addr().Network(), l.Addr().String(), DialReadTimeout(time.Millisecond)) - if err != nil { - t.Fatalf("Dial returned %v", err) - } - defer c1.Close() - - _, err = c1.Do("PING") - if err == nil { - t.Fatalf("c1.Do() returned nil, expect error") - } - if c1.Err() == nil { - t.Fatalf("c1.Err() = nil, expect error") - } - - // Send/Flush/Receive - - c2, err := Dial(l.Addr().Network(), l.Addr().String(), DialReadTimeout(time.Millisecond)) - if err != nil { - t.Fatalf("Dial returned %v", err) - } - defer c2.Close() - - c2.Send("PING") - c2.Flush() - _, err = c2.Receive() - if err == nil { - t.Fatalf("c2.Receive() 
returned nil, expect error") - } - if c2.Err() == nil { - t.Fatalf("c2.Err() = nil, expect error") - } -} - -var dialErrors = []struct { - rawurl string - expectedError string -}{ - { - "localhost", - "invalid redis URL scheme", - }, - // The error message for invalid hosts is diffferent in different - // versions of Go, so just check that there is an error message. - { - "redis://weird url", - "", - }, - { - "redis://foo:bar:baz", - "", - }, - { - "http://www.google.com", - "invalid redis URL scheme: http", - }, - { - "redis://localhost:6379/abc123", - "invalid database: abc123", - }, -} - -func TestDialURLErrors(t *testing.T) { - for _, d := range dialErrors { - _, err := DialURL(d.rawurl) - if err == nil || !strings.Contains(err.Error(), d.expectedError) { - t.Errorf("DialURL did not return expected error (expected %v to contain %s)", err, d.expectedError) - } - } -} - -func TestDialURLPort(t *testing.T) { - checkPort := func(network, address string) (net.Conn, error) { - if address != "localhost:6379" { - t.Errorf("DialURL did not set port to 6379 by default (got %v)", address) - } - return nil, nil - } - _, err := DialURL("redis://localhost", DialNetDial(checkPort)) - if err != nil { - t.Error("dial error:", err) - } -} - -func TestDialURLHost(t *testing.T) { - checkHost := func(network, address string) (net.Conn, error) { - if address != "localhost:6379" { - t.Errorf("DialURL did not set host to localhost by default (got %v)", address) - } - return nil, nil - } - _, err := DialURL("redis://:6379", DialNetDial(checkHost)) - if err != nil { - t.Error("dial error:", err) - } -} - -func TestDialURLPassword(t *testing.T) { - var buf bytes.Buffer - _, err := DialURL("redis://x:abc123@localhost", dialTestConn(strings.NewReader("+OK\r\n"), &buf)) - if err != nil { - t.Error("dial error:", err) - } - expected := "*2\r\n$4\r\nAUTH\r\n$6\r\nabc123\r\n" - actual := buf.String() - if actual != expected { - t.Errorf("commands = %q, want %q", actual, expected) - } -} - -func TestDialURLDatabase(t *testing.T) { - var buf bytes.Buffer - _, err := DialURL("redis://localhost/3", dialTestConn(strings.NewReader("+OK\r\n"), &buf)) - if err != nil { - t.Error("dial error:", err) - } - expected := "*2\r\n$6\r\nSELECT\r\n$1\r\n3\r\n" - actual := buf.String() - if actual != expected { - t.Errorf("commands = %q, want %q", actual, expected) - } -} - -// Connect to local instance of Redis running on the default port. -func ExampleDial() { - c, err := Dial("tcp", ":6379") - if err != nil { - // handle error - } - defer c.Close() -} - -// Connect to remote instance of Redis using a URL. -func ExampleDialURL() { - c, err := DialURL(os.Getenv("REDIS_URL")) - if err != nil { - // handle connection error - } - defer c.Close() -} - -// TextExecError tests handling of errors in a transaction. See -// http://io/topics/transactions for information on how Redis handles -// errors in a transaction. -func TestExecError(t *testing.T) { - c, err := DialDefaultServer() - if err != nil { - t.Fatalf("error connection to database, %v", err) - } - defer c.Close() - - // Execute commands that fail before EXEC is called. - - c.Do("DEL", "k0") - c.Do("ZADD", "k0", 0, 0) - c.Send("MULTI") - c.Send("NOTACOMMAND", "k0", 0, 0) - c.Send("ZINCRBY", "k0", 0, 0) - v, err := c.Do("EXEC") - if err == nil { - t.Fatalf("EXEC returned values %v, expected error", v) - } - - // Execute commands that fail after EXEC is called. The first command - // returns an error. 
- - c.Do("DEL", "k1") - c.Do("ZADD", "k1", 0, 0) - c.Send("MULTI") - c.Send("HSET", "k1", 0, 0) - c.Send("ZINCRBY", "k1", 0, 0) - v, err = c.Do("EXEC") - if err != nil { - t.Fatalf("EXEC returned error %v", err) - } - - vs, err := Values(v, nil) - if err != nil { - t.Fatalf("Values(v) returned error %v", err) - } - - if len(vs) != 2 { - t.Fatalf("len(vs) == %d, want 2", len(vs)) - } - - if _, ok := vs[0].(error); !ok { - t.Fatalf("first result is type %T, expected error", vs[0]) - } - - if _, ok := vs[1].([]byte); !ok { - t.Fatalf("second result is type %T, expected []byte", vs[1]) - } - - // Execute commands that fail after EXEC is called. The second command - // returns an error. - - c.Do("ZADD", "k2", 0, 0) - c.Send("MULTI") - c.Send("ZINCRBY", "k2", 0, 0) - c.Send("HSET", "k2", 0, 0) - v, err = c.Do("EXEC") - if err != nil { - t.Fatalf("EXEC returned error %v", err) - } - - vs, err = Values(v, nil) - if err != nil { - t.Fatalf("Values(v) returned error %v", err) - } - - if len(vs) != 2 { - t.Fatalf("len(vs) == %d, want 2", len(vs)) - } - - if _, ok := vs[0].([]byte); !ok { - t.Fatalf("first result is type %T, expected []byte", vs[0]) - } - - if _, ok := vs[1].(error); !ok { - t.Fatalf("second result is type %T, expected error", vs[2]) - } -} - -func BenchmarkDoEmpty(b *testing.B) { - c, err := DialDefaultServer() - if err != nil { - b.Fatal(err) - } - defer c.Close() - b.ResetTimer() - for i := 0; i < b.N; i++ { - if _, err := c.Do(""); err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkDoPing(b *testing.B) { - c, err := DialDefaultServer() - if err != nil { - b.Fatal(err) - } - defer c.Close() - b.ResetTimer() - for i := 0; i < b.N; i++ { - if _, err := c.Do("PING"); err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkConn(b *testing.B) { - for i := 0; i < b.N; i++ { - c, err := DialDefaultServer() - if err != nil { - b.Fatal(err) - } - c2 := c.WithContext(context.TODO()) - if _, err := c2.Do("PING"); err != nil { - b.Fatal(err) - } - c2.Close() - } -} diff --git a/pkg/cache/redis/doc.go b/pkg/cache/redis/doc.go deleted file mode 100644 index 1ae6f0cc2..000000000 --- a/pkg/cache/redis/doc.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -// Package redis is a client for the Redis database. -// -// The Redigo FAQ (https://github.com/garyburd/redigo/wiki/FAQ) contains more -// documentation about this package. -// -// Connections -// -// The Conn interface is the primary interface for working with Redis. -// Applications create connections by calling the Dial, DialWithTimeout or -// NewConn functions. In the future, functions will be added for creating -// sharded and other types of connections. -// -// The application must call the connection Close method when the application -// is done with the connection. 
-// -// Executing Commands -// -// The Conn interface has a generic method for executing Redis commands: -// -// Do(commandName string, args ...interface{}) (reply interface{}, err error) -// -// The Redis command reference (http://redis.io/commands) lists the available -// commands. An example of using the Redis APPEND command is: -// -// n, err := conn.Do("APPEND", "key", "value") -// -// The Do method converts command arguments to binary strings for transmission -// to the server as follows: -// -// Go Type Conversion -// []byte Sent as is -// string Sent as is -// int, int64 strconv.FormatInt(v) -// float64 strconv.FormatFloat(v, 'g', -1, 64) -// bool true -> "1", false -> "0" -// nil "" -// all other types fmt.Print(v) -// -// Redis command reply types are represented using the following Go types: -// -// Redis type Go type -// error redis.Error -// integer int64 -// simple string string -// bulk string []byte or nil if value not present. -// array []interface{} or nil if value not present. -// -// Use type assertions or the reply helper functions to convert from -// interface{} to the specific Go type for the command result. -// -// Pipelining -// -// Connections support pipelining using the Send, Flush and Receive methods. -// -// Send(commandName string, args ...interface{}) error -// Flush() error -// Receive() (reply interface{}, err error) -// -// Send writes the command to the connection's output buffer. Flush flushes the -// connection's output buffer to the server. Receive reads a single reply from -// the server. The following example shows a simple pipeline. -// -// c.Send("SET", "foo", "bar") -// c.Send("GET", "foo") -// c.Flush() -// c.Receive() // reply from SET -// v, err = c.Receive() // reply from GET -// -// The Do method combines the functionality of the Send, Flush and Receive -// methods. The Do method starts by writing the command and flushing the output -// buffer. Next, the Do method receives all pending replies including the reply -// for the command just sent by Do. If any of the received replies is an error, -// then Do returns the error. If there are no errors, then Do returns the last -// reply. If the command argument to the Do method is "", then the Do method -// will flush the output buffer and receive pending replies without sending a -// command. -// -// Use the Send and Do methods to implement pipelined transactions. -// -// c.Send("MULTI") -// c.Send("INCR", "foo") -// c.Send("INCR", "bar") -// r, err := c.Do("EXEC") -// fmt.Println(r) // prints [1, 1] -// -// Concurrency -// -// Connections do not support concurrent calls to the write methods (Send, -// Flush) or concurrent calls to the read method (Receive). Connections do -// allow a concurrent reader and writer. -// -// Because the Do method combines the functionality of Send, Flush and Receive, -// the Do method cannot be called concurrently with the other methods. -// -// For full concurrent access to Redis, use the thread-safe Pool to get and -// release connections from within a goroutine. -// -// Publish and Subscribe -// -// Use the Send, Flush and Receive methods to implement Pub/Sub subscribers. -// -// c.Send("SUBSCRIBE", "example") -// c.Flush() -// for { -// reply, err := c.Receive() -// if err != nil { -// return err -// } -// // process pushed message -// } -// -// The PubSubConn type wraps a Conn with convenience methods for implementing -// subscribers. The Subscribe, PSubscribe, Unsubscribe and PUnsubscribe methods -// send and flush a subscription management command. 
The receive method -// converts a pushed message to convenient types for use in a type switch. -// -// psc := redis.PubSubConn{c} -// psc.Subscribe("example") -// for { -// switch v := psc.Receive().(type) { -// case redis.Message: -// fmt.Printf("%s: message: %s\n", v.Channel, v.Data) -// case redis.Subscription: -// fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count) -// case error: -// return v -// } -// } -// -// Reply Helpers -// -// The Bool, Int, Bytes, String, Strings and Values functions convert a reply -// to a value of a specific type. To allow convenient wrapping of calls to the -// connection Do and Receive methods, the functions take a second argument of -// type error. If the error is non-nil, then the helper function returns the -// error. If the error is nil, the function converts the reply to the specified -// type: -// -// exists, err := redis.Bool(c.Do("EXISTS", "foo")) -// if err != nil { -// // handle error return from c.Do or type conversion error. -// } -// -// The Scan function converts elements of a array reply to Go types: -// -// var value1 int -// var value2 string -// reply, err := redis.Values(c.Do("MGET", "key1", "key2")) -// if err != nil { -// // handle error -// } -// if _, err := redis.Scan(reply, &value1, &value2); err != nil { -// // handle error -// } -package redis diff --git a/pkg/cache/redis/errors.go b/pkg/cache/redis/errors.go deleted file mode 100644 index 6be483ff9..000000000 --- a/pkg/cache/redis/errors.go +++ /dev/null @@ -1,43 +0,0 @@ -package redis - -import ( - "strings" - - pkgerr "github.com/pkg/errors" -) - -func formatErr(err error, name, addr string) string { - e := pkgerr.Cause(err) - switch e { - case ErrNil, nil: - if e == ErrNil { - _metricMisses.Inc(name, addr) - } - return "" - default: - es := e.Error() - switch { - case strings.HasPrefix(es, "read"): - return "read timeout" - case strings.HasPrefix(es, "dial"): - if strings.Contains(es, "connection refused") { - return "connection refused" - } - return "dial timeout" - case strings.HasPrefix(es, "write"): - return "write timeout" - case strings.Contains(es, "EOF"): - return "eof" - case strings.Contains(es, "reset"): - return "reset" - case strings.Contains(es, "broken"): - return "broken pipe" - case strings.Contains(es, "pool exhausted"): - return "pool exhausted" - case strings.Contains(es, "pool closed"): - return "pool closed" - default: - return "unexpected err" - } - } -} diff --git a/pkg/cache/redis/log.go b/pkg/cache/redis/log.go deleted file mode 100644 index 487a1408f..000000000 --- a/pkg/cache/redis/log.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "bytes" - "context" - "fmt" - "log" -) - -// NewLoggingConn returns a logging wrapper around a connection. -// ATTENTION: ONLY use loggingConn in developing, DO NOT use this in production. -func NewLoggingConn(conn Conn, logger *log.Logger, prefix string) Conn { - if prefix != "" { - prefix = prefix + "." 
- } - return &loggingConn{Conn: conn, logger: logger, prefix: prefix} -} - -type loggingConn struct { - Conn - logger *log.Logger - prefix string -} - -func (c *loggingConn) Close() error { - err := c.Conn.Close() - var buf bytes.Buffer - fmt.Fprintf(&buf, "%sClose() -> (%v)", c.prefix, err) - c.logger.Output(2, buf.String()) - return err -} - -func (c *loggingConn) printValue(buf *bytes.Buffer, v interface{}) { - const chop = 32 - switch v := v.(type) { - case []byte: - if len(v) > chop { - fmt.Fprintf(buf, "%q...", v[:chop]) - } else { - fmt.Fprintf(buf, "%q", v) - } - case string: - if len(v) > chop { - fmt.Fprintf(buf, "%q...", v[:chop]) - } else { - fmt.Fprintf(buf, "%q", v) - } - case []interface{}: - if len(v) == 0 { - buf.WriteString("[]") - } else { - sep := "[" - fin := "]" - if len(v) > chop { - v = v[:chop] - fin = "...]" - } - for _, vv := range v { - buf.WriteString(sep) - c.printValue(buf, vv) - sep = ", " - } - buf.WriteString(fin) - } - default: - fmt.Fprint(buf, v) - } -} - -func (c *loggingConn) print(method, commandName string, args []interface{}, reply interface{}, err error) { - var buf bytes.Buffer - fmt.Fprintf(&buf, "%s%s(", c.prefix, method) - if method != "Receive" { - buf.WriteString(commandName) - for _, arg := range args { - buf.WriteString(", ") - c.printValue(&buf, arg) - } - } - buf.WriteString(") -> (") - if method != "Send" { - c.printValue(&buf, reply) - buf.WriteString(", ") - } - fmt.Fprintf(&buf, "%v)", err) - c.logger.Output(3, buf.String()) -} - -func (c *loggingConn) Do(commandName string, args ...interface{}) (reply interface{}, err error) { - reply, err = c.Conn.Do(commandName, args...) - c.print("Do", commandName, args, reply, err) - return reply, err -} - -func (c *loggingConn) Send(commandName string, args ...interface{}) (err error) { - err = c.Conn.Send(commandName, args...) - c.print("Send", commandName, args, nil, err) - return -} - -func (c *loggingConn) Receive() (interface{}, error) { - reply, err := c.Conn.Receive() - c.print("Receive", "", nil, reply, err) - return reply, err -} - -func (c *loggingConn) WithContext(ctx context.Context) Conn { - return c -} diff --git a/pkg/cache/redis/main_test.go b/pkg/cache/redis/main_test.go deleted file mode 100644 index 0cf3556b5..000000000 --- a/pkg/cache/redis/main_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package redis - -import ( - "flag" - "os" - "testing" - "time" - - "github.com/go-kratos/kratos/pkg/container/pool" - "github.com/go-kratos/kratos/pkg/testing/lich" - xtime "github.com/go-kratos/kratos/pkg/time" -) - -var ( - testRedisAddr string - testPool *Pool - testConfig *Config -) - -func setupTestConfig(addr string) { - c := getTestConfig(addr) - c.Config = &pool.Config{ - Active: 20, - Idle: 2, - IdleTimeout: xtime.Duration(90 * time.Second), - } - testConfig = c -} - -func getTestConfig(addr string) *Config { - return &Config{ - Name: "test", - Proto: "tcp", - Addr: addr, - DialTimeout: xtime.Duration(time.Second), - ReadTimeout: xtime.Duration(time.Second), - WriteTimeout: xtime.Duration(time.Second), - } -} - -func setupTestPool() { - testPool = NewPool(testConfig) -} - -// DialDefaultServer starts the test server if not already started and dials a -// connection to the server. 
-func DialDefaultServer() (Conn, error) { - c, err := Dial("tcp", testRedisAddr, DialReadTimeout(1*time.Second), DialWriteTimeout(1*time.Second)) - if err != nil { - return nil, err - } - c.Do("FLUSHDB") - return c, nil -} - -func TestMain(m *testing.M) { - flag.Set("f", "./test/docker-compose.yaml") - if err := lich.Setup(); err != nil { - panic(err) - } - defer lich.Teardown() - testRedisAddr = "localhost:6379" - setupTestConfig(testRedisAddr) - setupTestPool() - ret := m.Run() - os.Exit(ret) -} diff --git a/pkg/cache/redis/metrics.go b/pkg/cache/redis/metrics.go deleted file mode 100644 index 4ad7a0b87..000000000 --- a/pkg/cache/redis/metrics.go +++ /dev/null @@ -1,53 +0,0 @@ -package redis - -import ( - "github.com/go-kratos/kratos/pkg/stat/metric" -) - -const namespace = "redis_client" - -var ( - _metricReqDur = metric.NewHistogramVec(&metric.HistogramVecOpts{ - Namespace: namespace, - Subsystem: "requests", - Name: "duration_ms", - Help: "redis client requests duration(ms).", - Labels: []string{"name", "addr", "command"}, - Buckets: []float64{5, 10, 25, 50, 100, 250, 500, 1000, 2500}, - }) - _metricReqErr = metric.NewCounterVec(&metric.CounterVecOpts{ - Namespace: namespace, - Subsystem: "requests", - Name: "error_total", - Help: "redis client requests error count.", - Labels: []string{"name", "addr", "command", "error"}, - }) - _metricConnTotal = metric.NewCounterVec(&metric.CounterVecOpts{ - Namespace: namespace, - Subsystem: "connections", - Name: "total", - Help: "redis client connections total count.", - Labels: []string{"name", "addr", "state"}, - }) - _metricConnCurrent = metric.NewGaugeVec(&metric.GaugeVecOpts{ - Namespace: namespace, - Subsystem: "connections", - Name: "current", - Help: "redis client connections current.", - Labels: []string{"name", "addr", "state"}, - }) - _metricHits = metric.NewCounterVec(&metric.CounterVecOpts{ - Namespace: namespace, - Subsystem: "", - Name: "hits_total", - Help: "redis client hits total.", - Labels: []string{"name", "addr"}, - }) - _metricMisses = metric.NewCounterVec(&metric.CounterVecOpts{ - Namespace: namespace, - Subsystem: "", - Name: "misses_total", - Help: "redis client misses total.", - Labels: []string{"name", "addr"}, - }) -) diff --git a/pkg/cache/redis/mock.go b/pkg/cache/redis/mock.go deleted file mode 100644 index 55ed57447..000000000 --- a/pkg/cache/redis/mock.go +++ /dev/null @@ -1,36 +0,0 @@ -package redis - -import "context" - -// MockErr for unit test. -type MockErr struct { - Error error -} - -// MockWith return a mock conn. -func MockWith(err error) MockErr { - return MockErr{Error: err} -} - -// Err . -func (m MockErr) Err() error { return m.Error } - -// Close . -func (m MockErr) Close() error { return m.Error } - -// Do . -func (m MockErr) Do(commandName string, args ...interface{}) (interface{}, error) { - return nil, m.Error -} - -// Send . -func (m MockErr) Send(commandName string, args ...interface{}) error { return m.Error } - -// Flush . -func (m MockErr) Flush() error { return m.Error } - -// Receive . -func (m MockErr) Receive() (interface{}, error) { return nil, m.Error } - -// WithContext . -func (m MockErr) WithContext(context.Context) Conn { return m } diff --git a/pkg/cache/redis/pipeline.go b/pkg/cache/redis/pipeline.go deleted file mode 100644 index e9e745a26..000000000 --- a/pkg/cache/redis/pipeline.go +++ /dev/null @@ -1,84 +0,0 @@ -package redis - -import ( - "context" - "errors" -) - -type Pipeliner interface { - // Send writes the command to the client's output buffer. 
- Send(commandName string, args ...interface{}) - - // Exec executes all commands and get replies. - Exec(ctx context.Context) (rs *Replies, err error) -} - -var ( - ErrNoReply = errors.New("redis: no reply in result set") -) - -type pipeliner struct { - pool *Pool - cmds []*cmd -} - -type Replies struct { - replies []*reply -} - -type reply struct { - reply interface{} - err error -} - -func (rs *Replies) Next() bool { - return len(rs.replies) > 0 -} - -func (rs *Replies) Scan() (reply interface{}, err error) { - if !rs.Next() { - return nil, ErrNoReply - } - reply, err = rs.replies[0].reply, rs.replies[0].err - rs.replies = rs.replies[1:] - return -} - -type cmd struct { - commandName string - args []interface{} -} - -func (p *pipeliner) Send(commandName string, args ...interface{}) { - p.cmds = append(p.cmds, &cmd{commandName: commandName, args: args}) -} - -func (p *pipeliner) Exec(ctx context.Context) (rs *Replies, err error) { - n := len(p.cmds) - if n == 0 { - return &Replies{}, nil - } - c := p.pool.Get(ctx) - defer c.Close() - for len(p.cmds) > 0 { - cmd := p.cmds[0] - p.cmds = p.cmds[1:] - if err = c.Send(cmd.commandName, cmd.args...); err != nil { - p.cmds = p.cmds[:0] - return nil, err - } - } - if err = c.Flush(); err != nil { - p.cmds = p.cmds[:0] - return nil, err - } - rps := make([]*reply, 0, n) - for i := 0; i < n; i++ { - rp, err := c.Receive() - rps = append(rps, &reply{reply: rp, err: err}) - } - rs = &Replies{ - replies: rps, - } - return -} diff --git a/pkg/cache/redis/pipeline_test.go b/pkg/cache/redis/pipeline_test.go deleted file mode 100644 index 9b5d4ee53..000000000 --- a/pkg/cache/redis/pipeline_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package redis - -import ( - "context" - "fmt" - "reflect" - "testing" - "time" - - "github.com/go-kratos/kratos/pkg/container/pool" - xtime "github.com/go-kratos/kratos/pkg/time" -) - -func TestRedis_Pipeline(t *testing.T) { - conf := &Config{ - Name: "test", - Proto: "tcp", - Addr: testRedisAddr, - DialTimeout: xtime.Duration(1 * time.Second), - ReadTimeout: xtime.Duration(1 * time.Second), - WriteTimeout: xtime.Duration(1 * time.Second), - } - conf.Config = &pool.Config{ - Active: 10, - Idle: 2, - IdleTimeout: xtime.Duration(90 * time.Second), - } - - r := NewRedis(conf) - r.Do(context.TODO(), "FLUSHDB") - - p := r.Pipeline() - - for _, cmd := range testCommands { - p.Send(cmd.args[0].(string), cmd.args[1:]...) 
- } - - replies, err := p.Exec(context.TODO()) - - i := 0 - for replies.Next() { - cmd := testCommands[i] - actual, err := replies.Scan() - if err != nil { - t.Fatalf("Receive(%v) returned error %v", cmd.args, err) - } - if !reflect.DeepEqual(actual, cmd.expected) { - t.Errorf("Receive(%v) = %v, want %v", cmd.args, actual, cmd.expected) - } - i++ - } - err = r.Close() - if err != nil { - t.Errorf("Close() error %v", err) - } -} - -func ExamplePipeliner() { - r := NewRedis(testConfig) - defer r.Close() - - pip := r.Pipeline() - pip.Send("SET", "hello", "world") - pip.Send("GET", "hello") - replies, err := pip.Exec(context.TODO()) - if err != nil { - fmt.Printf("%#v\n", err) - } - for replies.Next() { - s, err := String(replies.Scan()) - if err != nil { - fmt.Printf("err %#v\n", err) - } - fmt.Printf("%#v\n", s) - } - // Output: - // "OK" - // "world" -} - -func BenchmarkRedisPipelineExec(b *testing.B) { - r := NewRedis(testConfig) - defer r.Close() - - r.Do(context.TODO(), "SET", "abcde", "fghiasdfasdf") - - b.ResetTimer() - for i := 0; i < b.N; i++ { - p := r.Pipeline() - p.Send("GET", "abcde") - _, err := p.Exec(context.TODO()) - if err != nil { - b.Fatal(err) - } - } -} diff --git a/pkg/cache/redis/pool.go b/pkg/cache/redis/pool.go deleted file mode 100644 index ecab41e29..000000000 --- a/pkg/cache/redis/pool.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "bytes" - "context" - "crypto/rand" - "crypto/sha1" - "errors" - "io" - "strconv" - "sync" - "time" - - "github.com/go-kratos/kratos/pkg/container/pool" - "github.com/go-kratos/kratos/pkg/net/trace" - xtime "github.com/go-kratos/kratos/pkg/time" -) - -var beginTime, _ = time.Parse("2006-01-02 15:04:05", "2006-01-02 15:04:05") - -var ( - errConnClosed = errors.New("redigo: connection closed") -) - -// Pool . -type Pool struct { - *pool.Slice - // config - c *Config - // statfunc - statfunc func(name, addr, cmd string, t time.Time, err error) func() -} - -// NewPool creates a new pool. -func NewPool(c *Config, options ...DialOption) (p *Pool) { - if c.DialTimeout <= 0 || c.ReadTimeout <= 0 || c.WriteTimeout <= 0 { - panic("must config redis timeout") - } - if c.SlowLog <= 0 { - c.SlowLog = xtime.Duration(250 * time.Millisecond) - } - ops := []DialOption{ - DialConnectTimeout(time.Duration(c.DialTimeout)), - DialReadTimeout(time.Duration(c.ReadTimeout)), - DialWriteTimeout(time.Duration(c.WriteTimeout)), - DialPassword(c.Auth), - DialDatabase(c.Db), - } - ops = append(ops, options...) - p1 := pool.NewSlice(c.Config) - - // new pool - p1.New = func(ctx context.Context) (io.Closer, error) { - conn, err := Dial(c.Proto, c.Addr, ops...) - if err != nil { - return nil, err - } - return &traceConn{ - Conn: conn, - connTags: []trace.Tag{trace.TagString(trace.TagPeerAddress, c.Addr)}, - slowLogThreshold: time.Duration(c.SlowLog), - }, nil - } - p = &Pool{Slice: p1, c: c, statfunc: pstat} - return -} - -// Get gets a connection. 
The application must close the returned connection. -// This method always returns a valid connection so that applications can defer -// error handling to the first use of the connection. If there is an error -// getting an underlying connection, then the connection Err, Do, Send, Flush -// and Receive methods return that error. -func (p *Pool) Get(ctx context.Context) Conn { - c, err := p.Slice.Get(ctx) - if err != nil { - return errorConnection{err} - } - c1, _ := c.(Conn) - return &pooledConnection{p: p, c: c1.WithContext(ctx), rc: c1, now: beginTime} -} - -// Close releases the resources used by the pool. -func (p *Pool) Close() error { - return p.Slice.Close() -} - -type pooledConnection struct { - p *Pool - rc Conn - c Conn - state int - - now time.Time - cmds []string -} - -var ( - sentinel []byte - sentinelOnce sync.Once -) - -func initSentinel() { - p := make([]byte, 64) - if _, err := rand.Read(p); err == nil { - sentinel = p - } else { - h := sha1.New() - io.WriteString(h, "Oops, rand failed. Use time instead.") - io.WriteString(h, strconv.FormatInt(time.Now().UnixNano(), 10)) - sentinel = h.Sum(nil) - } -} - -// SetStatFunc set stat func. -func (p *Pool) SetStatFunc(fn func(name, addr, cmd string, t time.Time, err error) func()) { - p.statfunc = fn -} - -func pstat(name, addr, cmd string, t time.Time, err error) func() { - return func() { - _metricReqDur.Observe(int64(time.Since(t)/time.Millisecond), name, addr, cmd) - if err != nil { - if msg := formatErr(err, name, addr); msg != "" { - _metricReqErr.Inc(name, addr, cmd, msg) - } - return - } - _metricHits.Inc(name, addr) - } -} - -func (pc *pooledConnection) Close() error { - c := pc.c - if _, ok := c.(errorConnection); ok { - return nil - } - pc.c = errorConnection{errConnClosed} - - if pc.state&MultiState != 0 { - c.Send("DISCARD") - pc.state &^= (MultiState | WatchState) - } else if pc.state&WatchState != 0 { - c.Send("UNWATCH") - pc.state &^= WatchState - } - if pc.state&SubscribeState != 0 { - c.Send("UNSUBSCRIBE") - c.Send("PUNSUBSCRIBE") - // To detect the end of the message stream, ask the server to echo - // a sentinel value and read until we see that value. - sentinelOnce.Do(initSentinel) - c.Send("ECHO", sentinel) - c.Flush() - for { - p, err := c.Receive() - if err != nil { - break - } - if p, ok := p.([]byte); ok && bytes.Equal(p, sentinel) { - pc.state &^= SubscribeState - break - } - } - } - _, err := c.Do("") - pc.p.Slice.Put(context.Background(), pc.rc, pc.state != 0 || c.Err() != nil) - return err -} - -func (pc *pooledConnection) Err() error { - return pc.c.Err() -} - -func (pc *pooledConnection) Do(commandName string, args ...interface{}) (reply interface{}, err error) { - now := time.Now() - ci := LookupCommandInfo(commandName) - pc.state = (pc.state | ci.Set) &^ ci.Clear - reply, err = pc.c.Do(commandName, args...) - if pc.p.statfunc != nil { - pc.p.statfunc(pc.p.c.Name, pc.p.c.Addr, commandName, now, err)() - } - return -} - -func (pc *pooledConnection) Send(commandName string, args ...interface{}) (err error) { - ci := LookupCommandInfo(commandName) - pc.state = (pc.state | ci.Set) &^ ci.Clear - if pc.now.Equal(beginTime) { - // mark first send time - pc.now = time.Now() - } - pc.cmds = append(pc.cmds, commandName) - return pc.c.Send(commandName, args...) 
-} - -func (pc *pooledConnection) Flush() error { - return pc.c.Flush() -} - -func (pc *pooledConnection) Receive() (reply interface{}, err error) { - reply, err = pc.c.Receive() - if len(pc.cmds) > 0 { - cmd := pc.cmds[0] - pc.cmds = pc.cmds[1:] - if pc.p.statfunc != nil { - pc.p.statfunc(pc.p.c.Name, pc.p.c.Addr, cmd, pc.now, err)() - } - } - return -} - -func (pc *pooledConnection) WithContext(ctx context.Context) Conn { - return pc -} - -type errorConnection struct{ err error } - -func (ec errorConnection) Do(string, ...interface{}) (interface{}, error) { - return nil, ec.err -} -func (ec errorConnection) Send(string, ...interface{}) error { return ec.err } -func (ec errorConnection) Err() error { return ec.err } -func (ec errorConnection) Close() error { return ec.err } -func (ec errorConnection) Flush() error { return ec.err } -func (ec errorConnection) Receive() (interface{}, error) { return nil, ec.err } -func (ec errorConnection) WithContext(context.Context) Conn { return ec } diff --git a/pkg/cache/redis/pool_test.go b/pkg/cache/redis/pool_test.go deleted file mode 100644 index 4c0bb5f01..000000000 --- a/pkg/cache/redis/pool_test.go +++ /dev/null @@ -1,539 +0,0 @@ -// Copyright 2011 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "context" - "errors" - "io" - "reflect" - "sync" - "testing" - "time" - - "github.com/go-kratos/kratos/pkg/container/pool" -) - -type poolTestConn struct { - d *poolDialer - err error - c Conn - ctx context.Context -} - -func (c *poolTestConn) Flush() error { - return c.c.Flush() -} - -func (c *poolTestConn) Receive() (reply interface{}, err error) { - return c.c.Receive() -} - -func (c *poolTestConn) WithContext(ctx context.Context) Conn { - c.c.WithContext(ctx) - c.ctx = ctx - return c -} - -func (c *poolTestConn) Close() error { - c.d.mu.Lock() - c.d.open-- - c.d.mu.Unlock() - return c.c.Close() -} - -func (c *poolTestConn) Err() error { return c.err } - -func (c *poolTestConn) Do(commandName string, args ...interface{}) (reply interface{}, err error) { - if commandName == "ERR" { - c.err = args[0].(error) - commandName = "PING" - } - if commandName != "" { - c.d.commands = append(c.d.commands, commandName) - } - return c.c.Do(commandName, args...) -} - -func (c *poolTestConn) Send(commandName string, args ...interface{}) error { - c.d.commands = append(c.d.commands, commandName) - return c.c.Send(commandName, args...) 
-} - -type poolDialer struct { - mu sync.Mutex - t *testing.T - dialed int - open int - commands []string - dialErr error -} - -func (d *poolDialer) dial() (Conn, error) { - d.mu.Lock() - d.dialed += 1 - dialErr := d.dialErr - d.mu.Unlock() - if dialErr != nil { - return nil, d.dialErr - } - c, err := DialDefaultServer() - if err != nil { - return nil, err - } - d.mu.Lock() - d.open += 1 - d.mu.Unlock() - return &poolTestConn{d: d, c: c}, nil -} - -func (d *poolDialer) check(message string, p *Pool, dialed, open int) { - d.mu.Lock() - if d.dialed != dialed { - d.t.Errorf("%s: dialed=%d, want %d", message, d.dialed, dialed) - } - if d.open != open { - d.t.Errorf("%s: open=%d, want %d", message, d.open, open) - } - // if active := p.ActiveCount(); active != open { - // d.t.Errorf("%s: active=%d, want %d", message, active, open) - // } - d.mu.Unlock() -} - -func TestPoolReuse(t *testing.T) { - d := poolDialer{t: t} - p := NewPool(testConfig) - p.Slice.New = func(ctx context.Context) (io.Closer, error) { - return d.dial() - } - var err error - - for i := 0; i < 10; i++ { - c1 := p.Get(context.TODO()) - c1.Do("PING") - c2 := p.Get(context.TODO()) - c2.Do("PING") - c1.Close() - c2.Close() - } - - d.check("before close", p, 2, 2) - err = p.Close() - if err != nil { - t.Fatal(err) - } - d.check("after close", p, 2, 0) -} - -func TestPoolMaxIdle(t *testing.T) { - d := poolDialer{t: t} - p := NewPool(testConfig) - p.Slice.New = func(ctx context.Context) (io.Closer, error) { - return d.dial() - } - defer p.Close() - - for i := 0; i < 10; i++ { - c1 := p.Get(context.TODO()) - c1.Do("PING") - c2 := p.Get(context.TODO()) - c2.Do("PING") - c3 := p.Get(context.TODO()) - c3.Do("PING") - c1.Close() - c2.Close() - c3.Close() - } - d.check("before close", p, 12, 2) - p.Close() - d.check("after close", p, 12, 0) -} - -func TestPoolError(t *testing.T) { - d := poolDialer{t: t} - p := NewPool(testConfig) - p.Slice.New = func(ctx context.Context) (io.Closer, error) { - return d.dial() - } - defer p.Close() - - c := p.Get(context.TODO()) - c.Do("ERR", io.EOF) - if c.Err() == nil { - t.Errorf("expected c.Err() != nil") - } - c.Close() - - c = p.Get(context.TODO()) - c.Do("ERR", io.EOF) - c.Close() - - d.check(".", p, 2, 0) -} - -func TestPoolClose(t *testing.T) { - d := poolDialer{t: t} - p := NewPool(testConfig) - p.Slice.New = func(ctx context.Context) (io.Closer, error) { - return d.dial() - } - defer p.Close() - - c1 := p.Get(context.TODO()) - c1.Do("PING") - c2 := p.Get(context.TODO()) - c2.Do("PING") - c3 := p.Get(context.TODO()) - c3.Do("PING") - - c1.Close() - if _, err := c1.Do("PING"); err == nil { - t.Errorf("expected error after connection closed") - } - - c2.Close() - c2.Close() - - p.Close() - - d.check("after pool close", p, 3, 1) - - if _, err := c1.Do("PING"); err == nil { - t.Errorf("expected error after connection and pool closed") - } - - c3.Close() - - d.check("after conn close", p, 3, 0) - - c1 = p.Get(context.TODO()) - if _, err := c1.Do("PING"); err == nil { - t.Errorf("expected error after pool closed") - } -} - -func TestPoolConcurrenSendReceive(t *testing.T) { - p := NewPool(testConfig) - p.Slice.New = func(ctx context.Context) (io.Closer, error) { - return DialDefaultServer() - } - defer p.Close() - - c := p.Get(context.TODO()) - done := make(chan error, 1) - go func() { - _, err := c.Receive() - done <- err - }() - c.Send("PING") - c.Flush() - err := <-done - if err != nil { - t.Fatalf("Receive() returned error %v", err) - } - _, err = c.Do("") - if err != nil { - t.Fatalf("Do() 
returned error %v", err) - } - c.Close() -} - -func TestPoolMaxActive(t *testing.T) { - d := poolDialer{t: t} - conf := getTestConfig(testRedisAddr) - conf.Config = &pool.Config{ - Active: 2, - Idle: 2, - } - p := NewPool(conf) - p.Slice.New = func(ctx context.Context) (io.Closer, error) { - return d.dial() - } - defer p.Close() - - c1 := p.Get(context.TODO()) - c1.Do("PING") - c2 := p.Get(context.TODO()) - c2.Do("PING") - - d.check("1", p, 2, 2) - - c3 := p.Get(context.TODO()) - if _, err := c3.Do("PING"); err != pool.ErrPoolExhausted { - t.Errorf("expected pool exhausted") - } - - c3.Close() - d.check("2", p, 2, 2) - c2.Close() - d.check("3", p, 2, 2) - - c3 = p.Get(context.TODO()) - if _, err := c3.Do("PING"); err != nil { - t.Errorf("expected good channel, err=%v", err) - } - c3.Close() - - d.check("4", p, 2, 2) -} - -func TestPoolMonitorCleanup(t *testing.T) { - d := poolDialer{t: t} - p := NewPool(testConfig) - p.Slice.New = func(ctx context.Context) (io.Closer, error) { - return d.dial() - } - defer p.Close() - c := p.Get(context.TODO()) - c.Send("MONITOR") - c.Close() - - d.check("", p, 1, 0) -} - -func TestPoolPubSubCleanup(t *testing.T) { - d := poolDialer{t: t} - p := NewPool(testConfig) - p.Slice.New = func(ctx context.Context) (io.Closer, error) { - return d.dial() - } - defer p.Close() - - c := p.Get(context.TODO()) - c.Send("SUBSCRIBE", "x") - c.Close() - - want := []string{"SUBSCRIBE", "UNSUBSCRIBE", "PUNSUBSCRIBE", "ECHO"} - if !reflect.DeepEqual(d.commands, want) { - t.Errorf("got commands %v, want %v", d.commands, want) - } - d.commands = nil - - c = p.Get(context.TODO()) - c.Send("PSUBSCRIBE", "x*") - c.Close() - - want = []string{"PSUBSCRIBE", "UNSUBSCRIBE", "PUNSUBSCRIBE", "ECHO"} - if !reflect.DeepEqual(d.commands, want) { - t.Errorf("got commands %v, want %v", d.commands, want) - } - d.commands = nil -} - -func TestPoolTransactionCleanup(t *testing.T) { - d := poolDialer{t: t} - p := NewPool(testConfig) - p.Slice.New = func(ctx context.Context) (io.Closer, error) { - return d.dial() - } - defer p.Close() - - c := p.Get(context.TODO()) - c.Do("WATCH", "key") - c.Do("PING") - c.Close() - - want := []string{"WATCH", "PING", "UNWATCH"} - if !reflect.DeepEqual(d.commands, want) { - t.Errorf("got commands %v, want %v", d.commands, want) - } - d.commands = nil - - c = p.Get(context.TODO()) - c.Do("WATCH", "key") - c.Do("UNWATCH") - c.Do("PING") - c.Close() - - want = []string{"WATCH", "UNWATCH", "PING"} - if !reflect.DeepEqual(d.commands, want) { - t.Errorf("got commands %v, want %v", d.commands, want) - } - d.commands = nil - - c = p.Get(context.TODO()) - c.Do("WATCH", "key") - c.Do("MULTI") - c.Do("PING") - c.Close() - - want = []string{"WATCH", "MULTI", "PING", "DISCARD"} - if !reflect.DeepEqual(d.commands, want) { - t.Errorf("got commands %v, want %v", d.commands, want) - } - d.commands = nil - - c = p.Get(context.TODO()) - c.Do("WATCH", "key") - c.Do("MULTI") - c.Do("DISCARD") - c.Do("PING") - c.Close() - - want = []string{"WATCH", "MULTI", "DISCARD", "PING"} - if !reflect.DeepEqual(d.commands, want) { - t.Errorf("got commands %v, want %v", d.commands, want) - } - d.commands = nil - - c = p.Get(context.TODO()) - c.Do("WATCH", "key") - c.Do("MULTI") - c.Do("EXEC") - c.Do("PING") - c.Close() - - want = []string{"WATCH", "MULTI", "EXEC", "PING"} - if !reflect.DeepEqual(d.commands, want) { - t.Errorf("got commands %v, want %v", d.commands, want) - } - d.commands = nil -} - -func startGoroutines(p *Pool, cmd string, args ...interface{}) chan error { - errs := make(chan 
error, 10) - for i := 0; i < cap(errs); i++ { - go func() { - c := p.Get(context.TODO()) - _, err := c.Do(cmd, args...) - errs <- err - c.Close() - }() - } - - // Wait for goroutines to block. - time.Sleep(time.Second / 4) - - return errs -} - -func TestWaitPoolDialError(t *testing.T) { - testErr := errors.New("test") - d := poolDialer{t: t} - config1 := testConfig - config1.Config = &pool.Config{ - Active: 1, - Idle: 1, - Wait: true, - } - p := NewPool(config1) - p.Slice.New = func(ctx context.Context) (io.Closer, error) { - return d.dial() - } - defer p.Close() - - c := p.Get(context.TODO()) - errs := startGoroutines(p, "ERR", testErr) - d.check("before close", p, 1, 1) - - d.dialErr = errors.New("dial") - c.Close() - - nilCount := 0 - errCount := 0 - timeout := time.After(2 * time.Second) - for i := 0; i < cap(errs); i++ { - select { - case err := <-errs: - switch err { - case nil: - nilCount++ - case d.dialErr: - errCount++ - default: - t.Fatalf("expected dial error or nil, got %v", err) - } - case <-timeout: - t.Logf("Wait all the time and timeout %d", i) - return - } - } - if nilCount != 1 { - t.Errorf("expected one nil error, got %d", nilCount) - } - if errCount != cap(errs)-1 { - t.Errorf("expected %d dial erors, got %d", cap(errs)-1, errCount) - } - d.check("done", p, cap(errs), 0) -} - -func BenchmarkPoolGet(b *testing.B) { - b.StopTimer() - p := NewPool(testConfig) - c := p.Get(context.Background()) - if err := c.Err(); err != nil { - b.Fatal(err) - } - c.Close() - defer p.Close() - b.StartTimer() - for i := 0; i < b.N; i++ { - c := p.Get(context.Background()) - c.Close() - } -} - -func BenchmarkPoolGetErr(b *testing.B) { - b.StopTimer() - p := NewPool(testConfig) - c := p.Get(context.Background()) - if err := c.Err(); err != nil { - b.Fatal(err) - } - c.Close() - defer p.Close() - b.StartTimer() - for i := 0; i < b.N; i++ { - c = p.Get(context.Background()) - if err := c.Err(); err != nil { - b.Fatal(err) - } - c.Close() - } -} - -func BenchmarkPoolGetPing(b *testing.B) { - b.StopTimer() - p := NewPool(testConfig) - c := p.Get(context.Background()) - if err := c.Err(); err != nil { - b.Fatal(err) - } - c.Close() - defer p.Close() - b.StartTimer() - for i := 0; i < b.N; i++ { - c := p.Get(context.Background()) - if _, err := c.Do("PING"); err != nil { - b.Fatal(err) - } - c.Close() - } -} - -func BenchmarkPooledConn(b *testing.B) { - p := NewPool(testConfig) - defer p.Close() - for i := 0; i < b.N; i++ { - ctx := context.TODO() - c := p.Get(ctx) - c2 := c.WithContext(context.TODO()) - if _, err := c2.Do("PING"); err != nil { - b.Fatal(err) - } - c2.Close() - } -} diff --git a/pkg/cache/redis/pubsub.go b/pkg/cache/redis/pubsub.go deleted file mode 100644 index a4723f932..000000000 --- a/pkg/cache/redis/pubsub.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package redis - -import ( - "errors" - - pkgerr "github.com/pkg/errors" -) - -var ( - errPubSub = errors.New("redigo: unknown pubsub notification") -) - -// Subscription represents a subscribe or unsubscribe notification. -type Subscription struct { - - // Kind is "subscribe", "unsubscribe", "psubscribe" or "punsubscribe" - Kind string - - // The channel that was changed. - Channel string - - // The current number of subscriptions for connection. - Count int -} - -// Message represents a message notification. -type Message struct { - - // The originating channel. - Channel string - - // The message data. - Data []byte -} - -// PMessage represents a pmessage notification. -type PMessage struct { - - // The matched pattern. - Pattern string - - // The originating channel. - Channel string - - // The message data. - Data []byte -} - -// Pong represents a pubsub pong notification. -type Pong struct { - Data string -} - -// PubSubConn wraps a Conn with convenience methods for subscribers. -type PubSubConn struct { - Conn Conn -} - -// Close closes the connection. -func (c PubSubConn) Close() error { - return c.Conn.Close() -} - -// Subscribe subscribes the connection to the specified channels. -func (c PubSubConn) Subscribe(channel ...interface{}) error { - c.Conn.Send("SUBSCRIBE", channel...) - return c.Conn.Flush() -} - -// PSubscribe subscribes the connection to the given patterns. -func (c PubSubConn) PSubscribe(channel ...interface{}) error { - c.Conn.Send("PSUBSCRIBE", channel...) - return c.Conn.Flush() -} - -// Unsubscribe unsubscribes the connection from the given channels, or from all -// of them if none is given. -func (c PubSubConn) Unsubscribe(channel ...interface{}) error { - c.Conn.Send("UNSUBSCRIBE", channel...) - return c.Conn.Flush() -} - -// PUnsubscribe unsubscribes the connection from the given patterns, or from all -// of them if none is given. -func (c PubSubConn) PUnsubscribe(channel ...interface{}) error { - c.Conn.Send("PUNSUBSCRIBE", channel...) - return c.Conn.Flush() -} - -// Ping sends a PING to the server with the specified data. -func (c PubSubConn) Ping(data string) error { - c.Conn.Send("PING", data) - return c.Conn.Flush() -} - -// Receive returns a pushed message as a Subscription, Message, PMessage, Pong -// or error. The return value is intended to be used directly in a type switch -// as illustrated in the PubSubConn example. 
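// ---------------------------------------------------------------------------
// Editor's note: the sketch below is an illustrative addition, not part of the
// original pubsub.go. It condenses the fuller ExamplePubSubConn found in
// pubsub_test.go, to show the type switch that the Receive documentation above
// refers to. The channel name "updates" is hypothetical.
func exampleReceiveLoop(psc PubSubConn) error {
	if err := psc.Subscribe("updates"); err != nil {
		return err
	}
	for {
		switch n := psc.Receive().(type) {
		case Message:
			// Regular channel message; handle the payload here.
			_ = n.Data
		case Subscription:
			// Exit once the connection is no longer subscribed to anything.
			if n.Count == 0 {
				return nil
			}
		case error:
			return n
		}
	}
}
// ---------------------------------------------------------------------------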
-func (c PubSubConn) Receive() interface{} { - reply, err := Values(c.Conn.Receive()) - if err != nil { - return err - } - - var kind string - reply, err = Scan(reply, &kind) - if err != nil { - return err - } - - switch kind { - case "message": - var m Message - if _, err := Scan(reply, &m.Channel, &m.Data); err != nil { - return err - } - return m - case "pmessage": - var pm PMessage - if _, err := Scan(reply, &pm.Pattern, &pm.Channel, &pm.Data); err != nil { - return err - } - return pm - case "subscribe", "psubscribe", "unsubscribe", "punsubscribe": - s := Subscription{Kind: kind} - if _, err := Scan(reply, &s.Channel, &s.Count); err != nil { - return err - } - return s - case "pong": - var p Pong - if _, err := Scan(reply, &p.Data); err != nil { - return err - } - return p - } - return pkgerr.WithStack(errPubSub) -} diff --git a/pkg/cache/redis/pubsub_test.go b/pkg/cache/redis/pubsub_test.go deleted file mode 100644 index 69c66ffd8..000000000 --- a/pkg/cache/redis/pubsub_test.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "fmt" - "reflect" - "sync" - "testing" -) - -func publish(channel, value interface{}) { - c, err := dial() - if err != nil { - fmt.Println(err) - return - } - defer c.Close() - c.Do("PUBLISH", channel, value) -} - -// Applications can receive pushed messages from one goroutine and manage subscriptions from another goroutine. -func ExamplePubSubConn() { - c, err := dial() - if err != nil { - fmt.Println(err) - return - } - defer c.Close() - var wg sync.WaitGroup - wg.Add(2) - - psc := PubSubConn{Conn: c} - - // This goroutine receives and prints pushed notifications from the server. - // The goroutine exits when the connection is unsubscribed from all - // channels or there is an error. - go func() { - defer wg.Done() - for { - switch n := psc.Receive().(type) { - case Message: - fmt.Printf("Message: %s %s\n", n.Channel, n.Data) - case PMessage: - fmt.Printf("PMessage: %s %s %s\n", n.Pattern, n.Channel, n.Data) - case Subscription: - fmt.Printf("Subscription: %s %s %d\n", n.Kind, n.Channel, n.Count) - if n.Count == 0 { - return - } - case error: - fmt.Printf("error: %v\n", n) - return - } - } - }() - - // This goroutine manages subscriptions for the connection. - go func() { - defer wg.Done() - - psc.Subscribe("example") - psc.PSubscribe("p*") - - // The following function calls publish a message using another - // connection to the Redis server. - publish("example", "hello") - publish("example", "world") - publish("pexample", "foo") - publish("pexample", "bar") - - // Unsubscribe from all connections. This will cause the receiving - // goroutine to exit. 
- psc.Unsubscribe() - psc.PUnsubscribe() - }() - - wg.Wait() - - // Output: - // Subscription: subscribe example 1 - // Subscription: psubscribe p* 2 - // Message: example hello - // Message: example world - // PMessage: p* pexample foo - // PMessage: p* pexample bar - // Subscription: unsubscribe example 1 - // Subscription: punsubscribe p* 0 -} - -func expectPushed(t *testing.T, c PubSubConn, message string, expected interface{}) { - actual := c.Receive() - if !reflect.DeepEqual(actual, expected) { - t.Errorf("%s = %v, want %v", message, actual, expected) - } -} - -func TestPushed(t *testing.T) { - pc, err := DialDefaultServer() - if err != nil { - t.Fatalf("error connection to database, %v", err) - } - defer pc.Close() - - sc, err := DialDefaultServer() - if err != nil { - t.Fatalf("error connection to database, %v", err) - } - defer sc.Close() - - c := PubSubConn{Conn: sc} - - c.Subscribe("c1") - expectPushed(t, c, "Subscribe(c1)", Subscription{Kind: "subscribe", Channel: "c1", Count: 1}) - c.Subscribe("c2") - expectPushed(t, c, "Subscribe(c2)", Subscription{Kind: "subscribe", Channel: "c2", Count: 2}) - c.PSubscribe("p1") - expectPushed(t, c, "PSubscribe(p1)", Subscription{Kind: "psubscribe", Channel: "p1", Count: 3}) - c.PSubscribe("p2") - expectPushed(t, c, "PSubscribe(p2)", Subscription{Kind: "psubscribe", Channel: "p2", Count: 4}) - c.PUnsubscribe() - expectPushed(t, c, "Punsubscribe(p1)", Subscription{Kind: "punsubscribe", Channel: "p1", Count: 3}) - expectPushed(t, c, "Punsubscribe()", Subscription{Kind: "punsubscribe", Channel: "p2", Count: 2}) - - pc.Do("PUBLISH", "c1", "hello") - expectPushed(t, c, "PUBLISH c1 hello", Message{Channel: "c1", Data: []byte("hello")}) - - c.Ping("hello") - expectPushed(t, c, `Ping("hello")`, Pong{"hello"}) - - c.Conn.Send("PING") - c.Conn.Flush() - expectPushed(t, c, `Send("PING")`, Pong{}) -} diff --git a/pkg/cache/redis/redis.go b/pkg/cache/redis/redis.go deleted file mode 100644 index 634dc1cd9..000000000 --- a/pkg/cache/redis/redis.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "context" - - "github.com/go-kratos/kratos/pkg/container/pool" - xtime "github.com/go-kratos/kratos/pkg/time" -) - -// Error represents an error returned in a command reply. -type Error string - -func (err Error) Error() string { return string(err) } - -// Config client settings. -type Config struct { - *pool.Config - - Name string // redis name, for trace - Proto string - Addr string - Auth string - Db int - DialTimeout xtime.Duration - ReadTimeout xtime.Duration - WriteTimeout xtime.Duration - SlowLog xtime.Duration -} - -type Redis struct { - pool *Pool - conf *Config -} - -func NewRedis(c *Config, options ...DialOption) *Redis { - return &Redis{ - pool: NewPool(c, options...), - conf: c, - } -} - -// Do gets a new conn from pool, then execute Do with this conn, finally close this conn. -// ATTENTION: Don't use this method with transaction command like MULTI etc. 
Because every Do will close conn automatically, use r.Conn to get a raw conn for this situation. -func (r *Redis) Do(ctx context.Context, commandName string, args ...interface{}) (reply interface{}, err error) { - conn := r.pool.Get(ctx) - defer conn.Close() - reply, err = conn.Do(commandName, args...) - return -} - -// Close closes connection pool -func (r *Redis) Close() error { - return r.pool.Close() -} - -// Conn direct gets a connection -func (r *Redis) Conn(ctx context.Context) Conn { - return r.pool.Get(ctx) -} - -func (r *Redis) Pipeline() (p Pipeliner) { - return &pipeliner{ - pool: r.pool, - } -} diff --git a/pkg/cache/redis/redis_test.go b/pkg/cache/redis/redis_test.go deleted file mode 100644 index 0d2ede87b..000000000 --- a/pkg/cache/redis/redis_test.go +++ /dev/null @@ -1,323 +0,0 @@ -package redis - -import ( - "context" - "reflect" - "testing" - "time" - - "github.com/go-kratos/kratos/pkg/container/pool" - xtime "github.com/go-kratos/kratos/pkg/time" -) - -func TestRedis(t *testing.T) { - testSet(t, testPool) - testSend(t, testPool) - testGet(t, testPool) - testErr(t, testPool) - if err := testPool.Close(); err != nil { - t.Errorf("redis: close error(%v)", err) - } - conn, err := NewConn(testConfig) - if err != nil { - t.Errorf("redis: new conn error(%v)", err) - } - if err := conn.Close(); err != nil { - t.Errorf("redis: close error(%v)", err) - } -} - -func testSet(t *testing.T, p *Pool) { - var ( - key = "test" - value = "test" - conn = p.Get(context.TODO()) - ) - defer conn.Close() - if reply, err := conn.Do("set", key, value); err != nil { - t.Errorf("redis: conn.Do(SET, %s, %s) error(%v)", key, value, err) - } else { - t.Logf("redis: set status: %s", reply) - } -} - -func testSend(t *testing.T, p *Pool) { - var ( - key = "test" - value = "test" - expire = 1000 - conn = p.Get(context.TODO()) - ) - defer conn.Close() - if err := conn.Send("SET", key, value); err != nil { - t.Errorf("redis: conn.Send(SET, %s, %s) error(%v)", key, value, err) - } - if err := conn.Send("EXPIRE", key, expire); err != nil { - t.Errorf("redis: conn.Send(EXPIRE key(%s) expire(%d)) error(%v)", key, expire, err) - } - if err := conn.Flush(); err != nil { - t.Errorf("redis: conn.Flush error(%v)", err) - } - for i := 0; i < 2; i++ { - if _, err := conn.Receive(); err != nil { - t.Errorf("redis: conn.Receive error(%v)", err) - return - } - } - t.Logf("redis: set value: %s", value) -} - -func testGet(t *testing.T, p *Pool) { - var ( - key = "test" - conn = p.Get(context.TODO()) - ) - defer conn.Close() - if reply, err := conn.Do("GET", key); err != nil { - t.Errorf("redis: conn.Do(GET, %s) error(%v)", key, err) - } else { - t.Logf("redis: get value: %s", reply) - } -} - -func testErr(t *testing.T, p *Pool) { - conn := p.Get(context.TODO()) - if err := conn.Close(); err != nil { - t.Errorf("redis: close error(%v)", err) - } - if err := conn.Err(); err == nil { - t.Errorf("redis: err not nil") - } else { - t.Logf("redis: err: %v", err) - } -} - -func BenchmarkRedis(b *testing.B) { - conf := &Config{ - Name: "test", - Proto: "tcp", - Addr: testRedisAddr, - DialTimeout: xtime.Duration(time.Second), - ReadTimeout: xtime.Duration(time.Second), - WriteTimeout: xtime.Duration(time.Second), - } - conf.Config = &pool.Config{ - Active: 10, - Idle: 5, - IdleTimeout: xtime.Duration(90 * time.Second), - } - benchmarkPool := NewPool(conf) - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - conn := benchmarkPool.Get(context.TODO()) - if err := conn.Close(); err != nil { - b.Errorf("redis: 
close error(%v)", err) - } - } - }) - if err := benchmarkPool.Close(); err != nil { - b.Errorf("redis: close error(%v)", err) - } -} - -var testRedisCommands = []struct { - args []interface{} - expected interface{} -}{ - { - []interface{}{"PING"}, - "PONG", - }, - { - []interface{}{"SET", "foo", "bar"}, - "OK", - }, - { - []interface{}{"GET", "foo"}, - []byte("bar"), - }, - { - []interface{}{"GET", "nokey"}, - nil, - }, - { - []interface{}{"MGET", "nokey", "foo"}, - []interface{}{nil, []byte("bar")}, - }, - { - []interface{}{"INCR", "mycounter"}, - int64(1), - }, - { - []interface{}{"LPUSH", "mylist", "foo"}, - int64(1), - }, - { - []interface{}{"LPUSH", "mylist", "bar"}, - int64(2), - }, - { - []interface{}{"LRANGE", "mylist", 0, -1}, - []interface{}{[]byte("bar"), []byte("foo")}, - }, -} - -func TestNewRedis(t *testing.T) { - type args struct { - c *Config - options []DialOption - } - tests := []struct { - name string - args args - wantErr bool - }{ - { - "new_redis", - args{ - testConfig, - make([]DialOption, 0), - }, - false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - r := NewRedis(tt.args.c, tt.args.options...) - if r == nil { - t.Errorf("NewRedis() error, got nil") - return - } - err := r.Close() - if err != nil { - t.Errorf("Close() error %v", err) - } - }) - } -} - -func TestRedis_Do(t *testing.T) { - r := NewRedis(testConfig) - r.Do(context.TODO(), "FLUSHDB") - - for _, cmd := range testRedisCommands { - actual, err := r.Do(context.TODO(), cmd.args[0].(string), cmd.args[1:]...) - if err != nil { - t.Errorf("Do(%v) returned error %v", cmd.args, err) - continue - } - if !reflect.DeepEqual(actual, cmd.expected) { - t.Errorf("Do(%v) = %v, want %v", cmd.args, actual, cmd.expected) - } - } - err := r.Close() - if err != nil { - t.Errorf("Close() error %v", err) - } -} - -func TestRedis_Conn(t *testing.T) { - type args struct { - ctx context.Context - } - tests := []struct { - name string - p *Redis - args args - wantErr bool - g int - c int - }{ - { - "Close", - NewRedis(&Config{ - Config: &pool.Config{ - Active: 1, - Idle: 1, - }, - Name: "test_get", - Proto: "tcp", - Addr: testRedisAddr, - DialTimeout: xtime.Duration(time.Second), - ReadTimeout: xtime.Duration(time.Second), - WriteTimeout: xtime.Duration(time.Second), - }), - args{context.TODO()}, - false, - 3, - 3, - }, - { - "CloseExceededPoolSize", - NewRedis(&Config{ - Config: &pool.Config{ - Active: 1, - Idle: 1, - }, - Name: "test_get_out", - Proto: "tcp", - Addr: testRedisAddr, - DialTimeout: xtime.Duration(time.Second), - ReadTimeout: xtime.Duration(time.Second), - WriteTimeout: xtime.Duration(time.Second), - }), - args{context.TODO()}, - true, - 5, - 3, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - for i := 1; i <= tt.g; i++ { - got := tt.p.Conn(tt.args.ctx) - if err := got.Close(); err != nil { - if !tt.wantErr { - t.Error(err) - } - } - if i <= tt.c { - if err := got.Close(); err != nil { - t.Error(err) - } - } - } - }) - } -} - -func BenchmarkRedisDoPing(b *testing.B) { - r := NewRedis(testConfig) - defer r.Close() - b.ResetTimer() - for i := 0; i < b.N; i++ { - if _, err := r.Do(context.Background(), "PING"); err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkRedisDoSET(b *testing.B) { - r := NewRedis(testConfig) - defer r.Close() - b.ResetTimer() - for i := 0; i < b.N; i++ { - if _, err := r.Do(context.Background(), "SET", "a", "b"); err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkRedisDoGET(b *testing.B) { - r := NewRedis(testConfig) - defer 
r.Close() - r.Do(context.Background(), "SET", "a", "b") - b.ResetTimer() - for i := 0; i < b.N; i++ { - if _, err := r.Do(context.Background(), "GET", "b"); err != nil { - b.Fatal(err) - } - } -} diff --git a/pkg/cache/redis/reply.go b/pkg/cache/redis/reply.go deleted file mode 100644 index 8e4349e83..000000000 --- a/pkg/cache/redis/reply.go +++ /dev/null @@ -1,409 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "errors" - "strconv" - - pkgerr "github.com/pkg/errors" -) - -// ErrNil indicates that a reply value is nil. -var ErrNil = errors.New("redigo: nil returned") - -// Int is a helper that converts a command reply to an integer. If err is not -// equal to nil, then Int returns 0, err. Otherwise, Int converts the -// reply to an int as follows: -// -// Reply type Result -// integer int(reply), nil -// bulk string parsed reply, nil -// nil 0, ErrNil -// other 0, error -func Int(reply interface{}, err error) (int, error) { - if err != nil { - return 0, err - } - switch reply := reply.(type) { - case int64: - x := int(reply) - if int64(x) != reply { - return 0, pkgerr.WithStack(strconv.ErrRange) - } - return x, nil - case []byte: - n, err := strconv.ParseInt(string(reply), 10, 0) - return int(n), pkgerr.WithStack(err) - case nil: - return 0, ErrNil - case Error: - return 0, reply - } - return 0, pkgerr.Errorf("redigo: unexpected type for Int, got type %T", reply) -} - -// Int64 is a helper that converts a command reply to 64 bit integer. If err is -// not equal to nil, then Int returns 0, err. Otherwise, Int64 converts the -// reply to an int64 as follows: -// -// Reply type Result -// integer reply, nil -// bulk string parsed reply, nil -// nil 0, ErrNil -// other 0, error -func Int64(reply interface{}, err error) (int64, error) { - if err != nil { - return 0, err - } - switch reply := reply.(type) { - case int64: - return reply, nil - case []byte: - n, err := strconv.ParseInt(string(reply), 10, 64) - return n, pkgerr.WithStack(err) - case nil: - return 0, ErrNil - case Error: - return 0, reply - } - return 0, pkgerr.Errorf("redigo: unexpected type for Int64, got type %T", reply) -} - -var errNegativeInt = errors.New("redigo: unexpected value for Uint64") - -// Uint64 is a helper that converts a command reply to 64 bit integer. If err is -// not equal to nil, then Int returns 0, err. 
Otherwise, Int64 converts the -// reply to an int64 as follows: -// -// Reply type Result -// integer reply, nil -// bulk string parsed reply, nil -// nil 0, ErrNil -// other 0, error -func Uint64(reply interface{}, err error) (uint64, error) { - if err != nil { - return 0, err - } - switch reply := reply.(type) { - case int64: - if reply < 0 { - return 0, pkgerr.WithStack(errNegativeInt) - } - return uint64(reply), nil - case []byte: - n, err := strconv.ParseUint(string(reply), 10, 64) - return n, err - case nil: - return 0, ErrNil - case Error: - return 0, reply - } - return 0, pkgerr.Errorf("redigo: unexpected type for Uint64, got type %T", reply) -} - -// Float64 is a helper that converts a command reply to 64 bit float. If err is -// not equal to nil, then Float64 returns 0, err. Otherwise, Float64 converts -// the reply to an int as follows: -// -// Reply type Result -// bulk string parsed reply, nil -// nil 0, ErrNil -// other 0, error -func Float64(reply interface{}, err error) (float64, error) { - if err != nil { - return 0, err - } - switch reply := reply.(type) { - case []byte: - n, err := strconv.ParseFloat(string(reply), 64) - return n, pkgerr.WithStack(err) - case nil: - return 0, ErrNil - case Error: - return 0, reply - } - return 0, pkgerr.Errorf("redigo: unexpected type for Float64, got type %T", reply) -} - -// String is a helper that converts a command reply to a string. If err is not -// equal to nil, then String returns "", err. Otherwise String converts the -// reply to a string as follows: -// -// Reply type Result -// bulk string string(reply), nil -// simple string reply, nil -// nil "", ErrNil -// other "", error -func String(reply interface{}, err error) (string, error) { - if err != nil { - return "", err - } - switch reply := reply.(type) { - case []byte: - return string(reply), nil - case string: - return reply, nil - case nil: - return "", ErrNil - case Error: - return "", reply - } - return "", pkgerr.Errorf("redigo: unexpected type for String, got type %T", reply) -} - -// Bytes is a helper that converts a command reply to a slice of bytes. If err -// is not equal to nil, then Bytes returns nil, err. Otherwise Bytes converts -// the reply to a slice of bytes as follows: -// -// Reply type Result -// bulk string reply, nil -// simple string []byte(reply), nil -// nil nil, ErrNil -// other nil, error -func Bytes(reply interface{}, err error) ([]byte, error) { - if err != nil { - return nil, err - } - switch reply := reply.(type) { - case []byte: - return reply, nil - case string: - return []byte(reply), nil - case nil: - return nil, ErrNil - case Error: - return nil, reply - } - return nil, pkgerr.Errorf("redigo: unexpected type for Bytes, got type %T", reply) -} - -// Bool is a helper that converts a command reply to a boolean. If err is not -// equal to nil, then Bool returns false, err. 
Otherwise Bool converts the -// reply to boolean as follows: -// -// Reply type Result -// integer value != 0, nil -// bulk string strconv.ParseBool(reply) -// nil false, ErrNil -// other false, error -func Bool(reply interface{}, err error) (bool, error) { - if err != nil { - return false, err - } - switch reply := reply.(type) { - case int64: - return reply != 0, nil - case []byte: - b, e := strconv.ParseBool(string(reply)) - return b, pkgerr.WithStack(e) - case nil: - return false, ErrNil - case Error: - return false, reply - } - return false, pkgerr.Errorf("redigo: unexpected type for Bool, got type %T", reply) -} - -// MultiBulk is a helper that converts an array command reply to a []interface{}. -// -// Deprecated: Use Values instead. -func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) } - -// Values is a helper that converts an array command reply to a []interface{}. -// If err is not equal to nil, then Values returns nil, err. Otherwise, Values -// converts the reply as follows: -// -// Reply type Result -// array reply, nil -// nil nil, ErrNil -// other nil, error -func Values(reply interface{}, err error) ([]interface{}, error) { - if err != nil { - return nil, err - } - switch reply := reply.(type) { - case []interface{}: - return reply, nil - case nil: - return nil, ErrNil - case Error: - return nil, reply - } - return nil, pkgerr.Errorf("redigo: unexpected type for Values, got type %T", reply) -} - -// Strings is a helper that converts an array command reply to a []string. If -// err is not equal to nil, then Strings returns nil, err. Nil array items are -// converted to "" in the output slice. Strings returns an error if an array -// item is not a bulk string or nil. -func Strings(reply interface{}, err error) ([]string, error) { - if err != nil { - return nil, err - } - switch reply := reply.(type) { - case []interface{}: - result := make([]string, len(reply)) - for i := range reply { - if reply[i] == nil { - continue - } - p, ok := reply[i].([]byte) - if !ok { - return nil, pkgerr.Errorf("redigo: unexpected element type for Strings, got type %T", reply[i]) - } - result[i] = string(p) - } - return result, nil - case nil: - return nil, ErrNil - case Error: - return nil, reply - } - return nil, pkgerr.Errorf("redigo: unexpected type for Strings, got type %T", reply) -} - -// ByteSlices is a helper that converts an array command reply to a [][]byte. -// If err is not equal to nil, then ByteSlices returns nil, err. Nil array -// items are stay nil. ByteSlices returns an error if an array item is not a -// bulk string or nil. -func ByteSlices(reply interface{}, err error) ([][]byte, error) { - if err != nil { - return nil, err - } - switch reply := reply.(type) { - case []interface{}: - result := make([][]byte, len(reply)) - for i := range reply { - if reply[i] == nil { - continue - } - p, ok := reply[i].([]byte) - if !ok { - return nil, pkgerr.Errorf("redigo: unexpected element type for ByteSlices, got type %T", reply[i]) - } - result[i] = p - } - return result, nil - case nil: - return nil, ErrNil - case Error: - return nil, reply - } - return nil, pkgerr.Errorf("redigo: unexpected type for ByteSlices, got type %T", reply) -} - -// Ints is a helper that converts an array command reply to a []int. If -// err is not equal to nil, then Ints returns nil, err. 
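// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the original reply.go. It
// shows how the slice and map helpers in this file are typically wrapped
// around Conn.Do. The key names ("ids", "profile") are hypothetical, and the
// set "ids" is assumed to hold integer members.
func exampleReplyHelpers(c Conn) error {
	// Ints converts an array reply of integer bulk strings into []int.
	ids, err := Ints(c.Do("SMEMBERS", "ids"))
	if err != nil {
		return err
	}
	_ = ids

	// StringMap (defined further below in this file) turns an alternating
	// key/value reply such as HGETALL into a map[string]string.
	profile, err := StringMap(c.Do("HGETALL", "profile"))
	if err != nil {
		return err
	}
	_ = profile
	return nil
}
// ---------------------------------------------------------------------------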
-func Ints(reply interface{}, err error) ([]int, error) { - var ints []int - values, err := Values(reply, err) - if err != nil { - return ints, err - } - if err := ScanSlice(values, &ints); err != nil { - return ints, err - } - return ints, nil -} - -// Int64s is a helper that converts an array command reply to a []int64. If -// err is not equal to nil, then Int64s returns nil, err. -func Int64s(reply interface{}, err error) ([]int64, error) { - var int64s []int64 - values, err := Values(reply, err) - if err != nil { - return int64s, err - } - if err := ScanSlice(values, &int64s); err != nil { - return int64s, err - } - return int64s, nil -} - -// StringMap is a helper that converts an array of strings (alternating key, value) -// into a map[string]string. The HGETALL and CONFIG GET commands return replies in this format. -// Requires an even number of values in result. -func StringMap(result interface{}, err error) (map[string]string, error) { - values, err := Values(result, err) - if err != nil { - return nil, err - } - if len(values)%2 != 0 { - return nil, pkgerr.New("redigo: StringMap expects even number of values result") - } - m := make(map[string]string, len(values)/2) - for i := 0; i < len(values); i += 2 { - key, okKey := values[i].([]byte) - value, okValue := values[i+1].([]byte) - if !okKey || !okValue { - return nil, pkgerr.New("redigo: ScanMap key not a bulk string value") - } - m[string(key)] = string(value) - } - return m, nil -} - -// IntMap is a helper that converts an array of strings (alternating key, value) -// into a map[string]int. The HGETALL commands return replies in this format. -// Requires an even number of values in result. -func IntMap(result interface{}, err error) (map[string]int, error) { - values, err := Values(result, err) - if err != nil { - return nil, err - } - if len(values)%2 != 0 { - return nil, pkgerr.New("redigo: IntMap expects even number of values result") - } - m := make(map[string]int, len(values)/2) - for i := 0; i < len(values); i += 2 { - key, ok := values[i].([]byte) - if !ok { - return nil, pkgerr.New("redigo: ScanMap key not a bulk string value") - } - value, err := Int(values[i+1], nil) - if err != nil { - return nil, err - } - m[string(key)] = value - } - return m, nil -} - -// Int64Map is a helper that converts an array of strings (alternating key, value) -// into a map[string]int64. The HGETALL commands return replies in this format. -// Requires an even number of values in result. -func Int64Map(result interface{}, err error) (map[string]int64, error) { - values, err := Values(result, err) - if err != nil { - return nil, err - } - if len(values)%2 != 0 { - return nil, pkgerr.New("redigo: Int64Map expects even number of values result") - } - m := make(map[string]int64, len(values)/2) - for i := 0; i < len(values); i += 2 { - key, ok := values[i].([]byte) - if !ok { - return nil, pkgerr.New("redigo: ScanMap key not a bulk string value") - } - value, err := Int64(values[i+1], nil) - if err != nil { - return nil, err - } - m[string(key)] = value - } - return m, nil -} diff --git a/pkg/cache/redis/reply_test.go b/pkg/cache/redis/reply_test.go deleted file mode 100644 index d3b1b9551..000000000 --- a/pkg/cache/redis/reply_test.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "fmt" - "reflect" - "testing" - - "github.com/pkg/errors" -) - -type valueError struct { - v interface{} - err error -} - -func ve(v interface{}, err error) valueError { - return valueError{v, err} -} - -var replyTests = []struct { - name interface{} - actual valueError - expected valueError -}{ - { - "ints([v1, v2])", - ve(Ints([]interface{}{[]byte("4"), []byte("5")}, nil)), - ve([]int{4, 5}, nil), - }, - { - "ints(nil)", - ve(Ints(nil, nil)), - ve([]int(nil), ErrNil), - }, - { - "strings([v1, v2])", - ve(Strings([]interface{}{[]byte("v1"), []byte("v2")}, nil)), - ve([]string{"v1", "v2"}, nil), - }, - { - "strings(nil)", - ve(Strings(nil, nil)), - ve([]string(nil), ErrNil), - }, - { - "byteslices([v1, v2])", - ve(ByteSlices([]interface{}{[]byte("v1"), []byte("v2")}, nil)), - ve([][]byte{[]byte("v1"), []byte("v2")}, nil), - }, - { - "byteslices(nil)", - ve(ByteSlices(nil, nil)), - ve([][]byte(nil), ErrNil), - }, - { - "values([v1, v2])", - ve(Values([]interface{}{[]byte("v1"), []byte("v2")}, nil)), - ve([]interface{}{[]byte("v1"), []byte("v2")}, nil), - }, - { - "values(nil)", - ve(Values(nil, nil)), - ve([]interface{}(nil), ErrNil), - }, - { - "float64(1.0)", - ve(Float64([]byte("1.0"), nil)), - ve(float64(1.0), nil), - }, - { - "float64(nil)", - ve(Float64(nil, nil)), - ve(float64(0.0), ErrNil), - }, - { - "uint64(1)", - ve(Uint64(int64(1), nil)), - ve(uint64(1), nil), - }, - { - "uint64(-1)", - ve(Uint64(int64(-1), nil)), - ve(uint64(0), errNegativeInt), - }, -} - -func TestReply(t *testing.T) { - for _, rt := range replyTests { - if errors.Cause(rt.actual.err) != rt.expected.err { - t.Errorf("%s returned err %v, want %v", rt.name, rt.actual.err, rt.expected.err) - continue - } - if !reflect.DeepEqual(rt.actual.v, rt.expected.v) { - t.Errorf("%s=%+v, want %+v", rt.name, rt.actual.v, rt.expected.v) - } - } -} - -// dial wraps DialDefaultServer() with a more suitable function name for examples. 
-func dial() (Conn, error) { - return DialDefaultServer() -} - -func ExampleBool() { - c, err := dial() - if err != nil { - fmt.Println(err) - return - } - defer c.Close() - - c.Do("SET", "foo", 1) - exists, _ := Bool(c.Do("EXISTS", "foo")) - fmt.Printf("%#v\n", exists) - // Output: - // true -} - -func ExampleInt() { - c, err := dial() - if err != nil { - fmt.Println(err) - return - } - defer c.Close() - - c.Do("SET", "k1", 1) - n, _ := Int(c.Do("GET", "k1")) - fmt.Printf("%#v\n", n) - n, _ = Int(c.Do("INCR", "k1")) - fmt.Printf("%#v\n", n) - // Output: - // 1 - // 2 -} - -func ExampleInts() { - c, err := dial() - if err != nil { - fmt.Println(err) - return - } - defer c.Close() - - c.Do("SADD", "set_with_integers", 4, 5, 6) - ints, _ := Ints(c.Do("SMEMBERS", "set_with_integers")) - fmt.Printf("%#v\n", ints) - // Output: - // []int{4, 5, 6} -} - -func ExampleString() { - c, err := dial() - if err != nil { - fmt.Println(err) - return - } - defer c.Close() - - c.Do("SET", "hello", "world") - s, _ := String(c.Do("GET", "hello")) - fmt.Printf("%#v", s) - // Output: - // "world" -} diff --git a/pkg/cache/redis/scan.go b/pkg/cache/redis/scan.go deleted file mode 100644 index 6414a3b43..000000000 --- a/pkg/cache/redis/scan.go +++ /dev/null @@ -1,558 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package redis - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "sync" - - pkgerr "github.com/pkg/errors" -) - -func ensureLen(d reflect.Value, n int) { - if n > d.Cap() { - d.Set(reflect.MakeSlice(d.Type(), n, n)) - } else { - d.SetLen(n) - } -} - -func cannotConvert(d reflect.Value, s interface{}) error { - var sname string - switch s.(type) { - case string: - sname = "Redis simple string" - case Error: - sname = "Redis error" - case int64: - sname = "Redis integer" - case []byte: - sname = "Redis bulk string" - case []interface{}: - sname = "Redis array" - default: - sname = reflect.TypeOf(s).String() - } - return pkgerr.Errorf("cannot convert from %s to %s", sname, d.Type()) -} - -func convertAssignBulkString(d reflect.Value, s []byte) (err error) { - switch d.Type().Kind() { - case reflect.Float32, reflect.Float64: - var x float64 - x, err = strconv.ParseFloat(string(s), d.Type().Bits()) - d.SetFloat(x) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - var x int64 - x, err = strconv.ParseInt(string(s), 10, d.Type().Bits()) - d.SetInt(x) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - var x uint64 - x, err = strconv.ParseUint(string(s), 10, d.Type().Bits()) - d.SetUint(x) - case reflect.Bool: - var x bool - x, err = strconv.ParseBool(string(s)) - d.SetBool(x) - case reflect.String: - d.SetString(string(s)) - case reflect.Slice: - if d.Type().Elem().Kind() != reflect.Uint8 { - err = cannotConvert(d, s) - } else { - d.SetBytes(s) - } - default: - err = cannotConvert(d, s) - } - err = pkgerr.WithStack(err) - return -} - -func convertAssignInt(d reflect.Value, s int64) (err error) { - switch d.Type().Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - d.SetInt(s) - if d.Int() != s { - err = strconv.ErrRange - d.SetInt(0) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - if s < 0 { - err = strconv.ErrRange - } else { - x := uint64(s) - d.SetUint(x) - if d.Uint() != x { - err = strconv.ErrRange - d.SetUint(0) - } - } - case reflect.Bool: - d.SetBool(s != 0) - default: - err = cannotConvert(d, s) - } - err = pkgerr.WithStack(err) - return -} - -func convertAssignValue(d reflect.Value, s interface{}) (err error) { - switch s := s.(type) { - case []byte: - err = convertAssignBulkString(d, s) - case int64: - err = convertAssignInt(d, s) - default: - err = cannotConvert(d, s) - } - return err -} - -func convertAssignArray(d reflect.Value, s []interface{}) error { - if d.Type().Kind() != reflect.Slice { - return cannotConvert(d, s) - } - ensureLen(d, len(s)) - for i := 0; i < len(s); i++ { - if err := convertAssignValue(d.Index(i), s[i]); err != nil { - return err - } - } - return nil -} - -func convertAssign(d interface{}, s interface{}) (err error) { - // Handle the most common destination types using type switches and - // fall back to reflection for all other types. 
- switch s := s.(type) { - case nil: - // ingore - case []byte: - switch d := d.(type) { - case *string: - *d = string(s) - case *int: - *d, err = strconv.Atoi(string(s)) - case *bool: - *d, err = strconv.ParseBool(string(s)) - case *[]byte: - *d = s - case *interface{}: - *d = s - case nil: - // skip value - default: - if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { - err = cannotConvert(d, s) - } else { - err = convertAssignBulkString(d.Elem(), s) - } - } - case int64: - switch d := d.(type) { - case *int: - x := int(s) - if int64(x) != s { - err = strconv.ErrRange - x = 0 - } - *d = x - case *bool: - *d = s != 0 - case *interface{}: - *d = s - case nil: - // skip value - default: - if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { - err = cannotConvert(d, s) - } else { - err = convertAssignInt(d.Elem(), s) - } - } - case string: - switch d := d.(type) { - case *string: - *d = string(s) - default: - err = cannotConvert(reflect.ValueOf(d), s) - } - case []interface{}: - switch d := d.(type) { - case *[]interface{}: - *d = s - case *interface{}: - *d = s - case nil: - // skip value - default: - if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { - err = cannotConvert(d, s) - } else { - err = convertAssignArray(d.Elem(), s) - } - } - case Error: - err = s - default: - err = cannotConvert(reflect.ValueOf(d), s) - } - err = pkgerr.WithStack(err) - return -} - -// Scan copies from src to the values pointed at by dest. -// -// The values pointed at by dest must be an integer, float, boolean, string, -// []byte, interface{} or slices of these types. Scan uses the standard strconv -// package to convert bulk strings to numeric and boolean types. -// -// If a dest value is nil, then the corresponding src value is skipped. -// -// If a src element is nil, then the corresponding dest value is not modified. -// -// To enable easy use of Scan in a loop, Scan returns the slice of src -// following the copied values. -func Scan(src []interface{}, dest ...interface{}) ([]interface{}, error) { - if len(src) < len(dest) { - return nil, pkgerr.New("redigo.Scan: array short") - } - var err error - for i, d := range dest { - err = convertAssign(d, src[i]) - if err != nil { - err = fmt.Errorf("redigo.Scan: cannot assign to dest %d: %v", i, err) - break - } - } - return src[len(dest):], err -} - -type fieldSpec struct { - name string - index []int - omitEmpty bool -} - -type structSpec struct { - m map[string]*fieldSpec - l []*fieldSpec -} - -func (ss *structSpec) fieldSpec(name []byte) *fieldSpec { - return ss.m[string(name)] -} - -func compileStructSpec(t reflect.Type, depth map[string]int, index []int, ss *structSpec) { - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - switch { - case f.PkgPath != "" && !f.Anonymous: - // Ignore unexported fields. - case f.Anonymous: - // TODO: Handle pointers. Requires change to decoder and - // protection against infinite recursion. 
- if f.Type.Kind() == reflect.Struct { - compileStructSpec(f.Type, depth, append(index, i), ss) - } - default: - fs := &fieldSpec{name: f.Name} - tag := f.Tag.Get("redis") - p := strings.Split(tag, ",") - if len(p) > 0 { - if p[0] == "-" { - continue - } - if len(p[0]) > 0 { - fs.name = p[0] - } - for _, s := range p[1:] { - switch s { - case "omitempty": - fs.omitEmpty = true - default: - panic(fmt.Errorf("redigo: unknown field tag %s for type %s", s, t.Name())) - } - } - } - d, found := depth[fs.name] - if !found { - d = 1 << 30 - } - switch { - case len(index) == d: - // At same depth, remove from result. - delete(ss.m, fs.name) - j := 0 - for i1 := 0; i1 < len(ss.l); i1++ { - if fs.name != ss.l[i1].name { - ss.l[j] = ss.l[i1] - j++ - } - } - ss.l = ss.l[:j] - case len(index) < d: - fs.index = make([]int, len(index)+1) - copy(fs.index, index) - fs.index[len(index)] = i - depth[fs.name] = len(index) - ss.m[fs.name] = fs - ss.l = append(ss.l, fs) - } - } - } -} - -var ( - structSpecMutex sync.RWMutex - structSpecCache = make(map[reflect.Type]*structSpec) -) - -func structSpecForType(t reflect.Type) *structSpec { - structSpecMutex.RLock() - ss, found := structSpecCache[t] - structSpecMutex.RUnlock() - if found { - return ss - } - - structSpecMutex.Lock() - defer structSpecMutex.Unlock() - ss, found = structSpecCache[t] - if found { - return ss - } - - ss = &structSpec{m: make(map[string]*fieldSpec)} - compileStructSpec(t, make(map[string]int), nil, ss) - structSpecCache[t] = ss - return ss -} - -var errScanStructValue = errors.New("redigo.ScanStruct: value must be non-nil pointer to a struct") - -// ScanStruct scans alternating names and values from src to a struct. The -// HGETALL and CONFIG GET commands return replies in this format. -// -// ScanStruct uses exported field names to match values in the response. Use -// 'redis' field tag to override the name: -// -// Field int `redis:"myName"` -// -// Fields with the tag redis:"-" are ignored. -// -// Integer, float, boolean, string and []byte fields are supported. Scan uses the -// standard strconv package to convert bulk string values to numeric and -// boolean types. -// -// If a src element is nil, then the corresponding field is not modified. -func ScanStruct(src []interface{}, dest interface{}) error { - d := reflect.ValueOf(dest) - if d.Kind() != reflect.Ptr || d.IsNil() { - return pkgerr.WithStack(errScanStructValue) - } - d = d.Elem() - if d.Kind() != reflect.Struct { - return pkgerr.WithStack(errScanStructValue) - } - ss := structSpecForType(d.Type()) - - if len(src)%2 != 0 { - return pkgerr.New("redigo.ScanStruct: number of values not a multiple of 2") - } - - for i := 0; i < len(src); i += 2 { - s := src[i+1] - if s == nil { - continue - } - name, ok := src[i].([]byte) - if !ok { - return pkgerr.Errorf("redigo.ScanStruct: key %d not a bulk string value", i) - } - fs := ss.fieldSpec(name) - if fs == nil { - continue - } - if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil { - return pkgerr.Errorf("redigo.ScanStruct: cannot assign field %s: %v", fs.name, err) - } - } - return nil -} - -var ( - errScanSliceValue = errors.New("redigo.ScanSlice: dest must be non-nil pointer to a struct") -) - -// ScanSlice scans src to the slice pointed to by dest. The elements the dest -// slice must be integer, float, boolean, string, struct or pointer to struct -// values. -// -// Struct fields must be integer, float, boolean or string values. All struct -// fields are used unless a subset is specified using fieldNames. 
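// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the original scan.go. It
// shows the fieldNames form of ScanSlice described in the comment above:
// only the listed struct fields are filled from the flat reply, in the order
// given. The struct and field names are hypothetical; Artist stays empty
// because it is not listed in fieldNames.
func exampleScanSliceSubset(src []interface{}) ([]struct{ Title, Artist, Rating string }, error) {
	var albums []struct{ Title, Artist, Rating string }
	// src is expected to be a flat array of alternating title/rating values,
	// e.g. the result of a SORT ... GET pattern.
	if err := ScanSlice(src, &albums, "Title", "Rating"); err != nil {
		return nil, err
	}
	return albums, nil
}
// ---------------------------------------------------------------------------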
-func ScanSlice(src []interface{}, dest interface{}, fieldNames ...string) error { - d := reflect.ValueOf(dest) - if d.Kind() != reflect.Ptr || d.IsNil() { - return pkgerr.WithStack(errScanSliceValue) - } - d = d.Elem() - if d.Kind() != reflect.Slice { - return pkgerr.WithStack(errScanSliceValue) - } - - isPtr := false - t := d.Type().Elem() - if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { - isPtr = true - t = t.Elem() - } - - if t.Kind() != reflect.Struct { - ensureLen(d, len(src)) - for i, s := range src { - if s == nil { - continue - } - if err := convertAssignValue(d.Index(i), s); err != nil { - return pkgerr.Errorf("redigo.ScanSlice: cannot assign element %d: %v", i, err) - } - } - return nil - } - - ss := structSpecForType(t) - fss := ss.l - if len(fieldNames) > 0 { - fss = make([]*fieldSpec, len(fieldNames)) - for i, name := range fieldNames { - fss[i] = ss.m[name] - if fss[i] == nil { - return pkgerr.Errorf("redigo.ScanSlice: ScanSlice bad field name %s", name) - } - } - } - - if len(fss) == 0 { - return pkgerr.New("redigo.ScanSlice: no struct fields") - } - - n := len(src) / len(fss) - if n*len(fss) != len(src) { - return pkgerr.New("redigo.ScanSlice: length not a multiple of struct field count") - } - - ensureLen(d, n) - for i := 0; i < n; i++ { - d1 := d.Index(i) - if isPtr { - if d1.IsNil() { - d1.Set(reflect.New(t)) - } - d1 = d1.Elem() - } - for j, fs := range fss { - s := src[i*len(fss)+j] - if s == nil { - continue - } - if err := convertAssignValue(d1.FieldByIndex(fs.index), s); err != nil { - return pkgerr.Errorf("redigo.ScanSlice: cannot assign element %d to field %s: %v", i*len(fss)+j, fs.name, err) - } - } - } - return nil -} - -// Args is a helper for constructing command arguments from structured values. -type Args []interface{} - -// Add returns the result of appending value to args. -func (args Args) Add(value ...interface{}) Args { - return append(args, value...) -} - -// AddFlat returns the result of appending the flattened value of v to args. -// -// Maps are flattened by appending the alternating keys and map values to args. -// -// Slices are flattened by appending the slice elements to args. -// -// Structs are flattened by appending the alternating names and values of -// exported fields to args. If v is a nil struct pointer, then nothing is -// appended. The 'redis' field tag overrides struct field names. See ScanStruct -// for more information on the use of the 'redis' field tag. -// -// Other types are appended to args as is. 
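// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the original scan.go. It
// shows the usual pairing of Args.AddFlat with HMSET, including the omitempty
// behaviour described above. The struct, tags and key name are hypothetical.
func exampleAddFlat(c Conn) error {
	type album struct {
		Title  string `redis:"title"`
		Rating int    `redis:"rating,omitempty"`
	}
	a := album{Title: "Red"} // Rating is zero, so it is omitted from the args.
	_, err := c.Do("HMSET", Args{}.Add("album:1").AddFlat(&a)...)
	return err
}
// ---------------------------------------------------------------------------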
-func (args Args) AddFlat(v interface{}) Args { - rv := reflect.ValueOf(v) - switch rv.Kind() { - case reflect.Struct: - args = flattenStruct(args, rv) - case reflect.Slice: - for i := 0; i < rv.Len(); i++ { - args = append(args, rv.Index(i).Interface()) - } - case reflect.Map: - for _, k := range rv.MapKeys() { - args = append(args, k.Interface(), rv.MapIndex(k).Interface()) - } - case reflect.Ptr: - if rv.Type().Elem().Kind() == reflect.Struct { - if !rv.IsNil() { - args = flattenStruct(args, rv.Elem()) - } - } else { - args = append(args, v) - } - default: - args = append(args, v) - } - return args -} - -func flattenStruct(args Args, v reflect.Value) Args { - ss := structSpecForType(v.Type()) - for _, fs := range ss.l { - fv := v.FieldByIndex(fs.index) - if fs.omitEmpty { - var empty = false - switch fv.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - empty = fv.Len() == 0 - case reflect.Bool: - empty = !fv.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - empty = fv.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - empty = fv.Uint() == 0 - case reflect.Float32, reflect.Float64: - empty = fv.Float() == 0 - case reflect.Interface, reflect.Ptr: - empty = fv.IsNil() - } - if empty { - continue - } - } - args = append(args, fs.name, fv.Interface()) - } - return args -} diff --git a/pkg/cache/redis/scan_test.go b/pkg/cache/redis/scan_test.go deleted file mode 100644 index 119cc61d3..000000000 --- a/pkg/cache/redis/scan_test.go +++ /dev/null @@ -1,435 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package redis - -import ( - "fmt" - "math" - "reflect" - "testing" -) - -var scanConversionTests = []struct { - src interface{} - dest interface{} -}{ - {[]byte("-inf"), math.Inf(-1)}, - {[]byte("+inf"), math.Inf(1)}, - {[]byte("0"), float64(0)}, - {[]byte("3.14159"), float64(3.14159)}, - {[]byte("3.14"), float32(3.14)}, - {[]byte("-100"), int(-100)}, - {[]byte("101"), int(101)}, - {int64(102), int(102)}, - {[]byte("103"), uint(103)}, - {int64(104), uint(104)}, - {[]byte("105"), int8(105)}, - {int64(106), int8(106)}, - {[]byte("107"), uint8(107)}, - {int64(108), uint8(108)}, - {[]byte("0"), false}, - {int64(0), false}, - {[]byte("f"), false}, - {[]byte("1"), true}, - {int64(1), true}, - {[]byte("t"), true}, - {"hello", "hello"}, - {[]byte("hello"), "hello"}, - {[]byte("world"), []byte("world")}, - {[]interface{}{[]byte("foo")}, []interface{}{[]byte("foo")}}, - {[]interface{}{[]byte("foo")}, []string{"foo"}}, - {[]interface{}{[]byte("hello"), []byte("world")}, []string{"hello", "world"}}, - {[]interface{}{[]byte("bar")}, [][]byte{[]byte("bar")}}, - {[]interface{}{[]byte("1")}, []int{1}}, - {[]interface{}{[]byte("1"), []byte("2")}, []int{1, 2}}, - {[]interface{}{[]byte("1"), []byte("2")}, []float64{1, 2}}, - {[]interface{}{[]byte("1")}, []byte{1}}, - {[]interface{}{[]byte("1")}, []bool{true}}, -} - -func TestScanConversion(t *testing.T) { - for _, tt := range scanConversionTests { - values := []interface{}{tt.src} - dest := reflect.New(reflect.TypeOf(tt.dest)) - values, err := Scan(values, dest.Interface()) - if err != nil { - t.Errorf("Scan(%v) returned error %v", tt, err) - continue - } - if !reflect.DeepEqual(tt.dest, dest.Elem().Interface()) { - t.Errorf("Scan(%v) returned %v values: %v, want %v", tt, dest.Elem().Interface(), values, tt.dest) - } - } -} - -var scanConversionErrorTests = []struct { - src interface{} - dest interface{} -}{ - {[]byte("1234"), byte(0)}, - {int64(1234), byte(0)}, - {[]byte("-1"), byte(0)}, - {int64(-1), byte(0)}, - {[]byte("junk"), false}, - {Error("blah"), false}, -} - -func TestScanConversionError(t *testing.T) { - for _, tt := range scanConversionErrorTests { - values := []interface{}{tt.src} - dest := reflect.New(reflect.TypeOf(tt.dest)) - values, err := Scan(values, dest.Interface()) - if err == nil { - t.Errorf("Scan(%v) did not return error values: %v", tt, values) - } - } -} - -func ExampleScan() { - c, err := dial() - if err != nil { - fmt.Println(err) - return - } - defer c.Close() - - c.Send("HMSET", "album:1", "title", "Red", "rating", 5) - c.Send("HMSET", "album:2", "title", "Earthbound", "rating", 1) - c.Send("HMSET", "album:3", "title", "Beat") - c.Send("LPUSH", "albums", "1") - c.Send("LPUSH", "albums", "2") - c.Send("LPUSH", "albums", "3") - values, err := Values(c.Do("SORT", "albums", - "BY", "album:*->rating", - "GET", "album:*->title", - "GET", "album:*->rating")) - if err != nil { - fmt.Println(err) - return - } - - for len(values) > 0 { - var title string - rating := -1 // initialize to illegal value to detect nil. 
- values, err = Scan(values, &title, &rating) - if err != nil { - fmt.Println(err) - return - } - if rating == -1 { - fmt.Println(title, "not-rated") - } else { - fmt.Println(title, rating) - } - } - // Output: - // Beat not-rated - // Earthbound 1 - // Red 5 -} - -type s0 struct { - X int - Y int `redis:"y"` - Bt bool -} - -type s1 struct { - X int `redis:"-"` - I int `redis:"i"` - U uint `redis:"u"` - S string `redis:"s"` - P []byte `redis:"p"` - B bool `redis:"b"` - Bt bool - Bf bool - s0 -} - -var scanStructTests = []struct { - title string - reply []string - value interface{} -}{ - {"basic", - []string{"i", "-1234", "u", "5678", "s", "hello", "p", "world", "b", "t", "Bt", "1", "Bf", "0", "X", "123", "y", "456"}, - &s1{I: -1234, U: 5678, S: "hello", P: []byte("world"), B: true, Bt: true, Bf: false, s0: s0{X: 123, Y: 456}}, - }, -} - -func TestScanStruct(t *testing.T) { - for _, tt := range scanStructTests { - var reply []interface{} - for _, v := range tt.reply { - reply = append(reply, []byte(v)) - } - - value := reflect.New(reflect.ValueOf(tt.value).Type().Elem()) - - if err := ScanStruct(reply, value.Interface()); err != nil { - t.Fatalf("ScanStruct(%s) returned error %v", tt.title, err) - } - - if !reflect.DeepEqual(value.Interface(), tt.value) { - t.Fatalf("ScanStruct(%s) returned %v, want %v", tt.title, value.Interface(), tt.value) - } - } -} - -func TestBadScanStructArgs(t *testing.T) { - x := []interface{}{"A", "b"} - test := func(v interface{}) { - if err := ScanStruct(x, v); err == nil { - t.Errorf("Expect error for ScanStruct(%T, %T)", x, v) - } - } - - test(nil) - - var v0 *struct{} - test(v0) - - var v1 int - test(&v1) - - x = x[:1] - v2 := struct{ A string }{} - test(&v2) -} - -var scanSliceTests = []struct { - src []interface{} - fieldNames []string - ok bool - dest interface{} -}{ - { - []interface{}{[]byte("1"), nil, []byte("-1")}, - nil, - true, - []int{1, 0, -1}, - }, - { - []interface{}{[]byte("1"), nil, []byte("2")}, - nil, - true, - []uint{1, 0, 2}, - }, - { - []interface{}{[]byte("-1")}, - nil, - false, - []uint{1}, - }, - { - []interface{}{[]byte("hello"), nil, []byte("world")}, - nil, - true, - [][]byte{[]byte("hello"), nil, []byte("world")}, - }, - { - []interface{}{[]byte("hello"), nil, []byte("world")}, - nil, - true, - []string{"hello", "", "world"}, - }, - { - []interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")}, - nil, - true, - []struct{ A, B string }{{"a1", "b1"}, {"a2", "b2"}}, - }, - { - []interface{}{[]byte("a1"), []byte("b1")}, - nil, - false, - []struct{ A, B, C string }{{"a1", "b1", ""}}, - }, - { - []interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")}, - nil, - true, - []*struct{ A, B string }{{"a1", "b1"}, {"a2", "b2"}}, - }, - { - []interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")}, - []string{"A", "B"}, - true, - []struct{ A, C, B string }{{"a1", "", "b1"}, {"a2", "", "b2"}}, - }, - { - []interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")}, - nil, - false, - []struct{}{}, - }, -} - -func TestScanSlice(t *testing.T) { - for _, tt := range scanSliceTests { - typ := reflect.ValueOf(tt.dest).Type() - dest := reflect.New(typ) - - err := ScanSlice(tt.src, dest.Interface(), tt.fieldNames...) 
- if tt.ok != (err == nil) { - t.Errorf("ScanSlice(%v, []%s, %v) returned error %v", tt.src, typ, tt.fieldNames, err) - continue - } - if tt.ok && !reflect.DeepEqual(dest.Elem().Interface(), tt.dest) { - t.Errorf("ScanSlice(src, []%s) returned %#v, want %#v", typ, dest.Elem().Interface(), tt.dest) - } - } -} - -func ExampleScanSlice() { - c, err := dial() - if err != nil { - fmt.Println(err) - return - } - defer c.Close() - - c.Send("HMSET", "album:1", "title", "Red", "rating", 5) - c.Send("HMSET", "album:2", "title", "Earthbound", "rating", 1) - c.Send("HMSET", "album:3", "title", "Beat", "rating", 4) - c.Send("LPUSH", "albums", "1") - c.Send("LPUSH", "albums", "2") - c.Send("LPUSH", "albums", "3") - values, err := Values(c.Do("SORT", "albums", - "BY", "album:*->rating", - "GET", "album:*->title", - "GET", "album:*->rating")) - if err != nil { - fmt.Println(err) - return - } - - var albums []struct { - Title string - Rating int - } - if err := ScanSlice(values, &albums); err != nil { - fmt.Println(err) - return - } - fmt.Printf("%v\n", albums) - // Output: - // [{Earthbound 1} {Beat 4} {Red 5}] -} - -var argsTests = []struct { - title string - actual Args - expected Args -}{ - {"struct ptr", - Args{}.AddFlat(&struct { - I int `redis:"i"` - U uint `redis:"u"` - S string `redis:"s"` - P []byte `redis:"p"` - M map[string]string `redis:"m"` - Bt bool - Bf bool - }{ - -1234, 5678, "hello", []byte("world"), map[string]string{"hello": "world"}, true, false, - }), - Args{"i", int(-1234), "u", uint(5678), "s", "hello", "p", []byte("world"), "m", map[string]string{"hello": "world"}, "Bt", true, "Bf", false}, - }, - {"struct", - Args{}.AddFlat(struct{ I int }{123}), - Args{"I", 123}, - }, - {"slice", - Args{}.Add(1).AddFlat([]string{"a", "b", "c"}).Add(2), - Args{1, "a", "b", "c", 2}, - }, - {"struct omitempty", - Args{}.AddFlat(&struct { - I int `redis:"i,omitempty"` - U uint `redis:"u,omitempty"` - S string `redis:"s,omitempty"` - P []byte `redis:"p,omitempty"` - M map[string]string `redis:"m,omitempty"` - Bt bool `redis:"Bt,omitempty"` - Bf bool `redis:"Bf,omitempty"` - }{ - 0, 0, "", []byte{}, map[string]string{}, true, false, - }), - Args{"Bt", true}, - }, -} - -func TestArgs(t *testing.T) { - for _, tt := range argsTests { - if !reflect.DeepEqual(tt.actual, tt.expected) { - t.Fatalf("%s is %v, want %v", tt.title, tt.actual, tt.expected) - } - } -} - -func ExampleArgs() { - c, err := dial() - if err != nil { - fmt.Println(err) - return - } - defer c.Close() - - var p1, p2 struct { - Title string `redis:"title"` - Author string `redis:"author"` - Body string `redis:"body"` - } - - p1.Title = "Example" - p1.Author = "Gary" - p1.Body = "Hello" - - if _, err := c.Do("HMSET", Args{}.Add("id1").AddFlat(&p1)...); err != nil { - fmt.Println(err) - return - } - - m := map[string]string{ - "title": "Example2", - "author": "Steve", - "body": "Map", - } - - if _, err := c.Do("HMSET", Args{}.Add("id2").AddFlat(m)...); err != nil { - fmt.Println(err) - return - } - - for _, id := range []string{"id1", "id2"} { - v, err := Values(c.Do("HGETALL", id)) - if err != nil { - fmt.Println(err) - return - } - - if err := ScanStruct(v, &p2); err != nil { - fmt.Println(err) - return - } - - fmt.Printf("%+v\n", p2) - } - - // Output: - // {Title:Example Author:Gary Body:Hello} - // {Title:Example2 Author:Steve Body:Map} -} diff --git a/pkg/cache/redis/script.go b/pkg/cache/redis/script.go deleted file mode 100644 index 78605a90a..000000000 --- a/pkg/cache/redis/script.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2012 
Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "crypto/sha1" - "encoding/hex" - "io" - "strings" -) - -// Script encapsulates the source, hash and key count for a Lua script. See -// http://redis.io/commands/eval for information on scripts in Redis. -type Script struct { - keyCount int - src string - hash string -} - -// NewScript returns a new script object. If keyCount is greater than or equal -// to zero, then the count is automatically inserted in the EVAL command -// argument list. If keyCount is less than zero, then the application supplies -// the count as the first value in the keysAndArgs argument to the Do, Send and -// SendHash methods. -func NewScript(keyCount int, src string) *Script { - h := sha1.New() - io.WriteString(h, src) - return &Script{keyCount, src, hex.EncodeToString(h.Sum(nil))} -} - -func (s *Script) args(spec string, keysAndArgs []interface{}) []interface{} { - var args []interface{} - if s.keyCount < 0 { - args = make([]interface{}, 1+len(keysAndArgs)) - args[0] = spec - copy(args[1:], keysAndArgs) - } else { - args = make([]interface{}, 2+len(keysAndArgs)) - args[0] = spec - args[1] = s.keyCount - copy(args[2:], keysAndArgs) - } - return args -} - -// Do evaluates the script. Under the covers, Do optimistically evaluates the -// script using the EVALSHA command. If the command fails because the script is -// not loaded, then Do evaluates the script using the EVAL command (thus -// causing the script to load). -func (s *Script) Do(c Conn, keysAndArgs ...interface{}) (interface{}, error) { - v, err := c.Do("EVALSHA", s.args(s.hash, keysAndArgs)...) - if e, ok := err.(Error); ok && strings.HasPrefix(string(e), "NOSCRIPT ") { - v, err = c.Do("EVAL", s.args(s.src, keysAndArgs)...) - } - return v, err -} - -// SendHash evaluates the script without waiting for the reply. The script is -// evaluated with the EVALSHA command. The application must ensure that the -// script is loaded by a previous call to Send, Do or Load methods. -func (s *Script) SendHash(c Conn, keysAndArgs ...interface{}) error { - return c.Send("EVALSHA", s.args(s.hash, keysAndArgs)...) -} - -// Send evaluates the script without waiting for the reply. -func (s *Script) Send(c Conn, keysAndArgs ...interface{}) error { - return c.Send("EVAL", s.args(s.src, keysAndArgs)...) -} - -// Load loads the script without evaluating it. -func (s *Script) Load(c Conn) error { - _, err := c.Do("SCRIPT", "LOAD", s.src) - return err -} diff --git a/pkg/cache/redis/script_test.go b/pkg/cache/redis/script_test.go deleted file mode 100644 index 9c2c3e8a7..000000000 --- a/pkg/cache/redis/script_test.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. 
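As a quick companion to the deleted script.go above: besides the optimistic EVALSHA/EVAL fallback in Do, the Script type also supports pipelined evaluation via Load plus SendHash. The following is only a sketch, assuming the same redis package, a reachable server on :6379, and an illustrative counter key:

```go
func examplePipelinedScript() {
	c, err := Dial("tcp", ":6379")
	if err != nil {
		// handle error
	}
	defer c.Close()

	// keyCount=1: the count is inserted into the command arguments automatically,
	// so the key is passed first, followed by the script arguments.
	incrBy := NewScript(1, `return redis.call('INCRBY', KEYS[1], ARGV[1])`)

	// Load once so SendHash (which never falls back to EVAL) finds the script on the server.
	if err := incrBy.Load(c); err != nil {
		// handle error
	}

	// Queue three evaluations and flush them in a single round trip.
	for i := 0; i < 3; i++ {
		if err := incrBy.SendHash(c, "counter", 10); err != nil {
			// handle error
		}
	}
	if err := c.Flush(); err != nil {
		// handle error
	}
	for i := 0; i < 3; i++ {
		if _, err := c.Receive(); err != nil {
			// handle error
		}
	}
}
```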
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "fmt" - "reflect" - "testing" - "time" -) - -func ExampleScript() { - c, err := Dial("tcp", ":6379") - if err != nil { - // handle error - } - defer c.Close() - // Initialize a package-level variable with a script. - var getScript = NewScript(1, `return call('get', KEYS[1])`) - - // In a function, use the script Do method to evaluate the script. The Do - // method optimistically uses the EVALSHA command. If the script is not - // loaded, then the Do method falls back to the EVAL command. - if _, err = getScript.Do(c, "foo"); err != nil { - // handle error - } -} - -func TestScript(t *testing.T) { - c, err := DialDefaultServer() - if err != nil { - t.Fatalf("error connection to database, %v", err) - } - defer c.Close() - - // To test fall back in Do, we make script unique by adding comment with current time. - script := fmt.Sprintf("--%d\nreturn {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}", time.Now().UnixNano()) - s := NewScript(2, script) - reply := []interface{}{[]byte("key1"), []byte("key2"), []byte("arg1"), []byte("arg2")} - - v, err := s.Do(c, "key1", "key2", "arg1", "arg2") - if err != nil { - t.Errorf("s.Do(c, ...) returned %v", err) - } - - if !reflect.DeepEqual(v, reply) { - t.Errorf("s.Do(c, ..); = %v, want %v", v, reply) - } - - err = s.Load(c) - if err != nil { - t.Errorf("s.Load(c) returned %v", err) - } - - err = s.SendHash(c, "key1", "key2", "arg1", "arg2") - if err != nil { - t.Errorf("s.SendHash(c, ...) returned %v", err) - } - - err = c.Flush() - if err != nil { - t.Errorf("c.Flush() returned %v", err) - } - - v, err = c.Receive() - if err != nil { - t.Errorf("c.Receive() returned %v", err) - } - if !reflect.DeepEqual(v, reply) { - t.Errorf("s.SendHash(c, ..); c.Receive() = %v, want %v", v, reply) - } - - err = s.Send(c, "key1", "key2", "arg1", "arg2") - if err != nil { - t.Errorf("s.Send(c, ...) 
returned %v", err) - } - - err = c.Flush() - if err != nil { - t.Errorf("c.Flush() returned %v", err) - } - - v, err = c.Receive() - if err != nil { - t.Errorf("c.Receive() returned %v", err) - } - if !reflect.DeepEqual(v, reply) { - t.Errorf("s.Send(c, ..); c.Receive() = %v, want %v", v, reply) - } -} diff --git a/pkg/cache/redis/test/docker-compose.yaml b/pkg/cache/redis/test/docker-compose.yaml deleted file mode 100644 index 4bb1f4552..000000000 --- a/pkg/cache/redis/test/docker-compose.yaml +++ /dev/null @@ -1,12 +0,0 @@ -version: "3.7" - -services: - redis: - image: redis - ports: - - 6379:6379 - healthcheck: - test: ["CMD", "redis-cli","ping"] - interval: 20s - timeout: 1s - retries: 20 \ No newline at end of file diff --git a/pkg/cache/redis/trace.go b/pkg/cache/redis/trace.go deleted file mode 100644 index 656b616c0..000000000 --- a/pkg/cache/redis/trace.go +++ /dev/null @@ -1,152 +0,0 @@ -package redis - -import ( - "context" - "fmt" - "time" - - "github.com/go-kratos/kratos/pkg/log" - "github.com/go-kratos/kratos/pkg/net/trace" -) - -const ( - _traceComponentName = "library/cache/redis" - _tracePeerService = "redis" - _traceSpanKind = "client" -) - -var _internalTags = []trace.Tag{ - trace.TagString(trace.TagSpanKind, _traceSpanKind), - trace.TagString(trace.TagComponent, _traceComponentName), - trace.TagString(trace.TagPeerService, _tracePeerService), -} - -type traceConn struct { - // tr parent trace. - tr trace.Trace - // trPipe for pipeline, if trPipe != nil meaning on pipeline. - trPipe trace.Trace - - // connTag include e.g. ip,port - connTags []trace.Tag - - // origin redis conn - Conn - pending int - // TODO: split slow log from trace. - slowLogThreshold time.Duration -} - -func (t *traceConn) Do(commandName string, args ...interface{}) (reply interface{}, err error) { - statement := getStatement(commandName, args...) - defer t.slowLog(statement, time.Now()) - - // NOTE: ignored empty commandName - // current sdk will Do empty command after pipeline finished - if commandName == "" { - t.pending = 0 - t.trPipe = nil - return t.Conn.Do(commandName, args...) - } - if t.tr == nil { - return t.Conn.Do(commandName, args...) - } - tr := t.tr.Fork("", "Redis:"+commandName) - tr.SetTag(_internalTags...) - tr.SetTag(t.connTags...) - tr.SetTag(trace.TagString(trace.TagDBStatement, statement)) - reply, err = t.Conn.Do(commandName, args...) - tr.Finish(&err) - return -} - -func (t *traceConn) Send(commandName string, args ...interface{}) (err error) { - statement := getStatement(commandName, args...) - defer t.slowLog(statement, time.Now()) - t.pending++ - if t.tr == nil { - return t.Conn.Send(commandName, args...) - } - - if t.trPipe == nil { - t.trPipe = t.tr.Fork("", "Redis:Pipeline") - t.trPipe.SetTag(_internalTags...) - t.trPipe.SetTag(t.connTags...) 
- } - t.trPipe.SetLog( - trace.Log(trace.LogEvent, "Send"), - trace.Log("db.statement", statement), - ) - if err = t.Conn.Send(commandName, args...); err != nil { - t.trPipe.SetTag(trace.TagBool(trace.TagError, true)) - t.trPipe.SetLog( - trace.Log(trace.LogEvent, "Send Fail"), - trace.Log(trace.LogMessage, err.Error()), - ) - } - return err -} - -func (t *traceConn) Flush() error { - defer t.slowLog("Flush", time.Now()) - if t.trPipe == nil { - return t.Conn.Flush() - } - t.trPipe.SetLog(trace.Log(trace.LogEvent, "Flush")) - err := t.Conn.Flush() - if err != nil { - t.trPipe.SetTag(trace.TagBool(trace.TagError, true)) - t.trPipe.SetLog( - trace.Log(trace.LogEvent, "Flush Fail"), - trace.Log(trace.LogMessage, err.Error()), - ) - } - return err -} - -func (t *traceConn) Receive() (reply interface{}, err error) { - defer t.slowLog("Receive", time.Now()) - if t.trPipe == nil { - return t.Conn.Receive() - } - t.trPipe.SetLog(trace.Log(trace.LogEvent, "Receive")) - reply, err = t.Conn.Receive() - if err != nil { - t.trPipe.SetTag(trace.TagBool(trace.TagError, true)) - t.trPipe.SetLog( - trace.Log(trace.LogEvent, "Receive Fail"), - trace.Log(trace.LogMessage, err.Error()), - ) - } - if t.pending > 0 { - t.pending-- - } - if t.pending == 0 { - t.trPipe.Finish(nil) - t.trPipe = nil - } - return reply, err -} - -func (t *traceConn) WithContext(ctx context.Context) Conn { - t.Conn = t.Conn.WithContext(ctx) - t.tr, _ = trace.FromContext(ctx) - t.pending = 0 - t.trPipe = nil - return t -} - -func (t *traceConn) slowLog(statement string, now time.Time) { - du := time.Since(now) - if du > t.slowLogThreshold { - log.Warn("%s slow log statement: %s time: %v", _tracePeerService, statement, du) - } -} - -func getStatement(commandName string, args ...interface{}) (res string) { - res = commandName - if len(args) > 0 { - res = fmt.Sprintf("%s %v", commandName, args[0]) - } - return -} diff --git a/pkg/cache/redis/trace_test.go b/pkg/cache/redis/trace_test.go deleted file mode 100644 index 8d438b72b..000000000 --- a/pkg/cache/redis/trace_test.go +++ /dev/null @@ -1,213 +0,0 @@ -package redis - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "github.com/go-kratos/kratos/pkg/net/trace" -) - -const testTraceSlowLogThreshold = 250 * time.Millisecond - -type mockTrace struct { - tags []trace.Tag - logs []trace.LogField - perr *error - operationName string - finished bool -} - -func (m *mockTrace) Fork(serviceName string, operationName string) trace.Trace { - m.operationName = operationName - return m -} -func (m *mockTrace) Follow(serviceName string, operationName string) trace.Trace { - panic("not implemented") -} -func (m *mockTrace) Finish(err *error) { - m.perr = err - m.finished = true -} -func (m *mockTrace) SetTag(tags ...trace.Tag) trace.Trace { - m.tags = append(m.tags, tags...) - return m -} -func (m *mockTrace) SetLog(logs ...trace.LogField) trace.Trace { - m.logs = append(m.logs, logs...) 
- return m -} -func (m *mockTrace) Visit(fn func(k, v string)) {} -func (m *mockTrace) SetTitle(title string) {} -func (m *mockTrace) TraceID() string { return "" } - -type mockConn struct{} - -func (c *mockConn) Close() error { return nil } -func (c *mockConn) Err() error { return nil } -func (c *mockConn) Do(commandName string, args ...interface{}) (reply interface{}, err error) { - return nil, nil -} -func (c *mockConn) Send(commandName string, args ...interface{}) error { return nil } -func (c *mockConn) Flush() error { return nil } -func (c *mockConn) Receive() (reply interface{}, err error) { return nil, nil } -func (c *mockConn) WithContext(context.Context) Conn { return c } - -func TestTraceDo(t *testing.T) { - tr := &mockTrace{} - ctx := trace.NewContext(context.Background(), tr) - tc := &traceConn{Conn: &mockConn{}, slowLogThreshold: testTraceSlowLogThreshold} - conn := tc.WithContext(ctx) - - conn.Do("GET", "test") - - assert.Equal(t, "Redis:GET", tr.operationName) - assert.NotEmpty(t, tr.tags) - assert.True(t, tr.finished) -} - -func TestTraceDoErr(t *testing.T) { - tr := &mockTrace{} - ctx := trace.NewContext(context.Background(), tr) - tc := &traceConn{Conn: MockErr{Error: fmt.Errorf("hhhhhhh")}, - slowLogThreshold: testTraceSlowLogThreshold} - conn := tc.WithContext(ctx) - - conn.Do("GET", "test") - - assert.Equal(t, "Redis:GET", tr.operationName) - assert.True(t, tr.finished) - assert.NotNil(t, *tr.perr) -} - -func TestTracePipeline(t *testing.T) { - tr := &mockTrace{} - ctx := trace.NewContext(context.Background(), tr) - tc := &traceConn{Conn: &mockConn{}, slowLogThreshold: testTraceSlowLogThreshold} - conn := tc.WithContext(ctx) - - N := 2 - for i := 0; i < N; i++ { - conn.Send("GET", "hello, world") - } - conn.Flush() - for i := 0; i < N; i++ { - conn.Receive() - } - - assert.Equal(t, "Redis:Pipeline", tr.operationName) - assert.NotEmpty(t, tr.tags) - assert.NotEmpty(t, tr.logs) - assert.True(t, tr.finished) -} - -func TestTracePipelineErr(t *testing.T) { - tr := &mockTrace{} - ctx := trace.NewContext(context.Background(), tr) - tc := &traceConn{Conn: MockErr{Error: fmt.Errorf("hahah")}, - slowLogThreshold: testTraceSlowLogThreshold} - conn := tc.WithContext(ctx) - - N := 2 - for i := 0; i < N; i++ { - conn.Send("GET", "hello, world") - } - conn.Flush() - for i := 0; i < N; i++ { - conn.Receive() - } - - assert.Equal(t, "Redis:Pipeline", tr.operationName) - assert.NotEmpty(t, tr.tags) - assert.NotEmpty(t, tr.logs) - assert.True(t, tr.finished) - var isError bool - for _, tag := range tr.tags { - if tag.Key == "error" { - isError = true - } - } - assert.True(t, isError) -} - -func TestSendStatement(t *testing.T) { - tr := &mockTrace{} - ctx := trace.NewContext(context.Background(), tr) - tc := &traceConn{Conn: MockErr{Error: fmt.Errorf("hahah")}, - slowLogThreshold: testTraceSlowLogThreshold} - conn := tc.WithContext(ctx) - conn.Send("SET", "hello", "test") - conn.Flush() - conn.Receive() - - assert.Equal(t, "Redis:Pipeline", tr.operationName) - assert.NotEmpty(t, tr.tags) - assert.NotEmpty(t, tr.logs) - assert.Equal(t, "event", tr.logs[0].Key) - assert.Equal(t, "Send", tr.logs[0].Value) - assert.Equal(t, "db.statement", tr.logs[1].Key) - assert.Equal(t, "SET hello", tr.logs[1].Value) - assert.True(t, tr.finished) - var isError bool - for _, tag := range tr.tags { - if tag.Key == "error" { - isError = true - } - } - assert.True(t, isError) -} - -func TestDoStatement(t *testing.T) { - tr := &mockTrace{} - ctx := trace.NewContext(context.Background(), tr) - tc := 
&traceConn{Conn: MockErr{Error: fmt.Errorf("hahah")}, - slowLogThreshold: testTraceSlowLogThreshold} - conn := tc.WithContext(ctx) - conn.Do("SET", "hello", "test") - - assert.Equal(t, "Redis:SET", tr.operationName) - assert.Equal(t, "SET hello", tr.tags[len(tr.tags)-1].Value) - assert.True(t, tr.finished) -} - -func BenchmarkTraceConn(b *testing.B) { - for i := 0; i < b.N; i++ { - c, err := DialDefaultServer() - if err != nil { - b.Fatal(err) - } - t := &traceConn{ - Conn: c, - connTags: []trace.Tag{trace.TagString(trace.TagPeerAddress, "abc")}, - slowLogThreshold: 1 * time.Second, - } - c2 := t.WithContext(context.TODO()) - if _, err := c2.Do("PING"); err != nil { - b.Fatal(err) - } - c2.Close() - } -} - -func TestTraceConnPending(t *testing.T) { - c, err := DialDefaultServer() - if err != nil { - t.Fatal(err) - } - tc := &traceConn{ - Conn: c, - connTags: []trace.Tag{trace.TagString(trace.TagPeerAddress, "abc")}, - slowLogThreshold: 1 * time.Second, - } - err = tc.Send("SET", "a", "x") - if err != nil { - t.Fatal(err) - } - tc.Close() - assert.Equal(t, 1, tc.pending) - tc.Do("") - assert.Equal(t, 0, tc.pending) -} diff --git a/pkg/cache/redis/util.go b/pkg/cache/redis/util.go deleted file mode 100644 index aa52597bb..000000000 --- a/pkg/cache/redis/util.go +++ /dev/null @@ -1,17 +0,0 @@ -package redis - -import ( - "context" - "time" -) - -func shrinkDeadline(ctx context.Context, timeout time.Duration) time.Time { - var timeoutTime = time.Now().Add(timeout) - if ctx == nil { - return timeoutTime - } - if deadline, ok := ctx.Deadline(); ok && timeoutTime.After(deadline) { - return deadline - } - return timeoutTime -} diff --git a/pkg/cache/redis/util_test.go b/pkg/cache/redis/util_test.go deleted file mode 100644 index 748b8423e..000000000 --- a/pkg/cache/redis/util_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package redis - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func TestShrinkDeadline(t *testing.T) { - t.Run("test not deadline", func(t *testing.T) { - timeout := time.Second - timeoutTime := time.Now().Add(timeout) - tm := shrinkDeadline(context.Background(), timeout) - assert.True(t, tm.After(timeoutTime)) - }) - t.Run("test big deadline", func(t *testing.T) { - timeout := time.Second - timeoutTime := time.Now().Add(timeout) - deadlineTime := time.Now().Add(2 * time.Second) - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - - tm := shrinkDeadline(ctx, timeout) - assert.True(t, tm.After(timeoutTime) && tm.Before(deadlineTime)) - }) - t.Run("test small deadline", func(t *testing.T) { - timeout := time.Second - deadlineTime := time.Now().Add(500 * time.Millisecond) - ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) - defer cancel() - - tm := shrinkDeadline(ctx, timeout) - assert.True(t, tm.After(deadlineTime) && tm.Before(time.Now().Add(timeout))) - }) -} diff --git a/pkg/conf/dsn/README.md b/pkg/conf/dsn/README.md deleted file mode 100644 index 57b23b585..000000000 --- a/pkg/conf/dsn/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# dsn - -## 项目简介 - -通用数据源地址解析 diff --git a/pkg/conf/dsn/doc.go b/pkg/conf/dsn/doc.go deleted file mode 100644 index d93013d87..000000000 --- a/pkg/conf/dsn/doc.go +++ /dev/null @@ -1,63 +0,0 @@ -// Package dsn implements dsn parse with struct bind -/* -DSN 格式类似 URI, DSN 结构如下图 - - network:[//[username[:password]@]address[:port][,address[:port]]][/path][?query][#fragment] - -与 URI 的主要区别在于 scheme 被替换为 network, host 被替换为 address 并且支持多个 address. 
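To make the grammar above concrete, here is a small sketch of parsing a multi-address DSN and binding it with the Parse/Bind API defined later in this package; the struct fields and the example address values are illustrative only:

```go
// Illustrative config; tags follow the dsn:"key,[default]" convention described here.
type clientConfig struct {
	Network   string   `dsn:"network"`
	Username  string   `dsn:"username"`
	Password  string   `dsn:"password"`
	Addresses []string `dsn:"address"`
	Timeout   string   `dsn:"query.timeout,1s"` // falls back to "1s" when absent
}

func exampleParseAndBind() {
	d, err := Parse("tcp://user:secret@10.0.0.1:6379,10.0.0.2:6379?timeout=200ms")
	if err != nil {
		// handle error
	}
	var cfg clientConfig
	if _, err := d.Bind(&cfg); err != nil {
		// handle error
	}
	// cfg.Network == "tcp"
	// cfg.Addresses == []string{"10.0.0.1:6379", "10.0.0.2:6379"}
	// cfg.Timeout == "200ms"
}
```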
-network 与 net 包中 network 意义相同, tcp、udp、unix 等, address 支持多个使用 ',' 分割, 如果 -network 为 unix 等本地 sock 协议则使用 Path, 有且只有一个 - -dsn 包主要提供了 Parse, Bind 和 validate 功能 - -Parse 解析 dsn 字符串成 DSN struct, DSN struct 与 url.URL 几乎完全一样 - -Bind 提供将 DSN 数据绑定到一个 struct 的功能, 通过 tag dsn:"key,[default]" 指定绑定的字段, 目前支持两种类型的数据绑定 - -内置变量 key: - network string tcp, udp, unix 等, 参考 net 包中的 network - username string - password string - address string or []string address 可以绑定到 string 或者 []string, 如果为 string 则取 address 第一个 - -Query: 通过 query.name 可以取到 query 上的数据 - - 数组可以通过传递多个获得 - - array=1&array=2&array3 -> []int `tag:"query.array"` - - struct 支持嵌套 - - foo.sub.name=hello&foo.tm=hello - - struct Foo { - Tm string `dsn:"query.tm"` - Sub struct { - Name string `dsn:"query.name"` - } `dsn:"query.sub"` - } - -默认值: 通过 dsn:"key,[default]" 默认值暂时不支持数组 - -忽略 Bind: 通过 dsn:"-" 忽略 Bind - -自定义 Bind: 可以同时实现 encoding.TextUnmarshaler 自定义 Bind 实现 - -Validate: 参考 https://github.com/go-playground/validator - -使用参考: example_test.go - -DSN 命名规范: - -没有历史遗留的情况下,尽量使用 Address, Network, Username, Password 等命名,代替之前的 Proto 和 Addr 等命名 - -Query 命名参考, 使用驼峰小写开头: - - timeout 通用超时 - dialTimeout 连接建立超时 - readTimeout 读操作超时 - writeTimeout 写操作超时 - readsTimeout 批量读超时 - writesTimeout 批量写超时 -*/ -package dsn diff --git a/pkg/conf/dsn/dsn.go b/pkg/conf/dsn/dsn.go deleted file mode 100644 index 7cb209e2b..000000000 --- a/pkg/conf/dsn/dsn.go +++ /dev/null @@ -1,106 +0,0 @@ -package dsn - -import ( - "net/url" - "reflect" - "strings" - - validator "gopkg.in/go-playground/validator.v9" -) - -var _validator *validator.Validate - -func init() { - _validator = validator.New() -} - -// DSN a DSN represents a parsed DSN as same as url.URL. -type DSN struct { - *url.URL -} - -// Bind dsn to specify struct and validate use use go-playground/validator format -// -// The bind of each struct field can be customized by the format string -// stored under the 'dsn' key in the struct field's tag. The format string -// gives the name of the field, possibly followed by a comma-separated -// list of options. The name may be empty in order to specify options -// without overriding the default field name. -// -// A two type data you can bind to struct -// built-in values, use below keys to bind built-in value -// username -// password -// address -// network -// the value in query string, use query.{name} to bind value in query string -// -// As a special case, if the field tag is "-", the field is always omitted. -// NOTE: that a field with name "-" can still be generated using the tag "-,". -// -// Examples of struct field tags and their meanings: -// // Field bind username -// Field string `dsn:"username"` -// // Field is ignored by this package. 
-// Field string `dsn:"-"` -// // Field bind value from query -// Field string `dsn:"query.name"` -// -func (d *DSN) Bind(v interface{}) (url.Values, error) { - assignFuncs := make(map[string]assignFunc) - if d.User != nil { - username := d.User.Username() - password, ok := d.User.Password() - if ok { - assignFuncs["password"] = stringsAssignFunc(password) - } - assignFuncs["username"] = stringsAssignFunc(username) - } - assignFuncs["address"] = addressesAssignFunc(d.Addresses()) - assignFuncs["network"] = stringsAssignFunc(d.Scheme) - query, err := bindQuery(d.Query(), v, assignFuncs) - if err != nil { - return nil, err - } - return query, _validator.Struct(v) -} - -func addressesAssignFunc(addresses []string) assignFunc { - return func(v reflect.Value, to tagOpt) error { - if v.Kind() == reflect.String { - if addresses[0] == "" && to.Default != "" { - v.SetString(to.Default) - } else { - v.SetString(addresses[0]) - } - return nil - } - if !(v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.String) { - return &BindTypeError{Value: strings.Join(addresses, ","), Type: v.Type()} - } - vals := reflect.MakeSlice(v.Type(), len(addresses), len(addresses)) - for i, address := range addresses { - vals.Index(i).SetString(address) - } - if v.CanSet() { - v.Set(vals) - } - return nil - } -} - -// Addresses parse host split by ',' -// For Unix networks, return ['path'] -func (d *DSN) Addresses() []string { - switch d.Scheme { - case "unix", "unixgram", "unixpacket": - return []string{d.Path} - } - return strings.Split(d.Host, ",") -} - -// Parse parses rawdsn into a URL structure. -func Parse(rawdsn string) (*DSN, error) { - u, err := url.Parse(rawdsn) - return &DSN{URL: u}, err -} diff --git a/pkg/conf/dsn/dsn_test.go b/pkg/conf/dsn/dsn_test.go deleted file mode 100644 index 2a25acfb5..000000000 --- a/pkg/conf/dsn/dsn_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package dsn - -import ( - "net/url" - "reflect" - "testing" - "time" - - xtime "github.com/go-kratos/kratos/pkg/time" -) - -type config struct { - Network string `dsn:"network"` - Addresses []string `dsn:"address"` - Username string `dsn:"username"` - Password string `dsn:"password"` - Timeout xtime.Duration `dsn:"query.timeout"` - Sub Sub `dsn:"query.sub"` - Def string `dsn:"query.def,hello"` -} - -type Sub struct { - Foo int `dsn:"query.foo"` -} - -func TestBind(t *testing.T) { - var cfg config - rawdsn := "tcp://root:toor@172.12.23.34,178.23.34.45?timeout=1s&sub.foo=1&hello=world" - dsn, err := Parse(rawdsn) - if err != nil { - t.Fatal(err) - } - values, err := dsn.Bind(&cfg) - if err != nil { - t.Error(err) - } - if !reflect.DeepEqual(values, url.Values{"hello": {"world"}}) { - t.Errorf("unexpect values get %v", values) - } - cfg2 := config{ - Network: "tcp", - Addresses: []string{"172.12.23.34", "178.23.34.45"}, - Password: "toor", - Username: "root", - Sub: Sub{Foo: 1}, - Timeout: xtime.Duration(time.Second), - Def: "hello", - } - if !reflect.DeepEqual(cfg, cfg2) { - t.Errorf("unexpect config get %v, expect %v", cfg, cfg2) - } -} - -type config2 struct { - Network string `dsn:"network"` - Address string `dsn:"address"` - Timeout xtime.Duration `dsn:"query.timeout"` -} - -func TestUnix(t *testing.T) { - var cfg config2 - rawdsn := "unix:///run/xxx.sock?timeout=1s&sub.foo=1&hello=world" - dsn, err := Parse(rawdsn) - if err != nil { - t.Fatal(err) - } - _, err = dsn.Bind(&cfg) - if err != nil { - t.Error(err) - } - cfg2 := config2{ - Network: "unix", - Address: "/run/xxx.sock", - Timeout: xtime.Duration(time.Second), - } - if 
!reflect.DeepEqual(cfg, cfg2) { - t.Errorf("unexpect config2 get %v, expect %v", cfg, cfg2) - } -} diff --git a/pkg/conf/dsn/example_test.go b/pkg/conf/dsn/example_test.go deleted file mode 100644 index b2ac38421..000000000 --- a/pkg/conf/dsn/example_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package dsn_test - -import ( - "log" - - "github.com/go-kratos/kratos/pkg/conf/dsn" - xtime "github.com/go-kratos/kratos/pkg/time" -) - -// Config struct -type Config struct { - Network string `dsn:"network" validate:"required"` - Host string `dsn:"host" validate:"required"` - Username string `dsn:"username" validate:"required"` - Password string `dsn:"password" validate:"required"` - Timeout xtime.Duration `dsn:"query.timeout,1s"` - Offset int `dsn:"query.offset" validate:"gte=0"` -} - -func ExampleParse() { - cfg := &Config{} - d, err := dsn.Parse("tcp://root:toor@172.12.12.23:2233?timeout=10s") - if err != nil { - log.Fatal(err) - } - _, err = d.Bind(cfg) - if err != nil { - log.Fatal(err) - } - log.Printf("%v", cfg) -} diff --git a/pkg/conf/dsn/query.go b/pkg/conf/dsn/query.go deleted file mode 100644 index f622d0b07..000000000 --- a/pkg/conf/dsn/query.go +++ /dev/null @@ -1,422 +0,0 @@ -package dsn - -import ( - "encoding" - "net/url" - "reflect" - "runtime" - "strconv" - "strings" -) - -const ( - _tagID = "dsn" - _queryPrefix = "query." -) - -// InvalidBindError describes an invalid argument passed to DecodeQuery. -// (The argument to DecodeQuery must be a non-nil pointer.) -type InvalidBindError struct { - Type reflect.Type -} - -func (e *InvalidBindError) Error() string { - if e.Type == nil { - return "Bind(nil)" - } - - if e.Type.Kind() != reflect.Ptr { - return "Bind(non-pointer " + e.Type.String() + ")" - } - return "Bind(nil " + e.Type.String() + ")" -} - -// BindTypeError describes a query value that was -// not appropriate for a value of a specific Go type. -type BindTypeError struct { - Value string - Type reflect.Type -} - -func (e *BindTypeError) Error() string { - return "cannot decode " + e.Value + " into Go value of type " + e.Type.String() -} - -type assignFunc func(v reflect.Value, to tagOpt) error - -func stringsAssignFunc(val string) assignFunc { - return func(v reflect.Value, to tagOpt) error { - if v.Kind() != reflect.String || !v.CanSet() { - return &BindTypeError{Value: "string", Type: v.Type()} - } - if val == "" { - v.SetString(to.Default) - } else { - v.SetString(val) - } - return nil - } -} - -// bindQuery parses url.Values and stores the result in the value pointed to by v. 
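The tag options that bindQuery understands also cover per-field defaults and nested structs, which the tests further below exercise. A rough sketch of how such tags behave (type names and query keys here are made up):

```go
// Hypothetical config demonstrating a default value and a nested query struct.
type Pool struct {
	Size int `dsn:"query.size,10"` // bound from "pool.size"; defaults to 10 when absent
}

type serverConfig struct {
	Network string `dsn:"network"`
	Debug   bool   `dsn:"query.debug"`
	Pool    Pool   `dsn:"query.pool"` // nested: inner keys are prefixed with "pool."
}

// "tcp://127.0.0.1:80?debug=true&pool.size=32" binds Network="tcp",
// Debug=true and Pool.Size=32; dropping pool.size leaves Pool.Size at 10.
```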
-// if v is nil or not a pointer, bindQuery returns an InvalidDecodeError -func bindQuery(query url.Values, v interface{}, assignFuncs map[string]assignFunc) (url.Values, error) { - if assignFuncs == nil { - assignFuncs = make(map[string]assignFunc) - } - d := decodeState{ - data: query, - used: make(map[string]bool), - assignFuncs: assignFuncs, - } - err := d.decode(v) - ret := d.unused() - return ret, err -} - -type tagOpt struct { - Name string - Default string -} - -func parseTag(tag string) tagOpt { - vs := strings.SplitN(tag, ",", 2) - if len(vs) == 2 { - return tagOpt{Name: vs[0], Default: vs[1]} - } - return tagOpt{Name: vs[0]} -} - -type decodeState struct { - data url.Values - used map[string]bool - assignFuncs map[string]assignFunc -} - -func (d *decodeState) unused() url.Values { - ret := make(url.Values) - for k, v := range d.data { - if !d.used[k] { - ret[k] = v - } - } - return ret -} - -func (d *decodeState) decode(v interface{}) (err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = r.(error) - } - }() - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - return &InvalidBindError{reflect.TypeOf(v)} - } - return d.root(rv) -} - -func (d *decodeState) root(v reflect.Value) error { - var tu encoding.TextUnmarshaler - tu, v = d.indirect(v) - if tu != nil { - return tu.UnmarshalText([]byte(d.data.Encode())) - } - // TODO support map, slice as root - if v.Kind() != reflect.Struct { - return &BindTypeError{Value: d.data.Encode(), Type: v.Type()} - } - tv := v.Type() - for i := 0; i < tv.NumField(); i++ { - fv := v.Field(i) - field := tv.Field(i) - to := parseTag(field.Tag.Get(_tagID)) - if to.Name == "-" { - continue - } - if af, ok := d.assignFuncs[to.Name]; ok { - if err := af(fv, tagOpt{}); err != nil { - return err - } - continue - } - if !strings.HasPrefix(to.Name, _queryPrefix) { - continue - } - to.Name = to.Name[len(_queryPrefix):] - if err := d.value(fv, "", to); err != nil { - return err - } - } - return nil -} - -func combinekey(prefix string, to tagOpt) string { - key := to.Name - if prefix != "" { - key = prefix + "." 
+ key - } - return key -} - -func (d *decodeState) value(v reflect.Value, prefix string, to tagOpt) (err error) { - key := combinekey(prefix, to) - d.used[key] = true - var tu encoding.TextUnmarshaler - tu, v = d.indirect(v) - if tu != nil { - if val, ok := d.data[key]; ok { - return tu.UnmarshalText([]byte(val[0])) - } - if to.Default != "" { - return tu.UnmarshalText([]byte(to.Default)) - } - return - } - switch v.Kind() { - case reflect.Bool: - err = d.valueBool(v, prefix, to) - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - err = d.valueInt64(v, prefix, to) - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - err = d.valueUint64(v, prefix, to) - case reflect.Float32, reflect.Float64: - err = d.valueFloat64(v, prefix, to) - case reflect.String: - err = d.valueString(v, prefix, to) - case reflect.Slice: - err = d.valueSlice(v, prefix, to) - case reflect.Struct: - err = d.valueStruct(v, prefix, to) - case reflect.Ptr: - if !d.hasKey(combinekey(prefix, to)) { - break - } - if !v.CanSet() { - break - } - nv := reflect.New(v.Type().Elem()) - v.Set(nv) - err = d.value(nv, prefix, to) - } - return -} - -func (d *decodeState) hasKey(key string) bool { - for k := range d.data { - if strings.HasPrefix(k, key+".") || k == key { - return true - } - } - return false -} - -func (d *decodeState) valueBool(v reflect.Value, prefix string, to tagOpt) error { - key := combinekey(prefix, to) - val := d.data.Get(key) - if val == "" { - if to.Default == "" { - return nil - } - val = to.Default - } - return d.setBool(v, val) -} - -func (d *decodeState) setBool(v reflect.Value, val string) error { - bval, err := strconv.ParseBool(val) - if err != nil { - return &BindTypeError{Value: val, Type: v.Type()} - } - v.SetBool(bval) - return nil -} - -func (d *decodeState) valueInt64(v reflect.Value, prefix string, to tagOpt) error { - key := combinekey(prefix, to) - val := d.data.Get(key) - if val == "" { - if to.Default == "" { - return nil - } - val = to.Default - } - return d.setInt64(v, val) -} - -func (d *decodeState) setInt64(v reflect.Value, val string) error { - ival, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return &BindTypeError{Value: val, Type: v.Type()} - } - v.SetInt(ival) - return nil -} - -func (d *decodeState) valueUint64(v reflect.Value, prefix string, to tagOpt) error { - key := combinekey(prefix, to) - val := d.data.Get(key) - if val == "" { - if to.Default == "" { - return nil - } - val = to.Default - } - return d.setUint64(v, val) -} - -func (d *decodeState) setUint64(v reflect.Value, val string) error { - uival, err := strconv.ParseUint(val, 10, 64) - if err != nil { - return &BindTypeError{Value: val, Type: v.Type()} - } - v.SetUint(uival) - return nil -} - -func (d *decodeState) valueFloat64(v reflect.Value, prefix string, to tagOpt) error { - key := combinekey(prefix, to) - val := d.data.Get(key) - if val == "" { - if to.Default == "" { - return nil - } - val = to.Default - } - return d.setFloat64(v, val) -} - -func (d *decodeState) setFloat64(v reflect.Value, val string) error { - fval, err := strconv.ParseFloat(val, 64) - if err != nil { - return &BindTypeError{Value: val, Type: v.Type()} - } - v.SetFloat(fval) - return nil -} - -func (d *decodeState) valueString(v reflect.Value, prefix string, to tagOpt) error { - key := combinekey(prefix, to) - val := d.data.Get(key) - if val == "" { - if to.Default == "" { - return nil - } - val = to.Default - } - return d.setString(v, val) -} - -func (d *decodeState) 
setString(v reflect.Value, val string) error { - v.SetString(val) - return nil -} - -func (d *decodeState) valueSlice(v reflect.Value, prefix string, to tagOpt) error { - key := combinekey(prefix, to) - strs, ok := d.data[key] - if !ok { - strs = strings.Split(to.Default, ",") - } - if len(strs) == 0 { - return nil - } - et := v.Type().Elem() - var setFunc func(reflect.Value, string) error - switch et.Kind() { - case reflect.Bool: - setFunc = d.setBool - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - setFunc = d.setInt64 - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - setFunc = d.setUint64 - case reflect.Float32, reflect.Float64: - setFunc = d.setFloat64 - case reflect.String: - setFunc = d.setString - default: - return &BindTypeError{Type: et, Value: strs[0]} - } - vals := reflect.MakeSlice(v.Type(), len(strs), len(strs)) - for i, str := range strs { - if err := setFunc(vals.Index(i), str); err != nil { - return err - } - } - if v.CanSet() { - v.Set(vals) - } - return nil -} - -func (d *decodeState) valueStruct(v reflect.Value, prefix string, to tagOpt) error { - tv := v.Type() - for i := 0; i < tv.NumField(); i++ { - fv := v.Field(i) - field := tv.Field(i) - fto := parseTag(field.Tag.Get(_tagID)) - if fto.Name == "-" { - continue - } - if af, ok := d.assignFuncs[fto.Name]; ok { - if err := af(fv, tagOpt{}); err != nil { - return err - } - continue - } - if !strings.HasPrefix(fto.Name, _queryPrefix) { - continue - } - fto.Name = fto.Name[len(_queryPrefix):] - if err := d.value(fv, to.Name, fto); err != nil { - return err - } - } - return nil -} - -func (d *decodeState) indirect(v reflect.Value) (encoding.TextUnmarshaler, reflect.Value) { - v0 := v - haveAddr := false - - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - haveAddr = true - v = v.Addr() - } - for { - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && e.Elem().Kind() == reflect.Ptr { - haveAddr = false - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && v.CanSet() { - break - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return u, reflect.Value{} - } - } - if haveAddr { - v = v0 - haveAddr = false - } else { - v = v.Elem() - } - } - return nil, v -} diff --git a/pkg/conf/dsn/query_test.go b/pkg/conf/dsn/query_test.go deleted file mode 100644 index 10d431db5..000000000 --- a/pkg/conf/dsn/query_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package dsn - -import ( - "net/url" - "reflect" - "testing" - "time" - - xtime "github.com/go-kratos/kratos/pkg/time" -) - -type cfg1 struct { - Name string `dsn:"query.name"` - Def string `dsn:"query.def,hello"` - DefSlice []int `dsn:"query.defslice,1,2,3,4"` - Ignore string `dsn:"-"` - FloatNum float64 `dsn:"query.floatNum"` -} - -type cfg2 struct { - Timeout xtime.Duration `dsn:"query.timeout"` -} - -type cfg3 struct { - Username string `dsn:"username"` - Timeout xtime.Duration `dsn:"query.timeout"` -} - -type cfg4 struct { - Timeout xtime.Duration `dsn:"query.timeout,1s"` -} - -func TestDecodeQuery(t *testing.T) { - type args struct { - query url.Values - v interface{} - assignFuncs map[string]assignFunc - } - tests := []struct { - name string - args args - want url.Values - cfg interface{} - wantErr bool - }{ - { - name: "test generic", - args: args{ - query: url.Values{ - 
"name": {"hello"}, - "Ignore": {"test"}, - "floatNum": {"22.33"}, - "adb": {"123"}, - }, - v: &cfg1{}, - }, - want: url.Values{ - "Ignore": {"test"}, - "adb": {"123"}, - }, - cfg: &cfg1{ - Name: "hello", - Def: "hello", - DefSlice: []int{1, 2, 3, 4}, - FloatNum: 22.33, - }, - }, - { - name: "test github.com/go-kratos/kratos/pkg/time", - args: args{ - query: url.Values{ - "timeout": {"1s"}, - }, - v: &cfg2{}, - }, - want: url.Values{}, - cfg: &cfg2{xtime.Duration(time.Second)}, - }, - { - name: "test empty github.com/go-kratos/kratos/pkg/time", - args: args{ - query: url.Values{}, - v: &cfg2{}, - }, - want: url.Values{}, - cfg: &cfg2{}, - }, - { - name: "test github.com/go-kratos/kratos/pkg/time", - args: args{ - query: url.Values{}, - v: &cfg4{}, - }, - want: url.Values{}, - cfg: &cfg4{xtime.Duration(time.Second)}, - }, - { - name: "test build-in value", - args: args{ - query: url.Values{ - "timeout": {"1s"}, - }, - v: &cfg3{}, - assignFuncs: map[string]assignFunc{"username": stringsAssignFunc("hello")}, - }, - want: url.Values{}, - cfg: &cfg3{ - Timeout: xtime.Duration(time.Second), - Username: "hello", - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := bindQuery(tt.args.query, tt.args.v, tt.args.assignFuncs) - if (err != nil) != tt.wantErr { - t.Errorf("DecodeQuery() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("DecodeQuery() = %v, want %v", got, tt.want) - } - if !reflect.DeepEqual(tt.args.v, tt.cfg) { - t.Errorf("DecodeQuery() = %v, want %v", tt.args.v, tt.cfg) - } - }) - } -} diff --git a/pkg/conf/env/README.md b/pkg/conf/env/README.md deleted file mode 100644 index 5afa4ba9b..000000000 --- a/pkg/conf/env/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# env - -## 项目简介 - -全局公用环境变量 diff --git a/pkg/conf/env/env.go b/pkg/conf/env/env.go deleted file mode 100644 index 860895ccc..000000000 --- a/pkg/conf/env/env.go +++ /dev/null @@ -1,76 +0,0 @@ -// Package env get env & app config, all the public field must after init() -// finished and flag.Parse(). -package env - -import ( - "flag" - "os" - "strconv" - "time" -) - -// deploy env. -const ( - DeployEnvDev = "dev" - DeployEnvFat = "fat" - DeployEnvUat = "uat" - DeployEnvPre = "pre" - DeployEnvProd = "prod" -) - -// env default value. -const ( - // env - _region = "region01" - _zone = "zone01" - _deployEnv = "dev" -) - -// env configuration. -var ( - // Region available region where app at. - Region string - // Zone available zone where app at. - Zone string - // Hostname machine hostname. - Hostname string - // DeployEnv deploy env where app at. - DeployEnv string - // AppID is global unique application id, register by service tree. - // such as main.arch.disocvery. - AppID string - // Color is the identification of different experimental group in one caster cluster. - Color string - // DiscoveryNodes is seed nodes. - DiscoveryNodes string -) - -func init() { - var err error - Hostname = os.Getenv("HOSTNAME") - if Hostname == "" { - Hostname, err = os.Hostname() - if err != nil { - Hostname = strconv.Itoa(int(time.Now().UnixNano())) - } - } - addFlag(flag.CommandLine) -} - -func addFlag(fs *flag.FlagSet) { - // env - fs.StringVar(&Region, "region", defaultString("REGION", _region), "available region. or use REGION env variable, value: sh etc.") - fs.StringVar(&Zone, "zone", defaultString("ZONE", _zone), "available zone. 
or use ZONE env variable, value: sh001/sh002 etc.") - fs.StringVar(&AppID, "appid", os.Getenv("APP_ID"), "appid is global unique application id, register by service tree. or use APP_ID env variable.") - fs.StringVar(&DeployEnv, "deploy.env", defaultString("DEPLOY_ENV", _deployEnv), "deploy env. or use DEPLOY_ENV env variable, value: dev/fat1/uat/pre/prod etc.") - fs.StringVar(&Color, "deploy.color", os.Getenv("DEPLOY_COLOR"), "deploy.color is the identification of different experimental group.") - fs.StringVar(&DiscoveryNodes, "discovery.nodes", os.Getenv("DISCOVERY_NODES"), "discovery.nodes is seed nodes. value: 127.0.0.1:7171,127.0.0.2:7171 etc.") -} - -func defaultString(env, value string) string { - v := os.Getenv(env) - if v == "" { - return value - } - return v -} diff --git a/pkg/conf/env/env_test.go b/pkg/conf/env/env_test.go deleted file mode 100644 index a99de0caf..000000000 --- a/pkg/conf/env/env_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package env - -import ( - "flag" - "fmt" - "os" - "testing" -) - -func TestDefaultString(t *testing.T) { - v := defaultString("a", "test") - if v != "test" { - t.Fatal("v must be test") - } - if err := os.Setenv("a", "test1"); err != nil { - t.Fatal(err) - } - v = defaultString("a", "test") - if v != "test1" { - t.Fatal("v must be test1") - } -} - -func TestEnv(t *testing.T) { - tests := []struct { - flag string - env string - def string - val *string - }{ - { - "region", - "REGION", - _region, - &Region, - }, - { - "zone", - "ZONE", - _zone, - &Zone, - }, - { - "deploy.env", - "DEPLOY_ENV", - _deployEnv, - &DeployEnv, - }, - { - "appid", - "APP_ID", - "", - &AppID, - }, - { - "deploy.color", - "DEPLOY_COLOR", - "", - &Color, - }, - } - for _, test := range tests { - // flag set value - t.Run(fmt.Sprintf("%s: flag set", test.env), func(t *testing.T) { - fs := flag.NewFlagSet("", flag.ContinueOnError) - addFlag(fs) - err := fs.Parse([]string{fmt.Sprintf("-%s=%s", test.flag, "test")}) - if err != nil { - t.Fatal(err) - } - if *test.val != "test" { - t.Fatal("val must be test") - } - }) - // flag not set, env set - t.Run(fmt.Sprintf("%s: flag not set, env set", test.env), func(t *testing.T) { - *test.val = "" - os.Setenv(test.env, "test2") - fs := flag.NewFlagSet("", flag.ContinueOnError) - addFlag(fs) - err := fs.Parse([]string{}) - if err != nil { - t.Fatal(err) - } - if *test.val != "test2" { - t.Fatal("val must be test") - } - }) - // flag not set, env not set - t.Run(fmt.Sprintf("%s: flag not set, env not set", test.env), func(t *testing.T) { - *test.val = "" - os.Setenv(test.env, "") - fs := flag.NewFlagSet("", flag.ContinueOnError) - addFlag(fs) - err := fs.Parse([]string{}) - if err != nil { - t.Fatal(err) - } - if *test.val != test.def { - t.Fatal("val must be test") - } - }) - } -} diff --git a/pkg/conf/flagvar/flagvar.go b/pkg/conf/flagvar/flagvar.go deleted file mode 100644 index 7fa8dca74..000000000 --- a/pkg/conf/flagvar/flagvar.go +++ /dev/null @@ -1,18 +0,0 @@ -package flagvar - -import ( - "strings" -) - -// StringVars []string implement flag.Value -type StringVars []string - -func (s StringVars) String() string { - return strings.Join(s, ",") -} - -// Set implement flag.Value -func (s *StringVars) Set(val string) error { - *s = append(*s, val) - return nil -} diff --git a/pkg/conf/paladin/README.md b/pkg/conf/paladin/README.md deleted file mode 100644 index a49a330ba..000000000 --- a/pkg/conf/paladin/README.md +++ /dev/null @@ -1,138 +0,0 @@ -#### paladin - -##### 项目简介 - -paladin 是一个config 
SDK客户端,包括了file、mock几个抽象功能,方便使用本地文件或者sven\apollo配置中心,并且集成了对象自动reload功能。 - -local files: -``` -demo -conf=/data/conf/app/msm-servie.toml -// or dir -demo -conf=/data/conf/app/ -``` - -*注:使用远程配置中心的用户在执行应用,如这里的`demo`时务必**不要**带上`-conf`参数,具体见下文远程配置中心的例子* - -local file example: -``` -type exampleConf struct { - Bool bool - Int int64 - Float float64 - String string -} - -func (e *exampleConf) Set(text string) error { - var ec exampleConf - if err := toml.Unmarshal([]byte(text), &ec); err != nil { - return err - } - *e = ec - return nil -} - -func ExampleClient() { - if err := paladin.Init(); err != nil { - panic(err) - } - var ( - ec exampleConf - eo exampleConf - m paladin.TOML - strs []string - ) - // config unmarshal - if err := paladin.Get("example.toml").UnmarshalTOML(&ec); err != nil { - panic(err) - } - // config setter - if err := paladin.Watch("example.toml", &ec); err != nil { - panic(err) - } - // paladin map - if err := paladin.Watch("example.toml", &m); err != nil { - panic(err) - } - s, err := m.Value("key").String() - b, err := m.Value("key").Bool() - i, err := m.Value("key").Int64() - f, err := m.Value("key").Float64() - // value slice - err = m.Value("strings").Slice(&strs) - // watch key - for event := range paladin.WatchEvent(context.TODO(), "key") { - fmt.Println(event) - } -} -``` - -remote config center example: -``` -type exampleConf struct { - Bool bool - Int int64 - Float float64 - String string -} - -func (e *exampleConf) Set(text string) error { - var ec exampleConf - if err := yaml.Unmarshal([]byte(text), &ec); err != nil { - return err - } - *e = ec - return nil -} - -func ExampleApolloClient() { - /* - pass flags or set envs that apollo needs, for example: - - ``` - export APOLLO_APP_ID=SampleApp - export APOLLO_CLUSTER=default - export APOLLO_CACHE_DIR=/tmp - export APOLLO_META_ADDR=localhost:8080 - export APOLLO_NAMESPACES=example.yml - ``` - */ - - if err := paladin.Init(apollo.PaladinDriverApollo); err != nil { - panic(err) - } - var ( - ec exampleConf - eo exampleConf - m paladin.Map - strs []string - ) - // config unmarshal - if err := paladin.Get("example.yml").UnmarshalYAML(&ec); err != nil { - panic(err) - } - // config setter - if err := paladin.Watch("example.yml", &ec); err != nil { - panic(err) - } - // paladin map - if err := paladin.Watch("example.yml", &m); err != nil { - panic(err) - } - s, err := m.Value("key").String() - b, err := m.Value("key").Bool() - i, err := m.Value("key").Int64() - f, err := m.Value("key").Float64() - // value slice - err = m.Value("strings").Slice(&strs) - // watch key - for event := range paladin.WatchEvent(context.TODO(), "key") { - fmt.Println(event) - } -} -``` - -##### 编译环境 - -- **请只用 Golang v1.12.x 以上版本编译执行** - -##### 依赖包 diff --git a/pkg/conf/paladin/apollo/apollo.go b/pkg/conf/paladin/apollo/apollo.go deleted file mode 100644 index 83b1f978e..000000000 --- a/pkg/conf/paladin/apollo/apollo.go +++ /dev/null @@ -1,275 +0,0 @@ -package apollo - -import ( - "context" - "errors" - "flag" - "log" - "os" - "strings" - "sync" - "time" - - "github.com/philchia/agollo/v4" - - "github.com/go-kratos/kratos/pkg/conf/paladin" -) - -var ( - _ paladin.Client = &apollo{} - defaultValue = "" -) - -type apolloWatcher struct { - keys []string // in apollo, they're called namespaces - C chan paladin.Event -} - -func newApolloWatcher(keys []string) *apolloWatcher { - return &apolloWatcher{keys: keys, C: make(chan paladin.Event, 5)} -} - -func (aw *apolloWatcher) HasKey(key string) bool { - if len(aw.keys) == 0 { - return true - } - for _, 
k := range aw.keys { - if k == key { - return true - } - } - return false -} - -func (aw *apolloWatcher) Handle(event paladin.Event) { - select { - case aw.C <- event: - default: - log.Printf("paladin: event channel full discard ns %s update event", event.Key) - } -} - -// apollo is apollo config client. -type apollo struct { - client agollo.Client - values *paladin.Map - wmu sync.RWMutex - watchers map[*apolloWatcher]struct{} -} - -// Config is apollo config client config. -type Config struct { - AppID string `json:"app_id"` - Cluster string `json:"cluster"` - CacheDir string `json:"cache_dir"` - MetaAddr string `json:"meta_addr"` - Namespaces []string `json:"namespaces"` - AccesskeySecret string `json:"accesskey_secret"` -} - -type apolloDriver struct{} - -var ( - confAppID, confCluster, confCacheDir, confMetaAddr, confNamespaces, accesskeySecret string -) - -func init() { - addApolloFlags() - paladin.Register(PaladinDriverApollo, &apolloDriver{}) -} - -func addApolloFlags() { - flag.StringVar(&confAppID, "apollo.appid", "", "apollo app id") - flag.StringVar(&confCluster, "apollo.cluster", "", "apollo cluster") - flag.StringVar(&confCacheDir, "apollo.cachedir", "/tmp", "apollo cache dir") - flag.StringVar(&confMetaAddr, "apollo.metaaddr", "", "apollo meta server addr, e.g. localhost:8080") - flag.StringVar(&confNamespaces, "apollo.namespaces", "", "subscribed apollo namespaces, comma separated, e.g. app.yml,mysql.yml") - flag.StringVar(&accesskeySecret, "apollo.accesskeysecret", "", "apollo accesskeysecret") -} - -func buildConfigForApollo() (c *Config, err error) { - if appidFromEnv := os.Getenv("APOLLO_APP_ID"); appidFromEnv != "" { - confAppID = appidFromEnv - } - if confAppID == "" { - err = errors.New("invalid apollo appid, pass it via APOLLO_APP_ID=xxx with env or --apollo.appid=xxx with flag") - return - } - if clusterFromEnv := os.Getenv("APOLLO_CLUSTER"); clusterFromEnv != "" { - confCluster = clusterFromEnv - } - if confCluster == "" { - err = errors.New("invalid apollo cluster, pass it via APOLLO_CLUSTER=xxx with env or --apollo.cluster=xxx with flag") - return - } - if cacheDirFromEnv := os.Getenv("APOLLO_CACHE_DIR"); cacheDirFromEnv != "" { - confCacheDir = cacheDirFromEnv - } - if metaAddrFromEnv := os.Getenv("APOLLO_META_ADDR"); metaAddrFromEnv != "" { - confMetaAddr = metaAddrFromEnv - } - if confMetaAddr == "" { - err = errors.New("invalid apollo meta addr, pass it via APOLLO_META_ADDR=xxx with env or --apollo.metaaddr=xxx with flag") - return - } - if namespacesFromEnv := os.Getenv("APOLLO_NAMESPACES"); namespacesFromEnv != "" { - confNamespaces = namespacesFromEnv - } - namespaceNames := strings.Split(confNamespaces, ",") - if len(namespaceNames) == 0 { - err = errors.New("invalid apollo namespaces, pass it via APOLLO_NAMESPACES=xxx with env or --apollo.namespaces=xxx with flag") - return - } - if accesskeySecretEnv := os.Getenv("APOLLO_ACCESS_KEY_SECRET"); accesskeySecretEnv != "" { - accesskeySecret = accesskeySecretEnv - } - c = &Config{ - AppID: confAppID, - Cluster: confCluster, - CacheDir: confCacheDir, - MetaAddr: confMetaAddr, - Namespaces: namespaceNames, - AccesskeySecret: accesskeySecret, - } - return -} - -// New new an apollo config client. -// it watches apollo namespaces changes and updates local cache. -// BTW, in our context, namespaces in apollo means keys in paladin. 
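Besides the flag/env driven path above, the driver's unexported constructor accepts an explicit Config, bypassing flags and environment variables. A small sketch with placeholder values, using only types and methods defined in this file:

```go
func exampleNewApolloClient() {
	// Placeholder values; in production these come from the APOLLO_* env vars or flags.
	conf := &Config{
		AppID:      "SampleApp",
		Cluster:    "default",
		CacheDir:   "/tmp",
		MetaAddr:   "localhost:8080",
		Namespaces: []string{"app.yml"},
	}
	ad := &apolloDriver{}
	client, err := ad.new(conf)
	if err != nil {
		// handle error
	}
	// Namespaces double as paladin keys, so the subscribed namespace is readable directly.
	content, _ := client.Get("app.yml").String()
	_ = content
}
```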
-func (ad *apolloDriver) New() (paladin.Client, error) { - c, err := buildConfigForApollo() - if err != nil { - return nil, err - } - return ad.new(c) -} - -func (ad *apolloDriver) new(conf *Config) (paladin.Client, error) { - if conf == nil { - err := errors.New("invalid apollo conf") - return nil, err - } - client := agollo.NewClient(&agollo.Conf{ - AppID: conf.AppID, - Cluster: conf.Cluster, - NameSpaceNames: conf.Namespaces, // these namespaces will be subscribed at init - CacheDir: conf.CacheDir, - MetaAddr: conf.MetaAddr, - AccesskeySecret: conf.AccesskeySecret, - }) - err := client.Start() - if err != nil { - return nil, err - } - a := &apollo{ - client: client, - values: new(paladin.Map), - watchers: make(map[*apolloWatcher]struct{}), - } - raws, err := a.loadValues(conf.Namespaces) - if err != nil { - return nil, err - } - a.values.Store(raws) - // watch namespaces by default. - a.WatchEvent(context.TODO(), conf.Namespaces...) - go a.watchproc(conf.Namespaces) - return a, nil -} - -// loadValues load values from apollo namespaces to values -func (a *apollo) loadValues(keys []string) (values map[string]*paladin.Value, err error) { - values = make(map[string]*paladin.Value, len(keys)) - for _, k := range keys { - if values[k], err = a.loadValue(k); err != nil { - return - } - } - return -} - -// loadValue load value from apollo namespace content to value -func (a *apollo) loadValue(key string) (*paladin.Value, error) { - content := a.client.GetContent(agollo.WithNamespace(key)) - return paladin.NewValue(content, content), nil -} - -// reloadValue reload value by key and send event -func (a *apollo) reloadValue(key string) (err error) { - // NOTE: in some case immediately read content from client after receive event - // will get old content due to cache, sleep 100ms make sure get correct content. - time.Sleep(100 * time.Millisecond) - var ( - value *paladin.Value - rawValue string - ) - value, err = a.loadValue(key) - if err != nil { - return - } - rawValue, err = value.Raw() - if err != nil { - return - } - raws := a.values.Load() - raws[key] = value - a.values.Store(raws) - a.wmu.RLock() - n := 0 - for w := range a.watchers { - if w.HasKey(key) { - n++ - // FIXME(Colstuwjx): check change event and send detail type like EventAdd\Update\Delete. - w.Handle(paladin.Event{Event: paladin.EventUpdate, Key: key, Value: rawValue}) - } - } - a.wmu.RUnlock() - log.Printf("paladin: reload config: %s events: %d\n", key, n) - return -} - -// apollo config daemon to watch remote apollo notifications -func (a *apollo) watchproc(keys []string) { - a.client.OnUpdate(func(event *agollo.ChangeEvent) { - if err := a.reloadValue(event.Namespace); err != nil { - log.Printf("paladin: load key: %s error: %s, skipped", event.Namespace, err) - } - }) -} -// Get return value by key. -func (a *apollo) Get(key string) *paladin.Value { - return a.values.Get(key) -} - -// GetAll return value map. -func (a *apollo) GetAll() *paladin.Map { - return a.values -} - -// WatchEvent watch with the specified keys. -func (a *apollo) WatchEvent(ctx context.Context, keys ...string) <-chan paladin.Event { - aw := newApolloWatcher(keys) - err := a.client.SubscribeToNamespaces(keys...) - if err != nil { - log.Printf("subscribe namespaces %v failed, %v", keys, err) - return aw.C - } - a.wmu.Lock() - a.watchers[aw] = struct{}{} - a.wmu.Unlock() - return aw.C -} - -// Close close watcher. 
-func (a *apollo) Close() (err error) { - if err = a.client.Stop(); err != nil { - return - } - a.wmu.RLock() - for w := range a.watchers { - close(w.C) - } - a.wmu.RUnlock() - return -} \ No newline at end of file diff --git a/pkg/conf/paladin/apollo/apollo_test.go b/pkg/conf/paladin/apollo/apollo_test.go deleted file mode 100644 index 387e77ce2..000000000 --- a/pkg/conf/paladin/apollo/apollo_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package apollo - -import ( - "context" - "fmt" - "log" - "os" - "testing" - "time" - - "github.com/go-kratos/kratos/pkg/conf/paladin/apollo/internal/mockserver" -) - -func TestMain(m *testing.M) { - setup() - code := m.Run() - teardown() - os.Exit(code) -} - -func setup() { - go func() { - if err := mockserver.Run(); err != nil { - log.Fatal(err) - } - }() - // wait for mock server to run - time.Sleep(time.Millisecond * 500) -} - -func teardown() { - mockserver.Close() -} - -func TestApolloMock(t *testing.T) { - var ( - testAppYAML = "app.yml" - testAppYAMLContent1 = "test: test12234\ntest2: test333" - testAppYAMLContent2 = "test: 1111" - testClientJSON = "client.json" - testClientJSONContent = `{"name":"agollo"}` - ) - os.Setenv("APOLLO_APP_ID", "SampleApp") - os.Setenv("APOLLO_CLUSTER", "default") - os.Setenv("APOLLO_CACHE_DIR", "/tmp") - os.Setenv("APOLLO_META_ADDR", "localhost:8010") - os.Setenv("APOLLO_NAMESPACES", fmt.Sprintf("%s,%s", testAppYAML, testClientJSON)) - mockserver.Set(testAppYAML, "content", testAppYAMLContent1) - mockserver.Set(testClientJSON, "content", testClientJSONContent) - ad := &apolloDriver{} - apollo, err := ad.New() - if err != nil { - t.Fatalf("new apollo error, %v", err) - } - value := apollo.Get(testAppYAML) - if content, _ := value.String(); content != testAppYAMLContent1 { - t.Fatalf("got app.yml unexpected value %s", content) - } - value = apollo.Get(testClientJSON) - if content, _ := value.String(); content != testClientJSONContent { - t.Fatalf("got app.yml unexpected value %s", content) - } - mockserver.Set(testAppYAML, "content", testAppYAMLContent2) - updates := apollo.WatchEvent(context.TODO(), testAppYAML) - select { - case <-updates: - case <-time.After(time.Millisecond * 30000): - } - value = apollo.Get(testAppYAML) - if content, _ := value.String(); content != testAppYAMLContent2 { - t.Fatalf("got app.yml unexpected updated value %s", content) - } -} diff --git a/pkg/conf/paladin/apollo/const.go b/pkg/conf/paladin/apollo/const.go deleted file mode 100644 index 1aac397fe..000000000 --- a/pkg/conf/paladin/apollo/const.go +++ /dev/null @@ -1,6 +0,0 @@ -package apollo - -const ( - // PaladinDriverApollo ... 
- PaladinDriverApollo = "apollo" -) diff --git a/pkg/conf/paladin/apollo/internal/mockserver/mockserver.go b/pkg/conf/paladin/apollo/internal/mockserver/mockserver.go deleted file mode 100644 index 2aecb40a8..000000000 --- a/pkg/conf/paladin/apollo/internal/mockserver/mockserver.go +++ /dev/null @@ -1,149 +0,0 @@ -package mockserver - -import ( - "context" - "encoding/json" - "net/http" - "strings" - "sync" - "time" -) - -type notification struct { - NamespaceName string `json:"namespaceName,omitempty"` - NotificationID int `json:"notificationId,omitempty"` -} - -type result struct { - // AppID string `json:"appId"` - // Cluster string `json:"cluster"` - NamespaceName string `json:"namespaceName"` - Configurations map[string]string `json:"configurations"` - ReleaseKey string `json:"releaseKey"` -} - -type mockServer struct { - server http.Server - - lock sync.Mutex - notifications map[string]int - config map[string]map[string]string -} - -func (s *mockServer) NotificationHandler(rw http.ResponseWriter, req *http.Request) { - s.lock.Lock() - defer s.lock.Unlock() - req.ParseForm() - var notifications []notification - if err := json.Unmarshal([]byte(req.FormValue("notifications")), ¬ifications); err != nil { - rw.WriteHeader(http.StatusInternalServerError) - return - } - var changes []notification - for _, noti := range notifications { - if currentID := s.notifications[noti.NamespaceName]; currentID != noti.NotificationID { - changes = append(changes, notification{NamespaceName: noti.NamespaceName, NotificationID: currentID}) - } - } - - if len(changes) == 0 { - rw.WriteHeader(http.StatusNotModified) - return - } - bts, err := json.Marshal(&changes) - if err != nil { - rw.WriteHeader(http.StatusInternalServerError) - return - } - rw.Write(bts) -} - -func (s *mockServer) ConfigHandler(rw http.ResponseWriter, req *http.Request) { - req.ParseForm() - - strs := strings.Split(req.RequestURI, "/") - var namespace, releaseKey = strings.Split(strs[4], "?")[0], req.FormValue("releaseKey") - config := s.Get(namespace) - - ret := result{NamespaceName: namespace, Configurations: config, ReleaseKey: releaseKey} - bts, err := json.Marshal(&ret) - if err != nil { - rw.WriteHeader(http.StatusInternalServerError) - return - } - rw.Write(bts) -} - -var server *mockServer - -func (s *mockServer) Set(namespace, key, value string) { - server.lock.Lock() - defer server.lock.Unlock() - - notificationID := s.notifications[namespace] - notificationID++ - s.notifications[namespace] = notificationID - - if kv, ok := s.config[namespace]; ok { - kv[key] = value - return - } - kv := map[string]string{key: value} - s.config[namespace] = kv -} - -func (s *mockServer) Get(namespace string) map[string]string { - server.lock.Lock() - defer server.lock.Unlock() - - return s.config[namespace] -} - -func (s *mockServer) Delete(namespace, key string) { - server.lock.Lock() - defer server.lock.Unlock() - - if kv, ok := s.config[namespace]; ok { - delete(kv, key) - } - - notificationID := s.notifications[namespace] - notificationID++ - s.notifications[namespace] = notificationID -} - -// Set namespace's key value -func Set(namespace, key, value string) { - server.Set(namespace, key, value) -} - -// Delete namespace's key -func Delete(namespace, key string) { - server.Delete(namespace, key) -} - -// Run mock server -func Run() error { - initServer() - return server.server.ListenAndServe() -} - -func initServer() { - server = &mockServer{ - notifications: map[string]int{}, - config: map[string]map[string]string{}, - } - mux := 
http.NewServeMux() - mux.Handle("/notifications/", http.HandlerFunc(server.NotificationHandler)) - mux.Handle("/configs/", http.HandlerFunc(server.ConfigHandler)) - server.server.Handler = mux - server.server.Addr = ":8010" -} - -// Close mock server -func Close() error { - ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second)) - defer cancel() - - return server.server.Shutdown(ctx) -} diff --git a/pkg/conf/paladin/client.go b/pkg/conf/paladin/client.go deleted file mode 100644 index 62c0532fe..000000000 --- a/pkg/conf/paladin/client.go +++ /dev/null @@ -1,49 +0,0 @@ -package paladin - -import ( - "context" -) - -const ( - // EventAdd config add event. - EventAdd EventType = iota - // EventUpdate config update event. - EventUpdate - // EventRemove config remove event. - EventRemove -) - -// EventType is config event. -type EventType int - -// Event is watch event. -type Event struct { - Event EventType - Key string - Value string -} - -// Watcher is config watcher. -type Watcher interface { - WatchEvent(context.Context, ...string) <-chan Event - Close() error -} - -// Setter is value setter. -type Setter interface { - Set(string) error -} - -// Getter is value getter. -type Getter interface { - // Get a config value by a config key(may be a sven filename). - Get(string) *Value - // GetAll return all config key->value map. - GetAll() *Map -} - -// Client is config client. -type Client interface { - Watcher - Getter -} diff --git a/pkg/conf/paladin/default.go b/pkg/conf/paladin/default.go deleted file mode 100644 index a9410f563..000000000 --- a/pkg/conf/paladin/default.go +++ /dev/null @@ -1,92 +0,0 @@ -package paladin - -import ( - "context" - "errors" - "flag" -) - -var ( - // DefaultClient default client. - DefaultClient Client - confPath string -) - -func init() { - flag.StringVar(&confPath, "conf", "", "default config path") -} - -// Init init config client. -// If confPath is set, it inits file client by default -// Otherwise we could pass args to init remote client -// args[0]: driver name, string type -func Init(args ...interface{}) (err error) { - if confPath != "" { - DefaultClient, err = NewFile(confPath) - } else { - var ( - driver Driver - ) - argsLackErr := errors.New("lack of remote config center args") - if len(args) == 0 { - panic(argsLackErr.Error()) - } - argsInvalidErr := errors.New("invalid remote config center args") - driverName, ok := args[0].(string) - if !ok { - panic(argsInvalidErr.Error()) - } - driver, err = GetDriver(driverName) - if err != nil { - return - } - DefaultClient, err = driver.New() - } - if err != nil { - return - } - return -} - -// Watch watch on a key. The configuration implements the setter interface, which is invoked when the configuration changes. -func Watch(key string, s Setter) error { - v := DefaultClient.Get(key) - str, err := v.Raw() - if err != nil { - return err - } - if err := s.Set(str); err != nil { - return err - } - go func() { - for event := range WatchEvent(context.Background(), key) { - s.Set(event.Value) - } - }() - return nil -} - -// WatchEvent watch on multi keys. Events are returned when the configuration changes. -func WatchEvent(ctx context.Context, keys ...string) <-chan Event { - return DefaultClient.WatchEvent(ctx, keys...) -} - -// Get return value by key. -func Get(key string) *Value { - return DefaultClient.Get(key) -} - -// GetAll return all config map. -func GetAll() *Map { - return DefaultClient.GetAll() -} - -// Keys return values key. 
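// A minimal sketch (editor's illustration, assuming the mock server above is
// running on :8010 as configured in initServer) of exercising its two
// endpoints by hand: the Apollo-style notification long poll and the
// per-namespace config fetch. The "SampleApp"/"default"/"app.yml" names are
// placeholders.
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	// NotificationHandler compares the posted notificationId with its current
	// one and answers 304 Not Modified when nothing changed, otherwise a JSON
	// array of {namespaceName, notificationId}.
	resp, err := http.PostForm("http://localhost:8010/notifications/", url.Values{
		"notifications": {`[{"namespaceName":"app.yml","notificationId":0}]`},
	})
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body))

	// ConfigHandler takes the namespace from the fifth path segment:
	// /configs/{appId}/{cluster}/{namespace}.
	cfg, err := http.Get("http://localhost:8010/configs/SampleApp/default/app.yml")
	if err != nil {
		panic(err)
	}
	defer cfg.Body.Close()
	body, _ = ioutil.ReadAll(cfg.Body)
	fmt.Println(cfg.StatusCode, string(body))
}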
-func Keys() []string { - return DefaultClient.GetAll().Keys() -} - -// Close close watcher. -func Close() error { - return DefaultClient.Close() -} diff --git a/pkg/conf/paladin/driver.go b/pkg/conf/paladin/driver.go deleted file mode 100644 index 9f2151e4c..000000000 --- a/pkg/conf/paladin/driver.go +++ /dev/null @@ -1,9 +0,0 @@ -package paladin - -// Driver defined paladin remote client impl -// each remote config center driver must do -// 1. implements `New` method -// 2. call `Register` to register itself -type Driver interface { - New() (Client, error) -} diff --git a/pkg/conf/paladin/example_test.go b/pkg/conf/paladin/example_test.go deleted file mode 100644 index 5262785a7..000000000 --- a/pkg/conf/paladin/example_test.go +++ /dev/null @@ -1,147 +0,0 @@ -package paladin_test - -import ( - "context" - "fmt" - - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/conf/paladin/apollo" - - "github.com/BurntSushi/toml" -) - -type exampleConf struct { - Bool bool - Int int64 - Float float64 - String string - Strings []string -} - -func (e *exampleConf) Set(text string) error { - var ec exampleConf - if err := toml.Unmarshal([]byte(text), &ec); err != nil { - return err - } - *e = ec - return nil -} - -// ExampleClient is an example client usage. -// exmaple.toml: -/* - bool = true - int = 100 - float = 100.1 - string = "text" - strings = ["a", "b", "c"] -*/ -func ExampleClient() { - if err := paladin.Init(); err != nil { - panic(err) - } - var ec exampleConf - // var setter - if err := paladin.Watch("example.toml", &ec); err != nil { - panic(err) - } - if err := paladin.Get("example.toml").UnmarshalTOML(&ec); err != nil { - panic(err) - } - // use exampleConf - // watch event key - go func() { - for event := range paladin.WatchEvent(context.TODO(), "key") { - fmt.Println(event) - } - }() -} - -// ExampleApolloClient is an example client for apollo driver usage. -func ExampleApolloClient() { - /* - pass flags or set envs that apollo needs, for example: - - ``` - export APOLLO_APP_ID=SampleApp - export APOLLO_CLUSTER=default - export APOLLO_CACHE_DIR=/tmp - export APOLLO_META_ADDR=localhost:8080 - export APOLLO_NAMESPACES=example.yml - ``` - */ - - if err := paladin.Init(apollo.PaladinDriverApollo); err != nil { - panic(err) - } - var ec exampleConf - // var setter - if err := paladin.Watch("example.yml", &ec); err != nil { - panic(err) - } - if err := paladin.Get("example.yml").UnmarshalYAML(&ec); err != nil { - panic(err) - } - // use exampleConf - // watch event key - go func() { - for event := range paladin.WatchEvent(context.TODO(), "key") { - fmt.Println(event) - } - }() -} - -// ExampleMap is an example map usage. 
-// exmaple.toml: -/* - bool = true - int = 100 - float = 100.1 - string = "text" - strings = ["a", "b", "c"] - - [object] - string = "text" - bool = true - int = 100 - float = 100.1 - strings = ["a", "b", "c"] -*/ -func ExampleMap() { - var ( - m paladin.TOML - strs []string - ) - // paladin toml - if err := paladin.Watch("example.toml", &m); err != nil { - panic(err) - } - // value string - s, err := m.Get("string").String() - if err != nil { - s = "default" - } - fmt.Println(s) - // value bool - b, err := m.Get("bool").Bool() - if err != nil { - b = false - } - fmt.Println(b) - // value int - i, err := m.Get("int").Int64() - if err != nil { - i = 100 - } - fmt.Println(i) - // value float - f, err := m.Get("float").Float64() - if err != nil { - f = 100.1 - } - fmt.Println(f) - // value slice - if err = m.Get("strings").Slice(&strs); err == nil { - fmt.Println(strs) - } -} diff --git a/pkg/conf/paladin/file.go b/pkg/conf/paladin/file.go deleted file mode 100644 index c79abe3e5..000000000 --- a/pkg/conf/paladin/file.go +++ /dev/null @@ -1,203 +0,0 @@ -package paladin - -import ( - "context" - "fmt" - "io/ioutil" - "log" - "os" - "path" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/fsnotify/fsnotify" -) - -const ( - defaultChSize = 10 -) - -var _ Client = &file{} - -// file is file config client. -type file struct { - values *Map - rawVal map[string]*Value - - watchChs map[string][]chan Event - mx sync.Mutex - wg sync.WaitGroup - - base string - done chan struct{} -} - -func isHiddenFile(name string) bool { - // TODO: support windows. - return strings.HasPrefix(filepath.Base(name), ".") -} - -func readAllPaths(base string) ([]string, error) { - fi, err := os.Stat(base) - if err != nil { - return nil, fmt.Errorf("check local config file fail! error: %s", err) - } - // dirs or file to paths - var paths []string - if fi.IsDir() { - files, err := ioutil.ReadDir(base) - if err != nil { - return nil, fmt.Errorf("read dir %s error: %s", base, err) - } - for _, file := range files { - if !file.IsDir() && !isHiddenFile(file.Name()) { - paths = append(paths, path.Join(base, file.Name())) - } - } - } else { - paths = append(paths, base) - } - return paths, nil -} - -func loadValuesFromPaths(paths []string) (map[string]*Value, error) { - // laod config file to values - var err error - values := make(map[string]*Value, len(paths)) - for _, fpath := range paths { - if values[path.Base(fpath)], err = loadValue(fpath); err != nil { - return nil, err - } - } - return values, nil -} - -func loadValue(fpath string) (*Value, error) { - data, err := ioutil.ReadFile(fpath) - if err != nil { - return nil, err - } - content := string(data) - return &Value{val: content, raw: content}, nil -} - -// NewFile new a config file client. -// conf = /data/conf/app/ -// conf = /data/conf/app/xxx.toml -func NewFile(base string) (Client, error) { - // platform slash - base = filepath.FromSlash(base) - - paths, err := readAllPaths(base) - if err != nil { - return nil, err - } - if len(paths) == 0 { - return nil, fmt.Errorf("empty config path") - } - - rawVal, err := loadValuesFromPaths(paths) - if err != nil { - return nil, err - } - - valMap := &Map{} - valMap.Store(rawVal) - fc := &file{ - values: valMap, - rawVal: rawVal, - watchChs: make(map[string][]chan Event), - - base: base, - done: make(chan struct{}, 1), - } - - fc.wg.Add(1) - go fc.daemon() - - return fc, nil -} - -// Get return value by key. -func (f *file) Get(key string) *Value { - return f.values.Get(key) -} - -// GetAll return value map. 
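// A minimal sketch (editor's illustration) of using the file client directly,
// assuming a config directory such as /data/conf/app/ containing demo.toml
// (both names are placeholders). NewFile accepts either a directory or a
// single file; values are keyed by base filename, and the daemon goroutine
// turns fsnotify Write/Create events into EventUpdate on the watch channel.
package main

import (
	"context"
	"log"

	"github.com/go-kratos/kratos/pkg/conf/paladin"
)

func main() {
	client, err := paladin.NewFile("/data/conf/app/")
	if err != nil {
		log.Fatal(err)
	}
	raw, err := client.Get("demo.toml").Raw()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("initial content:", raw)

	// Blocks and logs every reload of demo.toml until the client is closed.
	for ev := range client.WatchEvent(context.Background(), "demo.toml") {
		log.Printf("key %s updated: %s", ev.Key, ev.Value)
	}
}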
-func (f *file) GetAll() *Map { - return f.values -} - -// WatchEvent watch multi key. -func (f *file) WatchEvent(ctx context.Context, keys ...string) <-chan Event { - f.mx.Lock() - defer f.mx.Unlock() - ch := make(chan Event, defaultChSize) - for _, key := range keys { - f.watchChs[key] = append(f.watchChs[key], ch) - } - return ch -} - -// Close close watcher. -func (f *file) Close() error { - f.done <- struct{}{} - f.wg.Wait() - return nil -} - -// file config daemon to watch file modification -func (f *file) daemon() { - defer f.wg.Done() - fswatcher, err := fsnotify.NewWatcher() - if err != nil { - log.Printf("create file watcher fail! reload function will lose efficacy error: %s", err) - return - } - if err = fswatcher.Add(f.base); err != nil { - log.Printf("create fsnotify for base path %s fail %s, reload function will lose efficacy", f.base, err) - return - } - log.Printf("start watch filepath: %s", f.base) - for event := range fswatcher.Events { - switch event.Op { - // use vim edit config will trigger rename - case fsnotify.Write, fsnotify.Create: - f.reloadFile(event.Name) - case fsnotify.Chmod: - default: - log.Printf("unsupport event %s ingored", event) - } - } -} - -func (f *file) reloadFile(name string) { - if isHiddenFile(name) { - return - } - // NOTE: in some case immediately read file content after receive event - // will get old content, sleep 100ms make sure get correct content. - time.Sleep(200 * time.Millisecond) - key := filepath.Base(name) - val, err := loadValue(name) - if err != nil { - log.Printf("load file %s error: %s, skipped", name, err) - return - } - f.rawVal[key] = val - f.values.Store(f.rawVal) - - f.mx.Lock() - chs := f.watchChs[key] - f.mx.Unlock() - - for _, ch := range chs { - select { - case ch <- Event{Event: EventUpdate, Key: key, Value: val.raw}: - default: - log.Printf("event channel full discard file %s update event", name) - } - } -} diff --git a/pkg/conf/paladin/file_test.go b/pkg/conf/paladin/file_test.go deleted file mode 100644 index 36232ed61..000000000 --- a/pkg/conf/paladin/file_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package paladin - -import ( - "context" - "io/ioutil" - "os" - "path/filepath" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func TestNewFile(t *testing.T) { - // test data - path := "/tmp/test_conf/" - assert.Nil(t, os.MkdirAll(path, 0700)) - assert.Nil(t, ioutil.WriteFile(filepath.Join(path, "test.toml"), []byte(` - text = "hello" - number = 100 - slice = [1, 2, 3] - sliceStr = ["1", "2", "3"] - `), 0644)) - // test client - cli, err := NewFile(filepath.Join(path, "test.toml")) - assert.Nil(t, err) - assert.NotNil(t, cli) - // test map - m := Map{} - text, err := cli.Get("test.toml").String() - assert.Nil(t, err) - assert.Nil(t, m.Set(text), "text") - s, err := m.Get("text").String() - assert.Nil(t, err) - assert.Equal(t, s, "hello", "text") - n, err := m.Get("number").Int64() - assert.Nil(t, err) - assert.Equal(t, n, int64(100), "number") -} - -func TestNewFilePath(t *testing.T) { - // test data - path := "/tmp/test_conf/" - assert.Nil(t, os.MkdirAll(path, 0700)) - assert.Nil(t, ioutil.WriteFile(filepath.Join(path, "test.toml"), []byte(` - text = "hello" - number = 100 - `), 0644)) - assert.Nil(t, ioutil.WriteFile(filepath.Join(path, "abc.toml"), []byte(` - text = "hello" - number = 100 - `), 0644)) - // test client - cli, err := NewFile(path) - assert.Nil(t, err) - assert.NotNil(t, cli) - // test map - m := Map{} - text, err := cli.Get("test.toml").String() - assert.Nil(t, err) - assert.Nil(t, 
m.Set(text), "text") - s, err := m.Get("text").String() - assert.Nil(t, err, s) - assert.Equal(t, s, "hello", "text") - n, err := m.Get("number").Int64() - assert.Nil(t, err, s) - assert.Equal(t, n, int64(100), "number") -} - -/* -func TestFileEvent(t *testing.T) { - // test data - path := "/tmp/test_conf_event/" - assert.Nil(t, os.MkdirAll(path, 0700)) - assert.Nil(t, ioutil.WriteFile(filepath.Join(path, "test.toml"), []byte(` - text = "hello" - number = 100 - `), 0644)) - assert.Nil(t, ioutil.WriteFile(filepath.Join(path, "abc.toml"), []byte(` - text = "hello" - number = 100 - `), 0644)) - // test client - cli, err := NewFile(path) - assert.Nil(t, err) - assert.NotNil(t, cli) - ch := cli.WatchEvent(context.Background(), "test.toml", "abc.toml") - time.Sleep(time.Second) - timeout := time.NewTimer(time.Second) - - // for file test.toml - ioutil.WriteFile(filepath.Join(path, "test.toml"), []byte(`hello`), 0644) - select { - case <-timeout.C: - t.Fatalf("run test timeout") - case ev := <-ch: - if ev.Key == "test.toml" { - assert.Equal(t, EventUpdate, ev.Event) - assert.Equal(t, "hello", ev.Value) - } - } - content1, _ := cli.Get("test.toml").String() - assert.Equal(t, "hello", content1) - - // for file abc.toml - ioutil.WriteFile(filepath.Join(path, "abc.toml"), []byte(`test`), 0644) - select { - case <-timeout.C: - t.Fatalf("run test timeout") - case ev := <-ch: - if ev.Key == "abc.toml" { - assert.Equal(t, EventUpdate, ev.Event) - assert.Equal(t, "test", ev.Value) - } - } - content2, _ := cli.Get("abc.toml").String() - assert.Equal(t, "test", content2) -} -*/ - -func TestHiddenFile(t *testing.T) { - path := "/tmp/test_hidden_event/" - assert.Nil(t, os.MkdirAll(path, 0700)) - assert.Nil(t, ioutil.WriteFile(filepath.Join(path, "test.toml"), []byte(`hello`), 0644)) - assert.Nil(t, ioutil.WriteFile(filepath.Join(path, "abc.toml"), []byte(` - text = "hello" - number = 100 - `), 0644)) - // test client - cli, err := NewFile(path) - assert.Nil(t, err) - assert.NotNil(t, cli) - cli.WatchEvent(context.Background(), "test.toml") - time.Sleep(time.Millisecond) - ioutil.WriteFile(filepath.Join(path, "abc.toml"), []byte(`hello`), 0644) - time.Sleep(time.Second) - content1, _ := cli.Get("test.toml").String() - assert.Equal(t, "hello", content1) - _, err = cli.Get(".abc.toml").String() - assert.NotNil(t, err) -} - -func TestOneLevelSymbolicFile(t *testing.T) { - path := "/tmp/test_symbolic_link/" - path2 := "/tmp/test_symbolic_link/configs/" - assert.Nil(t, os.MkdirAll(path2, 0700)) - assert.Nil(t, ioutil.WriteFile(filepath.Join(path, "test.toml"), []byte(`hello`), 0644)) - assert.Nil(t, os.Symlink(filepath.Join(path, "test.toml"), filepath.Join(path2, "test.toml.ln"))) - // test client - cli, err := NewFile(path2) - assert.Nil(t, err) - assert.NotNil(t, cli) - content, _ := cli.Get("test.toml.ln").String() - assert.Equal(t, "hello", content) - os.Remove(filepath.Join(path, "test.toml")) - os.Remove(filepath.Join(path2, "test.toml.ln")) -} diff --git a/pkg/conf/paladin/helper.go b/pkg/conf/paladin/helper.go deleted file mode 100644 index 115d438fc..000000000 --- a/pkg/conf/paladin/helper.go +++ /dev/null @@ -1,76 +0,0 @@ -package paladin - -import "time" - -// Bool return bool value. -func Bool(v *Value, def bool) bool { - b, err := v.Bool() - if err != nil { - return def - } - return b -} - -// Int return int value. -func Int(v *Value, def int) int { - i, err := v.Int() - if err != nil { - return def - } - return i -} - -// Int32 return int32 value. 
-func Int32(v *Value, def int32) int32 { - i, err := v.Int32() - if err != nil { - return def - } - return i -} - -// Int64 return int64 value. -func Int64(v *Value, def int64) int64 { - i, err := v.Int64() - if err != nil { - return def - } - return i -} - -// Float32 return float32 value. -func Float32(v *Value, def float32) float32 { - f, err := v.Float32() - if err != nil { - return def - } - return f -} - -// Float64 return float32 value. -func Float64(v *Value, def float64) float64 { - f, err := v.Float64() - if err != nil { - return def - } - return f -} - -// String return string value. -func String(v *Value, def string) string { - s, err := v.String() - if err != nil { - return def - } - return s -} - -// Duration parses a duration string. A duration string is a possibly signed sequence of decimal numbers -// each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". -func Duration(v *Value, def time.Duration) time.Duration { - dur, err := v.Duration() - if err != nil { - return def - } - return dur -} diff --git a/pkg/conf/paladin/helper_test.go b/pkg/conf/paladin/helper_test.go deleted file mode 100644 index aaa17d4d4..000000000 --- a/pkg/conf/paladin/helper_test.go +++ /dev/null @@ -1,286 +0,0 @@ -package paladin - -import ( - "testing" - "time" -) - -func TestBool(t *testing.T) { - type args struct { - v *Value - def bool - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "ok", - args: args{v: &Value{val: true}}, - want: true, - }, - { - name: "fail", - args: args{v: &Value{}}, - want: false, - }, - { - name: "default", - args: args{v: &Value{}, def: true}, - want: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := Bool(tt.args.v, tt.args.def); got != tt.want { - t.Errorf("Bool() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestInt(t *testing.T) { - type args struct { - v *Value - def int - } - tests := []struct { - name string - args args - want int - }{ - { - name: "ok", - args: args{v: &Value{val: int64(2233)}}, - want: 2233, - }, - { - name: "fail", - args: args{v: &Value{}}, - want: 0, - }, - { - name: "default", - args: args{v: &Value{}, def: 2233}, - want: 2233, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := Int(tt.args.v, tt.args.def); got != tt.want { - t.Errorf("Int() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestInt32(t *testing.T) { - type args struct { - v *Value - def int32 - } - tests := []struct { - name string - args args - want int32 - }{ - { - name: "ok", - args: args{v: &Value{val: int64(2233)}}, - want: 2233, - }, - { - name: "fail", - args: args{v: &Value{}}, - want: 0, - }, - { - name: "default", - args: args{v: &Value{}, def: 2233}, - want: 2233, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := Int32(tt.args.v, tt.args.def); got != tt.want { - t.Errorf("Int32() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestInt64(t *testing.T) { - type args struct { - v *Value - def int64 - } - tests := []struct { - name string - args args - want int64 - }{ - { - name: "ok", - args: args{v: &Value{val: int64(2233)}}, - want: 2233, - }, - { - name: "fail", - args: args{v: &Value{}}, - want: 0, - }, - { - name: "default", - args: args{v: &Value{}, def: 2233}, - want: 2233, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := Int64(tt.args.v, tt.args.def); got != 
tt.want { - t.Errorf("Int64() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestFloat32(t *testing.T) { - type args struct { - v *Value - def float32 - } - tests := []struct { - name string - args args - want float32 - }{ - { - name: "ok", - args: args{v: &Value{val: float64(2233)}}, - want: float32(2233), - }, - { - name: "fail", - args: args{v: &Value{}}, - want: 0, - }, - { - name: "default", - args: args{v: &Value{}, def: float32(2233)}, - want: float32(2233), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := Float32(tt.args.v, tt.args.def); got != tt.want { - t.Errorf("Float32() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestFloat64(t *testing.T) { - type args struct { - v *Value - def float64 - } - tests := []struct { - name string - args args - want float64 - }{ - { - name: "ok", - args: args{v: &Value{val: float64(2233)}}, - want: float64(2233), - }, - { - name: "fail", - args: args{v: &Value{}}, - want: 0, - }, - { - name: "default", - args: args{v: &Value{}, def: float64(2233)}, - want: float64(2233), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := Float64(tt.args.v, tt.args.def); got != tt.want { - t.Errorf("Float64() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestString(t *testing.T) { - type args struct { - v *Value - def string - } - tests := []struct { - name string - args args - want string - }{ - { - name: "ok", - args: args{v: &Value{val: "test"}}, - want: "test", - }, - { - name: "fail", - args: args{v: &Value{}}, - want: "", - }, - { - name: "default", - args: args{v: &Value{}, def: "test"}, - want: "test", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := String(tt.args.v, tt.args.def); got != tt.want { - t.Errorf("String() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestDuration(t *testing.T) { - type args struct { - v *Value - def time.Duration - } - tests := []struct { - name string - args args - want time.Duration - }{ - { - name: "ok", - args: args{v: &Value{val: "1s"}}, - want: time.Second, - }, - { - name: "fail", - args: args{v: &Value{}}, - want: 0, - }, - { - name: "default", - args: args{v: &Value{}, def: time.Second}, - want: time.Second, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := Duration(tt.args.v, tt.args.def); got != tt.want { - t.Errorf("Duration() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/pkg/conf/paladin/map.go b/pkg/conf/paladin/map.go deleted file mode 100644 index fa43dc116..000000000 --- a/pkg/conf/paladin/map.go +++ /dev/null @@ -1,60 +0,0 @@ -package paladin - -import ( - "strings" - "sync/atomic" -) - -// KeyNamed key naming to lower case. -func KeyNamed(key string) string { - return strings.ToLower(key) -} - -// Map is config map, key(filename) -> value(file). -type Map struct { - values atomic.Value -} - -// Store sets the value of the Value to values map. -func (m *Map) Store(values map[string]*Value) { - dst := make(map[string]*Value, len(values)) - for k, v := range values { - dst[KeyNamed(k)] = v - } - m.values.Store(dst) -} - -// Load returns the value set by the most recent Store. -func (m *Map) Load() map[string]*Value { - src := m.values.Load().(map[string]*Value) - dst := make(map[string]*Value, len(src)) - for k, v := range src { - dst[k] = v - } - return dst -} - -// Exist check if values map exist a key. 
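// A minimal sketch (editor's illustration) of the helper functions above in
// their usual role: reading optional keys out of a config map with a fallback
// default. The keys and values are made up for the example; "timeout" is
// deliberately missing so Duration falls back to its default.
package main

import (
	"fmt"
	"time"

	"github.com/go-kratos/kratos/pkg/conf/paladin"
)

func main() {
	var m paladin.Map
	const text = `
addr = "127.0.0.1:8000"
debug = true
`
	if err := m.Set(text); err != nil {
		panic(err)
	}
	addr := paladin.String(m.Get("addr"), ":8080")
	debug := paladin.Bool(m.Get("debug"), false)
	timeout := paladin.Duration(m.Get("timeout"), time.Second)
	fmt.Println(addr, debug, timeout) // 127.0.0.1:8000 true 1s
}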
-func (m *Map) Exist(key string) bool { - _, ok := m.Load()[KeyNamed(key)] - return ok -} - -// Get return get value by key. -func (m *Map) Get(key string) *Value { - v, ok := m.Load()[KeyNamed(key)] - if ok { - return v - } - return &Value{} -} - -// Keys return map keys. -func (m *Map) Keys() []string { - values := m.Load() - keys := make([]string, 0, len(values)) - for key := range values { - keys = append(keys, key) - } - return keys -} diff --git a/pkg/conf/paladin/map_test.go b/pkg/conf/paladin/map_test.go deleted file mode 100644 index b4d7a3ac0..000000000 --- a/pkg/conf/paladin/map_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package paladin_test - -import ( - "testing" - - "github.com/go-kratos/kratos/pkg/conf/paladin" - - "github.com/BurntSushi/toml" - "github.com/stretchr/testify/assert" -) - -type fruit struct { - Fruit []struct { - Name string - } -} - -func (f *fruit) Set(text string) error { - return toml.Unmarshal([]byte(text), f) -} - -func TestMap(t *testing.T) { - s := ` - # kv - text = "hello" - number = 100 - point = 100.1 - boolean = true - KeyCase = "test" - - # slice - numbers = [1, 2, 3] - strings = ["a", "b", "c"] - empty = [] - [[fruit]] - name = "apple" - [[fruit]] - name = "banana" - - # table - [database] - server = "192.168.1.1" - connection_max = 5000 - enabled = true - - [pool] - [pool.breaker] - xxx = "xxx" - ` - m := paladin.Map{} - assert.Nil(t, m.Set(s), s) - str, err := m.Get("text").String() - assert.Nil(t, err) - assert.Equal(t, str, "hello", "text") - n, err := m.Get("number").Int64() - assert.Nil(t, err) - assert.Equal(t, n, int64(100), "number") - p, err := m.Get("point").Float64() - assert.Nil(t, err) - assert.Equal(t, p, 100.1, "point") - b, err := m.Get("boolean").Bool() - assert.Nil(t, err) - assert.Equal(t, b, true, "boolean") - // key lower case - lb, err := m.Get("Boolean").Bool() - assert.Nil(t, err) - assert.Equal(t, lb, true, "boolean") - lt, err := m.Get("KeyCase").String() - assert.Nil(t, err) - assert.Equal(t, lt, "test", "key case") - var sliceInt []int64 - err = m.Get("numbers").Slice(&sliceInt) - assert.Nil(t, err) - assert.Equal(t, sliceInt, []int64{1, 2, 3}) - var sliceStr []string - err = m.Get("strings").Slice(&sliceStr) - assert.Nil(t, err) - assert.Equal(t, []string{"a", "b", "c"}, sliceStr) - err = m.Get("strings").Slice(&sliceStr) - assert.Nil(t, err) - assert.Equal(t, []string{"a", "b", "c"}, sliceStr) - // errors - err = m.Get("strings").Slice(sliceInt) - assert.NotNil(t, err) - err = m.Get("strings").Slice(&sliceInt) - assert.NotNil(t, err) - var obj struct { - Name string - } - err = m.Get("strings").Slice(obj) - assert.NotNil(t, err) - err = m.Get("strings").Slice(&obj) - assert.NotNil(t, err) -} diff --git a/pkg/conf/paladin/mock.go b/pkg/conf/paladin/mock.go deleted file mode 100644 index 4e705c1de..000000000 --- a/pkg/conf/paladin/mock.go +++ /dev/null @@ -1,40 +0,0 @@ -package paladin - -import ( - "context" -) - -var _ Client = &Mock{} - -// Mock is Mock config client. -type Mock struct { - C chan Event - *Map -} - -// NewMock new a config mock client. -func NewMock(vs map[string]string) Client { - values := make(map[string]*Value, len(vs)) - for k, v := range vs { - values[k] = &Value{val: v, raw: v} - } - m := new(Map) - m.Store(values) - return &Mock{Map: m, C: make(chan Event)} -} - -// GetAll return value map. -func (m *Mock) GetAll() *Map { - return m.Map -} - -// WatchEvent watch multi key. -func (m *Mock) WatchEvent(ctx context.Context, key ...string) <-chan Event { - return m.C -} - -// Close close watcher. 
-func (m *Mock) Close() error { - close(m.C) - return nil -} diff --git a/pkg/conf/paladin/mock_test.go b/pkg/conf/paladin/mock_test.go deleted file mode 100644 index 09785d716..000000000 --- a/pkg/conf/paladin/mock_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package paladin_test - -import ( - "testing" - - "github.com/go-kratos/kratos/pkg/conf/paladin" - - "github.com/stretchr/testify/assert" -) - -func TestMock(t *testing.T) { - cs := map[string]string{ - "key_toml": ` - key_bool = true - key_int = 100 - key_float = 100.1 - key_string = "text" - `, - } - cli := paladin.NewMock(cs) - // test vlaue - var m paladin.TOML - err := cli.Get("key_toml").Unmarshal(&m) - assert.Nil(t, err) - b, err := m.Get("key_bool").Bool() - assert.Nil(t, err) - assert.Equal(t, b, true) - i, err := m.Get("key_int").Int64() - assert.Nil(t, err) - assert.Equal(t, i, int64(100)) - f, err := m.Get("key_float").Float64() - assert.Nil(t, err) - assert.Equal(t, f, float64(100.1)) - s, err := m.Get("key_string").String() - assert.Nil(t, err) - assert.Equal(t, s, "text") -} diff --git a/pkg/conf/paladin/register.go b/pkg/conf/paladin/register.go deleted file mode 100644 index 400497745..000000000 --- a/pkg/conf/paladin/register.go +++ /dev/null @@ -1,55 +0,0 @@ -package paladin - -import ( - "fmt" - "sort" - "sync" -) - -var ( - driversMu sync.RWMutex - drivers = make(map[string]Driver) -) - -// Register makes a paladin driver available by the provided name. -// If Register is called twice with the same name or if driver is nil, -// it panics. -func Register(name string, driver Driver) { - driversMu.Lock() - defer driversMu.Unlock() - - if driver == nil { - panic("paladin: driver is nil") - } - - if _, dup := drivers[name]; dup { - panic("paladin: Register called twice for driver " + name) - } - - drivers[name] = driver -} - -// Drivers returns a sorted list of the names of the registered paladin driver. -func Drivers() []string { - driversMu.RLock() - defer driversMu.RUnlock() - - var list []string - for name := range drivers { - list = append(list, name) - } - - sort.Strings(list) - return list -} - -// GetDriver returns a driver implement by name. -func GetDriver(name string) (Driver, error) { - driversMu.RLock() - driveri, ok := drivers[name] - driversMu.RUnlock() - if !ok { - return nil, fmt.Errorf("paladin: unknown driver %q (forgotten import?)", name) - } - return driveri, nil -} diff --git a/pkg/conf/paladin/toml.go b/pkg/conf/paladin/toml.go deleted file mode 100644 index 09595fb0b..000000000 --- a/pkg/conf/paladin/toml.go +++ /dev/null @@ -1,73 +0,0 @@ -package paladin - -import ( - "bytes" - "reflect" - "strconv" - - "github.com/BurntSushi/toml" - "github.com/pkg/errors" -) - -// TOML is toml map. -type TOML = Map - -// Set set the map by value. -func (m *TOML) Set(text string) error { - if err := m.UnmarshalText([]byte(text)); err != nil { - return err - } - return nil -} - -// UnmarshalText implemented toml. 
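// A minimal sketch (editor's illustration) of a third-party driver written
// against the Driver/Register contract above. The "env" driver name is an
// assumption chosen for the example, and paladin.NewMock is reused as the
// backing Client purely to keep the sketch self-contained.
package envdriver

import (
	"os"
	"strings"

	"github.com/go-kratos/kratos/pkg/conf/paladin"
)

// envDriver exposes the process environment as paladin key/value config.
type envDriver struct{}

// New implements paladin.Driver.
func (envDriver) New() (paladin.Client, error) {
	vs := make(map[string]string)
	for _, kv := range os.Environ() {
		if parts := strings.SplitN(kv, "=", 2); len(parts) == 2 {
			vs[parts[0]] = parts[1]
		}
	}
	return paladin.NewMock(vs), nil
}

func init() {
	// After registration the driver is selectable via paladin.Init("env").
	paladin.Register("env", envDriver{})
}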
-func (m *TOML) UnmarshalText(text []byte) error { - raws := map[string]interface{}{} - if err := toml.Unmarshal(text, &raws); err != nil { - return err - } - values := map[string]*Value{} - for k, v := range raws { - k = KeyNamed(k) - rv := reflect.ValueOf(v) - switch rv.Kind() { - case reflect.Map: - buf := bytes.NewBuffer(nil) - err := toml.NewEncoder(buf).Encode(v) - // b, err := toml.Marshal(v) - if err != nil { - return err - } - // NOTE: value is map[string]interface{} - values[k] = &Value{val: v, raw: buf.String()} - case reflect.Slice: - raw := map[string]interface{}{ - k: v, - } - buf := bytes.NewBuffer(nil) - err := toml.NewEncoder(buf).Encode(raw) - // b, err := toml.Marshal(raw) - if err != nil { - return err - } - // NOTE: value is []interface{} - values[k] = &Value{val: v, raw: buf.String()} - case reflect.Bool: - b := v.(bool) - values[k] = &Value{val: b, raw: strconv.FormatBool(b)} - case reflect.Int64: - i := v.(int64) - values[k] = &Value{val: i, raw: strconv.FormatInt(i, 10)} - case reflect.Float64: - f := v.(float64) - values[k] = &Value{val: f, raw: strconv.FormatFloat(f, 'f', -1, 64)} - case reflect.String: - s := v.(string) - values[k] = &Value{val: s, raw: s} - default: - return errors.Errorf("UnmarshalTOML: unknown kind(%v)", rv.Kind()) - } - } - m.Store(values) - return nil -} diff --git a/pkg/conf/paladin/value.go b/pkg/conf/paladin/value.go deleted file mode 100644 index 733db7df7..000000000 --- a/pkg/conf/paladin/value.go +++ /dev/null @@ -1,185 +0,0 @@ -package paladin - -import ( - "encoding" - "encoding/json" - "reflect" - "time" - - "github.com/BurntSushi/toml" - "github.com/pkg/errors" - yaml "gopkg.in/yaml.v2" -) - -// ErrNotExist value key not exist. -var ( - ErrNotExist = errors.New("paladin: value key not exist") - ErrTypeAssertion = errors.New("paladin: value type assertion no match") - ErrDifferentTypes = errors.New("paladin: value different types") -) - -// Value is config value, maybe a json/toml/ini/string file. -type Value struct { - val interface{} - slice interface{} - raw string -} - -// NewValue new a value -func NewValue(val interface{}, raw string) *Value { - return &Value{ - val: val, - raw: raw, - } -} - -// Bool return bool value. -func (v *Value) Bool() (bool, error) { - if v.val == nil { - return false, ErrNotExist - } - b, ok := v.val.(bool) - if !ok { - return false, ErrTypeAssertion - } - return b, nil -} - -// Int return int value. -func (v *Value) Int() (int, error) { - i, err := v.Int64() - return int(i), err -} - -// Int32 return int32 value. -func (v *Value) Int32() (int32, error) { - i, err := v.Int64() - return int32(i), err -} - -// Int64 return int64 value. -func (v *Value) Int64() (int64, error) { - if v.val == nil { - return 0, ErrNotExist - } - i, ok := v.val.(int64) - if !ok { - return 0, ErrTypeAssertion - } - return i, nil -} - -// Float32 return float32 value. -func (v *Value) Float32() (float32, error) { - f, err := v.Float64() - if err != nil { - return 0.0, err - } - return float32(f), nil -} - -// Float64 return float64 value. -func (v *Value) Float64() (float64, error) { - if v.val == nil { - return 0.0, ErrNotExist - } - f, ok := v.val.(float64) - if !ok { - return 0.0, ErrTypeAssertion - } - return f, nil -} - -// String return string value. -func (v *Value) String() (string, error) { - if v.val == nil { - return "", ErrNotExist - } - s, ok := v.val.(string) - if !ok { - return "", ErrTypeAssertion - } - return s, nil -} - -// Duration parses a duration string. 
A duration string is a possibly signed sequence of decimal numbers -// each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". -func (v *Value) Duration() (time.Duration, error) { - s, err := v.String() - if err != nil { - return time.Duration(0), err - } - return time.ParseDuration(s) -} - -// Raw return raw value. -func (v *Value) Raw() (string, error) { - if v.val == nil { - return "", ErrNotExist - } - return v.raw, nil -} - -// Slice scan a slice interface, if slice has element it will be discard. -func (v *Value) Slice(dst interface{}) error { - // NOTE: val is []interface{}, slice is []type - if v.val == nil { - return ErrNotExist - } - rv := reflect.ValueOf(dst) - if rv.Kind() != reflect.Ptr || rv.Elem().Kind() != reflect.Slice { - return ErrDifferentTypes - } - el := rv.Elem() - // reset slice len to 0. - el.SetLen(0) - kind := el.Type().Elem().Kind() - src, ok := v.val.([]interface{}) - if !ok { - return ErrDifferentTypes - } - for _, s := range src { - if reflect.TypeOf(s).Kind() != kind { - return ErrTypeAssertion - } - el = reflect.Append(el, reflect.ValueOf(s)) - } - rv.Elem().Set(el) - return nil -} - -// Unmarshal is the interface implemented by an object that can unmarshal a textual representation of itself. -func (v *Value) Unmarshal(un encoding.TextUnmarshaler) error { - text, err := v.Raw() - if err != nil { - return err - } - return un.UnmarshalText([]byte(text)) -} - -// UnmarshalTOML unmarhsal toml to struct. -func (v *Value) UnmarshalTOML(dst interface{}) error { - text, err := v.Raw() - if err != nil { - return err - } - return toml.Unmarshal([]byte(text), dst) -} - -// UnmarshalJSON unmarhsal json to struct. -func (v *Value) UnmarshalJSON(dst interface{}) error { - text, err := v.Raw() - if err != nil { - return err - } - return json.Unmarshal([]byte(text), dst) -} - -// UnmarshalYAML unmarshal yaml to struct. 
-func (v *Value) UnmarshalYAML(dst interface{}) error { - text, err := v.Raw() - if err != nil { - return err - } - return yaml.Unmarshal([]byte(text), dst) -} diff --git a/pkg/conf/paladin/value_test.go b/pkg/conf/paladin/value_test.go deleted file mode 100644 index cdcdf4b27..000000000 --- a/pkg/conf/paladin/value_test.go +++ /dev/null @@ -1,206 +0,0 @@ -package paladin - -import ( - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -type testUnmarshler struct { - Text string - Int int -} - -func TestValueUnmarshal(t *testing.T) { - s := ` - int = 100 - text = "hello" - ` - v := Value{val: s, raw: s} - obj := new(testUnmarshler) - assert.Nil(t, v.UnmarshalTOML(obj)) - // error - v = Value{val: nil, raw: ""} - assert.NotNil(t, v.UnmarshalTOML(obj)) -} - -func TestValue(t *testing.T) { - var tests = []struct { - in interface{} - out interface{} - }{ - { - "text", - "text", - }, - { - time.Second * 10, - "10s", - }, - { - int64(100), - int64(100), - }, - { - float64(100.1), - float64(100.1), - }, - { - true, - true, - }, - { - nil, - nil, - }, - } - for _, test := range tests { - t.Run(fmt.Sprint(test.in), func(t *testing.T) { - v := Value{val: test.in, raw: fmt.Sprint(test.in)} - switch test.in.(type) { - case nil: - s, err := v.String() - assert.NotNil(t, err) - assert.Equal(t, s, "", test.in) - i, err := v.Int64() - assert.NotNil(t, err) - assert.Equal(t, i, int64(0), test.in) - f, err := v.Float64() - assert.NotNil(t, err) - assert.Equal(t, f, float64(0.0), test.in) - b, err := v.Bool() - assert.NotNil(t, err) - assert.Equal(t, b, false, test.in) - case string: - val, err := v.String() - assert.Nil(t, err) - assert.Equal(t, val, test.out.(string), test.in) - case int64: - val, err := v.Int() - assert.Nil(t, err) - assert.Equal(t, val, int(test.out.(int64)), test.in) - val32, err := v.Int32() - assert.Nil(t, err) - assert.Equal(t, val32, int32(test.out.(int64)), test.in) - val64, err := v.Int64() - assert.Nil(t, err) - assert.Equal(t, val64, test.out.(int64), test.in) - case float64: - val32, err := v.Float32() - assert.Nil(t, err) - assert.Equal(t, val32, float32(test.out.(float64)), test.in) - val64, err := v.Float64() - assert.Nil(t, err) - assert.Equal(t, val64, test.out.(float64), test.in) - case bool: - val, err := v.Bool() - assert.Nil(t, err) - assert.Equal(t, val, test.out.(bool), test.in) - case time.Duration: - v.val = test.out - val, err := v.Duration() - assert.Nil(t, err) - assert.Equal(t, val, test.in.(time.Duration), test.out) - } - }) - } -} - -func TestValueSlice(t *testing.T) { - var tests = []struct { - in interface{} - out interface{} - }{ - { - nil, - nil, - }, - { - []interface{}{"a", "b", "c"}, - []string{"a", "b", "c"}, - }, - { - []interface{}{1, 2, 3}, - []int64{1, 2, 3}, - }, - { - []interface{}{1.1, 1.2, 1.3}, - []float64{1.1, 1.2, 1.3}, - }, - { - []interface{}{true, false, true}, - []bool{true, false, true}, - }, - } - for _, test := range tests { - t.Run(fmt.Sprint(test.in), func(t *testing.T) { - v := Value{val: test.in, raw: fmt.Sprint(test.in)} - switch test.in.(type) { - case nil: - var s []string - assert.NotNil(t, v.Slice(&s)) - case []string: - var s []string - assert.Nil(t, v.Slice(&s)) - assert.Equal(t, s, test.out) - case []int64: - var s []int64 - assert.Nil(t, v.Slice(&s)) - assert.Equal(t, s, test.out) - case []float64: - var s []float64 - assert.Nil(t, v.Slice(&s)) - assert.Equal(t, s, test.out) - case []bool: - var s []bool - assert.Nil(t, v.Slice(&s)) - assert.Equal(t, s, test.out) - } - }) - } -} - -func 
BenchmarkValueInt(b *testing.B) { - v := &Value{val: int64(100), raw: "100"} - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - v.Int64() - } - }) -} -func BenchmarkValueFloat(b *testing.B) { - v := &Value{val: float64(100.1), raw: "100.1"} - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - v.Float64() - } - }) -} -func BenchmarkValueBool(b *testing.B) { - v := &Value{val: true, raw: "true"} - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - v.Bool() - } - }) -} -func BenchmarkValueString(b *testing.B) { - v := &Value{val: "text", raw: "text"} - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - v.String() - } - }) -} - -func BenchmarkValueSlice(b *testing.B) { - v := &Value{val: []interface{}{1, 2, 3}, raw: "100"} - b.RunParallel(func(pb *testing.PB) { - var slice []int64 - for pb.Next() { - v.Slice(&slice) - } - }) -} diff --git a/pkg/container/group/README.md b/pkg/container/group/README.md deleted file mode 100644 index 33d48ba01..000000000 --- a/pkg/container/group/README.md +++ /dev/null @@ -1,12 +0,0 @@ -#### group - -##### 项目简介 - -懒加载对象容器 - -##### 编译环境 - -- **推荐 Golang v1.12.1 以上版本编译执行** - -##### 依赖包 - diff --git a/pkg/container/group/example_test.go b/pkg/container/group/example_test.go deleted file mode 100644 index 9f52fb809..000000000 --- a/pkg/container/group/example_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package group - -import "fmt" - -type Counter struct { - Value int -} - -func (c *Counter) Incr() { - c.Value++ -} - -func ExampleGroup_Get() { - new := func() interface{} { - fmt.Println("Only Once") - return &Counter{} - } - group := NewGroup(new) - - // Create a new Counter - group.Get("pass").(*Counter).Incr() - - // Get the created Counter again. - group.Get("pass").(*Counter).Incr() - // Output: - // Only Once -} - -func ExampleGroup_Reset() { - new := func() interface{} { - return &Counter{} - } - group := NewGroup(new) - - newV2 := func() interface{} { - fmt.Println("New V2") - return &Counter{} - } - // Reset the new function and clear all created objects. - group.Reset(newV2) - - // Create a new Counter - group.Get("pass").(*Counter).Incr() - // Output: - // New V2 -} diff --git a/pkg/container/group/group.go b/pkg/container/group/group.go deleted file mode 100644 index ad0324df0..000000000 --- a/pkg/container/group/group.go +++ /dev/null @@ -1,64 +0,0 @@ -// Package group provides a sample lazy load container. -// The group only creating a new object not until the object is needed by user. -// And it will cache all the objects to reduce the creation of object. -package group - -import "sync" - -// Group is a lazy load container. -type Group struct { - new func() interface{} - objs map[string]interface{} - sync.RWMutex -} - -// NewGroup news a group container. -func NewGroup(new func() interface{}) *Group { - if new == nil { - panic("container.group: can't assign a nil to the new function") - } - return &Group{ - new: new, - objs: make(map[string]interface{}), - } -} - -// Get gets the object by the given key. -func (g *Group) Get(key string) interface{} { - g.RLock() - obj, ok := g.objs[key] - if ok { - g.RUnlock() - return obj - } - g.RUnlock() - - // double check - g.Lock() - defer g.Unlock() - obj, ok = g.objs[key] - if ok { - return obj - } - obj = g.new() - g.objs[key] = obj - return obj -} - -// Reset resets the new function and deletes all existing objects. 
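// A minimal sketch (editor's illustration) of Group in its intended role:
// caching one lazily created object per key. bytes.Buffer stands in for
// whatever per-key resource (a client, a breaker, a connection) an
// application would actually build, and the service names are placeholders.
package main

import (
	"bytes"
	"fmt"

	"github.com/go-kratos/kratos/pkg/container/group"
)

func main() {
	g := group.NewGroup(func() interface{} {
		// Called at most once per key, on the first Get for that key.
		return new(bytes.Buffer)
	})
	a := g.Get("service-a").(*bytes.Buffer)
	b := g.Get("service-b").(*bytes.Buffer)
	again := g.Get("service-a").(*bytes.Buffer)
	fmt.Println(a == again, a == b) // true false
}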
-func (g *Group) Reset(new func() interface{}) { - if new == nil { - panic("container.group: can't assign a nil to the new function") - } - g.Lock() - g.new = new - g.Unlock() - g.Clear() -} - -// Clear deletes all objects. -func (g *Group) Clear() { - g.Lock() - g.objs = make(map[string]interface{}) - g.Unlock() -} diff --git a/pkg/container/group/group_test.go b/pkg/container/group/group_test.go deleted file mode 100644 index 50464dbe1..000000000 --- a/pkg/container/group/group_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package group - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestGroupGet(t *testing.T) { - count := 0 - g := NewGroup(func() interface{} { - count++ - return count - }) - v := g.Get("/x/internal/dummy/user") - assert.Equal(t, 1, v.(int)) - - v = g.Get("/x/internal/dummy/avatar") - assert.Equal(t, 2, v.(int)) - - v = g.Get("/x/internal/dummy/user") - assert.Equal(t, 1, v.(int)) - assert.Equal(t, 2, count) -} - -func TestGroupReset(t *testing.T) { - g := NewGroup(func() interface{} { - return 1 - }) - g.Get("/x/internal/dummy/user") - call := false - g.Reset(func() interface{} { - call = true - return 1 - }) - - length := 0 - for range g.objs { - length++ - } - - assert.Equal(t, 0, length) - - g.Get("/x/internal/dummy/user") - assert.Equal(t, true, call) -} - -func TestGroupClear(t *testing.T) { - g := NewGroup(func() interface{} { - return 1 - }) - g.Get("/x/internal/dummy/user") - length := 0 - for range g.objs { - length++ - } - assert.Equal(t, 1, length) - - g.Clear() - length = 0 - for range g.objs { - length++ - } - assert.Equal(t, 0, length) -} diff --git a/pkg/container/pool/README.md b/pkg/container/pool/README.md deleted file mode 100644 index 05d5198f9..000000000 --- a/pkg/container/pool/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# pool - -## 项目简介 - -通用连接池实现 diff --git a/pkg/container/pool/list.go b/pkg/container/pool/list.go deleted file mode 100644 index 7155953e6..000000000 --- a/pkg/container/pool/list.go +++ /dev/null @@ -1,226 +0,0 @@ -package pool - -import ( - "container/list" - "context" - "io" - "sync" - "time" -) - -var _ Pool = &List{} - -// List . -type List struct { - // New is an application supplied function for creating and configuring a - // item. - // - // The item returned from new must not be in a special state - // (subscribed to pubsub channel, transaction started, ...). - New func(ctx context.Context) (io.Closer, error) - - // mu protects fields defined below. - mu sync.Mutex - cond chan struct{} - closed bool - active int - // clean stale items - cleanerCh chan struct{} - - // Stack of item with most recently used at the front. - idles list.List - - // Config pool configuration - conf *Config -} - -// NewList creates a new pool. -func NewList(c *Config) *List { - // check Config - if c == nil || c.Active < c.Idle { - panic("config nil or Idle Must <= Active") - } - // new pool - p := &List{conf: c} - p.cond = make(chan struct{}) - p.startCleanerLocked(time.Duration(c.IdleTimeout)) - return p -} - -// Reload reload config. -func (p *List) Reload(c *Config) error { - p.mu.Lock() - p.conf = c - p.mu.Unlock() - return nil -} - -// startCleanerLocked -func (p *List) startCleanerLocked(d time.Duration) { - if d <= 0 { - // if set 0, staleCleaner() will return directly - return - } - if d < time.Duration(p.conf.IdleTimeout) && p.cleanerCh != nil { - select { - case p.cleanerCh <- struct{}{}: - default: - } - } - // run only one, clean stale items. 
- if p.cleanerCh == nil { - p.cleanerCh = make(chan struct{}, 1) - go p.staleCleaner() - } -} - -// staleCleaner clean stale items proc. -func (p *List) staleCleaner() { - ticker := time.NewTicker(100 * time.Millisecond) - for { - select { - case <-ticker.C: - case <-p.cleanerCh: // maxLifetime was changed or db was closed. - } - p.mu.Lock() - if p.closed || p.conf.IdleTimeout <= 0 { - p.mu.Unlock() - return - } - for i, n := 0, p.idles.Len(); i < n; i++ { - e := p.idles.Back() - if e == nil { - // no possible - break - } - ic := e.Value.(item) - if !ic.expired(time.Duration(p.conf.IdleTimeout)) { - // not need continue. - break - } - p.idles.Remove(e) - p.release() - p.mu.Unlock() - ic.c.Close() - p.mu.Lock() - } - p.mu.Unlock() - } -} - -// Get returns a item from the idles List or -// get a new item. -func (p *List) Get(ctx context.Context) (io.Closer, error) { - p.mu.Lock() - if p.closed { - p.mu.Unlock() - return nil, ErrPoolClosed - } - for { - // get idles item. - for i, n := 0, p.idles.Len(); i < n; i++ { - e := p.idles.Front() - if e == nil { - break - } - ic := e.Value.(item) - p.idles.Remove(e) - p.mu.Unlock() - if !ic.expired(time.Duration(p.conf.IdleTimeout)) { - return ic.c, nil - } - ic.c.Close() - p.mu.Lock() - p.release() - } - // Check for pool closed before dialing a new item. - if p.closed { - p.mu.Unlock() - return nil, ErrPoolClosed - } - // new item if under limit. - if p.conf.Active == 0 || p.active < p.conf.Active { - newItem := p.New - p.active++ - p.mu.Unlock() - c, err := newItem(ctx) - if err != nil { - p.mu.Lock() - p.release() - p.mu.Unlock() - c = nil - } - return c, err - } - if p.conf.WaitTimeout == 0 && !p.conf.Wait { - p.mu.Unlock() - return nil, ErrPoolExhausted - } - wt := p.conf.WaitTimeout - p.mu.Unlock() - - // slowpath: reset context timeout - nctx := ctx - cancel := func() {} - if wt > 0 { - _, nctx, cancel = wt.Shrink(ctx) - } - select { - case <-nctx.Done(): - cancel() - return nil, nctx.Err() - case <-p.cond: - } - cancel() - p.mu.Lock() - } -} - -// Put put item into pool. -func (p *List) Put(ctx context.Context, c io.Closer, forceClose bool) error { - p.mu.Lock() - if !p.closed && !forceClose { - p.idles.PushFront(item{createdAt: nowFunc(), c: c}) - if p.idles.Len() > p.conf.Idle { - c = p.idles.Remove(p.idles.Back()).(item).c - } else { - c = nil - } - } - if c == nil { - p.signal() - p.mu.Unlock() - return nil - } - p.release() - p.mu.Unlock() - return c.Close() -} - -// Close releases the resources used by the pool. -func (p *List) Close() error { - p.mu.Lock() - idles := p.idles - p.idles.Init() - p.closed = true - p.active -= idles.Len() - p.mu.Unlock() - for e := idles.Front(); e != nil; e = e.Next() { - e.Value.(item).c.Close() - } - return nil -} - -// release decrements the active count and signals waiters. The caller must -// hold p.mu during the call. 
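// A minimal sketch (editor's illustration) of driving the List pool above
// with a real resource: pooled TCP connections to a single backend. The
// address and the sizing numbers are placeholders; error handling is
// deliberately thin.
package main

import (
	"context"
	"io"
	"net"
	"time"

	"github.com/go-kratos/kratos/pkg/container/pool"
	xtime "github.com/go-kratos/kratos/pkg/time"
)

func main() {
	p := pool.NewList(&pool.Config{
		Active:      10,
		Idle:        5,
		IdleTimeout: xtime.Duration(90 * time.Second),
		WaitTimeout: xtime.Duration(100 * time.Millisecond),
	})
	// New dials a fresh connection whenever the pool has to grow.
	p.New = func(ctx context.Context) (io.Closer, error) {
		return net.Dial("tcp", "127.0.0.1:6379")
	}

	conn, err := p.Get(context.Background())
	if err != nil {
		panic(err)
	}
	// ... use conn.(net.Conn) ...
	// forceClose=false returns the item to the idle list; pass true to drop
	// it instead (for example after an I/O error).
	_ = p.Put(context.Background(), conn, false)
	_ = p.Close()
}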
-func (p *List) release() { - p.active-- - p.signal() -} - -func (p *List) signal() { - select { - default: - case p.cond <- struct{}{}: - } -} diff --git a/pkg/container/pool/list_test.go b/pkg/container/pool/list_test.go deleted file mode 100644 index 286e547b8..000000000 --- a/pkg/container/pool/list_test.go +++ /dev/null @@ -1,322 +0,0 @@ -package pool - -import ( - "context" - "io" - "testing" - "time" - - xtime "github.com/go-kratos/kratos/pkg/time" - - "github.com/stretchr/testify/assert" -) - -func TestListGetPut(t *testing.T) { - // new pool - config := &Config{ - Active: 1, - Idle: 1, - IdleTimeout: xtime.Duration(90 * time.Second), - WaitTimeout: xtime.Duration(10 * time.Millisecond), - Wait: false, - } - pool := NewList(config) - pool.New = func(ctx context.Context) (io.Closer, error) { - return &closer{}, nil - } - - // test Get Put - conn, err := pool.Get(context.TODO()) - assert.Nil(t, err) - c1 := connection{pool: pool, c: conn} - c1.HandleNormal() - c1.Close() -} - -func TestListPut(t *testing.T) { - var id = 0 - type connID struct { - io.Closer - id int - } - config := &Config{ - Active: 1, - Idle: 1, - IdleTimeout: xtime.Duration(1 * time.Second), - // WaitTimeout: xtime.Duration(10 * time.Millisecond), - Wait: false, - } - pool := NewList(config) - pool.New = func(ctx context.Context) (io.Closer, error) { - id = id + 1 - return &connID{id: id, Closer: &closer{}}, nil - } - // test Put(ctx, conn, true) - conn, err := pool.Get(context.TODO()) - assert.Nil(t, err) - conn1 := conn.(*connID) - // Put(ctx, conn, true) drop the connection. - pool.Put(context.TODO(), conn, true) - conn, err = pool.Get(context.TODO()) - assert.Nil(t, err) - conn2 := conn.(*connID) - assert.NotEqual(t, conn1.id, conn2.id) -} - -func TestListIdleTimeout(t *testing.T) { - var id = 0 - type connID struct { - io.Closer - id int - } - config := &Config{ - Active: 1, - Idle: 1, - // conn timeout - IdleTimeout: xtime.Duration(1 * time.Millisecond), - } - pool := NewList(config) - pool.New = func(ctx context.Context) (io.Closer, error) { - id = id + 1 - return &connID{id: id, Closer: &closer{}}, nil - } - // test Put(ctx, conn, true) - conn, err := pool.Get(context.TODO()) - assert.Nil(t, err) - conn1 := conn.(*connID) - // Put(ctx, conn, true) drop the connection. 
- pool.Put(context.TODO(), conn, false) - time.Sleep(5 * time.Millisecond) - // idletimeout and get new conn - conn, err = pool.Get(context.TODO()) - assert.Nil(t, err) - conn2 := conn.(*connID) - assert.NotEqual(t, conn1.id, conn2.id) -} - -func TestListContextTimeout(t *testing.T) { - // new pool - config := &Config{ - Active: 1, - Idle: 1, - IdleTimeout: xtime.Duration(90 * time.Second), - WaitTimeout: xtime.Duration(10 * time.Millisecond), - Wait: false, - } - pool := NewList(config) - pool.New = func(ctx context.Context) (io.Closer, error) { - return &closer{}, nil - } - // test context timeout - ctx, cancel := context.WithTimeout(context.TODO(), 100*time.Millisecond) - defer cancel() - conn, err := pool.Get(ctx) - assert.Nil(t, err) - _, err = pool.Get(ctx) - // context timeout error - assert.NotNil(t, err) - pool.Put(context.TODO(), conn, false) - _, err = pool.Get(ctx) - assert.Nil(t, err) -} - -func TestListPoolExhausted(t *testing.T) { - // test pool exhausted - config := &Config{ - Active: 1, - Idle: 1, - IdleTimeout: xtime.Duration(90 * time.Second), - // WaitTimeout: xtime.Duration(10 * time.Millisecond), - Wait: false, - } - pool := NewList(config) - pool.New = func(ctx context.Context) (io.Closer, error) { - return &closer{}, nil - } - - ctx, cancel := context.WithTimeout(context.TODO(), 100*time.Millisecond) - defer cancel() - conn, err := pool.Get(context.TODO()) - assert.Nil(t, err) - _, err = pool.Get(ctx) - // config active == 1, so no available conns make connection exhausted. - assert.NotNil(t, err) - pool.Put(context.TODO(), conn, false) - _, err = pool.Get(ctx) - assert.Nil(t, err) -} - -func TestListStaleClean(t *testing.T) { - var id = 0 - type connID struct { - io.Closer - id int - } - config := &Config{ - Active: 1, - Idle: 1, - IdleTimeout: xtime.Duration(1 * time.Second), - // WaitTimeout: xtime.Duration(10 * time.Millisecond), - Wait: false, - } - pool := NewList(config) - pool.New = func(ctx context.Context) (io.Closer, error) { - id = id + 1 - return &connID{id: id, Closer: &closer{}}, nil - } - conn, err := pool.Get(context.TODO()) - assert.Nil(t, err) - conn1 := conn.(*connID) - pool.Put(context.TODO(), conn, false) - conn, err = pool.Get(context.TODO()) - assert.Nil(t, err) - conn2 := conn.(*connID) - assert.Equal(t, conn1.id, conn2.id) - pool.Put(context.TODO(), conn, false) - // sleep more than idleTimeout - time.Sleep(2 * time.Second) - conn, err = pool.Get(context.TODO()) - assert.Nil(t, err) - conn3 := conn.(*connID) - assert.NotEqual(t, conn1.id, conn3.id) -} - -func BenchmarkList1(b *testing.B) { - config := &Config{ - Active: 30, - Idle: 30, - IdleTimeout: xtime.Duration(90 * time.Second), - WaitTimeout: xtime.Duration(10 * time.Millisecond), - Wait: false, - } - pool := NewList(config) - pool.New = func(ctx context.Context) (io.Closer, error) { - return &closer{}, nil - } - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - conn, err := pool.Get(context.TODO()) - if err != nil { - b.Error(err) - continue - } - c1 := connection{pool: pool, c: conn} - c1.HandleQuick() - c1.Close() - } - }) -} - -func BenchmarkList2(b *testing.B) { - config := &Config{ - Active: 30, - Idle: 30, - IdleTimeout: xtime.Duration(90 * time.Second), - WaitTimeout: xtime.Duration(10 * time.Millisecond), - Wait: false, - } - pool := NewList(config) - pool.New = func(ctx context.Context) (io.Closer, error) { - return &closer{}, nil - } - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - conn, err := pool.Get(context.TODO()) 
- if err != nil { - b.Error(err) - continue - } - c1 := connection{pool: pool, c: conn} - c1.HandleNormal() - c1.Close() - } - }) -} - -func BenchmarkPool3(b *testing.B) { - config := &Config{ - Active: 30, - Idle: 30, - IdleTimeout: xtime.Duration(90 * time.Second), - WaitTimeout: xtime.Duration(10 * time.Millisecond), - Wait: false, - } - pool := NewList(config) - pool.New = func(ctx context.Context) (io.Closer, error) { - return &closer{}, nil - } - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - conn, err := pool.Get(context.TODO()) - if err != nil { - b.Error(err) - continue - } - c1 := connection{pool: pool, c: conn} - c1.HandleSlow() - c1.Close() - } - }) -} - -func BenchmarkList4(b *testing.B) { - config := &Config{ - Active: 30, - Idle: 30, - IdleTimeout: xtime.Duration(90 * time.Second), - // WaitTimeout: xtime.Duration(10 * time.Millisecond), - Wait: false, - } - pool := NewList(config) - pool.New = func(ctx context.Context) (io.Closer, error) { - return &closer{}, nil - } - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - conn, err := pool.Get(context.TODO()) - if err != nil { - b.Error(err) - continue - } - c1 := connection{pool: pool, c: conn} - c1.HandleSlow() - c1.Close() - } - }) -} - -func BenchmarkList5(b *testing.B) { - config := &Config{ - Active: 30, - Idle: 30, - IdleTimeout: xtime.Duration(90 * time.Second), - // WaitTimeout: xtime.Duration(10 * time.Millisecond), - Wait: true, - } - pool := NewList(config) - pool.New = func(ctx context.Context) (io.Closer, error) { - return &closer{}, nil - } - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - conn, err := pool.Get(context.TODO()) - if err != nil { - b.Error(err) - continue - } - c1 := connection{pool: pool, c: conn} - c1.HandleSlow() - c1.Close() - } - }) -} diff --git a/pkg/container/pool/pool.go b/pkg/container/pool/pool.go deleted file mode 100644 index 9aec0697c..000000000 --- a/pkg/container/pool/pool.go +++ /dev/null @@ -1,62 +0,0 @@ -package pool - -import ( - "context" - "errors" - "io" - "time" - - xtime "github.com/go-kratos/kratos/pkg/time" -) - -var ( - // ErrPoolExhausted connections are exhausted. - ErrPoolExhausted = errors.New("container/pool exhausted") - // ErrPoolClosed connection pool is closed. - ErrPoolClosed = errors.New("container/pool closed") - - // nowFunc returns the current time; it's overridden in tests. - nowFunc = time.Now -) - -// Config is the pool configuration struct. -type Config struct { - // Active number of items allocated by the pool at a given time. - // When zero, there is no limit on the number of items in the pool. - Active int - // Idle number of idle items in the pool. - Idle int - // Close items after remaining item for this duration. If the value - // is zero, then item items are not closed. Applications should set - // the timeout to a value less than the server's timeout. - IdleTimeout xtime.Duration - // If WaitTimeout is set and the pool is at the Active limit, then Get() waits WatiTimeout - // until a item to be returned to the pool before returning. - WaitTimeout xtime.Duration - // If WaitTimeout is not set, then Wait effects. - // if Wait is set true, then wait until ctx timeout, or default flase and return directly. 
- Wait bool -} - -type item struct { - createdAt time.Time - c io.Closer -} - -func (i *item) expired(timeout time.Duration) bool { - if timeout <= 0 { - return false - } - return i.createdAt.Add(timeout).Before(nowFunc()) -} - -func (i *item) close() error { - return i.c.Close() -} - -// Pool interface. -type Pool interface { - Get(ctx context.Context) (io.Closer, error) - Put(ctx context.Context, c io.Closer, forceClose bool) error - Close() error -} diff --git a/pkg/container/pool/slice.go b/pkg/container/pool/slice.go deleted file mode 100644 index 512ece39b..000000000 --- a/pkg/container/pool/slice.go +++ /dev/null @@ -1,418 +0,0 @@ -package pool - -import ( - "context" - "io" - "sync" - "time" -) - -var _ Pool = &Slice{} - -// Slice . -type Slice struct { - // New is an application supplied function for creating and configuring a - // item. - // - // The item returned from new must not be in a special state - // (subscribed to pubsub channel, transaction started, ...). - New func(ctx context.Context) (io.Closer, error) - stop func() // stop cancels the item opener. - - // mu protects fields defined below. - mu sync.Mutex - freeItem []*item - itemRequests map[uint64]chan item - nextRequest uint64 // Next key to use in itemRequests. - active int // number of opened and pending open items - // Used to signal the need for new items - // a goroutine running itemOpener() reads on this chan and - // maybeOpenNewItems sends on the chan (one send per needed item) - // It is closed during db.Close(). The close tells the itemOpener - // goroutine to exit. - openerCh chan struct{} - closed bool - cleanerCh chan struct{} - - // Config pool configuration - conf *Config -} - -// NewSlice creates a new pool. -func NewSlice(c *Config) *Slice { - // check Config - if c == nil || c.Active < c.Idle { - panic("config nil or Idle Must <= Active") - } - ctx, cancel := context.WithCancel(context.Background()) - // new pool - p := &Slice{ - conf: c, - stop: cancel, - itemRequests: make(map[uint64]chan item), - openerCh: make(chan struct{}, 1000000), - } - p.startCleanerLocked(time.Duration(c.IdleTimeout)) - - go p.itemOpener(ctx) - return p -} - -// Reload reload config. -func (p *Slice) Reload(c *Config) error { - p.mu.Lock() - p.setActive(c.Active) - p.setIdle(c.Idle) - p.conf = c - p.mu.Unlock() - return nil -} - -// Get returns a newly-opened or cached *item. -func (p *Slice) Get(ctx context.Context) (io.Closer, error) { - p.mu.Lock() - if p.closed { - p.mu.Unlock() - return nil, ErrPoolClosed - } - idleTimeout := time.Duration(p.conf.IdleTimeout) - // Prefer a free item, if possible. - numFree := len(p.freeItem) - for numFree > 0 { - i := p.freeItem[0] - copy(p.freeItem, p.freeItem[1:]) - p.freeItem = p.freeItem[:numFree-1] - p.mu.Unlock() - if i.expired(idleTimeout) { - i.close() - p.mu.Lock() - p.release() - } else { - return i.c, nil - } - numFree = len(p.freeItem) - } - - // Out of free items or we were asked not to use one. If we're not - // allowed to open any more items, make a request and wait. - if p.conf.Active > 0 && p.active >= p.conf.Active { - // check WaitTimeout and return directly - if p.conf.WaitTimeout == 0 && !p.conf.Wait { - p.mu.Unlock() - return nil, ErrPoolExhausted - } - // Make the item channel. It's buffered so that the - // itemOpener doesn't block while waiting for the req to be read. 
- req := make(chan item, 1) - reqKey := p.nextRequestKeyLocked() - p.itemRequests[reqKey] = req - wt := p.conf.WaitTimeout - p.mu.Unlock() - - // reset context timeout - if wt > 0 { - var cancel func() - _, ctx, cancel = wt.Shrink(ctx) - defer cancel() - } - // Timeout the item request with the context. - select { - case <-ctx.Done(): - // Remove the item request and ensure no value has been sent - // on it after removing. - p.mu.Lock() - delete(p.itemRequests, reqKey) - p.mu.Unlock() - return nil, ctx.Err() - case ret, ok := <-req: - if !ok { - return nil, ErrPoolClosed - } - if ret.expired(idleTimeout) { - ret.close() - p.mu.Lock() - p.release() - } else { - return ret.c, nil - } - } - } - - p.active++ // optimistically - p.mu.Unlock() - c, err := p.New(ctx) - if err != nil { - p.mu.Lock() - p.release() - p.mu.Unlock() - return nil, err - } - return c, nil -} - -// Put adds a item to the p's free pool. -// err is optionally the last error that occurred on this item. -func (p *Slice) Put(ctx context.Context, c io.Closer, forceClose bool) error { - p.mu.Lock() - defer p.mu.Unlock() - if forceClose { - p.release() - return c.Close() - } - added := p.putItemLocked(c) - if !added { - p.active-- - return c.Close() - } - return nil -} - -// Satisfy a item or put the item in the idle pool and return true -// or return false. -// putItemLocked will satisfy a item if there is one, or it will -// return the *item to the freeItem list if err == nil and the idle -// item limit will not be exceeded. -// If err != nil, the value of i is ignored. -// If err == nil, then i must not equal nil. -// If a item was fulfilled or the *item was placed in the -// freeItem list, then true is returned, otherwise false is returned. -func (p *Slice) putItemLocked(c io.Closer) bool { - if p.closed { - return false - } - if p.conf.Active > 0 && p.active > p.conf.Active { - return false - } - i := item{ - c: c, - createdAt: nowFunc(), - } - if l := len(p.itemRequests); l > 0 { - var req chan item - var reqKey uint64 - for reqKey, req = range p.itemRequests { - break - } - delete(p.itemRequests, reqKey) // Remove from pending requests. - req <- i - return true - } else if !p.closed && p.maxIdleItemsLocked() > len(p.freeItem) { - p.freeItem = append(p.freeItem, &i) - return true - } - return false -} - -// Runs in a separate goroutine, opens new item when requested. -func (p *Slice) itemOpener(ctx context.Context) { - for { - select { - case <-ctx.Done(): - return - case <-p.openerCh: - p.openNewItem(ctx) - } - } -} - -func (p *Slice) maybeOpenNewItems() { - numRequests := len(p.itemRequests) - if p.conf.Active > 0 { - numCanOpen := p.conf.Active - p.active - if numRequests > numCanOpen { - numRequests = numCanOpen - } - } - for numRequests > 0 { - p.active++ // optimistically - numRequests-- - if p.closed { - return - } - p.openerCh <- struct{}{} - } -} - -// openNewItem one new item -func (p *Slice) openNewItem(ctx context.Context) { - // maybeOpenNewConnctions has already executed p.active++ before it sent - // on p.openerCh. This function must execute p.active-- if the - // item fails or is closed before returning. - c, err := p.New(ctx) - p.mu.Lock() - defer p.mu.Unlock() - if err != nil { - p.release() - return - } - if !p.putItemLocked(c) { - p.active-- - c.Close() - } -} - -// setIdle sets the maximum number of items in the idle -// item pool. 
-// -// If MaxOpenConns is greater than 0 but less than the new IdleConns -// then the new IdleConns will be reduced to match the MaxOpenConns limit -// -// If n <= 0, no idle items are retained. -func (p *Slice) setIdle(n int) { - if n > 0 { - p.conf.Idle = n - } else { - // No idle items. - p.conf.Idle = -1 - } - // Make sure maxIdle doesn't exceed maxOpen - if p.conf.Active > 0 && p.maxIdleItemsLocked() > p.conf.Active { - p.conf.Idle = p.conf.Active - } - var closing []*item - idleCount := len(p.freeItem) - maxIdle := p.maxIdleItemsLocked() - if idleCount > maxIdle { - closing = p.freeItem[maxIdle:] - p.freeItem = p.freeItem[:maxIdle] - } - for _, c := range closing { - c.close() - } -} - -// setActive sets the maximum number of open items to the database. -// -// If IdleConns is greater than 0 and the new MaxOpenConns is less than -// IdleConns, then IdleConns will be reduced to match the new -// MaxOpenConns limit -// -// If n <= 0, then there is no limit on the number of open items. -// The default is 0 (unlimited). -func (p *Slice) setActive(n int) { - p.conf.Active = n - if n < 0 { - p.conf.Active = 0 - } - syncIdle := p.conf.Active > 0 && p.maxIdleItemsLocked() > p.conf.Active - if syncIdle { - p.setIdle(n) - } -} - -// startCleanerLocked starts itemCleaner if needed. -func (p *Slice) startCleanerLocked(d time.Duration) { - if d <= 0 { - // if set 0, staleCleaner() will return directly - return - } - if d < time.Duration(p.conf.IdleTimeout) && p.cleanerCh != nil { - select { - case p.cleanerCh <- struct{}{}: - default: - } - } - // run only one, clean stale items. - if p.cleanerCh == nil { - p.cleanerCh = make(chan struct{}, 1) - go p.staleCleaner(time.Duration(p.conf.IdleTimeout)) - } -} - -func (p *Slice) staleCleaner(d time.Duration) { - const minInterval = 100 * time.Millisecond - - if d < minInterval { - d = minInterval - } - t := time.NewTimer(d) - - for { - select { - case <-t.C: - case <-p.cleanerCh: // maxLifetime was changed or db was closed. - } - p.mu.Lock() - d = time.Duration(p.conf.IdleTimeout) - if p.closed || d <= 0 { - p.mu.Unlock() - return - } - - expiredSince := nowFunc().Add(-d) - var closing []*item - for i := 0; i < len(p.freeItem); i++ { - c := p.freeItem[i] - if c.createdAt.Before(expiredSince) { - closing = append(closing, c) - p.active-- - last := len(p.freeItem) - 1 - p.freeItem[i] = p.freeItem[last] - p.freeItem[last] = nil - p.freeItem = p.freeItem[:last] - i-- - } - } - p.mu.Unlock() - - for _, c := range closing { - c.close() - } - - if d < minInterval { - d = minInterval - } - t.Reset(d) - } -} - -// nextRequestKeyLocked returns the next item request key. -// It is assumed that nextRequest will not overflow. -func (p *Slice) nextRequestKeyLocked() uint64 { - next := p.nextRequest - p.nextRequest++ - return next -} - -const defaultIdleItems = 2 - -func (p *Slice) maxIdleItemsLocked() int { - n := p.conf.Idle - switch { - case n == 0: - return defaultIdleItems - case n < 0: - return 0 - default: - return n - } -} - -func (p *Slice) release() { - p.active-- - p.maybeOpenNewItems() -} - -// Close close pool. 
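Taken together, the Config knobs and the Get/Put/Close surface plus the caller-supplied New factory are the whole public API of this pool. A minimal usage sketch, mirroring how the tests below drive it; the fakeConn type and the sizes and timeouts are illustrative only:

```go
package main

import (
	"context"
	"io"
	"time"

	"github.com/go-kratos/kratos/pkg/container/pool"
	xtime "github.com/go-kratos/kratos/pkg/time"
)

// fakeConn stands in for any resource that implements io.Closer.
type fakeConn struct{}

func (c *fakeConn) Close() error { return nil }

func main() {
	p := pool.NewSlice(&pool.Config{
		Active:      10,                                    // hard cap on open items (0 = unlimited)
		Idle:        5,                                     // items kept idle for reuse
		IdleTimeout: xtime.Duration(90 * time.Second),      // idle items older than this are closed
		WaitTimeout: xtime.Duration(10 * time.Millisecond), // how long Get waits when the pool is full
	})
	// New is the application-supplied factory used whenever a fresh item is needed.
	p.New = func(ctx context.Context) (io.Closer, error) {
		return &fakeConn{}, nil
	}

	ctx := context.Background()
	c, err := p.Get(ctx) // reuses an idle item or opens a new one
	if err != nil {
		panic(err)
	}
	// ... use c ...
	_ = p.Put(ctx, c, false) // forceClose=false hands the item back to the idle list
	_ = p.Close()
}
```

The list-based pool exercised by the tests earlier in this diff is driven the same way, behind the shared Pool interface.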
-func (p *Slice) Close() error { - p.mu.Lock() - if p.closed { - p.mu.Unlock() - return nil - } - if p.cleanerCh != nil { - close(p.cleanerCh) - } - var err error - for _, i := range p.freeItem { - i.close() - } - p.freeItem = nil - p.closed = true - for _, req := range p.itemRequests { - close(req) - } - p.mu.Unlock() - p.stop() - return err -} diff --git a/pkg/container/pool/slice_test.go b/pkg/container/pool/slice_test.go deleted file mode 100644 index b7939203e..000000000 --- a/pkg/container/pool/slice_test.go +++ /dev/null @@ -1,350 +0,0 @@ -package pool - -import ( - "context" - "io" - "testing" - "time" - - xtime "github.com/go-kratos/kratos/pkg/time" - - "github.com/stretchr/testify/assert" -) - -type closer struct { -} - -func (c *closer) Close() error { - return nil -} - -type connection struct { - c io.Closer - pool Pool -} - -func (c *connection) HandleQuick() { - // time.Sleep(1 * time.Millisecond) -} - -func (c *connection) HandleNormal() { - time.Sleep(20 * time.Millisecond) -} - -func (c *connection) HandleSlow() { - time.Sleep(500 * time.Millisecond) -} - -func (c *connection) Close() { - c.pool.Put(context.Background(), c.c, false) -} - -func TestSliceGetPut(t *testing.T) { - // new pool - config := &Config{ - Active: 1, - Idle: 1, - IdleTimeout: xtime.Duration(90 * time.Second), - WaitTimeout: xtime.Duration(10 * time.Millisecond), - Wait: false, - } - pool := NewSlice(config) - pool.New = func(ctx context.Context) (io.Closer, error) { - return &closer{}, nil - } - - // test Get Put - conn, err := pool.Get(context.TODO()) - assert.Nil(t, err) - c1 := connection{pool: pool, c: conn} - c1.HandleNormal() - c1.Close() -} - -func TestSlicePut(t *testing.T) { - var id = 0 - type connID struct { - io.Closer - id int - } - config := &Config{ - Active: 1, - Idle: 1, - IdleTimeout: xtime.Duration(1 * time.Second), - // WaitTimeout: xtime.Duration(10 * time.Millisecond), - Wait: false, - } - pool := NewSlice(config) - pool.New = func(ctx context.Context) (io.Closer, error) { - id = id + 1 - return &connID{id: id, Closer: &closer{}}, nil - } - // test Put(ctx, conn, true) - conn, err := pool.Get(context.TODO()) - assert.Nil(t, err) - conn1 := conn.(*connID) - // Put(ctx, conn, true) drop the connection. - pool.Put(context.TODO(), conn, true) - conn, err = pool.Get(context.TODO()) - assert.Nil(t, err) - conn2 := conn.(*connID) - assert.NotEqual(t, conn1.id, conn2.id) -} - -func TestSliceIdleTimeout(t *testing.T) { - var id = 0 - type connID struct { - io.Closer - id int - } - config := &Config{ - Active: 1, - Idle: 1, - // conn timeout - IdleTimeout: xtime.Duration(1 * time.Millisecond), - } - pool := NewSlice(config) - pool.New = func(ctx context.Context) (io.Closer, error) { - id = id + 1 - return &connID{id: id, Closer: &closer{}}, nil - } - // test Put(ctx, conn, true) - conn, err := pool.Get(context.TODO()) - assert.Nil(t, err) - conn1 := conn.(*connID) - // Put(ctx, conn, true) drop the connection. 
- pool.Put(context.TODO(), conn, false) - time.Sleep(5 * time.Millisecond) - // idletimeout and get new conn - conn, err = pool.Get(context.TODO()) - assert.Nil(t, err) - conn2 := conn.(*connID) - assert.NotEqual(t, conn1.id, conn2.id) -} - -func TestSliceContextTimeout(t *testing.T) { - // new pool - config := &Config{ - Active: 1, - Idle: 1, - IdleTimeout: xtime.Duration(90 * time.Second), - WaitTimeout: xtime.Duration(10 * time.Millisecond), - Wait: false, - } - pool := NewSlice(config) - pool.New = func(ctx context.Context) (io.Closer, error) { - return &closer{}, nil - } - // test context timeout - ctx, cancel := context.WithTimeout(context.TODO(), 100*time.Millisecond) - defer cancel() - conn, err := pool.Get(ctx) - assert.Nil(t, err) - _, err = pool.Get(ctx) - // context timeout error - assert.NotNil(t, err) - pool.Put(context.TODO(), conn, false) - _, err = pool.Get(ctx) - assert.Nil(t, err) -} - -func TestSlicePoolExhausted(t *testing.T) { - // test pool exhausted - config := &Config{ - Active: 1, - Idle: 1, - IdleTimeout: xtime.Duration(90 * time.Second), - // WaitTimeout: xtime.Duration(10 * time.Millisecond), - Wait: false, - } - pool := NewSlice(config) - pool.New = func(ctx context.Context) (io.Closer, error) { - return &closer{}, nil - } - - ctx, cancel := context.WithTimeout(context.TODO(), 100*time.Millisecond) - defer cancel() - conn, err := pool.Get(context.TODO()) - assert.Nil(t, err) - _, err = pool.Get(ctx) - // config active == 1, so no available conns make connection exhausted. - assert.NotNil(t, err) - pool.Put(context.TODO(), conn, false) - _, err = pool.Get(ctx) - assert.Nil(t, err) -} - -func TestSliceStaleClean(t *testing.T) { - var id = 0 - type connID struct { - io.Closer - id int - } - config := &Config{ - Active: 1, - Idle: 1, - IdleTimeout: xtime.Duration(1 * time.Second), - // WaitTimeout: xtime.Duration(10 * time.Millisecond), - Wait: false, - } - pool := NewList(config) - pool.New = func(ctx context.Context) (io.Closer, error) { - id = id + 1 - return &connID{id: id, Closer: &closer{}}, nil - } - conn, err := pool.Get(context.TODO()) - assert.Nil(t, err) - conn1 := conn.(*connID) - pool.Put(context.TODO(), conn, false) - conn, err = pool.Get(context.TODO()) - assert.Nil(t, err) - conn2 := conn.(*connID) - assert.Equal(t, conn1.id, conn2.id) - pool.Put(context.TODO(), conn, false) - // sleep more than idleTimeout - time.Sleep(2 * time.Second) - conn, err = pool.Get(context.TODO()) - assert.Nil(t, err) - conn3 := conn.(*connID) - assert.NotEqual(t, conn1.id, conn3.id) -} - -func BenchmarkSlice1(b *testing.B) { - config := &Config{ - Active: 30, - Idle: 30, - IdleTimeout: xtime.Duration(90 * time.Second), - WaitTimeout: xtime.Duration(10 * time.Millisecond), - Wait: false, - } - pool := NewSlice(config) - pool.New = func(ctx context.Context) (io.Closer, error) { - return &closer{}, nil - } - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - conn, err := pool.Get(context.TODO()) - if err != nil { - b.Error(err) - continue - } - c1 := connection{pool: pool, c: conn} - c1.HandleQuick() - c1.Close() - } - }) -} - -func BenchmarkSlice2(b *testing.B) { - config := &Config{ - Active: 30, - Idle: 30, - IdleTimeout: xtime.Duration(90 * time.Second), - WaitTimeout: xtime.Duration(10 * time.Millisecond), - Wait: false, - } - pool := NewSlice(config) - pool.New = func(ctx context.Context) (io.Closer, error) { - return &closer{}, nil - } - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - conn, err := 
pool.Get(context.TODO()) - if err != nil { - b.Error(err) - continue - } - c1 := connection{pool: pool, c: conn} - c1.HandleNormal() - c1.Close() - } - }) -} - -func BenchmarkSlice3(b *testing.B) { - config := &Config{ - Active: 30, - Idle: 30, - IdleTimeout: xtime.Duration(90 * time.Second), - WaitTimeout: xtime.Duration(10 * time.Millisecond), - Wait: false, - } - pool := NewSlice(config) - pool.New = func(ctx context.Context) (io.Closer, error) { - return &closer{}, nil - } - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - conn, err := pool.Get(context.TODO()) - if err != nil { - b.Error(err) - continue - } - c1 := connection{pool: pool, c: conn} - c1.HandleSlow() - c1.Close() - } - }) -} - -func BenchmarkSlice4(b *testing.B) { - config := &Config{ - Active: 30, - Idle: 30, - IdleTimeout: xtime.Duration(90 * time.Second), - // WaitTimeout: xtime.Duration(10 * time.Millisecond), - Wait: false, - } - pool := NewSlice(config) - pool.New = func(ctx context.Context) (io.Closer, error) { - return &closer{}, nil - } - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - conn, err := pool.Get(context.TODO()) - if err != nil { - b.Error(err) - continue - } - c1 := connection{pool: pool, c: conn} - c1.HandleSlow() - c1.Close() - } - }) -} - -func BenchmarkSlice5(b *testing.B) { - config := &Config{ - Active: 30, - Idle: 30, - IdleTimeout: xtime.Duration(90 * time.Second), - // WaitTimeout: xtime.Duration(10 * time.Millisecond), - Wait: true, - } - pool := NewSlice(config) - pool.New = func(ctx context.Context) (io.Closer, error) { - return &closer{}, nil - } - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - conn, err := pool.Get(context.TODO()) - if err != nil { - b.Error(err) - continue - } - c1 := connection{pool: pool, c: conn} - c1.HandleSlow() - c1.Close() - } - }) -} diff --git a/pkg/container/queue/aqm/README.md b/pkg/container/queue/aqm/README.md deleted file mode 100644 index 273de9b8d..000000000 --- a/pkg/container/queue/aqm/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# aqm - -## 项目简介 - -队列管理算法 diff --git a/pkg/container/queue/aqm/codel.go b/pkg/container/queue/aqm/codel.go deleted file mode 100644 index 0b0938bfc..000000000 --- a/pkg/container/queue/aqm/codel.go +++ /dev/null @@ -1,201 +0,0 @@ -package aqm - -import ( - "context" - "math" - "sync" - "time" - - "github.com/go-kratos/kratos/pkg/ecode" -) - -// Config codel config. -type Config struct { - Target int64 // target queue delay (default 20 ms). - Internal int64 // sliding minimum time window width (default 500 ms) - MaxOutstanding int64 //max num of concurrent acquires -} - -// Stat is the Statistics of codel. -type Stat struct { - Dropping bool - FaTime int64 - DropNext int64 - Packets int -} - -type packet struct { - ch chan bool - ts int64 -} - -var defaultConf = &Config{ - Target: 50, - Internal: 500, - MaxOutstanding: 40, -} - -// Queue queue is CoDel req buffer queue. -type Queue struct { - pool sync.Pool - packets chan packet - - mux sync.RWMutex - conf *Config - count int64 - dropping bool // Equal to 1 if in drop state - faTime int64 // Time when we'll declare we're above target (0 if below) - dropNext int64 // Packets dropped since going into drop state - outstanding int64 -} - -// Default new a default codel queue. -func Default() *Queue { - return New(defaultConf) -} - -// New new codel queue. 
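The queue is driven entirely through Push and Pop, defined just below: Push admits, parks, or drops a request according to the CoDel control law, and every admitted request must be balanced by a Pop (the Push doc comment still mentions a q.Done() that does not exist; the tests call q.Pop()). A hedged sketch of that call pattern, copying the Target/Internal/MaxOutstanding values from the tests further down:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-kratos/kratos/pkg/container/queue/aqm"
)

// One shared queue guards a downstream resource.
var q = aqm.New(&aqm.Config{Target: 20, Internal: 500, MaxOutstanding: 20})

func handle(ctx context.Context) error {
	// Push returns once the request is admitted, dropped, or the context expires.
	if err := q.Push(ctx); err != nil {
		return err // ecode.LimitExceed when dropped, ecode.Deadline on ctx timeout
	}
	// Admitted: release the slot once the work is done.
	defer q.Pop()
	time.Sleep(10 * time.Millisecond) // stand-in for real request handling
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	fmt.Println(handle(ctx))
}
```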
-func New(conf *Config) *Queue { - if conf == nil { - conf = defaultConf - } - q := &Queue{ - packets: make(chan packet, 2048), - conf: conf, - } - q.pool.New = func() interface{} { - return make(chan bool) - } - return q -} - -// Reload set queue config. -func (q *Queue) Reload(c *Config) { - if c == nil || c.Internal <= 0 || c.Target <= 0 || c.MaxOutstanding <= 0 { - return - } - // TODO codel queue size - q.mux.Lock() - q.conf = c - q.mux.Unlock() -} - -// Stat return the statistics of codel -func (q *Queue) Stat() Stat { - q.mux.Lock() - defer q.mux.Unlock() - return Stat{ - Dropping: q.dropping, - FaTime: q.faTime, - DropNext: q.dropNext, - Packets: len(q.packets), - } -} - -// Push req into CoDel request buffer queue. -// if return error is nil,the caller must call q.Done() after finish request handling -func (q *Queue) Push(ctx context.Context) (err error) { - q.mux.Lock() - if q.outstanding < q.conf.MaxOutstanding && len(q.packets) == 0 { - q.outstanding ++ - q.mux.Unlock() - return - } - q.mux.Unlock() - r := packet{ - ch: q.pool.Get().(chan bool), - ts: time.Now().UnixNano() / int64(time.Millisecond), - } - select { - case q.packets <- r: - default: - err = ecode.LimitExceed - q.pool.Put(r.ch) - } - if err == nil { - select { - case drop := <-r.ch: - if drop { - err = ecode.LimitExceed - } - q.pool.Put(r.ch) - case <-ctx.Done(): - err = ecode.Deadline - } - } - return -} - -// Pop req from CoDel request buffer queue. -func (q *Queue) Pop() { - q.mux.Lock() - q.outstanding -- - if q.outstanding < 0 { - q.outstanding = 0 - q.mux.Unlock() - return - } - defer q.mux.Unlock() - for { - select { - case p := <-q.packets: - drop := q.judge(p) - select { - case p.ch <- drop: - if !drop { - return - } - default: - q.pool.Put(p.ch) - } - default: - return - } - } -} - -func (q *Queue) controlLaw(now int64) int64 { - q.dropNext = now + int64(float64(q.conf.Internal)/math.Sqrt(float64(q.count))) - return q.dropNext -} - -// judge decide if the packet should drop or not. -func (q *Queue) judge(p packet) (drop bool) { - now := time.Now().UnixNano() / int64(time.Millisecond) - sojurn := now - p.ts - if sojurn < q.conf.Target { - q.faTime = 0 - } else if q.faTime == 0 { - q.faTime = now + q.conf.Internal - } else if now >= q.faTime { - drop = true - } - if q.dropping { - if !drop { - // sojourn time below target - leave dropping state - q.dropping = false - } else if now > q.dropNext { - q.count++ - q.dropNext = q.controlLaw(q.dropNext) - return - } - } else if drop && (now-q.dropNext < q.conf.Internal || now-q.faTime >= q.conf.Internal) { - q.dropping = true - // If we're in a drop cycle, the drop rate that controlled the queue - // on the last cycle is a good starting point to control it now. 
- if now-q.dropNext < q.conf.Internal { - if q.count > 2 { - q.count = q.count - 2 - } else { - q.count = 1 - } - } else { - q.count = 1 - } - q.dropNext = q.controlLaw(now) - return - } - q.outstanding ++ - drop = false - return -} diff --git a/pkg/container/queue/aqm/codel_test.go b/pkg/container/queue/aqm/codel_test.go deleted file mode 100644 index 7e2fdb819..000000000 --- a/pkg/container/queue/aqm/codel_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package aqm - -import ( - "context" - "fmt" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/go-kratos/kratos/pkg/ecode" -) - -var testConf = &Config{ - Target: 20, - Internal: 500, - MaxOutstanding: 20, -} - -var qps = time.Microsecond * 2000 - -func TestCoDel1200(t *testing.T) { - q := New(testConf) - drop := new(int64) - tm := new(int64) - accept := new(int64) - delay := time.Millisecond * 3000 - testPush(q, qps, delay, drop, tm, accept) - fmt.Printf("qps %v process time %v drop %d timeout %d accept %d \n", int64(time.Second/qps), delay, *drop, *tm, *accept) - time.Sleep(time.Second) -} - -func TestCoDel200(t *testing.T) { - q := New(testConf) - drop := new(int64) - tm := new(int64) - accept := new(int64) - delay := time.Millisecond * 2000 - testPush(q, qps, delay, drop, tm, accept) - fmt.Printf("qps %v process time %v drop %d timeout %d accept %d \n", int64(time.Second/qps), delay, *drop, *tm, *accept) - time.Sleep(time.Second) -} - -func TestCoDel100(t *testing.T) { - q := New(testConf) - drop := new(int64) - tm := new(int64) - accept := new(int64) - delay := time.Millisecond * 1000 - testPush(q, qps, delay, drop, tm, accept) - fmt.Printf("qps %v process time %v drop %d timeout %d accept %d \n", int64(time.Second/qps), delay, *drop, *tm, *accept) -} - -func TestCoDel50(t *testing.T) { - q := New(testConf) - drop := new(int64) - tm := new(int64) - accept := new(int64) - delay := time.Millisecond * 50 - testPush(q, qps, delay, drop, tm, accept) - fmt.Printf("qps %v process time %v drop %d timeout %d accept %d \n", int64(time.Second/qps), delay, *drop, *tm, *accept) -} - -func testPush(q *Queue, sleep time.Duration, delay time.Duration, drop *int64, tm *int64, accept *int64) { - var group sync.WaitGroup - for i := 0; i < 5000; i++ { - time.Sleep(sleep) - group.Add(1) - go func() { - defer group.Done() - ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Millisecond*1000)) - defer cancel() - if err := q.Push(ctx); err != nil { - if err == ecode.LimitExceed { - atomic.AddInt64(drop, 1) - } else { - atomic.AddInt64(tm, 1) - } - } else { - atomic.AddInt64(accept, 1) - time.Sleep(delay) - q.Pop() - } - }() - } - group.Wait() -} - -func BenchmarkAQM(b *testing.B) { - q := Default() - b.RunParallel(func(p *testing.PB) { - for p.Next() { - ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Millisecond*5)) - err := q.Push(ctx) - if err == nil { - q.Pop() - } - cancel() - } - }) -} diff --git a/pkg/database/hbase/README.md b/pkg/database/hbase/README.md deleted file mode 100644 index 8e1280821..000000000 --- a/pkg/database/hbase/README.md +++ /dev/null @@ -1,40 +0,0 @@ -### database/hbase - -### 项目简介 - -Hbase Client,进行封装加入了链路追踪和统计。 - -### usage -```go -package main - -import ( - "context" - "fmt" - - "github.com/go-kratos/kratos/pkg/database/hbase" -) - -func main() { - config := &hbase.Config{Zookeeper: &hbase.ZKConfig{Addrs: []string{"localhost"}}} - client := hbase.NewClient(config) - - values := map[string]map[string][]byte{"name": {"firstname": []byte("hello"), "lastname": 
[]byte("world")}} - ctx := context.Background() - - _, err := client.PutStr(ctx, "user", "user1", values) - if err != nil { - panic(err) - } - - result, err := client.GetStr(ctx, "user", "user1") - if err != nil { - panic(err) - } - fmt.Printf("%v", result) -} -``` - -##### 依赖包 - -1.[gohbase](https://github.com/tsuna/gohbase) diff --git a/pkg/database/hbase/config.go b/pkg/database/hbase/config.go deleted file mode 100644 index 8b24b0862..000000000 --- a/pkg/database/hbase/config.go +++ /dev/null @@ -1,23 +0,0 @@ -package hbase - -import ( - xtime "github.com/go-kratos/kratos/pkg/time" -) - -// ZKConfig Server&Client settings. -type ZKConfig struct { - Root string - Addrs []string - Timeout xtime.Duration -} - -// Config hbase config -type Config struct { - Zookeeper *ZKConfig - RPCQueueSize int - FlushInterval xtime.Duration - EffectiveUser string - RegionLookupTimeout xtime.Duration - RegionReadTimeout xtime.Duration - TestRowKey string -} diff --git a/pkg/database/hbase/hbase.go b/pkg/database/hbase/hbase.go deleted file mode 100644 index 8534ad40d..000000000 --- a/pkg/database/hbase/hbase.go +++ /dev/null @@ -1,297 +0,0 @@ -package hbase - -import ( - "context" - "io" - "strings" - "time" - - "github.com/tsuna/gohbase" - "github.com/tsuna/gohbase/hrpc" - - "github.com/go-kratos/kratos/pkg/log" -) - -// HookFunc hook function call before every method and hook return function will call after finish. -type HookFunc func(ctx context.Context, call hrpc.Call, customName string) func(err error) - -// Client hbase client. -type Client struct { - hc gohbase.Client - addr string - config *Config - hooks []HookFunc -} - -// AddHook add hook function. -func (c *Client) AddHook(hookFn HookFunc) { - c.hooks = append(c.hooks, hookFn) -} - -func (c *Client) invokeHook(ctx context.Context, call hrpc.Call, customName string) func(error) { - finishHooks := make([]func(error), 0, len(c.hooks)) - for _, fn := range c.hooks { - finishHooks = append(finishHooks, fn(ctx, call, customName)) - } - return func(err error) { - for _, fn := range finishHooks { - fn(err) - } - } -} - -// NewClient new a hbase client. -func NewClient(config *Config, options ...gohbase.Option) *Client { - rawcli := NewRawClient(config, options...) - rawcli.AddHook(NewSlowLogHook(250 * time.Millisecond)) - rawcli.AddHook(MetricsHook(config)) - rawcli.AddHook(TraceHook("database/hbase", strings.Join(config.Zookeeper.Addrs, ","))) - return rawcli -} - -// NewRawClient new a hbase client without prometheus metrics and dapper trace hook. -func NewRawClient(config *Config, options ...gohbase.Option) *Client { - zkquorum := strings.Join(config.Zookeeper.Addrs, ",") - if config.Zookeeper.Root != "" { - options = append(options, gohbase.ZookeeperRoot(config.Zookeeper.Root)) - } - if config.Zookeeper.Timeout != 0 { - options = append(options, gohbase.ZookeeperTimeout(time.Duration(config.Zookeeper.Timeout))) - } - - if config.RPCQueueSize != 0 { - log.Warn("RPCQueueSize configuration be ignored") - } - // force RpcQueueSize = 1, don't change it !!! 
it has reason (゜-゜)つロ - options = append(options, gohbase.RpcQueueSize(1)) - - if config.FlushInterval != 0 { - options = append(options, gohbase.FlushInterval(time.Duration(config.FlushInterval))) - } - if config.EffectiveUser != "" { - options = append(options, gohbase.EffectiveUser(config.EffectiveUser)) - } - if config.RegionLookupTimeout != 0 { - options = append(options, gohbase.RegionLookupTimeout(time.Duration(config.RegionLookupTimeout))) - } - if config.RegionReadTimeout != 0 { - options = append(options, gohbase.RegionReadTimeout(time.Duration(config.RegionReadTimeout))) - } - hc := gohbase.NewClient(zkquorum, options...) - return &Client{ - hc: hc, - addr: zkquorum, - config: config, - } -} - -// ScanAll do scan command and return all result -// NOTE: if err != nil the results is safe for range operate even not result found -func (c *Client) ScanAll(ctx context.Context, table []byte, options ...func(hrpc.Call) error) (results []*hrpc.Result, err error) { - cursor, err := c.Scan(ctx, table, options...) - if err != nil { - return nil, err - } - for { - result, err := cursor.Next() - if err != nil { - if err == io.EOF { - break - } - return nil, err - } - results = append(results, result) - } - return results, nil -} - -type scanTrace struct { - hrpc.Scanner - finishHook func(error) -} - -func (s *scanTrace) Next() (*hrpc.Result, error) { - result, err := s.Scanner.Next() - if err != nil { - s.finishHook(err) - } - return result, err -} - -func (s *scanTrace) Close() error { - err := s.Scanner.Close() - s.finishHook(err) - return err -} - -// Scan do a scan command. -func (c *Client) Scan(ctx context.Context, table []byte, options ...func(hrpc.Call) error) (scanner hrpc.Scanner, err error) { - var scan *hrpc.Scan - scan, err = hrpc.NewScan(ctx, table, options...) - if err != nil { - return nil, err - } - st := &scanTrace{} - st.finishHook = c.invokeHook(ctx, scan, "Scan") - st.Scanner = c.hc.Scan(scan) - return st, nil -} - -// ScanStr scan string -func (c *Client) ScanStr(ctx context.Context, table string, options ...func(hrpc.Call) error) (hrpc.Scanner, error) { - return c.Scan(ctx, []byte(table), options...) -} - -// ScanStrAll scan string -// NOTE: if err != nil the results is safe for range operate even not result found -func (c *Client) ScanStrAll(ctx context.Context, table string, options ...func(hrpc.Call) error) ([]*hrpc.Result, error) { - return c.ScanAll(ctx, []byte(table), options...) -} - -// ScanRange get a scanner for the given table and key range. -// The range is half-open, i.e. [startRow; stopRow[ -- stopRow is not -// included in the range. -func (c *Client) ScanRange(ctx context.Context, table, startRow, stopRow []byte, options ...func(hrpc.Call) error) (scanner hrpc.Scanner, err error) { - var scan *hrpc.Scan - scan, err = hrpc.NewScanRange(ctx, table, startRow, stopRow, options...) - if err != nil { - return nil, err - } - st := &scanTrace{} - st.finishHook = c.invokeHook(ctx, scan, "ScanRange") - st.Scanner = c.hc.Scan(scan) - return st, nil -} - -// ScanRangeStr get a scanner for the given table and key range. -// The range is half-open, i.e. [startRow; stopRow[ -- stopRow is not -// included in the range. -func (c *Client) ScanRangeStr(ctx context.Context, table, startRow, stopRow string, options ...func(hrpc.Call) error) (hrpc.Scanner, error) { - return c.ScanRange(ctx, []byte(table), []byte(startRow), []byte(stopRow), options...) -} - -// Get get result for the given table and row key. 
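Alongside Put and Get, the scan helpers either collect a whole table or hand back a lazy scanner over a half-open key range. A rough sketch, assuming a local ZooKeeper quorum and a `user` table that exist purely for illustration:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/go-kratos/kratos/pkg/database/hbase"
)

func main() {
	client := hbase.NewClient(&hbase.Config{
		Zookeeper: &hbase.ZKConfig{Addrs: []string{"localhost"}}, // placeholder quorum
	})
	ctx := context.Background()

	// ScanStrAll collects every row; the slice is safe to range over even when empty.
	results, err := client.ScanStrAll(ctx, "user")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("full scan returned %d rows\n", len(results))

	// ScanRangeStr limits the scan to the half-open key range [startRow, stopRow).
	scanner, err := client.ScanRangeStr(ctx, "user", "user1", "user9")
	if err != nil {
		log.Fatal(err)
	}
	defer scanner.Close()
	for {
		row, err := scanner.Next()
		if err != nil { // io.EOF marks the end of the range
			break
		}
		fmt.Printf("%v\n", row)
	}
}
```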
-// NOTE: if err != nil then result != nil, if result not exists result.Cells length is 0 -func (c *Client) Get(ctx context.Context, table, key []byte, options ...func(hrpc.Call) error) (result *hrpc.Result, err error) { - var get *hrpc.Get - get, err = hrpc.NewGet(ctx, table, key, options...) - if err != nil { - return nil, err - } - - finishHook := c.invokeHook(ctx, get, "GET") - result, err = c.hc.Get(get) - finishHook(err) - return -} - -// GetStr do a get command. -// NOTE: if err != nil then result != nil, if result not exists result.Cells length is 0 -func (c *Client) GetStr(ctx context.Context, table, key string, options ...func(hrpc.Call) error) (result *hrpc.Result, err error) { - return c.Get(ctx, []byte(table), []byte(key), options...) -} - -// PutStr insert the given family-column-values in the given row key of the given table. -func (c *Client) PutStr(ctx context.Context, table string, key string, values map[string]map[string][]byte, options ...func(hrpc.Call) error) (*hrpc.Result, error) { - put, err := hrpc.NewPutStr(ctx, table, key, values, options...) - if err != nil { - return nil, err - } - - finishHook := c.invokeHook(ctx, put, "PUT") - result, err := c.hc.Put(put) - finishHook(err) - return result, err -} - -// Delete is used to perform Delete operations on a single row. -// To delete entire row, values should be nil. -// -// To delete specific families, qualifiers map should be nil: -// map[string]map[string][]byte{ -// "cf1": nil, -// "cf2": nil, -// } -// -// To delete specific qualifiers: -// map[string]map[string][]byte{ -// "cf": map[string][]byte{ -// "q1": nil, -// "q2": nil, -// }, -// } -// -// To delete all versions before and at a timestamp, pass hrpc.Timestamp() option. -// By default all versions will be removed. -// -// To delete only a specific version at a timestamp, pass hrpc.DeleteOneVersion() option -// along with a timestamp. For delete specific qualifiers request, if timestamp is not -// passed, only the latest version will be removed. For delete specific families request, -// the timestamp should be passed or it will have no effect as it's an expensive -// operation to perform. -func (c *Client) Delete(ctx context.Context, table string, key string, values map[string]map[string][]byte, options ...func(hrpc.Call) error) (*hrpc.Result, error) { - del, err := hrpc.NewDelStr(ctx, table, key, values, options...) - if err != nil { - return nil, err - } - - finishHook := c.invokeHook(ctx, del, "Delete") - result, err := c.hc.Delete(del) - finishHook(err) - return result, err -} - -// Append do a append command. -func (c *Client) Append(ctx context.Context, table string, key string, values map[string]map[string][]byte, options ...func(hrpc.Call) error) (*hrpc.Result, error) { - appd, err := hrpc.NewAppStr(ctx, table, key, values, options...) - if err != nil { - return nil, err - } - - finishHook := c.invokeHook(ctx, appd, "Append") - result, err := c.hc.Append(appd) - finishHook(err) - return result, err -} - -// Increment the given values in HBase under the given table and key. -func (c *Client) Increment(ctx context.Context, table string, key string, values map[string]map[string][]byte, options ...func(hrpc.Call) error) (int64, error) { - increment, err := hrpc.NewIncStr(ctx, table, key, values, options...) 
- if err != nil { - return 0, err - } - finishHook := c.invokeHook(ctx, increment, "Increment") - result, err := c.hc.Increment(increment) - finishHook(err) - return result, err -} - -// IncrementSingle increment the given value by amount in HBase under the given table, key, family and qualifier. -func (c *Client) IncrementSingle(ctx context.Context, table string, key string, family string, qualifier string, amount int64, options ...func(hrpc.Call) error) (int64, error) { - increment, err := hrpc.NewIncStrSingle(ctx, table, key, family, qualifier, amount, options...) - if err != nil { - return 0, err - } - - finishHook := c.invokeHook(ctx, increment, "IncrementSingle") - result, err := c.hc.Increment(increment) - finishHook(err) - return result, err -} - -// Ping ping. -func (c *Client) Ping(ctx context.Context) (err error) { - testRowKey := "test" - if c.config.TestRowKey != "" { - testRowKey = c.config.TestRowKey - } - values := map[string]map[string][]byte{"test": {"test": []byte("test")}} - _, err = c.PutStr(ctx, "test", testRowKey, values) - return -} - -// Close close client. -func (c *Client) Close() error { - c.hc.Close() - return nil -} diff --git a/pkg/database/hbase/metrics.go b/pkg/database/hbase/metrics.go deleted file mode 100644 index ad09331a3..000000000 --- a/pkg/database/hbase/metrics.go +++ /dev/null @@ -1,65 +0,0 @@ -package hbase - -import ( - "context" - "io" - "strings" - "time" - - "github.com/tsuna/gohbase" - "github.com/tsuna/gohbase/hrpc" - - "github.com/go-kratos/kratos/pkg/stat/metric" -) - -const namespace = "hbase_client" - -var ( - _metricReqDur = metric.NewHistogramVec(&metric.HistogramVecOpts{ - Namespace: namespace, - Subsystem: "requests", - Name: "duration_ms", - Help: "hbase client requests duration(ms).", - Labels: []string{"name", "addr", "command"}, - Buckets: []float64{5, 10, 25, 50, 100, 250, 500, 1000, 2500}, - }) - _metricReqErr = metric.NewCounterVec(&metric.CounterVecOpts{ - Namespace: namespace, - Subsystem: "requests", - Name: "error_total", - Help: "mysql client requests error count.", - Labels: []string{"name", "addr", "command", "error"}, - }) -) - -func codeFromErr(err error) string { - code := "unknown_error" - switch err { - case gohbase.ErrClientClosed: - code = "client_closed" - case gohbase.ErrCannotFindRegion: - code = "connot_find_region" - case gohbase.TableNotFound: - code = "table_not_found" - //case gohbase.ErrRegionUnavailable: - // code = "region_unavailable" - } - return code -} - -// MetricsHook if stats is nil use stat.DB as default. -func MetricsHook(config *Config) HookFunc { - return func(ctx context.Context, call hrpc.Call, customName string) func(err error) { - now := time.Now() - if customName == "" { - customName = call.Name() - } - return func(err error) { - durationMs := int64(time.Since(now) / time.Millisecond) - _metricReqDur.Observe(durationMs, strings.Join(config.Zookeeper.Addrs, ","), "", customName) - if err != nil && err != io.EOF { - _metricReqErr.Inc(strings.Join(config.Zookeeper.Addrs, ","), "", customName, codeFromErr(err)) - } - } - } -} diff --git a/pkg/database/hbase/slowlog.go b/pkg/database/hbase/slowlog.go deleted file mode 100644 index 8edd28c4e..000000000 --- a/pkg/database/hbase/slowlog.go +++ /dev/null @@ -1,24 +0,0 @@ -package hbase - -import ( - "context" - "time" - - "github.com/tsuna/gohbase/hrpc" - - "github.com/go-kratos/kratos/pkg/log" -) - -// NewSlowLogHook log slow operation. 
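The slow-log, metrics and trace hooks in this package all plug into the same HookFunc seam, so applications can attach their own instrumentation. A sketch of a hand-rolled timing hook on a raw client; the ZooKeeper address, table and row key are placeholders:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/tsuna/gohbase/hrpc"

	"github.com/go-kratos/kratos/pkg/database/hbase"
)

func main() {
	cfg := &hbase.Config{Zookeeper: &hbase.ZKConfig{Addrs: []string{"localhost"}}}
	client := hbase.NewRawClient(cfg) // raw client: no built-in slowlog/metrics/trace hooks

	// A hook runs before every call and returns a closure that runs after it finishes.
	client.AddHook(func(ctx context.Context, call hrpc.Call, customName string) func(error) {
		start := time.Now()
		return func(err error) {
			log.Printf("hbase %s table=%s took=%v err=%v",
				customName, call.Table(), time.Since(start), err)
		}
	})

	if _, err := client.GetStr(context.Background(), "user", "user1"); err != nil {
		log.Fatal(err)
	}
}
```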
-func NewSlowLogHook(threshold time.Duration) HookFunc { - return func(ctx context.Context, call hrpc.Call, customName string) func(err error) { - start := time.Now() - return func(error) { - duration := time.Since(start) - if duration < threshold { - return - } - log.Warn("hbase slow log: %s %s %s time: %s", customName, call.Table(), call.Key(), duration) - } - } -} diff --git a/pkg/database/hbase/trace.go b/pkg/database/hbase/trace.go deleted file mode 100644 index e836ce19f..000000000 --- a/pkg/database/hbase/trace.go +++ /dev/null @@ -1,40 +0,0 @@ -package hbase - -import ( - "context" - "io" - - "github.com/tsuna/gohbase/hrpc" - - "github.com/go-kratos/kratos/pkg/net/trace" -) - -// TraceHook create new hbase trace hook. -func TraceHook(component, instance string) HookFunc { - var internalTags []trace.Tag - internalTags = append(internalTags, trace.TagString(trace.TagComponent, component)) - internalTags = append(internalTags, trace.TagString(trace.TagDBInstance, instance)) - internalTags = append(internalTags, trace.TagString(trace.TagPeerService, "hbase")) - internalTags = append(internalTags, trace.TagString(trace.TagSpanKind, "client")) - return func(ctx context.Context, call hrpc.Call, customName string) func(err error) { - noop := func(error) {} - root, ok := trace.FromContext(ctx) - if !ok { - return noop - } - if customName == "" { - customName = call.Name() - } - span := root.Fork("", "Hbase:"+customName) - span.SetTag(internalTags...) - statement := string(call.Table()) + " " + string(call.Key()) - span.SetTag(trace.TagString(trace.TagDBStatement, statement)) - return func(err error) { - if err == io.EOF { - // reset error for trace. - err = nil - } - span.Finish(&err) - } - } -} diff --git a/pkg/database/sql/README.md b/pkg/database/sql/README.md deleted file mode 100644 index d1c36c4dc..000000000 --- a/pkg/database/sql/README.md +++ /dev/null @@ -1,9 +0,0 @@ -#### database/sql - -##### 项目简介 -MySQL数据库驱动,进行封装加入了链路追踪和统计。 - -如果需要SQL级别的超时管理 可以在业务代码里面使用context.WithDeadline实现 推荐超时配置放到application.toml里面 方便热加载 - -##### 依赖包 -1. 
[Go-MySQL-Driver](https://github.com/go-sql-driver/mysql) diff --git a/pkg/database/sql/metrics.go b/pkg/database/sql/metrics.go deleted file mode 100644 index a4e187d54..000000000 --- a/pkg/database/sql/metrics.go +++ /dev/null @@ -1,37 +0,0 @@ -package sql - -import "github.com/go-kratos/kratos/pkg/stat/metric" - -const namespace = "mysql_client" - -var ( - _metricReqDur = metric.NewHistogramVec(&metric.HistogramVecOpts{ - Namespace: namespace, - Subsystem: "requests", - Name: "duration_ms", - Help: "mysql client requests duration(ms).", - Labels: []string{"name", "addr", "command"}, - Buckets: []float64{5, 10, 25, 50, 100, 250, 500, 1000, 2500}, - }) - _metricReqErr = metric.NewCounterVec(&metric.CounterVecOpts{ - Namespace: namespace, - Subsystem: "requests", - Name: "error_total", - Help: "mysql client requests error count.", - Labels: []string{"name", "addr", "command", "error"}, - }) - _metricConnTotal = metric.NewCounterVec(&metric.CounterVecOpts{ - Namespace: namespace, - Subsystem: "connections", - Name: "total", - Help: "mysql client connections total count.", - Labels: []string{"name", "addr", "state"}, - }) - _metricConnCurrent = metric.NewGaugeVec(&metric.GaugeVecOpts{ - Namespace: namespace, - Subsystem: "connections", - Name: "current", - Help: "mysql client connections current.", - Labels: []string{"name", "addr", "state"}, - }) -) diff --git a/pkg/database/sql/mysql.go b/pkg/database/sql/mysql.go deleted file mode 100644 index 36baf21bf..000000000 --- a/pkg/database/sql/mysql.go +++ /dev/null @@ -1,36 +0,0 @@ -package sql - -import ( - "github.com/go-kratos/kratos/pkg/log" - "github.com/go-kratos/kratos/pkg/net/netutil/breaker" - "github.com/go-kratos/kratos/pkg/time" - - // database driver - _ "github.com/go-sql-driver/mysql" -) - -// Config mysql config. -type Config struct { - DSN string // write data source name. - ReadDSN []string // read data source name. - Active int // pool - Idle int // pool - IdleTimeout time.Duration // connect max life time. - QueryTimeout time.Duration // query sql timeout - ExecTimeout time.Duration // execute sql timeout - TranTimeout time.Duration // transaction sql timeout - Breaker *breaker.Config // breaker -} - -// NewMySQL new db and retry connection when has error. -func NewMySQL(c *Config) (db *DB) { - if c.QueryTimeout == 0 || c.ExecTimeout == 0 || c.TranTimeout == 0 { - panic("mysql must be set query/execute/transction timeout") - } - db, err := Open(c) - if err != nil { - log.Error("open mysql error(%v)", err) - panic(err) - } - return -} diff --git a/pkg/database/sql/sql.go b/pkg/database/sql/sql.go deleted file mode 100644 index 4dbb2f683..000000000 --- a/pkg/database/sql/sql.go +++ /dev/null @@ -1,676 +0,0 @@ -package sql - -import ( - "context" - "database/sql" - "fmt" - "sync/atomic" - "time" - - "github.com/go-kratos/kratos/pkg/ecode" - "github.com/go-kratos/kratos/pkg/log" - "github.com/go-kratos/kratos/pkg/net/netutil/breaker" - "github.com/go-kratos/kratos/pkg/net/trace" - - "github.com/go-sql-driver/mysql" - "github.com/pkg/errors" -) - -const ( - _family = "sql_client" - _slowLogDuration = time.Millisecond * 250 -) - -var ( - // ErrStmtNil prepared stmt error - ErrStmtNil = errors.New("sql: prepare failed and stmt nil") - // ErrNoMaster is returned by Master when call master multiple times. - ErrNoMaster = errors.New("sql: no master instance") - // ErrNoRows is returned by Scan when QueryRow doesn't return a row. - // In such a case, QueryRow returns a placeholder *Row value that defers - // this error until a Scan. 
- ErrNoRows = sql.ErrNoRows - // ErrTxDone transaction done. - ErrTxDone = sql.ErrTxDone -) - -// DB database. -type DB struct { - write *conn - read []*conn - idx int64 - master *DB -} - -// conn database connection -type conn struct { - *sql.DB - breaker breaker.Breaker - conf *Config - addr string -} - -// Tx transaction. -type Tx struct { - db *conn - tx *sql.Tx - t trace.Trace - c context.Context - cancel func() -} - -// Row row. -type Row struct { - err error - *sql.Row - db *conn - query string - args []interface{} - t trace.Trace - cancel func() -} - -// Scan copies the columns from the matched row into the values pointed at by dest. -func (r *Row) Scan(dest ...interface{}) (err error) { - defer slowLog(fmt.Sprintf("Scan query(%s) args(%+v)", r.query, r.args), time.Now()) - if r.t != nil { - defer r.t.Finish(&err) - } - if r.err != nil { - err = r.err - } else if r.Row == nil { - err = ErrStmtNil - } - if err != nil { - return - } - err = r.Row.Scan(dest...) - if r.cancel != nil { - r.cancel() - } - r.db.onBreaker(&err) - if err != ErrNoRows { - err = errors.Wrapf(err, "query %s args %+v", r.query, r.args) - } - return -} - -// Rows rows. -type Rows struct { - *sql.Rows - cancel func() -} - -// Close closes the Rows, preventing further enumeration. If Next is called -// and returns false and there are no further result sets, -// the Rows are closed automatically and it will suffice to check the -// result of Err. Close is idempotent and does not affect the result of Err. -func (rs *Rows) Close() (err error) { - err = errors.WithStack(rs.Rows.Close()) - if rs.cancel != nil { - rs.cancel() - } - return -} - -// Stmt prepared stmt. -type Stmt struct { - db *conn - tx bool - query string - stmt atomic.Value - t trace.Trace -} - -// Open opens a database specified by its database driver name and a -// driver-specific data source name, usually consisting of at least a database -// name and connection information. -func Open(c *Config) (*DB, error) { - db := new(DB) - d, err := connect(c, c.DSN) - if err != nil { - return nil, err - } - addr := parseDSNAddr(c.DSN) - brkGroup := breaker.NewGroup(c.Breaker) - brk := brkGroup.Get(addr) - w := &conn{DB: d, breaker: brk, conf: c, addr: addr} - rs := make([]*conn, 0, len(c.ReadDSN)) - for _, rd := range c.ReadDSN { - d, err := connect(c, rd) - if err != nil { - return nil, err - } - addr = parseDSNAddr(rd) - brk := brkGroup.Get(addr) - r := &conn{DB: d, breaker: brk, conf: c, addr: addr} - rs = append(rs, r) - } - db.write = w - db.read = rs - db.master = &DB{write: db.write} - return db, nil -} - -func connect(c *Config, dataSourceName string) (*sql.DB, error) { - d, err := sql.Open("mysql", dataSourceName) - if err != nil { - err = errors.WithStack(err) - return nil, err - } - d.SetMaxOpenConns(c.Active) - d.SetMaxIdleConns(c.Idle) - d.SetConnMaxLifetime(time.Duration(c.IdleTimeout)) - return d, nil -} - -// Begin starts a transaction. The isolation level is dependent on the driver. -func (db *DB) Begin(c context.Context) (tx *Tx, err error) { - return db.write.begin(c) -} - -// Exec executes a query without returning any rows. -// The args are for any placeholder parameters in the query. -func (db *DB) Exec(c context.Context, query string, args ...interface{}) (res sql.Result, err error) { - return db.write.exec(c, query, args...) -} - -// Prepare creates a prepared statement for later queries or executions. -// Multiple queries or executions may be run concurrently from the returned -// statement. 
The caller must call the statement's Close method when the -// statement is no longer needed. -func (db *DB) Prepare(query string) (*Stmt, error) { - return db.write.prepare(query) -} - -// Prepared creates a prepared statement for later queries or executions. -// Multiple queries or executions may be run concurrently from the returned -// statement. The caller must call the statement's Close method when the -// statement is no longer needed. -func (db *DB) Prepared(query string) (stmt *Stmt) { - return db.write.prepared(query) -} - -// Query executes a query that returns rows, typically a SELECT. The args are -// for any placeholder parameters in the query. -func (db *DB) Query(c context.Context, query string, args ...interface{}) (rows *Rows, err error) { - idx := db.readIndex() - for i := range db.read { - if rows, err = db.read[(idx+i)%len(db.read)].query(c, query, args...); !ecode.EqualError(ecode.ServiceUnavailable, err) { - return - } - } - return db.write.query(c, query, args...) -} - -// QueryRow executes a query that is expected to return at most one row. -// QueryRow always returns a non-nil value. Errors are deferred until Row's -// Scan method is called. -func (db *DB) QueryRow(c context.Context, query string, args ...interface{}) *Row { - idx := db.readIndex() - for i := range db.read { - if row := db.read[(idx+i)%len(db.read)].queryRow(c, query, args...); !ecode.EqualError(ecode.ServiceUnavailable, row.err) { - return row - } - } - return db.write.queryRow(c, query, args...) -} - -func (db *DB) readIndex() int { - if len(db.read) == 0 { - return 0 - } - v := atomic.AddInt64(&db.idx, 1) - return int(v) % len(db.read) -} - -// Close closes the write and read database, releasing any open resources. -func (db *DB) Close() (err error) { - if e := db.write.Close(); e != nil { - err = errors.WithStack(e) - } - for _, rd := range db.read { - if e := rd.Close(); e != nil { - err = errors.WithStack(e) - } - } - return -} - -// Ping verifies a connection to the database is still alive, establishing a -// connection if necessary. -func (db *DB) Ping(c context.Context) (err error) { - if err = db.write.ping(c); err != nil { - return - } - for _, rd := range db.read { - if err = rd.ping(c); err != nil { - return - } - } - return -} - -// Master return *DB instance direct use master conn -// use this *DB instance only when you have some reason need to get result without any delay. 
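Putting the pieces together: reads rotate across the ReadDSN replicas and fall back to the primary when a replica's breaker trips, while Exec and Master() always hit the write node. A hedged end-to-end sketch; the DSNs, table and column names are placeholders, and the three timeouts are arbitrary but must be non-zero because NewMySQL panics otherwise:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/go-kratos/kratos/pkg/database/sql"
	xtime "github.com/go-kratos/kratos/pkg/time"
)

func main() {
	db := sql.NewMySQL(&sql.Config{
		DSN:          "user:pass@tcp(127.0.0.1:3306)/demo?parseTime=true",            // primary (placeholder)
		ReadDSN:      []string{"user:pass@tcp(127.0.0.2:3306)/demo?parseTime=true"},  // replicas (placeholder)
		Active:       20,
		Idle:         10,
		IdleTimeout:  xtime.Duration(4 * time.Hour),
		QueryTimeout: xtime.Duration(200 * time.Millisecond),
		ExecTimeout:  xtime.Duration(300 * time.Millisecond),
		TranTimeout:  xtime.Duration(400 * time.Millisecond),
	})
	defer db.Close()

	ctx := context.Background()

	// Reads prefer the replicas.
	var name string
	if err := db.QueryRow(ctx, "SELECT name FROM users WHERE id=?", 1).Scan(&name); err != nil {
		log.Fatal(err)
	}

	// Writes go to the primary.
	if _, err := db.Exec(ctx, "UPDATE users SET name=? WHERE id=?", name, 1); err != nil {
		log.Fatal(err)
	}

	// Master() forces a read onto the primary when replication lag matters.
	if err := db.Master().QueryRow(ctx, "SELECT name FROM users WHERE id=?", 1).Scan(&name); err != nil {
		log.Fatal(err)
	}
}
```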
-func (db *DB) Master() *DB { - if db.master == nil { - panic(ErrNoMaster) - } - return db.master -} - -func (db *conn) onBreaker(err *error) { - if err != nil && *err != nil && *err != sql.ErrNoRows && *err != sql.ErrTxDone { - db.breaker.MarkFailed() - } else { - db.breaker.MarkSuccess() - } -} - -func (db *conn) begin(c context.Context) (tx *Tx, err error) { - now := time.Now() - defer slowLog("Begin", now) - t, ok := trace.FromContext(c) - if ok { - t = t.Fork(_family, "begin") - t.SetTag(trace.String(trace.TagAddress, db.addr), trace.String(trace.TagComment, "")) - defer func() { - if err != nil { - t.Finish(&err) - } - }() - } - if err = db.breaker.Allow(); err != nil { - _metricReqErr.Inc(db.addr, db.addr, "begin", "breaker") - return - } - _, c, cancel := db.conf.TranTimeout.Shrink(c) - rtx, err := db.BeginTx(c, nil) - _metricReqDur.Observe(int64(time.Since(now)/time.Millisecond), db.addr, db.addr, "begin") - if err != nil { - err = errors.WithStack(err) - cancel() - return - } - tx = &Tx{tx: rtx, t: t, db: db, c: c, cancel: cancel} - return -} - -func (db *conn) exec(c context.Context, query string, args ...interface{}) (res sql.Result, err error) { - now := time.Now() - defer slowLog(fmt.Sprintf("Exec query(%s) args(%+v)", query, args), now) - if t, ok := trace.FromContext(c); ok { - t = t.Fork(_family, "exec") - t.SetTag(trace.String(trace.TagAddress, db.addr), trace.String(trace.TagComment, query)) - defer t.Finish(&err) - } - if err = db.breaker.Allow(); err != nil { - _metricReqErr.Inc(db.addr, db.addr, "exec", "breaker") - return - } - _, c, cancel := db.conf.ExecTimeout.Shrink(c) - res, err = db.ExecContext(c, query, args...) - cancel() - db.onBreaker(&err) - _metricReqDur.Observe(int64(time.Since(now)/time.Millisecond), db.addr, db.addr, "exec") - if err != nil { - err = errors.Wrapf(err, "exec:%s, args:%+v", query, args) - } - return -} - -func (db *conn) ping(c context.Context) (err error) { - now := time.Now() - defer slowLog("Ping", now) - if t, ok := trace.FromContext(c); ok { - t = t.Fork(_family, "ping") - t.SetTag(trace.String(trace.TagAddress, db.addr), trace.String(trace.TagComment, "")) - defer t.Finish(&err) - } - if err = db.breaker.Allow(); err != nil { - _metricReqErr.Inc(db.addr, db.addr, "ping", "breaker") - return - } - _, c, cancel := db.conf.ExecTimeout.Shrink(c) - err = db.PingContext(c) - cancel() - db.onBreaker(&err) - _metricReqDur.Observe(int64(time.Since(now)/time.Millisecond), db.addr, db.addr, "ping") - if err != nil { - err = errors.WithStack(err) - } - return -} - -func (db *conn) prepare(query string) (*Stmt, error) { - defer slowLog(fmt.Sprintf("Prepare query(%s)", query), time.Now()) - stmt, err := db.Prepare(query) - if err != nil { - err = errors.Wrapf(err, "prepare %s", query) - return nil, err - } - st := &Stmt{query: query, db: db} - st.stmt.Store(stmt) - return st, nil -} - -func (db *conn) prepared(query string) (stmt *Stmt) { - defer slowLog(fmt.Sprintf("Prepared query(%s)", query), time.Now()) - stmt = &Stmt{query: query, db: db} - s, err := db.Prepare(query) - if err == nil { - stmt.stmt.Store(s) - return - } - go func() { - for { - s, err := db.Prepare(query) - if err != nil { - time.Sleep(time.Second) - continue - } - stmt.stmt.Store(s) - return - } - }() - return -} - -func (db *conn) query(c context.Context, query string, args ...interface{}) (rows *Rows, err error) { - now := time.Now() - defer slowLog(fmt.Sprintf("Query query(%s) args(%+v)", query, args), now) - if t, ok := trace.FromContext(c); ok { - t = t.Fork(_family, 
"query") - t.SetTag(trace.String(trace.TagAddress, db.addr), trace.String(trace.TagComment, query)) - defer t.Finish(&err) - } - if err = db.breaker.Allow(); err != nil { - _metricReqErr.Inc(db.addr, db.addr, "query", "breaker") - return - } - _, c, cancel := db.conf.QueryTimeout.Shrink(c) - rs, err := db.DB.QueryContext(c, query, args...) - db.onBreaker(&err) - _metricReqDur.Observe(int64(time.Since(now)/time.Millisecond), db.addr, db.addr, "query") - if err != nil { - err = errors.Wrapf(err, "query:%s, args:%+v", query, args) - cancel() - return - } - rows = &Rows{Rows: rs, cancel: cancel} - return -} - -func (db *conn) queryRow(c context.Context, query string, args ...interface{}) *Row { - now := time.Now() - defer slowLog(fmt.Sprintf("QueryRow query(%s) args(%+v)", query, args), now) - t, ok := trace.FromContext(c) - if ok { - t = t.Fork(_family, "queryrow") - t.SetTag(trace.String(trace.TagAddress, db.addr), trace.String(trace.TagComment, query)) - } - if err := db.breaker.Allow(); err != nil { - _metricReqErr.Inc(db.addr, db.addr, "queryRow", "breaker") - return &Row{db: db, t: t, err: err} - } - _, c, cancel := db.conf.QueryTimeout.Shrink(c) - r := db.DB.QueryRowContext(c, query, args...) - _metricReqDur.Observe(int64(time.Since(now)/time.Millisecond), db.addr, db.addr, "queryrow") - return &Row{db: db, Row: r, query: query, args: args, t: t, cancel: cancel} -} - -// Close closes the statement. -func (s *Stmt) Close() (err error) { - if s == nil { - err = ErrStmtNil - return - } - stmt, ok := s.stmt.Load().(*sql.Stmt) - if ok { - err = errors.WithStack(stmt.Close()) - } - return -} - -// Exec executes a prepared statement with the given arguments and returns a -// Result summarizing the effect of the statement. -func (s *Stmt) Exec(c context.Context, args ...interface{}) (res sql.Result, err error) { - if s == nil { - err = ErrStmtNil - return - } - now := time.Now() - defer slowLog(fmt.Sprintf("Exec query(%s) args(%+v)", s.query, args), now) - if s.tx { - if s.t != nil { - s.t.SetTag(trace.String(trace.TagAnnotation, s.query)) - } - } else if t, ok := trace.FromContext(c); ok { - t = t.Fork(_family, "exec") - t.SetTag(trace.String(trace.TagAddress, s.db.addr), trace.String(trace.TagComment, s.query)) - defer t.Finish(&err) - } - if err = s.db.breaker.Allow(); err != nil { - _metricReqErr.Inc(s.db.addr, s.db.addr, "stmt:exec", "breaker") - return - } - stmt, ok := s.stmt.Load().(*sql.Stmt) - if !ok { - err = ErrStmtNil - return - } - _, c, cancel := s.db.conf.ExecTimeout.Shrink(c) - res, err = stmt.ExecContext(c, args...) - cancel() - s.db.onBreaker(&err) - _metricReqDur.Observe(int64(time.Since(now)/time.Millisecond), s.db.addr, s.db.addr, "stmt:exec") - if err != nil { - err = errors.Wrapf(err, "exec:%s, args:%+v", s.query, args) - } - return -} - -// Query executes a prepared query statement with the given arguments and -// returns the query results as a *Rows. 
-func (s *Stmt) Query(c context.Context, args ...interface{}) (rows *Rows, err error) { - if s == nil { - err = ErrStmtNil - return - } - now := time.Now() - defer slowLog(fmt.Sprintf("Query query(%s) args(%+v)", s.query, args), now) - if s.tx { - if s.t != nil { - s.t.SetTag(trace.String(trace.TagAnnotation, s.query)) - } - } else if t, ok := trace.FromContext(c); ok { - t = t.Fork(_family, "query") - t.SetTag(trace.String(trace.TagAddress, s.db.addr), trace.String(trace.TagComment, s.query)) - defer t.Finish(&err) - } - if err = s.db.breaker.Allow(); err != nil { - _metricReqErr.Inc(s.db.addr, s.db.addr, "stmt:query", "breaker") - return - } - stmt, ok := s.stmt.Load().(*sql.Stmt) - if !ok { - err = ErrStmtNil - return - } - _, c, cancel := s.db.conf.QueryTimeout.Shrink(c) - rs, err := stmt.QueryContext(c, args...) - s.db.onBreaker(&err) - _metricReqDur.Observe(int64(time.Since(now)/time.Millisecond), s.db.addr, s.db.addr, "stmt:query") - if err != nil { - err = errors.Wrapf(err, "query:%s, args:%+v", s.query, args) - cancel() - return - } - rows = &Rows{Rows: rs, cancel: cancel} - return -} - -// QueryRow executes a prepared query statement with the given arguments. -// If an error occurs during the execution of the statement, that error will -// be returned by a call to Scan on the returned *Row, which is always non-nil. -// If the query selects no rows, the *Row's Scan will return ErrNoRows. -// Otherwise, the *Row's Scan scans the first selected row and discards the rest. -func (s *Stmt) QueryRow(c context.Context, args ...interface{}) (row *Row) { - now := time.Now() - defer slowLog(fmt.Sprintf("QueryRow query(%s) args(%+v)", s.query, args), now) - row = &Row{db: s.db, query: s.query, args: args} - if s == nil { - row.err = ErrStmtNil - return - } - if s.tx { - if s.t != nil { - s.t.SetTag(trace.String(trace.TagAnnotation, s.query)) - } - } else if t, ok := trace.FromContext(c); ok { - t = t.Fork(_family, "queryrow") - t.SetTag(trace.String(trace.TagAddress, s.db.addr), trace.String(trace.TagComment, s.query)) - row.t = t - } - if row.err = s.db.breaker.Allow(); row.err != nil { - _metricReqErr.Inc(s.db.addr, s.db.addr, "stmt:queryrow", "breaker") - return - } - stmt, ok := s.stmt.Load().(*sql.Stmt) - if !ok { - return - } - _, c, cancel := s.db.conf.QueryTimeout.Shrink(c) - row.Row = stmt.QueryRowContext(c, args...) - row.cancel = cancel - _metricReqDur.Observe(int64(time.Since(now)/time.Millisecond), s.db.addr, s.db.addr, "stmt:queryrow") - return -} - -// Commit commits the transaction. -func (tx *Tx) Commit() (err error) { - err = tx.tx.Commit() - tx.cancel() - tx.db.onBreaker(&err) - if tx.t != nil { - tx.t.Finish(&err) - } - if err != nil { - err = errors.WithStack(err) - } - return -} - -// Rollback aborts the transaction. -func (tx *Tx) Rollback() (err error) { - err = tx.tx.Rollback() - tx.cancel() - tx.db.onBreaker(&err) - if tx.t != nil { - tx.t.Finish(&err) - } - if err != nil { - err = errors.WithStack(err) - } - return -} - -// Exec executes a query that doesn't return rows. For example: an INSERT and -// UPDATE. -func (tx *Tx) Exec(query string, args ...interface{}) (res sql.Result, err error) { - now := time.Now() - defer slowLog(fmt.Sprintf("Exec query(%s) args(%+v)", query, args), now) - if tx.t != nil { - tx.t.SetTag(trace.String(trace.TagAnnotation, fmt.Sprintf("exec %s", query))) - } - res, err = tx.tx.ExecContext(tx.c, query, args...) 
- _metricReqDur.Observe(int64(time.Since(now)/time.Millisecond), tx.db.addr, tx.db.addr, "tx:exec") - if err != nil { - err = errors.Wrapf(err, "exec:%s, args:%+v", query, args) - } - return -} - -// Query executes a query that returns rows, typically a SELECT. -func (tx *Tx) Query(query string, args ...interface{}) (rows *Rows, err error) { - if tx.t != nil { - tx.t.SetTag(trace.String(trace.TagAnnotation, fmt.Sprintf("query %s", query))) - } - now := time.Now() - defer slowLog(fmt.Sprintf("Query query(%s) args(%+v)", query, args), now) - defer func() { - _metricReqDur.Observe(int64(time.Since(now)/time.Millisecond), tx.db.addr, tx.db.addr, "tx:query") - }() - rs, err := tx.tx.QueryContext(tx.c, query, args...) - if err == nil { - rows = &Rows{Rows: rs} - } else { - err = errors.Wrapf(err, "query:%s, args:%+v", query, args) - } - return -} - -// QueryRow executes a query that is expected to return at most one row. -// QueryRow always returns a non-nil value. Errors are deferred until Row's -// Scan method is called. -func (tx *Tx) QueryRow(query string, args ...interface{}) *Row { - if tx.t != nil { - tx.t.SetTag(trace.String(trace.TagAnnotation, fmt.Sprintf("queryrow %s", query))) - } - now := time.Now() - defer slowLog(fmt.Sprintf("QueryRow query(%s) args(%+v)", query, args), now) - defer func() { - _metricReqDur.Observe(int64(time.Since(now)/time.Millisecond), tx.db.addr, tx.db.addr, "tx:queryrow") - }() - r := tx.tx.QueryRowContext(tx.c, query, args...) - return &Row{Row: r, db: tx.db, query: query, args: args} -} - -// Stmt returns a transaction-specific prepared statement from an existing statement. -func (tx *Tx) Stmt(stmt *Stmt) *Stmt { - as, ok := stmt.stmt.Load().(*sql.Stmt) - if !ok { - return nil - } - ts := tx.tx.StmtContext(tx.c, as) - st := &Stmt{query: stmt.query, tx: true, t: tx.t, db: tx.db} - st.stmt.Store(ts) - return st -} - -// Prepare creates a prepared statement for use within a transaction. -// The returned statement operates within the transaction and can no longer be -// used once the transaction has been committed or rolled back. -// To use an existing prepared statement on this transaction, see Tx.Stmt. -func (tx *Tx) Prepare(query string) (*Stmt, error) { - if tx.t != nil { - tx.t.SetTag(trace.String(trace.TagAnnotation, fmt.Sprintf("prepare %s", query))) - } - defer slowLog(fmt.Sprintf("Prepare query(%s)", query), time.Now()) - stmt, err := tx.tx.Prepare(query) - if err != nil { - err = errors.Wrapf(err, "prepare %s", query) - return nil, err - } - st := &Stmt{query: query, tx: true, t: tx.t, db: tx.db} - st.stmt.Store(stmt) - return st, nil -} - -// parseDSNAddr parse dsn name and return addr. -func parseDSNAddr(dsn string) (addr string) { - cfg, err := mysql.ParseDSN(dsn) - if err != nil { - // just ignore parseDSN error, mysql client will return error for us when connect. 
- return "" - } - return cfg.Addr -} - -func slowLog(statement string, now time.Time) { - du := time.Since(now) - if du > _slowLogDuration { - log.Warn("%s slow log statement: %s time: %v", _family, statement, du) - } -} diff --git a/pkg/database/sql/sql_test.go b/pkg/database/sql/sql_test.go deleted file mode 100644 index 191ab7937..000000000 --- a/pkg/database/sql/sql_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package sql - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestParseAddrDSN(t *testing.T) { - t.Run("test parse addr dsn", func(t *testing.T) { - addr := parseDSNAddr("test:test@tcp(172.16.0.148:3306)/test?timeout=5s&readTimeout=5s&writeTimeout=5s&parseTime=true&loc=Local&charset=utf8") - assert.Equal(t, "172.16.0.148:3306", addr) - }) - t.Run("test password has @", func(t *testing.T) { - addr := parseDSNAddr("root:root@dev@tcp(1.2.3.4:3306)/abc?timeout=1s&readTimeout=1s&writeTimeout=1s&parseTime=true&loc=Local&charset=utf8mb4,utf8") - assert.Equal(t, "1.2.3.4:3306", addr) - }) -} diff --git a/pkg/database/tidb/README.md b/pkg/database/tidb/README.md deleted file mode 100644 index 91b525782..000000000 --- a/pkg/database/tidb/README.md +++ /dev/null @@ -1,14 +0,0 @@ -#### database/tidb - -##### Overview -TiDB database driver, a wrapper around the mysql driver - -##### Features -1. Supports discovery-based service discovery with direct connections to multiple nodes -2. Supports connecting through a single LVS address -3. Supports binding prepared statements to multiple nodes -4. Supports load balancing with dynamic addition and removal of nodes -5. Logs identify the node in use - -##### Dependencies -1.[Go-MySQL-Driver](https://github.com/go-sql-driver/mysql) diff --git a/pkg/database/tidb/discovery.go b/pkg/database/tidb/discovery.go deleted file mode 100644 index fb728c2aa..000000000 --- a/pkg/database/tidb/discovery.go +++ /dev/null @@ -1,56 +0,0 @@ -package tidb - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/go-kratos/kratos/pkg/conf/env" - "github.com/go-kratos/kratos/pkg/log" - "github.com/go-kratos/kratos/pkg/naming" - "github.com/go-kratos/kratos/pkg/naming/discovery" -) - -var _schema = "tidb://" - -func (db *DB) nodeList() (nodes []string) { - var ( - insZone *naming.InstancesInfo - ins []*naming.Instance - ok bool - ) - if insZone, ok = db.dis.Fetch(context.Background()); !ok { - return - } - if ins, ok = insZone.Instances[env.Zone]; !ok || len(ins) == 0 { - return - } - for _, in := range ins { - for _, addr := range in.Addrs { - if strings.HasPrefix(addr, _schema) { - addr = strings.Replace(addr, _schema, "", -1) - nodes = append(nodes, addr) - } - } - } - log.Info("tidb get %s instances(%v)", db.appid, nodes) - return -} - -func (db *DB) disc() (nodes []string) { - db.dis = discovery.Build(db.appid) - e := db.dis.Watch() - select { - case <-e: - nodes = db.nodeList() - case <-time.After(10 * time.Second): - panic("tidb init discovery err") - } - if len(nodes) == 0 { - panic(fmt.Sprintf("tidb %s no instance", db.appid)) - } - go db.nodeproc(e) - log.Info("init tidb discovery info successfully") - return -} diff --git a/pkg/database/tidb/metrics.go b/pkg/database/tidb/metrics.go deleted file mode 100644 index 96687ed16..000000000 --- a/pkg/database/tidb/metrics.go +++ /dev/null @@ -1,37 +0,0 @@ -package tidb - -import "github.com/go-kratos/kratos/pkg/stat/metric" - -const namespace = "tidb_client" - -var ( - _metricReqDur = metric.NewHistogramVec(&metric.HistogramVecOpts{ - Namespace: namespace, - Subsystem: "requests", - Name: "duration_ms", - Help: "tidb client requests duration(ms).", - Labels: []string{"name", "addr", "command"}, - Buckets: []float64{5, 10, 25, 50, 100, 250, 500, 1000, 2500}, - }) - _metricReqErr = metric.NewCounterVec(&metric.CounterVecOpts{ - Namespace:
namespace, - Subsystem: "requests", - Name: "error_total", - Help: "tidb client requests error count.", - Labels: []string{"name", "addr", "command", "error"}, - }) - _metricConnTotal = metric.NewCounterVec(&metric.CounterVecOpts{ - Namespace: namespace, - Subsystem: "connections", - Name: "total", - Help: "tidb client connections total count.", - Labels: []string{"name", "addr", "state"}, - }) - _metricConnCurrent = metric.NewGaugeVec(&metric.GaugeVecOpts{ - Namespace: namespace, - Subsystem: "connections", - Name: "current", - Help: "tidb client connections current.", - Labels: []string{"name", "addr", "state"}, - }) -) diff --git a/pkg/database/tidb/node_proc.go b/pkg/database/tidb/node_proc.go deleted file mode 100644 index 0b72acdc5..000000000 --- a/pkg/database/tidb/node_proc.go +++ /dev/null @@ -1,82 +0,0 @@ -package tidb - -import ( - "time" - - "github.com/go-kratos/kratos/pkg/log" -) - -func (db *DB) nodeproc(e <-chan struct{}) { - if db.dis == nil { - return - } - for { - <-e - nodes := db.nodeList() - if len(nodes) == 0 { - continue - } - cm := make(map[string]*conn) - var conns []*conn - for _, conn := range db.conns { - cm[conn.addr] = conn - } - for _, node := range nodes { - if cm[node] != nil { - conns = append(conns, cm[node]) - continue - } - c, err := db.connectDSN(genDSN(db.conf.DSN, node)) - if err == nil { - conns = append(conns, c) - } else { - log.Error("tidb: connect addr: %s err: %+v", node, err) - } - } - if len(conns) == 0 { - log.Error("tidb: no nodes ignore event") - continue - } - oldConns := db.conns - db.mutex.Lock() - db.conns = conns - db.mutex.Unlock() - log.Info("tidb: new nodes: %v", nodes) - var removedConn []*conn - for _, conn := range oldConns { - var exist bool - for _, c := range conns { - if c.addr == conn.addr { - exist = true - break - } - } - if !exist { - removedConn = append(removedConn, conn) - } - } - go db.closeConns(removedConn) - } -} - -func (db *DB) closeConns(conns []*conn) { - if len(conns) == 0 { - return - } - du := db.conf.QueryTimeout - if db.conf.ExecTimeout > du { - du = db.conf.ExecTimeout - } - if db.conf.TranTimeout > du { - du = db.conf.TranTimeout - } - time.Sleep(time.Duration(du)) - for _, conn := range conns { - err := conn.Close() - if err != nil { - log.Error("tidb: close removed conn: %s err: %v", conn.addr, err) - } else { - log.Info("tidb: close removed conn: %s", conn.addr) - } - } -} diff --git a/pkg/database/tidb/sql.go b/pkg/database/tidb/sql.go deleted file mode 100644 index ee932940e..000000000 --- a/pkg/database/tidb/sql.go +++ /dev/null @@ -1,739 +0,0 @@ -package tidb - -import ( - "context" - "database/sql" - "fmt" - "sync" - "sync/atomic" - "time" - - "github.com/go-kratos/kratos/pkg/log" - "github.com/go-kratos/kratos/pkg/naming" - "github.com/go-kratos/kratos/pkg/net/netutil/breaker" - "github.com/go-kratos/kratos/pkg/net/trace" - - "github.com/go-sql-driver/mysql" - "github.com/pkg/errors" -) - -const ( - _family = "tidb_client" - _slowLogDuration = time.Millisecond * 250 -) - -var ( - // ErrStmtNil prepared stmt error - ErrStmtNil = errors.New("sql: prepare failed and stmt nil") - // ErrNoRows is returned by Scan when QueryRow doesn't return a row. - // In such a case, QueryRow returns a placeholder *Row value that defers - // this error until a Scan. - ErrNoRows = sql.ErrNoRows - // ErrTxDone transaction done. - ErrTxDone = sql.ErrTxDone -) - -// DB database. 
-type DB struct { - conf *Config - conns []*conn - idx int64 - dis naming.Resolver - appid string - mutex sync.RWMutex - breakerGroup *breaker.Group -} - -// conn database connection -type conn struct { - *sql.DB - breaker breaker.Breaker - conf *Config - addr string -} - -// Tx transaction. -type Tx struct { - db *conn - tx *sql.Tx - t trace.Trace - c context.Context - cancel func() -} - -// Row row. -type Row struct { - err error - *sql.Row - db *conn - query string - args []interface{} - t trace.Trace - cancel func() -} - -// Scan copies the columns from the matched row into the values pointed at by dest. -func (r *Row) Scan(dest ...interface{}) (err error) { - defer slowLog(fmt.Sprintf("Scan addr: %s query(%s) args(%+v)", r.db.addr, r.query, r.args), time.Now()) - if r.t != nil { - defer r.t.Finish(&err) - } - if r.err != nil { - err = r.err - } else if r.Row == nil { - err = ErrStmtNil - } - if err != nil { - return - } - err = r.Row.Scan(dest...) - if r.cancel != nil { - r.cancel() - } - r.db.onBreaker(&err) - if err != ErrNoRows { - err = errors.Wrapf(err, "addr: %s, query %s args %+v", r.db.addr, r.query, r.args) - } - return -} - -// Rows rows. -type Rows struct { - *sql.Rows - cancel func() -} - -// Close closes the Rows, preventing further enumeration. If Next is called -// and returns false and there are no further result sets, -// the Rows are closed automatically and it will suffice to check the -// result of Err. Close is idempotent and does not affect the result of Err. -func (rs *Rows) Close() (err error) { - err = errors.WithStack(rs.Rows.Close()) - if rs.cancel != nil { - rs.cancel() - } - return -} - -// Stmt prepared stmt. -type Stmt struct { - db *conn - tx bool - query string - stmt atomic.Value - t trace.Trace -} - -// Stmts random prepared stmt. -type Stmts struct { - query string - sts map[string]*Stmt - mu sync.RWMutex - db *DB -} - -// Open opens a database specified by its database driver name and a -// driver-specific data source name, usually consisting of at least a database -// name and connection information. -func Open(c *Config) (db *DB, err error) { - db = &DB{conf: c, breakerGroup: breaker.NewGroup(c.Breaker)} - cfg, err := mysql.ParseDSN(c.DSN) - if err != nil { - return - } - var dsns []string - if cfg.Net == "discovery" { - db.appid = cfg.Addr - for _, addr := range db.disc() { - dsns = append(dsns, genDSN(c.DSN, addr)) - } - } else { - dsns = append(dsns, c.DSN) - } - - cs := make([]*conn, 0, len(dsns)) - for _, dsn := range dsns { - r, err := db.connectDSN(dsn) - if err != nil { - return db, err - } - cs = append(cs, r) - } - db.conns = cs - return -} - -func (db *DB) connectDSN(dsn string) (c *conn, err error) { - d, err := connect(db.conf, dsn) - if err != nil { - return - } - addr := parseDSNAddr(dsn) - brk := db.breakerGroup.Get(addr) - c = &conn{DB: d, breaker: brk, conf: db.conf, addr: addr} - return -} - -func connect(c *Config, dataSourceName string) (*sql.DB, error) { - d, err := sql.Open("mysql", dataSourceName) - if err != nil { - err = errors.WithStack(err) - return nil, err - } - d.SetMaxOpenConns(c.Active) - d.SetMaxIdleConns(c.Idle) - d.SetConnMaxLifetime(time.Duration(c.IdleTimeout)) - return d, nil -} - -func (db *DB) conn() (c *conn) { - db.mutex.RLock() - c = db.conns[db.index()] - db.mutex.RUnlock() - return -} - -// Begin starts a transaction. The isolation level is dependent on the driver. 
-func (db *DB) Begin(c context.Context) (tx *Tx, err error) { - return db.conn().begin(c) -} - -// Exec executes a query without returning any rows. -// The args are for any placeholder parameters in the query. -func (db *DB) Exec(c context.Context, query string, args ...interface{}) (res sql.Result, err error) { - return db.conn().exec(c, query, args...) -} - -// Prepare creates a prepared statement for later queries or executions. -// Multiple queries or executions may be run concurrently from the returned -// statement. The caller must call the statement's Close method when the -// statement is no longer needed. -func (db *DB) Prepare(query string) (*Stmt, error) { - return db.conn().prepare(query) -} - -// Prepared creates a prepared statement for later queries or executions. -// Multiple queries or executions may be run concurrently from the returned -// statement. The caller must call the statement's Close method when the -// statement is no longer needed. -func (db *DB) Prepared(query string) (s *Stmts) { - s = &Stmts{query: query, sts: make(map[string]*Stmt), db: db} - for _, c := range db.conns { - st := c.prepared(query) - s.mu.Lock() - s.sts[c.addr] = st - s.mu.Unlock() - } - return -} - -// Query executes a query that returns rows, typically a SELECT. The args are -// for any placeholder parameters in the query. -func (db *DB) Query(c context.Context, query string, args ...interface{}) (rows *Rows, err error) { - return db.conn().query(c, query, args...) -} - -// QueryRow executes a query that is expected to return at most one row. -// QueryRow always returns a non-nil value. Errors are deferred until Row's -// Scan method is called. -func (db *DB) QueryRow(c context.Context, query string, args ...interface{}) *Row { - return db.conn().queryRow(c, query, args...) -} - -func (db *DB) index() int { - if len(db.conns) == 1 { - return 0 - } - v := atomic.AddInt64(&db.idx, 1) - return int(v) % len(db.conns) -} - -// Close closes the databases, releasing any open resources. -func (db *DB) Close() (err error) { - db.mutex.RLock() - defer db.mutex.RUnlock() - for _, d := range db.conns { - if e := d.Close(); e != nil { - err = errors.WithStack(e) - } - } - return -} - -// Ping verifies a connection to the database is still alive, establishing a -// connection if necessary. 
-func (db *DB) Ping(c context.Context) (err error) { - if err = db.conn().ping(c); err != nil { - return - } - return -} - -func (db *conn) onBreaker(err *error) { - if err != nil && *err != nil && *err != sql.ErrNoRows && *err != sql.ErrTxDone { - db.breaker.MarkFailed() - } else { - db.breaker.MarkSuccess() - } -} - -func (db *conn) begin(c context.Context) (tx *Tx, err error) { - now := time.Now() - defer slowLog(fmt.Sprintf("Begin addr: %s", db.addr), now) - t, ok := trace.FromContext(c) - if ok { - t = t.Fork(_family, "begin") - t.SetTag(trace.String(trace.TagAddress, db.addr), trace.String(trace.TagComment, "")) - defer func() { - if err != nil { - t.Finish(&err) - } - }() - } - if err = db.breaker.Allow(); err != nil { - _metricReqErr.Inc(db.addr, db.addr, "begin", "breaker") - return - } - _, c, cancel := db.conf.TranTimeout.Shrink(c) - rtx, err := db.BeginTx(c, nil) - _metricReqDur.Observe(int64(time.Since(now)/time.Millisecond), db.addr, db.addr, "begin") - if err != nil { - err = errors.WithStack(err) - cancel() - return - } - tx = &Tx{tx: rtx, t: t, db: db, c: c, cancel: cancel} - return -} - -func (db *conn) exec(c context.Context, query string, args ...interface{}) (res sql.Result, err error) { - now := time.Now() - defer slowLog(fmt.Sprintf("Exec addr: %s query(%s) args(%+v)", db.addr, query, args), now) - if t, ok := trace.FromContext(c); ok { - t = t.Fork(_family, "exec") - t.SetTag(trace.String(trace.TagAddress, db.addr), trace.String(trace.TagComment, query)) - defer t.Finish(&err) - } - if err = db.breaker.Allow(); err != nil { - _metricReqErr.Inc(db.addr, db.addr, "exec", "breaker") - return - } - _, c, cancel := db.conf.ExecTimeout.Shrink(c) - res, err = db.ExecContext(c, query, args...) - cancel() - db.onBreaker(&err) - _metricReqDur.Observe(int64(time.Since(now)/time.Millisecond), db.addr, db.addr, "exec") - if err != nil { - err = errors.Wrapf(err, "addr: %s exec:%s, args:%+v", db.addr, query, args) - } - return -} - -func (db *conn) ping(c context.Context) (err error) { - now := time.Now() - defer slowLog(fmt.Sprintf("Ping addr: %s", db.addr), now) - if t, ok := trace.FromContext(c); ok { - t = t.Fork(_family, "ping") - t.SetTag(trace.String(trace.TagAddress, db.addr), trace.String(trace.TagComment, "")) - defer t.Finish(&err) - } - if err = db.breaker.Allow(); err != nil { - _metricReqErr.Inc(db.addr, db.addr, "ping", "breaker") - return - } - _, c, cancel := db.conf.ExecTimeout.Shrink(c) - err = db.PingContext(c) - cancel() - db.onBreaker(&err) - _metricReqDur.Observe(int64(time.Since(now)/time.Millisecond), db.addr, db.addr, "ping") - if err != nil { - err = errors.WithStack(err) - } - return -} - -func (db *conn) prepare(query string) (*Stmt, error) { - defer slowLog(fmt.Sprintf("Prepare addr: %s query(%s)", db.addr, query), time.Now()) - stmt, err := db.Prepare(query) - if err != nil { - err = errors.Wrapf(err, "addr: %s prepare %s", db.addr, query) - return nil, err - } - st := &Stmt{query: query, db: db} - st.stmt.Store(stmt) - return st, nil -} - -func (db *conn) prepared(query string) (stmt *Stmt) { - defer slowLog(fmt.Sprintf("Prepared addr: %s query(%s)", db.addr, query), time.Now()) - stmt = &Stmt{query: query, db: db} - s, err := db.Prepare(query) - if err == nil { - stmt.stmt.Store(s) - return - } - return -} - -func (db *conn) query(c context.Context, query string, args ...interface{}) (rows *Rows, err error) { - now := time.Now() - defer slowLog(fmt.Sprintf("Query addr: %s query(%s) args(%+v)", db.addr, query, args), now) - if t, ok := 
trace.FromContext(c); ok { - t = t.Fork(_family, "query") - t.SetTag(trace.String(trace.TagAddress, db.addr), trace.String(trace.TagComment, query)) - defer t.Finish(&err) - } - if err = db.breaker.Allow(); err != nil { - _metricReqErr.Inc(db.addr, db.addr, "query", "breaker") - return - } - _, c, cancel := db.conf.QueryTimeout.Shrink(c) - rs, err := db.DB.QueryContext(c, query, args...) - db.onBreaker(&err) - _metricReqDur.Observe(int64(time.Since(now)/time.Millisecond), db.addr, db.addr, "query") - if err != nil { - err = errors.Wrapf(err, "addr: %s, query:%s, args:%+v", db.addr, query, args) - cancel() - return - } - rows = &Rows{Rows: rs, cancel: cancel} - return -} - -func (db *conn) queryRow(c context.Context, query string, args ...interface{}) *Row { - now := time.Now() - defer slowLog(fmt.Sprintf("QueryRow addr: %s query(%s) args(%+v)", db.addr, query, args), now) - t, ok := trace.FromContext(c) - if ok { - t = t.Fork(_family, "queryrow") - t.SetTag(trace.String(trace.TagAddress, db.addr), trace.String(trace.TagComment, query)) - } - if err := db.breaker.Allow(); err != nil { - _metricReqErr.Inc(db.addr, db.addr, "queryrow", "breaker") - return &Row{db: db, t: t, err: err} - } - _, c, cancel := db.conf.QueryTimeout.Shrink(c) - r := db.DB.QueryRowContext(c, query, args...) - _metricReqDur.Observe(int64(time.Since(now)/time.Millisecond), db.addr, db.addr, "queryrow") - return &Row{db: db, Row: r, query: query, args: args, t: t, cancel: cancel} -} - -// Close closes the statement. -func (s *Stmt) Close() (err error) { - stmt, ok := s.stmt.Load().(*sql.Stmt) - if ok { - err = errors.WithStack(stmt.Close()) - } - return -} - -func (s *Stmt) prepare() (st *sql.Stmt) { - var ok bool - if st, ok = s.stmt.Load().(*sql.Stmt); ok { - return - } - var err error - if st, err = s.db.Prepare(s.query); err == nil { - s.stmt.Store(st) - } - return -} - -// Exec executes a prepared statement with the given arguments and returns a -// Result summarizing the effect of the statement. -func (s *Stmt) Exec(c context.Context, args ...interface{}) (res sql.Result, err error) { - now := time.Now() - defer slowLog(fmt.Sprintf("Exec addr: %s query(%s) args(%+v)", s.db.addr, s.query, args), now) - if s.tx { - if s.t != nil { - s.t.SetTag(trace.String(trace.TagAnnotation, s.query)) - } - } else if t, ok := trace.FromContext(c); ok { - t = t.Fork(_family, "exec") - t.SetTag(trace.String(trace.TagAddress, s.db.addr), trace.String(trace.TagComment, s.query)) - defer t.Finish(&err) - } - if err = s.db.breaker.Allow(); err != nil { - _metricReqErr.Inc(s.db.addr, s.db.addr, "stmt:exec", "breaker") - return - } - stmt := s.prepare() - if stmt == nil { - err = ErrStmtNil - return - } - _, c, cancel := s.db.conf.ExecTimeout.Shrink(c) - res, err = stmt.ExecContext(c, args...) - cancel() - s.db.onBreaker(&err) - _metricReqDur.Observe(int64(time.Since(now)/time.Millisecond), s.db.addr, s.db.addr, "stmt:exec") - if err != nil { - err = errors.Wrapf(err, "addr: %s exec:%s, args:%+v", s.db.addr, s.query, args) - } - return -} - -// Query executes a prepared query statement with the given arguments and -// returns the query results as a *Rows. 
-func (s *Stmt) Query(c context.Context, args ...interface{}) (rows *Rows, err error) { - now := time.Now() - defer slowLog(fmt.Sprintf("Query addr: %s query(%s) args(%+v)", s.db.addr, s.query, args), now) - if s.tx { - if s.t != nil { - s.t.SetTag(trace.String(trace.TagAnnotation, s.query)) - } - } else if t, ok := trace.FromContext(c); ok { - t = t.Fork(_family, "query") - t.SetTag(trace.String(trace.TagAddress, s.db.addr), trace.String(trace.TagComment, s.query)) - defer t.Finish(&err) - } - if err = s.db.breaker.Allow(); err != nil { - _metricReqErr.Inc(s.db.addr, s.db.addr, "stmt:query", "breaker") - return - } - stmt := s.prepare() - if stmt == nil { - err = ErrStmtNil - return - } - _, c, cancel := s.db.conf.QueryTimeout.Shrink(c) - rs, err := stmt.QueryContext(c, args...) - s.db.onBreaker(&err) - _metricReqDur.Observe(int64(time.Since(now)/time.Millisecond), s.db.addr, s.db.addr, "stmt:query") - if err != nil { - err = errors.Wrapf(err, "addr: %s, query:%s, args:%+v", s.db.addr, s.query, args) - cancel() - return - } - rows = &Rows{Rows: rs, cancel: cancel} - return -} - -// QueryRow executes a prepared query statement with the given arguments. -// If an error occurs during the execution of the statement, that error will -// be returned by a call to Scan on the returned *Row, which is always non-nil. -// If the query selects no rows, the *Row's Scan will return ErrNoRows. -// Otherwise, the *Row's Scan scans the first selected row and discards the rest. -func (s *Stmt) QueryRow(c context.Context, args ...interface{}) (row *Row) { - now := time.Now() - defer slowLog(fmt.Sprintf("QueryRow addr: %s query(%s) args(%+v)", s.db.addr, s.query, args), now) - row = &Row{db: s.db, query: s.query, args: args} - if s.tx { - if s.t != nil { - s.t.SetTag(trace.String(trace.TagAnnotation, s.query)) - } - } else if t, ok := trace.FromContext(c); ok { - t = t.Fork(_family, "queryrow") - t.SetTag(trace.String(trace.TagAddress, s.db.addr), trace.String(trace.TagComment, s.query)) - row.t = t - } - if row.err = s.db.breaker.Allow(); row.err != nil { - _metricReqErr.Inc(s.db.addr, s.db.addr, "stmt:queryrow", "breaker") - return - } - stmt := s.prepare() - if stmt == nil { - return - } - _, c, cancel := s.db.conf.QueryTimeout.Shrink(c) - row.Row = stmt.QueryRowContext(c, args...) - row.cancel = cancel - _metricReqDur.Observe(int64(time.Since(now)/time.Millisecond), s.db.addr, s.db.addr, "stmt:queryrow") - return -} - -func (s *Stmts) prepare(conn *conn) (st *Stmt) { - if conn == nil { - conn = s.db.conn() - } - s.mu.RLock() - st = s.sts[conn.addr] - s.mu.RUnlock() - if st == nil { - st = conn.prepared(s.query) - s.mu.Lock() - s.sts[conn.addr] = st - s.mu.Unlock() - } - return -} - -// Exec executes a prepared statement with the given arguments and returns a -// Result summarizing the effect of the statement. -func (s *Stmts) Exec(c context.Context, args ...interface{}) (res sql.Result, err error) { - return s.prepare(nil).Exec(c, args...) -} - -// Query executes a prepared query statement with the given arguments and -// returns the query results as a *Rows. -func (s *Stmts) Query(c context.Context, args ...interface{}) (rows *Rows, err error) { - return s.prepare(nil).Query(c, args...) -} - -// QueryRow executes a prepared query statement with the given arguments. -// If an error occurs during the execution of the statement, that error will -// be returned by a call to Scan on the returned *Row, which is always non-nil. -// If the query selects no rows, the *Row's Scan will return ErrNoRows. 
-// Otherwise, the *Row's Scan scans the first selected row and discards the rest. -func (s *Stmts) QueryRow(c context.Context, args ...interface{}) (row *Row) { - return s.prepare(nil).QueryRow(c, args...) -} - -// Close closes the statement. -func (s *Stmts) Close() (err error) { - for _, st := range s.sts { - if err = errors.WithStack(st.Close()); err != nil { - return - } - } - return -} - -// Commit commits the transaction. -func (tx *Tx) Commit() (err error) { - err = tx.tx.Commit() - tx.cancel() - tx.db.onBreaker(&err) - if tx.t != nil { - tx.t.Finish(&err) - } - if err != nil { - err = errors.WithStack(err) - } - return -} - -// Rollback aborts the transaction. -func (tx *Tx) Rollback() (err error) { - err = tx.tx.Rollback() - tx.cancel() - tx.db.onBreaker(&err) - if tx.t != nil { - tx.t.Finish(&err) - } - if err != nil { - err = errors.WithStack(err) - } - return -} - -// Exec executes a query that doesn't return rows. For example: an INSERT and -// UPDATE. -func (tx *Tx) Exec(query string, args ...interface{}) (res sql.Result, err error) { - now := time.Now() - defer slowLog(fmt.Sprintf("Exec addr: %s query(%s) args(%+v)", tx.db.addr, query, args), now) - if tx.t != nil { - tx.t.SetTag(trace.String(trace.TagAnnotation, fmt.Sprintf("exec %s", query))) - } - res, err = tx.tx.ExecContext(tx.c, query, args...) - _metricReqDur.Observe(int64(time.Since(now)/time.Millisecond), tx.db.addr, tx.db.addr, "tx:exec") - if err != nil { - err = errors.Wrapf(err, "addr: %s exec:%s, args:%+v", tx.db.addr, query, args) - } - return -} - -// Query executes a query that returns rows, typically a SELECT. -func (tx *Tx) Query(query string, args ...interface{}) (rows *Rows, err error) { - if tx.t != nil { - tx.t.SetTag(trace.String(trace.TagAnnotation, fmt.Sprintf("query %s", query))) - } - now := time.Now() - defer slowLog(fmt.Sprintf("Query addr: %s query(%s) args(%+v)", tx.db.addr, query, args), now) - defer func() { - _metricReqDur.Observe(int64(time.Since(now)/time.Millisecond), tx.db.addr, tx.db.addr, "tx:query") - }() - rs, err := tx.tx.QueryContext(tx.c, query, args...) - if err == nil { - rows = &Rows{Rows: rs} - } else { - err = errors.Wrapf(err, "addr: %s, query:%s, args:%+v", tx.db.addr, query, args) - } - return -} - -// QueryRow executes a query that is expected to return at most one row. -// QueryRow always returns a non-nil value. Errors are deferred until Row's -// Scan method is called. -func (tx *Tx) QueryRow(query string, args ...interface{}) *Row { - if tx.t != nil { - tx.t.SetTag(trace.String(trace.TagAnnotation, fmt.Sprintf("queryrow %s", query))) - } - now := time.Now() - defer slowLog(fmt.Sprintf("QueryRow addr: %s query(%s) args(%+v)", tx.db.addr, query, args), now) - defer func() { - _metricReqDur.Observe(int64(time.Since(now)/time.Millisecond), tx.db.addr, tx.db.addr, "tx:queryrow") - }() - r := tx.tx.QueryRowContext(tx.c, query, args...) - return &Row{Row: r, db: tx.db, query: query, args: args} -} - -// Stmt returns a transaction-specific prepared statement from an existing statement. -func (tx *Tx) Stmt(stmt *Stmt) *Stmt { - if stmt == nil { - return nil - } - as, ok := stmt.stmt.Load().(*sql.Stmt) - if !ok { - return nil - } - ts := tx.tx.StmtContext(tx.c, as) - st := &Stmt{query: stmt.query, tx: true, t: tx.t, db: tx.db} - st.stmt.Store(ts) - return st -} - -// Stmts returns a transaction-specific prepared statement from an existing statement. 
-func (tx *Tx) Stmts(stmt *Stmts) *Stmt { - return tx.Stmt(stmt.prepare(tx.db)) -} - -// Prepare creates a prepared statement for use within a transaction. -// The returned statement operates within the transaction and can no longer be -// used once the transaction has been committed or rolled back. -// To use an existing prepared statement on this transaction, see Tx.Stmt. -func (tx *Tx) Prepare(query string) (*Stmt, error) { - if tx.t != nil { - tx.t.SetTag(trace.String(trace.TagAnnotation, fmt.Sprintf("prepare %s", query))) - } - defer slowLog(fmt.Sprintf("Prepare addr: %s query(%s)", tx.db.addr, query), time.Now()) - stmt, err := tx.tx.Prepare(query) - if err != nil { - err = errors.Wrapf(err, "addr: %s prepare %s", tx.db.addr, query) - return nil, err - } - st := &Stmt{query: query, tx: true, t: tx.t, db: tx.db} - st.stmt.Store(stmt) - return st, nil -} - -// parseDSNAddr parse dsn name and return addr. -func parseDSNAddr(dsn string) (addr string) { - if dsn == "" { - return - } - cfg, err := mysql.ParseDSN(dsn) - if err != nil { - return - } - addr = cfg.Addr - return -} - -func genDSN(dsn, addr string) (res string) { - cfg, err := mysql.ParseDSN(dsn) - if err != nil { - return - } - cfg.Addr = addr - cfg.Net = "tcp" - res = cfg.FormatDSN() - return -} - -func slowLog(statement string, now time.Time) { - du := time.Since(now) - if du > _slowLogDuration { - log.Warn("%s slow log statement: %s time: %v", _family, statement, du) - } -} diff --git a/pkg/database/tidb/tidb.go b/pkg/database/tidb/tidb.go deleted file mode 100644 index 02fbe8731..000000000 --- a/pkg/database/tidb/tidb.go +++ /dev/null @@ -1,35 +0,0 @@ -package tidb - -import ( - "github.com/go-kratos/kratos/pkg/log" - "github.com/go-kratos/kratos/pkg/net/netutil/breaker" - "github.com/go-kratos/kratos/pkg/time" - - // database driver - _ "github.com/go-sql-driver/mysql" -) - -// Config mysql config. -type Config struct { - DSN string // dsn - Active int // pool - Idle int // pool - IdleTimeout time.Duration // connect max life time. - QueryTimeout time.Duration // query sql timeout - ExecTimeout time.Duration // execute sql timeout - TranTimeout time.Duration // transaction sql timeout - Breaker *breaker.Config // breaker -} - -// NewTiDB new db and retry connection when has error. 
-func NewTiDB(c *Config) (db *DB) { - if c.QueryTimeout == 0 || c.ExecTimeout == 0 || c.TranTimeout == 0 { - panic("tidb must be set query/execute/transaction timeout") - } - db, err := Open(c) - if err != nil { - log.Error("open tidb error(%v)", err) - panic(err) - } - return -} diff --git a/pkg/ecode/common_ecode.go b/pkg/ecode/common_ecode.go deleted file mode 100644 index d28169c22..000000000 --- a/pkg/ecode/common_ecode.go +++ /dev/null @@ -1,20 +0,0 @@ -package ecode - -// All common ecode -var ( - OK = add(0) // ok - - NotModified = add(-304) // not modified - TemporaryRedirect = add(-307) // temporary redirect - RequestErr = add(-400) // bad request - Unauthorized = add(-401) // unauthenticated - AccessDenied = add(-403) // access denied - NothingFound = add(-404) // nothing found - MethodNotAllowed = add(-405) // method not allowed - Conflict = add(-409) // conflict - Canceled = add(-498) // request canceled by client - ServerErr = add(-500) // server error - ServiceUnavailable = add(-503) // overload protection, service temporarily unavailable - Deadline = add(-504) // service call timed out - LimitExceed = add(-509) // limit exceeded -) diff --git a/pkg/ecode/ecode.go b/pkg/ecode/ecode.go deleted file mode 100644 index 259424cf4..000000000 --- a/pkg/ecode/ecode.go +++ /dev/null @@ -1,116 +0,0 @@ -package ecode - -import ( - "fmt" - "strconv" - "sync/atomic" - - "github.com/pkg/errors" -) - -var ( - _messages atomic.Value // NOTE: stored map[int]string - _codes = map[int]struct{}{} // register codes. -) - -// Register registers an ecode message map. -func Register(cm map[int]string) { - _messages.Store(cm) -} - -// New creates an ecode.Codes from an int value. -// NOTE: ecode must be globally unique; New checks for duplicates and panics on repeats. -func New(e int) Code { - if e <= 0 { - panic("business ecode must greater than zero") - } - return add(e) -} - -func add(e int) Code { - if _, ok := _codes[e]; ok { - panic(fmt.Sprintf("ecode: %d already exist", e)) - } - _codes[e] = struct{}{} - return Int(e) -} - -// Codes ecode error interface which has a code & message. -type Codes interface { - // sometimes Error returns the Code in string form - // NOTE: don't use Error in monitor reports even though it also works for now - Error() string - // Code get error code. - Code() int - // Message get code message. - Message() string - // Details get error details; it may be nil. - Details() []interface{} -} - -// A Code is an int error code spec. -type Code int - -func (e Code) Error() string { - return strconv.FormatInt(int64(e), 10) -} - -// Code return error code -func (e Code) Code() int { return int(e) } - -// Message return error message -func (e Code) Message() string { - if cm, ok := _messages.Load().(map[int]string); ok { - if msg, ok := cm[e.Code()]; ok { - return msg - } - } - return e.Error() -} - -// Details return details. -func (e Code) Details() []interface{} { return nil } - -// Int parse code int to error. -func Int(i int) Code { return Code(i) } - -// String parse code string to error. -func String(e string) Code { - if e == "" { - return OK - } - // try error string - i, err := strconv.Atoi(e) - if err != nil { - return ServerErr - } - return Code(i) -} - -// Cause converts an error to ecode Codes. -func Cause(e error) Codes { - if e == nil { - return OK - } - ec, ok := errors.Cause(e).(Codes) - if ok { - return ec - } - return String(e.Error()) -} - -// Equal compares a and b by code int.
-func Equal(a, b Codes) bool { - if a == nil { - a = OK - } - if b == nil { - b = OK - } - return a.Code() == b.Code() -} - -// EqualError equal error -func EqualError(code Codes, err error) bool { - return Cause(err).Code() == code.Code() -} diff --git a/pkg/ecode/status.go b/pkg/ecode/status.go deleted file mode 100644 index 78fdf1e3d..000000000 --- a/pkg/ecode/status.go +++ /dev/null @@ -1,98 +0,0 @@ -package ecode - -import ( - "fmt" - "strconv" - - "github.com/go-kratos/kratos/pkg/ecode/types" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" -) - -// Error new status with code and message -func Error(code Code, message string) *Status { - return &Status{s: &types.Status{Code: int32(code.Code()), Message: message}} -} - -// Errorf new status with code and message -func Errorf(code Code, format string, args ...interface{}) *Status { - return Error(code, fmt.Sprintf(format, args...)) -} - -var _ Codes = &Status{} - -// Status statusError is an alias of a status proto -// implement ecode.Codes -type Status struct { - s *types.Status -} - -// Error implement error -func (s *Status) Error() string { - return s.Message() -} - -// Code return error code -func (s *Status) Code() int { - return int(s.s.Code) -} - -// Message return error message for developer -func (s *Status) Message() string { - if s.s.Message == "" { - return strconv.Itoa(int(s.s.Code)) - } - return s.s.Message -} - -// Details return error details -func (s *Status) Details() []interface{} { - if s == nil || s.s == nil { - return nil - } - details := make([]interface{}, 0, len(s.s.Details)) - for _, any := range s.s.Details { - detail := &ptypes.DynamicAny{} - if err := ptypes.UnmarshalAny(any, detail); err != nil { - details = append(details, err) - continue - } - details = append(details, detail.Message) - } - return details -} - -// WithDetails WithDetails -func (s *Status) WithDetails(pbs ...proto.Message) (*Status, error) { - for _, pb := range pbs { - anyMsg, err := ptypes.MarshalAny(pb) - if err != nil { - return s, err - } - s.s.Details = append(s.s.Details, anyMsg) - } - return s, nil -} - -// Proto return origin protobuf message -func (s *Status) Proto() *types.Status { - return s.s -} - -// FromCode create status from ecode -func FromCode(code Code) *Status { - return &Status{s: &types.Status{Code: int32(code), Message: code.Message()}} -} - -// FromProto new status from grpc detail -func FromProto(pbMsg proto.Message) Codes { - if msg, ok := pbMsg.(*types.Status); ok { - if msg.Message == "" || msg.Message == strconv.FormatInt(int64(msg.Code), 10) { - // NOTE: if message is empty convert to pure Code, will get message from config center. 
- return Code(msg.Code) - } - return &Status{s: msg} - } - return Errorf(ServerErr, "invalid proto message get %v", pbMsg) -} diff --git a/pkg/ecode/status_test.go b/pkg/ecode/status_test.go deleted file mode 100644 index 5778ac4be..000000000 --- a/pkg/ecode/status_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package ecode - -import ( - "testing" - "time" - - "github.com/golang/protobuf/ptypes/timestamp" - "github.com/stretchr/testify/assert" - - "github.com/go-kratos/kratos/pkg/ecode/types" -) - -func TestEqual(t *testing.T) { - var ( - err1 = Error(RequestErr, "test") - err2 = Errorf(RequestErr, "test") - ) - assert.Equal(t, err1, err2) - assert.True(t, Equal(nil, nil)) -} - -func TestDetail(t *testing.T) { - m := ×tamp.Timestamp{Seconds: time.Now().Unix()} - st, _ := Error(RequestErr, "RequestErr").WithDetails(m) - - assert.Equal(t, "RequestErr", st.Message()) - assert.Equal(t, int(RequestErr), st.Code()) - assert.IsType(t, m, st.Details()[0]) -} - -func TestFromCode(t *testing.T) { - err := FromCode(RequestErr) - - assert.Equal(t, int(RequestErr), err.Code()) - assert.Equal(t, "-400", err.Message()) -} - -func TestFromProto(t *testing.T) { - msg := &types.Status{Code: 2233, Message: "error"} - err := FromProto(msg) - - assert.Equal(t, 2233, err.Code()) - assert.Equal(t, "error", err.Message()) - - m := ×tamp.Timestamp{Seconds: time.Now().Unix()} - err = FromProto(m) - assert.Equal(t, -500, err.Code()) - assert.Contains(t, err.Message(), "invalid proto message get") -} - -func TestEmpty(t *testing.T) { - st := &Status{} - assert.Len(t, st.Details(), 0) - - st = nil - assert.Len(t, st.Details(), 0) -} diff --git a/pkg/ecode/types/status.pb.go b/pkg/ecode/types/status.pb.go deleted file mode 100644 index 8fe2c79c6..000000000 --- a/pkg/ecode/types/status.pb.go +++ /dev/null @@ -1,102 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: internal/types/status.proto - -package types // import "github.com/go-kratos/kratos/pkg/ecode/types" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import any "github.com/golang/protobuf/ptypes/any" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Status struct { - // The error code see ecode.Code - Code int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"` - // A developer-facing error message, which should be in English. Any - Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` - // A list of messages that carry the error details. There is a common set of - // message types for APIs to use. 
- Details []*any.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Status) Reset() { *m = Status{} } -func (m *Status) String() string { return proto.CompactTextString(m) } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_status_88668d6b2bf80f08, []int{0} -} -func (m *Status) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Status.Unmarshal(m, b) -} -func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Status.Marshal(b, m, deterministic) -} -func (dst *Status) XXX_Merge(src proto.Message) { - xxx_messageInfo_Status.Merge(dst, src) -} -func (m *Status) XXX_Size() int { - return xxx_messageInfo_Status.Size(m) -} -func (m *Status) XXX_DiscardUnknown() { - xxx_messageInfo_Status.DiscardUnknown(m) -} - -var xxx_messageInfo_Status proto.InternalMessageInfo - -func (m *Status) GetCode() int32 { - if m != nil { - return m.Code - } - return 0 -} - -func (m *Status) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -func (m *Status) GetDetails() []*any.Any { - if m != nil { - return m.Details - } - return nil -} - -func init() { - proto.RegisterType((*Status)(nil), "bilibili.rpc.Status") -} - -func init() { proto.RegisterFile("internal/types/status.proto", fileDescriptor_status_88668d6b2bf80f08) } - -var fileDescriptor_status_88668d6b2bf80f08 = []byte{ - // 220 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x8f, 0xb1, 0x4a, 0x04, 0x31, - 0x10, 0x86, 0xd9, 0x5b, 0xbd, 0xc3, 0x9c, 0x85, 0x04, 0x8b, 0x55, 0x9b, 0xc5, 0x6a, 0x0b, 0x4d, - 0x40, 0x4b, 0x2b, 0xcf, 0x17, 0x58, 0x22, 0x36, 0x76, 0x49, 0x6e, 0x2e, 0x04, 0x92, 0xcc, 0x92, - 0xe4, 0x8a, 0xbc, 0x8e, 0x4f, 0x2a, 0x9b, 0x65, 0x41, 0x8b, 0x19, 0x66, 0x98, 0xff, 0xe7, 0xfb, - 0x87, 0x3c, 0xd8, 0x90, 0x21, 0x06, 0xe9, 0x78, 0x2e, 0x13, 0x24, 0x9e, 0xb2, 0xcc, 0xe7, 0xc4, - 0xa6, 0x88, 0x19, 0xe9, 0xb5, 0xb2, 0xce, 0xce, 0xc5, 0xe2, 0xa4, 0xef, 0xef, 0x0c, 0xa2, 0x71, - 0xc0, 0xeb, 0x4d, 0x9d, 0x4f, 0x5c, 0x86, 0xb2, 0x08, 0x1f, 0x4f, 0x64, 0xfb, 0x59, 0x8d, 0x94, - 0x92, 0x0b, 0x8d, 0x47, 0xe8, 0x9a, 0xbe, 0x19, 0x2e, 0x45, 0x9d, 0x69, 0x47, 0x76, 0x1e, 0x52, - 0x92, 0x06, 0xba, 0x4d, 0xdf, 0x0c, 0x57, 0x62, 0x5d, 0x29, 0x23, 0xbb, 0x23, 0x64, 0x69, 0x5d, - 0xea, 0xda, 0xbe, 0x1d, 0xf6, 0x2f, 0xb7, 0x6c, 0x81, 0xb0, 0x15, 0xc2, 0xde, 0x43, 0x11, 0xab, - 0xe8, 0xf0, 0x45, 0x6e, 0x34, 0x7a, 0xf6, 0x37, 0xd6, 0x61, 0xbf, 0x90, 0xc7, 0xd9, 0x30, 0x36, - 0xdf, 0x4f, 0x06, 0x9f, 0x35, 0x7a, 0x8f, 0x81, 0x3b, 0xab, 0xa2, 0x8c, 0x85, 0xc3, 0x9c, 0x82, - 0xff, 0x7f, 0xf4, 0xad, 0xf6, 0x9f, 0x4d, 0x2b, 0xc6, 0x0f, 0xb5, 0xad, 0xb4, 0xd7, 0xdf, 0x00, - 0x00, 0x00, 0xff, 0xff, 0x80, 0xa3, 0xc1, 0x82, 0x0d, 0x01, 0x00, 0x00, -} diff --git a/pkg/ecode/types/status.proto b/pkg/ecode/types/status.proto deleted file mode 100644 index 86101fc1d..000000000 --- a/pkg/ecode/types/status.proto +++ /dev/null @@ -1,23 +0,0 @@ -syntax = "proto3"; - -package bilibili.rpc; - -import "google/protobuf/any.proto"; - -option go_package = "github.com/go-kratos/kratos/pkg/ecode/types;types"; -option java_multiple_files = true; -option java_outer_classname = "StatusProto"; -option java_package = "com.bilibili.rpc"; -option objc_class_prefix = "RPC"; - -message Status { - // The error code see ecode.Code - int32 code = 1; - - // A 
developer-facing error message, which should be in English. Any - string message = 2; - - // A list of messages that carry the error details. There is a common set of - // message types for APIs to use. - repeated google.protobuf.Any details = 3; -} diff --git a/pkg/log/doc.go b/pkg/log/doc.go deleted file mode 100644 index 8c85fe52e..000000000 --- a/pkg/log/doc.go +++ /dev/null @@ -1,69 +0,0 @@ -/*Package log is the kratos logging library. - -I. Main features: - - 1. Write logs to elk - 2. Write logs to local files, using log4go internally - 3. Write logs to stdout - 4. Verbose logging modeled on glog; different verbose levels can be set, disabled by default - -II. Log configuration - -1. Default agent configuration - - The logger ships with a default configuration and switches remote logging automatically based on env. It can be used directly: - log.Init(nil) - -2. Startup flags or environment variables - - flag env var description - log.stdout LOG_STDOUT whether to enable stdout output - log.agent LOG_AGENT remote log address: unixpacket:///var/run/lancer/collector_tcp.sock?timeout=100ms&chan=1024 - log.dir LOG_DIR file log path - log.v LOG_V verbose log level - log.module LOG_MODULE per-file verbose level: file=1,file2=2 - log.filter LOG_FILTER fields to filter: field1,field2 - -3. Configuration file -If there are special needs, configuration in the following format can be used: - [log] - family = "xxx-service" - dir = "/data/log/xxx-service/" - stdout = true - vLevel = 3 - filter = ["field1", "field2"] - [log.module] - "dao_user" = 2 - "servic*" = 1 - [log.agent] - taskID = "00000x" - proto = "unixpacket" - addr = "/var/run/lancer/collector_tcp.sock" - chanSize = 10240 - -III. Configuration options - -1. log - - family project name, defaults to the $APPID environment variable - stdout stdout output, not recommended in the prod environment - filter fields to filter out, replaced with "***" - dir file log path, not recommended in the prod environment - v enable verbose-level logging, a global level can be specified - -2. log.module - - per-file verbose level configuration - -3. log.agent -Remote log configuration: - taskID the taskID assigned by lancer - proto network protocol, commonly tcp, udp, unixgram - addr network address, commonly ip:port or a sock path - chanSize log queue length - -IV. Best practices - -1. KVString: using KVString instead of KV reduces object allocation and avoids pressure on the golang GC. -*/ -package log diff --git a/pkg/log/dsn.go b/pkg/log/dsn.go deleted file mode 100644 index ea8158a84..000000000 --- a/pkg/log/dsn.go +++ /dev/null @@ -1,48 +0,0 @@ -package log - -import ( - "fmt" - "strconv" - "strings" -) - -type verboseModule map[string]int32 - -type logFilter []string - -func (f *logFilter) String() string { - return fmt.Sprint(*f) -} - -// Set sets the value of the named command-line flag. -// format: -log.filter key1,key2 -func (f *logFilter) Set(value string) error { - for _, i := range strings.Split(value, ",") { - *f = append(*f, strings.TrimSpace(i)) - } - return nil -} - -func (m verboseModule) String() string { - var b strings.Builder - for k, v := range m { - b.WriteString(k) - b.WriteString(strconv.FormatInt(int64(v), 10)) - b.WriteString(",") - } - return b.String() -} - -// Set sets the value of the named command-line flag. -// format: -log.module file=1,file2=2 -func (m verboseModule) Set(value string) error { - for _, i := range strings.Split(value, ",") { - kv := strings.Split(i, "=") - if len(kv) == 2 { - if v, err := strconv.ParseInt(kv[1], 10, 64); err == nil { - m[strings.TrimSpace(kv[0])] = int32(v) - } - } - } - return nil -} diff --git a/pkg/log/field.go b/pkg/log/field.go deleted file mode 100644 index 3feecb8a6..000000000 --- a/pkg/log/field.go +++ /dev/null @@ -1,58 +0,0 @@ -package log - -import ( - "math" - "time" - - "github.com/go-kratos/kratos/pkg/log/internal/core" -) - -// D represents a map of entry level data used for structured logging. -// type D map[string]interface{} -type D = core.Field - -// KVString construct Field with string value. -func KVString(key string, value string) D { - return D{Key: key, Type: core.StringType, StringVal: value} -} - -// KVInt construct Field with int value.
-func KVInt(key string, value int) D { - return D{Key: key, Type: core.IntTpye, Int64Val: int64(value)} -} - -// KVInt64 construct D with int64 value. -func KVInt64(key string, value int64) D { - return D{Key: key, Type: core.Int64Type, Int64Val: value} -} - -// KVUint construct Field with uint value. -func KVUint(key string, value uint) D { - return D{Key: key, Type: core.UintType, Int64Val: int64(value)} -} - -// KVUint64 construct Field with uint64 value. -func KVUint64(key string, value uint64) D { - return D{Key: key, Type: core.Uint64Type, Int64Val: int64(value)} -} - -// KVFloat32 construct Field with float32 value. -func KVFloat32(key string, value float32) D { - return D{Key: key, Type: core.Float32Type, Int64Val: int64(math.Float32bits(value))} -} - -// KVFloat64 construct Field with float64 value. -func KVFloat64(key string, value float64) D { - return D{Key: key, Type: core.Float64Type, Int64Val: int64(math.Float64bits(value))} -} - -// KVDuration construct Field with Duration value. -func KVDuration(key string, value time.Duration) D { - return D{Key: key, Type: core.DurationType, Int64Val: int64(value)} -} - -// KV returns a log kv for a logging field. -// NOTE: using KV{type name} avoids object allocation and gets better performance. []~( ̄▽ ̄)~* cheers -func KV(key string, value interface{}) D { - return D{Key: key, Value: value} -} diff --git a/pkg/log/file.go b/pkg/log/file.go deleted file mode 100644 index 173fdab97..000000000 --- a/pkg/log/file.go +++ /dev/null @@ -1,97 +0,0 @@ -package log - -import ( - "context" - "io" - "path/filepath" - "time" - - "github.com/go-kratos/kratos/pkg/log/internal/filewriter" -) - -// level idx -const ( - _debugIdx = iota - _infoIdx - _warnIdx - _errorIdx - _fatalIdx - _totalIdx -) - -var _fileNames = map[int]string{ - _debugIdx: "debug.log", - _infoIdx: "info.log", - _warnIdx: "warning.log", - _errorIdx: "error.log", - _fatalIdx: "fatal.log", -} - -// FileHandler . -type FileHandler struct { - render Render - fws [_totalIdx]*filewriter.FileWriter -} - -// NewFile creates a file logger. -func NewFile(dir string, bufferSize, rotateSize int64, maxLogFile int) *FileHandler { - // new info writer - newWriter := func(name string) *filewriter.FileWriter { - var options []filewriter.Option - if rotateSize > 0 { - options = append(options, filewriter.MaxSize(rotateSize)) - } - if maxLogFile > 0 { - options = append(options, filewriter.MaxFile(maxLogFile)) - } - w, err := filewriter.New(filepath.Join(dir, name), options...) - if err != nil { - panic(err) - } - return w - } - handler := &FileHandler{ - render: newPatternRender("[%D %T] [%L] [%S] %M"), - } - for idx, name := range _fileNames { - handler.fws[idx] = newWriter(name) - } - return handler -} - -// Log logs to file. -func (h *FileHandler) Log(ctx context.Context, lv Level, args ...D) { - d := toMap(args...)
- // add extra fields - addExtraField(ctx, d) - d[_time] = time.Now().Format(_timeFormat) - var w io.Writer - switch lv { - case _debugLevel: - w = h.fws[_debugIdx] - case _warnLevel: - w = h.fws[_warnIdx] - case _errorLevel: - w = h.fws[_errorIdx] - case _fatalLevel: - w = h.fws[_fatalIdx] - default: - w = h.fws[_infoIdx] - } - h.render.Render(w, d) - w.Write([]byte("\n")) -} - -// Close log handler -func (h *FileHandler) Close() error { - for _, fw := range h.fws { - // ignored error - fw.Close() - } - return nil -} - -// SetFormat set log format -func (h *FileHandler) SetFormat(format string) { - h.render = newPatternRender(format) -} diff --git a/pkg/log/handler.go b/pkg/log/handler.go deleted file mode 100644 index 71b846bf1..000000000 --- a/pkg/log/handler.go +++ /dev/null @@ -1,118 +0,0 @@ -package log - -import ( - "context" - "time" - - pkgerr "github.com/pkg/errors" -) - -const ( - _timeFormat = "2006-01-02T15:04:05.999999" - - // log level defined in level.go. - _levelValue = "level_value" - // log level name: INFO, WARN... - _level = "level" - // log time. - _time = "time" - // request path. - // _title = "title" - // log file. - _source = "source" - // common log field. - _log = "log" - // app name. - _appID = "app_id" - // container ID. - _instanceID = "instance_id" - // unique ID from trace. - _tid = "traceid" - // request time. - // _ts = "ts" - // requester. - _caller = "caller" - // container environment: prod, pre, uat, fat. - _deplyEnv = "env" - // container area. - _zone = "zone" - // mirror flag - _mirror = "mirror" - // color. - _color = "color" - // env_color - _envColor = "env_color" - // cluster. - _cluster = "cluster" -) - -// Handler is used to handle log events, outputting them to -// stdio or sending them to remote services. See the "handlers" -// directory for implementations. -// -// It is left up to Handlers to implement thread-safety. -type Handler interface { - // Log handles a log event - // variadic D is a k-v struct representing the log content - Log(context.Context, Level, ...D) - - // SetFormat set render format on log output - // see StdoutHandler.SetFormat for detail - SetFormat(string) - - // Close handler - Close() error -} - -func newHandlers(filters []string, handlers ...Handler) *Handlers { - set := make(map[string]struct{}) - for _, k := range filters { - set[k] = struct{}{} - } - return &Handlers{filters: set, handlers: handlers} -} - -// Handlers is a bundle of handlers with a filter function. -type Handlers struct { - filters map[string]struct{} - handlers []Handler -} - -// Log logs through all handlers. -func (hs Handlers) Log(ctx context.Context, lv Level, d ...D) { - hasSource := false - for i := range d { - if _, ok := hs.filters[d[i].Key]; ok { - d[i].Value = "***" - } - if d[i].Key == _source { - hasSource = true - } - } - if !hasSource { - fn := funcName(3) - errIncr(lv, fn) - d = append(d, KVString(_source, fn)) - } - d = append(d, KV(_time, time.Now()), KVInt64(_levelValue, int64(lv)), KVString(_level, lv.String())) - for _, h := range hs.handlers { - h.Log(ctx, lv, d...) - } -} - -// Close close resource. -func (hs Handlers) Close() (err error) { - for _, h := range hs.handlers { - if e := h.Close(); e != nil { - err = pkgerr.WithStack(e) - } - } - return -} - -// SetFormat .
-func (hs Handlers) SetFormat(format string) { - for _, h := range hs.handlers { - h.SetFormat(format) - } -} diff --git a/pkg/log/internal/LICENSE.txt b/pkg/log/internal/LICENSE.txt deleted file mode 100644 index 8ed1527bd..000000000 --- a/pkg/log/internal/LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -COPY FROM: https://github.com/uber-go/zap - -Copyright (c) 2016-2017 Uber Technologies, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. \ No newline at end of file diff --git a/pkg/log/internal/core/buffer.go b/pkg/log/internal/core/buffer.go deleted file mode 100644 index a2efea492..000000000 --- a/pkg/log/internal/core/buffer.go +++ /dev/null @@ -1,97 +0,0 @@ -package core - -import "strconv" - -const _size = 1024 // by default, create 1 KiB buffers - -// NewBuffer is new buffer -func NewBuffer(_size int) *Buffer { - return &Buffer{bs: make([]byte, 0, _size)} -} - -// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so -// the only way to construct one is via a Pool. -type Buffer struct { - bs []byte - pool Pool -} - -// AppendByte writes a single byte to the Buffer. -func (b *Buffer) AppendByte(v byte) { - b.bs = append(b.bs, v) -} - -// AppendString writes a string to the Buffer. -func (b *Buffer) AppendString(s string) { - b.bs = append(b.bs, s...) -} - -// AppendInt appends an integer to the underlying buffer (assuming base 10). -func (b *Buffer) AppendInt(i int64) { - b.bs = strconv.AppendInt(b.bs, i, 10) -} - -// AppendUint appends an unsigned integer to the underlying buffer (assuming -// base 10). -func (b *Buffer) AppendUint(i uint64) { - b.bs = strconv.AppendUint(b.bs, i, 10) -} - -// AppendBool appends a bool to the underlying buffer. -func (b *Buffer) AppendBool(v bool) { - b.bs = strconv.AppendBool(b.bs, v) -} - -// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN -// or +/- Inf. -func (b *Buffer) AppendFloat(f float64, bitSize int) { - b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize) -} - -// Len returns the length of the underlying byte slice. -func (b *Buffer) Len() int { - return len(b.bs) -} - -// Cap returns the capacity of the underlying byte slice. -func (b *Buffer) Cap() int { - return cap(b.bs) -} - -// Bytes returns a mutable reference to the underlying byte slice. -func (b *Buffer) Bytes() []byte { - return b.bs -} - -// String returns a string copy of the underlying byte slice. -func (b *Buffer) String() string { - return string(b.bs) -} - -// Reset resets the underlying byte slice. 
Subsequent writes re-use the slice's -// backing array. -func (b *Buffer) Reset() { - b.bs = b.bs[:0] -} - -// Write implements io.Writer. -func (b *Buffer) Write(bs []byte) (int, error) { - b.bs = append(b.bs, bs...) - return len(bs), nil -} - -// TrimNewline trims any final "\n" byte from the end of the buffer. -func (b *Buffer) TrimNewline() { - if i := len(b.bs) - 1; i >= 0 { - if b.bs[i] == '\n' { - b.bs = b.bs[:i] - } - } -} - -// Free returns the Buffer to its Pool. -// -// Callers must not retain references to the Buffer after calling Free. -func (b *Buffer) Free() { - b.pool.put(b) -} diff --git a/pkg/log/internal/core/buffer_test.go b/pkg/log/internal/core/buffer_test.go deleted file mode 100644 index 67e723c0a..000000000 --- a/pkg/log/internal/core/buffer_test.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package core - -import ( - "bytes" - "strings" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestBufferWrites(t *testing.T) { - buf := NewPool(0).Get() - - tests := []struct { - desc string - f func() - want string - }{ - {"AppendByte", func() { buf.AppendByte('v') }, "v"}, - {"AppendString", func() { buf.AppendString("foo") }, "foo"}, - {"AppendIntPositive", func() { buf.AppendInt(42) }, "42"}, - {"AppendIntNegative", func() { buf.AppendInt(-42) }, "-42"}, - {"AppendUint", func() { buf.AppendUint(42) }, "42"}, - {"AppendBool", func() { buf.AppendBool(true) }, "true"}, - {"AppendFloat64", func() { buf.AppendFloat(3.14, 64) }, "3.14"}, - // Intenationally introduce some floating-point error. - {"AppendFloat32", func() { buf.AppendFloat(float64(float32(3.14)), 32) }, "3.14"}, - {"AppendWrite", func() { buf.Write([]byte("foo")) }, "foo"}, - } - - for _, tt := range tests { - t.Run(tt.desc, func(t *testing.T) { - buf.Reset() - tt.f() - assert.Equal(t, tt.want, buf.String(), "Unexpected buffer.String().") - assert.Equal(t, tt.want, string(buf.Bytes()), "Unexpected string(buffer.Bytes()).") - assert.Equal(t, len(tt.want), buf.Len(), "Unexpected buffer length.") - // We're not writing more than a kibibyte in tests. - assert.Equal(t, _size, buf.Cap(), "Expected buffer capacity to remain constant.") - }) - } -} - -func BenchmarkBuffers(b *testing.B) { - // Because we use the strconv.AppendFoo functions so liberally, we can't - // use the standard library's bytes.Buffer anyways (without incurring a - // bunch of extra allocations). 
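Buffer above is a thin, pooled wrapper around a byte slice: take one from a Pool, append primitives with the strconv-backed Append* helpers, read the bytes, and Free it back. Because the package sits under internal/, the sketch below is written as if it lived inside package core itself; bufferExample is an illustrative name:

package core

import "fmt"

// bufferExample appends a few primitives to a pooled Buffer and recycles it.
func bufferExample() {
	pool := NewPool(0) // size 0 falls back to the default 1 KiB backing slice
	buf := pool.Get()  // Get resets the buffer and remembers its owning pool

	buf.AppendString("requests=")
	buf.AppendInt(42)
	buf.AppendByte(' ')
	buf.AppendBool(true)

	fmt.Println(buf.String(), buf.Len()) // requests=42 true 16
	buf.Free() // back to the pool; the buffer must not be used afterwards
}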
Nevertheless, let's make sure that we're - // not losing any precious nanoseconds. - str := strings.Repeat("a", 1024) - slice := make([]byte, 1024) - buf := bytes.NewBuffer(slice) - custom := NewPool(0).Get() - b.Run("ByteSlice", func(b *testing.B) { - for i := 0; i < b.N; i++ { - slice = append(slice, str...) - slice = slice[:0] - } - }) - b.Run("BytesBuffer", func(b *testing.B) { - for i := 0; i < b.N; i++ { - buf.WriteString(str) - buf.Reset() - } - }) - b.Run("CustomBuffer", func(b *testing.B) { - for i := 0; i < b.N; i++ { - custom.AppendString(str) - custom.Reset() - } - }) -} diff --git a/pkg/log/internal/core/bufferpool.go b/pkg/log/internal/core/bufferpool.go deleted file mode 100644 index d1ee3caaf..000000000 --- a/pkg/log/internal/core/bufferpool.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package core houses zap's shared internal buffer pool. Third-party -// packages can recreate the same functionality with buffers.NewPool. -package core - -var ( - _pool = NewPool(_size) - // GetPool retrieves a buffer from the pool, creating one if necessary. - GetPool = _pool.Get -) diff --git a/pkg/log/internal/core/encoder.go b/pkg/log/internal/core/encoder.go deleted file mode 100644 index 912245244..000000000 --- a/pkg/log/internal/core/encoder.go +++ /dev/null @@ -1,187 +0,0 @@ -package core - -import ( - "time" -) - -// DefaultLineEnding defines the default line ending when writing logs. -// Alternate line endings specified in EncoderConfig can override this -// behavior. -const DefaultLineEnding = "\n" - -// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a -// map- or struct-like object to the logging context. Like maps, ObjectEncoders -// aren't safe for concurrent use (though typical use shouldn't require locks). -type ObjectEncoder interface { - // Logging-specific marshalers. - AddArray(key string, marshaler ArrayMarshaler) error - AddObject(key string, marshaler ObjectMarshaler) error - - // Built-in types. 
- AddBinary(key string, value []byte) // for arbitrary bytes - AddByteString(key string, value []byte) // for UTF-8 encoded bytes - AddBool(key string, value bool) - AddComplex128(key string, value complex128) - AddComplex64(key string, value complex64) - AddDuration(key string, value time.Duration) - AddFloat64(key string, value float64) - AddFloat32(key string, value float32) - AddInt(key string, value int) - AddInt64(key string, value int64) - AddInt32(key string, value int32) - AddInt16(key string, value int16) - AddInt8(key string, value int8) - AddString(key, value string) - AddTime(key string, value time.Time) - AddUint(key string, value uint) - AddUint64(key string, value uint64) - AddUint32(key string, value uint32) - AddUint16(key string, value uint16) - AddUint8(key string, value uint8) - AddUintptr(key string, value uintptr) - - // AddReflected uses reflection to serialize arbitrary objects, so it's slow - // and allocation-heavy. - AddReflected(key string, value interface{}) error - // OpenNamespace opens an isolated namespace where all subsequent fields will - // be added. Applications can use namespaces to prevent key collisions when - // injecting loggers into sub-components or third-party libraries. - OpenNamespace(key string) -} - -// ObjectMarshaler allows user-defined types to efficiently add themselves to the -// logging context, and to selectively omit information which shouldn't be -// included in logs (e.g., passwords). -type ObjectMarshaler interface { - MarshalLogObject(ObjectEncoder) error -} - -// ObjectMarshalerFunc is a type adapter that turns a function into an -// ObjectMarshaler. -type ObjectMarshalerFunc func(ObjectEncoder) error - -// MarshalLogObject calls the underlying function. -func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error { - return f(enc) -} - -// ArrayMarshaler allows user-defined types to efficiently add themselves to the -// logging context, and to selectively omit information which shouldn't be -// included in logs (e.g., passwords). -type ArrayMarshaler interface { - MarshalLogArray(ArrayEncoder) error -} - -// ArrayMarshalerFunc is a type adapter that turns a function into an -// ArrayMarshaler. -type ArrayMarshalerFunc func(ArrayEncoder) error - -// MarshalLogArray calls the underlying function. -func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error { - return f(enc) -} - -// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding -// array-like objects to the logging context. Of note, it supports mixed-type -// arrays even though they aren't typical in Go. Like slices, ArrayEncoders -// aren't safe for concurrent use (though typical use shouldn't require locks). -type ArrayEncoder interface { - // Built-in types. - PrimitiveArrayEncoder - - // Time-related types. - AppendDuration(time.Duration) - AppendTime(time.Time) - - // Logging-specific marshalers. - AppendArray(ArrayMarshaler) error - AppendObject(ObjectMarshaler) error - - // AppendReflected uses reflection to serialize arbitrary objects, so it's - // slow and allocation-heavy. - AppendReflected(value interface{}) error -} - -// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals -// only in Go's built-in types. It's included only so that Duration- and -// TimeEncoders cannot trigger infinite recursion. -type PrimitiveArrayEncoder interface { - // Built-in types. 
- AppendBool(bool) - AppendByteString([]byte) // for UTF-8 encoded bytes - AppendComplex128(complex128) - AppendComplex64(complex64) - AppendFloat64(float64) - AppendFloat32(float32) - AppendInt(int) - AppendInt64(int64) - AppendInt32(int32) - AppendInt16(int16) - AppendInt8(int8) - AppendString(string) - AppendUint(uint) - AppendUint64(uint64) - AppendUint32(uint32) - AppendUint16(uint16) - AppendUint8(uint8) - AppendUintptr(uintptr) -} - -// An EncoderConfig allows users to configure the concrete encoders supplied by -// zapcore. -type EncoderConfig struct { - EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"` - EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"` - // Configure the primitive representations of common complex types. For - // example, some users may want all time.Times serialized as floating-point - // seconds since epoch, while others may prefer ISO8601 strings. - /*EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"` - EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"` - EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"` - EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"` - // Unlike the other primitive type encoders, EncodeName is optional. The - // zero value falls back to FullNameEncoder. - EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"`*/ -} - -// Encoder is a format-agnostic interface for all log entry marshalers. Since -// log encoders don't need to support the same wide range of use cases as -// general-purpose marshalers, it's possible to make them faster and -// lower-allocation. -// -// Implementations of the ObjectEncoder interface's methods can, of course, -// freely modify the receiver. However, the Clone and EncodeEntry methods will -// be called concurrently and shouldn't modify the receiver. -type Encoder interface { - ObjectEncoder - - // Clone copies the encoder, ensuring that adding fields to the copy doesn't - // affect the original. - Clone() Encoder - - // EncodeEntry encodes an entry and fields, along with any accumulated - // context, into a byte buffer and returns it. - Encode(*Buffer, ...Field) error -} - -// A TimeEncoder serializes a time.Time to a primitive type. -type TimeEncoder func(time.Time, PrimitiveArrayEncoder) - -// A DurationEncoder serializes a time.Duration to a primitive type. -type DurationEncoder func(time.Duration, PrimitiveArrayEncoder) - -// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds -// since the Unix epoch. -func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { - //var d []byte - enc.AppendString(t.Format("2006-01-02T15:04:05.999999")) - //enc.AppendByteString(t.AppendFormat(d, "2006-01-02T15:04:05.999999")) - /*nanos := t.UnixNano() - sec := float64(nanos) / float64(time.Second) - enc.AppendFloat64(sec)*/ -} - -// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed. 
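ObjectMarshaler is the allocation-free alternative to AddReflected: a type describes itself to the encoder field by field and can simply skip anything sensitive. A sketch of an implementation, again written as if inside package core since the package is internal; the user type is hypothetical:

package core

import "time"

// user is a hypothetical type that controls its own log representation.
type user struct {
	Name     string
	Password string // deliberately never logged
	Session  time.Duration
}

// MarshalLogObject implements ObjectMarshaler using the typed Add* methods.
func (u user) MarshalLogObject(enc ObjectEncoder) error {
	enc.AddString("name", u.Name)
	enc.AddDuration("session", u.Session)
	return nil
}

An encoder would then nest it with enc.AddObject("user", u), or append it to an array via ArrayEncoder.AppendObject.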
-func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { - enc.AppendFloat64(float64(d) / float64(time.Second)) -} diff --git a/pkg/log/internal/core/field.go b/pkg/log/internal/core/field.go deleted file mode 100644 index 66d1c5992..000000000 --- a/pkg/log/internal/core/field.go +++ /dev/null @@ -1,122 +0,0 @@ -package core - -import ( - "fmt" - "math" - "time" - - xtime "github.com/go-kratos/kratos/pkg/time" -) - -// FieldType represent D value type -type FieldType int32 - -// DType enum -const ( - UnknownType FieldType = iota - StringType - IntTpye - Int64Type - UintType - Uint64Type - Float32Type - Float64Type - DurationType -) - -// Field is for encoder -type Field struct { - Key string - Value interface{} - Type FieldType - StringVal string - Int64Val int64 -} - -// AddTo exports a field through the ObjectEncoder interface. It's primarily -// useful to library authors, and shouldn't be necessary in most applications. -func (f Field) AddTo(enc ObjectEncoder) { - if f.Type == UnknownType { - f.assertAddTo(enc) - return - } - switch f.Type { - case StringType: - enc.AddString(f.Key, f.StringVal) - case IntTpye: - enc.AddInt(f.Key, int(f.Int64Val)) - case Int64Type: - enc.AddInt64(f.Key, f.Int64Val) - case UintType: - enc.AddUint(f.Key, uint(f.Int64Val)) - case Uint64Type: - enc.AddUint64(f.Key, uint64(f.Int64Val)) - case Float32Type: - enc.AddFloat32(f.Key, math.Float32frombits(uint32(f.Int64Val))) - case Float64Type: - enc.AddFloat64(f.Key, math.Float64frombits(uint64(f.Int64Val))) - case DurationType: - enc.AddDuration(f.Key, time.Duration(f.Int64Val)) - default: - panic(fmt.Sprintf("unknown field type: %v", f)) - } -} - -func (f Field) assertAddTo(enc ObjectEncoder) { - // assert interface - switch val := f.Value.(type) { - case bool: - enc.AddBool(f.Key, val) - case complex128: - enc.AddComplex128(f.Key, val) - case complex64: - enc.AddComplex64(f.Key, val) - case float64: - enc.AddFloat64(f.Key, val) - case float32: - enc.AddFloat32(f.Key, val) - case int: - enc.AddInt(f.Key, val) - case int64: - enc.AddInt64(f.Key, val) - case int32: - enc.AddInt32(f.Key, val) - case int16: - enc.AddInt16(f.Key, val) - case int8: - enc.AddInt8(f.Key, val) - case string: - enc.AddString(f.Key, val) - case uint: - enc.AddUint(f.Key, val) - case uint64: - enc.AddUint64(f.Key, val) - case uint32: - enc.AddUint32(f.Key, val) - case uint16: - enc.AddUint16(f.Key, val) - case uint8: - enc.AddUint8(f.Key, val) - case []byte: - enc.AddByteString(f.Key, val) - case uintptr: - enc.AddUintptr(f.Key, val) - case time.Time: - enc.AddTime(f.Key, val) - case xtime.Time: - enc.AddTime(f.Key, val.Time()) - case time.Duration: - enc.AddDuration(f.Key, val) - case xtime.Duration: - enc.AddDuration(f.Key, time.Duration(val)) - case error: - enc.AddString(f.Key, val.Error()) - case fmt.Stringer: - enc.AddString(f.Key, val.String()) - default: - err := enc.AddReflected(f.Key, val) - if err != nil { - enc.AddString(fmt.Sprintf("%sError", f.Key), err.Error()) - } - } -} diff --git a/pkg/log/internal/core/json_encoder.go b/pkg/log/internal/core/json_encoder.go deleted file mode 100644 index 1f9dadc20..000000000 --- a/pkg/log/internal/core/json_encoder.go +++ /dev/null @@ -1,424 +0,0 @@ -package core - -import ( - "encoding/base64" - "encoding/json" - "math" - "sync" - "time" - "unicode/utf8" -) - -// For JSON-escaping; see jsonEncoder.safeAddString below. 
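Field keeps numeric values out of the Value interface{} by packing them into Int64Val: KVFloat64 (earlier in this diff) stores math.Float64bits(value), and AddTo above recovers the float with math.Float64frombits before handing it to the encoder. A short self-contained check that the round trip is exact:

package main

import (
	"fmt"
	"math"
)

func main() {
	v := 3.75
	packed := int64(math.Float64bits(v))             // what KVFloat64 stores in Int64Val
	restored := math.Float64frombits(uint64(packed)) // what Field.AddTo hands to AddFloat64
	fmt.Println(restored == v)                       // true: the bit pattern is preserved
}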
-const _hex = "0123456789abcdef" - -var _ ObjectEncoder = &jsonEncoder{} -var _jsonPool = sync.Pool{New: func() interface{} { - return &jsonEncoder{} -}} - -func getJSONEncoder() *jsonEncoder { - return _jsonPool.Get().(*jsonEncoder) -} - -func putJSONEncoder(enc *jsonEncoder) { - if enc.reflectBuf != nil { - enc.reflectBuf.Free() - } - enc.EncoderConfig = nil - enc.buf = nil - enc.spaced = false - enc.openNamespaces = 0 - enc.reflectBuf = nil - enc.reflectEnc = nil - _jsonPool.Put(enc) -} - -type jsonEncoder struct { - *EncoderConfig - buf *Buffer - spaced bool // include spaces after colons and commas - openNamespaces int - - // for encoding generic values by reflection - reflectBuf *Buffer - reflectEnc *json.Encoder -} - -// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder -// appropriately escapes all field keys and values. -// -// Note that the encoder doesn't deduplicate keys, so it's possible to produce -// a message like -// {"foo":"bar","foo":"baz"} -// This is permitted by the JSON specification, but not encouraged. Many -// libraries will ignore duplicate key-value pairs (typically keeping the last -// pair) when unmarshaling, but users should attempt to avoid adding duplicate -// keys. -func NewJSONEncoder(cfg EncoderConfig, buf *Buffer) Encoder { - return newJSONEncoder(cfg, false, buf) -} - -func newJSONEncoder(cfg EncoderConfig, spaced bool, buf *Buffer) *jsonEncoder { - return &jsonEncoder{ - EncoderConfig: &cfg, - buf: buf, - spaced: spaced, - } -} - -func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error { - enc.addKey(key) - return enc.AppendArray(arr) -} - -func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error { - enc.addKey(key) - return enc.AppendObject(obj) -} - -func (enc *jsonEncoder) AddBinary(key string, val []byte) { - enc.AddString(key, base64.StdEncoding.EncodeToString(val)) -} - -func (enc *jsonEncoder) AddByteString(key string, val []byte) { - enc.addKey(key) - enc.AppendByteString(val) -} - -func (enc *jsonEncoder) AddBool(key string, val bool) { - enc.addKey(key) - enc.AppendBool(val) -} - -func (enc *jsonEncoder) AddComplex128(key string, val complex128) { - enc.addKey(key) - enc.AppendComplex128(val) -} - -func (enc *jsonEncoder) AddDuration(key string, val time.Duration) { - enc.addKey(key) - enc.AppendDuration(val) -} - -func (enc *jsonEncoder) AddFloat64(key string, val float64) { - enc.addKey(key) - enc.AppendFloat64(val) -} - -func (enc *jsonEncoder) AddInt64(key string, val int64) { - enc.addKey(key) - enc.AppendInt64(val) -} - -func (enc *jsonEncoder) resetReflectBuf() { - if enc.reflectBuf == nil { - enc.reflectBuf = GetPool() - enc.reflectEnc = json.NewEncoder(enc.reflectBuf) - } else { - enc.reflectBuf.Reset() - } -} - -func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { - enc.resetReflectBuf() - err := enc.reflectEnc.Encode(obj) - if err != nil { - return err - } - enc.reflectBuf.TrimNewline() - enc.addKey(key) - _, err = enc.buf.Write(enc.reflectBuf.Bytes()) - return err -} - -func (enc *jsonEncoder) OpenNamespace(key string) { - enc.addKey(key) - enc.buf.AppendByte('{') - enc.openNamespaces++ -} - -func (enc *jsonEncoder) AddString(key, val string) { - enc.addKey(key) - enc.AppendString(val) -} - -func (enc *jsonEncoder) AddTime(key string, val time.Time) { - enc.addKey(key) - enc.AppendTime(val) -} - -func (enc *jsonEncoder) AddUint64(key string, val uint64) { - enc.addKey(key) - enc.AppendUint64(val) -} - -func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) 
error { - enc.addElementSeparator() - enc.buf.AppendByte('[') - err := arr.MarshalLogArray(enc) - enc.buf.AppendByte(']') - return err -} - -func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error { - enc.addElementSeparator() - enc.buf.AppendByte('{') - err := obj.MarshalLogObject(enc) - enc.buf.AppendByte('}') - return err -} - -func (enc *jsonEncoder) AppendBool(val bool) { - enc.addElementSeparator() - enc.buf.AppendBool(val) -} - -func (enc *jsonEncoder) AppendByteString(val []byte) { - enc.addElementSeparator() - enc.buf.AppendByte('"') - enc.safeAddByteString(val) - enc.buf.AppendByte('"') -} - -func (enc *jsonEncoder) AppendComplex128(val complex128) { - enc.addElementSeparator() - // Cast to a platform-independent, fixed-size type. - r, i := float64(real(val)), float64(imag(val)) - enc.buf.AppendByte('"') - // Because we're always in a quoted string, we can use strconv without - // special-casing NaN and +/-Inf. - enc.buf.AppendFloat(r, 64) - enc.buf.AppendByte('+') - enc.buf.AppendFloat(i, 64) - enc.buf.AppendByte('i') - enc.buf.AppendByte('"') -} - -func (enc *jsonEncoder) AppendDuration(val time.Duration) { - cur := enc.buf.Len() - enc.EncodeDuration(val, enc) - if cur == enc.buf.Len() { - // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep - // JSON valid. - enc.AppendInt64(int64(val)) - } -} - -func (enc *jsonEncoder) AppendInt64(val int64) { - enc.addElementSeparator() - enc.buf.AppendInt(val) -} - -func (enc *jsonEncoder) AppendReflected(val interface{}) error { - enc.resetReflectBuf() - err := enc.reflectEnc.Encode(val) - if err != nil { - return err - } - enc.reflectBuf.TrimNewline() - enc.addElementSeparator() - _, err = enc.buf.Write(enc.reflectBuf.Bytes()) - return err -} - -func (enc *jsonEncoder) AppendString(val string) { - enc.addElementSeparator() - enc.buf.AppendByte('"') - enc.safeAddString(val) - enc.buf.AppendByte('"') -} - -func (enc *jsonEncoder) AppendTime(val time.Time) { - cur := enc.buf.Len() - enc.EncodeTime(val, enc) - if cur == enc.buf.Len() { - // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep - // output JSON valid. 
- enc.AppendInt64(val.UnixNano()) - } -} - -func (enc *jsonEncoder) AppendUint64(val uint64) { - enc.addElementSeparator() - enc.buf.AppendUint(val) -} - -func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) } -func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) } -func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) } -func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) } -func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } -func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } - -func (enc *jsonEncoder) Clone() Encoder { - clone := enc.clone() - return clone -} - -func (enc *jsonEncoder) clone() *jsonEncoder { - clone := getJSONEncoder() - clone.EncoderConfig = enc.EncoderConfig - clone.spaced = enc.spaced - clone.openNamespaces = enc.openNamespaces - clone.buf = GetPool() - return clone -} - -func (enc *jsonEncoder) Encode(buf *Buffer, fields ...Field) error { - final := enc.clone() - final.buf = buf - final.buf.AppendByte('{') - if enc.buf.Len() > 0 { - final.addElementSeparator() - final.buf.Write(enc.buf.Bytes()) - } - - for i := range fields { - fields[i].AddTo(final) - } - - final.closeOpenNamespaces() - final.buf.AppendString("}\n") - putJSONEncoder(final) - return nil -} - -func (enc *jsonEncoder) closeOpenNamespaces() { - for i := 0; i < enc.openNamespaces; i++ { - enc.buf.AppendByte('}') - } -} - -func (enc *jsonEncoder) addKey(key string) { - enc.addElementSeparator() - enc.buf.AppendByte('"') - enc.safeAddString(key) - enc.buf.AppendByte('"') - enc.buf.AppendByte(':') - if enc.spaced { - enc.buf.AppendByte(' ') - } -} - -func (enc *jsonEncoder) addElementSeparator() { - last := enc.buf.Len() - 1 - if last < 0 { - return - } - switch enc.buf.Bytes()[last] { - case '{', '[', ':', ',', ' ': - return - default: - enc.buf.AppendByte(',') - if enc.spaced { - enc.buf.AppendByte(' ') - } - } -} - -func (enc *jsonEncoder) appendFloat(val float64, bitSize int) { - enc.addElementSeparator() - switch { - case math.IsNaN(val): - 
enc.buf.AppendString(`"NaN"`) - case math.IsInf(val, 1): - enc.buf.AppendString(`"+Inf"`) - case math.IsInf(val, -1): - enc.buf.AppendString(`"-Inf"`) - default: - enc.buf.AppendFloat(val, bitSize) - } -} - -// safeAddString JSON-escapes a string and appends it to the internal buffer. -// Unlike the standard library's encoder, it doesn't attempt to protect the -// user from browser vulnerabilities or JSONP-related problems. -func (enc *jsonEncoder) safeAddString(s string) { - for i := 0; i < len(s); { - if enc.tryAddRuneSelf(s[i]) { - i++ - continue - } - r, size := utf8.DecodeRuneInString(s[i:]) - if enc.tryAddRuneError(r, size) { - i++ - continue - } - enc.buf.AppendString(s[i : i+size]) - i += size - } -} - -// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte. -func (enc *jsonEncoder) safeAddByteString(s []byte) { - for i := 0; i < len(s); { - if enc.tryAddRuneSelf(s[i]) { - i++ - continue - } - r, size := utf8.DecodeRune(s[i:]) - if enc.tryAddRuneError(r, size) { - i++ - continue - } - enc.buf.Write(s[i : i+size]) - i += size - } -} - -// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte. -func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool { - if b >= utf8.RuneSelf { - return false - } - if 0x20 <= b && b != '\\' && b != '"' { - enc.buf.AppendByte(b) - return true - } - switch b { - case '\\', '"': - enc.buf.AppendByte('\\') - enc.buf.AppendByte(b) - case '\n': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('n') - case '\r': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('r') - case '\t': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('t') - default: - // Encode bytes < 0x20, except for the escape sequences above. - enc.buf.AppendString(`\u00`) - enc.buf.AppendByte(_hex[b>>4]) - enc.buf.AppendByte(_hex[b&0xF]) - } - return true -} - -func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool { - if r == utf8.RuneError && size == 1 { - enc.buf.AppendString(`\ufffd`) - return true - } - return false -} diff --git a/pkg/log/internal/core/pool.go b/pkg/log/internal/core/pool.go deleted file mode 100644 index 6644d5067..000000000 --- a/pkg/log/internal/core/pool.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package core - -import "sync" - -// A Pool is a type-safe wrapper around a sync.Pool. -type Pool struct { - p *sync.Pool -} - -// NewPool constructs a new Pool. 
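The Encoder above is used in one shot: build it with the two primitive encoders from EncoderConfig, then Encode writes a complete "{...}\n" JSON line for a set of typed Fields into the destination buffer. A sketch as it would look inside package core (jsonExample is an illustrative name; IntTpye really is how the constant is spelled in field.go):

package core

import (
	"fmt"
	"time"
)

// jsonExample encodes three typed fields into a pooled buffer as one JSON line.
func jsonExample() {
	enc := NewJSONEncoder(EncoderConfig{
		EncodeTime:     EpochTimeEncoder,       // time.Time -> "2006-01-02T15:04:05.999999"
		EncodeDuration: SecondsDurationEncoder, // time.Duration -> seconds as a float
	}, GetPool())

	out := GetPool()
	_ = enc.Encode(out,
		Field{Key: "msg", Type: StringType, StringVal: "hello"},
		Field{Key: "code", Type: IntTpye, Int64Val: 200},
		Field{Key: "cost", Type: DurationType, Int64Val: int64(150 * time.Millisecond)},
	)
	fmt.Print(out.String()) // {"msg":"hello","code":200,"cost":0.15}
	out.Free()
}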
-func NewPool(size int) Pool { - if size == 0 { - size = _size - } - return Pool{p: &sync.Pool{ - New: func() interface{} { - return &Buffer{bs: make([]byte, 0, size)} - }, - }} -} - -// Get retrieves a Buffer from the pool, creating one if necessary. -func (p Pool) Get() *Buffer { - buf := p.p.Get().(*Buffer) - buf.Reset() - buf.pool = p - return buf -} - -func (p Pool) put(buf *Buffer) { - p.p.Put(buf) -} diff --git a/pkg/log/internal/core/pool_test.go b/pkg/log/internal/core/pool_test.go deleted file mode 100644 index 33bb4ff52..000000000 --- a/pkg/log/internal/core/pool_test.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package core - -import ( - "sync" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestBuffers(t *testing.T) { - const dummyData = "dummy data" - p := NewPool(0) - - var wg sync.WaitGroup - for g := 0; g < 10; g++ { - wg.Add(1) - go func() { - for i := 0; i < 100; i++ { - buf := p.Get() - assert.Zero(t, buf.Len(), "Expected truncated buffer") - assert.NotZero(t, buf.Cap(), "Expected non-zero capacity") - - buf.AppendString(dummyData) - assert.Equal(t, buf.Len(), len(dummyData), "Expected buffer to contain dummy data") - - buf.Free() - } - wg.Done() - }() - } - wg.Wait() -} diff --git a/pkg/log/internal/filewriter/filewriter.go b/pkg/log/internal/filewriter/filewriter.go deleted file mode 100644 index f981cc9fe..000000000 --- a/pkg/log/internal/filewriter/filewriter.go +++ /dev/null @@ -1,344 +0,0 @@ -package filewriter - -import ( - "bytes" - "container/list" - "fmt" - "io/ioutil" - "log" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" -) - -// FileWriter create file log writer -type FileWriter struct { - opt option - dir string - fname string - ch chan *bytes.Buffer - stdlog *log.Logger - pool *sync.Pool - - lastRotateFormat string - lastSplitNum int - - current *wrapFile - files *list.List - - closed int32 - wg sync.WaitGroup -} - -type rotateItem struct { - rotateTime int64 - rotateNum int - fname string -} - -func parseRotateItem(dir, fname, rotateFormat string) (*list.List, error) { - fis, err := ioutil.ReadDir(dir) - if err != nil { - return nil, err - } - - // parse exists log file filename - parse := func(s string) (rt rotateItem, err error) { - // remove filename and left "." 
error.log.2018-09-12.001 -> 2018-09-12.001 - rt.fname = s - s = strings.TrimLeft(s[len(fname):], ".") - seqs := strings.Split(s, ".") - var t time.Time - switch len(seqs) { - case 2: - if rt.rotateNum, err = strconv.Atoi(seqs[1]); err != nil { - return - } - fallthrough - case 1: - if t, err = time.Parse(rotateFormat, seqs[0]); err != nil { - return - } - rt.rotateTime = t.Unix() - } - return - } - - var items []rotateItem - for _, fi := range fis { - if strings.HasPrefix(fi.Name(), fname) && fi.Name() != fname { - rt, err := parse(fi.Name()) - if err != nil { - // TODO deal with error - continue - } - items = append(items, rt) - } - } - sort.Slice(items, func(i, j int) bool { - if items[i].rotateTime == items[j].rotateTime { - return items[i].rotateNum > items[j].rotateNum - } - return items[i].rotateTime > items[j].rotateTime - }) - l := list.New() - - for _, item := range items { - l.PushBack(item) - } - return l, nil -} - -type wrapFile struct { - fsize int64 - fp *os.File -} - -func (w *wrapFile) size() int64 { - return w.fsize -} - -func (w *wrapFile) write(p []byte) (n int, err error) { - n, err = w.fp.Write(p) - w.fsize += int64(n) - return -} - -func newWrapFile(fpath string) (*wrapFile, error) { - fp, err := os.OpenFile(fpath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - return nil, err - } - fi, err := fp.Stat() - if err != nil { - return nil, err - } - return &wrapFile{fp: fp, fsize: fi.Size()}, nil -} - -// New FileWriter A FileWriter is safe for use by multiple goroutines simultaneously. -func New(fpath string, fns ...Option) (*FileWriter, error) { - opt := defaultOption - for _, fn := range fns { - fn(&opt) - } - - fname := filepath.Base(fpath) - if fname == "" { - return nil, fmt.Errorf("filename can't empty") - } - dir := filepath.Dir(fpath) - fi, err := os.Stat(dir) - if err == nil && !fi.IsDir() { - return nil, fmt.Errorf("%s already exists and not a directory", dir) - } - if os.IsNotExist(err) { - if err = os.MkdirAll(dir, 0755); err != nil { - return nil, fmt.Errorf("create dir %s error: %s", dir, err.Error()) - } - } - - current, err := newWrapFile(fpath) - if err != nil { - return nil, err - } - - stdlog := log.New(os.Stderr, "flog ", log.LstdFlags) - ch := make(chan *bytes.Buffer, opt.ChanSize) - - files, err := parseRotateItem(dir, fname, opt.RotateFormat) - if err != nil { - // set files a empty list - files = list.New() - stdlog.Printf("parseRotateItem error: %s", err) - } - - lastRotateFormat := time.Now().Format(opt.RotateFormat) - var lastSplitNum int - if files.Len() > 0 { - rt := files.Front().Value.(rotateItem) - // check contains is mush esay than compared with timestamp - if strings.Contains(rt.fname, lastRotateFormat) { - lastSplitNum = rt.rotateNum - } - } - - fw := &FileWriter{ - opt: opt, - dir: dir, - fname: fname, - stdlog: stdlog, - ch: ch, - pool: &sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}, - - lastSplitNum: lastSplitNum, - lastRotateFormat: lastRotateFormat, - - files: files, - current: current, - } - - fw.wg.Add(1) - go fw.daemon() - - return fw, nil -} - -// Write write data to log file, return write bytes is pseudo just for implement io.Writer. 
-func (f *FileWriter) Write(p []byte) (int, error) { - // atomic is not necessary - if atomic.LoadInt32(&f.closed) == 1 { - f.stdlog.Printf("%s", p) - return 0, fmt.Errorf("filewriter already closed") - } - // because write to file is asynchronousc, - // copy p to internal buf prevent p be change on outside - buf := f.getBuf() - buf.Write(p) - - if f.opt.WriteTimeout == 0 { - select { - case f.ch <- buf: - return len(p), nil - default: - // TODO: write discard log to to stdout? - return 0, fmt.Errorf("log channel is full, discard log") - } - } - - // write log with timeout - timeout := time.NewTimer(f.opt.WriteTimeout) - select { - case f.ch <- buf: - return len(p), nil - case <-timeout.C: - // TODO: write discard log to to stdout? - return 0, fmt.Errorf("log channel is full, discard log") - } -} - -func (f *FileWriter) daemon() { - // TODO: check aggsbuf size prevent it too big - aggsbuf := &bytes.Buffer{} - tk := time.NewTicker(f.opt.RotateInterval) - // TODO: make it configrable - aggstk := time.NewTicker(10 * time.Millisecond) - var err error - for { - select { - case t := <-tk.C: - f.checkRotate(t) - case buf, ok := <-f.ch: - if ok { - aggsbuf.Write(buf.Bytes()) - f.putBuf(buf) - } - case <-aggstk.C: - if aggsbuf.Len() > 0 { - if err = f.write(aggsbuf.Bytes()); err != nil { - f.stdlog.Printf("write log error: %s", err) - } - aggsbuf.Reset() - } - } - if atomic.LoadInt32(&f.closed) != 1 { - continue - } - // read all buf from channel and break loop - if err = f.write(aggsbuf.Bytes()); err != nil { - f.stdlog.Printf("write log error: %s", err) - } - for buf := range f.ch { - if err = f.write(buf.Bytes()); err != nil { - f.stdlog.Printf("write log error: %s", err) - } - f.putBuf(buf) - } - break - } - f.wg.Done() -} - -// Close close file writer -func (f *FileWriter) Close() error { - atomic.StoreInt32(&f.closed, 1) - close(f.ch) - f.wg.Wait() - return nil -} - -func (f *FileWriter) checkRotate(t time.Time) { - formatFname := func(format string, num int) string { - if num == 0 { - return fmt.Sprintf("%s.%s", f.fname, format) - } - return fmt.Sprintf("%s.%s.%03d", f.fname, format, num) - } - format := t.Format(f.opt.RotateFormat) - - if f.opt.MaxFile != 0 { - for f.files.Len() > f.opt.MaxFile { - rt := f.files.Remove(f.files.Front()).(rotateItem) - fpath := filepath.Join(f.dir, rt.fname) - if err := os.Remove(fpath); err != nil { - f.stdlog.Printf("remove file %s error: %s", fpath, err) - } - } - } - - if format != f.lastRotateFormat || (f.opt.MaxSize != 0 && f.current.size() > f.opt.MaxSize) { - var err error - // close current file first - if err = f.current.fp.Close(); err != nil { - f.stdlog.Printf("close current file error: %s", err) - } - - // rename file - fname := formatFname(f.lastRotateFormat, f.lastSplitNum) - oldpath := filepath.Join(f.dir, f.fname) - newpath := filepath.Join(f.dir, fname) - if err = os.Rename(oldpath, newpath); err != nil { - f.stdlog.Printf("rename file %s to %s error: %s", oldpath, newpath, err) - return - } - - f.files.PushBack(rotateItem{fname: fname /*rotateNum: f.lastSplitNum, rotateTime: t.Unix() unnecessary*/}) - - if format != f.lastRotateFormat { - f.lastRotateFormat = format - f.lastSplitNum = 0 - } else { - f.lastSplitNum++ - } - - // recreate current file - f.current, err = newWrapFile(filepath.Join(f.dir, f.fname)) - if err != nil { - f.stdlog.Printf("create log file error: %s", err) - } - } -} - -func (f *FileWriter) write(p []byte) error { - // f.current may be nil, if newWrapFile return err in checkRotate, redirect log to stderr - if 
f.current == nil { - f.stdlog.Printf("can't write log to file, please check stderr log for detail") - f.stdlog.Printf("%s", p) - } - _, err := f.current.write(p) - return err -} - -func (f *FileWriter) putBuf(buf *bytes.Buffer) { - buf.Reset() - f.pool.Put(buf) -} - -func (f *FileWriter) getBuf() *bytes.Buffer { - return f.pool.Get().(*bytes.Buffer) -} diff --git a/pkg/log/internal/filewriter/filewriter_test.go b/pkg/log/internal/filewriter/filewriter_test.go deleted file mode 100644 index 6178da78b..000000000 --- a/pkg/log/internal/filewriter/filewriter_test.go +++ /dev/null @@ -1,221 +0,0 @@ -package filewriter - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -const logdir = "testlog" - -func touch(dir, name string) { - os.MkdirAll(dir, 0755) - fp, err := os.OpenFile(filepath.Join(dir, name), os.O_CREATE, 0644) - if err != nil { - panic(err) - } - fp.Close() -} - -func TestMain(m *testing.M) { - ret := m.Run() - os.RemoveAll(logdir) - os.Exit(ret) -} - -func TestParseRotate(t *testing.T) { - touch := func(dir, name string) { - os.MkdirAll(dir, 0755) - fp, err := os.OpenFile(filepath.Join(dir, name), os.O_CREATE, 0644) - if err != nil { - t.Fatal(err) - } - fp.Close() - } - dir := filepath.Join(logdir, "test-parse-rotate") - names := []string{"info.log.2018-11-11", "info.log.2018-11-11.001", "info.log.2018-11-11.002", "info.log." + time.Now().Format("2006-01-02") + ".005"} - for _, name := range names { - touch(dir, name) - } - l, err := parseRotateItem(dir, "info.log", "2006-01-02") - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, len(names), l.Len()) - - rt := l.Front().Value.(rotateItem) - - assert.Equal(t, 5, rt.rotateNum) -} - -func TestRotateExists(t *testing.T) { - dir := filepath.Join(logdir, "test-rotate-exists") - names := []string{"info.log." 
+ time.Now().Format("2006-01-02") + ".005"} - for _, name := range names { - touch(dir, name) - } - fw, err := New(logdir+"/test-rotate-exists/info.log", - MaxSize(1024*1024), - func(opt *option) { opt.RotateInterval = time.Millisecond }, - ) - if err != nil { - t.Fatal(err) - } - data := make([]byte, 1024) - for i := range data { - data[i] = byte(i) - } - for i := 0; i < 10; i++ { - for i := 0; i < 1024; i++ { - _, err = fw.Write(data) - if err != nil { - t.Error(err) - } - } - time.Sleep(10 * time.Millisecond) - } - fw.Close() - fis, err := ioutil.ReadDir(logdir + "/test-rotate-exists") - if err != nil { - t.Fatal(err) - } - var fnams []string - for _, fi := range fis { - fnams = append(fnams, fi.Name()) - } - assert.Contains(t, fnams, "info.log."+time.Now().Format("2006-01-02")+".006") -} - -func TestSizeRotate(t *testing.T) { - fw, err := New(logdir+"/test-rotate/info.log", - MaxSize(1024*1024), - func(opt *option) { opt.RotateInterval = 1 * time.Millisecond }, - ) - if err != nil { - t.Fatal(err) - } - data := make([]byte, 1024) - for i := range data { - data[i] = byte(i) - } - for i := 0; i < 10; i++ { - for i := 0; i < 1024; i++ { - _, err = fw.Write(data) - if err != nil { - t.Error(err) - } - } - time.Sleep(10 * time.Millisecond) - } - fw.Close() - fis, err := ioutil.ReadDir(logdir + "/test-rotate") - if err != nil { - t.Fatal(err) - } - assert.True(t, len(fis) > 5, "expect more than 5 file get %d", len(fis)) -} - -func TestMaxFile(t *testing.T) { - fw, err := New(logdir+"/test-maxfile/info.log", - MaxSize(1024*1024), - MaxFile(1), - func(opt *option) { opt.RotateInterval = 1 * time.Millisecond }, - ) - if err != nil { - t.Fatal(err) - } - data := make([]byte, 1024) - for i := range data { - data[i] = byte(i) - } - for i := 0; i < 10; i++ { - for i := 0; i < 1024; i++ { - _, err = fw.Write(data) - if err != nil { - t.Error(err) - } - } - time.Sleep(10 * time.Millisecond) - } - fw.Close() - fis, err := ioutil.ReadDir(logdir + "/test-maxfile") - if err != nil { - t.Fatal(err) - } - assert.True(t, len(fis) <= 2, fmt.Sprintf("expect 2 file get %d", len(fis))) -} - -func TestMaxFile2(t *testing.T) { - files := []string{ - "info.log.2018-12-01", - "info.log.2018-12-02", - "info.log.2018-12-03", - "info.log.2018-12-04", - "info.log.2018-12-05", - "info.log.2018-12-05.001", - } - for _, file := range files { - touch(logdir+"/test-maxfile2", file) - } - fw, err := New(logdir+"/test-maxfile2/info.log", - MaxSize(1024*1024), - MaxFile(3), - func(opt *option) { opt.RotateInterval = 1 * time.Millisecond }, - ) - if err != nil { - t.Fatal(err) - } - data := make([]byte, 1024) - for i := range data { - data[i] = byte(i) - } - for i := 0; i < 10; i++ { - for i := 0; i < 1024; i++ { - _, err = fw.Write(data) - if err != nil { - t.Error(err) - } - } - time.Sleep(10 * time.Millisecond) - } - fw.Close() - fis, err := ioutil.ReadDir(logdir + "/test-maxfile2") - if err != nil { - t.Fatal(err) - } - assert.True(t, len(fis) == 4, fmt.Sprintf("expect 4 file get %d", len(fis))) -} - -func TestFileWriter(t *testing.T) { - fw, err := New("testlog/info.log") - if err != nil { - t.Fatal(err) - } - defer fw.Close() - _, err = fw.Write([]byte("Hello World!\n")) - if err != nil { - t.Error(err) - } -} - -func BenchmarkFileWriter(b *testing.B) { - fw, err := New("testlog/bench/info.log", - func(opt *option) { opt.WriteTimeout = time.Second }, MaxSize(1024*1024*8), /*32MB*/ - func(opt *option) { opt.RotateInterval = 10 * time.Millisecond }, - ) - if err != nil { - b.Fatal(err) - } - for i := 0; i < b.N; i++ { - _, 
err = fw.Write([]byte("Hello World!\n")) - if err != nil { - b.Error(err) - } - } -} diff --git a/pkg/log/internal/filewriter/option.go b/pkg/log/internal/filewriter/option.go deleted file mode 100644 index dbe19d671..000000000 --- a/pkg/log/internal/filewriter/option.go +++ /dev/null @@ -1,69 +0,0 @@ -package filewriter - -import ( - "fmt" - "strings" - "time" -) - -// RotateFormat -const ( - RotateDaily = "2006-01-02" -) - -var defaultOption = option{ - RotateFormat: RotateDaily, - MaxSize: 1 << 30, - ChanSize: 1024 * 8, - RotateInterval: 10 * time.Second, -} - -type option struct { - RotateFormat string - MaxFile int - MaxSize int64 - ChanSize int - - // TODO export Option - RotateInterval time.Duration - WriteTimeout time.Duration -} - -// Option filewriter option -type Option func(opt *option) - -// RotateFormat e.g 2006-01-02 meaning rotate log file every day. -// NOTE: format can't contain ".", "." will cause panic ヽ(*。>Д<)o゜. -func RotateFormat(format string) Option { - if strings.Contains(format, ".") { - panic(fmt.Sprintf("rotate format can't contain '.' format: %s", format)) - } - return func(opt *option) { - opt.RotateFormat = format - } -} - -// MaxFile default 999, 0 meaning unlimit. -// TODO: don't create file list if MaxSize is unlimt. -func MaxFile(n int) Option { - return func(opt *option) { - opt.MaxFile = n - } -} - -// MaxSize set max size for single log file, -// defult 1GB, 0 meaning unlimit. -func MaxSize(n int64) Option { - return func(opt *option) { - opt.MaxSize = n - } -} - -// ChanSize set internal chan size default 8192 use about 64k memory on x64 platform static, -// because filewriter has internal object pool, change chan size bigger may cause filewriter use -// a lot of memory, because sync.Pool can't set expire time memory won't free until program exit. -func ChanSize(n int) Option { - return func(opt *option) { - opt.ChanSize = n - } -} diff --git a/pkg/log/level.go b/pkg/log/level.go deleted file mode 100644 index fa4c98eaf..000000000 --- a/pkg/log/level.go +++ /dev/null @@ -1,29 +0,0 @@ -package log - -// Level of severity. -type Level int - -// Verbose is a boolean type that implements Info, Infov (like Printf) etc. -type Verbose bool - -// common log level. -const ( - _debugLevel Level = iota - _infoLevel - _warnLevel - _errorLevel - _fatalLevel -) - -var levelNames = [...]string{ - _debugLevel: "DEBUG", - _infoLevel: "INFO", - _warnLevel: "WARN", - _errorLevel: "ERROR", - _fatalLevel: "FATAL", -} - -// String implementation. -func (l Level) String() string { - return levelNames[l] -} diff --git a/pkg/log/log.go b/pkg/log/log.go deleted file mode 100644 index 9d5c30f79..000000000 --- a/pkg/log/log.go +++ /dev/null @@ -1,325 +0,0 @@ -package log - -import ( - "context" - "flag" - "fmt" - "io" - "os" - "strconv" - - "github.com/go-kratos/kratos/pkg/conf/env" - "github.com/go-kratos/kratos/pkg/stat/metric" -) - -// Config log config. -type Config struct { - Family string - Host string - - // stdout - Stdout bool - - // file - Dir string - // buffer size - FileBufferSize int64 - // MaxLogFile - MaxLogFile int - // RotateSize - RotateSize int64 - - // V Enable V-leveled logging at the specified level. - V int32 - // Module="" - // The syntax of the argument is a map of pattern=N, - // where pattern is a literal file name (minus the ".go" suffix) or - // "glob" pattern and N is a V level. For instance: - // [module] - // "service" = 1 - // "dao*" = 2 - // sets the V level to 2 in all Go files whose names begin "dao". 
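filewriter is the asynchronous rotating writer behind FileHandler: Write copies the payload into a pooled buffer and queues it, while a daemon goroutine batches the writes, rotates the file by date (and by size once MaxSize is exceeded) into names like info.log.2006-01-02.001, and prunes old files when MaxFile is set. It lives under internal/, so the sketch below is written as it could appear inside pkg/log; rotatingWriterExample and the paths are illustrative:

package log

import "github.com/go-kratos/kratos/pkg/log/internal/filewriter"

// rotatingWriterExample opens a daily-rotated log file with a size cap.
func rotatingWriterExample(dir string) error {
	w, err := filewriter.New(dir+"/info.log",
		filewriter.MaxSize(512<<20), // rotate once the current file passes 512 MiB (default 1 GiB)
		filewriter.MaxFile(7),       // keep at most 7 rotated files (0 keeps everything)
	)
	if err != nil {
		return err
	}
	defer w.Close() // Close drains the internal channel before returning

	// Write is asynchronous: it returns once the copy is queued, not when it
	// reaches disk; a full channel causes the entry to be discarded with an error.
	_, err = w.Write([]byte("hello filewriter\n"))
	return err
}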
- Module map[string]int32 - // Filter tell log handler which field are sensitive message, use * instead. - Filter []string -} - -// metricErrCount prometheus error counter. -var ( - metricErrCount = metric.NewBusinessMetricCount("log_error_total", "source") -) - -// Render render log output -type Render interface { - Render(io.Writer, map[string]interface{}) error - RenderString(map[string]interface{}) string -} - -var ( - h Handler - c *Config -) - -func init() { - host, _ := os.Hostname() - c = &Config{ - Family: env.AppID, - Host: host, - } - h = newHandlers([]string{}, NewStdout()) - - addFlag(flag.CommandLine) -} - -var ( - _v int - _stdout bool - _dir string - _agentDSN string - _filter logFilter - _module = verboseModule{} - _noagent bool -) - -// addFlag init log from dsn. -func addFlag(fs *flag.FlagSet) { - if lv, err := strconv.ParseInt(os.Getenv("LOG_V"), 10, 64); err == nil { - _v = int(lv) - } - _stdout, _ = strconv.ParseBool(os.Getenv("LOG_STDOUT")) - _dir = os.Getenv("LOG_DIR") - if tm := os.Getenv("LOG_MODULE"); len(tm) > 0 { - _module.Set(tm) - } - if tf := os.Getenv("LOG_FILTER"); len(tf) > 0 { - _filter.Set(tf) - } - _noagent, _ = strconv.ParseBool(os.Getenv("LOG_NO_AGENT")) - // get val from flag - fs.IntVar(&_v, "log.v", _v, "log verbose level, or use LOG_V env variable.") - fs.BoolVar(&_stdout, "log.stdout", _stdout, "log enable stdout or not, or use LOG_STDOUT env variable.") - fs.StringVar(&_dir, "log.dir", _dir, "log file `path, or use LOG_DIR env variable.") - fs.StringVar(&_agentDSN, "log.agent", _agentDSN, "log agent dsn, or use LOG_AGENT env variable.") - fs.Var(&_module, "log.module", "log verbose for specified module, or use LOG_MODULE env variable, format: file=1,file2=2.") - fs.Var(&_filter, "log.filter", "log field for sensitive message, or use LOG_FILTER env variable, format: field1,field2.") - fs.BoolVar(&_noagent, "log.noagent", _noagent, "force disable log agent print log to stderr, or use LOG_NO_AGENT") -} - -// Init create logger with context. -func Init(conf *Config) { - var isNil bool - if conf == nil { - isNil = true - conf = &Config{ - Stdout: _stdout, - Dir: _dir, - V: int32(_v), - Module: _module, - Filter: _filter, - } - } - if len(env.AppID) != 0 { - conf.Family = env.AppID // for caster - } - conf.Host = env.Hostname - if len(conf.Host) == 0 { - host, _ := os.Hostname() - conf.Host = host - } - var hs []Handler - // when env is dev - if conf.Stdout || (isNil && (env.DeployEnv == "" || env.DeployEnv == env.DeployEnvDev)) || _noagent { - hs = append(hs, NewStdout()) - } - if conf.Dir != "" { - hs = append(hs, NewFile(conf.Dir, conf.FileBufferSize, conf.RotateSize, conf.MaxLogFile)) - } - h = newHandlers(conf.Filter, hs...) - c = conf -} - -// Debug logs a message at the debug log level. -func Debug(format string, args ...interface{}) { - if int32(_debugLevel) >= c.V { - h.Log(context.Background(), _debugLevel, KVString(_log, fmt.Sprintf(format, args...))) - } -} - -// Info logs a message at the info log level. -func Info(format string, args ...interface{}) { - if int32(_infoLevel) >= c.V { - h.Log(context.Background(), _infoLevel, KVString(_log, fmt.Sprintf(format, args...))) - } -} - -// Warn logs a message at the warning log level. -func Warn(format string, args ...interface{}) { - if int32(_warnLevel) >= c.V { - h.Log(context.Background(), _warnLevel, KVString(_log, fmt.Sprintf(format, args...))) - } -} - -// Error logs a message at the error log level. 
-func Error(format string, args ...interface{}) { - if int32(_errorLevel) >= c.V { - h.Log(context.Background(), _errorLevel, KVString(_log, fmt.Sprintf(format, args...))) - } -} - -// Fatal logs a message at the fatal log level. -func Fatal(format string, args ...interface{}) { - if int32(_fatalLevel) >= c.V { - h.Log(context.Background(), _fatalLevel, KVString(_log, fmt.Sprintf(format, args...))) - } -} - -// Debugc logs a message at the debug log level. -func Debugc(ctx context.Context, format string, args ...interface{}) { - if int32(_debugLevel) >= c.V { - h.Log(ctx, _debugLevel, KVString(_log, fmt.Sprintf(format, args...))) - } -} - -// Infoc logs a message at the info log level. -func Infoc(ctx context.Context, format string, args ...interface{}) { - if int32(_infoLevel) >= c.V { - h.Log(ctx, _infoLevel, KVString(_log, fmt.Sprintf(format, args...))) - } -} - -// Errorc logs a message at the error log level. -func Errorc(ctx context.Context, format string, args ...interface{}) { - if int32(_errorLevel) >= c.V { - h.Log(ctx, _errorLevel, KVString(_log, fmt.Sprintf(format, args...))) - } -} - -// Warnc logs a message at the warning log level. -func Warnc(ctx context.Context, format string, args ...interface{}) { - if int32(_warnLevel) >= c.V { - h.Log(ctx, _warnLevel, KVString(_log, fmt.Sprintf(format, args...))) - } -} - -// Fatalc logs a message at the fatal log level. -func Fatalc(ctx context.Context, format string, args ...interface{}) { - if int32(_fatalLevel) >= c.V { - h.Log(ctx, _fatalLevel, KVString(_log, fmt.Sprintf(format, args...))) - } -} - -// Debugv logs a message at the debug log level. -func Debugv(ctx context.Context, args ...D) { - if int32(_debugLevel) >= c.V { - h.Log(ctx, _debugLevel, args...) - } -} - -// Infov logs a message at the info log level. -func Infov(ctx context.Context, args ...D) { - if int32(_infoLevel) >= c.V { - h.Log(ctx, _infoLevel, args...) - } -} - -// Warnv logs a message at the warning log level. -func Warnv(ctx context.Context, args ...D) { - if int32(_warnLevel) >= c.V { - h.Log(ctx, _warnLevel, args...) - } -} - -// Errorv logs a message at the error log level. -func Errorv(ctx context.Context, args ...D) { - if int32(_errorLevel) >= c.V { - h.Log(ctx, _errorLevel, args...) - } -} - -// Fatalv logs a message at the error log level. -func Fatalv(ctx context.Context, args ...D) { - if int32(_fatalLevel) >= c.V { - h.Log(ctx, _fatalLevel, args...) - } -} - -func logw(args []interface{}) []D { - if len(args)%2 != 0 { - Warn("log: the variadic must be plural, the last one will ignored") - } - ds := make([]D, 0, len(args)/2) - for i := 0; i < len(args)-1; i = i + 2 { - if key, ok := args[i].(string); ok { - ds = append(ds, KV(key, args[i+1])) - } else { - Warn("log: key must be string, get %T, ignored", args[i]) - } - } - return ds -} - -// Debugw logs a message with some additional context. The variadic key-value pairs are treated as they are in With. -func Debugw(ctx context.Context, args ...interface{}) { - if int32(_debugLevel) >= c.V { - h.Log(ctx, _debugLevel, logw(args)...) - } -} - -// Infow logs a message with some additional context. The variadic key-value pairs are treated as they are in With. -func Infow(ctx context.Context, args ...interface{}) { - if int32(_infoLevel) >= c.V { - h.Log(ctx, _infoLevel, logw(args)...) - } -} - -// Warnw logs a message with some additional context. The variadic key-value pairs are treated as they are in With. 
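Put together with handler.go and file.go above, the intended flow of this package is: Init with a Config, log through the leveled helpers, Close on shutdown. The typed KV* constructors avoid boxing the value in an interface{} (the earlier comment on KV makes the same point), and any key named in Config.Filter is rendered as "***" by Handlers.Log. A usage sketch; the paths and field names are only illustrative:

package main

import (
	"context"
	"time"

	"github.com/go-kratos/kratos/pkg/log"
)

func main() {
	log.Init(&log.Config{
		Stdout: true,                 // also log to stdout
		Dir:    "/tmp/demo-log",      // file handler writes debug/info/warning/error/fatal .log here
		Filter: []string{"password"}, // values under this key are masked as "***"
	})
	defer log.Close()

	ctx := context.Background()

	// printf style
	log.Info("user %s logged in", "alice")

	// structured style: typed constructors avoid an interface{} allocation
	log.Infov(ctx,
		log.KVString("user", "alice"),
		log.KVInt("attempts", 1),
		log.KVDuration("cost", 35*time.Millisecond),
		log.KV("password", "secret"), // masked by the filter above
	)

	// loose key/value pairs, paired up by logw
	log.Infow(ctx, "user", "alice", "attempts", 1)
}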
-func Warnw(ctx context.Context, args ...interface{}) { - if int32(_warnLevel) >= c.V { - h.Log(ctx, _warnLevel, logw(args)...) - } -} - -// Errorw logs a message with some additional context. The variadic key-value pairs are treated as they are in With. -func Errorw(ctx context.Context, args ...interface{}) { - if int32(_errorLevel) >= c.V { - h.Log(ctx, _errorLevel, logw(args)...) - } -} - -// Fatalw logs a message with some additional context. The variadic key-value pairs are treated as they are in With. -func Fatalw(ctx context.Context, args ...interface{}) { - if int32(_fatalLevel) >= c.V { - h.Log(ctx, _fatalLevel, logw(args)...) - } -} - -// SetFormat only effective on stdout and file handler -// %T time format at "15:04:05.999" on stdout handler, "15:04:05 MST" on file handler -// %t time format at "15:04:05" on stdout handler, "15:04" on file on file handler -// %D data format at "2006/01/02" -// %d data format at "01/02" -// %L log level e.g. INFO WARN ERROR -// %M log message and additional fields: key=value this is log message -// NOTE below pattern not support on file handler -// %f function name and line number e.g. model.Get:121 -// %i instance id -// %e deploy env e.g. dev uat fat prod -// %z zone -// %S full file name and line number: /a/b/c/d.go:23 -// %s final file name element and line number: d.go:23 -func SetFormat(format string) { - h.SetFormat(format) -} - -// Close close resource. -func Close() (err error) { - err = h.Close() - h = _defaultStdout - return -} - -func errIncr(lv Level, source string) { - if lv == _errorLevel { - metricErrCount.Inc(source) - } -} diff --git a/pkg/log/log_test.go b/pkg/log/log_test.go deleted file mode 100644 index 65f503853..000000000 --- a/pkg/log/log_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package log - -import ( - "context" - "testing" - - "github.com/go-kratos/kratos/pkg/net/metadata" - - "github.com/stretchr/testify/assert" -) - -func initStdout() { - conf := &Config{ - Stdout: true, - } - Init(conf) -} - -func initFile() { - conf := &Config{ - Dir: "/tmp", - // VLevel: 2, - Module: map[string]int32{"log_test": 1}, - } - Init(conf) -} - -type TestLog struct { - A string - B int - C string - D string -} - -func testLog(t *testing.T) { - t.Run("Fatal", func(t *testing.T) { - Fatal("hello %s", "world") - Fatalv(context.Background(), KV("key", 2222222), KV("test2", "test")) - Fatalc(context.Background(), "keys: %s %s...", "key1", "key2") - }) - t.Run("Error", func(t *testing.T) { - Error("hello %s", "world") - Errorv(context.Background(), KV("key", 2222222), KV("test2", "test")) - Errorc(context.Background(), "keys: %s %s...", "key1", "key2") - }) - t.Run("Warn", func(t *testing.T) { - Warn("hello %s", "world") - Warnv(context.Background(), KV("key", 2222222), KV("test2", "test")) - Warnc(context.Background(), "keys: %s %s...", "key1", "key2") - }) - t.Run("Info", func(t *testing.T) { - Info("hello %s", "world") - Infov(context.Background(), KV("key", 2222222), KV("test2", "test")) - Infoc(context.Background(), "keys: %s %s...", "key1", "key2") - }) - t.Run("Debug", func(t *testing.T) { - Debug("hello %s", "world") - Debugv(context.Background(), KV("key", 2222222), KV("test2", "test")) - Debugc(context.Background(), "keys: %s %s...", "key1", "key2") - }) -} - -func TestFile(t *testing.T) { - initFile() - testLog(t) - assert.Equal(t, nil, Close()) -} - -func TestStdout(t *testing.T) { - initStdout() - testLog(t) - assert.Equal(t, nil, Close()) -} - -func TestLogW(t *testing.T) { - D := logw([]interface{}{"i", "like", "a", "dog"}) - 
if len(D) != 2 || D[0].Key != "i" || D[0].Value != "like" || D[1].Key != "a" || D[1].Value != "dog" { - t.Fatalf("logw out put should be ' {i like} {a dog}'") - } - D = logw([]interface{}{"i", "like", "dog"}) - if len(D) != 1 || D[0].Key != "i" || D[0].Value != "like" { - t.Fatalf("logw out put should be ' {i like}'") - } -} - -func TestLogWithMirror(t *testing.T) { - Info("test log") - mdcontext := metadata.NewContext(context.Background(), metadata.MD{metadata.Mirror: "true"}) - Infov(mdcontext, KV("key1", "val1"), KV("key2", ""), KV("log", "log content"), KV("msg", "msg content")) - - mdcontext = metadata.NewContext(context.Background(), metadata.MD{metadata.Mirror: "***"}) - Infov(mdcontext, KV("key1", "val1"), KV("key2", ""), KV("log", "log content"), KV("msg", "msg content")) - - Infov(context.Background(), KV("key1", "val1"), KV("key2", ""), KV("log", "log content"), KV("msg", "msg content")) -} - -func TestOverwriteSouce(t *testing.T) { - ctx := context.Background() - t.Run("test source kv string", func(t *testing.T) { - Infov(ctx, KVString("source", "test")) - }) - t.Run("test source kv string", func(t *testing.T) { - Infov(ctx, KV("source", "test")) - }) -} - -func BenchmarkLog(b *testing.B) { - ctx := context.Background() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - Infov(ctx, KVString("test", "hello"), KV("int", 34), KV("hhh", "hhhh")) - } - }) -} diff --git a/pkg/log/logrus.go b/pkg/log/logrus.go deleted file mode 100644 index 96523077f..000000000 --- a/pkg/log/logrus.go +++ /dev/null @@ -1,61 +0,0 @@ -package log - -import ( - "context" - "io/ioutil" - "os" - - "github.com/sirupsen/logrus" -) - -func init() { - redirectLogrus() -} - -func redirectLogrus() { - // FIXME: because of different stack depth call runtime.Caller will get error function name. - logrus.AddHook(redirectHook{}) - if os.Getenv("LOGRUS_STDOUT") == "" { - logrus.SetOutput(ioutil.Discard) - } -} - -type redirectHook struct{} - -func (redirectHook) Levels() []logrus.Level { - return logrus.AllLevels -} - -func (redirectHook) Fire(entry *logrus.Entry) error { - lv := _infoLevel - var logrusLv string - var verbose int32 - switch entry.Level { - case logrus.FatalLevel, logrus.PanicLevel: - logrusLv = entry.Level.String() - fallthrough - case logrus.ErrorLevel: - lv = _errorLevel - case logrus.WarnLevel: - lv = _warnLevel - case logrus.InfoLevel: - lv = _infoLevel - case logrus.DebugLevel: - // use verbose log replace of debuglevel - verbose = 10 - } - args := make([]D, 0, len(entry.Data)+1) - args = append(args, D{Key: _log, Value: entry.Message}) - for k, v := range entry.Data { - args = append(args, D{Key: k, Value: v}) - } - if logrusLv != "" { - args = append(args, D{Key: "logrus_lv", Value: logrusLv}) - } - if verbose != 0 { - V(verbose).Infov(context.Background(), args...) - } else { - h.Log(context.Background(), lv, args...) 
- } - return nil -} diff --git a/pkg/log/pattern.go b/pkg/log/pattern.go deleted file mode 100644 index 3863e6052..000000000 --- a/pkg/log/pattern.go +++ /dev/null @@ -1,164 +0,0 @@ -package log - -import ( - "fmt" - "io" - "path" - "strings" - "sync" - "time" -) - -var patternMap = map[string]func(map[string]interface{}) string{ - "T": longTime, - "t": shortTime, - "D": longDate, - "d": shortDate, - "L": keyFactory(_level), - "f": keyFactory(_source), - "i": keyFactory(_instanceID), - "e": keyFactory(_deplyEnv), - "z": keyFactory(_zone), - "S": longSource, - "s": shortSource, - "M": message, -} - -// newPatternRender new pattern render -func newPatternRender(format string) Render { - p := &pattern{ - bufPool: sync.Pool{New: func() interface{} { return &strings.Builder{} }}, - } - b := make([]byte, 0, len(format)) - for i := 0; i < len(format); i++ { - if format[i] != '%' { - b = append(b, format[i]) - continue - } - if i+1 >= len(format) { - b = append(b, format[i]) - continue - } - f, ok := patternMap[string(format[i+1])] - if !ok { - b = append(b, format[i]) - continue - } - if len(b) != 0 { - p.funcs = append(p.funcs, textFactory(string(b))) - b = b[:0] - } - p.funcs = append(p.funcs, f) - i++ - } - if len(b) != 0 { - p.funcs = append(p.funcs, textFactory(string(b))) - } - return p -} - -type pattern struct { - funcs []func(map[string]interface{}) string - bufPool sync.Pool -} - -// Render implements Formater -func (p *pattern) Render(w io.Writer, d map[string]interface{}) error { - builder := p.bufPool.Get().(*strings.Builder) - defer func() { - builder.Reset() - p.bufPool.Put(builder) - }() - for _, f := range p.funcs { - builder.WriteString(f(d)) - } - - _, err := w.Write([]byte(builder.String())) - return err -} - -// Render implements Formater as string -func (p *pattern) RenderString(d map[string]interface{}) string { - builder := p.bufPool.Get().(*strings.Builder) - defer func() { - builder.Reset() - p.bufPool.Put(builder) - }() - for _, f := range p.funcs { - builder.WriteString(f(d)) - } - - return builder.String() -} - -func textFactory(text string) func(map[string]interface{}) string { - return func(map[string]interface{}) string { - return text - } -} -func keyFactory(key string) func(map[string]interface{}) string { - return func(d map[string]interface{}) string { - if v, ok := d[key]; ok { - if s, ok := v.(string); ok { - return s - } - return fmt.Sprint(v) - } - return "" - } -} - -func longSource(d map[string]interface{}) string { - if fn, ok := d[_source].(string); ok { - return fn - } - return "unknown:0" -} - -func shortSource(d map[string]interface{}) string { - if fn, ok := d[_source].(string); ok { - return path.Base(fn) - } - return "unknown:0" -} - -func longTime(map[string]interface{}) string { - return time.Now().Format("15:04:05.000") -} - -func shortTime(map[string]interface{}) string { - return time.Now().Format("15:04") -} - -func longDate(map[string]interface{}) string { - return time.Now().Format("2006/01/02") -} - -func shortDate(map[string]interface{}) string { - return time.Now().Format("01/02") -} - -func isInternalKey(k string) bool { - switch k { - case _level, _levelValue, _time, _source, _instanceID, _appID, _deplyEnv, _zone: - return true - } - return false -} - -func message(d map[string]interface{}) string { - var m string - var s []string - for k, v := range d { - if k == _log { - m = fmt.Sprint(v) - continue - } - if isInternalKey(k) { - continue - } - s = append(s, fmt.Sprintf("%s=%v", k, v)) - } - s = append(s, m) - return strings.Join(s, 
" ") -} diff --git a/pkg/log/pattern_test.go b/pkg/log/pattern_test.go deleted file mode 100644 index a6aa6be00..000000000 --- a/pkg/log/pattern_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package log - -import ( - "bytes" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func TestPatternDefault(t *testing.T) { - buf := &bytes.Buffer{} - p := newPatternRender("%L %T %f %M") - p.Render(buf, map[string]interface{}{_level: _infoLevel.String(), _log: "hello", _time: time.Now().Format(_timeFormat), _source: "xxx:123"}) - - fields := strings.Fields(buf.String()) - assert.Equal(t, 4, len(fields)) - assert.Equal(t, "INFO", fields[0]) - assert.Equal(t, "hello", fields[3]) -} - -func TestKV(t *testing.T) { - buf := &bytes.Buffer{} - p := newPatternRender("%M") - p.Render(buf, map[string]interface{}{_level: _infoLevel.String(), _log: "2233", "hello": "test"}) - assert.Equal(t, "hello=test 2233", buf.String()) -} - -func TestBadSymbol(t *testing.T) { - buf := &bytes.Buffer{} - p := newPatternRender("%12 %% %xd %M") - p.Render(buf, map[string]interface{}{_level: _infoLevel.String(), _log: "2233"}) - assert.Equal(t, "%12 %% %xd 2233", buf.String()) -} diff --git a/pkg/log/stdout.go b/pkg/log/stdout.go deleted file mode 100644 index ff9630426..000000000 --- a/pkg/log/stdout.go +++ /dev/null @@ -1,53 +0,0 @@ -package log - -import ( - "context" - "os" - "time" -) - -const defaultPattern = "%L %d-%T %f %M" - -var _defaultStdout = NewStdout() - -// StdoutHandler stdout log handler -type StdoutHandler struct { - render Render -} - -// NewStdout create a stdout log handler -func NewStdout() *StdoutHandler { - return &StdoutHandler{render: newPatternRender(defaultPattern)} -} - -// Log stdout loging, only for developing env. -func (h *StdoutHandler) Log(ctx context.Context, lv Level, args ...D) { - d := toMap(args...) - // add extra fields - addExtraField(ctx, d) - d[_time] = time.Now().Format(_timeFormat) - h.render.Render(os.Stderr, d) - os.Stderr.Write([]byte("\n")) -} - -// Close stdout loging -func (h *StdoutHandler) Close() error { - return nil -} - -// SetFormat set stdout log output format -// %T time format at "15:04:05.999" -// %t time format at "15:04:05" -// %D data format at "2006/01/02" -// %d data format at "01/02" -// %L log level e.g. INFO WARN ERROR -// %f function name and line number e.g. model.Get:121 -// %i instance id -// %e deploy env e.g. 
dev uat fat prod -// %z zone -// %S full file name and line number: /a/b/c/d.go:23 -// %s final file name element and line number: d.go:23 -// %M log message and additional fields: key=value this is log message -func (h *StdoutHandler) SetFormat(format string) { - h.render = newPatternRender(format) -} diff --git a/pkg/log/util.go b/pkg/log/util.go deleted file mode 100644 index 4d21fec48..000000000 --- a/pkg/log/util.go +++ /dev/null @@ -1,69 +0,0 @@ -package log - -import ( - "context" - "math" - "runtime" - "strconv" - "time" - - "github.com/go-kratos/kratos/pkg/conf/env" - "github.com/go-kratos/kratos/pkg/log/internal/core" - "github.com/go-kratos/kratos/pkg/net/metadata" - "github.com/go-kratos/kratos/pkg/net/trace" -) - -func addExtraField(ctx context.Context, fields map[string]interface{}) { - if t, ok := trace.FromContext(ctx); ok { - fields[_tid] = t.TraceID() - } - if caller := metadata.String(ctx, metadata.Caller); caller != "" { - fields[_caller] = caller - } - if color := metadata.String(ctx, metadata.Color); color != "" { - fields[_color] = color - } - if env.Color != "" { - fields[_envColor] = env.Color - } - if cluster := metadata.String(ctx, metadata.Cluster); cluster != "" { - fields[_cluster] = cluster - } - fields[_deplyEnv] = env.DeployEnv - fields[_zone] = env.Zone - fields[_appID] = c.Family - fields[_instanceID] = c.Host - if metadata.String(ctx, metadata.Mirror) != "" { - fields[_mirror] = true - } -} - -// funcName get func name. -func funcName(skip int) (name string) { - if _, file, lineNo, ok := runtime.Caller(skip); ok { - return file + ":" + strconv.Itoa(lineNo) - } - return "unknown:0" -} - -// toMap convert D slice to map[string]interface{} for legacy file and stdout. -func toMap(args ...D) map[string]interface{} { - d := make(map[string]interface{}, 10+len(args)) - for _, arg := range args { - switch arg.Type { - case core.UintType, core.Uint64Type, core.IntTpye, core.Int64Type: - d[arg.Key] = arg.Int64Val - case core.StringType: - d[arg.Key] = arg.StringVal - case core.Float32Type: - d[arg.Key] = math.Float32frombits(uint32(arg.Int64Val)) - case core.Float64Type: - d[arg.Key] = math.Float64frombits(uint64(arg.Int64Val)) - case core.DurationType: - d[arg.Key] = time.Duration(arg.Int64Val) - default: - d[arg.Key] = arg.Value - } - } - return d -} diff --git a/pkg/log/util_test.go b/pkg/log/util_test.go deleted file mode 100644 index 546fd6b1c..000000000 --- a/pkg/log/util_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package log - -import ( - "reflect" - "strings" - "testing" - "time" -) - -func TestFuncName(t *testing.T) { - name := funcName(1) - if !strings.Contains(name, "util_test.go:11") { - t.Errorf("expect contains util_test.go:11 got %s", name) - } -} - -func Test_toMap(t *testing.T) { - type args struct { - args []D - } - tests := []struct { - name string - args args - want map[string]interface{} - }{ - { - args: args{[]D{KVString("test", "hello")}}, - want: map[string]interface{}{"test": "hello"}, - }, - { - args: args{[]D{KVInt64("test", 123)}}, - want: map[string]interface{}{"test": int64(123)}, - }, - { - args: args{[]D{KVFloat32("test", float32(1.01))}}, - want: map[string]interface{}{"test": float32(1.01)}, - }, - { - args: args{[]D{KVFloat32("test", float32(1.01))}}, - want: map[string]interface{}{"test": float32(1.01)}, - }, - { - args: args{[]D{KVDuration("test", time.Second)}}, - want: map[string]interface{}{"test": time.Second}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := toMap(tt.args.args...); 
!reflect.DeepEqual(got, tt.want) { - t.Errorf("toMap() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/pkg/log/verbose.go b/pkg/log/verbose.go deleted file mode 100644 index 44124c6c6..000000000 --- a/pkg/log/verbose.go +++ /dev/null @@ -1,83 +0,0 @@ -package log - -import ( - "context" - "fmt" - "path/filepath" - "runtime" - "strings" -) - -// V reports whether verbosity at the call site is at least the requested level. -// The returned value is a boolean of type Verbose, which implements Info, Infov etc. -// These methods will write to the Info log if called. -// Thus, one may write either -// if log.V(2) { log.Info("log this") } -// or -// log.V(2).Info("log this") -// The second form is shorter but the first is cheaper if logging is off because it does -// not evaluate its arguments. -// -// Whether an individual call to V generates a log record depends on the setting of -// the Config.VLevel and Config.Module flags; both are off by default. If the level in the call to -// V is at least the value of Config.VLevel, or of Config.Module for the source file containing the -// call, the V call will log. -// v must be more than 0. -func V(v int32) Verbose { - var ( - file string - ) - if v < 0 { - return Verbose(false) - } else if c.V >= v { - return Verbose(true) - } - if pc, _, _, ok := runtime.Caller(1); ok { - file, _ = runtime.FuncForPC(pc).FileLine(pc) - } - if strings.HasSuffix(file, ".go") { - file = file[:len(file)-3] - } - if slash := strings.LastIndex(file, "/"); slash >= 0 { - file = file[slash+1:] - } - for filter, lvl := range c.Module { - var match bool - if match = filter == file; !match { - match, _ = filepath.Match(filter, file) - } - if match { - return Verbose(lvl >= v) - } - } - return Verbose(false) -} - -// Info logs a message at the info log level. -func (v Verbose) Info(format string, args ...interface{}) { - if v { - h.Log(context.Background(), _infoLevel, KVString(_log, fmt.Sprintf(format, args...))) - } -} - -// Infov logs a message at the info log level. -func (v Verbose) Infov(ctx context.Context, args ...D) { - if v { - h.Log(ctx, _infoLevel, args...) - } -} - -// Infow logs a message with some additional context. The variadic key-value pairs are treated as they are in With. -func (v Verbose) Infow(ctx context.Context, args ...interface{}) { - if v { - h.Log(ctx, _infoLevel, logw(args)...) - } -} - -// Close close resource. 
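// Illustrative-only sketch of the verbosity helpers in the verbose.go deleted above.
// V, Verbose.Info/Infov and Config.Module come from this diff; the levels and the
// "cache" module name are assumptions for the example.
package main

import (
	"context"

	"github.com/go-kratos/kratos/pkg/log"
)

func main() {
	// Global verbose level 2, with a higher level only for code in cache.go.
	log.Init(&log.Config{Stdout: true, V: 2, Module: map[string]int32{"cache": 3}})
	defer log.Close()

	// Cheaper form from the V doc comment: arguments are not evaluated when disabled.
	if log.V(2) {
		log.Info("verbose detail: %s", expensiveDump())
	}
	// Shorter form: always evaluates its arguments.
	log.V(2).Infov(context.Background(), log.KV("step", "warmup"))
}

func expensiveDump() string { return "..." }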
-func (v Verbose) Close() (err error) { - if h == nil { - return - } - return h.Close() -} diff --git a/pkg/naming/README.md b/pkg/naming/README.md deleted file mode 100644 index 959efc91d..000000000 --- a/pkg/naming/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# naming - -## 项目简介 - -服务发现、服务注册相关的SDK集合 - -## 现状 - -目前默认实现了B站开源的[Discovery](https://github.com/bilibili/discovery)服务注册与发现SDK。 -但在使用之前,请确认discovery服务部署完成,并将该discovery.go内`fixConfig`方法的默认配置进行完善。 - -## 使用 - -可实现`naming`内的`Builder`&`Resolver`&`Registry`接口用于服务注册与发现,比如B站内部还实现了zk的。 diff --git a/pkg/naming/discovery/discovery.go b/pkg/naming/discovery/discovery.go deleted file mode 100644 index 21a5de118..000000000 --- a/pkg/naming/discovery/discovery.go +++ /dev/null @@ -1,709 +0,0 @@ -package discovery - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "math/rand" - "net/url" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/go-kratos/kratos/pkg/conf/env" - "github.com/go-kratos/kratos/pkg/ecode" - "github.com/go-kratos/kratos/pkg/log" - "github.com/go-kratos/kratos/pkg/naming" - http "github.com/go-kratos/kratos/pkg/net/http/blademaster" - xtime "github.com/go-kratos/kratos/pkg/time" -) - -const ( - _registerURL = "http://%s/discovery/register" - _setURL = "http://%s/discovery/set" - _cancelURL = "http://%s/discovery/cancel" - _renewURL = "http://%s/discovery/renew" - _pollURL = "http://%s/discovery/polls" - - _registerGap = 30 * time.Second - - _statusUP = "1" - - _appid = "infra.discovery" -) - -var ( - _ naming.Builder = &Discovery{} - _ naming.Registry = &Discovery{} - _ naming.Resolver = &Resolve{} - - // ErrDuplication duplication treeid. - ErrDuplication = errors.New("discovery: instance duplicate registration") -) - -var ( - _once sync.Once - _builder naming.Builder -) - -// Builder return default discvoery resolver builder. -func Builder() naming.Builder { - _once.Do(func() { - _builder = New(nil) - }) - return _builder -} - -// Build register resolver into default discovery. -func Build(id string) naming.Resolver { - return Builder().Build(id) -} - -// Config discovery configures. -type Config struct { - Nodes []string - Region string - Zone string - Env string - Host string -} - -// Discovery is discovery client. -type Discovery struct { - c *Config - once sync.Once - ctx context.Context - cancelFunc context.CancelFunc - httpClient *http.Client - - node atomic.Value - nodeIdx uint64 - - mutex sync.RWMutex - apps map[string]*appInfo - registry map[string]struct{} - lastHost string - cancelPolls context.CancelFunc - - delete chan *appInfo -} - -type appInfo struct { - resolver map[*Resolve]struct{} - zoneIns atomic.Value - lastTs int64 // latest timestamp -} - -func fixConfig(c *Config) error { - if len(c.Nodes) == 0 && env.DiscoveryNodes != "" { - c.Nodes = strings.Split(env.DiscoveryNodes, ",") - } - if c.Region == "" { - c.Region = env.Region - } - if c.Zone == "" { - c.Zone = env.Zone - } - if c.Env == "" { - c.Env = env.DeployEnv - } - if c.Host == "" { - c.Host = env.Hostname - } - if len(c.Nodes) == 0 || c.Region == "" || c.Zone == "" || c.Env == "" || c.Host == "" { - return fmt.Errorf( - "invalid discovery config nodes:%+v region:%s zone:%s deployEnv:%s host:%s", - c.Nodes, - c.Region, - c.Zone, - c.Env, - c.Host, - ) - } - return nil -} - -// New new a discovery client. 
-func New(c *Config) (d *Discovery) { - if c == nil { - c = new(Config) - } - if err := fixConfig(c); err != nil { - panic(err) - } - ctx, cancel := context.WithCancel(context.Background()) - d = &Discovery{ - c: c, - ctx: ctx, - cancelFunc: cancel, - apps: map[string]*appInfo{}, - registry: map[string]struct{}{}, - delete: make(chan *appInfo, 10), - } - // httpClient - cfg := &http.ClientConfig{ - Dial: xtime.Duration(3 * time.Second), - Timeout: xtime.Duration(40 * time.Second), - KeepAlive: xtime.Duration(40 * time.Second), - } - d.httpClient = http.NewClient(cfg) - // discovery self - resolver := d.Build(_appid) - event := resolver.Watch() - _, ok := <-event - if !ok { - panic("discovery watch failed") - } - ins, ok := resolver.Fetch(context.Background()) - if ok { - d.newSelf(ins.Instances) - } - go d.selfproc(resolver, event) - return -} - -func (d *Discovery) selfproc(resolver naming.Resolver, event <-chan struct{}) { - for { - _, ok := <-event - if !ok { - return - } - zones, ok := resolver.Fetch(context.Background()) - if ok { - d.newSelf(zones.Instances) - } - } -} - -func (d *Discovery) newSelf(zones map[string][]*naming.Instance) { - ins, ok := zones[d.c.Zone] - if !ok { - return - } - var nodes []string - for _, in := range ins { - for _, addr := range in.Addrs { - u, err := url.Parse(addr) - if err == nil && u.Scheme == "http" { - nodes = append(nodes, u.Host) - } - } - } - // diff old nodes - var olds int - for _, n := range nodes { - if node, ok := d.node.Load().([]string); ok { - for _, o := range node { - if o == n { - olds++ - break - } - } - } - } - if len(nodes) == olds { - return - } - // FIXME: we should use rand.Shuffle() in golang 1.10 - shuffle(len(nodes), func(i, j int) { - nodes[i], nodes[j] = nodes[j], nodes[i] - }) - d.node.Store(nodes) -} - -// Build disovery resovler builder. -func (d *Discovery) Build(appid string, opts ...naming.BuildOpt) naming.Resolver { - r := &Resolve{ - id: appid, - d: d, - event: make(chan struct{}, 1), - opt: new(naming.BuildOptions), - } - for _, opt := range opts { - opt.Apply(r.opt) - } - d.mutex.Lock() - app, ok := d.apps[appid] - if !ok { - app = &appInfo{ - resolver: make(map[*Resolve]struct{}), - } - d.apps[appid] = app - cancel := d.cancelPolls - if cancel != nil { - cancel() - } - } - app.resolver[r] = struct{}{} - d.mutex.Unlock() - if ok { - select { - case r.event <- struct{}{}: - default: - } - } - log.Info("disocvery: AddWatch(%s) already watch(%v)", appid, ok) - d.once.Do(func() { - go d.serverproc() - }) - return r -} - -// Scheme return discovery's scheme -func (d *Discovery) Scheme() string { - return "discovery" -} - -// Resolve discveory resolver. -type Resolve struct { - id string - event chan struct{} - d *Discovery - opt *naming.BuildOptions -} - -// Watch watch instance. -func (r *Resolve) Watch() <-chan struct{} { - return r.event -} - -// Fetch fetch resolver instance. 
-func (r *Resolve) Fetch(ctx context.Context) (ins *naming.InstancesInfo, ok bool) { - r.d.mutex.RLock() - app, ok := r.d.apps[r.id] - r.d.mutex.RUnlock() - if ok { - var appIns *naming.InstancesInfo - appIns, ok = app.zoneIns.Load().(*naming.InstancesInfo) - if !ok { - return - } - ins = new(naming.InstancesInfo) - ins.LastTs = appIns.LastTs - ins.Scheduler = appIns.Scheduler - if r.opt.Filter != nil { - ins.Instances = r.opt.Filter(appIns.Instances) - } else { - ins.Instances = make(map[string][]*naming.Instance) - for zone, in := range appIns.Instances { - ins.Instances[zone] = in - } - } - if r.opt.Scheduler != nil { - ins.Instances[r.opt.ClientZone] = r.opt.Scheduler(ins) - } - if r.opt.Subset != nil && r.opt.SubsetSize != 0 { - for zone, inss := range ins.Instances { - ins.Instances[zone] = r.opt.Subset(inss, r.opt.SubsetSize) - } - } - } - return -} - -// Close close resolver. -func (r *Resolve) Close() error { - r.d.mutex.Lock() - if app, ok := r.d.apps[r.id]; ok && len(app.resolver) != 0 { - delete(app.resolver, r) - // TODO: delete app from builder - } - r.d.mutex.Unlock() - return nil -} - -// Reload reload the config -func (d *Discovery) Reload(c *Config) { - fixConfig(c) - d.mutex.Lock() - d.c = c - d.mutex.Unlock() -} - -// Close stop all running process including discovery and register -func (d *Discovery) Close() error { - d.cancelFunc() - return nil -} - -// Register Register an instance with discovery and renew automatically -func (d *Discovery) Register(ctx context.Context, ins *naming.Instance) (cancelFunc context.CancelFunc, err error) { - d.mutex.Lock() - if _, ok := d.registry[ins.AppID]; ok { - err = ErrDuplication - } else { - d.registry[ins.AppID] = struct{}{} - } - d.mutex.Unlock() - if err != nil { - return - } - - ctx, cancel := context.WithCancel(d.ctx) - if err = d.register(ctx, ins); err != nil { - d.mutex.Lock() - delete(d.registry, ins.AppID) - d.mutex.Unlock() - cancel() - return - } - ch := make(chan struct{}, 1) - cancelFunc = context.CancelFunc(func() { - cancel() - <-ch - }) - go func() { - ticker := time.NewTicker(_registerGap) - defer ticker.Stop() - for { - select { - case <-ticker.C: - if err := d.renew(ctx, ins); err != nil && ecode.EqualError(ecode.NothingFound, err) { - _ = d.register(ctx, ins) - } - case <-ctx.Done(): - _ = d.cancel(ins) - ch <- struct{}{} - return - } - } - }() - return -} - -// register Register an instance with discovery -func (d *Discovery) register(ctx context.Context, ins *naming.Instance) (err error) { - d.mutex.RLock() - c := d.c - d.mutex.RUnlock() - - var metadata []byte - if ins.Metadata != nil { - if metadata, err = json.Marshal(ins.Metadata); err != nil { - log.Error("discovery:register instance Marshal metadata(%v) failed!error(%v)", ins.Metadata, err) - } - } - res := new(struct { - Code int `json:"code"` - Message string `json:"message"` - }) - uri := fmt.Sprintf(_registerURL, d.pickNode()) - params := d.newParams(c) - params.Set("appid", ins.AppID) - for _, addr := range ins.Addrs { - params.Add("addrs", addr) - } - params.Set("version", ins.Version) - if ins.Status == 0 { - params.Set("status", _statusUP) - } else { - params.Set("status", strconv.FormatInt(ins.Status, 10)) - } - params.Set("metadata", string(metadata)) - if err = d.httpClient.Post(ctx, uri, "", params, &res); err != nil { - d.switchNode() - log.Error("discovery: register client.Get(%v) zone(%s) env(%s) appid(%s) addrs(%v) error(%v)", - uri, c.Zone, c.Env, ins.AppID, ins.Addrs, err) - return - } - if ec := ecode.Int(res.Code); 
!ecode.Equal(ecode.OK, ec) { - log.Warn("discovery: register client.Get(%v) env(%s) appid(%s) addrs(%v) code(%v)", uri, c.Env, ins.AppID, ins.Addrs, res.Code) - err = ec - return - } - log.Info("discovery: register client.Get(%v) env(%s) appid(%s) addrs(%s) success", uri, c.Env, ins.AppID, ins.Addrs) - return -} - -// renew Renew an instance with discovery -func (d *Discovery) renew(ctx context.Context, ins *naming.Instance) (err error) { - d.mutex.RLock() - c := d.c - d.mutex.RUnlock() - - res := new(struct { - Code int `json:"code"` - Message string `json:"message"` - }) - uri := fmt.Sprintf(_renewURL, d.pickNode()) - params := d.newParams(c) - params.Set("appid", ins.AppID) - if err = d.httpClient.Post(ctx, uri, "", params, &res); err != nil { - d.switchNode() - log.Error("discovery: renew client.Get(%v) env(%s) appid(%s) hostname(%s) error(%v)", - uri, c.Env, ins.AppID, c.Host, err) - return - } - if ec := ecode.Int(res.Code); !ecode.Equal(ecode.OK, ec) { - err = ec - if ecode.Equal(ecode.NothingFound, ec) { - return - } - log.Error("discovery: renew client.Get(%v) env(%s) appid(%s) hostname(%s) code(%v)", - uri, c.Env, ins.AppID, c.Host, res.Code) - return - } - return -} - -// cancel Remove the registered instance from discovery -func (d *Discovery) cancel(ins *naming.Instance) (err error) { - d.mutex.RLock() - c := d.c - d.mutex.RUnlock() - - res := new(struct { - Code int `json:"code"` - Message string `json:"message"` - }) - uri := fmt.Sprintf(_cancelURL, d.pickNode()) - params := d.newParams(c) - params.Set("appid", ins.AppID) - // request - if err = d.httpClient.Post(context.TODO(), uri, "", params, &res); err != nil { - d.switchNode() - log.Error("discovery cancel client.Get(%v) env(%s) appid(%s) hostname(%s) error(%v)", - uri, c.Env, ins.AppID, c.Host, err) - return - } - if ec := ecode.Int(res.Code); !ecode.Equal(ecode.OK, ec) { - log.Warn("discovery cancel client.Get(%v) env(%s) appid(%s) hostname(%s) code(%v)", - uri, c.Env, ins.AppID, c.Host, res.Code) - err = ec - return - } - log.Info("discovery cancel client.Get(%v) env(%s) appid(%s) hostname(%s) success", - uri, c.Env, ins.AppID, c.Host) - return -} - -// Set set ins status and metadata. 
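// Illustrative-only sketch of registering with the discovery client deleted above.
// New, Config, Register, Close and naming.Instance come from this diff; the node
// address, region/zone/env names, appid and listen address are made-up values.
package main

import (
	"context"

	"github.com/go-kratos/kratos/pkg/naming"
	"github.com/go-kratos/kratos/pkg/naming/discovery"
)

func main() {
	d := discovery.New(&discovery.Config{
		Nodes:  []string{"127.0.0.1:7171"},
		Region: "region01",
		Zone:   "zone01",
		Env:    "dev",
		Host:   "host-01",
	})
	defer d.Close()

	ins := &naming.Instance{
		AppID: "demo.service",
		Addrs: []string{"grpc://127.0.0.1:9000"},
	}
	// Register keeps the instance alive: it renews every 30s (_registerGap) and
	// re-registers if the server answers "nothing found"; cancel() deregisters.
	cancel, err := d.Register(context.Background(), ins)
	if err != nil {
		panic(err)
	}
	defer cancel()
	// ... serve ...
}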
-func (d *Discovery) Set(ins *naming.Instance) error { - return d.set(context.Background(), ins) -} - -// set set instance info with discovery -func (d *Discovery) set(ctx context.Context, ins *naming.Instance) (err error) { - d.mutex.RLock() - conf := d.c - d.mutex.RUnlock() - res := new(struct { - Code int `json:"code"` - Message string `json:"message"` - }) - uri := fmt.Sprintf(_setURL, d.pickNode()) - params := d.newParams(conf) - params.Set("appid", ins.AppID) - params.Set("version", ins.Version) - params.Set("status", strconv.FormatInt(ins.Status, 10)) - if ins.Metadata != nil { - var metadata []byte - if metadata, err = json.Marshal(ins.Metadata); err != nil { - log.Error("discovery:set instance Marshal metadata(%v) failed!error(%v)", ins.Metadata, err) - return - } - params.Set("metadata", string(metadata)) - } - if err = d.httpClient.Post(ctx, uri, "", params, &res); err != nil { - d.switchNode() - log.Error("discovery: set client.Get(%v) zone(%s) env(%s) appid(%s) addrs(%v) error(%v)", - uri, conf.Zone, conf.Env, ins.AppID, ins.Addrs, err) - return - } - if ec := ecode.Int(res.Code); !ecode.Equal(ecode.OK, ec) { - log.Warn("discovery: set client.Get(%v) env(%s) appid(%s) addrs(%v) code(%v)", - uri, conf.Env, ins.AppID, ins.Addrs, res.Code) - err = ec - return - } - log.Info("discovery: set client.Get(%v) env(%s) appid(%s) addrs(%s) success", uri+"?"+params.Encode(), conf.Env, ins.AppID, ins.Addrs) - return -} - -func (d *Discovery) serverproc() { - var ( - retry int - ctx context.Context - cancel context.CancelFunc - ) - ticker := time.NewTicker(time.Minute * 30) - defer ticker.Stop() - for { - if ctx == nil { - ctx, cancel = context.WithCancel(d.ctx) - d.mutex.Lock() - d.cancelPolls = cancel - d.mutex.Unlock() - } - select { - case <-d.ctx.Done(): - return - case <-ticker.C: - d.switchNode() - default: - } - apps, err := d.polls(ctx) - if err != nil { - d.switchNode() - if ctx.Err() == context.Canceled { - ctx = nil - continue - } - time.Sleep(time.Second) - retry++ - continue - } - retry = 0 - d.broadcast(apps) - } -} - -func (d *Discovery) pickNode() string { - nodes, ok := d.node.Load().([]string) - if !ok || len(nodes) == 0 { - return d.c.Nodes[rand.Intn(len(d.c.Nodes))] - } - return nodes[atomic.LoadUint64(&d.nodeIdx)%uint64(len(nodes))] -} - -func (d *Discovery) switchNode() { - atomic.AddUint64(&d.nodeIdx, 1) -} - -func (d *Discovery) polls(ctx context.Context) (apps map[string]*naming.InstancesInfo, err error) { - var ( - lastTss []int64 - appIDs []string - host = d.pickNode() - changed bool - ) - if host != d.lastHost { - d.lastHost = host - changed = true - } - d.mutex.RLock() - c := d.c - for k, v := range d.apps { - if changed { - v.lastTs = 0 - } - appIDs = append(appIDs, k) - lastTss = append(lastTss, v.lastTs) - } - d.mutex.RUnlock() - if len(appIDs) == 0 { - return - } - uri := fmt.Sprintf(_pollURL, host) - res := new(struct { - Code int `json:"code"` - Data map[string]*naming.InstancesInfo `json:"data"` - }) - params := url.Values{} - params.Set("env", c.Env) - params.Set("hostname", c.Host) - for _, appid := range appIDs { - params.Add("appid", appid) - } - for _, ts := range lastTss { - params.Add("latest_timestamp", strconv.FormatInt(ts, 10)) - } - if err = d.httpClient.Get(ctx, uri, "", params, res); err != nil { - d.switchNode() - if ctx.Err() != context.Canceled { - log.Error("discovery: client.Get(%s) error(%+v)", uri+"?"+params.Encode(), err) - } - return - } - if ec := ecode.Int(res.Code); !ecode.Equal(ecode.OK, ec) { - if 
!ecode.Equal(ecode.NotModified, ec) { - log.Error("discovery: client.Get(%s) get error code(%d)", uri+"?"+params.Encode(), res.Code) - err = ec - } - return - } - info, _ := json.Marshal(res.Data) - for _, app := range res.Data { - if app.LastTs == 0 { - err = ecode.ServerErr - log.Error("discovery: client.Get(%s) latest_timestamp is 0,instances:(%s)", uri+"?"+params.Encode(), info) - return - } - } - log.Info("discovery: successfully polls(%s) instances (%s)", uri+"?"+params.Encode(), info) - apps = res.Data - return -} - -func (d *Discovery) broadcast(apps map[string]*naming.InstancesInfo) { - for appID, v := range apps { - var count int - // v maybe nil in old version(less than v1.1) discovery,check incase of panic - if v == nil { - continue - } - for zone, ins := range v.Instances { - if len(ins) == 0 { - delete(v.Instances, zone) - } - count += len(ins) - } - if count == 0 { - continue - } - d.mutex.RLock() - app, ok := d.apps[appID] - d.mutex.RUnlock() - if ok { - app.lastTs = v.LastTs - app.zoneIns.Store(v) - d.mutex.RLock() - for rs := range app.resolver { - select { - case rs.event <- struct{}{}: - default: - } - } - d.mutex.RUnlock() - } - } -} - -func (d *Discovery) newParams(c *Config) url.Values { - params := url.Values{} - params.Set("region", c.Region) - params.Set("zone", c.Zone) - params.Set("env", c.Env) - params.Set("hostname", c.Host) - return params -} - -var r = rand.New(rand.NewSource(time.Now().UnixNano())) - -// shuffle pseudo-randomizes the order of elements. -// n is the number of elements. Shuffle panics if n < 0. -// swap swaps the elements with indexes i and j. -func shuffle(n int, swap func(i, j int)) { - if n < 0 { - panic("invalid argument to Shuffle") - } - - // Fisher-Yates shuffle: https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle - // Shuffle really ought not be called with n that doesn't fit in 32 bits. - // Not only will it take a very long time, but with 2³¹! possible permutations, - // there's no way that any PRNG can have a big enough internal state to - // generate even a minuscule percentage of the possible permutations. - // Nevertheless, the right API signature accepts an int n, so handle it as best we can. - i := n - 1 - for ; i > 1<<31-1-1; i-- { - j := int(r.Int63n(int64(i + 1))) - swap(i, j) - } - for ; i > 0; i-- { - j := int(r.Int31n(int32(i + 1))) - swap(i, j) - } -} diff --git a/pkg/naming/etcd/etcd.go b/pkg/naming/etcd/etcd.go deleted file mode 100644 index 9271cd276..000000000 --- a/pkg/naming/etcd/etcd.go +++ /dev/null @@ -1,324 +0,0 @@ -package etcd - -import ( - "context" - "encoding/json" - "errors" - "flag" - "fmt" - "os" - "strings" - "sync" - "sync/atomic" - "time" - - "go.etcd.io/etcd/clientv3" - "go.etcd.io/etcd/mvcc/mvccpb" - "google.golang.org/grpc" - - "github.com/go-kratos/kratos/pkg/log" - "github.com/go-kratos/kratos/pkg/naming" -) - -var ( - //etcdPrefix is a etcd globe key prefix - endpoints string - etcdPrefix string - - //Time units is second - registerTTL = 90 - defaultDialTimeout = 30 -) - -var ( - _once sync.Once - _builder naming.Builder - //ErrDuplication is a register duplication err - ErrDuplication = errors.New("etcd: instance duplicate registration") -) - -func init() { - addFlag(flag.CommandLine) -} - -func addFlag(fs *flag.FlagSet) { - // env - fs.StringVar(&endpoints, "etcd.endpoints", os.Getenv("ETCD_ENDPOINTS"), "etcd.endpoints is etcd endpoints. 
value: 127.0.0.1:2379,127.0.0.2:2379 etc.") - fs.StringVar(&etcdPrefix, "etcd.prefix", defaultString("ETCD_PREFIX", "kratos_etcd"), "etcd globe key prefix or use ETCD_PREFIX env variable. value etcd_prefix etc.") -} - -func defaultString(env, value string) string { - v := os.Getenv(env) - if v == "" { - return value - } - return v -} - -// Builder return default etcd resolver builder. -func Builder(c *clientv3.Config) naming.Builder { - _once.Do(func() { - _builder, _ = New(c) - }) - return _builder -} - -// Build register resolver into default etcd. -func Build(c *clientv3.Config, id string) naming.Resolver { - return Builder(c).Build(id) -} - -// EtcdBuilder is a etcd clientv3 EtcdBuilder -type EtcdBuilder struct { - cli *clientv3.Client - ctx context.Context - cancelFunc context.CancelFunc - - mutex sync.RWMutex - apps map[string]*appInfo - registry map[string]struct{} -} -type appInfo struct { - resolver map[*Resolve]struct{} - ins atomic.Value - e *EtcdBuilder - once sync.Once -} - -// Resolve etch resolver. -type Resolve struct { - id string - event chan struct{} - e *EtcdBuilder - opt *naming.BuildOptions -} - -// New is new a etcdbuilder -func New(c *clientv3.Config) (e *EtcdBuilder, err error) { - if c == nil { - if endpoints == "" { - panic(fmt.Errorf("invalid etcd config endpoints:%+v", endpoints)) - } - c = &clientv3.Config{ - Endpoints: strings.Split(endpoints, ","), - DialTimeout: time.Second * time.Duration(defaultDialTimeout), - DialOptions: []grpc.DialOption{grpc.WithBlock()}, - } - } - cli, err := clientv3.New(*c) - if err != nil { - return nil, err - } - ctx, cancel := context.WithCancel(context.Background()) - e = &EtcdBuilder{ - cli: cli, - ctx: ctx, - cancelFunc: cancel, - apps: map[string]*appInfo{}, - registry: map[string]struct{}{}, - } - return -} - -// Build disovery resovler builder. 
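// Illustrative-only sketch of the etcd-backed naming Builder deleted above. New,
// Register, Build, Watch and Fetch come from this diff; the etcd endpoint, appid
// and instance values are made up for the example.
package main

import (
	"context"
	"time"

	"go.etcd.io/etcd/clientv3"

	"github.com/go-kratos/kratos/pkg/naming"
	"github.com/go-kratos/kratos/pkg/naming/etcd"
)

func main() {
	e, err := etcd.New(&clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer e.Close()

	// Registration puts /{prefix}/{appid}/{hostname} with a TTL lease and refreshes
	// it every registerTTL/3 seconds until cancel() is called.
	cancel, err := e.Register(context.Background(), &naming.Instance{
		AppID:    "demo.service",
		Hostname: "host-01",
		Zone:     "zone01",
		Addrs:    []string{"grpc://127.0.0.1:9000"},
	})
	if err != nil {
		panic(err)
	}
	defer cancel()

	// Resolve the same appid and read the current instance set.
	r := e.Build("demo.service")
	<-r.Watch()
	if info, ok := r.Fetch(context.Background()); ok {
		_ = info.Instances // map[zone][]*naming.Instance
	}
}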
-func (e *EtcdBuilder) Build(appid string, opts ...naming.BuildOpt) naming.Resolver { - r := &Resolve{ - id: appid, - e: e, - event: make(chan struct{}, 1), - opt: new(naming.BuildOptions), - } - e.mutex.Lock() - app, ok := e.apps[appid] - if !ok { - app = &appInfo{ - resolver: make(map[*Resolve]struct{}), - e: e, - } - e.apps[appid] = app - } - app.resolver[r] = struct{}{} - e.mutex.Unlock() - if ok { - select { - case r.event <- struct{}{}: - default: - } - } - - app.once.Do(func() { - go app.watch(appid) - log.Info("etcd: AddWatch(%s) already watch(%v)", appid, ok) - }) - return r -} - -// Scheme return etcd's scheme -func (e *EtcdBuilder) Scheme() string { - return "etcd" -} - -// Register is register instance -func (e *EtcdBuilder) Register(ctx context.Context, ins *naming.Instance) (cancelFunc context.CancelFunc, err error) { - e.mutex.Lock() - if _, ok := e.registry[ins.AppID]; ok { - err = ErrDuplication - } else { - e.registry[ins.AppID] = struct{}{} - } - e.mutex.Unlock() - if err != nil { - return - } - ctx, cancel := context.WithCancel(e.ctx) - if err = e.register(ctx, ins); err != nil { - e.mutex.Lock() - delete(e.registry, ins.AppID) - e.mutex.Unlock() - cancel() - return - } - ch := make(chan struct{}, 1) - cancelFunc = context.CancelFunc(func() { - cancel() - <-ch - }) - - go func() { - ticker := time.NewTicker(time.Duration(registerTTL/3) * time.Second) - defer ticker.Stop() - for { - select { - case <-ticker.C: - _ = e.register(ctx, ins) - case <-ctx.Done(): - _ = e.unregister(ins) - ch <- struct{}{} - return - } - } - }() - return -} - -//注册和续约公用一个操作 -func (e *EtcdBuilder) register(ctx context.Context, ins *naming.Instance) (err error) { - prefix := e.keyPrefix(ins) - val, _ := json.Marshal(ins) - - ttlResp, err := e.cli.Grant(context.TODO(), int64(registerTTL)) - if err != nil { - log.Error("etcd: register client.Grant(%v) error(%v)", registerTTL, err) - return err - } - _, err = e.cli.Put(ctx, prefix, string(val), clientv3.WithLease(ttlResp.ID)) - if err != nil { - log.Error("etcd: register client.Put(%v) appid(%s) hostname(%s) error(%v)", - prefix, ins.AppID, ins.Hostname, err) - return err - } - return nil -} -func (e *EtcdBuilder) unregister(ins *naming.Instance) (err error) { - prefix := e.keyPrefix(ins) - - if _, err = e.cli.Delete(context.TODO(), prefix); err != nil { - log.Error("etcd: unregister client.Delete(%v) appid(%s) hostname(%s) error(%v)", - prefix, ins.AppID, ins.Hostname, err) - } - log.Info("etcd: unregister client.Delete(%v) appid(%s) hostname(%s) success", - prefix, ins.AppID, ins.Hostname) - return -} - -func (e *EtcdBuilder) keyPrefix(ins *naming.Instance) string { - return fmt.Sprintf("/%s/%s/%s", etcdPrefix, ins.AppID, ins.Hostname) -} - -// Close stop all running process including etcdfetch and register -func (e *EtcdBuilder) Close() error { - e.cancelFunc() - return nil -} -func (a *appInfo) watch(appID string) { - _ = a.fetchstore(appID) - prefix := fmt.Sprintf("/%s/%s/", etcdPrefix, appID) - rch := a.e.cli.Watch(a.e.ctx, prefix, clientv3.WithPrefix()) - for wresp := range rch { - for _, ev := range wresp.Events { - if ev.Type == mvccpb.PUT || ev.Type == mvccpb.DELETE { - _ = a.fetchstore(appID) - } - } - } -} - -func (a *appInfo) fetchstore(appID string) (err error) { - prefix := fmt.Sprintf("/%s/%s/", etcdPrefix, appID) - resp, err := a.e.cli.Get(a.e.ctx, prefix, clientv3.WithPrefix()) - if err != nil { - log.Error("etcd: fetch client.Get(%s) error(%+v)", prefix, err) - return err - } - - ins, err := a.paserIns(resp) - if err != nil { - 
return err - } - a.store(ins) - return nil -} -func (a *appInfo) store(ins *naming.InstancesInfo) { - a.ins.Store(ins) - a.e.mutex.RLock() - for rs := range a.resolver { - select { - case rs.event <- struct{}{}: - default: - } - } - a.e.mutex.RUnlock() -} - -func (a *appInfo) paserIns(resp *clientv3.GetResponse) (ins *naming.InstancesInfo, err error) { - ins = &naming.InstancesInfo{ - Instances: make(map[string][]*naming.Instance), - } - for _, ev := range resp.Kvs { - in := new(naming.Instance) - - err := json.Unmarshal(ev.Value, in) - if err != nil { - return nil, err - } - ins.Instances[in.Zone] = append(ins.Instances[in.Zone], in) - } - return ins, nil -} - -// Watch watch instance. -func (r *Resolve) Watch() <-chan struct{} { - return r.event -} - -// Fetch fetch resolver instance. -func (r *Resolve) Fetch(ctx context.Context) (ins *naming.InstancesInfo, ok bool) { - r.e.mutex.RLock() - app, ok := r.e.apps[r.id] - r.e.mutex.RUnlock() - if ok { - ins, ok = app.ins.Load().(*naming.InstancesInfo) - return - } - return -} - -// Close close resolver. -func (r *Resolve) Close() error { - r.e.mutex.Lock() - if app, ok := r.e.apps[r.id]; ok && len(app.resolver) != 0 { - delete(app.resolver, r) - } - r.e.mutex.Unlock() - return nil -} diff --git a/pkg/naming/naming.go b/pkg/naming/naming.go deleted file mode 100644 index 936faf626..000000000 --- a/pkg/naming/naming.go +++ /dev/null @@ -1,80 +0,0 @@ -package naming - -import ( - "context" -) - -// metadata common key -const ( - MetaWeight = "weight" - MetaCluster = "cluster" - MetaZone = "zone" - MetaColor = "color" -) - -// Instance represents a server the client connects to. -type Instance struct { - // Region is region. - Region string `json:"region"` - // Zone is IDC. - Zone string `json:"zone"` - // Env prod/pre、uat/fat1 - Env string `json:"env"` - // AppID is mapping servicetree appid. - AppID string `json:"appid"` - // Hostname is hostname from docker. - Hostname string `json:"hostname"` - // Addrs is the address of app instance - // format: scheme://host - Addrs []string `json:"addrs"` - // Version is publishing version. - Version string `json:"version"` - // LastTs is instance latest updated timestamp - LastTs int64 `json:"latest_timestamp"` - // Metadata is the information associated with Addr, which may be used - // to make load balancing decision. - Metadata map[string]string `json:"metadata"` - // Status instance status, eg: 1UP 2Waiting - Status int64 `json:"status"` -} - -// Resolver resolve naming service -type Resolver interface { - Fetch(context.Context) (*InstancesInfo, bool) - Watch() <-chan struct{} - Close() error -} - -// Registry Register an instance and renew automatically. -type Registry interface { - Register(ctx context.Context, ins *Instance) (cancel context.CancelFunc, err error) - Close() error -} - -// Builder resolver builder. -type Builder interface { - Build(id string, options ...BuildOpt) Resolver - Scheme() string -} - -// InstancesInfo instance info. -type InstancesInfo struct { - Instances map[string][]*Instance `json:"instances"` - LastTs int64 `json:"latest_timestamp"` - Scheduler *Scheduler `json:"scheduler"` -} - -// Scheduler scheduler. -type Scheduler struct { - Clients map[string]*ZoneStrategy `json:"clients"` -} - -// ZoneStrategy is the scheduling strategy of all zones -type ZoneStrategy struct { - Zones map[string]*Strategy `json:"zones"` -} - -// Strategy is zone scheduling strategy. 
-type Strategy struct { - Weight int64 `json:"weight"` -} diff --git a/pkg/naming/opt.go b/pkg/naming/opt.go deleted file mode 100644 index 9f1642339..000000000 --- a/pkg/naming/opt.go +++ /dev/null @@ -1,176 +0,0 @@ -package naming - -import ( - "encoding/json" - "fmt" - "math/rand" - "net/url" - "os" - "sort" - - "github.com/go-kratos/kratos/pkg/conf/env" - "github.com/go-kratos/kratos/pkg/log" - - "github.com/dgryski/go-farm" -) - -// BuildOptions build options. -type BuildOptions struct { - Filter func(map[string][]*Instance) map[string][]*Instance - Subset func([]*Instance, int) []*Instance - SubsetSize int - ClientZone string - Scheduler func(*InstancesInfo) []*Instance -} - -// BuildOpt build option interface. -type BuildOpt interface { - Apply(*BuildOptions) -} - -type funcOpt struct { - f func(*BuildOptions) -} - -func (f *funcOpt) Apply(opt *BuildOptions) { - f.f(opt) -} - -// Filter filter option. -func Filter(schema string, clusters map[string]struct{}) BuildOpt { - return &funcOpt{f: func(opt *BuildOptions) { - opt.Filter = func(inss map[string][]*Instance) map[string][]*Instance { - newInss := make(map[string][]*Instance) - for zone := range inss { - var instances []*Instance - for _, ins := range inss[zone] { - //如果r.clusters的长度大于0说明需要进行集群选择 - if len(clusters) > 0 { - if _, ok := clusters[ins.Metadata[MetaCluster]]; !ok { - continue - } - } - var addr string - for _, a := range ins.Addrs { - u, err := url.Parse(a) - if err == nil && u.Scheme == schema { - addr = u.Host - } - } - if addr == "" { - fmt.Fprintf(os.Stderr, "resolver: app(%s,%s) no valid grpc address(%v) found!", ins.AppID, ins.Hostname, ins.Addrs) - log.Warn("resolver: invalid rpc address(%s,%s,%v) found!", ins.AppID, ins.Hostname, ins.Addrs) - continue - } - instances = append(instances, ins) - } - newInss[zone] = instances - } - return newInss - } - }} -} - -func defulatSubset(inss []*Instance, size int) []*Instance { - backends := inss - if len(backends) <= size { - return backends - } - clientID := env.Hostname - sort.Slice(backends, func(i, j int) bool { - return backends[i].Hostname < backends[j].Hostname - }) - count := len(backends) / size - // hash得到ID - id := farm.Fingerprint64([]byte(clientID)) - // 获得rand轮数 - round := int64(id / uint64(count)) - - s := rand.NewSource(round) - ra := rand.New(s) - // 根据source洗牌 - ra.Shuffle(len(backends), func(i, j int) { - backends[i], backends[j] = backends[j], backends[i] - }) - start := (id % uint64(count)) * uint64(size) - return backends[int(start) : int(start)+size] -} - -// Subset Subset option. -func Subset(defaultSize int) BuildOpt { - return &funcOpt{f: func(opt *BuildOptions) { - opt.SubsetSize = defaultSize - opt.Subset = defulatSubset - }} -} - -// ScheduleNode ScheduleNode option. 
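// Illustrative-only sketch of composing the BuildOpt helpers defined above
// (Filter, Subset, ScheduleNode) when building a resolver. The option constructors
// and the Builder/Resolver interfaces come from this diff; the builder choice,
// appid, cluster set and zone name are assumptions for the example.
package main

import (
	"context"

	"github.com/go-kratos/kratos/pkg/naming"
	"github.com/go-kratos/kratos/pkg/naming/discovery"
)

func main() {
	// Any naming.Builder works the same way; discovery.Builder() assumes the
	// discovery nodes are already configured via flags or env variables.
	var b naming.Builder = discovery.Builder()

	r := b.Build("demo.service",
		naming.Filter("grpc", map[string]struct{}{"c1": {}}), // keep grpc:// addresses in cluster c1
		naming.Subset(50),                                    // stable per-client subset of at most 50 instances
		naming.ScheduleNode("sh001"),                         // apply zone weights from this client zone
	)
	defer r.Close()

	<-r.Watch()
	if info, ok := r.Fetch(context.Background()); ok {
		_ = info.Instances // zone -> filtered, weighted, subsetted instances
	}
}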
-func ScheduleNode(clientZone string) BuildOpt { - return &funcOpt{f: func(opt *BuildOptions) { - opt.ClientZone = clientZone - opt.Scheduler = func(app *InstancesInfo) (instances []*Instance) { - type Zone struct { - inss []*Instance - weight int64 - name string - score float64 - } - var zones []*Zone - - if app.Scheduler != nil { - si, err := json.Marshal(app.Scheduler) - if err == nil { - log.Info("schedule info: %s", string(si)) - } - if strategy, ok := app.Scheduler.Clients[clientZone]; ok { - var min *Zone - for name, zone := range strategy.Zones { - inss := app.Instances[name] - if len(inss) == 0 { - continue - } - z := &Zone{ - inss: inss, - weight: zone.Weight, - name: name, - score: float64(len(inss)) / float64(zone.Weight), - } - if min == nil || z.score < min.score { - min = z - } - zones = append(zones, z) - } - if opt.SubsetSize != 0 && len(min.inss) > opt.SubsetSize { - min.score = float64(opt.SubsetSize) / float64(min.weight) - } - for _, z := range zones { - nums := int(min.score * float64(z.weight)) - if nums == 0 { - nums = 1 - } - if nums < len(z.inss) { - if opt.Subset != nil { - z.inss = opt.Subset(z.inss, nums) - } else { - z.inss = defulatSubset(z.inss, nums) - } - } - } - } - } - for _, zone := range zones { - instances = append(instances, zone.inss...) - } - //如果没有拿到节点,则选择直接获取 - if len(instances) == 0 { - instances = app.Instances[clientZone] - if len(instances) == 0 { - for _, value := range app.Instances { - instances = append(instances, value...) - } - } - } - return - } - }} -} diff --git a/pkg/naming/opt_test.go b/pkg/naming/opt_test.go deleted file mode 100644 index a1631c63e..000000000 --- a/pkg/naming/opt_test.go +++ /dev/null @@ -1,299 +0,0 @@ -package naming - -import ( - "fmt" - "reflect" - "testing" -) - -func Test_Subset(t *testing.T) { - var inss1 []*Instance - for i := 0; i < 200; i++ { - ins := &Instance{ - Addrs: []string{fmt.Sprintf("grpc://127.0.0.%d:9000", i)}, - Metadata: map[string]string{MetaCluster: "c1"}, - } - inss1 = append(inss1, ins) - } - var opt BuildOptions - s := Subset(50) - s.Apply(&opt) - sub1 := opt.Subset(inss1, opt.SubsetSize) - if len(sub1) != 50 { - t.Fatalf("subset size should be 50") - } - sub2 := opt.Subset(inss1, opt.SubsetSize) - if !reflect.DeepEqual(sub1, sub2) { - t.Fatalf("two subsets should equal") - } -} - -func Test_FilterClusters(t *testing.T) { - inss := map[string][]*Instance{ - "sh001": {{ - Addrs: []string{"grpc://127.0.0.1:9000"}, - Metadata: map[string]string{MetaCluster: "c1"}, - }, { - Addrs: []string{"http://127.0.0.2:9000"}, - Metadata: map[string]string{MetaCluster: "c1"}, - }, { - Addrs: []string{"grpc://127.0.0.3:9000"}, - Metadata: map[string]string{MetaCluster: "c2"}, - }}, - "sh002": {{ - Addrs: []string{"grpc://127.0.0.1:9000"}, - Metadata: map[string]string{MetaCluster: "c3"}, - }, { - Addrs: []string{"zk://127.0.0.2:9000"}, - Metadata: map[string]string{MetaCluster: "c3"}, - }}, - } - res := map[string][]*Instance{ - "sh001": {{ - Addrs: []string{"grpc://127.0.0.1:9000"}, - Metadata: map[string]string{MetaCluster: "c1"}, - }}, - "sh002": {{ - Addrs: []string{"grpc://127.0.0.1:9000"}, - Metadata: map[string]string{MetaCluster: "c3"}, - }}, - } - var opt BuildOptions - f := Filter("grpc", map[string]struct{}{"c1": {}, "c3": {}}) - f.Apply(&opt) - filtered := opt.Filter(inss) - equal := reflect.DeepEqual(filtered, res) - if !equal { - t.Fatalf("Filter grpc should equal,filtered:%v expected:%v", filtered, res) - } -} - -func Test_FilterInvalidAddr(t *testing.T) { - inss := map[string][]*Instance{ 
- "sh001": {{ - Addrs: []string{"grpc://127.0.0.1:9000"}, - Metadata: map[string]string{MetaCluster: "c1"}, - }, { - Addrs: []string{"http://127.0.0.2:9000"}, - Metadata: map[string]string{MetaCluster: "c1"}, - }, { - Addrs: []string{"grpc://127.0.0.3:9000"}, - Metadata: map[string]string{MetaCluster: "c2"}, - }}, - "sh002": {{ - Addrs: []string{"grpc://127.0.0.1:9000"}, - Metadata: map[string]string{MetaCluster: "c3"}, - }, { - Addrs: []string{"zk://127.0.0.2:9000"}, - Metadata: map[string]string{MetaCluster: "c3"}, - }}, - } - res := map[string][]*Instance{ - "sh001": {{ - Addrs: []string{"grpc://127.0.0.1:9000"}, - Metadata: map[string]string{MetaCluster: "c1"}, - }, { - Addrs: []string{"grpc://127.0.0.3:9000"}, - Metadata: map[string]string{MetaCluster: "c2"}, - }}, - "sh002": {{ - Addrs: []string{"grpc://127.0.0.1:9000"}, - Metadata: map[string]string{MetaCluster: "c3"}, - }}, - } - var opt BuildOptions - f := Filter("grpc", nil) - f.Apply(&opt) - filtered := opt.Filter(inss) - equal := reflect.DeepEqual(filtered, res) - if !equal { - t.Fatalf("Filter grpc should equal,filtered:%v expected:%v", filtered, res) - } -} - -func Test_Schedule(t *testing.T) { - app := &InstancesInfo{ - Instances: map[string][]*Instance{ - "sh001": {{ - Zone: "sh001", - Addrs: []string{"grpc://127.0.0.1:9000"}, - Metadata: map[string]string{MetaCluster: "c1"}, - }, { - Zone: "sh001", - Addrs: []string{"grpc://127.0.0.2:9000"}, - Metadata: map[string]string{MetaCluster: "c1"}, - }, { - Zone: "sh001", - Addrs: []string{"grpc://127.0.0.3:9000"}, - Metadata: map[string]string{MetaCluster: "c2"}, - }}, - "sh002": {{ - Zone: "sh002", - Addrs: []string{"grpc://127.0.0.1:9000"}, - Metadata: map[string]string{MetaCluster: "c3"}, - }, { - Zone: "sh002", - Addrs: []string{"grpc://127.0.0.2:9000"}, - Metadata: map[string]string{MetaCluster: "c3"}, - }}, - }, - Scheduler: &Scheduler{map[string]*ZoneStrategy{"sh001": { - Zones: map[string]*Strategy{ - "sh001": {10}, - "sh002": {20}, - }, - }}}, - } - var opt BuildOptions - f := ScheduleNode("sh001") - f.Apply(&opt) - err := compareAddr(opt.Scheduler(app), map[string]int{"sh002": 2, "sh001": 1}) - if err != nil { - t.Fatalf(err.Error()) - } -} - -func Test_Schedule2(t *testing.T) { - app := &InstancesInfo{ - Instances: map[string][]*Instance{}, - Scheduler: &Scheduler{map[string]*ZoneStrategy{"sh001": { - Zones: map[string]*Strategy{ - "sh001": {10}, - "sh002": {20}, - }, - }}}, - } - for i := 0; i < 30; i++ { - ins := &Instance{ - Zone: "sh001", - Addrs: []string{fmt.Sprintf("grpc://127.0.0.%d:9000", i)}, - Metadata: map[string]string{MetaCluster: "c1"}, - } - app.Instances[ins.Zone] = append(app.Instances[ins.Zone], ins) - } - for i := 0; i < 30; i++ { - ins := &Instance{ - Zone: "sh002", - Addrs: []string{fmt.Sprintf("grpc://127.0.0.%d:9000", i)}, - Metadata: map[string]string{MetaCluster: "c2"}, - } - app.Instances[ins.Zone] = append(app.Instances[ins.Zone], ins) - } - var opt BuildOptions - f := ScheduleNode("sh001") - f.Apply(&opt) - err := compareAddr(opt.Scheduler(app), map[string]int{"sh002": 30, "sh001": 15}) - if err != nil { - t.Fatalf(err.Error()) - } -} - -func Test_Schedule3(t *testing.T) { - app := &InstancesInfo{ - Instances: map[string][]*Instance{}, - Scheduler: &Scheduler{map[string]*ZoneStrategy{"sh001": { - Zones: map[string]*Strategy{ - "sh001": {1}, - "sh002": {30}, - }, - }}}, - } - for i := 0; i < 30; i++ { - ins := &Instance{ - Zone: "sh001", - Addrs: []string{fmt.Sprintf("grpc://127.0.0.%d:9000", i)}, - Metadata: map[string]string{MetaCluster: 
"c1"}, - } - app.Instances[ins.Zone] = append(app.Instances[ins.Zone], ins) - } - for i := 0; i < 30; i++ { - ins := &Instance{ - Zone: "sh002", - Addrs: []string{fmt.Sprintf("grpc://127.0.0.%d:9000", i)}, - Metadata: map[string]string{MetaCluster: "c2"}, - } - app.Instances[ins.Zone] = append(app.Instances[ins.Zone], ins) - } - var opt BuildOptions - f := ScheduleNode("sh001") - f.Apply(&opt) - err := compareAddr(opt.Scheduler(app), map[string]int{"sh002": 30, "sh001": 1}) - if err != nil { - t.Fatalf(err.Error()) - } -} - -func Test_Schedule4(t *testing.T) { - app := &InstancesInfo{ - Instances: map[string][]*Instance{}, - Scheduler: &Scheduler{map[string]*ZoneStrategy{"sh001": { - Zones: map[string]*Strategy{ - "sh001": {1}, - "sh002": {30}, - }, - }}}, - } - for i := 0; i < 30; i++ { - ins := &Instance{ - Zone: "sh001", - Addrs: []string{fmt.Sprintf("grpc://127.0.0.%d:9000", i)}, - Metadata: map[string]string{MetaCluster: "c1"}, - } - app.Instances[ins.Zone] = append(app.Instances[ins.Zone], ins) - } - - var opt BuildOptions - f := ScheduleNode("sh001") - f.Apply(&opt) - err := compareAddr(opt.Scheduler(app), map[string]int{"sh001": 30, "sh002": 0}) - if err != nil { - t.Fatalf(err.Error()) - } -} - -func Test_Schedule5(t *testing.T) { - app := &InstancesInfo{ - Instances: map[string][]*Instance{}, - Scheduler: &Scheduler{map[string]*ZoneStrategy{"sh001": { - Zones: map[string]*Strategy{ - "sh002": {30}, - }, - }}}, - } - for i := 0; i < 30; i++ { - ins := &Instance{ - Zone: "sh001", - Addrs: []string{fmt.Sprintf("grpc://127.0.0.%d:9000", i)}, - Metadata: map[string]string{MetaCluster: "c1"}, - } - app.Instances[ins.Zone] = append(app.Instances[ins.Zone], ins) - } - for i := 0; i < 30; i++ { - ins := &Instance{ - Zone: "sh002", - Addrs: []string{fmt.Sprintf("grpc://127.0.0.%d:9000", i)}, - Metadata: map[string]string{MetaCluster: "c2"}, - } - app.Instances[ins.Zone] = append(app.Instances[ins.Zone], ins) - } - var opt BuildOptions - f := ScheduleNode("sh001") - f.Apply(&opt) - err := compareAddr(opt.Scheduler(app), map[string]int{"sh002": 30, "sh001": 0}) - if err != nil { - t.Fatalf(err.Error()) - } -} - -func compareAddr(inss []*Instance, c map[string]int) (err error) { - for _, ins := range inss { - c[ins.Zone] = c[ins.Zone] - 1 - } - for zone, v := range c { - if v != 0 { - err = fmt.Errorf("zone(%s) nums is %d", zone, v) - return - } - } - return -} diff --git a/pkg/naming/zookeeper/zookeeper.go b/pkg/naming/zookeeper/zookeeper.go deleted file mode 100644 index 3b532eb49..000000000 --- a/pkg/naming/zookeeper/zookeeper.go +++ /dev/null @@ -1,396 +0,0 @@ -package zookeeper - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/url" - "path" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/go-zookeeper/zk" - - "github.com/go-kratos/kratos/pkg/log" - "github.com/go-kratos/kratos/pkg/naming" - xtime "github.com/go-kratos/kratos/pkg/time" -) - -// Config is zookeeper config. -type Config struct { - Root string `json:"root"` - Endpoints []string `json:"endpoints"` - Timeout xtime.Duration `json:"timeout"` -} - -var ( - _once sync.Once - _builder naming.Builder - - // ErrDuplication is a register duplication err - ErrDuplication = errors.New("zookeeper: instance duplicate registration") -) - -// Builder return default zookeeper resolver builder. -func Builder(c *Config) naming.Builder { - _once.Do(func() { - _builder, _ = New(c) - }) - return _builder -} - -// Build register resolver into default zookeeper. 
-func Build(c *Config, id string) naming.Resolver { - return Builder(c).Build(id) -} - -type appInfo struct { - resolver map[*Resolve]struct{} - ins atomic.Value - zkb *Zookeeper - once sync.Once -} - -// Resolve zookeeper resolver. -type Resolve struct { - id string - event chan struct{} - zkb *Zookeeper -} - -// Zookeeper is a zookeeper client Builder. -// path: /{root}/{appid}/{ip} -> json(instance) -type Zookeeper struct { - c *Config - cli *zk.Conn - connEvent <-chan zk.Event - ctx context.Context - cancelFunc context.CancelFunc - - mutex sync.RWMutex - apps map[string]*appInfo - registry map[string]struct{} -} - -// New is new a zookeeper builder. -func New(c *Config) (zkb *Zookeeper, err error) { - if c.Timeout == 0 { - c.Timeout = xtime.Duration(time.Second) - } - if len(c.Endpoints) == 0 { - errInfo := "zookeeper New failed, endpoints is null" - log.Error(errInfo) - return nil, errors.New(errInfo) - } - - zkConn, connEvent, err := zk.Connect(c.Endpoints, time.Duration(c.Timeout)) - if err != nil { - log.Error(fmt.Sprintf("zk Connect err:(%v)", err)) - return - } - log.Info("zk Connect ok!") - - ctx, cancel := context.WithCancel(context.Background()) - zkb = &Zookeeper{ - c: c, - cli: zkConn, - connEvent: connEvent, - ctx: ctx, - cancelFunc: cancel, - apps: map[string]*appInfo{}, - registry: map[string]struct{}{}, - } - return -} - -// Build zookeeper resovler builder. -func (z *Zookeeper) Build(appid string, options ...naming.BuildOpt) naming.Resolver { - r := &Resolve{ - id: appid, - zkb: z, - event: make(chan struct{}, 1), - } - z.mutex.Lock() - app, ok := z.apps[appid] - if !ok { - app = &appInfo{ - resolver: make(map[*Resolve]struct{}), - zkb: z, - } - z.apps[appid] = app - } - app.resolver[r] = struct{}{} - z.mutex.Unlock() - if ok { - select { - case r.event <- struct{}{}: - default: - } - } - app.once.Do(func() { - go app.watch(appid) - }) - return r -} - -// Scheme return zookeeper's scheme. -func (z *Zookeeper) Scheme() string { - return "zookeeper" -} - -// Register is register instance. -func (z *Zookeeper) Register(ctx context.Context, ins *naming.Instance) (cancelFunc context.CancelFunc, err error) { - z.mutex.Lock() - if _, ok := z.registry[ins.AppID]; ok { - err = ErrDuplication - } else { - z.registry[ins.AppID] = struct{}{} - } - z.mutex.Unlock() - if err != nil { - return - } - ctx, cancel := context.WithCancel(z.ctx) - if err = z.register(ctx, ins); err != nil { - z.mutex.Lock() - delete(z.registry, ins.AppID) - z.mutex.Unlock() - cancel() - return - } - ch := make(chan struct{}, 1) - cancelFunc = context.CancelFunc(func() { - cancel() - <-ch - }) - go func() { - for { - select { - case connEvent := <-z.connEvent: - log.Info("watch zkClient state, connEvent:(%+v)", connEvent) - if connEvent.State == zk.StateHasSession { - if err = z.register(ctx, ins); err != nil { - log.Warn(fmt.Sprintf("watch zkClient state, fail to register node error:(%v)", err)) - continue - } - } - case <-ctx.Done(): - ch <- struct{}{} - return - } - } - }() - return -} - -func (z *Zookeeper) createPath(paths string) error { - var ( - lastPath = "/" - seps = strings.Split(paths, "/") - ) - for _, part := range seps { - if part == "" { - continue - } - lastPath = path.Join(lastPath, part) - ok, _, err := z.cli.Exists(lastPath) - if err != nil { - return err - } - if ok { - continue - } - ret, err := z.cli.Create(lastPath, nil, 0, zk.WorldACL(zk.PermAll)) - if err != nil { - log.Warn(fmt.Sprintf("createPath, fail to Create node:(%s). 
error:(%v)", paths, err)) - } else { - log.Info(fmt.Sprintf("createPath, succeed to Create node:(%s). retStr:(%s)", paths, ret)) - } - } - return nil -} - -func (z *Zookeeper) registerPeerServer(nodePath string, ins *naming.Instance) (err error) { - var ( - str string - ) - val, err := json.Marshal(ins) - if err != nil { - return - } - log.Info(fmt.Sprintf("registerPeerServer, ins after json.Marshal:(%v)", string(val))) - ok, _, err := z.cli.Exists(nodePath) - if err != nil { - return err - } - if ok { - return nil - } - str, err = z.cli.Create(nodePath, val, zk.FlagEphemeral, zk.WorldACL(zk.PermAll)) - if err != nil { - log.Warn(fmt.Sprintf("registerPeerServer, fail to Create node:%s. error:(%v)", nodePath, err)) - } else { - log.Info(fmt.Sprintf("registerPeerServer, succeed to Create node:%s. retStr:(%s)", nodePath, str)) - } - return -} - -// register is register instance to zookeeper. -func (z *Zookeeper) register(ctx context.Context, ins *naming.Instance) (err error) { - log.Info("zookeeper register enter, instance Addrs:(%v)", ins.Addrs) - - prefix := z.keyPrefix(ins.AppID) - if err = z.createPath(prefix); err != nil { - log.Warn(fmt.Sprintf("register, fail to createPath node error:(%v)", err)) - } - for _, addr := range ins.Addrs { - u, err := url.Parse(addr) - if err != nil { - continue - } - // grpc://127.0.0.1:8000 to 127.0.0.1 - nodePath := prefix + "/" + strings.SplitN(u.Host, ":", 2)[0] - if err = z.registerPeerServer(nodePath, ins); err != nil { - log.Warn(fmt.Sprintf("registerServer, fail to RegisterPeerServer node:%s error:(%v)", addr, err)) - } else { - log.Info("registerServer, succeed to RegistServer node.") - } - } - return nil -} - -func (z *Zookeeper) unregister(ins *naming.Instance) (err error) { - log.Info("zookeeper unregister enter, instance Addrs:(%v)", ins.Addrs) - prefix := z.keyPrefix(ins.AppID) - for _, addr := range ins.Addrs { - u, err := url.Parse(addr) - if err != nil { - continue - } - // grpc://127.0.0.1:8000 to 127.0.0.1 - nodePath := prefix + "/" + strings.SplitN(u.Host, ":", 2)[0] - exists, _, err := z.cli.Exists(nodePath) - if err != nil { - log.Error("zk.Conn.Exists node:(%v), error:(%v)", nodePath, err) - continue - } - if exists { - _, s, err := z.cli.Get(nodePath) - if err != nil { - log.Error("zk.Conn.Get node:(%s), error:(%v)", nodePath, err) - continue - } - if err = z.cli.Delete(nodePath, s.Version); err != nil { - log.Error("zk.Conn.Delete node:(%s), error:(%v)", nodePath, err) - continue - } - } - - log.Info(fmt.Sprintf("unregister, client.Delete:(%v), appid:(%v), hostname:(%v) success", nodePath, ins.AppID, ins.Hostname)) - } - return -} - -func (z *Zookeeper) keyPrefix(appID string) string { - return path.Join(z.c.Root, appID) -} - -// Close stop all running process including zk fetch and register. 
-func (z *Zookeeper) Close() error { - z.cancelFunc() - return nil -} - -func (a *appInfo) watch(appID string) { - _ = a.fetchstore(appID) - go func() { - prefix := a.zkb.keyPrefix(appID) - for { - log.Info(fmt.Sprintf("zk ChildrenW enter, prefix:(%v)", prefix)) - snapshot, _, event, err := a.zkb.cli.ChildrenW(prefix) - if err != nil { - log.Error("zk ChildrenW fail to watch:%s error:(%v)", prefix, err) - time.Sleep(time.Second) - _ = a.fetchstore(appID) - continue - } - log.Info(fmt.Sprintf("zk ChildrenW ok, prefix:%s snapshot:(%v)", prefix, snapshot)) - for ev := range event { - log.Info(fmt.Sprintf("zk ChildrenW ok, prefix:(%v), event Path:(%v), Type:(%v)", prefix, ev.Path, ev.Type)) - if ev.Type == zk.EventNodeChildrenChanged { - _ = a.fetchstore(appID) - } - } - } - }() -} - -func (a *appInfo) fetchstore(appID string) (err error) { - prefix := a.zkb.keyPrefix(appID) - childs, _, err := a.zkb.cli.Children(prefix) - if err != nil { - log.Error(fmt.Sprintf("fetchstore, fail to get Children of node:(%v), error:(%v)", prefix, err)) - return - } - log.Info(fmt.Sprintf("fetchstore, ok to get Children of node:(%v), childs:(%v)", prefix, childs)) - ins := &naming.InstancesInfo{ - Instances: make(map[string][]*naming.Instance), - } - for _, child := range childs { - nodePath := prefix + "/" + child - resp, _, err := a.zkb.cli.Get(nodePath) - if err != nil { - log.Error("zookeeper: fetch client.Get(%s) error:(%v)", nodePath, err) - return err - } - in := new(naming.Instance) - if err = json.Unmarshal(resp, in); err != nil { - return err - } - ins.Instances[in.Zone] = append(ins.Instances[in.Zone], in) - } - a.store(ins) - return nil -} - -func (a *appInfo) store(ins *naming.InstancesInfo) { - a.ins.Store(ins) - a.zkb.mutex.RLock() - for rs := range a.resolver { - select { - case rs.event <- struct{}{}: - default: - } - } - a.zkb.mutex.RUnlock() -} - -// Watch watch instance. -func (r *Resolve) Watch() <-chan struct{} { - return r.event -} - -// Fetch fetch resolver instance. -func (r *Resolve) Fetch(ctx context.Context) (ins *naming.InstancesInfo, ok bool) { - r.zkb.mutex.RLock() - app, ok := r.zkb.apps[r.id] - r.zkb.mutex.RUnlock() - if ok { - ins, ok = app.ins.Load().(*naming.InstancesInfo) - return - } - return -} - -// Close close resolver. -func (r *Resolve) Close() error { - r.zkb.mutex.Lock() - if app, ok := r.zkb.apps[r.id]; ok && len(app.resolver) != 0 { - delete(app.resolver, r) - } - r.zkb.mutex.Unlock() - return nil -} diff --git a/pkg/net/criticality/criticality.go b/pkg/net/criticality/criticality.go deleted file mode 100644 index 9e76e4891..000000000 --- a/pkg/net/criticality/criticality.go +++ /dev/null @@ -1,57 +0,0 @@ -package criticality - -// Criticality is -type Criticality string - -// criticality -var ( - // EmptyCriticality is used to mark any invalid criticality, and the empty criticality will be parsed as the default criticality later. - EmptyCriticality = Criticality("") - // CriticalPlus is reserved for the most critical requests, those that will result in serious user-visible impact if they fail. - CriticalPlus = Criticality("CRITICAL_PLUS") - // Critical is the default value for requests sent from production jobs. These requests will result in user-visible impact, but the impact may be less severe than those of CRITICAL_PLUS. Services are expected to provision enough capacity for all expected CRITICAL and CRITICAL_PLUS traffic. - Critical = Criticality("CRITICAL") - // SheddablePlus is traffic for which partial unavailability is expected. 
This is the default for batch jobs, which can retry requests minutes or even hours later. - SheddablePlus = Criticality("SHEDDABLE_PLUS") - // Sheddable is traffic for which frequent partial unavailability and occasional full unavailability is expected. - Sheddable = Criticality("SHEDDABLE") - - // higher is more critical - _criticalityEnum = map[Criticality]int{ - CriticalPlus: 40, - Critical: 30, - SheddablePlus: 20, - Sheddable: 10, - } - - _defaultCriticality = Critical -) - -// Value is used to get criticality value, higher value is more critical. -func Value(in Criticality) int { - v, ok := _criticalityEnum[in] - if !ok { - return _criticalityEnum[_defaultCriticality] - } - return v -} - -// Higher will compare the input criticality with self, return true if the input is more critical than self. -func (c Criticality) Higher(in Criticality) bool { - return Value(in) > Value(c) -} - -// Parse will parse raw criticality string as valid critality. Any invalid input will parse as empty criticality. -func Parse(raw string) Criticality { - crtl := Criticality(raw) - if _, ok := _criticalityEnum[crtl]; ok { - return crtl - } - return EmptyCriticality -} - -// Exist is used to check criticality is exist in several enumeration. -func Exist(c Criticality) bool { - _, ok := _criticalityEnum[c] - return ok -} diff --git a/pkg/net/http/blademaster/README.md b/pkg/net/http/blademaster/README.md deleted file mode 100644 index 7029187d2..000000000 --- a/pkg/net/http/blademaster/README.md +++ /dev/null @@ -1,5 +0,0 @@ -#### net/http/blademaster - -##### 项目简介 - -http 框架,带来如飞一般的体验。 diff --git a/pkg/net/http/blademaster/binding/binding.go b/pkg/net/http/blademaster/binding/binding.go deleted file mode 100644 index ea2c8e017..000000000 --- a/pkg/net/http/blademaster/binding/binding.go +++ /dev/null @@ -1,88 +0,0 @@ -package binding - -import ( - "net/http" - "strings" - - "gopkg.in/go-playground/validator.v9" -) - -// MIME -const ( - MIMEJSON = "application/json" - MIMEHTML = "text/html" - MIMEXML = "application/xml" - MIMEXML2 = "text/xml" - MIMEPlain = "text/plain" - MIMEPOSTForm = "application/x-www-form-urlencoded" - MIMEMultipartPOSTForm = "multipart/form-data" -) - -// Binding http binding request interface. -type Binding interface { - Name() string - Bind(*http.Request, interface{}) error -} - -// StructValidator http validator interface. -type StructValidator interface { - // ValidateStruct can receive any kind of type and it should never panic, even if the configuration is not right. - // If the received type is not a struct, any validation should be skipped and nil must be returned. - // If the received type is a struct or pointer to a struct, the validation should be performed. - // If the struct is not valid or the validation itself fails, a descriptive error should be returned. - // Otherwise nil must be returned. - ValidateStruct(interface{}) error - - // RegisterValidation adds a validation Func to a Validate's map of validators denoted by the key - // NOTE: if the key already exists, the previous validation function will be replaced. - // NOTE: this method is not thread-safe it is intended that these all be registered prior to any validation - RegisterValidation(string, validator.Func) error - - //GetValidate return the default validate - GetValidate() *validator.Validate -} - -// Validator default validator. 
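The criticality helpers above are small enough to show end to end; a quick sketch of Parse/Value/Higher using the enum weights defined in this file:

```go
package main

import (
	"fmt"

	"github.com/go-kratos/kratos/pkg/net/criticality"
)

func main() {
	// Unknown strings parse to EmptyCriticality; known ones keep their weight.
	c := criticality.Parse("SHEDDABLE_PLUS")
	fmt.Println(criticality.Exist(c)) // true
	fmt.Println(criticality.Value(c)) // 20
	// Higher reports whether the argument outranks the receiver: 40 > 20.
	fmt.Println(c.Higher(criticality.CriticalPlus)) // true
}
```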
-var Validator StructValidator = &defaultValidator{} - -// Binding -var ( - JSON = jsonBinding{} - XML = xmlBinding{} - Form = formBinding{} - Query = queryBinding{} - FormPost = formPostBinding{} - FormMultipart = formMultipartBinding{} -) - -// Default get by binding type by method and contexttype. -func Default(method, contentType string) Binding { - if method == "GET" { - return Form - } - - contentType = stripContentTypeParam(contentType) - switch contentType { - case MIMEJSON: - return JSON - case MIMEXML, MIMEXML2: - return XML - default: //case MIMEPOSTForm, MIMEMultipartPOSTForm: - return Form - } -} - -func validate(obj interface{}) error { - if Validator == nil { - return nil - } - return Validator.ValidateStruct(obj) -} - -func stripContentTypeParam(contentType string) string { - i := strings.Index(contentType, ";") - if i != -1 { - contentType = contentType[:i] - } - return contentType -} diff --git a/pkg/net/http/blademaster/binding/binding_test.go b/pkg/net/http/blademaster/binding/binding_test.go deleted file mode 100644 index 9b84c6c2f..000000000 --- a/pkg/net/http/blademaster/binding/binding_test.go +++ /dev/null @@ -1,341 +0,0 @@ -package binding - -import ( - "bytes" - "mime/multipart" - "net/http" - "testing" - - "github.com/stretchr/testify/assert" -) - -type FooStruct struct { - Foo string `msgpack:"foo" json:"foo" form:"foo" xml:"foo" validate:"required"` -} - -type FooBarStruct struct { - FooStruct - Bar string `msgpack:"bar" json:"bar" form:"bar" xml:"bar" validate:"required"` - Slice []string `form:"slice" validate:"max=10"` -} - -type ComplexDefaultStruct struct { - Int int `form:"int" default:"999"` - String string `form:"string" default:"default-string"` - Bool bool `form:"bool" default:"false"` - Int64Slice []int64 `form:"int64_slice,split" default:"1,2,3,4"` - Int8Slice []int8 `form:"int8_slice,split" default:"1,2,3,4"` -} - -type Int8SliceStruct struct { - State []int8 `form:"state,split"` -} - -type Int64SliceStruct struct { - State []int64 `form:"state,split"` -} - -type StringSliceStruct struct { - State []string `form:"state,split"` -} - -func TestBindingDefault(t *testing.T) { - assert.Equal(t, Default("GET", ""), Form) - assert.Equal(t, Default("GET", MIMEJSON), Form) - assert.Equal(t, Default("GET", MIMEJSON+"; charset=utf-8"), Form) - - assert.Equal(t, Default("POST", MIMEJSON), JSON) - assert.Equal(t, Default("PUT", MIMEJSON), JSON) - - assert.Equal(t, Default("POST", MIMEJSON+"; charset=utf-8"), JSON) - assert.Equal(t, Default("PUT", MIMEJSON+"; charset=utf-8"), JSON) - - assert.Equal(t, Default("POST", MIMEXML), XML) - assert.Equal(t, Default("PUT", MIMEXML2), XML) - - assert.Equal(t, Default("POST", MIMEPOSTForm), Form) - assert.Equal(t, Default("PUT", MIMEPOSTForm), Form) - - assert.Equal(t, Default("POST", MIMEPOSTForm+"; charset=utf-8"), Form) - assert.Equal(t, Default("PUT", MIMEPOSTForm+"; charset=utf-8"), Form) - - assert.Equal(t, Default("POST", MIMEMultipartPOSTForm), Form) - assert.Equal(t, Default("PUT", MIMEMultipartPOSTForm), Form) -} - -func TestStripContentType(t *testing.T) { - c1 := "application/vnd.mozilla.xul+xml" - c2 := "application/vnd.mozilla.xul+xml; charset=utf-8" - assert.Equal(t, stripContentTypeParam(c1), c1) - assert.Equal(t, stripContentTypeParam(c2), "application/vnd.mozilla.xul+xml") -} - -func TestBindInt8Form(t *testing.T) { - params := "state=1,2,3" - req, _ := http.NewRequest("GET", "http://api.bilibili.com/test?"+params, nil) - q := new(Int8SliceStruct) - Form.Bind(req, q) - assert.EqualValues(t, []int8{1, 2, 
3}, q.State) - - params = "state=1,2,3,256" - req, _ = http.NewRequest("GET", "http://api.bilibili.com/test?"+params, nil) - q = new(Int8SliceStruct) - assert.Error(t, Form.Bind(req, q)) - - params = "state=" - req, _ = http.NewRequest("GET", "http://api.bilibili.com/test?"+params, nil) - q = new(Int8SliceStruct) - assert.NoError(t, Form.Bind(req, q)) - assert.Len(t, q.State, 0) - - params = "state=1,,2" - req, _ = http.NewRequest("GET", "http://api.bilibili.com/test?"+params, nil) - q = new(Int8SliceStruct) - assert.NoError(t, Form.Bind(req, q)) - assert.EqualValues(t, []int8{1, 2}, q.State) -} - -func TestBindInt64Form(t *testing.T) { - params := "state=1,2,3" - req, _ := http.NewRequest("GET", "http://api.bilibili.com/test?"+params, nil) - q := new(Int64SliceStruct) - Form.Bind(req, q) - assert.EqualValues(t, []int64{1, 2, 3}, q.State) - - params = "state=" - req, _ = http.NewRequest("GET", "http://api.bilibili.com/test?"+params, nil) - q = new(Int64SliceStruct) - assert.NoError(t, Form.Bind(req, q)) - assert.Len(t, q.State, 0) -} - -func TestBindStringForm(t *testing.T) { - params := "state=1,2,3" - req, _ := http.NewRequest("GET", "http://api.bilibili.com/test?"+params, nil) - q := new(StringSliceStruct) - Form.Bind(req, q) - assert.EqualValues(t, []string{"1", "2", "3"}, q.State) - - params = "state=" - req, _ = http.NewRequest("GET", "http://api.bilibili.com/test?"+params, nil) - q = new(StringSliceStruct) - assert.NoError(t, Form.Bind(req, q)) - assert.Len(t, q.State, 0) - - params = "state=p,,p" - req, _ = http.NewRequest("GET", "http://api.bilibili.com/test?"+params, nil) - q = new(StringSliceStruct) - Form.Bind(req, q) - assert.EqualValues(t, []string{"p", "p"}, q.State) -} - -func TestBindingJSON(t *testing.T) { - testBodyBinding(t, - JSON, "json", - "/", "/", - `{"foo": "bar"}`, `{"bar": "foo"}`) -} - -func TestBindingForm(t *testing.T) { - testFormBinding(t, "POST", - "/", "/", - "foo=bar&bar=foo&slice=a&slice=b", "bar2=foo") -} - -func TestBindingForm2(t *testing.T) { - testFormBinding(t, "GET", - "/?foo=bar&bar=foo", "/?bar2=foo", - "", "") -} - -func TestBindingQuery(t *testing.T) { - testQueryBinding(t, "POST", - "/?foo=bar&bar=foo", "/", - "foo=unused", "bar2=foo") -} - -func TestBindingQuery2(t *testing.T) { - testQueryBinding(t, "GET", - "/?foo=bar&bar=foo", "/?bar2=foo", - "foo=unused", "") -} - -func TestBindingXML(t *testing.T) { - testBodyBinding(t, - XML, "xml", - "/", "/", - "bar", "foo") -} - -func createFormPostRequest() *http.Request { - req, _ := http.NewRequest("POST", "/?foo=getfoo&bar=getbar", bytes.NewBufferString("foo=bar&bar=foo")) - req.Header.Set("Content-Type", MIMEPOSTForm) - return req -} - -func createFormMultipartRequest() *http.Request { - boundary := "--testboundary" - body := new(bytes.Buffer) - mw := multipart.NewWriter(body) - defer mw.Close() - - mw.SetBoundary(boundary) - mw.WriteField("foo", "bar") - mw.WriteField("bar", "foo") - req, _ := http.NewRequest("POST", "/?foo=getfoo&bar=getbar", body) - req.Header.Set("Content-Type", MIMEMultipartPOSTForm+"; boundary="+boundary) - return req -} - -func TestBindingFormPost(t *testing.T) { - req := createFormPostRequest() - var obj FooBarStruct - FormPost.Bind(req, &obj) - - assert.Equal(t, obj.Foo, "bar") - assert.Equal(t, obj.Bar, "foo") -} - -func TestBindingFormMultipart(t *testing.T) { - req := createFormMultipartRequest() - var obj FooBarStruct - FormMultipart.Bind(req, &obj) - - assert.Equal(t, obj.Foo, "bar") - assert.Equal(t, obj.Bar, "foo") -} - -func TestValidationFails(t *testing.T) { 
- var obj FooStruct - req := requestWithBody("POST", "/", `{"bar": "foo"}`) - err := JSON.Bind(req, &obj) - assert.Error(t, err) -} - -func TestValidationDisabled(t *testing.T) { - backup := Validator - Validator = nil - defer func() { Validator = backup }() - - var obj FooStruct - req := requestWithBody("POST", "/", `{"bar": "foo"}`) - err := JSON.Bind(req, &obj) - assert.NoError(t, err) -} - -func TestExistsSucceeds(t *testing.T) { - type HogeStruct struct { - Hoge *int `json:"hoge" binding:"exists"` - } - - var obj HogeStruct - req := requestWithBody("POST", "/", `{"hoge": 0}`) - err := JSON.Bind(req, &obj) - assert.NoError(t, err) -} - -func TestFormDefaultValue(t *testing.T) { - params := "int=333&string=hello&bool=true&int64_slice=5,6,7,8&int8_slice=5,6,7,8" - req, _ := http.NewRequest("GET", "http://api.bilibili.com/test?"+params, nil) - q := new(ComplexDefaultStruct) - assert.NoError(t, Form.Bind(req, q)) - assert.Equal(t, 333, q.Int) - assert.Equal(t, "hello", q.String) - assert.Equal(t, true, q.Bool) - assert.EqualValues(t, []int64{5, 6, 7, 8}, q.Int64Slice) - assert.EqualValues(t, []int8{5, 6, 7, 8}, q.Int8Slice) - - params = "string=hello&bool=false" - req, _ = http.NewRequest("GET", "http://api.bilibili.com/test?"+params, nil) - q = new(ComplexDefaultStruct) - assert.NoError(t, Form.Bind(req, q)) - assert.Equal(t, 999, q.Int) - assert.Equal(t, "hello", q.String) - assert.Equal(t, false, q.Bool) - assert.EqualValues(t, []int64{1, 2, 3, 4}, q.Int64Slice) - assert.EqualValues(t, []int8{1, 2, 3, 4}, q.Int8Slice) - - params = "strings=hello" - req, _ = http.NewRequest("GET", "http://api.bilibili.com/test?"+params, nil) - q = new(ComplexDefaultStruct) - assert.NoError(t, Form.Bind(req, q)) - assert.Equal(t, 999, q.Int) - assert.Equal(t, "default-string", q.String) - assert.Equal(t, false, q.Bool) - assert.EqualValues(t, []int64{1, 2, 3, 4}, q.Int64Slice) - assert.EqualValues(t, []int8{1, 2, 3, 4}, q.Int8Slice) - - params = "int=&string=&bool=true&int64_slice=&int8_slice=" - req, _ = http.NewRequest("GET", "http://api.bilibili.com/test?"+params, nil) - q = new(ComplexDefaultStruct) - assert.NoError(t, Form.Bind(req, q)) - assert.Equal(t, 999, q.Int) - assert.Equal(t, "default-string", q.String) - assert.Equal(t, true, q.Bool) - assert.EqualValues(t, []int64{1, 2, 3, 4}, q.Int64Slice) - assert.EqualValues(t, []int8{1, 2, 3, 4}, q.Int8Slice) -} - -func testFormBinding(t *testing.T, method, path, badPath, body, badBody string) { - b := Form - assert.Equal(t, b.Name(), "form") - - obj := FooBarStruct{} - req := requestWithBody(method, path, body) - if method == "POST" { - req.Header.Add("Content-Type", MIMEPOSTForm) - } - err := b.Bind(req, &obj) - assert.NoError(t, err) - assert.Equal(t, obj.Foo, "bar") - assert.Equal(t, obj.Bar, "foo") - - obj = FooBarStruct{} - req = requestWithBody(method, badPath, badBody) - err = JSON.Bind(req, &obj) - assert.Error(t, err) -} - -func testQueryBinding(t *testing.T, method, path, badPath, body, badBody string) { - b := Query - assert.Equal(t, b.Name(), "query") - - obj := FooBarStruct{} - req := requestWithBody(method, path, body) - if method == "POST" { - req.Header.Add("Content-Type", MIMEPOSTForm) - } - err := b.Bind(req, &obj) - assert.NoError(t, err) - assert.Equal(t, obj.Foo, "bar") - assert.Equal(t, obj.Bar, "foo") -} - -func testBodyBinding(t *testing.T, b Binding, name, path, badPath, body, badBody string) { - assert.Equal(t, b.Name(), name) - - obj := FooStruct{} - req := requestWithBody("POST", path, body) - err := b.Bind(req, &obj) - 
assert.NoError(t, err) - assert.Equal(t, obj.Foo, "bar") - - obj = FooStruct{} - req = requestWithBody("POST", badPath, badBody) - err = JSON.Bind(req, &obj) - assert.Error(t, err) -} - -func requestWithBody(method, path, body string) (req *http.Request) { - req, _ = http.NewRequest(method, path, bytes.NewBufferString(body)) - return -} -func BenchmarkBindingForm(b *testing.B) { - req := requestWithBody("POST", "/", "foo=bar&bar=foo&slice=a&slice=b&slice=c&slice=w") - req.Header.Add("Content-Type", MIMEPOSTForm) - f := Form - for i := 0; i < b.N; i++ { - obj := FooBarStruct{} - f.Bind(req, &obj) - } -} diff --git a/pkg/net/http/blademaster/binding/default_validator.go b/pkg/net/http/blademaster/binding/default_validator.go deleted file mode 100644 index cced3f3e1..000000000 --- a/pkg/net/http/blademaster/binding/default_validator.go +++ /dev/null @@ -1,50 +0,0 @@ -package binding - -import ( - "reflect" - "sync" - - "gopkg.in/go-playground/validator.v9" -) - -type defaultValidator struct { - once sync.Once - validate *validator.Validate -} - -var _ StructValidator = &defaultValidator{} - -func (v *defaultValidator) ValidateStruct(obj interface{}) error { - if kindOfData(obj) == reflect.Struct { - v.lazyinit() - if err := v.validate.Struct(obj); err != nil { - return err - } - } - return nil -} - -func (v *defaultValidator) RegisterValidation(key string, fn validator.Func) error { - v.lazyinit() - return v.validate.RegisterValidation(key, fn) -} - -func (v *defaultValidator) lazyinit() { - v.once.Do(func() { - v.validate = validator.New() - }) -} - -func kindOfData(data interface{}) reflect.Kind { - value := reflect.ValueOf(data) - valueType := value.Kind() - if valueType == reflect.Ptr { - valueType = value.Elem().Kind() - } - return valueType -} - -func (v *defaultValidator) GetValidate() *validator.Validate { - v.lazyinit() - return v.validate -} diff --git a/pkg/net/http/blademaster/binding/example/test.pb.go b/pkg/net/http/blademaster/binding/example/test.pb.go deleted file mode 100644 index 3de8444ff..000000000 --- a/pkg/net/http/blademaster/binding/example/test.pb.go +++ /dev/null @@ -1,113 +0,0 @@ -// Code generated by protoc-gen-go. -// source: test.proto -// DO NOT EDIT! - -/* -Package example is a generated protocol buffer package. - -It is generated from these files: - test.proto - -It has these top-level messages: - Test -*/ -package example - -import proto "github.com/golang/protobuf/proto" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
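The StructValidator interface above exposes RegisterValidation for custom rules on the package-level Validator; a hedged sketch using the validator.v9 FieldLevel signature, with a hypothetical "notone" rule (the rule name, field and URL are illustrative only):

```go
package main

import (
	"fmt"
	"net/http"

	"gopkg.in/go-playground/validator.v9"

	"github.com/go-kratos/kratos/pkg/net/http/blademaster/binding"
)

// arg uses the hypothetical "notone" rule registered below.
type arg struct {
	Count int `form:"count" validate:"notone"`
}

func init() {
	// RegisterValidation overwrites any rule stored under the same key and is
	// not thread-safe, so it runs before any binding happens.
	_ = binding.Validator.RegisterValidation("notone", func(fl validator.FieldLevel) bool {
		return fl.Field().Int() != 1
	})
}

func main() {
	req, _ := http.NewRequest("GET", "http://example.com/do?count=1", nil)
	a := new(arg)
	fmt.Println(binding.Form.Bind(req, a)) // fails validation: count must not be 1
}
```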
-var _ = proto.Marshal -var _ = math.Inf - -type FOO int32 - -const ( - FOO_X FOO = 17 -) - -var FOO_name = map[int32]string{ - 17: "X", -} -var FOO_value = map[string]int32{ - "X": 17, -} - -func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p -} -func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) -} -func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") - if err != nil { - return err - } - *x = FOO(value) - return nil -} - -type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Test) Reset() { *m = Test{} } -func (m *Test) String() string { return proto.CompactTextString(m) } -func (*Test) ProtoMessage() {} - -const Default_Test_Type int32 = 77 - -func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" -} - -func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type -} - -func (m *Test) GetReps() []int64 { - if m != nil { - return m.Reps - } - return nil -} - -func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil -} - -type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } -func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } -func (*Test_OptionalGroup) ProtoMessage() {} - -func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) -} diff --git a/pkg/net/http/blademaster/binding/example/test.proto b/pkg/net/http/blademaster/binding/example/test.proto deleted file mode 100644 index 8ee9800aa..000000000 --- a/pkg/net/http/blademaster/binding/example/test.proto +++ /dev/null @@ -1,12 +0,0 @@ -package example; - -enum FOO {X=17;}; - -message Test { - required string label = 1; - optional int32 type = 2[default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4{ - required string RequiredField = 5; - } -} diff --git a/pkg/net/http/blademaster/binding/example_test.go b/pkg/net/http/blademaster/binding/example_test.go deleted file mode 100644 index c667a517c..000000000 --- a/pkg/net/http/blademaster/binding/example_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package binding - -import ( - "fmt" - "log" - "net/http" -) - -type Arg struct { - Max int64 `form:"max" validate:"max=10"` - Min int64 `form:"min" validate:"min=2"` - Range int64 `form:"range" validate:"min=1,max=10"` - // use split option to split arg 1,2,3 into slice [1 2 3] - // otherwise slice type with parse url.Values (eg:a=b&a=c) default. 
- Slice []int64 `form:"slice,split" validate:"min=1"` -} - -func ExampleBinding() { - req := initHTTP("max=9&min=3&range=3&slice=1,2,3") - arg := new(Arg) - if err := Form.Bind(req, arg); err != nil { - log.Fatal(err) - } - fmt.Printf("arg.Max %d\narg.Min %d\narg.Range %d\narg.Slice %v", arg.Max, arg.Min, arg.Range, arg.Slice) - // Output: - // arg.Max 9 - // arg.Min 3 - // arg.Range 3 - // arg.Slice [1 2 3] -} - -func initHTTP(params string) (req *http.Request) { - req, _ = http.NewRequest("GET", "http://api.bilibili.com/test?"+params, nil) - req.ParseForm() - return -} diff --git a/pkg/net/http/blademaster/binding/form.go b/pkg/net/http/blademaster/binding/form.go deleted file mode 100644 index 61aa5ee83..000000000 --- a/pkg/net/http/blademaster/binding/form.go +++ /dev/null @@ -1,55 +0,0 @@ -package binding - -import ( - "net/http" - - "github.com/pkg/errors" -) - -const defaultMemory = 32 * 1024 * 1024 - -type formBinding struct{} -type formPostBinding struct{} -type formMultipartBinding struct{} - -func (f formBinding) Name() string { - return "form" -} - -func (f formBinding) Bind(req *http.Request, obj interface{}) error { - if err := req.ParseForm(); err != nil { - return errors.WithStack(err) - } - if err := mapForm(obj, req.Form); err != nil { - return err - } - return validate(obj) -} - -func (f formPostBinding) Name() string { - return "form-urlencoded" -} - -func (f formPostBinding) Bind(req *http.Request, obj interface{}) error { - if err := req.ParseForm(); err != nil { - return errors.WithStack(err) - } - if err := mapForm(obj, req.PostForm); err != nil { - return err - } - return validate(obj) -} - -func (f formMultipartBinding) Name() string { - return "multipart/form-data" -} - -func (f formMultipartBinding) Bind(req *http.Request, obj interface{}) error { - if err := req.ParseMultipartForm(defaultMemory); err != nil { - return errors.WithStack(err) - } - if err := mapForm(obj, req.MultipartForm.Value); err != nil { - return err - } - return validate(obj) -} diff --git a/pkg/net/http/blademaster/binding/form_mapping.go b/pkg/net/http/blademaster/binding/form_mapping.go deleted file mode 100644 index ac4ecd116..000000000 --- a/pkg/net/http/blademaster/binding/form_mapping.go +++ /dev/null @@ -1,276 +0,0 @@ -package binding - -import ( - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/pkg/errors" -) - -// scache struct reflect type cache. 
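Putting the form tags together, a hedged sketch of the default and split options handled by the form mapper below; the field names, defaults and URL are placeholders:

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/go-kratos/kratos/pkg/net/http/blademaster/binding"
)

// ListReq is illustrative only.
type ListReq struct {
	Pn  int     `form:"pn" default:"1"` // used when the key is absent or empty
	Ps  int     `form:"ps" default:"20" validate:"max=100"`
	IDs []int64 `form:"ids,split"` // "1,2,3" becomes []int64{1, 2, 3}
}

func main() {
	req, _ := http.NewRequest("GET", "http://example.com/list?ps=50&ids=1,2,3", nil)
	arg := new(ListReq)
	if err := binding.Form.Bind(req, arg); err != nil { // ParseForm + mapForm + validate
		log.Fatal(err)
	}
	fmt.Println(arg.Pn, arg.Ps, arg.IDs) // 1 50 [1 2 3]
}
```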
-var scache = &cache{ - data: make(map[reflect.Type]*sinfo), -} - -type cache struct { - data map[reflect.Type]*sinfo - mutex sync.RWMutex -} - -func (c *cache) get(obj reflect.Type) (s *sinfo) { - var ok bool - c.mutex.RLock() - if s, ok = c.data[obj]; !ok { - c.mutex.RUnlock() - s = c.set(obj) - return - } - c.mutex.RUnlock() - return -} - -func (c *cache) set(obj reflect.Type) (s *sinfo) { - s = new(sinfo) - tp := obj.Elem() - for i := 0; i < tp.NumField(); i++ { - fd := new(field) - fd.tp = tp.Field(i) - tag := fd.tp.Tag.Get("form") - fd.name, fd.option = parseTag(tag) - if defV := fd.tp.Tag.Get("default"); defV != "" { - dv := reflect.New(fd.tp.Type).Elem() - setWithProperType(fd.tp.Type.Kind(), []string{defV}, dv, fd.option) - fd.hasDefault = true - fd.defaultValue = dv - } - s.field = append(s.field, fd) - } - c.mutex.Lock() - c.data[obj] = s - c.mutex.Unlock() - return -} - -type sinfo struct { - field []*field -} - -type field struct { - tp reflect.StructField - name string - option tagOptions - - hasDefault bool // if field had default value - defaultValue reflect.Value // field default value -} - -func mapForm(ptr interface{}, form map[string][]string) error { - sinfo := scache.get(reflect.TypeOf(ptr)) - val := reflect.ValueOf(ptr).Elem() - for i, fd := range sinfo.field { - typeField := fd.tp - structField := val.Field(i) - if !structField.CanSet() { - continue - } - - structFieldKind := structField.Kind() - inputFieldName := fd.name - if inputFieldName == "" { - inputFieldName = typeField.Name - - // if "form" tag is nil, we inspect if the field is a struct. - // this would not make sense for JSON parsing but it does for a form - // since data is flatten - if structFieldKind == reflect.Struct { - err := mapForm(structField.Addr().Interface(), form) - if err != nil { - return err - } - continue - } - } - inputValue, exists := form[inputFieldName] - if !exists { - // Set the field as default value when the input value is not exist - if fd.hasDefault { - structField.Set(fd.defaultValue) - } - continue - } - // Set the field as default value when the input value is empty - if fd.hasDefault && inputValue[0] == "" { - structField.Set(fd.defaultValue) - continue - } - if _, isTime := structField.Interface().(time.Time); isTime { - if err := setTimeField(inputValue[0], typeField, structField); err != nil { - return err - } - continue - } - if err := setWithProperType(typeField.Type.Kind(), inputValue, structField, fd.option); err != nil { - return err - } - } - return nil -} - -func setWithProperType(valueKind reflect.Kind, val []string, structField reflect.Value, option tagOptions) error { - switch valueKind { - case reflect.Int: - return setIntField(val[0], 0, structField) - case reflect.Int8: - return setIntField(val[0], 8, structField) - case reflect.Int16: - return setIntField(val[0], 16, structField) - case reflect.Int32: - return setIntField(val[0], 32, structField) - case reflect.Int64: - return setIntField(val[0], 64, structField) - case reflect.Uint: - return setUintField(val[0], 0, structField) - case reflect.Uint8: - return setUintField(val[0], 8, structField) - case reflect.Uint16: - return setUintField(val[0], 16, structField) - case reflect.Uint32: - return setUintField(val[0], 32, structField) - case reflect.Uint64: - return setUintField(val[0], 64, structField) - case reflect.Bool: - return setBoolField(val[0], structField) - case reflect.Float32: - return setFloatField(val[0], 32, structField) - case reflect.Float64: - return setFloatField(val[0], 64, structField) - 
case reflect.String: - structField.SetString(val[0]) - case reflect.Slice: - if option.Contains("split") { - val = strings.Split(val[0], ",") - } - filtered := filterEmpty(val) - switch structField.Type().Elem().Kind() { - case reflect.Int64: - valSli := make([]int64, 0, len(filtered)) - for i := 0; i < len(filtered); i++ { - d, err := strconv.ParseInt(filtered[i], 10, 64) - if err != nil { - return err - } - valSli = append(valSli, d) - } - structField.Set(reflect.ValueOf(valSli)) - case reflect.String: - valSli := make([]string, 0, len(filtered)) - for i := 0; i < len(filtered); i++ { - valSli = append(valSli, filtered[i]) - } - structField.Set(reflect.ValueOf(valSli)) - default: - sliceOf := structField.Type().Elem().Kind() - numElems := len(filtered) - slice := reflect.MakeSlice(structField.Type(), len(filtered), len(filtered)) - for i := 0; i < numElems; i++ { - if err := setWithProperType(sliceOf, filtered[i:], slice.Index(i), ""); err != nil { - return err - } - } - structField.Set(slice) - } - default: - return errors.New("Unknown type") - } - return nil -} - -func setIntField(val string, bitSize int, field reflect.Value) error { - if val == "" { - val = "0" - } - intVal, err := strconv.ParseInt(val, 10, bitSize) - if err == nil { - field.SetInt(intVal) - } - return errors.WithStack(err) -} - -func setUintField(val string, bitSize int, field reflect.Value) error { - if val == "" { - val = "0" - } - uintVal, err := strconv.ParseUint(val, 10, bitSize) - if err == nil { - field.SetUint(uintVal) - } - return errors.WithStack(err) -} - -func setBoolField(val string, field reflect.Value) error { - if val == "" { - val = "false" - } - boolVal, err := strconv.ParseBool(val) - if err == nil { - field.SetBool(boolVal) - } - return nil -} - -func setFloatField(val string, bitSize int, field reflect.Value) error { - if val == "" { - val = "0.0" - } - floatVal, err := strconv.ParseFloat(val, bitSize) - if err == nil { - field.SetFloat(floatVal) - } - return errors.WithStack(err) -} - -func setTimeField(val string, structField reflect.StructField, value reflect.Value) error { - timeFormat := structField.Tag.Get("time_format") - if timeFormat == "" { - return errors.New("Blank time format") - } - - if val == "" { - value.Set(reflect.ValueOf(time.Time{})) - return nil - } - - l := time.Local - if isUTC, _ := strconv.ParseBool(structField.Tag.Get("time_utc")); isUTC { - l = time.UTC - } - - if locTag := structField.Tag.Get("time_location"); locTag != "" { - loc, err := time.LoadLocation(locTag) - if err != nil { - return errors.WithStack(err) - } - l = loc - } - - t, err := time.ParseInLocation(timeFormat, val, l) - if err != nil { - return errors.WithStack(err) - } - - value.Set(reflect.ValueOf(t)) - return nil -} - -func filterEmpty(val []string) []string { - filtered := make([]string, 0, len(val)) - for _, v := range val { - if v != "" { - filtered = append(filtered, v) - } - } - return filtered -} diff --git a/pkg/net/http/blademaster/binding/json.go b/pkg/net/http/blademaster/binding/json.go deleted file mode 100644 index f01e479b3..000000000 --- a/pkg/net/http/blademaster/binding/json.go +++ /dev/null @@ -1,22 +0,0 @@ -package binding - -import ( - "encoding/json" - "net/http" - - "github.com/pkg/errors" -) - -type jsonBinding struct{} - -func (jsonBinding) Name() string { - return "json" -} - -func (jsonBinding) Bind(req *http.Request, obj interface{}) error { - decoder := json.NewDecoder(req.Body) - if err := decoder.Decode(obj); err != nil { - return errors.WithStack(err) - } - return 
validate(obj) -} diff --git a/pkg/net/http/blademaster/binding/query.go b/pkg/net/http/blademaster/binding/query.go deleted file mode 100644 index b169436eb..000000000 --- a/pkg/net/http/blademaster/binding/query.go +++ /dev/null @@ -1,19 +0,0 @@ -package binding - -import ( - "net/http" -) - -type queryBinding struct{} - -func (queryBinding) Name() string { - return "query" -} - -func (queryBinding) Bind(req *http.Request, obj interface{}) error { - values := req.URL.Query() - if err := mapForm(obj, values); err != nil { - return err - } - return validate(obj) -} diff --git a/pkg/net/http/blademaster/binding/tags.go b/pkg/net/http/blademaster/binding/tags.go deleted file mode 100644 index 535bd8624..000000000 --- a/pkg/net/http/blademaster/binding/tags.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package binding - -import ( - "strings" -) - -// tagOptions is the string following a comma in a struct field's "json" -// tag, or the empty string. It does not include the leading comma. -type tagOptions string - -// parseTag splits a struct field's json tag into its name and -// comma-separated options. -func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// Contains reports whether a comma-separated list of options -// contains a particular substr flag. substr must be surrounded by a -// string boundary or commas. -func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} diff --git a/pkg/net/http/blademaster/binding/validate_test.go b/pkg/net/http/blademaster/binding/validate_test.go deleted file mode 100644 index ac1793713..000000000 --- a/pkg/net/http/blademaster/binding/validate_test.go +++ /dev/null @@ -1,209 +0,0 @@ -package binding - -import ( - "bytes" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -type testInterface interface { - String() string -} - -type substructNoValidation struct { - IString string - IInt int -} - -type mapNoValidationSub map[string]substructNoValidation - -type structNoValidationValues struct { - substructNoValidation - - Boolean bool - - Uinteger uint - Integer int - Integer8 int8 - Integer16 int16 - Integer32 int32 - Integer64 int64 - Uinteger8 uint8 - Uinteger16 uint16 - Uinteger32 uint32 - Uinteger64 uint64 - - Float32 float32 - Float64 float64 - - String string - - Date time.Time - - Struct substructNoValidation - InlinedStruct struct { - String []string - Integer int - } - - IntSlice []int - IntPointerSlice []*int - StructPointerSlice []*substructNoValidation - StructSlice []substructNoValidation - InterfaceSlice []testInterface - - UniversalInterface interface{} - CustomInterface testInterface - - FloatMap map[string]float32 - StructMap mapNoValidationSub -} - -func createNoValidationValues() structNoValidationValues { - integer := 1 - s := structNoValidationValues{ - Boolean: true, - Uinteger: 1 << 29, - Integer: -10000, - Integer8: 120, - Integer16: -20000, - Integer32: 1 << 29, - Integer64: 1 << 61, - Uinteger8: 250, - Uinteger16: 50000, - Uinteger32: 1 << 31, - Uinteger64: 1 << 62, - Float32: 123.456, - Float64: 123.456789, 
- String: "text", - Date: time.Time{}, - CustomInterface: &bytes.Buffer{}, - Struct: substructNoValidation{}, - IntSlice: []int{-3, -2, 1, 0, 1, 2, 3}, - IntPointerSlice: []*int{&integer}, - StructSlice: []substructNoValidation{}, - UniversalInterface: 1.2, - FloatMap: map[string]float32{ - "foo": 1.23, - "bar": 232.323, - }, - StructMap: mapNoValidationSub{ - "foo": substructNoValidation{}, - "bar": substructNoValidation{}, - }, - // StructPointerSlice []noValidationSub - // InterfaceSlice []testInterface - } - s.InlinedStruct.Integer = 1000 - s.InlinedStruct.String = []string{"first", "second"} - s.IString = "substring" - s.IInt = 987654 - return s -} - -func TestValidateNoValidationValues(t *testing.T) { - origin := createNoValidationValues() - test := createNoValidationValues() - empty := structNoValidationValues{} - - assert.Nil(t, validate(test)) - assert.Nil(t, validate(&test)) - assert.Nil(t, validate(empty)) - assert.Nil(t, validate(&empty)) - - assert.Equal(t, origin, test) -} - -type structNoValidationPointer struct { - // substructNoValidation - - Boolean bool - - Uinteger *uint - Integer *int - Integer8 *int8 - Integer16 *int16 - Integer32 *int32 - Integer64 *int64 - Uinteger8 *uint8 - Uinteger16 *uint16 - Uinteger32 *uint32 - Uinteger64 *uint64 - - Float32 *float32 - Float64 *float64 - - String *string - - Date *time.Time - - Struct *substructNoValidation - - IntSlice *[]int - IntPointerSlice *[]*int - StructPointerSlice *[]*substructNoValidation - StructSlice *[]substructNoValidation - InterfaceSlice *[]testInterface - - FloatMap *map[string]float32 - StructMap *mapNoValidationSub -} - -func TestValidateNoValidationPointers(t *testing.T) { - //origin := createNoValidation_values() - //test := createNoValidation_values() - empty := structNoValidationPointer{} - - //assert.Nil(t, validate(test)) - //assert.Nil(t, validate(&test)) - assert.Nil(t, validate(empty)) - assert.Nil(t, validate(&empty)) - - //assert.Equal(t, origin, test) -} - -type Object map[string]interface{} - -func TestValidatePrimitives(t *testing.T) { - obj := Object{"foo": "bar", "bar": 1} - assert.NoError(t, validate(obj)) - assert.NoError(t, validate(&obj)) - assert.Equal(t, obj, Object{"foo": "bar", "bar": 1}) - - obj2 := []Object{{"foo": "bar", "bar": 1}, {"foo": "bar", "bar": 1}} - assert.NoError(t, validate(obj2)) - assert.NoError(t, validate(&obj2)) - - nu := 10 - assert.NoError(t, validate(nu)) - assert.NoError(t, validate(&nu)) - assert.Equal(t, nu, 10) - - str := "value" - assert.NoError(t, validate(str)) - assert.NoError(t, validate(&str)) - assert.Equal(t, str, "value") -} - -// structCustomValidation is a helper struct we use to check that -// custom validation can be registered on it. -// The `notone` binding directive is for custom validation and registered later. -// type structCustomValidation struct { -// Integer int `binding:"notone"` -// } - -// notOne is a custom validator meant to be used with `validator.v8` library. -// The method signature for `v9` is significantly different and this function -// would need to be changed for tests to pass after upgrade. -// See https://github.com/gin-gonic/gin/pull/1015. 
-// func notOne( -// v *validator.Validate, topStruct reflect.Value, currentStructOrField reflect.Value, -// field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string, -// ) bool { -// if val, ok := field.Interface().(int); ok { -// return val != 1 -// } -// return false -// } diff --git a/pkg/net/http/blademaster/binding/xml.go b/pkg/net/http/blademaster/binding/xml.go deleted file mode 100644 index 99b303c2f..000000000 --- a/pkg/net/http/blademaster/binding/xml.go +++ /dev/null @@ -1,22 +0,0 @@ -package binding - -import ( - "encoding/xml" - "net/http" - - "github.com/pkg/errors" -) - -type xmlBinding struct{} - -func (xmlBinding) Name() string { - return "xml" -} - -func (xmlBinding) Bind(req *http.Request, obj interface{}) error { - decoder := xml.NewDecoder(req.Body) - if err := decoder.Decode(obj); err != nil { - return errors.WithStack(err) - } - return validate(obj) -} diff --git a/pkg/net/http/blademaster/client.go b/pkg/net/http/blademaster/client.go deleted file mode 100644 index 68d0ea6a1..000000000 --- a/pkg/net/http/blademaster/client.go +++ /dev/null @@ -1,365 +0,0 @@ -package blademaster - -import ( - "bytes" - "context" - "crypto/tls" - "encoding/json" - "fmt" - "io" - "net" - xhttp "net/http" - "net/url" - "os" - "runtime" - "strconv" - "strings" - "sync" - "time" - - "github.com/go-kratos/kratos/pkg/conf/env" - "github.com/go-kratos/kratos/pkg/net/metadata" - "github.com/go-kratos/kratos/pkg/net/netutil/breaker" - xtime "github.com/go-kratos/kratos/pkg/time" - - "github.com/gogo/protobuf/proto" - pkgerr "github.com/pkg/errors" -) - -const ( - _minRead = 16 * 1024 // 16kb -) - -var ( - _noKickUserAgent = "blademaster" -) - -func init() { - n, err := os.Hostname() - if err == nil { - _noKickUserAgent = _noKickUserAgent + runtime.Version() + " " + n - } -} - -// ClientConfig is http client conf. -type ClientConfig struct { - Dial xtime.Duration - Timeout xtime.Duration - KeepAlive xtime.Duration - Breaker *breaker.Config - URL map[string]*ClientConfig - Host map[string]*ClientConfig -} - -// Client is http client. -type Client struct { - conf *ClientConfig - client *xhttp.Client - dialer *net.Dialer - transport xhttp.RoundTripper - - urlConf map[string]*ClientConfig - hostConf map[string]*ClientConfig - mutex sync.RWMutex - breaker *breaker.Group -} - -// NewClient new a http client. -func NewClient(c *ClientConfig) *Client { - client := new(Client) - client.conf = c - client.dialer = &net.Dialer{ - Timeout: time.Duration(c.Dial), - KeepAlive: time.Duration(c.KeepAlive), - } - - originTransport := &xhttp.Transport{ - DialContext: client.dialer.DialContext, - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - - // wraps RoundTripper for tracer - client.transport = &TraceTransport{RoundTripper: originTransport} - client.client = &xhttp.Client{ - Transport: client.transport, - } - client.urlConf = make(map[string]*ClientConfig) - client.hostConf = make(map[string]*ClientConfig) - client.breaker = breaker.NewGroup(c.Breaker) - if c.Timeout <= 0 { - panic("must config http timeout!!!") - } - for uri, cfg := range c.URL { - client.urlConf[uri] = cfg - } - for host, cfg := range c.Host { - client.hostConf[host] = cfg - } - return client -} - -// SetTransport set client transport -func (client *Client) SetTransport(t xhttp.RoundTripper) { - client.transport = t - client.client.Transport = t -} - -// SetConfig set client config. 
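For context, a minimal sketch of constructing the client being removed here; the endpoint is a placeholder, Timeout is required (NewClient panics otherwise), and Breaker is left unset on the assumption that the breaker group falls back to its package defaults:

```go
package main

import (
	"context"
	"fmt"
	"net/url"
	"time"

	bm "github.com/go-kratos/kratos/pkg/net/http/blademaster"
	xtime "github.com/go-kratos/kratos/pkg/time"
)

func main() {
	// Timeout is mandatory: NewClient panics when it is zero.
	cli := bm.NewClient(&bm.ClientConfig{
		Dial:      xtime.Duration(time.Second),
		Timeout:   xtime.Duration(time.Second),
		KeepAlive: xtime.Duration(30 * time.Second),
	})

	params := url.Values{}
	params.Set("mid", "1")
	var res struct {
		Code int `json:"code"`
	}
	// Get builds the request, applies per-URL/host config plus the breaker,
	// and JSON-decodes the body into res.
	if err := cli.Get(context.Background(), "http://example.com/ping", "", params, &res); err != nil {
		fmt.Println(err)
	}
}
```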
-func (client *Client) SetConfig(c *ClientConfig) { - client.mutex.Lock() - if c.Timeout > 0 { - client.conf.Timeout = c.Timeout - } - if c.KeepAlive > 0 { - client.dialer.KeepAlive = time.Duration(c.KeepAlive) - client.conf.KeepAlive = c.KeepAlive - } - if c.Dial > 0 { - client.dialer.Timeout = time.Duration(c.Dial) - client.conf.Timeout = c.Dial - } - if c.Breaker != nil { - client.conf.Breaker = c.Breaker - client.breaker.Reload(c.Breaker) - } - for uri, cfg := range c.URL { - client.urlConf[uri] = cfg - } - for host, cfg := range c.Host { - client.hostConf[host] = cfg - } - client.mutex.Unlock() -} - -// NewRequest new http request with method, uri, ip, values and headers. -// TODO(zhoujiahui): param realIP should be removed later. -func (client *Client) NewRequest(method, uri, realIP string, params url.Values) (req *xhttp.Request, err error) { - if method == xhttp.MethodGet { - req, err = xhttp.NewRequest(xhttp.MethodGet, fmt.Sprintf("%s?%s", uri, params.Encode()), nil) - } else { - req, err = xhttp.NewRequest(xhttp.MethodPost, uri, strings.NewReader(params.Encode())) - } - if err != nil { - err = pkgerr.Wrapf(err, "method:%s,uri:%s", method, uri) - return - } - const ( - _contentType = "Content-Type" - _urlencoded = "application/x-www-form-urlencoded" - _userAgent = "User-Agent" - ) - if method == xhttp.MethodPost { - req.Header.Set(_contentType, _urlencoded) - } - if realIP != "" { - req.Header.Set(_httpHeaderRemoteIP, realIP) - } - req.Header.Set(_userAgent, _noKickUserAgent+" "+env.AppID) - return -} - -// Get issues a GET to the specified URL. -func (client *Client) Get(c context.Context, uri, ip string, params url.Values, res interface{}) (err error) { - req, err := client.NewRequest(xhttp.MethodGet, uri, ip, params) - if err != nil { - return - } - return client.Do(c, req, res) -} - -// Post issues a Post to the specified URL. -func (client *Client) Post(c context.Context, uri, ip string, params url.Values, res interface{}) (err error) { - req, err := client.NewRequest(xhttp.MethodPost, uri, ip, params) - if err != nil { - return - } - return client.Do(c, req, res) -} - -// RESTfulGet issues a RESTful GET to the specified URL. -func (client *Client) RESTfulGet(c context.Context, uri, ip string, params url.Values, res interface{}, v ...interface{}) (err error) { - req, err := client.NewRequest(xhttp.MethodGet, fmt.Sprintf(uri, v...), ip, params) - if err != nil { - return - } - return client.Do(c, req, res, uri) -} - -// RESTfulPost issues a RESTful Post to the specified URL. -func (client *Client) RESTfulPost(c context.Context, uri, ip string, params url.Values, res interface{}, v ...interface{}) (err error) { - req, err := client.NewRequest(xhttp.MethodPost, fmt.Sprintf(uri, v...), ip, params) - if err != nil { - return - } - return client.Do(c, req, res, uri) -} - -// Raw sends an HTTP request and returns bytes response -func (client *Client) Raw(c context.Context, req *xhttp.Request, v ...string) (bs []byte, err error) { - var ( - ok bool - code string - cancel func() - resp *xhttp.Response - config *ClientConfig - timeout time.Duration - uri = fmt.Sprintf("%s://%s%s", req.URL.Scheme, req.Host, req.URL.Path) - ) - // NOTE fix prom & config uri key. 
- if len(v) == 1 { - uri = v[0] - } - // breaker - brk := client.breaker.Get(uri) - if err = brk.Allow(); err != nil { - code = "breaker" - _metricClientReqCodeTotal.Inc(uri, req.Method, code) - return - } - defer client.onBreaker(brk, &err) - // stat - now := time.Now() - defer func() { - _metricClientReqDur.Observe(int64(time.Since(now)/time.Millisecond), uri, req.Method) - if code != "" { - _metricClientReqCodeTotal.Inc(uri, req.Method, code) - } - }() - // get config - // 1.url config 2.host config 3.default - client.mutex.RLock() - if config, ok = client.urlConf[uri]; !ok { - if config, ok = client.hostConf[req.Host]; !ok { - config = client.conf - } - } - client.mutex.RUnlock() - // timeout - deliver := true - timeout = time.Duration(config.Timeout) - if deadline, ok := c.Deadline(); ok { - if ctimeout := time.Until(deadline); ctimeout < timeout { - // deliver small timeout - timeout = ctimeout - deliver = false - } - } - if deliver { - c, cancel = context.WithTimeout(c, timeout) - defer cancel() - } - setTimeout(req, timeout) - req = req.WithContext(c) - setCaller(req) - metadata.Range(c, - func(key string, value interface{}) { - setMetadata(req, key, value) - }, - metadata.IsOutgoingKey) - if resp, err = client.client.Do(req); err != nil { - err = pkgerr.Wrapf(err, "host:%s, url:%s", req.URL.Host, realURL(req)) - code = "failed" - return - } - defer resp.Body.Close() - if resp.StatusCode >= xhttp.StatusBadRequest { - err = pkgerr.Errorf("incorrect http status:%d host:%s, url:%s", resp.StatusCode, req.URL.Host, realURL(req)) - code = strconv.Itoa(resp.StatusCode) - return - } - if bs, err = readAll(resp.Body, _minRead); err != nil { - err = pkgerr.Wrapf(err, "host:%s, url:%s", req.URL.Host, realURL(req)) - return - } - return -} - -// Do sends an HTTP request and returns an HTTP json response. -func (client *Client) Do(c context.Context, req *xhttp.Request, res interface{}, v ...string) (err error) { - var bs []byte - if bs, err = client.Raw(c, req, v...); err != nil { - return - } - if res != nil { - if err = json.Unmarshal(bs, res); err != nil { - err = pkgerr.Wrapf(err, "host:%s, url:%s", req.URL.Host, realURL(req)) - } - } - return -} - -// JSON sends an HTTP request and returns an HTTP json response. -func (client *Client) JSON(c context.Context, req *xhttp.Request, res interface{}, v ...string) (err error) { - var bs []byte - if bs, err = client.Raw(c, req, v...); err != nil { - return - } - if res != nil { - if err = json.Unmarshal(bs, res); err != nil { - err = pkgerr.Wrapf(err, "host:%s, url:%s", req.URL.Host, realURL(req)) - } - } - return -} - -// PB sends an HTTP request and returns an HTTP proto response. -func (client *Client) PB(c context.Context, req *xhttp.Request, res proto.Message, v ...string) (err error) { - var bs []byte - if bs, err = client.Raw(c, req, v...); err != nil { - return - } - if res != nil { - if err = proto.Unmarshal(bs, res); err != nil { - err = pkgerr.Wrapf(err, "host:%s, url:%s", req.URL.Host, realURL(req)) - } - } - return -} - -func (client *Client) onBreaker(breaker breaker.Breaker, err *error) { - if err != nil && *err != nil { - breaker.MarkFailed() - } else { - breaker.MarkSuccess() - } -} - -// realUrl return url with http://host/params. 
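One detail worth noting from Raw above: the optional trailing argument overrides the stats/breaker key, which is how the RESTful helpers keep every expansion of one format URI in a single bucket. A hedged sketch, with placeholder URL and fields:

```go
package main

import (
	"context"
	"fmt"
	"net/url"
	"time"

	bm "github.com/go-kratos/kratos/pkg/net/http/blademaster"
	xtime "github.com/go-kratos/kratos/pkg/time"
)

func main() {
	cli := bm.NewClient(&bm.ClientConfig{Dial: xtime.Duration(time.Second), Timeout: xtime.Duration(time.Second)})

	var res struct {
		Code int `json:"code"`
	}
	// The format URI, not the expanded URL, is passed through to Raw, so every
	// /user/%d call shares one breaker and one metric label.
	err := cli.RESTfulGet(context.Background(), "http://example.com/user/%d", "", url.Values{}, &res, int64(123))
	fmt.Println(res.Code, err)
}
```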
-func realURL(req *xhttp.Request) string { - if req.Method == xhttp.MethodGet { - return req.URL.String() - } else if req.Method == xhttp.MethodPost { - ru := req.URL.Path - if req.Body != nil { - rd, ok := req.Body.(io.Reader) - if ok { - buf := bytes.NewBuffer([]byte{}) - buf.ReadFrom(rd) - ru = ru + "?" + buf.String() - } - } - return ru - } - return req.URL.Path -} - -// readAll reads from r until an error or EOF and returns the data it read -// from the internal buffer allocated with a specified capacity. -func readAll(r io.Reader, capacity int64) (b []byte, err error) { - buf := bytes.NewBuffer(make([]byte, 0, capacity)) - // If the buffer overflows, we will get bytes.ErrTooLarge. - // Return that as an error. Any other panic remains. - defer func() { - e := recover() - if e == nil { - return - } - if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge { - err = panicErr - } else { - panic(e) - } - }() - _, err = buf.ReadFrom(r) - return buf.Bytes(), err -} diff --git a/pkg/net/http/blademaster/context.go b/pkg/net/http/blademaster/context.go deleted file mode 100644 index fc6b544f9..000000000 --- a/pkg/net/http/blademaster/context.go +++ /dev/null @@ -1,408 +0,0 @@ -package blademaster - -import ( - "context" - "math" - "net/http" - "strconv" - "strings" - "sync" - "text/template" - - "github.com/go-kratos/kratos/pkg/net/metadata" - - "github.com/go-kratos/kratos/pkg/ecode" - "github.com/go-kratos/kratos/pkg/net/http/blademaster/binding" - "github.com/go-kratos/kratos/pkg/net/http/blademaster/render" - - "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/types" - "github.com/pkg/errors" -) - -const ( - _abortIndex int8 = math.MaxInt8 / 2 -) - -var ( - _openParen = []byte("(") - _closeParen = []byte(")") -) - -// Context is the most important part. It allows us to pass variables between -// middleware, manage the flow, validate the JSON of a request and render a -// JSON response for example. -type Context struct { - context.Context - - Request *http.Request - Writer http.ResponseWriter - - // flow control - index int8 - handlers []HandlerFunc - - // Keys is a key/value pair exclusively for the context of each request. - Keys map[string]interface{} - // This mutex protect Keys map - keysMutex sync.RWMutex - - Error error - - method string - engine *Engine - - RoutePath string - - Params Params -} - -/************************************/ -/********** CONTEXT CREATION ********/ -/************************************/ -func (c *Context) reset() { - c.Context = nil - c.index = -1 - c.handlers = nil - c.Keys = nil - c.Error = nil - c.method = "" - c.RoutePath = "" - c.Params = c.Params[0:0] -} - -/************************************/ -/*********** FLOW CONTROL ***********/ -/************************************/ - -// Next should be used only inside middleware. -// It executes the pending handlers in the chain inside the calling handler. -// See example in godoc. -func (c *Context) Next() { - c.index++ - for c.index < int8(len(c.handlers)) { - c.handlers[c.index](c) - c.index++ - } -} - -// Abort prevents pending handlers from being called. Note that this will not stop the current handler. -// Let's say you have an authorization middleware that validates that the current request is authorized. -// If the authorization fails (ex: the password does not match), call Abort to ensure the remaining handlers -// for this request are not called. 
-func (c *Context) Abort() { - c.index = _abortIndex -} - -// AbortWithStatus calls `Abort()` and writes the headers with the specified status code. -// For example, a failed attempt to authenticate a request could use: context.AbortWithStatus(401). -func (c *Context) AbortWithStatus(code int) { - c.Status(code) - c.Abort() -} - -// IsAborted returns true if the current context was aborted. -func (c *Context) IsAborted() bool { - return c.index >= _abortIndex -} - -/************************************/ -/******** METADATA MANAGEMENT********/ -/************************************/ - -// Set is used to store a new key/value pair exclusively for this context. -// It also lazy initializes c.Keys if it was not used previously. -func (c *Context) Set(key string, value interface{}) { - c.keysMutex.Lock() - if c.Keys == nil { - c.Keys = make(map[string]interface{}) - } - c.Keys[key] = value - c.keysMutex.Unlock() -} - -// Get returns the value for the given key, ie: (value, true). -// If the value does not exists it returns (nil, false) -func (c *Context) Get(key string) (value interface{}, exists bool) { - c.keysMutex.RLock() - value, exists = c.Keys[key] - c.keysMutex.RUnlock() - return -} - -// GetString returns the value associated with the key as a string. -func (c *Context) GetString(key string) (s string) { - if val, ok := c.Get(key); ok && val != nil { - s, _ = val.(string) - } - return -} - -// GetBool returns the value associated with the key as a boolean. -func (c *Context) GetBool(key string) (b bool) { - if val, ok := c.Get(key); ok && val != nil { - b, _ = val.(bool) - } - return -} - -// GetInt returns the value associated with the key as an integer. -func (c *Context) GetInt(key string) (i int) { - if val, ok := c.Get(key); ok && val != nil { - i, _ = val.(int) - } - return -} - -// GetUint returns the value associated with the key as an unsigned integer. -func (c *Context) GetUint(key string) (ui uint) { - if val, ok := c.Get(key); ok && val != nil { - ui, _ = val.(uint) - } - return -} - -// GetInt64 returns the value associated with the key as an integer. -func (c *Context) GetInt64(key string) (i64 int64) { - if val, ok := c.Get(key); ok && val != nil { - i64, _ = val.(int64) - } - return -} - -// GetUint64 returns the value associated with the key as an unsigned integer. -func (c *Context) GetUint64(key string) (ui64 uint64) { - if val, ok := c.Get(key); ok && val != nil { - ui64, _ = val.(uint64) - } - return -} - -// GetFloat64 returns the value associated with the key as a float64. -func (c *Context) GetFloat64(key string) (f64 float64) { - if val, ok := c.Get(key); ok && val != nil { - f64, _ = val.(float64) - } - return -} - -/************************************/ -/******** RESPONSE RENDERING ********/ -/************************************/ - -// bodyAllowedForStatus is a copy of http.bodyAllowedForStatus non-exported function. -func bodyAllowedForStatus(status int) bool { - switch { - case status >= 100 && status <= 199: - return false - case status == 204: - return false - case status == 304: - return false - } - return true -} - -// Status sets the HTTP response code. -func (c *Context) Status(code int) { - c.Writer.WriteHeader(code) -} - -// Render http response with http code by a render instance. 
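A short handler sketch for the per-request key/value store (Set/Get) together with the JSON renderer defined further below; the key name and payload are illustrative assumptions.

package main

import (
	bm "github.com/go-kratos/kratos/pkg/net/http/blademaster"
)

// userHandler stashes a value for later handlers in the chain and then writes
// the standard {code, message, ttl, data} envelope; passing a nil error lets
// ecode.Cause resolve to the OK business code.
func userHandler(c *bm.Context) {
	c.Set("mid", int64(42))
	if mid, ok := c.Get("mid"); ok {
		c.JSON(map[string]interface{}{"mid": mid}, nil)
		return
	}
	c.JSON(nil, nil)
}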
-func (c *Context) Render(code int, r render.Render) { - r.WriteContentType(c.Writer) - if code > 0 { - c.Status(code) - } - - if !bodyAllowedForStatus(code) { - return - } - - params := c.Request.Form - cb := template.JSEscapeString(params.Get("callback")) - jsonp := cb != "" - if jsonp { - c.Writer.Write([]byte(cb)) - c.Writer.Write(_openParen) - } - - if err := r.Render(c.Writer); err != nil { - c.Error = err - return - } - - if jsonp { - if _, err := c.Writer.Write(_closeParen); err != nil { - c.Error = errors.WithStack(err) - } - } -} - -// JSON serializes the given struct as JSON into the response body. -// It also sets the Content-Type as "application/json". -func (c *Context) JSON(data interface{}, err error) { - code := http.StatusOK - c.Error = err - bcode := ecode.Cause(err) - // TODO app allow 5xx? - /* - if bcode.Code() == -500 { - code = http.StatusServiceUnavailable - } - */ - writeStatusCode(c.Writer, bcode.Code()) - c.Render(code, render.JSON{ - Code: bcode.Code(), - Message: bcode.Message(), - Data: data, - }) -} - -// JSONMap serializes the given map as map JSON into the response body. -// It also sets the Content-Type as "application/json". -func (c *Context) JSONMap(data map[string]interface{}, err error) { - code := http.StatusOK - c.Error = err - bcode := ecode.Cause(err) - // TODO app allow 5xx? - /* - if bcode.Code() == -500 { - code = http.StatusServiceUnavailable - } - */ - writeStatusCode(c.Writer, bcode.Code()) - data["code"] = bcode.Code() - if _, ok := data["message"]; !ok { - data["message"] = bcode.Message() - } - c.Render(code, render.MapJSON(data)) -} - -// XML serializes the given struct as XML into the response body. -// It also sets the Content-Type as "application/xml". -func (c *Context) XML(data interface{}, err error) { - code := http.StatusOK - c.Error = err - bcode := ecode.Cause(err) - // TODO app allow 5xx? - /* - if bcode.Code() == -500 { - code = http.StatusServiceUnavailable - } - */ - writeStatusCode(c.Writer, bcode.Code()) - c.Render(code, render.XML{ - Code: bcode.Code(), - Message: bcode.Message(), - Data: data, - }) -} - -// Protobuf serializes the given struct as PB into the response body. -// It also sets the ContentType as "application/x-protobuf". -func (c *Context) Protobuf(data proto.Message, err error) { - var ( - bytes []byte - ) - - code := http.StatusOK - c.Error = err - bcode := ecode.Cause(err) - - any := new(types.Any) - if data != nil { - if bytes, err = proto.Marshal(data); err != nil { - c.Error = errors.WithStack(err) - return - } - any.TypeUrl = "type.googleapis.com/" + proto.MessageName(data) - any.Value = bytes - } - writeStatusCode(c.Writer, bcode.Code()) - c.Render(code, render.PB{ - Code: int64(bcode.Code()), - Message: bcode.Message(), - Data: any, - }) -} - -// Bytes writes some data into the body stream and updates the HTTP code. -func (c *Context) Bytes(code int, contentType string, data ...[]byte) { - c.Render(code, render.Data{ - ContentType: contentType, - Data: data, - }) -} - -// String writes the given string into the response body. -func (c *Context) String(code int, format string, values ...interface{}) { - c.Render(code, render.String{Format: format, Data: values}) -} - -// Redirect returns a HTTP redirect to the specific location. -func (c *Context) Redirect(code int, location string) { - c.Render(-1, render.Redirect{ - Code: code, - Location: location, - Request: c.Request, - }) -} - -// BindWith bind req arg with parser. 
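A sketch of how Bind/BindWith (defined just below) are typically used to validate request arguments; the struct tags and field names are assumptions about the binding package, not something shown here. On a bind failure, mustBindWith writes an ecode.RequestErr JSON body and aborts the chain, so the handler can simply return.

package main

import (
	bm "github.com/go-kratos/kratos/pkg/net/http/blademaster"
)

// searchParams is a hypothetical argument struct; the "form" tags assume the
// default form binding selected by binding.Default for GET requests.
type searchParams struct {
	Keyword string `form:"keyword"`
	Page    int    `form:"pn"`
}

func searchHandler(c *bm.Context) {
	p := new(searchParams)
	if err := c.Bind(p); err != nil {
		return // error response already rendered by mustBindWith
	}
	c.JSON(p, nil)
}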
-func (c *Context) BindWith(obj interface{}, b binding.Binding) error { - return c.mustBindWith(obj, b) -} - -// Bind checks the Content-Type to select a binding engine automatically, -// Depending the "Content-Type" header different bindings are used: -// "application/json" --> JSON binding -// "application/xml" --> XML binding -// otherwise --> returns an error. -// It parses the request's body as JSON if Content-Type == "application/json" using JSON or XML as a JSON input. -// It decodes the json payload into the struct specified as a pointer. -// It writes a 400 error and sets Content-Type header "text/plain" in the response if input is not valid. -func (c *Context) Bind(obj interface{}) error { - b := binding.Default(c.Request.Method, c.Request.Header.Get("Content-Type")) - return c.mustBindWith(obj, b) -} - -// mustBindWith binds the passed struct pointer using the specified binding engine. -// It will abort the request with HTTP 400 if any error ocurrs. -// See the binding package. -func (c *Context) mustBindWith(obj interface{}, b binding.Binding) (err error) { - if err = b.Bind(c.Request, obj); err != nil { - c.Error = ecode.RequestErr - c.Render(http.StatusOK, render.JSON{ - Code: ecode.RequestErr.Code(), - Message: err.Error(), - Data: nil, - }) - c.Abort() - } - return -} - -func writeStatusCode(w http.ResponseWriter, ecode int) { - header := w.Header() - header.Set("kratos-status-code", strconv.FormatInt(int64(ecode), 10)) -} - -// RemoteIP implements a best effort algorithm to return the real client IP, it parses -// X-Real-IP and X-Forwarded-For in order to work properly with reverse-proxies such us: nginx or haproxy. -// Use X-Forwarded-For before X-Real-Ip as nginx uses X-Real-Ip with the proxy's IP. -// Notice: metadata.RemoteIP take precedence over X-Forwarded-For and X-Real-Ip -func (c *Context) RemoteIP() (remoteIP string) { - remoteIP = metadata.String(c, metadata.RemoteIP) - if remoteIP != "" { - return - } - - remoteIP = c.Request.Header.Get("X-Forwarded-For") - remoteIP = strings.TrimSpace(strings.Split(remoteIP, ",")[0]) - if remoteIP == "" { - remoteIP = strings.TrimSpace(c.Request.Header.Get("X-Real-Ip")) - } - - return -} diff --git a/pkg/net/http/blademaster/cors.go b/pkg/net/http/blademaster/cors.go deleted file mode 100644 index 8192e0871..000000000 --- a/pkg/net/http/blademaster/cors.go +++ /dev/null @@ -1,249 +0,0 @@ -package blademaster - -import ( - "net/http" - "strconv" - "strings" - "time" - - "github.com/go-kratos/kratos/pkg/log" - - "github.com/pkg/errors" -) - -// CORSConfig represents all available options for the middleware. -type CORSConfig struct { - AllowAllOrigins bool - - // AllowedOrigins is a list of origins a cross-domain request can be executed from. - // If the special "*" value is present in the list, all origins will be allowed. - // Default value is [] - AllowOrigins []string - - // AllowOriginFunc is a custom function to validate the origin. It take the origin - // as argument and returns true if allowed or false otherwise. If this option is - // set, the content of AllowedOrigins is ignored. - AllowOriginFunc func(origin string) bool - - // AllowedMethods is a list of methods the client is allowed to use with - // cross-domain requests. Default value is simple methods (GET and POST) - AllowMethods []string - - // AllowedHeaders is list of non simple headers the client is allowed to use with - // cross-domain requests. 
- AllowHeaders []string - - // AllowCredentials indicates whether the request can include user credentials like - // cookies, HTTP authentication or client side SSL certificates. - AllowCredentials bool - - // ExposedHeaders indicates which headers are safe to expose to the API of a CORS - // API specification - ExposeHeaders []string - - // MaxAge indicates how long (in seconds) the results of a preflight request - // can be cached - MaxAge time.Duration -} - -type cors struct { - allowAllOrigins bool - allowCredentials bool - allowOriginFunc func(string) bool - allowOrigins []string - normalHeaders http.Header - preflightHeaders http.Header -} - -type converter func(string) string - -// Validate is check configuration of user defined. -func (c *CORSConfig) Validate() error { - if c.AllowAllOrigins && (c.AllowOriginFunc != nil || len(c.AllowOrigins) > 0) { - return errors.New("conflict settings: all origins are allowed. AllowOriginFunc or AllowedOrigins is not needed") - } - if !c.AllowAllOrigins && c.AllowOriginFunc == nil && len(c.AllowOrigins) == 0 { - return errors.New("conflict settings: all origins disabled") - } - for _, origin := range c.AllowOrigins { - if origin != "*" && !strings.HasPrefix(origin, "http://") && !strings.HasPrefix(origin, "https://") { - return errors.New("bad origin: origins must either be '*' or include http:// or https://") - } - } - return nil -} - -// CORS returns the location middleware with default configuration. -func CORS(allowOriginHosts []string) HandlerFunc { - config := &CORSConfig{ - AllowMethods: []string{"GET", "POST"}, - AllowHeaders: []string{"Origin", "Content-Length", "Content-Type"}, - AllowCredentials: true, - MaxAge: time.Duration(0), - AllowOriginFunc: func(origin string) bool { - for _, host := range allowOriginHosts { - if strings.HasSuffix(strings.ToLower(origin), host) { - return true - } - } - return false - }, - } - return newCORS(config) -} - -// newCORS returns the location middleware with user-defined custom configuration. 
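A wiring sketch for the CORS middleware above, assuming an engine created elsewhere; the origin suffixes are placeholders.

package main

import (
	bm "github.com/go-kratos/kratos/pkg/net/http/blademaster"
)

// registerCORS installs the default CORS policy: GET/POST with credentials,
// allowed for any Origin whose lowercased value ends with one of the suffixes.
func registerCORS(engine *bm.Engine) {
	engine.UseFunc(bm.CORS([]string{".example.com", ".example.org"}))
}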
-func newCORS(config *CORSConfig) HandlerFunc { - if err := config.Validate(); err != nil { - panic(err.Error()) - } - cors := &cors{ - allowOriginFunc: config.AllowOriginFunc, - allowAllOrigins: config.AllowAllOrigins, - allowCredentials: config.AllowCredentials, - allowOrigins: normalize(config.AllowOrigins), - normalHeaders: generateNormalHeaders(config), - preflightHeaders: generatePreflightHeaders(config), - } - - return func(c *Context) { - cors.applyCORS(c) - } -} - -func (cors *cors) applyCORS(c *Context) { - origin := c.Request.Header.Get("Origin") - if len(origin) == 0 { - // request is not a CORS request - return - } - if !cors.validateOrigin(origin) { - log.V(5).Info("The request's Origin header `%s` does not match any of allowed origins.", origin) - c.AbortWithStatus(http.StatusForbidden) - return - } - - if c.Request.Method == "OPTIONS" { - cors.handlePreflight(c) - defer c.AbortWithStatus(200) - } else { - cors.handleNormal(c) - } - - if !cors.allowAllOrigins { - header := c.Writer.Header() - header.Set("Access-Control-Allow-Origin", origin) - } -} - -func (cors *cors) validateOrigin(origin string) bool { - if cors.allowAllOrigins { - return true - } - for _, value := range cors.allowOrigins { - if value == origin { - return true - } - } - if cors.allowOriginFunc != nil { - return cors.allowOriginFunc(origin) - } - return false -} - -func (cors *cors) handlePreflight(c *Context) { - header := c.Writer.Header() - for key, value := range cors.preflightHeaders { - header[key] = value - } -} - -func (cors *cors) handleNormal(c *Context) { - header := c.Writer.Header() - for key, value := range cors.normalHeaders { - header[key] = value - } -} - -func generateNormalHeaders(c *CORSConfig) http.Header { - headers := make(http.Header) - if c.AllowCredentials { - headers.Set("Access-Control-Allow-Credentials", "true") - } - - // backport support for early browsers - if len(c.AllowMethods) > 0 { - allowMethods := convert(normalize(c.AllowMethods), strings.ToUpper) - value := strings.Join(allowMethods, ",") - headers.Set("Access-Control-Allow-Methods", value) - } - - if len(c.ExposeHeaders) > 0 { - exposeHeaders := convert(normalize(c.ExposeHeaders), http.CanonicalHeaderKey) - headers.Set("Access-Control-Expose-Headers", strings.Join(exposeHeaders, ",")) - } - if c.AllowAllOrigins { - headers.Set("Access-Control-Allow-Origin", "*") - } else { - headers.Set("Vary", "Origin") - } - return headers -} - -func generatePreflightHeaders(c *CORSConfig) http.Header { - headers := make(http.Header) - if c.AllowCredentials { - headers.Set("Access-Control-Allow-Credentials", "true") - } - if len(c.AllowMethods) > 0 { - allowMethods := convert(normalize(c.AllowMethods), strings.ToUpper) - value := strings.Join(allowMethods, ",") - headers.Set("Access-Control-Allow-Methods", value) - } - if len(c.AllowHeaders) > 0 { - allowHeaders := convert(normalize(c.AllowHeaders), http.CanonicalHeaderKey) - value := strings.Join(allowHeaders, ",") - headers.Set("Access-Control-Allow-Headers", value) - } - if c.MaxAge > time.Duration(0) { - value := strconv.FormatInt(int64(c.MaxAge/time.Second), 10) - headers.Set("Access-Control-Max-Age", value) - } - if c.AllowAllOrigins { - headers.Set("Access-Control-Allow-Origin", "*") - } else { - // Always set Vary headers - // see https://github.com/rs/cors/issues/10, - // https://github.com/rs/cors/commit/dbdca4d95feaa7511a46e6f1efb3b3aa505bc43f#commitcomment-12352001 - - headers.Add("Vary", "Origin") - headers.Add("Vary", "Access-Control-Request-Method") - 
headers.Add("Vary", "Access-Control-Request-Headers") - } - return headers -} - -func normalize(values []string) []string { - if values == nil { - return nil - } - distinctMap := make(map[string]bool, len(values)) - normalized := make([]string, 0, len(values)) - for _, value := range values { - value = strings.TrimSpace(value) - value = strings.ToLower(value) - if _, seen := distinctMap[value]; !seen { - normalized = append(normalized, value) - distinctMap[value] = true - } - } - return normalized -} - -func convert(s []string, c converter) []string { - var out []string - for _, i := range s { - out = append(out, c(i)) - } - return out -} diff --git a/pkg/net/http/blademaster/criticality.go b/pkg/net/http/blademaster/criticality.go deleted file mode 100644 index 0705ff0e7..000000000 --- a/pkg/net/http/blademaster/criticality.go +++ /dev/null @@ -1,21 +0,0 @@ -package blademaster - -import ( - criticalityPkg "github.com/go-kratos/kratos/pkg/net/criticality" - "github.com/go-kratos/kratos/pkg/net/metadata" - - "github.com/pkg/errors" -) - -// Criticality is -func Criticality(pathCriticality criticalityPkg.Criticality) HandlerFunc { - if !criticalityPkg.Exist(pathCriticality) { - panic(errors.Errorf("This criticality is not exist: %s", pathCriticality)) - } - return func(ctx *Context) { - md, ok := metadata.FromContext(ctx) - if ok { - md[metadata.Criticality] = string(pathCriticality) - } - } -} diff --git a/pkg/net/http/blademaster/csrf.go b/pkg/net/http/blademaster/csrf.go deleted file mode 100644 index 25482f5d7..000000000 --- a/pkg/net/http/blademaster/csrf.go +++ /dev/null @@ -1,64 +0,0 @@ -package blademaster - -import ( - "net/url" - "regexp" - "strings" - - "github.com/go-kratos/kratos/pkg/log" -) - -func matchHostSuffix(suffix string) func(*url.URL) bool { - return func(uri *url.URL) bool { - return strings.HasSuffix(strings.ToLower(uri.Host), suffix) - } -} - -func matchPattern(pattern *regexp.Regexp) func(*url.URL) bool { - return func(uri *url.URL) bool { - return pattern.MatchString(strings.ToLower(uri.String())) - } -} - -// CSRF returns the csrf middleware to prevent invalid cross site request. -// Only referer is checked currently. 
-func CSRF(allowHosts []string, allowPattern []string) HandlerFunc { - validations := []func(*url.URL) bool{} - - addHostSuffix := func(suffix string) { - validations = append(validations, matchHostSuffix(suffix)) - } - addPattern := func(pattern string) { - validations = append(validations, matchPattern(regexp.MustCompile(pattern))) - } - - for _, r := range allowHosts { - addHostSuffix(r) - } - for _, p := range allowPattern { - addPattern(p) - } - - return func(c *Context) { - referer := c.Request.Header.Get("Referer") - if referer == "" { - log.V(5).Info("The request's Referer or Origin header is empty.") - c.AbortWithStatus(403) - return - } - illegal := true - if uri, err := url.Parse(referer); err == nil && uri.Host != "" { - for _, validate := range validations { - if validate(uri) { - illegal = false - break - } - } - } - if illegal { - log.V(5).Info("The request's Referer header `%s` does not match any of allowed referers.", referer) - c.AbortWithStatus(403) - return - } - } -} diff --git a/pkg/net/http/blademaster/logger.go b/pkg/net/http/blademaster/logger.go deleted file mode 100644 index 7a0700826..000000000 --- a/pkg/net/http/blademaster/logger.go +++ /dev/null @@ -1,70 +0,0 @@ -package blademaster - -import ( - "fmt" - "strconv" - "time" - - "github.com/go-kratos/kratos/pkg/ecode" - "github.com/go-kratos/kratos/pkg/log" - "github.com/go-kratos/kratos/pkg/net/metadata" -) - -// Logger is logger middleware -func Logger() HandlerFunc { - const noUser = "no_user" - return func(c *Context) { - now := time.Now() - req := c.Request - path := req.URL.Path - params := req.Form - var quota float64 - if deadline, ok := c.Context.Deadline(); ok { - quota = time.Until(deadline).Seconds() - } - - c.Next() - - err := c.Error - cerr := ecode.Cause(err) - dt := time.Since(now) - caller := metadata.String(c, metadata.Caller) - if caller == "" { - caller = noUser - } - - if len(c.RoutePath) > 0 { - _metricServerReqCodeTotal.Inc(c.RoutePath[1:], caller, req.Method, strconv.FormatInt(int64(cerr.Code()), 10)) - _metricServerReqDur.Observe(int64(dt/time.Millisecond), c.RoutePath[1:], caller, req.Method) - } - - lf := log.Infov - errmsg := "" - isSlow := dt >= (time.Millisecond * 500) - if err != nil { - errmsg = err.Error() - lf = log.Errorv - if cerr.Code() > 0 { - lf = log.Warnv - } - } else { - if isSlow { - lf = log.Warnv - } - } - lf(c, - log.KVString("method", req.Method), - log.KVString("ip", c.RemoteIP()), - log.KVString("user", caller), - log.KVString("path", path), - log.KVString("params", params.Encode()), - log.KVInt("ret", cerr.Code()), - log.KVString("msg", cerr.Message()), - log.KVString("stack", fmt.Sprintf("%+v", err)), - log.KVString("err", errmsg), - log.KVFloat64("timeout_quota", quota), - log.KVFloat64("ts", dt.Seconds()), - log.KVString("source", "http-access-log"), - ) - } -} diff --git a/pkg/net/http/blademaster/metadata.go b/pkg/net/http/blademaster/metadata.go deleted file mode 100644 index 5befddd5f..000000000 --- a/pkg/net/http/blademaster/metadata.go +++ /dev/null @@ -1,123 +0,0 @@ -package blademaster - -import ( - "fmt" - "net" - "net/http" - "strconv" - "strings" - "time" - - "github.com/go-kratos/kratos/pkg/conf/env" - "github.com/go-kratos/kratos/pkg/log" - "github.com/go-kratos/kratos/pkg/net/criticality" - "github.com/go-kratos/kratos/pkg/net/metadata" - - "github.com/pkg/errors" -) - -const ( - // http head - _httpHeaderUser = "x1-bmspy-user" - _httpHeaderTimeout = "x1-bmspy-timeout" - _httpHeaderRemoteIP = "x-backend-bm-real-ip" - _httpHeaderRemoteIPPort = 
"x-backend-bm-real-ipport" -) - -const ( - _httpHeaderMetadata = "x-bm-metadata-" -) - -var _parser = map[string]func(string) interface{}{ - "mirror": func(mirrorStr string) interface{} { - if mirrorStr == "" { - return false - } - val, err := strconv.ParseBool(mirrorStr) - if err != nil { - log.Warn("blademaster: failed to parse mirror: %+v", errors.Wrap(err, mirrorStr)) - return false - } - if !val { - log.Warn("blademaster: request mirrorStr value :%s is false", mirrorStr) - } - return val - }, - "criticality": func(in string) interface{} { - if crtl := criticality.Criticality(in); crtl != criticality.EmptyCriticality { - return string(crtl) - } - return string(criticality.Critical) - }, -} - -func parseMetadataTo(req *http.Request, to metadata.MD) { - for rawKey := range req.Header { - key := strings.ReplaceAll(strings.TrimPrefix(strings.ToLower(rawKey), _httpHeaderMetadata), "-", "_") - rawValue := req.Header.Get(rawKey) - var value interface{} = rawValue - parser, ok := _parser[key] - if ok { - value = parser(rawValue) - } - to[key] = value - } - return -} - -func setMetadata(req *http.Request, key string, value interface{}) { - strV, ok := value.(string) - if !ok { - return - } - header := fmt.Sprintf("%s%s", _httpHeaderMetadata, strings.ReplaceAll(key, "_", "-")) - req.Header.Set(header, strV) -} - -// setCaller set caller into http request. -func setCaller(req *http.Request) { - req.Header.Set(_httpHeaderUser, env.AppID) -} - -// setTimeout set timeout into http request. -func setTimeout(req *http.Request, timeout time.Duration) { - td := int64(timeout / time.Millisecond) - req.Header.Set(_httpHeaderTimeout, strconv.FormatInt(td, 10)) -} - -// timeout get timeout from http request. -func timeout(req *http.Request) time.Duration { - to := req.Header.Get(_httpHeaderTimeout) - timeout, err := strconv.ParseInt(to, 10, 64) - if err == nil && timeout > 20 { - timeout -= 20 // reduce 20ms every time. - } - return time.Duration(timeout) * time.Millisecond -} - -// remoteIP implements a best effort algorithm to return the real client IP, it parses -// x-backend-bm-real-ip or X-Real-IP or X-Forwarded-For in order to work properly with reverse-proxies such us: nginx or haproxy. -// Use X-Forwarded-For before X-Real-Ip as nginx uses X-Real-Ip with the proxy's IP. 
-func remoteIP(req *http.Request) (remote string) { - if remote = req.Header.Get(_httpHeaderRemoteIP); remote != "" && remote != "null" { - return - } - var xff = req.Header.Get("X-Forwarded-For") - if idx := strings.IndexByte(xff, ','); idx > -1 { - if remote = strings.TrimSpace(xff[:idx]); remote != "" { - return - } - } - if remote = req.Header.Get("X-Real-IP"); remote != "" { - return - } - remote, _, _ = net.SplitHostPort(req.RemoteAddr) - return -} - -func remotePort(req *http.Request) (port string) { - if port = req.Header.Get(_httpHeaderRemoteIPPort); port != "" && port != "null" { - return - } - return -} diff --git a/pkg/net/http/blademaster/metrics.go b/pkg/net/http/blademaster/metrics.go deleted file mode 100644 index 87ee59167..000000000 --- a/pkg/net/http/blademaster/metrics.go +++ /dev/null @@ -1,48 +0,0 @@ -package blademaster - -import "github.com/go-kratos/kratos/pkg/stat/metric" - -const ( - clientNamespace = "http_client" - serverNamespace = "http_server" -) - -var ( - _metricServerReqDur = metric.NewHistogramVec(&metric.HistogramVecOpts{ - Namespace: serverNamespace, - Subsystem: "requests", - Name: "duration_ms", - Help: "http server requests duration(ms).", - Labels: []string{"path", "caller", "method"}, - Buckets: []float64{5, 10, 25, 50, 100, 250, 500, 1000}, - }) - _metricServerReqCodeTotal = metric.NewCounterVec(&metric.CounterVecOpts{ - Namespace: serverNamespace, - Subsystem: "requests", - Name: "code_total", - Help: "http server requests error count.", - Labels: []string{"path", "caller", "method", "code"}, - }) - _metricServerBBR = metric.NewCounterVec(&metric.CounterVecOpts{ - Namespace: serverNamespace, - Subsystem: "", - Name: "bbr_total", - Help: "http server bbr total.", - Labels: []string{"url", "method"}, - }) - _metricClientReqDur = metric.NewHistogramVec(&metric.HistogramVecOpts{ - Namespace: clientNamespace, - Subsystem: "requests", - Name: "duration_ms", - Help: "http client requests duration(ms).", - Labels: []string{"path", "method"}, - Buckets: []float64{5, 10, 25, 50, 100, 250, 500, 1000}, - }) - _metricClientReqCodeTotal = metric.NewCounterVec(&metric.CounterVecOpts{ - Namespace: clientNamespace, - Subsystem: "requests", - Name: "code_total", - Help: "http client requests code count.", - Labels: []string{"path", "method", "code"}, - }) -) diff --git a/pkg/net/http/blademaster/perf.go b/pkg/net/http/blademaster/perf.go deleted file mode 100644 index a15c5aac0..000000000 --- a/pkg/net/http/blademaster/perf.go +++ /dev/null @@ -1,63 +0,0 @@ -package blademaster - -import ( - "flag" - "net/http" - "net/http/pprof" - "os" - "sync" - - "github.com/go-kratos/kratos/pkg/conf/dsn" - - "github.com/pkg/errors" -) - -var ( - _perfOnce sync.Once - _perfDSN string -) - -func init() { - v := os.Getenv("HTTP_PERF") - flag.StringVar(&_perfDSN, "http.perf", v, "listen http perf dsn, or use HTTP_PERF env variable.") -} - -func startPerf(engine *Engine) { - _perfOnce.Do(func() { - if os.Getenv("HTTP_PERF") == "" { - prefixRouter := engine.Group("/debug/pprof") - { - prefixRouter.GET("/", pprofHandler(pprof.Index)) - prefixRouter.GET("/cmdline", pprofHandler(pprof.Cmdline)) - prefixRouter.GET("/profile", pprofHandler(pprof.Profile)) - prefixRouter.POST("/symbol", pprofHandler(pprof.Symbol)) - prefixRouter.GET("/symbol", pprofHandler(pprof.Symbol)) - prefixRouter.GET("/trace", pprofHandler(pprof.Trace)) - prefixRouter.GET("/allocs", pprofHandler(pprof.Handler("allocs").ServeHTTP)) - prefixRouter.GET("/block", pprofHandler(pprof.Handler("block").ServeHTTP)) - 
prefixRouter.GET("/goroutine", pprofHandler(pprof.Handler("goroutine").ServeHTTP)) - prefixRouter.GET("/heap", pprofHandler(pprof.Handler("heap").ServeHTTP)) - prefixRouter.GET("/mutex", pprofHandler(pprof.Handler("mutex").ServeHTTP)) - prefixRouter.GET("/threadcreate", pprofHandler(pprof.Handler("threadcreate").ServeHTTP)) - } - return - } - - go func() { - d, err := dsn.Parse(_perfDSN) - if err != nil { - panic(errors.Errorf("blademaster: http perf dsn must be tcp://$host:port, %s:error(%v)", _perfDSN, err)) - } - if err := http.ListenAndServe(d.Host, nil); err != nil { - panic(errors.Errorf("blademaster: listen %s: error(%v)", d.Host, err)) - } - }() - }) -} - -func pprofHandler(h http.HandlerFunc) HandlerFunc { - handler := http.HandlerFunc(h) - return func(c *Context) { - handler.ServeHTTP(c.Writer, c.Request) - } -} diff --git a/pkg/net/http/blademaster/prometheus.go b/pkg/net/http/blademaster/prometheus.go deleted file mode 100644 index 68af2ee58..000000000 --- a/pkg/net/http/blademaster/prometheus.go +++ /dev/null @@ -1,12 +0,0 @@ -package blademaster - -import ( - "github.com/prometheus/client_golang/prometheus/promhttp" -) - -func monitor() HandlerFunc { - return func(c *Context) { - h := promhttp.Handler() - h.ServeHTTP(c.Writer, c.Request) - } -} diff --git a/pkg/net/http/blademaster/ratelimit.go b/pkg/net/http/blademaster/ratelimit.go deleted file mode 100644 index e2347a46d..000000000 --- a/pkg/net/http/blademaster/ratelimit.go +++ /dev/null @@ -1,53 +0,0 @@ -package blademaster - -import ( - "fmt" - "sync/atomic" - "time" - - "github.com/go-kratos/kratos/pkg/log" - limit "github.com/go-kratos/kratos/pkg/ratelimit" - "github.com/go-kratos/kratos/pkg/ratelimit/bbr" -) - -// RateLimiter bbr middleware. -type RateLimiter struct { - group *bbr.Group - logTime int64 -} - -// NewRateLimiter return a ratelimit middleware. -func NewRateLimiter(conf *bbr.Config) (s *RateLimiter) { - return &RateLimiter{ - group: bbr.NewGroup(conf), - logTime: time.Now().UnixNano(), - } -} - -func (b *RateLimiter) printStats(routePath string, limiter limit.Limiter) { - now := time.Now().UnixNano() - if now-atomic.LoadInt64(&b.logTime) > int64(time.Second*3) { - atomic.StoreInt64(&b.logTime, now) - log.Info("http.bbr path:%s stat:%+v", routePath, limiter.(*bbr.BBR).Stat()) - } -} - -// Limit return a bm handler func. -func (b *RateLimiter) Limit() HandlerFunc { - return func(c *Context) { - uri := fmt.Sprintf("%s://%s%s", c.Request.URL.Scheme, c.Request.Host, c.Request.URL.Path) - limiter := b.group.Get(uri) - done, err := limiter.Allow(c) - if err != nil { - _metricServerBBR.Inc(uri, c.Request.Method) - c.JSON(nil, err) - c.Abort() - return - } - defer func() { - done(limit.DoneInfo{Op: limit.Success}) - b.printStats(uri, limiter) - }() - c.Next() - } -} diff --git a/pkg/net/http/blademaster/recovery.go b/pkg/net/http/blademaster/recovery.go deleted file mode 100644 index 239d496bb..000000000 --- a/pkg/net/http/blademaster/recovery.go +++ /dev/null @@ -1,32 +0,0 @@ -package blademaster - -import ( - "fmt" - "net/http/httputil" - "os" - "runtime" - - "github.com/go-kratos/kratos/pkg/log" -) - -// Recovery returns a middleware that recovers from any panics and writes a 500 if there was one. 
-func Recovery() HandlerFunc { - return func(c *Context) { - defer func() { - var rawReq []byte - if err := recover(); err != nil { - const size = 64 << 10 - buf := make([]byte, size) - buf = buf[:runtime.Stack(buf, false)] - if c.Request != nil { - rawReq, _ = httputil.DumpRequest(c.Request, false) - } - pl := fmt.Sprintf("http call panic: %s\n%v\n%s\n", string(rawReq), err, buf) - fmt.Fprintf(os.Stderr, pl) - log.Error(pl) - c.AbortWithStatus(500) - } - }() - c.Next() - } -} diff --git a/pkg/net/http/blademaster/render/data.go b/pkg/net/http/blademaster/render/data.go deleted file mode 100644 index d602350b0..000000000 --- a/pkg/net/http/blademaster/render/data.go +++ /dev/null @@ -1,30 +0,0 @@ -package render - -import ( - "net/http" - - "github.com/pkg/errors" -) - -// Data common bytes struct. -type Data struct { - ContentType string - Data [][]byte -} - -// Render (Data) writes data with custom ContentType. -func (r Data) Render(w http.ResponseWriter) (err error) { - r.WriteContentType(w) - for _, d := range r.Data { - if _, err = w.Write(d); err != nil { - err = errors.WithStack(err) - return - } - } - return -} - -// WriteContentType writes data with custom ContentType. -func (r Data) WriteContentType(w http.ResponseWriter) { - writeContentType(w, []string{r.ContentType}) -} diff --git a/pkg/net/http/blademaster/render/json.go b/pkg/net/http/blademaster/render/json.go deleted file mode 100644 index 5a5f23bff..000000000 --- a/pkg/net/http/blademaster/render/json.go +++ /dev/null @@ -1,58 +0,0 @@ -package render - -import ( - "encoding/json" - "net/http" - - "github.com/pkg/errors" -) - -var jsonContentType = []string{"application/json; charset=utf-8"} - -// JSON common json struct. -type JSON struct { - Code int `json:"code"` - Message string `json:"message"` - TTL int `json:"ttl"` - Data interface{} `json:"data,omitempty"` -} - -func writeJSON(w http.ResponseWriter, obj interface{}) (err error) { - var jsonBytes []byte - writeContentType(w, jsonContentType) - if jsonBytes, err = json.Marshal(obj); err != nil { - err = errors.WithStack(err) - return - } - if _, err = w.Write(jsonBytes); err != nil { - err = errors.WithStack(err) - } - return -} - -// Render (JSON) writes data with json ContentType. -func (r JSON) Render(w http.ResponseWriter) error { - // FIXME(zhoujiahui): the TTL field will be configurable in the future - if r.TTL <= 0 { - r.TTL = 1 - } - return writeJSON(w, r) -} - -// WriteContentType write json ContentType. -func (r JSON) WriteContentType(w http.ResponseWriter) { - writeContentType(w, jsonContentType) -} - -// MapJSON common map json struct. -type MapJSON map[string]interface{} - -// Render (MapJSON) writes data with json ContentType. -func (m MapJSON) Render(w http.ResponseWriter) error { - return writeJSON(w, m) -} - -// WriteContentType write json ContentType. -func (m MapJSON) WriteContentType(w http.ResponseWriter) { - writeContentType(w, jsonContentType) -} diff --git a/pkg/net/http/blademaster/render/protobuf.go b/pkg/net/http/blademaster/render/protobuf.go deleted file mode 100644 index 4664f2b5f..000000000 --- a/pkg/net/http/blademaster/render/protobuf.go +++ /dev/null @@ -1,38 +0,0 @@ -package render - -import ( - "net/http" - - "github.com/gogo/protobuf/proto" - "github.com/pkg/errors" -) - -var pbContentType = []string{"application/x-protobuf"} - -// Render (PB) writes data with protobuf ContentType. 
-func (r PB) Render(w http.ResponseWriter) error { - if r.TTL <= 0 { - r.TTL = 1 - } - return writePB(w, r) -} - -// WriteContentType write protobuf ContentType. -func (r PB) WriteContentType(w http.ResponseWriter) { - writeContentType(w, pbContentType) -} - -func writePB(w http.ResponseWriter, obj PB) (err error) { - var pbBytes []byte - writeContentType(w, pbContentType) - - if pbBytes, err = proto.Marshal(&obj); err != nil { - err = errors.WithStack(err) - return - } - - if _, err = w.Write(pbBytes); err != nil { - err = errors.WithStack(err) - } - return -} diff --git a/pkg/net/http/blademaster/render/redirect.go b/pkg/net/http/blademaster/render/redirect.go deleted file mode 100644 index 73e516d65..000000000 --- a/pkg/net/http/blademaster/render/redirect.go +++ /dev/null @@ -1,26 +0,0 @@ -package render - -import ( - "net/http" - - "github.com/pkg/errors" -) - -// Redirect render for redirect to specified location. -type Redirect struct { - Code int - Request *http.Request - Location string -} - -// Render (Redirect) redirect to specified location. -func (r Redirect) Render(w http.ResponseWriter) error { - if (r.Code < 300 || r.Code > 308) && r.Code != 201 { - return errors.Errorf("Cannot redirect with status code %d", r.Code) - } - http.Redirect(w, r.Request, r.Location, r.Code) - return nil -} - -// WriteContentType noneContentType. -func (r Redirect) WriteContentType(http.ResponseWriter) {} diff --git a/pkg/net/http/blademaster/render/render.go b/pkg/net/http/blademaster/render/render.go deleted file mode 100644 index 1cb40d409..000000000 --- a/pkg/net/http/blademaster/render/render.go +++ /dev/null @@ -1,30 +0,0 @@ -package render - -import ( - "net/http" -) - -// Render http response render. -type Render interface { - // Render render it to http response writer. - Render(http.ResponseWriter) error - // WriteContentType write content-type to http response writer. - WriteContentType(w http.ResponseWriter) -} - -var ( - _ Render = JSON{} - _ Render = MapJSON{} - _ Render = XML{} - _ Render = String{} - _ Render = Redirect{} - _ Render = Data{} - _ Render = PB{} -) - -func writeContentType(w http.ResponseWriter, value []string) { - header := w.Header() - if val := header["Content-Type"]; len(val) == 0 { - header["Content-Type"] = value - } -} diff --git a/pkg/net/http/blademaster/render/render.pb.go b/pkg/net/http/blademaster/render/render.pb.go deleted file mode 100644 index bb5390e98..000000000 --- a/pkg/net/http/blademaster/render/render.pb.go +++ /dev/null @@ -1,89 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: pb.proto - -/* -Package render is a generated protocol buffer package. - -It is generated from these files: - pb.proto - -It has these top-level messages: - PB -*/ -package render - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" -import google_protobuf "github.com/gogo/protobuf/types" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -type PB struct { - Code int64 `protobuf:"varint,1,opt,name=Code,proto3" json:"Code,omitempty"` - Message string `protobuf:"bytes,2,opt,name=Message,proto3" json:"Message,omitempty"` - TTL uint64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` - Data *google_protobuf.Any `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"` -} - -func (m *PB) Reset() { *m = PB{} } -func (m *PB) String() string { return proto.CompactTextString(m) } -func (*PB) ProtoMessage() {} -func (*PB) Descriptor() ([]byte, []int) { return fileDescriptorPb, []int{0} } - -func (m *PB) GetCode() int64 { - if m != nil { - return m.Code - } - return 0 -} - -func (m *PB) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -func (m *PB) GetTTL() uint64 { - if m != nil { - return m.TTL - } - return 0 -} - -func (m *PB) GetData() *google_protobuf.Any { - if m != nil { - return m.Data - } - return nil -} - -func init() { - proto.RegisterType((*PB)(nil), "render.PB") -} - -func init() { proto.RegisterFile("pb.proto", fileDescriptorPb) } - -var fileDescriptorPb = []byte{ - // 154 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x28, 0x48, 0xd2, 0x2b, - 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, 0x4a, 0xcd, 0x4b, 0x49, 0x2d, 0x92, 0x92, 0x4c, 0xcf, - 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x07, 0x8b, 0x26, 0x95, 0xa6, 0xe9, 0x27, 0xe6, 0x55, 0x42, 0x94, - 0x28, 0xe5, 0x71, 0x31, 0x05, 0x38, 0x09, 0x09, 0x71, 0xb1, 0x38, 0xe7, 0xa7, 0xa4, 0x4a, 0x30, - 0x2a, 0x30, 0x6a, 0x30, 0x07, 0x81, 0xd9, 0x42, 0x12, 0x5c, 0xec, 0xbe, 0xa9, 0xc5, 0xc5, 0x89, - 0xe9, 0xa9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x30, 0xae, 0x90, 0x00, 0x17, 0x73, 0x48, - 0x88, 0x8f, 0x04, 0xb3, 0x02, 0xa3, 0x06, 0x4b, 0x10, 0x88, 0x29, 0xa4, 0xc1, 0xc5, 0xe2, 0x92, - 0x58, 0x92, 0x28, 0xc1, 0xa2, 0xc0, 0xa8, 0xc1, 0x6d, 0x24, 0xa2, 0x07, 0xb1, 0x4f, 0x0f, 0x66, - 0x9f, 0x9e, 0x63, 0x5e, 0x65, 0x10, 0x58, 0x45, 0x12, 0x1b, 0x58, 0xcc, 0x18, 0x10, 0x00, 0x00, - 0xff, 0xff, 0x7a, 0x92, 0x16, 0x71, 0xa5, 0x00, 0x00, 0x00, -} diff --git a/pkg/net/http/blademaster/render/render.proto b/pkg/net/http/blademaster/render/render.proto deleted file mode 100644 index e3f53870f..000000000 --- a/pkg/net/http/blademaster/render/render.proto +++ /dev/null @@ -1,14 +0,0 @@ -// use under command to generate pb.pb.go -// protoc --proto_path=.:$GOPATH/src/github.com/gogo/protobuf --gogo_out=Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types:. *.proto -syntax = "proto3"; -package render; - -import "google/protobuf/any.proto"; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; - -message PB { - int64 Code = 1; - string Message = 2; - uint64 TTL = 3; - google.protobuf.Any Data = 4; -} \ No newline at end of file diff --git a/pkg/net/http/blademaster/render/string.go b/pkg/net/http/blademaster/render/string.go deleted file mode 100644 index 4112b5713..000000000 --- a/pkg/net/http/blademaster/render/string.go +++ /dev/null @@ -1,40 +0,0 @@ -package render - -import ( - "fmt" - "io" - "net/http" - - "github.com/pkg/errors" -) - -var plainContentType = []string{"text/plain; charset=utf-8"} - -// String common string struct. -type String struct { - Format string - Data []interface{} -} - -// Render (String) writes data with custom ContentType. 
-func (r String) Render(w http.ResponseWriter) error { - return writeString(w, r.Format, r.Data) -} - -// WriteContentType writes string with text/plain ContentType. -func (r String) WriteContentType(w http.ResponseWriter) { - writeContentType(w, plainContentType) -} - -func writeString(w http.ResponseWriter, format string, data []interface{}) (err error) { - writeContentType(w, plainContentType) - if len(data) > 0 { - _, err = fmt.Fprintf(w, format, data...) - } else { - _, err = io.WriteString(w, format) - } - if err != nil { - err = errors.WithStack(err) - } - return -} diff --git a/pkg/net/http/blademaster/render/xml.go b/pkg/net/http/blademaster/render/xml.go deleted file mode 100644 index 8837c582c..000000000 --- a/pkg/net/http/blademaster/render/xml.go +++ /dev/null @@ -1,31 +0,0 @@ -package render - -import ( - "encoding/xml" - "net/http" - - "github.com/pkg/errors" -) - -// XML common xml struct. -type XML struct { - Code int - Message string - Data interface{} -} - -var xmlContentType = []string{"application/xml; charset=utf-8"} - -// Render (XML) writes data with xml ContentType. -func (r XML) Render(w http.ResponseWriter) (err error) { - r.WriteContentType(w) - if err = xml.NewEncoder(w).Encode(r.Data); err != nil { - err = errors.WithStack(err) - } - return -} - -// WriteContentType write xml ContentType. -func (r XML) WriteContentType(w http.ResponseWriter) { - writeContentType(w, xmlContentType) -} diff --git a/pkg/net/http/blademaster/routergroup.go b/pkg/net/http/blademaster/routergroup.go deleted file mode 100644 index 281796b12..000000000 --- a/pkg/net/http/blademaster/routergroup.go +++ /dev/null @@ -1,191 +0,0 @@ -package blademaster - -import ( - "regexp" -) - -// IRouter http router framework interface. -type IRouter interface { - IRoutes - Group(string, ...HandlerFunc) *RouterGroup -} - -// IRoutes http router interface. -type IRoutes interface { - UseFunc(...HandlerFunc) IRoutes - Use(...Handler) IRoutes - - Handle(string, string, ...HandlerFunc) IRoutes - HEAD(string, ...HandlerFunc) IRoutes - GET(string, ...HandlerFunc) IRoutes - POST(string, ...HandlerFunc) IRoutes - PUT(string, ...HandlerFunc) IRoutes - DELETE(string, ...HandlerFunc) IRoutes -} - -// RouterGroup is used internally to configure router, a RouterGroup is associated with a prefix -// and an array of handlers (middleware). -type RouterGroup struct { - Handlers []HandlerFunc - basePath string - engine *Engine - root bool - baseConfig *MethodConfig -} - -var _ IRouter = &RouterGroup{} - -// Use adds middleware to the group, see example code in doc. -func (group *RouterGroup) Use(middleware ...Handler) IRoutes { - for _, m := range middleware { - group.Handlers = append(group.Handlers, m.ServeHTTP) - } - return group.returnObj() -} - -// UseFunc adds middleware to the group, see example code in doc. -func (group *RouterGroup) UseFunc(middleware ...HandlerFunc) IRoutes { - group.Handlers = append(group.Handlers, middleware...) - return group.returnObj() -} - -// Group creates a new router group. You should add all the routes that have common middlwares or the same path prefix. -// For example, all the routes that use a common middlware for authorization could be grouped. 
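A routing sketch for the Group API documented above; the paths and the auth middleware are illustrative assumptions.

package main

import (
	bm "github.com/go-kratos/kratos/pkg/net/http/blademaster"
)

// registerRoutes groups routes that share a prefix and middleware: every route
// registered on g inherits the handlers passed to Group.
func registerRoutes(engine *bm.Engine, auth, list, create bm.HandlerFunc) {
	g := engine.Group("/x/admin", auth)
	g.GET("/items", list)
	g.POST("/items", create)
}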
-func (group *RouterGroup) Group(relativePath string, handlers ...HandlerFunc) *RouterGroup { - return &RouterGroup{ - Handlers: group.combineHandlers(handlers), - basePath: group.calculateAbsolutePath(relativePath), - engine: group.engine, - root: false, - } -} - -// SetMethodConfig is used to set config on specified method -func (group *RouterGroup) SetMethodConfig(config *MethodConfig) *RouterGroup { - group.baseConfig = config - return group -} - -// BasePath router group base path. -func (group *RouterGroup) BasePath() string { - return group.basePath -} - -func (group *RouterGroup) handle(httpMethod, relativePath string, handlers ...HandlerFunc) IRoutes { - absolutePath := group.calculateAbsolutePath(relativePath) - injections := group.injections(relativePath) - handlers = group.combineHandlers(injections, handlers) - group.engine.addRoute(httpMethod, absolutePath, handlers...) - if group.baseConfig != nil { - group.engine.SetMethodConfig(absolutePath, group.baseConfig) - } - return group.returnObj() -} - -// Handle registers a new request handle and middleware with the given path and method. -// The last handler should be the real handler, the other ones should be middleware that can and should be shared among different routes. -// See the example code in doc. -// -// For HEAD, GET, POST, PUT, and DELETE requests the respective shortcut -// functions can be used. -// -// This function is intended for bulk loading and to allow the usage of less -// frequently used, non-standardized or custom methods (e.g. for internal -// communication with a proxy). -func (group *RouterGroup) Handle(httpMethod, relativePath string, handlers ...HandlerFunc) IRoutes { - if matches, err := regexp.MatchString("^[A-Z]+$", httpMethod); !matches || err != nil { - panic("http method " + httpMethod + " is not valid") - } - return group.handle(httpMethod, relativePath, handlers...) -} - -// GET is a shortcut for router.Handle("GET", path, handle). -func (group *RouterGroup) GET(relativePath string, handlers ...HandlerFunc) IRoutes { - return group.handle("GET", relativePath, handlers...) -} - -// POST is a shortcut for router.Handle("POST", path, handle). -func (group *RouterGroup) POST(relativePath string, handlers ...HandlerFunc) IRoutes { - return group.handle("POST", relativePath, handlers...) -} - -// PUT is a shortcut for router.Handle("PUT", path, handle). -func (group *RouterGroup) PUT(relativePath string, handlers ...HandlerFunc) IRoutes { - return group.handle("PUT", relativePath, handlers...) -} - -// DELETE is a shortcut for router.Handle("DELETE", path, handle). -func (group *RouterGroup) DELETE(relativePath string, handlers ...HandlerFunc) IRoutes { - return group.handle("DELETE", relativePath, handlers...) -} - -// PATCH is a shortcut for router.Handle("PATCH", path, handle). -func (group *RouterGroup) PATCH(relativePath string, handlers ...HandlerFunc) IRoutes { - return group.handle("PATCH", relativePath, handlers...) -} - -// OPTIONS is a shortcut for router.Handle("OPTIONS", path, handle). -func (group *RouterGroup) OPTIONS(relativePath string, handlers ...HandlerFunc) IRoutes { - return group.handle("OPTIONS", relativePath, handlers...) -} - -// HEAD is a shortcut for router.Handle("HEAD", path, handle). -func (group *RouterGroup) HEAD(relativePath string, handlers ...HandlerFunc) IRoutes { - return group.handle("HEAD", relativePath, handlers...) 
-} - -func (group *RouterGroup) combineHandlers(handlerGroups ...[]HandlerFunc) []HandlerFunc { - finalSize := len(group.Handlers) - for _, handlers := range handlerGroups { - finalSize += len(handlers) - } - if finalSize >= int(_abortIndex) { - panic("too many handlers") - } - mergedHandlers := make([]HandlerFunc, finalSize) - copy(mergedHandlers, group.Handlers) - position := len(group.Handlers) - for _, handlers := range handlerGroups { - copy(mergedHandlers[position:], handlers) - position += len(handlers) - } - return mergedHandlers -} - -func (group *RouterGroup) calculateAbsolutePath(relativePath string) string { - return joinPaths(group.basePath, relativePath) -} - -func (group *RouterGroup) returnObj() IRoutes { - if group.root { - return group.engine - } - return group -} - -// injections is -func (group *RouterGroup) injections(relativePath string) []HandlerFunc { - absPath := group.calculateAbsolutePath(relativePath) - for _, injection := range group.engine.injections { - if !injection.pattern.MatchString(absPath) { - continue - } - return injection.handlers - } - return nil -} - -// Any registers a route that matches all the HTTP methods. -// GET, POST, PUT, PATCH, HEAD, OPTIONS, DELETE, CONNECT, TRACE. -func (group *RouterGroup) Any(relativePath string, handlers ...HandlerFunc) IRoutes { - group.handle("GET", relativePath, handlers...) - group.handle("POST", relativePath, handlers...) - group.handle("PUT", relativePath, handlers...) - group.handle("PATCH", relativePath, handlers...) - group.handle("HEAD", relativePath, handlers...) - group.handle("OPTIONS", relativePath, handlers...) - group.handle("DELETE", relativePath, handlers...) - group.handle("CONNECT", relativePath, handlers...) - group.handle("TRACE", relativePath, handlers...) - return group.returnObj() -} diff --git a/pkg/net/http/blademaster/server.go b/pkg/net/http/blademaster/server.go deleted file mode 100644 index 63187306a..000000000 --- a/pkg/net/http/blademaster/server.go +++ /dev/null @@ -1,517 +0,0 @@ -package blademaster - -import ( - "context" - "flag" - "fmt" - "net" - "net/http" - "os" - "regexp" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/go-kratos/kratos/pkg/conf/dsn" - "github.com/go-kratos/kratos/pkg/log" - "github.com/go-kratos/kratos/pkg/net/criticality" - "github.com/go-kratos/kratos/pkg/net/ip" - "github.com/go-kratos/kratos/pkg/net/metadata" - xtime "github.com/go-kratos/kratos/pkg/time" - - "github.com/pkg/errors" -) - -const ( - defaultMaxMemory = 32 << 20 // 32 MB -) - -var ( - _ IRouter = &Engine{} - - _httpDSN string - default405Body = []byte("405 method not allowed") - default404Body = []byte("404 page not found") -) - -func init() { - addFlag(flag.CommandLine) -} - -func addFlag(fs *flag.FlagSet) { - v := os.Getenv("HTTP") - if v == "" { - v = "tcp://0.0.0.0:8000/?timeout=1s" - } - fs.StringVar(&_httpDSN, "http", v, "listen http dsn, or use HTTP env variable.") -} - -func parseDSN(rawdsn string) *ServerConfig { - conf := new(ServerConfig) - d, err := dsn.Parse(rawdsn) - if err != nil { - panic(errors.Wrapf(err, "blademaster: invalid dsn: %s", rawdsn)) - } - if _, err = d.Bind(conf); err != nil { - panic(errors.Wrapf(err, "blademaster: invalid dsn: %s", rawdsn)) - } - return conf -} - -// Handler responds to an HTTP request. -type Handler interface { - ServeHTTP(c *Context) -} - -// HandlerFunc http request handler function. -type HandlerFunc func(*Context) - -// ServeHTTP calls f(ctx). 
-func (f HandlerFunc) ServeHTTP(c *Context) { - f(c) -} - -// ServerConfig is the bm server config model -type ServerConfig struct { - Network string `dsn:"network"` - Addr string `dsn:"address"` - Timeout xtime.Duration `dsn:"query.timeout"` - ReadTimeout xtime.Duration `dsn:"query.readTimeout"` - WriteTimeout xtime.Duration `dsn:"query.writeTimeout"` -} - -// MethodConfig is -type MethodConfig struct { - Timeout xtime.Duration -} - -// Start listen and serve bm engine by given DSN. -func (engine *Engine) Start() error { - conf := engine.conf - l, err := net.Listen(conf.Network, conf.Addr) - if err != nil { - return errors.Wrapf(err, "blademaster: listen tcp: %s", conf.Addr) - } - - log.Info("blademaster: start http listen addr: %s", l.Addr().String()) - server := &http.Server{ - ReadTimeout: time.Duration(conf.ReadTimeout), - WriteTimeout: time.Duration(conf.WriteTimeout), - } - go func() { - if err := engine.RunServer(server, l); err != nil { - if errors.Cause(err) == http.ErrServerClosed { - log.Info("blademaster: server closed") - return - } - panic(errors.Wrapf(err, "blademaster: engine.ListenServer(%+v, %+v)", server, l)) - } - }() - - return nil -} - -// Engine is the framework's instance, it contains the muxer, middleware and configuration settings. -// Create an instance of Engine, by using New() or Default() -type Engine struct { - RouterGroup - - lock sync.RWMutex - conf *ServerConfig - - address string - - trees methodTrees - server atomic.Value // store *http.Server - metastore map[string]map[string]interface{} // metastore is the path as key and the metadata of this path as value, it export via /metadata - - pcLock sync.RWMutex - methodConfigs map[string]*MethodConfig - - injections []injection - - // If enabled, the url.RawPath will be used to find parameters. - UseRawPath bool - - // If true, the path value will be unescaped. - // If UseRawPath is false (by default), the UnescapePathValues effectively is true, - // as url.Path gonna be used, which is already unescaped. - UnescapePathValues bool - - // If enabled, the router checks if another method is allowed for the - // current route, if the current request can not be routed. - // If this is the case, the request is answered with 'Method Not Allowed' - // and HTTP status code 405. - // If no other Method is allowed, the request is delegated to the NotFound - // handler. - HandleMethodNotAllowed bool - - allNoRoute []HandlerFunc - allNoMethod []HandlerFunc - noRoute []HandlerFunc - noMethod []HandlerFunc - - pool sync.Pool -} - -type injection struct { - pattern *regexp.Regexp - handlers []HandlerFunc -} - -// NewServer returns a new blank Engine instance without any middleware attached. 
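A bootstrap sketch for the engine below; DefaultServer attaches Recovery/Trace/Logger and Start listens in the background, so the caller must block on its own. The address and timeout are placeholder values.

package main

import (
	"time"

	bm "github.com/go-kratos/kratos/pkg/net/http/blademaster"
	xtime "github.com/go-kratos/kratos/pkg/time"
)

func main() {
	engine := bm.DefaultServer(&bm.ServerConfig{
		Network: "tcp",
		Addr:    "0.0.0.0:8000",
		Timeout: xtime.Duration(time.Second), // must be > 0, see SetConfig
	})
	engine.Ping(func(c *bm.Context) { c.AbortWithStatus(200) })
	if err := engine.Start(); err != nil {
		panic(err)
	}
	select {} // a real service would wait for signals and call Shutdown
}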
-func NewServer(conf *ServerConfig) *Engine { - if conf == nil { - if !flag.Parsed() { - fmt.Fprint(os.Stderr, "[blademaster] please call flag.Parse() before Init blademaster server, some configure may not effect.\n") - } - conf = parseDSN(_httpDSN) - } - engine := &Engine{ - RouterGroup: RouterGroup{ - Handlers: nil, - basePath: "/", - root: true, - }, - address: ip.InternalIP(), - trees: make(methodTrees, 0, 9), - metastore: make(map[string]map[string]interface{}), - methodConfigs: make(map[string]*MethodConfig), - HandleMethodNotAllowed: true, - injections: make([]injection, 0), - } - if err := engine.SetConfig(conf); err != nil { - panic(err) - } - engine.pool.New = func() interface{} { - return engine.newContext() - } - engine.RouterGroup.engine = engine - // NOTE add prometheus monitor location - engine.addRoute("GET", "/metrics", monitor()) - engine.addRoute("GET", "/metadata", engine.metadata()) - engine.NoRoute(func(c *Context) { - c.Bytes(404, "text/plain", default404Body) - c.Abort() - }) - engine.NoMethod(func(c *Context) { - c.Bytes(405, "text/plain", []byte(http.StatusText(405))) - c.Abort() - }) - startPerf(engine) - return engine -} - -// SetMethodConfig is used to set config on specified path -func (engine *Engine) SetMethodConfig(path string, mc *MethodConfig) { - engine.pcLock.Lock() - engine.methodConfigs[path] = mc - engine.pcLock.Unlock() -} - -// DefaultServer returns an Engine instance with the Recovery and Logger middleware already attached. -func DefaultServer(conf *ServerConfig) *Engine { - engine := NewServer(conf) - engine.Use(Recovery(), Trace(), Logger()) - return engine -} - -func (engine *Engine) addRoute(method, path string, handlers ...HandlerFunc) { - if path[0] != '/' { - panic("blademaster: path must begin with '/'") - } - if method == "" { - panic("blademaster: HTTP method can not be empty") - } - if len(handlers) == 0 { - panic("blademaster: there must be at least one handler") - } - if _, ok := engine.metastore[path]; !ok { - engine.metastore[path] = make(map[string]interface{}) - } - engine.metastore[path]["method"] = method - root := engine.trees.get(method) - if root == nil { - root = new(node) - engine.trees = append(engine.trees, methodTree{method: method, root: root}) - } - - prelude := func(c *Context) { - c.method = method - c.RoutePath = path - } - handlers = append([]HandlerFunc{prelude}, handlers...) 
- root.addRoute(path, handlers) -} - -func (engine *Engine) prepareHandler(c *Context) { - httpMethod := c.Request.Method - rPath := c.Request.URL.Path - unescape := false - if engine.UseRawPath && len(c.Request.URL.EscapedPath()) > 0 { - rPath = c.Request.URL.EscapedPath() - unescape = engine.UnescapePathValues - } - rPath = cleanPath(rPath) - - // Find root of the tree for the given HTTP method - t := engine.trees - for i, tl := 0, len(t); i < tl; i++ { - if t[i].method != httpMethod { - continue - } - root := t[i].root - // Find route in tree - handlers, params, _ := root.getValue(rPath, c.Params, unescape) - if handlers != nil { - c.handlers = handlers - c.Params = params - return - } - break - } - - if engine.HandleMethodNotAllowed { - for _, tree := range engine.trees { - if tree.method == httpMethod { - continue - } - if handlers, _, _ := tree.root.getValue(rPath, nil, unescape); handlers != nil { - c.handlers = engine.allNoMethod - return - } - } - } - c.handlers = engine.allNoRoute - return -} - -func (engine *Engine) handleContext(c *Context) { - var cancel func() - req := c.Request - ctype := req.Header.Get("Content-Type") - switch { - case strings.Contains(ctype, "multipart/form-data"): - req.ParseMultipartForm(defaultMaxMemory) - default: - req.ParseForm() - } - // get derived timeout from http request header, - // compare with the engine configured, - // and use the minimum one - engine.lock.RLock() - tm := time.Duration(engine.conf.Timeout) - engine.lock.RUnlock() - // the method config is preferred - if pc := engine.methodConfig(c.Request.URL.Path); pc != nil { - tm = time.Duration(pc.Timeout) - } - if ctm := timeout(req); ctm > 0 && tm > ctm { - tm = ctm - } - md := metadata.MD{ - metadata.RemoteIP: remoteIP(req), - metadata.RemotePort: remotePort(req), - metadata.Criticality: string(criticality.Critical), - } - parseMetadataTo(req, md) - ctx := metadata.NewContext(context.Background(), md) - if tm > 0 { - c.Context, cancel = context.WithTimeout(ctx, tm) - } else { - c.Context, cancel = context.WithCancel(ctx) - } - defer cancel() - engine.prepareHandler(c) - c.Next() -} - -// SetConfig is used to set the engine configuration. -// Only the valid config will be loaded. -func (engine *Engine) SetConfig(conf *ServerConfig) (err error) { - if conf.Timeout <= 0 { - return errors.New("blademaster: config timeout must greater than 0") - } - if conf.Network == "" { - conf.Network = "tcp" - } - engine.lock.Lock() - engine.conf = conf - engine.lock.Unlock() - return -} - -func (engine *Engine) methodConfig(path string) *MethodConfig { - engine.pcLock.RLock() - mc := engine.methodConfigs[path] - engine.pcLock.RUnlock() - return mc -} - -// Router return a http.Handler for using http.ListenAndServe() directly. -func (engine *Engine) Router() http.Handler { - return engine -} - -// Server is used to load stored http server. -func (engine *Engine) Server() *http.Server { - s, ok := engine.server.Load().(*http.Server) - if !ok { - return nil - } - return s -} - -// Shutdown the http server without interrupting active connections. -func (engine *Engine) Shutdown(ctx context.Context) error { - server := engine.Server() - if server == nil { - return errors.New("blademaster: no server") - } - return errors.WithStack(server.Shutdown(ctx)) -} - -// UseFunc attachs a global middleware to the router. ie. the middleware attached though UseFunc() will be -// included in the handlers chain for every single request. Even 404, 405, static files... 
-// For example, this is the right place for a logger or error management middleware. -func (engine *Engine) UseFunc(middleware ...HandlerFunc) IRoutes { - engine.RouterGroup.UseFunc(middleware...) - engine.rebuild404Handlers() - engine.rebuild405Handlers() - return engine -} - -// Use attachs a global middleware to the router. ie. the middleware attached though Use() will be -// included in the handlers chain for every single request. Even 404, 405, static files... -// For example, this is the right place for a logger or error management middleware. -func (engine *Engine) Use(middleware ...Handler) IRoutes { - engine.RouterGroup.Use(middleware...) - engine.rebuild404Handlers() - engine.rebuild405Handlers() - return engine -} - -// Ping is used to set the general HTTP ping handler. -func (engine *Engine) Ping(handler HandlerFunc) { - engine.GET("/ping", handler) -} - -// Register is used to export metadata to discovery. -func (engine *Engine) Register(handler HandlerFunc) { - engine.GET("/register", handler) -} - -// Run attaches the router to a http.Server and starts listening and serving HTTP requests. -// It is a shortcut for http.ListenAndServe(addr, router) -// Note: this method will block the calling goroutine indefinitely unless an error happens. -func (engine *Engine) Run(addr ...string) (err error) { - address := resolveAddress(addr) - server := &http.Server{ - Addr: address, - Handler: engine, - } - engine.server.Store(server) - if err = server.ListenAndServe(); err != nil { - err = errors.Wrapf(err, "addrs: %v", addr) - } - return -} - -// RunTLS attaches the router to a http.Server and starts listening and serving HTTPS (secure) requests. -// It is a shortcut for http.ListenAndServeTLS(addr, certFile, keyFile, router) -// Note: this method will block the calling goroutine indefinitely unless an error happens. -func (engine *Engine) RunTLS(addr, certFile, keyFile string) (err error) { - server := &http.Server{ - Addr: addr, - Handler: engine, - } - engine.server.Store(server) - if err = server.ListenAndServeTLS(certFile, keyFile); err != nil { - err = errors.Wrapf(err, "tls: %s/%s:%s", addr, certFile, keyFile) - } - return -} - -// RunUnix attaches the router to a http.Server and starts listening and serving HTTP requests -// through the specified unix socket (ie. a file). -// Note: this method will block the calling goroutine indefinitely unless an error happens. -func (engine *Engine) RunUnix(file string) (err error) { - os.Remove(file) - listener, err := net.Listen("unix", file) - if err != nil { - err = errors.Wrapf(err, "unix: %s", file) - return - } - defer listener.Close() - server := &http.Server{ - Handler: engine, - } - engine.server.Store(server) - if err = server.Serve(listener); err != nil { - err = errors.Wrapf(err, "unix: %s", file) - } - return -} - -// RunServer will serve and start listening HTTP requests by given server and listener. -// Note: this method will block the calling goroutine indefinitely unless an error happens. 
-func (engine *Engine) RunServer(server *http.Server, l net.Listener) (err error) { - server.Handler = engine - engine.server.Store(server) - if err = server.Serve(l); err != nil { - err = errors.Wrapf(err, "listen server: %+v/%+v", server, l) - return - } - return -} - -func (engine *Engine) metadata() HandlerFunc { - return func(c *Context) { - c.JSON(engine.metastore, nil) - } -} - -// Inject is -func (engine *Engine) Inject(pattern string, handlers ...HandlerFunc) { - engine.injections = append(engine.injections, injection{ - pattern: regexp.MustCompile(pattern), - handlers: handlers, - }) -} - -// ServeHTTP conforms to the http.Handler interface. -func (engine *Engine) ServeHTTP(w http.ResponseWriter, req *http.Request) { - c := engine.pool.Get().(*Context) - c.Request = req - c.Writer = w - c.reset() - - engine.handleContext(c) - engine.pool.Put(c) -} - -//newContext for sync.pool -func (engine *Engine) newContext() *Context { - return &Context{engine: engine} -} - -// NoRoute adds handlers for NoRoute. It return a 404 code by default. -func (engine *Engine) NoRoute(handlers ...HandlerFunc) { - engine.noRoute = handlers - engine.rebuild404Handlers() -} - -// NoMethod sets the handlers called when... TODO. -func (engine *Engine) NoMethod(handlers ...HandlerFunc) { - engine.noMethod = handlers - engine.rebuild405Handlers() -} - -func (engine *Engine) rebuild404Handlers() { - engine.allNoRoute = engine.combineHandlers(engine.noRoute) -} - -func (engine *Engine) rebuild405Handlers() { - engine.allNoMethod = engine.combineHandlers(engine.noMethod) -} diff --git a/pkg/net/http/blademaster/server_test.go b/pkg/net/http/blademaster/server_test.go deleted file mode 100644 index 60343a52d..000000000 --- a/pkg/net/http/blademaster/server_test.go +++ /dev/null @@ -1,138 +0,0 @@ -package blademaster - -import ( - "context" - "fmt" - "io/ioutil" - "net/http" - "sync" - "sync/atomic" - "testing" - "time" - - criticalityPkg "github.com/go-kratos/kratos/pkg/net/criticality" - "github.com/go-kratos/kratos/pkg/net/metadata" - xtime "github.com/go-kratos/kratos/pkg/time" - - "github.com/stretchr/testify/assert" -) - -var ( - sonce sync.Once - - curEngine atomic.Value -) - -func uri(base, path string) string { - return fmt.Sprintf("%s://%s%s", "http", base, path) -} - -func shutdown() { - if err := curEngine.Load().(*Engine).Shutdown(context.TODO()); err != nil { - panic(err) - } -} - -func setupHandler(engine *Engine) { - // set the global timeout is 2 second - engine.conf.Timeout = xtime.Duration(time.Second * 2) - - engine.Ping(func(ctx *Context) { - ctx.AbortWithStatus(200) - }) - - engine.GET("/criticality/api", Criticality(criticalityPkg.Critical), func(ctx *Context) { - ctx.String(200, "%s", metadata.String(ctx, metadata.Criticality)) - }) - engine.GET("/criticality/none/api", func(ctx *Context) { - ctx.String(200, "%s", metadata.String(ctx, metadata.Criticality)) - }) -} - -func startServer(addr string) { - e := DefaultServer(nil) - setupHandler(e) - go e.Run(addr) - curEngine.Store(e) - time.Sleep(time.Second) -} - -func TestCriticality(t *testing.T) { - addr := "localhost:18001" - startServer(addr) - defer shutdown() - - tests := []*struct { - path string - crtl criticalityPkg.Criticality - expected criticalityPkg.Criticality - }{ - { - "/criticality/api", - criticalityPkg.EmptyCriticality, - criticalityPkg.Critical, - }, - { - "/criticality/api", - criticalityPkg.CriticalPlus, - criticalityPkg.Critical, - }, - { - "/criticality/api", - criticalityPkg.SheddablePlus, - criticalityPkg.Critical, - 
}, - } - client := &http.Client{} - for _, testCase := range tests { - req, err := http.NewRequest("GET", uri(addr, testCase.path), nil) - assert.NoError(t, err) - req.Header.Set("x-bm-metadata-criticality", string(testCase.crtl)) - resp, err := client.Do(req) - assert.NoError(t, err) - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - assert.NoError(t, err) - assert.Equal(t, testCase.expected, criticalityPkg.Criticality(body)) - } -} - -func TestNoneCriticality(t *testing.T) { - addr := "localhost:18002" - startServer(addr) - defer shutdown() - - tests := []*struct { - path string - crtl criticalityPkg.Criticality - expected criticalityPkg.Criticality - }{ - { - "/criticality/none/api", - criticalityPkg.EmptyCriticality, - criticalityPkg.Critical, - }, - { - "/criticality/none/api", - criticalityPkg.CriticalPlus, - criticalityPkg.CriticalPlus, - }, - { - "/criticality/none/api", - criticalityPkg.SheddablePlus, - criticalityPkg.SheddablePlus, - }, - } - client := &http.Client{} - for _, testCase := range tests { - req, err := http.NewRequest("GET", uri(addr, testCase.path), nil) - assert.NoError(t, err) - req.Header.Set("x-bm-metadata-criticality", string(testCase.crtl)) - resp, err := client.Do(req) - assert.NoError(t, err) - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - assert.NoError(t, err) - assert.Equal(t, testCase.expected, criticalityPkg.Criticality(body)) - } -} diff --git a/pkg/net/http/blademaster/trace.go b/pkg/net/http/blademaster/trace.go deleted file mode 100644 index b4625dadb..000000000 --- a/pkg/net/http/blademaster/trace.go +++ /dev/null @@ -1,229 +0,0 @@ -package blademaster - -import ( - "io" - "net/http" - "net/http/httptrace" - "strconv" - - "github.com/go-kratos/kratos/pkg/net/metadata" - "github.com/go-kratos/kratos/pkg/net/trace" -) - -const _defaultComponentName = "net/http" - -// Trace is trace middleware -func Trace() HandlerFunc { - return func(c *Context) { - // handle http request - // get derived trace from http request header - t, err := trace.Extract(trace.HTTPFormat, c.Request.Header) - if err != nil { - var opts []trace.Option - if ok, _ := strconv.ParseBool(trace.KratosTraceDebug); ok { - opts = append(opts, trace.EnableDebug()) - } - t = trace.New(c.Request.URL.Path, opts...) - } - t.SetTitle(c.Request.URL.Path) - t.SetTag(trace.String(trace.TagComponent, _defaultComponentName)) - t.SetTag(trace.String(trace.TagHTTPMethod, c.Request.Method)) - t.SetTag(trace.String(trace.TagHTTPURL, c.Request.URL.String())) - t.SetTag(trace.String(trace.TagSpanKind, "server")) - // business tag - t.SetTag(trace.String("caller", metadata.String(c.Context, metadata.Caller))) - // export trace id to user. - c.Writer.Header().Set(trace.KratosTraceID, t.TraceID()) - c.Context = trace.NewContext(c.Context, t) - c.Next() - t.Finish(&c.Error) - } -} - -type closeTracker struct { - io.ReadCloser - tr trace.Trace -} - -func (c *closeTracker) Close() error { - err := c.ReadCloser.Close() - c.tr.SetLog(trace.Log(trace.LogEvent, "ClosedBody")) - c.tr.Finish(&err) - return err -} - -// NewTraceTransport NewTraceTransport -func NewTraceTransport(rt http.RoundTripper, peerService string, internalTags ...trace.Tag) *TraceTransport { - return &TraceTransport{RoundTripper: rt, peerService: peerService, internalTags: internalTags} -} - -// TraceTransport wraps a RoundTripper. If a request is being traced with -// Tracer, Transport will inject the current span into the headers, -// and set HTTP related tags on the span. 
-type TraceTransport struct { - peerService string - internalTags []trace.Tag - // The actual RoundTripper to use for the request. A nil - // RoundTripper defaults to http.DefaultTransport. - http.RoundTripper -} - -// RoundTrip implements the RoundTripper interface -func (t *TraceTransport) RoundTrip(req *http.Request) (*http.Response, error) { - rt := t.RoundTripper - if rt == nil { - rt = http.DefaultTransport - } - tr, ok := trace.FromContext(req.Context()) - if !ok { - return rt.RoundTrip(req) - } - operationName := "HTTP:" + req.Method - // fork new trace - tr = tr.Fork("", operationName) - - tr.SetTag(trace.TagString(trace.TagComponent, _defaultComponentName)) - tr.SetTag(trace.TagString(trace.TagHTTPMethod, req.Method)) - tr.SetTag(trace.TagString(trace.TagHTTPURL, req.URL.String())) - tr.SetTag(trace.TagString(trace.TagSpanKind, "client")) - if t.peerService != "" { - tr.SetTag(trace.TagString(trace.TagPeerService, t.peerService)) - } - tr.SetTag(t.internalTags...) - - // inject trace to http header - trace.Inject(tr, trace.HTTPFormat, req.Header) - - // FIXME: uncomment after trace sdk is goroutinue safe - // ct := clientTracer{tr: tr} - // req = req.WithContext(httptrace.WithClientTrace(req.Context(), ct.clientTrace())) - resp, err := rt.RoundTrip(req) - - if err != nil { - tr.SetTag(trace.TagBool(trace.TagError, true)) - tr.Finish(&err) - return resp, err - } - - // TODO: get ecode - tr.SetTag(trace.TagInt64(trace.TagHTTPStatusCode, int64(resp.StatusCode))) - - if req.Method == "HEAD" { - tr.Finish(nil) - } else { - resp.Body = &closeTracker{resp.Body, tr} - } - return resp, err -} - -type clientTracer struct { - tr trace.Trace -} - -func (h *clientTracer) clientTrace() *httptrace.ClientTrace { - return &httptrace.ClientTrace{ - GetConn: h.getConn, - GotConn: h.gotConn, - PutIdleConn: h.putIdleConn, - GotFirstResponseByte: h.gotFirstResponseByte, - Got100Continue: h.got100Continue, - DNSStart: h.dnsStart, - DNSDone: h.dnsDone, - ConnectStart: h.connectStart, - ConnectDone: h.connectDone, - WroteHeaders: h.wroteHeaders, - Wait100Continue: h.wait100Continue, - WroteRequest: h.wroteRequest, - } -} - -func (h *clientTracer) getConn(hostPort string) { - // ext.HTTPUrl.Set(h.sp, hostPort) - h.tr.SetLog(trace.Log(trace.LogEvent, "GetConn")) -} - -func (h *clientTracer) gotConn(info httptrace.GotConnInfo) { - h.tr.SetTag(trace.TagBool("net/http.reused", info.Reused)) - h.tr.SetTag(trace.TagBool("net/http.was_idle", info.WasIdle)) - h.tr.SetLog(trace.Log(trace.LogEvent, "GotConn")) -} - -func (h *clientTracer) putIdleConn(error) { - h.tr.SetLog(trace.Log(trace.LogEvent, "PutIdleConn")) -} - -func (h *clientTracer) gotFirstResponseByte() { - h.tr.SetLog(trace.Log(trace.LogEvent, "GotFirstResponseByte")) -} - -func (h *clientTracer) got100Continue() { - h.tr.SetLog(trace.Log(trace.LogEvent, "Got100Continue")) -} - -func (h *clientTracer) dnsStart(info httptrace.DNSStartInfo) { - h.tr.SetLog( - trace.Log(trace.LogEvent, "DNSStart"), - trace.Log("host", info.Host), - ) -} - -func (h *clientTracer) dnsDone(info httptrace.DNSDoneInfo) { - fields := []trace.LogField{trace.Log(trace.LogEvent, "DNSDone")} - for _, addr := range info.Addrs { - fields = append(fields, trace.Log("addr", addr.String())) - } - if info.Err != nil { - // TODO: support log error object - fields = append(fields, trace.Log(trace.LogErrorObject, info.Err.Error())) - } - h.tr.SetLog(fields...) 
-} - -func (h *clientTracer) connectStart(network, addr string) { - h.tr.SetLog( - trace.Log(trace.LogEvent, "ConnectStart"), - trace.Log("network", network), - trace.Log("addr", addr), - ) -} - -func (h *clientTracer) connectDone(network, addr string, err error) { - if err != nil { - h.tr.SetLog( - trace.Log("message", "ConnectDone"), - trace.Log("network", network), - trace.Log("addr", addr), - trace.Log(trace.LogEvent, "error"), - // TODO: support log error object - trace.Log(trace.LogErrorObject, err.Error()), - ) - } else { - h.tr.SetLog( - trace.Log(trace.LogEvent, "ConnectDone"), - trace.Log("network", network), - trace.Log("addr", addr), - ) - } -} - -func (h *clientTracer) wroteHeaders() { - h.tr.SetLog(trace.Log("event", "WroteHeaders")) -} - -func (h *clientTracer) wait100Continue() { - h.tr.SetLog(trace.Log("event", "Wait100Continue")) -} - -func (h *clientTracer) wroteRequest(info httptrace.WroteRequestInfo) { - if info.Err != nil { - h.tr.SetLog( - trace.Log("message", "WroteRequest"), - trace.Log("event", "error"), - // TODO: support log error object - trace.Log(trace.LogErrorObject, info.Err.Error()), - ) - h.tr.SetTag(trace.TagBool(trace.TagError, true)) - } else { - h.tr.SetLog(trace.Log("event", "WroteRequest")) - } -} diff --git a/pkg/net/http/blademaster/tree.go b/pkg/net/http/blademaster/tree.go deleted file mode 100644 index 4b81a9101..000000000 --- a/pkg/net/http/blademaster/tree.go +++ /dev/null @@ -1,625 +0,0 @@ -// Copyright 2013 Julien Schmidt. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be found -// at https://github.com/julienschmidt/httprouter/blob/master/LICENSE - -package blademaster - -import ( - "net/url" - "strings" - "unicode" -) - -// Param is a single URL parameter, consisting of a key and a value. -type Param struct { - Key string - Value string -} - -// Params is a Param-slice, as returned by the router. -// The slice is ordered, the first URL parameter is also the first slice value. -// It is therefore safe to read values by the index. -type Params []Param - -// Get returns the value of the first Param which key matches the given name. -// If no matching Param is found, an empty string is returned. -func (ps Params) Get(name string) (string, bool) { - for _, entry := range ps { - if entry.Key == name { - return entry.Value, true - } - } - return "", false -} - -// ByName returns the value of the first Param which key matches the given name. -// If no matching Param is found, an empty string is returned. -func (ps Params) ByName(name string) (va string) { - va, _ = ps.Get(name) - return -} - -type methodTree struct { - method string - root *node -} - -type methodTrees []methodTree - -func (trees methodTrees) get(method string) *node { - for _, tree := range trees { - if tree.method == method { - return tree.root - } - } - return nil -} - -func min(a, b int) int { - if a <= b { - return a - } - return b -} - -func countParams(path string) uint8 { - var n uint - for i := 0; i < len(path); i++ { - if path[i] != ':' && path[i] != '*' { - continue - } - n++ - } - if n >= 255 { - return 255 - } - return uint8(n) -} - -type nodeType uint8 - -const ( - static nodeType = iota // default - root - param - catchAll -) - -type node struct { - path string - indices string - children []*node - handlers []HandlerFunc - priority uint32 - nType nodeType - maxParams uint8 - wildChild bool -} - -// increments priority of the given child and reorders if necessary. 
-func (n *node) incrementChildPrio(pos int) int { - n.children[pos].priority++ - prio := n.children[pos].priority - - // adjust position (move to front) - newPos := pos - for newPos > 0 && n.children[newPos-1].priority < prio { - // swap node positions - n.children[newPos-1], n.children[newPos] = n.children[newPos], n.children[newPos-1] - - newPos-- - } - - // build new index char string - if newPos != pos { - n.indices = n.indices[:newPos] + // unchanged prefix, might be empty - n.indices[pos:pos+1] + // the index char we move - n.indices[newPos:pos] + n.indices[pos+1:] // rest without char at 'pos' - } - - return newPos -} - -// addRoute adds a node with the given handle to the path. -// Not concurrency-safe! -func (n *node) addRoute(path string, handlers []HandlerFunc) { - fullPath := path - n.priority++ - numParams := countParams(path) - - // non-empty tree - if len(n.path) > 0 || len(n.children) > 0 { - walk: - for { - // Update maxParams of the current node - if numParams > n.maxParams { - n.maxParams = numParams - } - - // Find the longest common prefix. - // This also implies that the common prefix contains no ':' or '*' - // since the existing key can't contain those chars. - i := 0 - max := min(len(path), len(n.path)) - for i < max && path[i] == n.path[i] { - i++ - } - - // Split edge - if i < len(n.path) { - child := node{ - path: n.path[i:], - wildChild: n.wildChild, - indices: n.indices, - children: n.children, - handlers: n.handlers, - priority: n.priority - 1, - } - - // Update maxParams (max of all children) - for i := range child.children { - if child.children[i].maxParams > child.maxParams { - child.maxParams = child.children[i].maxParams - } - } - - n.children = []*node{&child} - // []byte for proper unicode char conversion, see #65 - n.indices = string([]byte{n.path[i]}) - n.path = path[:i] - n.handlers = nil - n.wildChild = false - } - - // Make new node a child of this node - if i < len(path) { - path = path[i:] - - if n.wildChild { - n = n.children[0] - n.priority++ - - // Update maxParams of the child node - if numParams > n.maxParams { - n.maxParams = numParams - } - numParams-- - - // Check if the wildcard matches - if len(path) >= len(n.path) && n.path == path[:len(n.path)] { - // check for longer wildcard, e.g. 
:name and :names - if len(n.path) >= len(path) || path[len(n.path)] == '/' { - continue walk - } - } - - pathSeg := path - if n.nType != catchAll { - pathSeg = strings.SplitN(path, "/", 2)[0] - } - prefix := fullPath[:strings.Index(fullPath, pathSeg)] + n.path - panic("'" + pathSeg + - "' in new path '" + fullPath + - "' conflicts with existing wildcard '" + n.path + - "' in existing prefix '" + prefix + - "'") - } - - c := path[0] - - // slash after param - if n.nType == param && c == '/' && len(n.children) == 1 { - n = n.children[0] - n.priority++ - continue walk - } - - // Check if a child with the next path byte exists - for i := 0; i < len(n.indices); i++ { - if c == n.indices[i] { - i = n.incrementChildPrio(i) - n = n.children[i] - continue walk - } - } - - // Otherwise insert it - if c != ':' && c != '*' { - // []byte for proper unicode char conversion, see #65 - n.indices += string([]byte{c}) - child := &node{ - maxParams: numParams, - } - n.children = append(n.children, child) - n.incrementChildPrio(len(n.indices) - 1) - n = child - } - n.insertChild(numParams, path, fullPath, handlers) - return - } else if i == len(path) { // Make node a (in-path) leaf - if n.handlers != nil { - panic("handlers are already registered for path '" + fullPath + "'") - } - n.handlers = handlers - } - return - } - } else { // Empty tree - n.insertChild(numParams, path, fullPath, handlers) - n.nType = root - } -} - -func (n *node) insertChild(numParams uint8, path string, fullPath string, handlers []HandlerFunc) { - var offset int // already handled bytes of the path - - // find prefix until first wildcard (beginning with ':' or '*') - for i, max := 0, len(path); numParams > 0; i++ { - c := path[i] - if c != ':' && c != '*' { - continue - } - - // find wildcard end (either '/' or path end) - end := i + 1 - for end < max && path[end] != '/' { - switch path[end] { - // the wildcard name must not contain ':' and '*' - case ':', '*': - panic("only one wildcard per path segment is allowed, has: '" + - path[i:] + "' in path '" + fullPath + "'") - default: - end++ - } - } - - // check if this Node existing children which would be - // unreachable if we insert the wildcard here - if len(n.children) > 0 { - panic("wildcard route '" + path[i:end] + - "' conflicts with existing children in path '" + fullPath + "'") - } - - // check if the wildcard has a name - if end-i < 2 { - panic("wildcards must be named with a non-empty name in path '" + fullPath + "'") - } - - if c == ':' { // param - // split path at the beginning of the wildcard - if i > 0 { - n.path = path[offset:i] - offset = i - } - - child := &node{ - nType: param, - maxParams: numParams, - } - n.children = []*node{child} - n.wildChild = true - n = child - n.priority++ - numParams-- - - // if the path doesn't end with the wildcard, then there - // will be another non-wildcard subpath starting with '/' - if end < max { - n.path = path[offset:end] - offset = end - - child := &node{ - maxParams: numParams, - priority: 1, - } - n.children = []*node{child} - n = child - } - } else { // catchAll - if end != max || numParams > 1 { - panic("catch-all routes are only allowed at the end of the path in path '" + fullPath + "'") - } - - if len(n.path) > 0 && n.path[len(n.path)-1] == '/' { - panic("catch-all conflicts with existing handle for the path segment root in path '" + fullPath + "'") - } - - // currently fixed width 1 for '/' - i-- - if path[i] != '/' { - panic("no / before catch-all in path '" + fullPath + "'") - } - - n.path = path[offset:i] - - // first 
node: catchAll node with empty path - child := &node{ - wildChild: true, - nType: catchAll, - maxParams: 1, - } - n.children = []*node{child} - n.indices = string(path[i]) - n = child - n.priority++ - - // second node: node holding the variable - child = &node{ - path: path[i:], - nType: catchAll, - maxParams: 1, - handlers: handlers, - priority: 1, - } - n.children = []*node{child} - - return - } - } - - // insert remaining path part and handle to the leaf - n.path = path[offset:] - n.handlers = handlers -} - -// getValue returns the handle registered with the given path (key). The values of -// wildcards are saved to a map. -// If no handle can be found, a TSR (trailing slash redirect) recommendation is -// made if a handle exists with an extra (without the) trailing slash for the -// given path. -func (n *node) getValue(path string, po Params, unescape bool) (handlers []HandlerFunc, p Params, tsr bool) { - p = po -walk: // Outer loop for walking the tree - for { - if len(path) > len(n.path) { - if path[:len(n.path)] == n.path { - path = path[len(n.path):] - // If this node does not have a wildcard (param or catchAll) - // child, we can just look up the next child node and continue - // to walk down the tree - if !n.wildChild { - c := path[0] - for i := 0; i < len(n.indices); i++ { - if c == n.indices[i] { - n = n.children[i] - continue walk - } - } - - // Nothing found. - // We can recommend to redirect to the same URL without a - // trailing slash if a leaf exists for that path. - tsr = path == "/" && n.handlers != nil - return - } - - // handle wildcard child - n = n.children[0] - switch n.nType { - case param: - // find param end (either '/' or path end) - end := 0 - for end < len(path) && path[end] != '/' { - end++ - } - - // save param value - if cap(p) < int(n.maxParams) { - p = make(Params, 0, n.maxParams) - } - i := len(p) - p = p[:i+1] // expand slice within preallocated capacity - p[i].Key = n.path[1:] - val := path[:end] - if unescape { - var err error - if p[i].Value, err = url.QueryUnescape(val); err != nil { - p[i].Value = val // fallback, in case of error - } - } else { - p[i].Value = val - } - - // we need to go deeper! - if end < len(path) { - if len(n.children) > 0 { - path = path[end:] - n = n.children[0] - continue walk - } - - // ... but we can't - tsr = len(path) == end+1 - return - } - - if handlers = n.handlers; handlers != nil { - return - } - if len(n.children) == 1 { - // No handle found. Check if a handle for this path + a - // trailing slash exists for TSR recommendation - n = n.children[0] - tsr = n.path == "/" && n.handlers != nil - } - - return - - case catchAll: - // save param value - if cap(p) < int(n.maxParams) { - p = make(Params, 0, n.maxParams) - } - i := len(p) - p = p[:i+1] // expand slice within preallocated capacity - p[i].Key = n.path[2:] - if unescape { - var err error - if p[i].Value, err = url.QueryUnescape(path); err != nil { - p[i].Value = path // fallback, in case of error - } - } else { - p[i].Value = path - } - - handlers = n.handlers - return - - default: - panic("invalid node type") - } - } - } else if path == n.path { - // We should have reached the node containing the handle. - // Check if this node has a handle registered. - if handlers = n.handlers; handlers != nil { - return - } - - if path == "/" && n.wildChild && n.nType != root { - tsr = true - return - } - - // No handle found. 
Check if a handle for this path + a - // trailing slash exists for trailing slash recommendation - for i := 0; i < len(n.indices); i++ { - if n.indices[i] == '/' { - n = n.children[i] - tsr = (len(n.path) == 1 && n.handlers != nil) || - (n.nType == catchAll && n.children[0].handlers != nil) - return - } - } - - return - } - - // Nothing found. We can recommend to redirect to the same URL with an - // extra trailing slash if a leaf exists for that path - tsr = (path == "/") || - (len(n.path) == len(path)+1 && n.path[len(path)] == '/' && - path == n.path[:len(n.path)-1] && n.handlers != nil) - return - } -} - -// findCaseInsensitivePath makes a case-insensitive lookup of the given path and tries to find a handler. -// It can optionally also fix trailing slashes. -// It returns the case-corrected path and a bool indicating whether the lookup -// was successful. -func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPath []byte, found bool) { - ciPath = make([]byte, 0, len(path)+1) // preallocate enough memory - - // Outer loop for walking the tree - for len(path) >= len(n.path) && strings.EqualFold(path[:len(n.path)], n.path) { - path = path[len(n.path):] - ciPath = append(ciPath, n.path...) - - if len(path) > 0 { - // If this node does not have a wildcard (param or catchAll) child, - // we can just look up the next child node and continue to walk down - // the tree - if !n.wildChild { - r := unicode.ToLower(rune(path[0])) - for i, index := range n.indices { - // must use recursive approach since both index and - // ToLower(index) could exist. We must check both. - if r == unicode.ToLower(index) { - out, _found := n.children[i].findCaseInsensitivePath(path, fixTrailingSlash) - if _found { - return append(ciPath, out...), true - } - } - } - - // Nothing found. We can recommend to redirect to the same URL - // without a trailing slash if a leaf exists for that path - found = fixTrailingSlash && path == "/" && n.handlers != nil - return - } - - n = n.children[0] - switch n.nType { - case param: - // find param end (either '/' or path end) - k := 0 - for k < len(path) && path[k] != '/' { - k++ - } - - // add param value to case insensitive path - ciPath = append(ciPath, path[:k]...) - - // we need to go deeper! - if k < len(path) { - if len(n.children) > 0 { - path = path[k:] - n = n.children[0] - continue - } - - // ... but we can't - if fixTrailingSlash && len(path) == k+1 { - return ciPath, true - } - return - } - - if n.handlers != nil { - return ciPath, true - } else if fixTrailingSlash && len(n.children) == 1 { - // No handle found. Check if a handle for this path + a - // trailing slash exists - n = n.children[0] - if n.path == "/" && n.handlers != nil { - return append(ciPath, '/'), true - } - } - return - - case catchAll: - return append(ciPath, path...), true - - default: - panic("invalid node type") - } - } else { - // We should have reached the node containing the handle. - // Check if this node has a handle registered. - if n.handlers != nil { - return ciPath, true - } - - // No handle found. - // Try to fix the path by adding a trailing slash - if fixTrailingSlash { - for i := 0; i < len(n.indices); i++ { - if n.indices[i] == '/' { - n = n.children[i] - if (len(n.path) == 1 && n.handlers != nil) || - (n.nType == catchAll && n.children[0].handlers != nil) { - return append(ciPath, '/'), true - } - return - } - } - } - return - } - } - - // Nothing found. 
- // Try to fix the path by adding / removing a trailing slash - if fixTrailingSlash { - if path == "/" { - return ciPath, true - } - if len(path)+1 == len(n.path) && n.path[len(path)] == '/' && - strings.EqualFold(path, n.path[:len(path)]) && - n.handlers != nil { - return append(ciPath, n.path...), true - } - } - return -} diff --git a/pkg/net/http/blademaster/utils.go b/pkg/net/http/blademaster/utils.go deleted file mode 100644 index 7bd86034f..000000000 --- a/pkg/net/http/blademaster/utils.go +++ /dev/null @@ -1,159 +0,0 @@ -package blademaster - -import ( - "os" - "path" -) - -func lastChar(str string) uint8 { - if str == "" { - panic("The length of the string can't be 0") - } - return str[len(str)-1] -} - -func joinPaths(absolutePath, relativePath string) string { - if relativePath == "" { - return absolutePath - } - - finalPath := path.Join(absolutePath, relativePath) - appendSlash := lastChar(relativePath) == '/' && lastChar(finalPath) != '/' - if appendSlash { - return finalPath + "/" - } - return finalPath -} - -func resolveAddress(addr []string) string { - switch len(addr) { - case 0: - if port := os.Getenv("PORT"); port != "" { - //debugPrint("Environment variable PORT=\"%s\"", port) - return ":" + port - } - //debugPrint("Environment variable PORT is undefined. Using port :8080 by default") - return ":8080" - case 1: - return addr[0] - default: - panic("too much parameters") - } -} - -// cleanPath is the URL version of path.Clean, it returns a canonical URL path -// for p, eliminating . and .. elements. -// -// The following rules are applied iteratively until no further processing can -// be done: -// 1. Replace multiple slashes with a single slash. -// 2. Eliminate each . path name element (the current directory). -// 3. Eliminate each inner .. path name element (the parent directory) -// along with the non-.. element that precedes it. -// 4. Eliminate .. elements that begin a rooted path: -// that is, replace "/.." by "/" at the beginning of a path. -// -// If the result of this process is an empty string, "/" is returned. -func cleanPath(p string) string { - // Turn empty string into "/" - if p == "" { - return "/" - } - - n := len(p) - var buf []byte - - // Invariants: - // reading from path; r is index of next byte to process. - // writing to buf; w is index of next byte to write. - - // path must start with '/' - r := 1 - w := 1 - - if p[0] != '/' { - r = 0 - buf = make([]byte, n+1) - buf[0] = '/' - } - - trailing := n > 1 && p[n-1] == '/' - - // A bit more clunky without a 'lazybuf' like the path package, but the loop - // gets completely inlined (bufApp). So in contrast to the path package this - // loop has no expensive function calls (except 1x make) - - for r < n { - switch { - case p[r] == '/': - // empty path element, trailing slash is added after the end - r++ - - case p[r] == '.' && r+1 == n: - trailing = true - r++ - - case p[r] == '.' && p[r+1] == '/': - // . element - r += 2 - - case p[r] == '.' && p[r+1] == '.' && (r+2 == n || p[r+2] == '/'): - // .. element: remove to last / - r += 3 - - if w > 1 { - // can backtrack - w-- - - if buf == nil { - for w > 1 && p[w] != '/' { - w-- - } - } else { - for w > 1 && buf[w] != '/' { - w-- - } - } - } - - default: - // real path element. 
- // add slash if needed - if w > 1 { - bufApp(&buf, p, w, '/') - w++ - } - - // copy element - for r < n && p[r] != '/' { - bufApp(&buf, p, w, p[r]) - w++ - r++ - } - } - } - - // re-append trailing slash - if trailing && w > 1 { - bufApp(&buf, p, w, '/') - w++ - } - - if buf == nil { - return p[:w] - } - return string(buf[:w]) -} - -// internal helper to lazily create a buffer if necessary. -func bufApp(buf *[]byte, s string, w int, c byte) { - if *buf == nil { - if s[w] == c { - return - } - - *buf = make([]byte, len(s)) - copy(*buf, s[:w]) - } - (*buf)[w] = c -} diff --git a/pkg/net/ip/ip.go b/pkg/net/ip/ip.go deleted file mode 100644 index 0966ccc5e..000000000 --- a/pkg/net/ip/ip.go +++ /dev/null @@ -1,74 +0,0 @@ -package ip - -import ( - "net" - "strings" -) - -// ExternalIP get external ip. -func ExternalIP() (res []string) { - inters, err := net.Interfaces() - if err != nil { - return - } - for _, inter := range inters { - if !strings.HasPrefix(inter.Name, "lo") { - addrs, err := inter.Addrs() - if err != nil { - continue - } - for _, addr := range addrs { - if ipnet, ok := addr.(*net.IPNet); ok { - if ipnet.IP.IsLoopback() || ipnet.IP.IsLinkLocalMulticast() || ipnet.IP.IsLinkLocalUnicast() { - continue - } - if ip4 := ipnet.IP.To4(); ip4 != nil { - switch true { - case ip4[0] == 10: - continue - case ip4[0] == 172 && ip4[1] >= 16 && ip4[1] <= 31: - continue - case ip4[0] == 192 && ip4[1] == 168: - continue - default: - res = append(res, ipnet.IP.String()) - } - } - } - } - } - } - return -} - -// InternalIP get internal ip. -func InternalIP() string { - inters, err := net.Interfaces() - if err != nil { - return "" - } - for _, inter := range inters { - if !isUp(inter.Flags) { - continue - } - if !strings.HasPrefix(inter.Name, "lo") { - addrs, err := inter.Addrs() - if err != nil { - continue - } - for _, addr := range addrs { - if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() { - if ipnet.IP.To4() != nil { - return ipnet.IP.String() - } - } - } - } - } - return "" -} - -// isUp Interface is up -func isUp(v net.Flags) bool { - return v&net.FlagUp == net.FlagUp -} diff --git a/pkg/net/metadata/README.md b/pkg/net/metadata/README.md deleted file mode 100644 index eb51734b8..000000000 --- a/pkg/net/metadata/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# net/metadata - -## 项目简介 - -用于储存各种元信息 diff --git a/pkg/net/metadata/key.go b/pkg/net/metadata/key.go deleted file mode 100644 index cde79d943..000000000 --- a/pkg/net/metadata/key.go +++ /dev/null @@ -1,67 +0,0 @@ -package metadata - -// metadata common key -const ( - - // Network - RemoteIP = "remote_ip" - RemotePort = "remote_port" - ServerAddr = "server_addr" - ClientAddr = "client_addr" - - // Router - Cluster = "cluster" - Color = "color" - - // Trace - Trace = "trace" - Caller = "caller" - - // Timeout - Timeout = "timeout" - - // Dispatch - CPUUsage = "cpu_usage" - Errors = "errors" - Requests = "requests" - - // Mirror - Mirror = "mirror" - - // Mid 外网账户用户id - Mid = "mid" // NOTE: !!!业务可重新修改key名!!! - - // Device 客户端信息 - Device = "device" - - // Criticality 重要性 - Criticality = "criticality" -) - -var outgoingKey = map[string]struct{}{ - Color: {}, - RemoteIP: {}, - RemotePort: {}, - Mirror: {}, - Criticality: {}, -} - -var incomingKey = map[string]struct{}{ - Caller: {}, -} - -// IsOutgoingKey represent this key should propagate by rpc. -func IsOutgoingKey(key string) bool { - _, ok := outgoingKey[key] - return ok -} - -// IsIncomingKey represent this key should extract from rpc metadata. 
-func IsIncomingKey(key string) (ok bool) { - _, ok = outgoingKey[key] - if ok { - return - } - _, ok = incomingKey[key] - return -} diff --git a/pkg/net/metadata/metadata.go b/pkg/net/metadata/metadata.go deleted file mode 100644 index 83eb3657c..000000000 --- a/pkg/net/metadata/metadata.go +++ /dev/null @@ -1,156 +0,0 @@ -package metadata - -import ( - "context" - "fmt" - "strconv" - - "github.com/pkg/errors" -) - -// MD is a mapping from metadata keys to values. -type MD map[string]interface{} - -type mdKey struct{} - -// Len returns the number of items in md. -func (md MD) Len() int { - return len(md) -} - -// Copy returns a copy of md. -func (md MD) Copy() MD { - return Join(md) -} - -// New creates an MD from a given key-value map. -func New(m map[string]interface{}) MD { - md := MD{} - for k, val := range m { - md[k] = val - } - return md -} - -// Join joins any number of mds into a single MD. -// The order of values for each key is determined by the order in which -// the mds containing those values are presented to Join. -func Join(mds ...MD) MD { - out := MD{} - for _, md := range mds { - for k, v := range md { - out[k] = v - } - } - return out -} - -// Pairs returns an MD formed by the mapping of key, value ... -// Pairs panics if len(kv) is odd. -func Pairs(kv ...interface{}) MD { - if len(kv)%2 == 1 { - panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) - } - md := MD{} - var key string - for i, s := range kv { - if i%2 == 0 { - key = s.(string) - continue - } - md[key] = s - } - return md -} - -// NewContext creates a new context with md attached. -func NewContext(ctx context.Context, md MD) context.Context { - return context.WithValue(ctx, mdKey{}, md) -} - -// FromContext returns the incoming metadata in ctx if it exists. The -// returned MD should not be modified. Writing to it may cause races. -// Modification should be made to copies of the returned MD. -func FromContext(ctx context.Context) (md MD, ok bool) { - md, ok = ctx.Value(mdKey{}).(MD) - return -} - -// String get string value from metadata in context -func String(ctx context.Context, key string) string { - md, ok := ctx.Value(mdKey{}).(MD) - if !ok { - return "" - } - str, _ := md[key].(string) - return str -} - -// Int64 get int64 value from metadata in context -func Int64(ctx context.Context, key string) int64 { - md, ok := ctx.Value(mdKey{}).(MD) - if !ok { - return 0 - } - i64, _ := md[key].(int64) - return i64 -} - -// Value get value from metadata in context return nil if not found -func Value(ctx context.Context, key string) interface{} { - md, ok := ctx.Value(mdKey{}).(MD) - if !ok { - return nil - } - return md[key] -} - -// WithContext return no deadline context and retain metadata. -func WithContext(c context.Context) context.Context { - md, ok := FromContext(c) - if ok { - nmd := md.Copy() - // NOTE: temporary delete prevent asynchronous task reuse finished task - delete(nmd, Trace) - return NewContext(context.Background(), nmd) - } - return context.Background() -} - -// Bool get boolean from metadata in context use strconv.Parse. -func Bool(ctx context.Context, key string) bool { - md, ok := ctx.Value(mdKey{}).(MD) - if !ok { - return false - } - - switch md[key].(type) { - case bool: - return md[key].(bool) - case string: - ok, _ = strconv.ParseBool(md[key].(string)) - return ok - default: - return false - } -} - -// Range range value from metadata in context filtered by filterFunc. 
-func Range(ctx context.Context, rangeFunc func(key string, value interface{}), filterFunc ...func(key string) bool) { - var filter func(key string) bool - filterLen := len(filterFunc) - if filterLen > 1 { - panic(errors.New("metadata: Range got the lenth of filterFunc must less than 2")) - } else if filterLen == 1 { - filter = filterFunc[0] - } - md, ok := ctx.Value(mdKey{}).(MD) - if !ok { - return - } - for key, value := range md { - if filter == nil || filter(key) { - rangeFunc(key, value) - } - } -} diff --git a/pkg/net/metadata/metadata_test.go b/pkg/net/metadata/metadata_test.go deleted file mode 100644 index 48a1fd2bc..000000000 --- a/pkg/net/metadata/metadata_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package metadata - -import ( - "context" - "reflect" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestPairsMD(t *testing.T) { - for _, test := range []struct { - // input - kv []interface{} - // output - md MD - }{ - {[]interface{}{}, MD{}}, - {[]interface{}{"k1", "v1", "k1", "v2"}, MD{"k1": "v2"}}, - } { - md := Pairs(test.kv...) - if !reflect.DeepEqual(md, test.md) { - t.Fatalf("Pairs(%v) = %v, want %v", test.kv, md, test.md) - } - } -} -func TestCopy(t *testing.T) { - const key, val = "key", "val" - orig := Pairs(key, val) - copy := orig.Copy() - if !reflect.DeepEqual(orig, copy) { - t.Errorf("copied value not equal to the original, got %v, want %v", copy, orig) - } - orig[key] = "foo" - if v := copy[key]; v != val { - t.Errorf("change in original should not affect copy, got %q, want %q", v, val) - } -} -func TestJoin(t *testing.T) { - for _, test := range []struct { - mds []MD - want MD - }{ - {[]MD{}, MD{}}, - {[]MD{Pairs("foo", "bar")}, Pairs("foo", "bar")}, - {[]MD{Pairs("foo", "bar"), Pairs("foo", "baz")}, Pairs("foo", "bar", "foo", "baz")}, - {[]MD{Pairs("foo", "bar"), Pairs("foo", "baz"), Pairs("zip", "zap")}, Pairs("foo", "bar", "foo", "baz", "zip", "zap")}, - } { - md := Join(test.mds...) 
- if !reflect.DeepEqual(md, test.want) { - t.Errorf("context's metadata is %v, want %v", md, test.want) - } - } -} - -func TestWithContext(t *testing.T) { - md := MD(map[string]interface{}{RemoteIP: "127.0.0.1", Color: "red", Mirror: true}) - c := NewContext(context.Background(), md) - ctx := WithContext(c) - md1, ok := FromContext(ctx) - if !ok { - t.Errorf("expect ok be true") - t.FailNow() - } - if !reflect.DeepEqual(md1, md) { - t.Errorf("expect md1 equal to md") - t.FailNow() - } -} - -func TestBool(t *testing.T) { - md := MD{RemoteIP: "127.0.0.1", Color: "red"} - mdcontext := NewContext(context.Background(), md) - assert.Equal(t, false, Bool(mdcontext, Mirror)) - - mdcontext = NewContext(context.Background(), MD{Mirror: true}) - assert.Equal(t, true, Bool(mdcontext, Mirror)) - - mdcontext = NewContext(context.Background(), MD{Mirror: "true"}) - assert.Equal(t, true, Bool(mdcontext, Mirror)) - - mdcontext = NewContext(context.Background(), MD{Mirror: "1"}) - assert.Equal(t, true, Bool(mdcontext, Mirror)) - - mdcontext = NewContext(context.Background(), MD{Mirror: "0"}) - assert.Equal(t, false, Bool(mdcontext, Mirror)) -} -func TestInt64(t *testing.T) { - mdcontext := NewContext(context.Background(), MD{Mid: int64(1)}) - assert.Equal(t, int64(1), Int64(mdcontext, Mid)) - mdcontext = NewContext(context.Background(), MD{Mid: int64(2)}) - assert.NotEqual(t, int64(1), Int64(mdcontext, Mid)) - mdcontext = NewContext(context.Background(), MD{Mid: 10}) - assert.NotEqual(t, int64(10), Int64(mdcontext, Mid)) -} - -func TestRange(t *testing.T) { - for _, test := range []struct { - filterFunc func(key string) bool - md MD - want MD - }{ - { - nil, - Pairs("foo", "bar"), - Pairs("foo", "bar"), - }, - { - IsOutgoingKey, - Pairs("foo", "bar", RemoteIP, "127.0.0.1", Color, "red", Mirror, "false"), - Pairs(RemoteIP, "127.0.0.1", Color, "red", Mirror, "false"), - }, - { - IsOutgoingKey, - Pairs("foo", "bar", Caller, "app-feed", RemoteIP, "127.0.0.1", Color, "red", Mirror, "true"), - Pairs(RemoteIP, "127.0.0.1", Color, "red", Mirror, "true"), - }, - { - IsIncomingKey, - Pairs("foo", "bar", Caller, "app-feed", RemoteIP, "127.0.0.1", Color, "red", Mirror, "true"), - Pairs(Caller, "app-feed", RemoteIP, "127.0.0.1", Color, "red", Mirror, "true"), - }, - } { - var mds []MD - c := NewContext(context.Background(), test.md) - ctx := WithContext(c) - Range(ctx, - func(key string, value interface{}) { - mds = append(mds, Pairs(key, value)) - }, - test.filterFunc) - rmd := Join(mds...) - if !reflect.DeepEqual(rmd, test.want) { - t.Fatalf("Range(%v) = %v, want %v", test.md, rmd, test.want) - } - if test.filterFunc == nil { - var mds []MD - Range(ctx, - func(key string, value interface{}) { - mds = append(mds, Pairs(key, value)) - }) - rmd := Join(mds...) - if !reflect.DeepEqual(rmd, test.want) { - t.Fatalf("Range(%v) = %v, want %v", test.md, rmd, test.want) - } - } - } -} diff --git a/pkg/net/netutil/backoff.go b/pkg/net/netutil/backoff.go deleted file mode 100644 index d96f511f3..000000000 --- a/pkg/net/netutil/backoff.go +++ /dev/null @@ -1,72 +0,0 @@ -package netutil - -import ( - "math/rand" - "time" -) - -// DefaultBackoffConfig uses values specified for backoff in common. -var DefaultBackoffConfig = BackoffConfig{ - MaxDelay: 120 * time.Second, - BaseDelay: 1.0 * time.Second, - Factor: 1.6, - Jitter: 0.2, -} - -// Backoff defines the methodology for backing off after a call failure. 
-type Backoff interface { - // Backoff returns the amount of time to wait before the next retry given - // the number of consecutive failures. - Backoff(retries int) time.Duration -} - -// BackoffConfig defines the parameters for the default backoff strategy. -type BackoffConfig struct { - // MaxDelay is the upper bound of backoff delay. - MaxDelay time.Duration - - // baseDelay is the amount of time to wait before retrying after the first - // failure. - BaseDelay time.Duration - - // factor is applied to the backoff after each retry. - Factor float64 - - // jitter provides a range to randomize backoff delays. - Jitter float64 -} - -/* -// NOTE TODO avoid use unexcept config. -func (bc *BackoffConfig) Fix() { - md := bc.MaxDelay - *bc = DefaultBackoffConfig - - if md > 0 { - bc.MaxDelay = md - } -} -*/ - -// Backoff returns the amount of time to wait before the next retry given -// the number of consecutive failures. -func (bc *BackoffConfig) Backoff(retries int) time.Duration { - if retries == 0 { - return bc.BaseDelay - } - backoff, max := float64(bc.BaseDelay), float64(bc.MaxDelay) - for backoff < max && retries > 0 { - backoff *= bc.Factor - retries-- - } - if backoff > max { - backoff = max - } - // Randomize backoff delays so that if a cluster of requests start at - // the same time, they won't operate in lockstep. - backoff *= 1 + bc.Jitter*(rand.Float64()*2-1) - if backoff < 0 { - return 0 - } - return time.Duration(backoff) -} diff --git a/pkg/net/netutil/breaker/README.md b/pkg/net/netutil/breaker/README.md deleted file mode 100644 index d4294a9e2..000000000 --- a/pkg/net/netutil/breaker/README.md +++ /dev/null @@ -1,20 +0,0 @@ -#### breaker - -##### 项目简介 -1. 提供熔断器功能,供各种client(如rpc、http、msyql)等进行熔断 -2. 提供Go方法供业务在breaker熔断前后进行回调处理 - -##### 配置说明 -> 1. NewGroup(name string,c *Config)当c==nil时则采用默认配置 -> 2. 可通过breaker.Init(c *Config)替换默认配置 -> 3. 可通过group.Reload(c *Config)进行配置更新 -> 4. 默认配置如下所示: - _conf = &Config{ - Window: xtime.Duration(3 * time.Second), - Bucket: 10, - Request: 100, - K:1.5, - } - -##### 测试 -1. 执行当前目录下所有测试文件,测试所有功能 diff --git a/pkg/net/netutil/breaker/breaker.go b/pkg/net/netutil/breaker/breaker.go deleted file mode 100644 index c8aa87564..000000000 --- a/pkg/net/netutil/breaker/breaker.go +++ /dev/null @@ -1,164 +0,0 @@ -package breaker - -import ( - "sync" - "time" - - xtime "github.com/go-kratos/kratos/pkg/time" -) - -// Config broker config. -type Config struct { - SwitchOff bool // breaker switch,default off. - - // Google - K float64 - - Window xtime.Duration - Bucket int - Request int64 -} - -func (conf *Config) fix() { - if conf.K == 0 { - conf.K = 1.5 - } - if conf.Request == 0 { - conf.Request = 100 - } - if conf.Bucket == 0 { - conf.Bucket = 10 - } - if conf.Window == 0 { - conf.Window = xtime.Duration(3 * time.Second) - } -} - -// Breaker is a CircuitBreaker pattern. -// FIXME on int32 atomic.LoadInt32(&b.on) == _switchOn -type Breaker interface { - Allow() error - MarkSuccess() - MarkFailed() -} - -// Group represents a class of CircuitBreaker and forms a namespace in which -// units of CircuitBreaker. -type Group struct { - mu sync.RWMutex - brks map[string]Breaker - conf *Config -} - -const ( - // StateOpen when circuit breaker open, request not allowed, after sleep - // some duration, allow one single request for testing the health, if ok - // then state reset to closed, if not continue the step. 
- StateOpen int32 = iota - // StateClosed when circuit breaker closed, request allowed, the breaker - // calc the succeed ratio, if request num greater request setting and - // ratio lower than the setting ratio, then reset state to open. - StateClosed - // StateHalfopen when circuit breaker open, after slepp some duration, allow - // one request, but not state closed. - StateHalfopen - - //_switchOn int32 = iota - // _switchOff -) - -var ( - _mu sync.RWMutex - _conf = &Config{ - Window: xtime.Duration(3 * time.Second), - Bucket: 10, - Request: 100, - - // Percentage of failures must be lower than 33.33% - K: 1.5, - - // Pattern: "", - } - _group = NewGroup(_conf) -) - -// Init init global breaker config, also can reload config after first time call. -func Init(conf *Config) { - if conf == nil { - return - } - _mu.Lock() - _conf = conf - _mu.Unlock() -} - -// Go runs your function while tracking the breaker state of default group. -func Go(name string, run, fallback func() error) error { - breaker := _group.Get(name) - if err := breaker.Allow(); err != nil { - return fallback() - } - return run() -} - -// newBreaker new a breaker. -func newBreaker(c *Config) (b Breaker) { - // factory - return newSRE(c) -} - -// NewGroup new a breaker group container, if conf nil use default conf. -func NewGroup(conf *Config) *Group { - if conf == nil { - _mu.RLock() - conf = _conf - _mu.RUnlock() - } else { - conf.fix() - } - return &Group{ - conf: conf, - brks: make(map[string]Breaker), - } -} - -// Get get a breaker by a specified key, if breaker not exists then make a new one. -func (g *Group) Get(key string) Breaker { - g.mu.RLock() - brk, ok := g.brks[key] - conf := g.conf - g.mu.RUnlock() - if ok { - return brk - } - // NOTE here may new multi breaker for rarely case, let gc drop it. - brk = newBreaker(conf) - g.mu.Lock() - if _, ok = g.brks[key]; !ok { - g.brks[key] = brk - } - g.mu.Unlock() - return brk -} - -// Reload reload the group by specified config, this may let all inner breaker -// reset to a new one. -func (g *Group) Reload(conf *Config) { - if conf == nil { - return - } - conf.fix() - g.mu.Lock() - g.conf = conf - g.brks = make(map[string]Breaker, len(g.brks)) - g.mu.Unlock() -} - -// Go runs your function while tracking the breaker state of group. 
-func (g *Group) Go(name string, run, fallback func() error) error { - breaker := g.Get(name) - if err := breaker.Allow(); err != nil { - return fallback() - } - return run() -} diff --git a/pkg/net/netutil/breaker/breaker_test.go b/pkg/net/netutil/breaker/breaker_test.go deleted file mode 100644 index 28bc2870d..000000000 --- a/pkg/net/netutil/breaker/breaker_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package breaker - -import ( - "errors" - "testing" - "time" - - xtime "github.com/go-kratos/kratos/pkg/time" -) - -func TestGroup(t *testing.T) { - g1 := NewGroup(nil) - g2 := NewGroup(_conf) - if g1.conf != g2.conf { - t.FailNow() - } - - brk := g2.Get("key") - brk1 := g2.Get("key1") - if brk == brk1 { - t.FailNow() - } - brk2 := g2.Get("key") - if brk != brk2 { - t.FailNow() - } - - g := NewGroup(_conf) - c := &Config{ - Window: xtime.Duration(1 * time.Second), - Bucket: 10, - Request: 100, - SwitchOff: !_conf.SwitchOff, - } - g.Reload(c) - if g.conf.SwitchOff == _conf.SwitchOff { - t.FailNow() - } -} - -func TestInit(t *testing.T) { - switchOff := _conf.SwitchOff - c := &Config{ - Window: xtime.Duration(3 * time.Second), - Bucket: 10, - Request: 100, - SwitchOff: !switchOff, - } - Init(c) - if _conf.SwitchOff == switchOff { - t.FailNow() - } -} - -func TestGo(t *testing.T) { - if err := Go("test_run", func() error { - t.Log("breaker allow,callback run()") - return nil - }, func() error { - t.Log("breaker not allow,callback fallback()") - return errors.New("breaker not allow") - }); err != nil { - t.Error(err) - } - - _group.Reload(&Config{ - Window: xtime.Duration(3 * time.Second), - Bucket: 10, - Request: 100, - SwitchOff: true, - }) - - if err := Go("test_fallback", func() error { - t.Log("breaker allow,callback run()") - return nil - }, func() error { - t.Log("breaker not allow,callback fallback()") - return nil - }); err != nil { - t.Error(err) - } -} - -func markSuccess(b Breaker, count int) { - for i := 0; i < count; i++ { - b.MarkSuccess() - } -} - -func markFailed(b Breaker, count int) { - for i := 0; i < count; i++ { - b.MarkFailed() - } -} diff --git a/pkg/net/netutil/breaker/example_test.go b/pkg/net/netutil/breaker/example_test.go deleted file mode 100644 index d02943d07..000000000 --- a/pkg/net/netutil/breaker/example_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package breaker_test - -import ( - "fmt" - "time" - - "github.com/go-kratos/kratos/pkg/net/netutil/breaker" - xtime "github.com/go-kratos/kratos/pkg/time" -) - -// ExampleGroup show group usage. -func ExampleGroup() { - c := &breaker.Config{ - Window: xtime.Duration(3 * time.Second), - K: 1.5, - Bucket: 10, - Request: 100, - } - // init default config - breaker.Init(c) - // new group - g := breaker.NewGroup(c) - // reload group config - c.Bucket = 100 - c.Request = 200 - g.Reload(c) - // get breaker by key - g.Get("key") -} - -// ExampleBreaker show breaker usage. -func ExampleBreaker() { - // new group,use default breaker config - g := breaker.NewGroup(nil) - brk := g.Get("key") - // mark request success - brk.MarkSuccess() - // mark request failed - brk.MarkFailed() - // check if breaker allow or not - if brk.Allow() == nil { - fmt.Println("breaker allow") - } else { - fmt.Println("breaker not allow") - } -} - -// ExampleGo this example create a default group and show function callback -// according to the state of breaker. 
-func ExampleGo() { - run := func() error { - return nil - } - fallback := func() error { - return fmt.Errorf("unknown error") - } - if err := breaker.Go("example_go", run, fallback); err != nil { - fmt.Println(err) - } -} diff --git a/pkg/net/netutil/breaker/sre_breaker.go b/pkg/net/netutil/breaker/sre_breaker.go deleted file mode 100644 index 0caa057db..000000000 --- a/pkg/net/netutil/breaker/sre_breaker.go +++ /dev/null @@ -1,100 +0,0 @@ -package breaker - -import ( - "math" - "math/rand" - "sync" - "sync/atomic" - "time" - - "github.com/go-kratos/kratos/pkg/ecode" - "github.com/go-kratos/kratos/pkg/log" - "github.com/go-kratos/kratos/pkg/stat/metric" -) - -// sreBreaker is a sre CircuitBreaker pattern. -type sreBreaker struct { - stat metric.RollingCounter - r *rand.Rand - // rand.New(...) returns a non thread safe object - randLock sync.Mutex - - k float64 - request int64 - - state int32 -} - -func newSRE(c *Config) Breaker { - counterOpts := metric.RollingCounterOpts{ - Size: c.Bucket, - BucketDuration: time.Duration(int64(c.Window) / int64(c.Bucket)), - } - stat := metric.NewRollingCounter(counterOpts) - return &sreBreaker{ - stat: stat, - r: rand.New(rand.NewSource(time.Now().UnixNano())), - - request: c.Request, - k: c.K, - state: StateClosed, - } -} - -func (b *sreBreaker) summary() (success int64, total int64) { - b.stat.Reduce(func(iterator metric.Iterator) float64 { - for iterator.Next() { - bucket := iterator.Bucket() - total += bucket.Count - for _, p := range bucket.Points { - success += int64(p) - } - } - return 0 - }) - return -} - -func (b *sreBreaker) Allow() error { - success, total := b.summary() - k := b.k * float64(success) - if log.V(5) { - log.Info("breaker: request: %d, succee: %d, fail: %d", total, success, total-success) - } - // check overflow requests = K * success - if total < b.request || float64(total) < k { - if atomic.LoadInt32(&b.state) == StateOpen { - atomic.CompareAndSwapInt32(&b.state, StateOpen, StateClosed) - } - return nil - } - if atomic.LoadInt32(&b.state) == StateClosed { - atomic.CompareAndSwapInt32(&b.state, StateClosed, StateOpen) - } - dr := math.Max(0, (float64(total)-k)/float64(total+1)) - drop := b.trueOnProba(dr) - if log.V(5) { - log.Info("breaker: drop ratio: %f, drop: %t", dr, drop) - } - if drop { - return ecode.ServiceUnavailable - } - return nil -} - -func (b *sreBreaker) MarkSuccess() { - b.stat.Add(1) -} - -func (b *sreBreaker) MarkFailed() { - // NOTE: when client reject requets locally, continue add counter let the - // drop ratio higher. 
- b.stat.Add(0) -} - -func (b *sreBreaker) trueOnProba(proba float64) (truth bool) { - b.randLock.Lock() - truth = b.r.Float64() < proba - b.randLock.Unlock() - return -} diff --git a/pkg/net/netutil/breaker/sre_breaker_test.go b/pkg/net/netutil/breaker/sre_breaker_test.go deleted file mode 100644 index 1f54cfd4b..000000000 --- a/pkg/net/netutil/breaker/sre_breaker_test.go +++ /dev/null @@ -1,177 +0,0 @@ -package breaker - -import ( - "math" - "math/rand" - "testing" - "time" - - "github.com/go-kratos/kratos/pkg/stat/metric" - xtime "github.com/go-kratos/kratos/pkg/time" - - "github.com/stretchr/testify/assert" -) - -func getSRE() Breaker { - return NewGroup(&Config{ - Window: xtime.Duration(1 * time.Second), - Bucket: 10, - Request: 100, - K: 2, - }).Get("") -} - -func getSREBreaker() *sreBreaker { - counterOpts := metric.RollingCounterOpts{ - Size: 10, - BucketDuration: time.Millisecond * 100, - } - stat := metric.NewRollingCounter(counterOpts) - return &sreBreaker{ - stat: stat, - r: rand.New(rand.NewSource(time.Now().UnixNano())), - - request: 100, - k: 2, - state: StateClosed, - } -} - -func markSuccessWithDuration(b Breaker, count int, sleep time.Duration) { - for i := 0; i < count; i++ { - b.MarkSuccess() - time.Sleep(sleep) - } -} - -func markFailedWithDuration(b Breaker, count int, sleep time.Duration) { - for i := 0; i < count; i++ { - b.MarkFailed() - time.Sleep(sleep) - } -} - -func testSREClose(t *testing.T, b Breaker) { - markSuccess(b, 80) - assert.Equal(t, b.Allow(), nil) - markSuccess(b, 120) - assert.Equal(t, b.Allow(), nil) -} - -func testSREOpen(t *testing.T, b Breaker) { - markSuccess(b, 100) - assert.Equal(t, b.Allow(), nil) - markFailed(b, 10000000) - assert.NotEqual(t, b.Allow(), nil) -} - -func testSREHalfOpen(t *testing.T, b Breaker) { - // failback - assert.Equal(t, b.Allow(), nil) - t.Run("allow single failed", func(t *testing.T) { - markFailed(b, 10000000) - assert.NotEqual(t, b.Allow(), nil) - }) - time.Sleep(2 * time.Second) - t.Run("allow single succeed", func(t *testing.T) { - assert.Equal(t, b.Allow(), nil) - markSuccess(b, 10000000) - assert.Equal(t, b.Allow(), nil) - }) -} - -func TestSRE(t *testing.T) { - b := getSRE() - testSREClose(t, b) - - b = getSRE() - testSREOpen(t, b) - - b = getSRE() - testSREHalfOpen(t, b) -} - -func TestSRESelfProtection(t *testing.T) { - t.Run("total request < 100", func(t *testing.T) { - b := getSRE() - markFailed(b, 99) - assert.Equal(t, b.Allow(), nil) - }) - t.Run("total request > 100, total < 2 * success", func(t *testing.T) { - b := getSRE() - size := rand.Intn(10000000) - succ := int(math.Ceil(float64(size))) + 1 - markSuccess(b, succ) - markFailed(b, size-succ) - assert.Equal(t, b.Allow(), nil) - }) -} - -func TestSRESummary(t *testing.T) { - var ( - b *sreBreaker - succ, total int64 - ) - - sleep := 50 * time.Millisecond - t.Run("succ == total", func(t *testing.T) { - b = getSREBreaker() - markSuccessWithDuration(b, 10, sleep) - succ, total = b.summary() - assert.Equal(t, succ, int64(10)) - assert.Equal(t, total, int64(10)) - }) - - t.Run("fail == total", func(t *testing.T) { - b = getSREBreaker() - markFailedWithDuration(b, 10, sleep) - succ, total = b.summary() - assert.Equal(t, succ, int64(0)) - assert.Equal(t, total, int64(10)) - }) - - t.Run("succ = 1/2 * total, fail = 1/2 * total", func(t *testing.T) { - b = getSREBreaker() - markFailedWithDuration(b, 5, sleep) - markSuccessWithDuration(b, 5, sleep) - succ, total = b.summary() - assert.Equal(t, succ, int64(5)) - assert.Equal(t, total, int64(10)) - }) - - 
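Allow above implements the client-side throttling rule from the Google SRE book: once the rolling window holds at least `request` calls and the total exceeds K times the accepted count, new requests are rejected with probability (total − K·success)/(total + 1). A minimal numeric sketch, independent of the deleted types:

```go
package main

import (
	"fmt"
	"math"
)

// dropRatio mirrors the probability computed in sreBreaker.Allow.
func dropRatio(total, success int64, k float64) float64 {
	return math.Max(0, (float64(total)-k*float64(success))/float64(total+1))
}

func main() {
	// 1000 requests in the window but only 400 succeeded, K = 2:
	// (1000 - 2*400) / 1001 ≈ 0.20, so roughly one in five new requests
	// is shed locally while the backend recovers.
	fmt.Printf("drop ratio: %.3f\n", dropRatio(1000, 400, 2))
}
```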
t.Run("auto reset rolling counter", func(t *testing.T) { - time.Sleep(time.Second) - succ, total = b.summary() - assert.Equal(t, succ, int64(0)) - assert.Equal(t, total, int64(0)) - }) -} - -func TestTrueOnProba(t *testing.T) { - const proba = math.Pi / 10 - const total = 100000 - const epsilon = 0.05 - var count int - b := getSREBreaker() - for i := 0; i < total; i++ { - if b.trueOnProba(proba) { - count++ - } - } - - ratio := float64(count) / float64(total) - assert.InEpsilon(t, proba, ratio, epsilon) -} - -func BenchmarkSreBreakerAllow(b *testing.B) { - breaker := getSRE() - b.ResetTimer() - for i := 0; i <= b.N; i++ { - breaker.Allow() - if i%2 == 0 { - breaker.MarkSuccess() - } else { - breaker.MarkFailed() - } - } -} diff --git a/pkg/net/rpc/warden/README.md b/pkg/net/rpc/warden/README.md deleted file mode 100644 index 127502949..000000000 --- a/pkg/net/rpc/warden/README.md +++ /dev/null @@ -1,13 +0,0 @@ -#### net/rpc/warden - -##### 项目简介 - -来自 bilibili 主站技术部的 RPC 框架,融合主站技术部的核心科技,带来如飞一般的体验。 - -##### 编译环境 - -- **请只用 Golang v1.9.x 以上版本编译执行** - -##### 依赖包 - -- [grpc](google.golang.org/grpc) diff --git a/pkg/net/rpc/warden/balancer/p2c/README.md b/pkg/net/rpc/warden/balancer/p2c/README.md deleted file mode 100644 index 97b380952..000000000 --- a/pkg/net/rpc/warden/balancer/p2c/README.md +++ /dev/null @@ -1,5 +0,0 @@ -#### warden/balancer/p2c - -##### 项目简介 - -warden 的 Power of Two Choices (P2C)负载均衡模块,主要用于为每个RPC请求返回一个Server节点以供调用 diff --git a/pkg/net/rpc/warden/balancer/p2c/p2c.go b/pkg/net/rpc/warden/balancer/p2c/p2c.go deleted file mode 100644 index e2bba88af..000000000 --- a/pkg/net/rpc/warden/balancer/p2c/p2c.go +++ /dev/null @@ -1,293 +0,0 @@ -package p2c - -import ( - "context" - "math" - "math/rand" - "strconv" - "sync" - "sync/atomic" - "time" - - "github.com/go-kratos/kratos/pkg/conf/env" - - "github.com/go-kratos/kratos/pkg/log" - nmd "github.com/go-kratos/kratos/pkg/net/metadata" - wmd "github.com/go-kratos/kratos/pkg/net/rpc/warden/internal/metadata" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/base" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/status" -) - -const ( - // The mean lifetime of `cost`, it reaches its half-life after Tau*ln(2). - tau = int64(time.Millisecond * 600) - // if statistic not collected,we add a big penalty to endpoint - penalty = uint64(1000 * time.Millisecond * 250) - - forceGap = int64(time.Second * 3) -) - -var _ base.PickerBuilder = &p2cPickerBuilder{} -var _ balancer.Picker = &p2cPicker{} - -// Name is the name of pick of two random choices balancer. -const Name = "p2c" - -// newBuilder creates a new weighted-roundrobin balancer builder. 
-func newBuilder() balancer.Builder { - return base.NewBalancerBuilder(Name, &p2cPickerBuilder{}) -} - -func init() { - balancer.Register(newBuilder()) -} - -type subConn struct { - // metadata - conn balancer.SubConn - addr resolver.Address - meta wmd.MD - - //client statistic data - lag uint64 - success uint64 - inflight int64 - // server statistic data - svrCPU uint64 - - //last collected timestamp - stamp int64 - //last pick timestamp - pick int64 - // request number in a period time - reqs int64 -} - -func (sc *subConn) valid() bool { - return sc.health() > 500 && atomic.LoadUint64(&sc.svrCPU) < 900 -} - -func (sc *subConn) health() uint64 { - return atomic.LoadUint64(&sc.success) -} - -func (sc *subConn) load() uint64 { - lag := uint64(math.Sqrt(float64(atomic.LoadUint64(&sc.lag))) + 1) - load := atomic.LoadUint64(&sc.svrCPU) * lag * uint64(atomic.LoadInt64(&sc.inflight)) - if load == 0 { - // penalty是初始化没有数据时的惩罚值,默认为1e9 * 250 - load = penalty - } - return load -} - -func (sc *subConn) cost() uint64 { - load := atomic.LoadUint64(&sc.svrCPU) * atomic.LoadUint64(&sc.lag) * uint64(atomic.LoadInt64(&sc.inflight)) - if load == 0 { - // penalty是初始化没有数据时的惩罚值,默认为1e9 * 250 - load = penalty - } - return load -} - -// statistics is info for log -type statistic struct { - addr string - score float64 - cs uint64 - lantency uint64 - cpu uint64 - inflight int64 - reqs int64 -} - -type p2cPickerBuilder struct{} - -func (*p2cPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker { - p := &p2cPicker{ - colors: make(map[string]*p2cPicker), - r: rand.New(rand.NewSource(time.Now().UnixNano())), - } - for addr, sc := range readySCs { - meta, ok := addr.Metadata.(wmd.MD) - if !ok { - meta = wmd.MD{ - Weight: 10, - } - } - subc := &subConn{ - conn: sc, - addr: addr, - meta: meta, - - svrCPU: 500, - lag: 0, - success: 1000, - inflight: 1, - } - if meta.Color == "" { - p.subConns = append(p.subConns, subc) - continue - } - // if color not empty, use color picker - cp, ok := p.colors[meta.Color] - if !ok { - cp = &p2cPicker{r: rand.New(rand.NewSource(time.Now().UnixNano()))} - p.colors[meta.Color] = cp - } - cp.subConns = append(cp.subConns, subc) - } - return p -} - -type p2cPicker struct { - // subConns is the snapshot of the weighted-roundrobin balancer when this picker was - // created. The slice is immutable. Each Get() will do a round robin - // selection from it and return the selected SubConn. 
- subConns []*subConn - colors map[string]*p2cPicker - logTs int64 - r *rand.Rand - lk sync.Mutex -} - -func (p *p2cPicker) Pick(ctx context.Context, opts balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) { - // FIXME refactor to unify the color logic - color := nmd.String(ctx, nmd.Color) - if color == "" && env.Color != "" { - color = env.Color - } - if color != "" { - if cp, ok := p.colors[color]; ok { - return cp.pick(ctx, opts) - } - } - return p.pick(ctx, opts) -} - -// choose two distinct nodes -func (p *p2cPicker) prePick() (nodeA *subConn, nodeB *subConn) { - for i := 0; i < 3; i++ { - p.lk.Lock() - a := p.r.Intn(len(p.subConns)) - b := p.r.Intn(len(p.subConns) - 1) - p.lk.Unlock() - if b >= a { - b = b + 1 - } - nodeA, nodeB = p.subConns[a], p.subConns[b] - if nodeA.valid() || nodeB.valid() { - break - } - } - return -} - -func (p *p2cPicker) pick(ctx context.Context, opts balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) { - var pc, upc *subConn - start := time.Now().UnixNano() - - if len(p.subConns) <= 0 { - return nil, nil, balancer.ErrNoSubConnAvailable - } else if len(p.subConns) == 1 { - pc = p.subConns[0] - } else { - nodeA, nodeB := p.prePick() - // meta.Weight为服务发布者在disocvery中设置的权重 - if nodeA.load()*nodeB.health()*nodeB.meta.Weight > nodeB.load()*nodeA.health()*nodeA.meta.Weight { - pc, upc = nodeB, nodeA - } else { - pc, upc = nodeA, nodeB - } - // 如果选中的节点,在forceGap期间内没有被选中一次,那么强制一次 - // 利用强制的机会,来触发成功率、延迟的衰减 - // 原子锁conn.pick保证并发安全,放行一次 - pick := atomic.LoadInt64(&upc.pick) - if start-pick > forceGap && atomic.CompareAndSwapInt64(&upc.pick, pick, start) { - pc = upc - } - } - - // 节点未发生切换才更新pick时间 - if pc != upc { - atomic.StoreInt64(&pc.pick, start) - } - atomic.AddInt64(&pc.inflight, 1) - atomic.AddInt64(&pc.reqs, 1) - return pc.conn, func(di balancer.DoneInfo) { - atomic.AddInt64(&pc.inflight, -1) - now := time.Now().UnixNano() - // get moving average ratio w - stamp := atomic.SwapInt64(&pc.stamp, now) - td := now - stamp - if td < 0 { - td = 0 - } - w := math.Exp(float64(-td) / float64(tau)) - - lag := now - start - if lag < 0 { - lag = 0 - } - oldLag := atomic.LoadUint64(&pc.lag) - if oldLag == 0 { - w = 0.0 - } - lag = int64(float64(oldLag)*w + float64(lag)*(1.0-w)) - atomic.StoreUint64(&pc.lag, uint64(lag)) - - success := uint64(1000) // error value ,if error set 1 - if di.Err != nil { - if st, ok := status.FromError(di.Err); ok { - // only counter the local grpc error, ignore any business error - if st.Code() != codes.Unknown && st.Code() != codes.OK { - success = 0 - } - } - } - oldSuc := atomic.LoadUint64(&pc.success) - success = uint64(float64(oldSuc)*w + float64(success)*(1.0-w)) - atomic.StoreUint64(&pc.success, success) - - trailer := di.Trailer - if strs, ok := trailer[wmd.CPUUsage]; ok { - if cpu, err2 := strconv.ParseUint(strs[0], 10, 64); err2 == nil && cpu > 0 { - atomic.StoreUint64(&pc.svrCPU, cpu) - } - } - - logTs := atomic.LoadInt64(&p.logTs) - if now-logTs > int64(time.Second*3) { - if atomic.CompareAndSwapInt64(&p.logTs, logTs, now) { - p.printStats() - } - } - }, nil -} - -func (p *p2cPicker) printStats() { - if len(p.subConns) <= 0 { - return - } - stats := make([]statistic, 0, len(p.subConns)) - for _, conn := range p.subConns { - var stat statistic - stat.addr = conn.addr.Addr - stat.cpu = atomic.LoadUint64(&conn.svrCPU) - stat.cs = atomic.LoadUint64(&conn.success) - stat.inflight = atomic.LoadInt64(&conn.inflight) - stat.lantency = atomic.LoadUint64(&conn.lag) - stat.reqs = atomic.SwapInt64(&conn.reqs, 
0) - load := conn.load() - if load != 0 { - stat.score = float64(stat.cs*conn.meta.Weight*1e8) / float64(load) - } - stats = append(stats, stat) - } - log.Info("p2c %s : %+v", p.subConns[0].addr.ServerName, stats) - //fmt.Printf("%+v\n", stats) -} diff --git a/pkg/net/rpc/warden/balancer/p2c/p2c_test.go b/pkg/net/rpc/warden/balancer/p2c/p2c_test.go deleted file mode 100644 index c34d87fbd..000000000 --- a/pkg/net/rpc/warden/balancer/p2c/p2c_test.go +++ /dev/null @@ -1,345 +0,0 @@ -package p2c - -import ( - "context" - "flag" - "fmt" - "math/rand" - "strconv" - "sync/atomic" - "testing" - "time" - - "github.com/go-kratos/kratos/pkg/conf/env" - - nmd "github.com/go-kratos/kratos/pkg/net/metadata" - wmeta "github.com/go-kratos/kratos/pkg/net/rpc/warden/internal/metadata" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/status" -) - -var serverNum int -var cliNum int -var concurrency int -var extraLoad int64 -var extraDelay int64 -var extraWeight uint64 - -func init() { - flag.IntVar(&serverNum, "snum", 6, "-snum 6") - flag.IntVar(&cliNum, "cnum", 12, "-cnum 12") - flag.IntVar(&concurrency, "concurrency", 10, "-cc 10") - flag.Int64Var(&extraLoad, "exload", 3, "-exload 3") - flag.Int64Var(&extraDelay, "exdelay", 250, "-exdelay 250") - flag.Uint64Var(&extraWeight, "extraWeight", 50, "-exdelay 50") -} - -type testSubConn struct { - addr resolver.Address - wait chan struct{} - //statics - reqs int64 - usage int64 - cpu int64 - prevReq int64 - prevUsage int64 - //control params - loadJitter int64 - delayJitter int64 -} - -func newTestSubConn(addr string, weight uint64, color string) (sc *testSubConn) { - sc = &testSubConn{ - addr: resolver.Address{ - Addr: addr, - Metadata: wmeta.MD{ - Weight: weight, - Color: color, - }, - }, - wait: make(chan struct{}, 1000), - } - go func() { - for { - for i := 0; i < 210; i++ { - <-sc.wait - } - time.Sleep(time.Millisecond * 20) - } - }() - - return -} - -func (s *testSubConn) connect(ctx context.Context) { - time.Sleep(time.Millisecond * 15) - //add qps counter when request come in - atomic.AddInt64(&s.reqs, 1) - select { - case <-ctx.Done(): - return - case s.wait <- struct{}{}: - atomic.AddInt64(&s.usage, 1) - } - load := atomic.LoadInt64(&s.loadJitter) - if load > 0 { - for i := 0; i <= rand.Intn(int(load)); i++ { - select { - case <-ctx.Done(): - return - case s.wait <- struct{}{}: - atomic.AddInt64(&s.usage, 1) - } - } - } - delay := atomic.LoadInt64(&s.delayJitter) - if delay > 0 { - delay = rand.Int63n(delay) - time.Sleep(time.Millisecond * time.Duration(delay)) - } -} - -func (s *testSubConn) UpdateAddresses([]resolver.Address) { - -} - -// Connect starts the connecting for this SubConn. 
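Two details of the deleted p2cPicker.pick above deserve a worked example: the winner of the two sampled nodes is the one whose load, weighted by the other node's health and discovery weight, is lower; and each node's latency/success statistics decay through an exponentially weighted moving average with w = e^(−Δt/τ), τ = 600ms, so a stale sample loses half its influence after roughly τ·ln 2 ≈ 416ms. A standalone sketch with made-up numbers:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

const tau = float64(600 * time.Millisecond)

// preferA mirrors the comparison in pick: node A wins when
// loadA*healthB*weightB <= loadB*healthA*weightA, i.e. lower load and
// higher health/weight are both rewarded.
func preferA(loadA, healthA, weightA, loadB, healthB, weightB float64) bool {
	return loadA*healthB*weightB <= loadB*healthA*weightA
}

func main() {
	// A: load 100, health 900, weight 10; B: load 250, health 800, weight 10.
	fmt.Println(preferA(100, 900, 10, 250, 800, 10)) // true: A carries less load

	// EWMA update of the observed latency: the longer since the last sample,
	// the smaller w becomes and the less the stale value counts.
	dt := float64(300 * time.Millisecond)
	w := math.Exp(-dt / tau)
	oldLag, newLag := 40.0, 10.0 // milliseconds
	fmt.Printf("smoothed lag: %.1f ms (w = %.2f)\n", oldLag*w+newLag*(1-w), w)
}
```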
-func (s *testSubConn) Connect() { - -} - -func TestBalancerPick(t *testing.T) { - scs := map[resolver.Address]balancer.SubConn{} - sc1 := &testSubConn{ - addr: resolver.Address{ - Addr: "test1", - Metadata: wmeta.MD{ - Weight: 8, - }, - }, - } - sc2 := &testSubConn{ - addr: resolver.Address{ - Addr: "test2", - Metadata: wmeta.MD{ - Weight: 4, - Color: "red", - }, - }, - } - sc3 := &testSubConn{ - addr: resolver.Address{ - Addr: "test3", - Metadata: wmeta.MD{ - Weight: 2, - Color: "red", - }, - }, - } - sc4 := &testSubConn{ - addr: resolver.Address{ - Addr: "test4", - Metadata: wmeta.MD{ - Weight: 2, - Color: "purple", - }, - }, - } - scs[sc1.addr] = sc1 - scs[sc2.addr] = sc2 - scs[sc3.addr] = sc3 - scs[sc4.addr] = sc4 - b := &p2cPickerBuilder{} - picker := b.Build(scs) - res := []string{"test1", "test1", "test1", "test1"} - for i := 0; i < 3; i++ { - conn, _, err := picker.Pick(context.Background(), balancer.PickInfo{}) - if err != nil { - t.Fatalf("picker.Pick failed!idx:=%d", i) - } - sc := conn.(*testSubConn) - if sc.addr.Addr != res[i] { - t.Fatalf("the subconn picked(%s),but expected(%s)", sc.addr.Addr, res[i]) - } - } - - ctx := nmd.NewContext(context.Background(), nmd.New(map[string]interface{}{"color": "black"})) - for i := 0; i < 4; i++ { - conn, _, err := picker.Pick(ctx, balancer.PickInfo{}) - if err != nil { - t.Fatalf("picker.Pick failed!idx:=%d", i) - } - sc := conn.(*testSubConn) - if sc.addr.Addr != res[i] { - t.Fatalf("the (%d) subconn picked(%s),but expected(%s)", i, sc.addr.Addr, res[i]) - } - } - - env.Color = "purple" - ctx2 := context.Background() - for i := 0; i < 4; i++ { - conn, _, err := picker.Pick(ctx2, balancer.PickInfo{}) - if err != nil { - t.Fatalf("picker.Pick failed!idx:=%d", i) - } - sc := conn.(*testSubConn) - if sc.addr.Addr != "test4" { - t.Fatalf("the (%d) subconn picked(%s),but expected(%s)", i, sc.addr.Addr, res[i]) - } - } -} - -func Benchmark_Wrr(b *testing.B) { - scs := map[resolver.Address]balancer.SubConn{} - for i := 0; i < 50; i++ { - addr := resolver.Address{ - Addr: fmt.Sprintf("addr_%d", i), - Metadata: wmeta.MD{Weight: 10}, - } - scs[addr] = &testSubConn{addr: addr} - } - wpb := &p2cPickerBuilder{} - picker := wpb.Build(scs) - opt := balancer.PickInfo{} - ctx := context.Background() - for idx := 0; idx < b.N; idx++ { - _, done, err := picker.Pick(ctx, opt) - if err != nil { - done(balancer.DoneInfo{}) - } - } -} - -func TestChaosPick(t *testing.T) { - flag.Parse() - t.Logf("start chaos test!svrNum:%d cliNum:%d concurrency:%d exLoad:%d exDelay:%d\n", serverNum, cliNum, concurrency, extraLoad, extraDelay) - c := newController(serverNum, cliNum) - c.launch(concurrency) - go c.updateStatics() - go c.control(extraLoad, extraDelay) - time.Sleep(time.Second * 50) -} - -func newController(svrNum int, cliNum int) *controller { - //new servers - servers := []*testSubConn{} - var weight uint64 = 10 - if extraWeight > 0 { - weight = extraWeight - } - for i := 0; i < svrNum; i++ { - weight += extraWeight - sc := newTestSubConn(fmt.Sprintf("addr_%d", i), weight, "") - servers = append(servers, sc) - } - //new clients - var clients []balancer.Picker - scs := map[resolver.Address]balancer.SubConn{} - for _, v := range servers { - scs[v.addr] = v - } - for i := 0; i < cliNum; i++ { - wpb := &p2cPickerBuilder{} - picker := wpb.Build(scs) - clients = append(clients, picker) - } - - c := &controller{ - servers: servers, - clients: clients, - } - return c -} - -type controller struct { - servers []*testSubConn - clients []balancer.Picker -} - -func (c 
*controller) launch(concurrency int) { - opt := balancer.PickInfo{} - bkg := context.Background() - for i := range c.clients { - for j := 0; j < concurrency; j++ { - picker := c.clients[i] - go func() { - for { - ctx, cancel := context.WithTimeout(bkg, time.Millisecond*250) - sc, done, _ := picker.Pick(ctx, opt) - server := sc.(*testSubConn) - server.connect(ctx) - var err error - if ctx.Err() != nil { - err = status.Errorf(codes.DeadlineExceeded, "dead") - } - cancel() - cpu := atomic.LoadInt64(&server.cpu) - md := make(map[string]string) - md[wmeta.CPUUsage] = strconv.FormatInt(cpu, 10) - done(balancer.DoneInfo{Trailer: metadata.New(md), Err: err}) - time.Sleep(time.Millisecond * 10) - } - }() - } - } -} - -func (c *controller) updateStatics() { - for { - time.Sleep(time.Millisecond * 500) - for _, sc := range c.servers { - usage := atomic.LoadInt64(&sc.usage) - avgCpu := (usage - sc.prevUsage) * 2 - atomic.StoreInt64(&sc.cpu, avgCpu) - sc.prevUsage = usage - } - } -} - -func (c *controller) control(extraLoad, extraDelay int64) { - var chaos int - for { - fmt.Printf("\n") - //make some chaos - n := rand.Intn(3) - chaos = n + 1 - for i := 0; i < chaos; i++ { - if extraLoad > 0 { - degree := rand.Int63n(extraLoad) - degree++ - atomic.StoreInt64(&c.servers[i].loadJitter, degree) - fmt.Printf("set addr_%d load:%d ", i, degree) - } - if extraDelay > 0 { - degree := rand.Int63n(extraDelay) - atomic.StoreInt64(&c.servers[i].delayJitter, degree) - fmt.Printf("set addr_%d delay:%dms ", i, degree) - } - } - fmt.Printf("\n") - sleep := int64(5) - time.Sleep(time.Second * time.Duration(sleep)) - for _, sc := range c.servers { - req := atomic.LoadInt64(&sc.reqs) - qps := (req - sc.prevReq) / sleep - wait := len(sc.wait) - sc.prevReq = req - fmt.Printf("%s qps:%d waits:%d\n", sc.addr.Addr, qps, wait) - } - for _, picker := range c.clients { - p := picker.(*p2cPicker) - p.printStats() - } - fmt.Printf("\n") - //reset chaos - for i := 0; i < chaos; i++ { - atomic.StoreInt64(&c.servers[i].loadJitter, 0) - atomic.StoreInt64(&c.servers[i].delayJitter, 0) - } - chaos = 0 - } -} diff --git a/pkg/net/rpc/warden/balancer/wrr/README.md b/pkg/net/rpc/warden/balancer/wrr/README.md deleted file mode 100644 index 9483e71dd..000000000 --- a/pkg/net/rpc/warden/balancer/wrr/README.md +++ /dev/null @@ -1,5 +0,0 @@ -#### warden/balancer/wrr - -##### 项目简介 - -warden 的 weighted round robin负载均衡模块,主要用于为每个RPC请求返回一个Server节点以供调用 diff --git a/pkg/net/rpc/warden/balancer/wrr/wrr.go b/pkg/net/rpc/warden/balancer/wrr/wrr.go deleted file mode 100644 index 605f74010..000000000 --- a/pkg/net/rpc/warden/balancer/wrr/wrr.go +++ /dev/null @@ -1,302 +0,0 @@ -package wrr - -import ( - "context" - "math" - "strconv" - "sync" - "sync/atomic" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/base" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/status" - - "github.com/go-kratos/kratos/pkg/conf/env" - "github.com/go-kratos/kratos/pkg/log" - nmd "github.com/go-kratos/kratos/pkg/net/metadata" - wmeta "github.com/go-kratos/kratos/pkg/net/rpc/warden/internal/metadata" - "github.com/go-kratos/kratos/pkg/stat/metric" -) - -var _ base.PickerBuilder = &wrrPickerBuilder{} -var _ balancer.Picker = &wrrPicker{} - -// var dwrrFeature feature.Feature = "dwrr" - -// Name is the name of round_robin balancer. -const Name = "wrr" - -// newBuilder creates a new weighted-roundrobin balancer builder. 
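The deleted wrr README says, roughly: this is warden's weighted round robin load-balancing module, used to return one server node for each RPC request. The picker in wrr.go below uses the nginx-style smooth weighted round-robin; a self-contained sketch of just that selection loop:

```go
package main

import "fmt"

type node struct {
	name     string
	ewt, cwt int64 // effective and current weight, as in the deleted subConn
}

// pick adds each node's effective weight to its current weight, takes the
// node with the largest current weight, and subtracts the total weight from
// the winner, which interleaves heavy and light nodes instead of bursting.
func pick(nodes []*node) *node {
	var total int64
	var best *node
	for _, n := range nodes {
		total += n.ewt
		n.cwt += n.ewt
		if best == nil || best.cwt < n.cwt {
			best = n
		}
	}
	best.cwt -= total
	return best
}

func main() {
	nodes := []*node{{name: "a", ewt: 5}, {name: "b", ewt: 1}, {name: "c", ewt: 1}}
	for i := 0; i < 7; i++ {
		fmt.Print(pick(nodes).name, " ") // a a b a c a a
	}
	fmt.Println()
}
```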
-func newBuilder() balancer.Builder { - return base.NewBalancerBuilder(Name, &wrrPickerBuilder{}) -} - -func init() { - //feature.DefaultGate.Add(map[feature.Feature]feature.Spec{ - // dwrrFeature: {Default: false}, - //}) - - balancer.Register(newBuilder()) -} - -type serverInfo struct { - cpu int64 - success uint64 // float64 bits -} - -type subConn struct { - conn balancer.SubConn - addr resolver.Address - meta wmeta.MD - - err metric.RollingCounter - latency metric.RollingGauge - si serverInfo - // effective weight - ewt int64 - // current weight - cwt int64 - // last score - score float64 -} - -func (c *subConn) errSummary() (err int64, req int64) { - c.err.Reduce(func(iterator metric.Iterator) float64 { - for iterator.Next() { - bucket := iterator.Bucket() - req += bucket.Count - for _, p := range bucket.Points { - err += int64(p) - } - } - return 0 - }) - return -} - -func (c *subConn) latencySummary() (latency float64, count int64) { - c.latency.Reduce(func(iterator metric.Iterator) float64 { - for iterator.Next() { - bucket := iterator.Bucket() - count += bucket.Count - for _, p := range bucket.Points { - latency += p - } - } - return 0 - }) - return latency / float64(count), count -} - -// statistics is info for log -type statistics struct { - addr string - ewt int64 - cs float64 - ss float64 - latency float64 - cpu float64 - req int64 -} - -// Stats is grpc Interceptor for client to collect server stats -func Stats() grpc.UnaryClientInterceptor { - return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) (err error) { - var ( - trailer metadata.MD - md nmd.MD - ok bool - ) - if md, ok = nmd.FromContext(ctx); !ok { - md = nmd.MD{} - } else { - md = md.Copy() - } - ctx = nmd.NewContext(ctx, md) - opts = append(opts, grpc.Trailer(&trailer)) - - err = invoker(ctx, method, req, reply, cc, opts...) - - conn, ok := md["conn"].(*subConn) - if !ok { - return - } - if strs, ok := trailer[wmeta.CPUUsage]; ok { - if cpu, err2 := strconv.ParseInt(strs[0], 10, 64); err2 == nil && cpu > 0 { - atomic.StoreInt64(&conn.si.cpu, cpu) - } - } - return - } -} - -type wrrPickerBuilder struct{} - -func (*wrrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker { - p := &wrrPicker{ - colors: make(map[string]*wrrPicker), - } - for addr, sc := range readySCs { - meta, ok := addr.Metadata.(wmeta.MD) - if !ok { - meta = wmeta.MD{ - Weight: 10, - } - } - subc := &subConn{ - conn: sc, - addr: addr, - - meta: meta, - ewt: int64(meta.Weight), - score: -1, - - err: metric.NewRollingCounter(metric.RollingCounterOpts{ - Size: 10, - BucketDuration: time.Millisecond * 100, - }), - latency: metric.NewRollingGauge(metric.RollingGaugeOpts{ - Size: 10, - BucketDuration: time.Millisecond * 100, - }), - - si: serverInfo{cpu: 500, success: math.Float64bits(1)}, - } - if meta.Color == "" { - p.subConns = append(p.subConns, subc) - continue - } - // if color not empty, use color picker - cp, ok := p.colors[meta.Color] - if !ok { - cp = &wrrPicker{} - p.colors[meta.Color] = cp - } - cp.subConns = append(cp.subConns, subc) - } - return p -} - -type wrrPicker struct { - // subConns is the snapshot of the weighted-roundrobin balancer when this picker was - // created. The slice is immutable. Each Get() will do a round robin - // selection from it and return the selected SubConn. 
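Beyond the static weight from service discovery, the Done callback just below keeps re-deriving each node's effective weight from client-observed statistics: score = √(cs·ss²·10⁹ / (latency·cpu)) and ewt = score·weight, so slow or CPU-loaded servers gradually receive less traffic. A rough numeric sketch; the units follow the deleted code (latency in 0.1ms buckets, cpu as the server-reported usage, initialized to 500 and assumed here to be roughly permille):

```go
package main

import (
	"fmt"
	"math"
)

// score follows the formula used in the Done callback below: cs and ss are
// the client- and server-side success ratios in [0, 1].
func score(cs, ss, latency, cpu float64) float64 {
	return math.Sqrt(cs * ss * ss * 1e9 / (latency * cpu))
}

func main() {
	const weight = 10.0
	fast := score(1, 1, 100, 500) // 10ms latency, cpu 500 -> score ≈ 141
	slow := score(1, 1, 400, 800) // 40ms latency, cpu 800 -> score ≈ 56
	fmt.Printf("ewt fast=%d slow=%d\n", int64(fast*weight), int64(slow*weight))
	// The faster node ends up with about 2.5x the effective weight, and the
	// smooth weighted round-robin loop then spreads requests in that ratio.
}
```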
- subConns []*subConn - colors map[string]*wrrPicker - updateAt int64 - - mu sync.Mutex -} - -func (p *wrrPicker) Pick(ctx context.Context, opts balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) { - // FIXME refactor to unify the color logic - color := nmd.String(ctx, nmd.Color) - if color == "" && env.Color != "" { - color = env.Color - } - if color != "" { - if cp, ok := p.colors[color]; ok { - return cp.pick(ctx, opts) - } - } - return p.pick(ctx, opts) -} - -func (p *wrrPicker) pick(ctx context.Context, opts balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) { - var ( - conn *subConn - totalWeight int64 - ) - if len(p.subConns) <= 0 { - return nil, nil, balancer.ErrNoSubConnAvailable - } - p.mu.Lock() - // nginx wrr load balancing algorithm: http://blog.csdn.net/zhangskd/article/details/50194069 - for _, sc := range p.subConns { - totalWeight += sc.ewt - sc.cwt += sc.ewt - if conn == nil || conn.cwt < sc.cwt { - conn = sc - } - } - conn.cwt -= totalWeight - p.mu.Unlock() - start := time.Now() - if cmd, ok := nmd.FromContext(ctx); ok { - cmd["conn"] = conn - } - //if !feature.DefaultGate.Enabled(dwrrFeature) { - // return conn.conn, nil, nil - //} - return conn.conn, func(di balancer.DoneInfo) { - ev := int64(0) // error value ,if error set 1 - if di.Err != nil { - if st, ok := status.FromError(di.Err); ok { - // only counter the local grpc error, ignore any business error - if st.Code() != codes.Unknown && st.Code() != codes.OK { - ev = 1 - } - } - } - conn.err.Add(ev) - - now := time.Now() - conn.latency.Add(now.Sub(start).Nanoseconds() / 1e5) - u := atomic.LoadInt64(&p.updateAt) - if now.UnixNano()-u < int64(time.Second) { - return - } - if !atomic.CompareAndSwapInt64(&p.updateAt, u, now.UnixNano()) { - return - } - var ( - stats = make([]statistics, len(p.subConns)) - count int - total float64 - ) - for i, conn := range p.subConns { - cpu := float64(atomic.LoadInt64(&conn.si.cpu)) - ss := math.Float64frombits(atomic.LoadUint64(&conn.si.success)) - errc, req := conn.errSummary() - lagv, lagc := conn.latencySummary() - - if req > 0 && lagc > 0 && lagv > 0 { - // client-side success ratio - cs := 1 - (float64(errc) / float64(req)) - if cs <= 0 { - cs = 0.1 - } else if cs <= 0.2 && req <= 5 { - cs = 0.2 - } - conn.score = math.Sqrt((cs * ss * ss * 1e9) / (lagv * cpu)) - stats[i] = statistics{cs: cs, ss: ss, latency: lagv, cpu: cpu, req: req} - } - stats[i].addr = conn.addr.Addr - - if conn.score > 0 { - total += conn.score - count++ - } - } - // count must be greater than 1,otherwise will lead ewt to 0 - if count < 2 { - return - } - avgscore := total / float64(count) - p.mu.Lock() - for i, conn := range p.subConns { - if conn.score <= 0 { - conn.score = avgscore - } - conn.ewt = int64(conn.score * float64(conn.meta.Weight)) - stats[i].ewt = conn.ewt - } - p.mu.Unlock() - log.Info("warden wrr(%s): %+v", conn.addr.ServerName, stats) - }, nil -} diff --git a/pkg/net/rpc/warden/balancer/wrr/wrr_test.go b/pkg/net/rpc/warden/balancer/wrr/wrr_test.go deleted file mode 100644 index e7cc1cda1..000000000 --- a/pkg/net/rpc/warden/balancer/wrr/wrr_test.go +++ /dev/null @@ -1,190 +0,0 @@ -package wrr - -import ( - "context" - "fmt" - "testing" - "time" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/go-kratos/kratos/pkg/conf/env" - nmd "github.com/go-kratos/kratos/pkg/net/metadata" - wmeta "github.com/go-kratos/kratos/pkg/net/rpc/warden/internal/metadata" - "github.com/go-kratos/kratos/pkg/stat/metric" - - 
"github.com/stretchr/testify/assert" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/resolver" -) - -type testSubConn struct { - addr resolver.Address -} - -func (s *testSubConn) UpdateAddresses([]resolver.Address) { - -} - -// Connect starts the connecting for this SubConn. -func (s *testSubConn) Connect() { - fmt.Println(s.addr.Addr) -} - -func TestBalancerPick(t *testing.T) { - scs := map[resolver.Address]balancer.SubConn{} - sc1 := &testSubConn{ - addr: resolver.Address{ - Addr: "test1", - Metadata: wmeta.MD{ - Weight: 8, - }, - }, - } - sc2 := &testSubConn{ - addr: resolver.Address{ - Addr: "test2", - Metadata: wmeta.MD{ - Weight: 4, - Color: "red", - }, - }, - } - sc3 := &testSubConn{ - addr: resolver.Address{ - Addr: "test3", - Metadata: wmeta.MD{ - Weight: 2, - Color: "red", - }, - }, - } - scs[sc1.addr] = sc1 - scs[sc2.addr] = sc2 - scs[sc3.addr] = sc3 - b := &wrrPickerBuilder{} - picker := b.Build(scs) - res := []string{"test1", "test1", "test1", "test1"} - for i := 0; i < 3; i++ { - conn, _, err := picker.Pick(context.Background(), balancer.PickInfo{}) - if err != nil { - t.Fatalf("picker.Pick failed!idx:=%d", i) - } - sc := conn.(*testSubConn) - if sc.addr.Addr != res[i] { - t.Fatalf("the subconn picked(%s),but expected(%s)", sc.addr.Addr, res[i]) - } - } - res2 := []string{"test2", "test3", "test2", "test2", "test3", "test2"} - ctx := nmd.NewContext(context.Background(), nmd.New(map[string]interface{}{"color": "red"})) - for i := 0; i < 6; i++ { - conn, _, err := picker.Pick(ctx, balancer.PickInfo{}) - if err != nil { - t.Fatalf("picker.Pick failed!idx:=%d", i) - } - sc := conn.(*testSubConn) - if sc.addr.Addr != res2[i] { - t.Fatalf("the (%d) subconn picked(%s),but expected(%s)", i, sc.addr.Addr, res2[i]) - } - } - ctx = nmd.NewContext(context.Background(), nmd.New(map[string]interface{}{"color": "black"})) - for i := 0; i < 4; i++ { - conn, _, err := picker.Pick(ctx, balancer.PickInfo{}) - if err != nil { - t.Fatalf("picker.Pick failed!idx:=%d", i) - } - sc := conn.(*testSubConn) - if sc.addr.Addr != res[i] { - t.Fatalf("the (%d) subconn picked(%s),but expected(%s)", i, sc.addr.Addr, res[i]) - } - } - - // test for env color - ctx = context.Background() - env.Color = "red" - for i := 0; i < 6; i++ { - conn, _, err := picker.Pick(ctx, balancer.PickInfo{}) - if err != nil { - t.Fatalf("picker.Pick failed!idx:=%d", i) - } - sc := conn.(*testSubConn) - if sc.addr.Addr != res2[i] { - t.Fatalf("the (%d) subconn picked(%s),but expected(%s)", i, sc.addr.Addr, res2[i]) - } - } -} - -func TestBalancerDone(t *testing.T) { - scs := map[resolver.Address]balancer.SubConn{} - sc1 := &testSubConn{ - addr: resolver.Address{ - Addr: "test1", - Metadata: wmeta.MD{ - Weight: 8, - }, - }, - } - scs[sc1.addr] = sc1 - b := &wrrPickerBuilder{} - picker := b.Build(scs) - - _, done, _ := picker.Pick(context.Background(), balancer.PickInfo{}) - time.Sleep(100 * time.Millisecond) - done(balancer.DoneInfo{Err: status.Errorf(codes.Unknown, "test")}) - err, req := picker.(*wrrPicker).subConns[0].errSummary() - assert.Equal(t, int64(0), err) - assert.Equal(t, int64(1), req) - - latency, count := picker.(*wrrPicker).subConns[0].latencySummary() - expectLatency := float64(100*time.Millisecond) / 1e5 - if latency < expectLatency || latency > (expectLatency+500) { - t.Fatalf("latency is less than 100ms or greater than 150ms, %f", latency) - } - assert.Equal(t, int64(1), count) - - _, done, _ = picker.Pick(context.Background(), balancer.PickInfo{}) - done(balancer.DoneInfo{Err: 
status.Errorf(codes.Aborted, "test")}) - err, req = picker.(*wrrPicker).subConns[0].errSummary() - assert.Equal(t, int64(1), err) - assert.Equal(t, int64(2), req) -} - -func TestErrSummary(t *testing.T) { - sc := &subConn{ - err: metric.NewRollingCounter(metric.RollingCounterOpts{ - Size: 10, - BucketDuration: time.Millisecond * 100, - }), - latency: metric.NewRollingGauge(metric.RollingGaugeOpts{ - Size: 10, - BucketDuration: time.Millisecond * 100, - }), - } - for i := 0; i < 10; i++ { - sc.err.Add(0) - sc.err.Add(1) - } - err, req := sc.errSummary() - assert.Equal(t, int64(10), err) - assert.Equal(t, int64(20), req) -} - -func TestLatencySummary(t *testing.T) { - sc := &subConn{ - err: metric.NewRollingCounter(metric.RollingCounterOpts{ - Size: 10, - BucketDuration: time.Millisecond * 100, - }), - latency: metric.NewRollingGauge(metric.RollingGaugeOpts{ - Size: 10, - BucketDuration: time.Millisecond * 100, - }), - } - for i := 1; i <= 100; i++ { - sc.latency.Add(int64(i)) - } - latency, count := sc.latencySummary() - assert.Equal(t, 50.50, latency) - assert.Equal(t, int64(100), count) -} diff --git a/pkg/net/rpc/warden/client.go b/pkg/net/rpc/warden/client.go deleted file mode 100644 index 995a907d3..000000000 --- a/pkg/net/rpc/warden/client.go +++ /dev/null @@ -1,381 +0,0 @@ -package warden - -import ( - "context" - "fmt" - "net/url" - "os" - "strconv" - "strings" - "sync" - "time" - - "github.com/go-kratos/kratos/pkg/net/rpc/warden/resolver" - "github.com/go-kratos/kratos/pkg/net/rpc/warden/resolver/direct" - - "github.com/go-kratos/kratos/pkg/conf/env" - "github.com/go-kratos/kratos/pkg/conf/flagvar" - "github.com/go-kratos/kratos/pkg/ecode" - "github.com/go-kratos/kratos/pkg/naming" - nmd "github.com/go-kratos/kratos/pkg/net/metadata" - "github.com/go-kratos/kratos/pkg/net/netutil/breaker" - "github.com/go-kratos/kratos/pkg/net/rpc/warden/balancer/p2c" - "github.com/go-kratos/kratos/pkg/net/rpc/warden/internal/status" - "github.com/go-kratos/kratos/pkg/net/trace" - xtime "github.com/go-kratos/kratos/pkg/time" - - "github.com/pkg/errors" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" - gstatus "google.golang.org/grpc/status" -) - -var _grpcTarget flagvar.StringVars - -var ( - _once sync.Once - _defaultCliConf = &ClientConfig{ - Dial: xtime.Duration(time.Second * 10), - Timeout: xtime.Duration(time.Millisecond * 250), - Subset: 50, - KeepAliveInterval: xtime.Duration(time.Second * 60), - KeepAliveTimeout: xtime.Duration(time.Second * 20), - } - _defaultClient *Client -) - -func baseMetadata() metadata.MD { - gmd := metadata.MD{nmd.Caller: []string{env.AppID}} - if env.Color != "" { - gmd[nmd.Color] = []string{env.Color} - } - return gmd -} - -// Register direct resolver by default to handle direct:// scheme. -func init() { - resolver.Register(direct.New()) -} - -// ClientConfig is rpc client conf. -type ClientConfig struct { - Dial xtime.Duration - Timeout xtime.Duration - Breaker *breaker.Config - Method map[string]*ClientConfig - Clusters []string - Zone string - Subset int - NonBlock bool - KeepAliveInterval xtime.Duration - KeepAliveTimeout xtime.Duration - KeepAliveWithoutStream bool -} - -// Client is the framework's client side instance, it contains the ctx, opt and interceptors. -// Create an instance of Client, by using NewClient(). 
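Given the ClientConfig fields above, a hedged configuration sketch: anything left zero falls back to the defaults in _defaultCliConf (10s dial, 250ms timeout, subset 50, 60s/20s keepalive), and per-method overrides are keyed by the full gRPC method name (the entry below is hypothetical):

```go
package main

import (
	"time"

	"github.com/go-kratos/kratos/pkg/net/netutil/breaker"
	"github.com/go-kratos/kratos/pkg/net/rpc/warden"
	xtime "github.com/go-kratos/kratos/pkg/time"
)

func main() {
	cfg := &warden.ClientConfig{
		Timeout: xtime.Duration(250 * time.Millisecond),
		Breaker: &breaker.Config{
			Window:  xtime.Duration(3 * time.Second),
			Bucket:  10,
			Request: 100,
			K:       1.5,
		},
		// Keyed by the full method name, "/package.Service/Method";
		// this entry is made up for illustration.
		Method: map[string]*warden.ClientConfig{
			"/demo.Greeter/HeavyQuery": {Timeout: xtime.Duration(2 * time.Second)},
		},
	}
	cli := warden.NewClient(cfg)
	_ = cli
}
```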
-type Client struct { - conf *ClientConfig - breaker *breaker.Group - mutex sync.RWMutex - - opts []grpc.DialOption - handlers []grpc.UnaryClientInterceptor -} - -// TimeoutCallOption timeout option. -type TimeoutCallOption struct { - *grpc.EmptyCallOption - Timeout time.Duration -} - -// WithTimeoutCallOption can override the timeout in ctx and the timeout in the configuration file -func WithTimeoutCallOption(timeout time.Duration) *TimeoutCallOption { - return &TimeoutCallOption{&grpc.EmptyCallOption{}, timeout} -} - -// handle returns a new unary client interceptor for OpenTracing\Logging\LinkTimeout. -func (c *Client) handle() grpc.UnaryClientInterceptor { - return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) (err error) { - var ( - ok bool - t trace.Trace - gmd metadata.MD - conf *ClientConfig - cancel context.CancelFunc - addr string - p peer.Peer - ) - var ec ecode.Codes = ecode.OK - // apm tracing - if t, ok = trace.FromContext(ctx); ok { - t = t.Fork("", method) - defer t.Finish(&err) - } - - // setup metadata - gmd = baseMetadata() - trace.Inject(t, trace.GRPCFormat, gmd) - c.mutex.RLock() - if conf, ok = c.conf.Method[method]; !ok { - conf = c.conf - } - c.mutex.RUnlock() - brk := c.breaker.Get(method) - if err = brk.Allow(); err != nil { - _metricClientReqCodeTotal.Inc(method, "breaker") - return - } - defer onBreaker(brk, &err) - var timeOpt *TimeoutCallOption - for _, opt := range opts { - var tok bool - timeOpt, tok = opt.(*TimeoutCallOption) - if tok { - break - } - } - if timeOpt != nil && timeOpt.Timeout > 0 { - ctx, cancel = context.WithTimeout(nmd.WithContext(ctx), timeOpt.Timeout) - } else { - _, ctx, cancel = conf.Timeout.Shrink(ctx) - } - - defer cancel() - nmd.Range(ctx, - func(key string, value interface{}) { - if valstr, ok := value.(string); ok { - gmd[key] = []string{valstr} - } - }, - nmd.IsOutgoingKey) - // merge with old matadata if exists - if oldmd, ok := metadata.FromOutgoingContext(ctx); ok { - gmd = metadata.Join(gmd, oldmd) - } - ctx = metadata.NewOutgoingContext(ctx, gmd) - - opts = append(opts, grpc.Peer(&p)) - if err = invoker(ctx, method, req, reply, cc, opts...); err != nil { - gst, _ := gstatus.FromError(err) - ec = status.ToEcode(gst) - err = errors.WithMessage(ec, gst.Message()) - } - if p.Addr != nil { - addr = p.Addr.String() - } - if t != nil { - t.SetTag(trace.String(trace.TagAddress, addr), trace.String(trace.TagComment, "")) - } - return - } -} - -func onBreaker(breaker breaker.Breaker, err *error) { - if err != nil && *err != nil { - if ecode.EqualError(ecode.ServerErr, *err) || ecode.EqualError(ecode.ServiceUnavailable, *err) || ecode.EqualError(ecode.Deadline, *err) || ecode.EqualError(ecode.LimitExceed, *err) { - breaker.MarkFailed() - return - } - } - breaker.MarkSuccess() -} - -// NewConn will create a grpc conn by default config. -func NewConn(target string, opt ...grpc.DialOption) (*grpc.ClientConn, error) { - return DefaultClient().Dial(context.Background(), target, opt...) -} - -// NewClient returns a new blank Client instance with a default client interceptor. -// opt can be used to add grpc dial options. -func NewClient(conf *ClientConfig, opt ...grpc.DialOption) *Client { - c := new(Client) - if err := c.SetConfig(conf); err != nil { - panic(err) - } - c.UseOpt(grpc.WithBalancerName(p2c.Name)) - c.UseOpt(opt...) 
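Because handle() above scans the call options for a *TimeoutCallOption before shrinking the context with the configured timeout, a single heavy request can be given a bigger budget without touching the config. A usage sketch; the pb import path and Greeter client are hypothetical stand-ins for any protoc-generated client:

```go
package demo

import (
	"context"
	"time"

	"github.com/go-kratos/kratos/pkg/net/rpc/warden"

	// Hypothetical protoc-generated package; any generated gRPC client works,
	// since the option travels as an ordinary grpc.CallOption.
	pb "example.com/demo/api"
)

// sayHelloSlowly grants this one call a 2s deadline; every other call on the
// same client keeps the default or per-method timeout from ClientConfig.
func sayHelloSlowly(ctx context.Context, cli pb.GreeterClient) (*pb.HelloReply, error) {
	return cli.SayHello(ctx, &pb.HelloRequest{Name: "kratos"},
		warden.WithTimeoutCallOption(2*time.Second))
}
```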
- return c -} - -// DefaultClient returns a new default Client instance with a default client interceptor and default dialoption. -// opt can be used to add grpc dial options. -func DefaultClient() *Client { - _once.Do(func() { - _defaultClient = NewClient(nil) - }) - return _defaultClient -} - -// SetConfig hot reloads client config -func (c *Client) SetConfig(conf *ClientConfig) (err error) { - if conf == nil { - conf = _defaultCliConf - } - if conf.Dial <= 0 { - conf.Dial = xtime.Duration(time.Second * 10) - } - if conf.Timeout <= 0 { - conf.Timeout = xtime.Duration(time.Millisecond * 250) - } - if conf.Subset <= 0 { - conf.Subset = 50 - } - if conf.KeepAliveInterval <= 0 { - conf.KeepAliveInterval = xtime.Duration(time.Second * 60) - } - if conf.KeepAliveTimeout <= 0 { - conf.KeepAliveTimeout = xtime.Duration(time.Second * 20) - } - - // FIXME(maojian) check Method dial/timeout - c.mutex.Lock() - c.conf = conf - if c.breaker == nil { - c.breaker = breaker.NewGroup(conf.Breaker) - } else { - c.breaker.Reload(conf.Breaker) - } - c.mutex.Unlock() - return nil -} - -// Use attachs a global inteceptor to the Client. -// For example, this is the right place for a circuit breaker or error management inteceptor. -func (c *Client) Use(handlers ...grpc.UnaryClientInterceptor) *Client { - finalSize := len(c.handlers) + len(handlers) - if finalSize >= int(_abortIndex) { - panic("warden: client use too many handlers") - } - mergedHandlers := make([]grpc.UnaryClientInterceptor, finalSize) - copy(mergedHandlers, c.handlers) - copy(mergedHandlers[len(c.handlers):], handlers) - c.handlers = mergedHandlers - return c -} - -// UseOpt attachs a global grpc DialOption to the Client. -func (c *Client) UseOpt(opts ...grpc.DialOption) *Client { - c.opts = append(c.opts, opts...) - return c -} - -func (c *Client) cloneOpts() []grpc.DialOption { - dialOptions := make([]grpc.DialOption, len(c.opts)) - copy(dialOptions, c.opts) - return dialOptions -} - -func (c *Client) dial(ctx context.Context, target string, opts ...grpc.DialOption) (conn *grpc.ClientConn, err error) { - dialOptions := c.cloneOpts() - if !c.conf.NonBlock { - dialOptions = append(dialOptions, grpc.WithBlock()) - } - dialOptions = append(dialOptions, grpc.WithKeepaliveParams(keepalive.ClientParameters{ - Time: time.Duration(c.conf.KeepAliveInterval), - Timeout: time.Duration(c.conf.KeepAliveTimeout), - PermitWithoutStream: !c.conf.KeepAliveWithoutStream, - })) - dialOptions = append(dialOptions, opts...) - - // init default handler - var handlers []grpc.UnaryClientInterceptor - handlers = append(handlers, c.recovery()) - handlers = append(handlers, clientLogging(dialOptions...)) - handlers = append(handlers, c.handlers...) - // NOTE: c.handle must be a last interceptor. 
- handlers = append(handlers, c.handle()) - - dialOptions = append(dialOptions, grpc.WithUnaryInterceptor(chainUnaryClient(handlers))) - c.mutex.RLock() - conf := c.conf - c.mutex.RUnlock() - if conf.Dial > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, time.Duration(conf.Dial)) - defer cancel() - } - if u, e := url.Parse(target); e == nil { - v := u.Query() - for _, c := range c.conf.Clusters { - v.Add(naming.MetaCluster, c) - } - if c.conf.Zone != "" { - v.Add(naming.MetaZone, c.conf.Zone) - } - if v.Get("subset") == "" && c.conf.Subset > 0 { - v.Add("subset", strconv.FormatInt(int64(c.conf.Subset), 10)) - } - u.RawQuery = v.Encode() - // 比较_grpcTarget中的appid是否等于u.path中的appid,并替换成mock的地址 - for _, t := range _grpcTarget { - strs := strings.SplitN(t, "=", 2) - if len(strs) == 2 && ("/"+strs[0]) == u.Path { - u.Path = "/" + strs[1] - u.Scheme = "passthrough" - u.RawQuery = "" - break - } - } - target = u.String() - } - if conn, err = grpc.DialContext(ctx, target, dialOptions...); err != nil { - fmt.Fprintf(os.Stderr, "warden client: dial %s error %v!", target, err) - } - err = errors.WithStack(err) - return -} - -// Dial creates a client connection to the given target. -// Target format is scheme://authority/endpoint?query_arg=value -// example: discovery://default/account.account.service?cluster=shfy01&cluster=shfy02 -func (c *Client) Dial(ctx context.Context, target string, opts ...grpc.DialOption) (conn *grpc.ClientConn, err error) { - opts = append(opts, grpc.WithInsecure()) - return c.dial(ctx, target, opts...) -} - -// DialTLS creates a client connection over tls transport to the given target. -func (c *Client) DialTLS(ctx context.Context, target string, file string, name string, opts ...grpc.DialOption) (conn *grpc.ClientConn, err error) { - var creds credentials.TransportCredentials - creds, err = credentials.NewClientTLSFromFile(file, name) - if err != nil { - err = errors.WithStack(err) - return - } - opts = append(opts, grpc.WithTransportCredentials(creds)) - return c.dial(ctx, target, opts...) -} - -// chainUnaryClient creates a single interceptor out of a chain of many interceptors. -// -// Execution is done in left-to-right order, including passing of context. -// For example ChainUnaryClient(one, two, three) will execute one before two before three. -func chainUnaryClient(handlers []grpc.UnaryClientInterceptor) grpc.UnaryClientInterceptor { - n := len(handlers) - if n == 0 { - return func(ctx context.Context, method string, req, reply interface{}, - cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - return invoker(ctx, method, req, reply, cc, opts...) - } - } - - return func(ctx context.Context, method string, req, reply interface{}, - cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - var ( - i int - chainHandler grpc.UnaryInvoker - ) - chainHandler = func(ictx context.Context, imethod string, ireq, ireply interface{}, ic *grpc.ClientConn, iopts ...grpc.CallOption) error { - if i == n-1 { - return invoker(ictx, imethod, ireq, ireply, ic, iopts...) - } - i++ - return handlers[i](ictx, imethod, ireq, ireply, ic, chainHandler, iopts...) - } - - return handlers[0](ctx, method, req, reply, cc, chainHandler, opts...) 
- } -} diff --git a/pkg/net/rpc/warden/client_test.go b/pkg/net/rpc/warden/client_test.go deleted file mode 100644 index 96be46368..000000000 --- a/pkg/net/rpc/warden/client_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package warden - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - "google.golang.org/grpc" -) - -func TestChainUnaryClient(t *testing.T) { - var orders []string - factory := func(name string) grpc.UnaryClientInterceptor { - return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - orders = append(orders, name+"-in") - err := invoker(ctx, method, req, reply, cc, opts...) - orders = append(orders, name+"-out") - return err - } - } - handlers := []grpc.UnaryClientInterceptor{factory("h1"), factory("h2"), factory("h3")} - interceptor := chainUnaryClient(handlers) - interceptor(context.Background(), "test", nil, nil, nil, func(context.Context, string, interface{}, interface{}, *grpc.ClientConn, ...grpc.CallOption) error { - return nil - }) - assert.Equal(t, []string{ - "h1-in", - "h2-in", - "h3-in", - "h3-out", - "h2-out", - "h1-out", - }, orders) -} diff --git a/pkg/net/rpc/warden/exapmle_test.go b/pkg/net/rpc/warden/exapmle_test.go deleted file mode 100644 index 0425d70dd..000000000 --- a/pkg/net/rpc/warden/exapmle_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package warden_test - -import ( - "context" - "fmt" - "io" - "time" - - "github.com/go-kratos/kratos/pkg/log" - "github.com/go-kratos/kratos/pkg/net/netutil/breaker" - "github.com/go-kratos/kratos/pkg/net/rpc/warden" - pb "github.com/go-kratos/kratos/pkg/net/rpc/warden/internal/proto/testproto" - xtime "github.com/go-kratos/kratos/pkg/time" - - "google.golang.org/grpc" -) - -type helloServer struct { -} - -func (s *helloServer) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { - return &pb.HelloReply{Message: "Hello " + in.Name, Success: true}, nil -} - -func (s *helloServer) StreamHello(ss pb.Greeter_StreamHelloServer) error { - for i := 0; i < 3; i++ { - in, err := ss.Recv() - if err == io.EOF { - return nil - } - if err != nil { - return err - } - ret := &pb.HelloReply{Message: "Hello " + in.Name, Success: true} - err = ss.Send(ret) - if err != nil { - return err - } - } - return nil -} - -func ExampleServer() { - s := warden.NewServer(&warden.ServerConfig{Timeout: xtime.Duration(time.Second), Addr: ":8080"}) - // apply server interceptor middleware - s.Use(func(ctx context.Context, req interface{}, args *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - newctx, cancel := context.WithTimeout(ctx, time.Second*10) - defer cancel() - resp, err := handler(newctx, req) - return resp, err - }) - pb.RegisterGreeterServer(s.Server(), &helloServer{}) - s.Start() -} - -func ExampleClient() { - client := warden.NewClient(&warden.ClientConfig{ - Dial: xtime.Duration(time.Second * 10), - Timeout: xtime.Duration(time.Second * 10), - Breaker: &breaker.Config{ - Window: xtime.Duration(3 * time.Second), - Bucket: 10, - K: 1.5, - Request: 20, - }, - }) - // apply client interceptor middleware - client.Use(func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) (ret error) { - newctx, cancel := context.WithTimeout(ctx, time.Second*5) - defer cancel() - ret = invoker(newctx, method, req, reply, cc, opts...) 
- return - }) - conn, err := client.Dial(context.Background(), "127.0.0.1:8080") - if err != nil { - log.Error("did not connect: %v", err) - return - } - defer conn.Close() - - c := pb.NewGreeterClient(conn) - name := "2233" - rp, err := c.SayHello(context.Background(), &pb.HelloRequest{Name: name, Age: 18}) - if err != nil { - log.Error("could not greet: %v", err) - return - } - fmt.Println("rp", *rp) -} diff --git a/pkg/net/rpc/warden/internal/benchmark/bench/client/client.go b/pkg/net/rpc/warden/internal/benchmark/bench/client/client.go deleted file mode 100644 index b7bc5fc8b..000000000 --- a/pkg/net/rpc/warden/internal/benchmark/bench/client/client.go +++ /dev/null @@ -1,186 +0,0 @@ -package main - -import ( - "flag" - "log" - "reflect" - "sync" - "sync/atomic" - "time" - - "github.com/go-kratos/kratos/pkg/net/netutil/breaker" - "github.com/go-kratos/kratos/pkg/net/rpc/warden" - "github.com/go-kratos/kratos/pkg/net/rpc/warden/internal/benchmark/bench/proto" - xtime "github.com/go-kratos/kratos/pkg/time" - - goproto "github.com/gogo/protobuf/proto" - "github.com/montanaflynn/stats" - "golang.org/x/net/context" - "google.golang.org/grpc" -) - -const ( - iws = 65535 * 1000 - iwsc = 65535 * 10000 - readBuffer = 32 * 1024 - writeBuffer = 32 * 1024 -) - -var concurrency = flag.Int("c", 50, "concurrency") -var total = flag.Int("t", 500000, "total requests for all clients") -var host = flag.String("s", "127.0.0.1:8972", "server ip and port") -var isWarden = flag.Bool("w", true, "is warden or grpc client") -var strLen = flag.Int("l", 600, "the length of the str") - -func wardenCli() proto.HelloClient { - log.Println("start warden cli") - client := warden.NewClient(&warden.ClientConfig{ - Dial: xtime.Duration(time.Second * 10), - Timeout: xtime.Duration(time.Second * 10), - Breaker: &breaker.Config{ - Window: xtime.Duration(3 * time.Second), - Bucket: 10, - Request: 20, - K: 1.5, - }, - }, - grpc.WithInitialWindowSize(iws), - grpc.WithInitialConnWindowSize(iwsc), - grpc.WithReadBufferSize(readBuffer), - grpc.WithWriteBufferSize(writeBuffer)) - conn, err := client.Dial(context.Background(), *host) - if err != nil { - log.Fatalf("did not connect: %v", err) - } - cli := proto.NewHelloClient(conn) - return cli -} - -func grpcCli() proto.HelloClient { - log.Println("start grpc cli") - conn, err := grpc.Dial(*host, grpc.WithInsecure(), - grpc.WithInitialWindowSize(iws), - grpc.WithInitialConnWindowSize(iwsc), - grpc.WithReadBufferSize(readBuffer), - grpc.WithWriteBufferSize(writeBuffer)) - if err != nil { - log.Fatalf("did not connect: %v", err) - } - cli := proto.NewHelloClient(conn) - return cli -} - -func main() { - flag.Parse() - c := *concurrency - m := *total / c - var wg sync.WaitGroup - wg.Add(c) - log.Printf("concurrency: %d\nrequests per client: %d\n\n", c, m) - - args := prepareArgs() - b, _ := goproto.Marshal(args) - log.Printf("message size: %d bytes\n\n", len(b)) - - var trans uint64 - var transOK uint64 - d := make([][]int64, c) - for i := 0; i < c; i++ { - dt := make([]int64, 0, m) - d = append(d, dt) - } - var cli proto.HelloClient - if *isWarden { - cli = wardenCli() - } else { - cli = grpcCli() - } - //warmup - cli.Say(context.Background(), args) - - totalT := time.Now().UnixNano() - for i := 0; i < c; i++ { - go func(i int) { - for j := 0; j < m; j++ { - t := time.Now().UnixNano() - reply, err := cli.Say(context.Background(), args) - t = time.Now().UnixNano() - t - d[i] = append(d[i], t) - if err == nil && reply.Field1 == "OK" { - atomic.AddUint64(&transOK, 1) - } - 
atomic.AddUint64(&trans, 1) - } - wg.Done() - }(i) - } - wg.Wait() - - totalT = time.Now().UnixNano() - totalT - totalT = totalT / 1e6 - log.Printf("took %d ms for %d requests\n", totalT, *total) - totalD := make([]int64, 0, *total) - for _, k := range d { - totalD = append(totalD, k...) - } - totalD2 := make([]float64, 0, *total) - for _, k := range totalD { - totalD2 = append(totalD2, float64(k)) - } - - mean, _ := stats.Mean(totalD2) - median, _ := stats.Median(totalD2) - max, _ := stats.Max(totalD2) - min, _ := stats.Min(totalD2) - tp99, _ := stats.Percentile(totalD2, 99) - tp999, _ := stats.Percentile(totalD2, 99.9) - - log.Printf("sent requests : %d\n", *total) - log.Printf("received requests_OK : %d\n", atomic.LoadUint64(&transOK)) - log.Printf("throughput (TPS) : %d\n", int64(c*m)*1000/totalT) - log.Printf("mean: %v ms, median: %v ms, max: %v ms, min: %v ms, p99: %v ms, p999:%v ms\n", mean/1e6, median/1e6, max/1e6, min/1e6, tp99/1e6, tp999/1e6) -} - -func prepareArgs() *proto.BenchmarkMessage { - b := true - var i int32 = 120000 - var i64 int64 = 98765432101234 - var s = "许多往事在眼前一幕一幕,变的那麼模糊" - repeat := *strLen / (8 * 54) - if repeat == 0 { - repeat = 1 - } - var str string - for i := 0; i < repeat; i++ { - str += s - } - var args proto.BenchmarkMessage - - v := reflect.ValueOf(&args).Elem() - num := v.NumField() - for k := 0; k < num; k++ { - field := v.Field(k) - if field.Type().Kind() == reflect.Ptr { - switch v.Field(k).Type().Elem().Kind() { - case reflect.Int, reflect.Int32: - field.Set(reflect.ValueOf(&i)) - case reflect.Int64: - field.Set(reflect.ValueOf(&i64)) - case reflect.Bool: - field.Set(reflect.ValueOf(&b)) - case reflect.String: - field.Set(reflect.ValueOf(&str)) - } - } else { - switch field.Kind() { - case reflect.Int, reflect.Int32, reflect.Int64: - field.SetInt(9876543) - case reflect.Bool: - field.SetBool(true) - case reflect.String: - field.SetString(str) - } - } - } - return &args -} diff --git a/pkg/net/rpc/warden/internal/benchmark/bench/proto/hello.pb.go b/pkg/net/rpc/warden/internal/benchmark/bench/proto/hello.pb.go deleted file mode 100644 index 0bd9167ed..000000000 --- a/pkg/net/rpc/warden/internal/benchmark/bench/proto/hello.pb.go +++ /dev/null @@ -1,1686 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: hello.proto - -/* - Package grpc is a generated protocol buffer package. - - It is generated from these files: - hello.proto - - It has these top-level messages: - BenchmarkMessage -*/ -package proto - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/gogo/protobuf/gogoproto" - -import context "golang.org/x/net/context" -import grpc1 "google.golang.org/grpc" - -import binary "encoding/binary" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
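The benchmark's summary lines above are plain unit conversions: latencies are collected in nanoseconds and divided by 1e6 to print milliseconds, and throughput is the total request count scaled by 1000 over the elapsed wall-clock milliseconds. A tiny sketch with made-up numbers:

```go
package main

import "fmt"

func main() {
	const (
		clients     = 50    // -c
		perClient   = 10000 // total / clients
		elapsedMs   = 12500 // wall-clock time of the whole run, in ms
		meanLatency = 3.2e6 // mean per-request latency, in ns
	)
	fmt.Printf("mean latency: %.2f ms\n", meanLatency/1e6)
	fmt.Printf("throughput  : %d req/s\n", clients*perClient*1000/elapsedMs)
}
```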
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type BenchmarkMessage struct { - Field1 string `protobuf:"bytes,1,opt,name=field1,proto3" json:"field1,omitempty"` - Field9 string `protobuf:"bytes,9,opt,name=field9,proto3" json:"field9,omitempty"` - Field18 string `protobuf:"bytes,18,opt,name=field18,proto3" json:"field18,omitempty"` - Field80 bool `protobuf:"varint,80,opt,name=field80,proto3" json:"field80,omitempty"` - Field81 bool `protobuf:"varint,81,opt,name=field81,proto3" json:"field81,omitempty"` - Field2 int32 `protobuf:"varint,2,opt,name=field2,proto3" json:"field2,omitempty"` - Field3 int32 `protobuf:"varint,3,opt,name=field3,proto3" json:"field3,omitempty"` - Field280 int32 `protobuf:"varint,280,opt,name=field280,proto3" json:"field280,omitempty"` - Field6 int32 `protobuf:"varint,6,opt,name=field6,proto3" json:"field6,omitempty"` - Field22 int64 `protobuf:"varint,22,opt,name=field22,proto3" json:"field22,omitempty"` - Field4 string `protobuf:"bytes,4,opt,name=field4,proto3" json:"field4,omitempty"` - Field5 uint64 `protobuf:"fixed64,5,opt,name=field5,proto3" json:"field5,omitempty"` - Field59 bool `protobuf:"varint,59,opt,name=field59,proto3" json:"field59,omitempty"` - Field7 string `protobuf:"bytes,7,opt,name=field7,proto3" json:"field7,omitempty"` - Field16 int32 `protobuf:"varint,16,opt,name=field16,proto3" json:"field16,omitempty"` - Field130 int32 `protobuf:"varint,130,opt,name=field130,proto3" json:"field130,omitempty"` - Field12 bool `protobuf:"varint,12,opt,name=field12,proto3" json:"field12,omitempty"` - Field17 bool `protobuf:"varint,17,opt,name=field17,proto3" json:"field17,omitempty"` - Field13 bool `protobuf:"varint,13,opt,name=field13,proto3" json:"field13,omitempty"` - Field14 bool `protobuf:"varint,14,opt,name=field14,proto3" json:"field14,omitempty"` - Field104 int32 `protobuf:"varint,104,opt,name=field104,proto3" json:"field104,omitempty"` - Field100 int32 `protobuf:"varint,100,opt,name=field100,proto3" json:"field100,omitempty"` - Field101 int32 `protobuf:"varint,101,opt,name=field101,proto3" json:"field101,omitempty"` - Field102 string `protobuf:"bytes,102,opt,name=field102,proto3" json:"field102,omitempty"` - Field103 string `protobuf:"bytes,103,opt,name=field103,proto3" json:"field103,omitempty"` - Field29 int32 `protobuf:"varint,29,opt,name=field29,proto3" json:"field29,omitempty"` - Field30 bool `protobuf:"varint,30,opt,name=field30,proto3" json:"field30,omitempty"` - Field60 int32 `protobuf:"varint,60,opt,name=field60,proto3" json:"field60,omitempty"` - Field271 int32 `protobuf:"varint,271,opt,name=field271,proto3" json:"field271,omitempty"` - Field272 int32 `protobuf:"varint,272,opt,name=field272,proto3" json:"field272,omitempty"` - Field150 int32 `protobuf:"varint,150,opt,name=field150,proto3" json:"field150,omitempty"` - Field23 int32 `protobuf:"varint,23,opt,name=field23,proto3" json:"field23,omitempty"` - Field24 bool `protobuf:"varint,24,opt,name=field24,proto3" json:"field24,omitempty"` - Field25 int32 `protobuf:"varint,25,opt,name=field25,proto3" json:"field25,omitempty"` - Field78 bool `protobuf:"varint,78,opt,name=field78,proto3" json:"field78,omitempty"` - Field67 int32 `protobuf:"varint,67,opt,name=field67,proto3" json:"field67,omitempty"` - Field68 int32 `protobuf:"varint,68,opt,name=field68,proto3" json:"field68,omitempty"` - Field128 int32 `protobuf:"varint,128,opt,name=field128,proto3" json:"field128,omitempty"` - Field129 string `protobuf:"bytes,129,opt,name=field129,proto3" json:"field129,omitempty"` - 
Field131 int32 `protobuf:"varint,131,opt,name=field131,proto3" json:"field131,omitempty"` -} - -func (m *BenchmarkMessage) Reset() { *m = BenchmarkMessage{} } -func (m *BenchmarkMessage) String() string { return proto.CompactTextString(m) } -func (*BenchmarkMessage) ProtoMessage() {} -func (*BenchmarkMessage) Descriptor() ([]byte, []int) { return fileDescriptorHello, []int{0} } - -func init() { - proto.RegisterType((*BenchmarkMessage)(nil), "grpc.BenchmarkMessage") -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc1.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc1.SupportPackageIsVersion4 - -// Client API for Hello service - -type HelloClient interface { - // Sends a greeting - Say(ctx context.Context, in *BenchmarkMessage, opts ...grpc1.CallOption) (*BenchmarkMessage, error) -} - -type helloClient struct { - cc *grpc1.ClientConn -} - -func NewHelloClient(cc *grpc1.ClientConn) HelloClient { - return &helloClient{cc} -} - -func (c *helloClient) Say(ctx context.Context, in *BenchmarkMessage, opts ...grpc1.CallOption) (*BenchmarkMessage, error) { - out := new(BenchmarkMessage) - err := grpc1.Invoke(ctx, "/grpc.Hello/Say", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Hello service - -type HelloServer interface { - // Sends a greeting - Say(context.Context, *BenchmarkMessage) (*BenchmarkMessage, error) -} - -func RegisterHelloServer(s *grpc1.Server, srv HelloServer) { - s.RegisterService(&_Hello_serviceDesc, srv) -} - -func _Hello_Say_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc1.UnaryServerInterceptor) (interface{}, error) { - in := new(BenchmarkMessage) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(HelloServer).Say(ctx, in) - } - info := &grpc1.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.Hello/Say", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(HelloServer).Say(ctx, req.(*BenchmarkMessage)) - } - return interceptor(ctx, in, info, handler) -} - -var _Hello_serviceDesc = grpc1.ServiceDesc{ - ServiceName: "grpc.Hello", - HandlerType: (*HelloServer)(nil), - Methods: []grpc1.MethodDesc{ - { - MethodName: "Say", - Handler: _Hello_Say_Handler, - }, - }, - Streams: []grpc1.StreamDesc{}, - Metadata: "hello.proto", -} - -func (m *BenchmarkMessage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BenchmarkMessage) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Field1) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintHello(dAtA, i, uint64(len(m.Field1))) - i += copy(dAtA[i:], m.Field1) - } - if m.Field2 != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintHello(dAtA, i, uint64(m.Field2)) - } - if m.Field3 != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintHello(dAtA, i, uint64(m.Field3)) - } - if len(m.Field4) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintHello(dAtA, i, uint64(len(m.Field4))) - i += copy(dAtA[i:], m.Field4) - } - if m.Field5 != 0 { - dAtA[i] = 0x29 - i++ - binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Field5)) - i += 8 - } - if m.Field6 != 0 { - dAtA[i] = 0x30 - i++ - i = encodeVarintHello(dAtA, i, uint64(m.Field6)) - 
} - if len(m.Field7) > 0 { - dAtA[i] = 0x3a - i++ - i = encodeVarintHello(dAtA, i, uint64(len(m.Field7))) - i += copy(dAtA[i:], m.Field7) - } - if len(m.Field9) > 0 { - dAtA[i] = 0x4a - i++ - i = encodeVarintHello(dAtA, i, uint64(len(m.Field9))) - i += copy(dAtA[i:], m.Field9) - } - if m.Field12 { - dAtA[i] = 0x60 - i++ - if m.Field12 { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.Field13 { - dAtA[i] = 0x68 - i++ - if m.Field13 { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.Field14 { - dAtA[i] = 0x70 - i++ - if m.Field14 { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.Field16 != 0 { - dAtA[i] = 0x80 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintHello(dAtA, i, uint64(m.Field16)) - } - if m.Field17 { - dAtA[i] = 0x88 - i++ - dAtA[i] = 0x1 - i++ - if m.Field17 { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.Field18) > 0 { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintHello(dAtA, i, uint64(len(m.Field18))) - i += copy(dAtA[i:], m.Field18) - } - if m.Field22 != 0 { - dAtA[i] = 0xb0 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintHello(dAtA, i, uint64(m.Field22)) - } - if m.Field23 != 0 { - dAtA[i] = 0xb8 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintHello(dAtA, i, uint64(m.Field23)) - } - if m.Field24 { - dAtA[i] = 0xc0 - i++ - dAtA[i] = 0x1 - i++ - if m.Field24 { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.Field25 != 0 { - dAtA[i] = 0xc8 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintHello(dAtA, i, uint64(m.Field25)) - } - if m.Field29 != 0 { - dAtA[i] = 0xe8 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintHello(dAtA, i, uint64(m.Field29)) - } - if m.Field30 { - dAtA[i] = 0xf0 - i++ - dAtA[i] = 0x1 - i++ - if m.Field30 { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.Field59 { - dAtA[i] = 0xd8 - i++ - dAtA[i] = 0x3 - i++ - if m.Field59 { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.Field60 != 0 { - dAtA[i] = 0xe0 - i++ - dAtA[i] = 0x3 - i++ - i = encodeVarintHello(dAtA, i, uint64(m.Field60)) - } - if m.Field67 != 0 { - dAtA[i] = 0x98 - i++ - dAtA[i] = 0x4 - i++ - i = encodeVarintHello(dAtA, i, uint64(m.Field67)) - } - if m.Field68 != 0 { - dAtA[i] = 0xa0 - i++ - dAtA[i] = 0x4 - i++ - i = encodeVarintHello(dAtA, i, uint64(m.Field68)) - } - if m.Field78 { - dAtA[i] = 0xf0 - i++ - dAtA[i] = 0x4 - i++ - if m.Field78 { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.Field80 { - dAtA[i] = 0x80 - i++ - dAtA[i] = 0x5 - i++ - if m.Field80 { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.Field81 { - dAtA[i] = 0x88 - i++ - dAtA[i] = 0x5 - i++ - if m.Field81 { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.Field100 != 0 { - dAtA[i] = 0xa0 - i++ - dAtA[i] = 0x6 - i++ - i = encodeVarintHello(dAtA, i, uint64(m.Field100)) - } - if m.Field101 != 0 { - dAtA[i] = 0xa8 - i++ - dAtA[i] = 0x6 - i++ - i = encodeVarintHello(dAtA, i, uint64(m.Field101)) - } - if len(m.Field102) > 0 { - dAtA[i] = 0xb2 - i++ - dAtA[i] = 0x6 - i++ - i = encodeVarintHello(dAtA, i, uint64(len(m.Field102))) - i += copy(dAtA[i:], m.Field102) - } - if len(m.Field103) > 0 { - dAtA[i] = 0xba - i++ - dAtA[i] = 0x6 - i++ - i = encodeVarintHello(dAtA, i, uint64(len(m.Field103))) - i += copy(dAtA[i:], m.Field103) - } - if m.Field104 != 0 { - dAtA[i] = 0xc0 - i++ - dAtA[i] = 0x6 - i++ - i = encodeVarintHello(dAtA, i, uint64(m.Field104)) - } - if m.Field128 != 0 { - dAtA[i] = 0x80 - i++ - dAtA[i] = 0x8 - i++ - i = encodeVarintHello(dAtA, i, uint64(m.Field128)) - } - if len(m.Field129) > 0 { - 
dAtA[i] = 0x8a - i++ - dAtA[i] = 0x8 - i++ - i = encodeVarintHello(dAtA, i, uint64(len(m.Field129))) - i += copy(dAtA[i:], m.Field129) - } - if m.Field130 != 0 { - dAtA[i] = 0x90 - i++ - dAtA[i] = 0x8 - i++ - i = encodeVarintHello(dAtA, i, uint64(m.Field130)) - } - if m.Field131 != 0 { - dAtA[i] = 0x98 - i++ - dAtA[i] = 0x8 - i++ - i = encodeVarintHello(dAtA, i, uint64(m.Field131)) - } - if m.Field150 != 0 { - dAtA[i] = 0xb0 - i++ - dAtA[i] = 0x9 - i++ - i = encodeVarintHello(dAtA, i, uint64(m.Field150)) - } - if m.Field271 != 0 { - dAtA[i] = 0xf8 - i++ - dAtA[i] = 0x10 - i++ - i = encodeVarintHello(dAtA, i, uint64(m.Field271)) - } - if m.Field272 != 0 { - dAtA[i] = 0x80 - i++ - dAtA[i] = 0x11 - i++ - i = encodeVarintHello(dAtA, i, uint64(m.Field272)) - } - if m.Field280 != 0 { - dAtA[i] = 0xc0 - i++ - dAtA[i] = 0x11 - i++ - i = encodeVarintHello(dAtA, i, uint64(m.Field280)) - } - return i, nil -} - -func encodeVarintHello(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *BenchmarkMessage) Size() (n int) { - var l int - _ = l - l = len(m.Field1) - if l > 0 { - n += 1 + l + sovHello(uint64(l)) - } - if m.Field2 != 0 { - n += 1 + sovHello(uint64(m.Field2)) - } - if m.Field3 != 0 { - n += 1 + sovHello(uint64(m.Field3)) - } - l = len(m.Field4) - if l > 0 { - n += 1 + l + sovHello(uint64(l)) - } - if m.Field5 != 0 { - n += 9 - } - if m.Field6 != 0 { - n += 1 + sovHello(uint64(m.Field6)) - } - l = len(m.Field7) - if l > 0 { - n += 1 + l + sovHello(uint64(l)) - } - l = len(m.Field9) - if l > 0 { - n += 1 + l + sovHello(uint64(l)) - } - if m.Field12 { - n += 2 - } - if m.Field13 { - n += 2 - } - if m.Field14 { - n += 2 - } - if m.Field16 != 0 { - n += 2 + sovHello(uint64(m.Field16)) - } - if m.Field17 { - n += 3 - } - l = len(m.Field18) - if l > 0 { - n += 2 + l + sovHello(uint64(l)) - } - if m.Field22 != 0 { - n += 2 + sovHello(uint64(m.Field22)) - } - if m.Field23 != 0 { - n += 2 + sovHello(uint64(m.Field23)) - } - if m.Field24 { - n += 3 - } - if m.Field25 != 0 { - n += 2 + sovHello(uint64(m.Field25)) - } - if m.Field29 != 0 { - n += 2 + sovHello(uint64(m.Field29)) - } - if m.Field30 { - n += 3 - } - if m.Field59 { - n += 3 - } - if m.Field60 != 0 { - n += 2 + sovHello(uint64(m.Field60)) - } - if m.Field67 != 0 { - n += 2 + sovHello(uint64(m.Field67)) - } - if m.Field68 != 0 { - n += 2 + sovHello(uint64(m.Field68)) - } - if m.Field78 { - n += 3 - } - if m.Field80 { - n += 3 - } - if m.Field81 { - n += 3 - } - if m.Field100 != 0 { - n += 2 + sovHello(uint64(m.Field100)) - } - if m.Field101 != 0 { - n += 2 + sovHello(uint64(m.Field101)) - } - l = len(m.Field102) - if l > 0 { - n += 2 + l + sovHello(uint64(l)) - } - l = len(m.Field103) - if l > 0 { - n += 2 + l + sovHello(uint64(l)) - } - if m.Field104 != 0 { - n += 2 + sovHello(uint64(m.Field104)) - } - if m.Field128 != 0 { - n += 2 + sovHello(uint64(m.Field128)) - } - l = len(m.Field129) - if l > 0 { - n += 2 + l + sovHello(uint64(l)) - } - if m.Field130 != 0 { - n += 2 + sovHello(uint64(m.Field130)) - } - if m.Field131 != 0 { - n += 2 + sovHello(uint64(m.Field131)) - } - if m.Field150 != 0 { - n += 2 + sovHello(uint64(m.Field150)) - } - if m.Field271 != 0 { - n += 2 + sovHello(uint64(m.Field271)) - } - if m.Field272 != 0 { - n += 2 + sovHello(uint64(m.Field272)) - } - if m.Field280 != 0 { - n += 2 + sovHello(uint64(m.Field280)) - } - return n -} - -func sovHello(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 
0 { - break - } - } - return n -} -func sozHello(x uint64) (n int) { - return sovHello(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *BenchmarkMessage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BenchmarkMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BenchmarkMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Field1", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHello - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Field1 = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field2", wireType) - } - m.Field2 = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Field2 |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field3", wireType) - } - m.Field3 = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Field3 |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Field4", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHello - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Field4 = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Field5", wireType) - } - m.Field5 = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.Field5 = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field6", wireType) - } - m.Field6 = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Field6 |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Field7", 
wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHello - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Field7 = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Field9", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHello - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Field9 = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field12", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Field12 = bool(v != 0) - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field13", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Field13 = bool(v != 0) - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field14", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Field14 = bool(v != 0) - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field16", wireType) - } - m.Field16 = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Field16 |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 17: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field17", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Field17 = bool(v != 0) - case 18: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Field18", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHello - 
} - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Field18 = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 22: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field22", wireType) - } - m.Field22 = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Field22 |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 23: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field23", wireType) - } - m.Field23 = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Field23 |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 24: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field24", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Field24 = bool(v != 0) - case 25: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field25", wireType) - } - m.Field25 = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Field25 |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 29: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field29", wireType) - } - m.Field29 = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Field29 |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 30: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field30", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Field30 = bool(v != 0) - case 59: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field59", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Field59 = bool(v != 0) - case 60: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field60", wireType) - } - m.Field60 = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Field60 |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 67: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field67", wireType) - } - m.Field67 = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Field67 |= (int32(b) & 0x7F) << shift - if b < 0x80 { 
- break - } - } - case 68: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field68", wireType) - } - m.Field68 = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Field68 |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 78: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field78", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Field78 = bool(v != 0) - case 80: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field80", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Field80 = bool(v != 0) - case 81: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field81", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Field81 = bool(v != 0) - case 100: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field100", wireType) - } - m.Field100 = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Field100 |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 101: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field101", wireType) - } - m.Field101 = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Field101 |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 102: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Field102", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHello - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Field102 = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 103: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Field103", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHello - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Field103 = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 104: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field104", wireType) - } - m.Field104 = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Field104 |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 128: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field128", wireType) - } - m.Field128 = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Field128 |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 129: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Field129", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHello - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Field129 = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 130: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field130", wireType) - } - m.Field130 = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Field130 |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 131: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field131", wireType) - } - m.Field131 = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Field131 |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 150: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field150", wireType) - } - m.Field150 = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Field150 |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 271: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field271", wireType) - } - m.Field271 = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Field271 |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 272: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field272", wireType) - } - m.Field272 = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Field272 |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 280: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Field280", wireType) - } - m.Field280 = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello 
- } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Field280 |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipHello(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthHello - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipHello(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowHello - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowHello - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowHello - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthHello - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowHello - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipHello(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthHello = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowHello = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("hello.proto", fileDescriptorHello) } - -var fileDescriptorHello = []byte{ - // 495 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0xd4, 0x4d, 0x6e, 0xd3, 0x40, - 0x14, 0x07, 0xf0, 0x4c, 0xdb, 0xa4, 0xa9, 0xf9, 0x50, 0xf1, 0x22, 0xfc, 0x09, 0xc2, 0x8a, 0xba, - 0xca, 0x86, 0x74, 0x3c, 0x63, 0x7b, 0xc6, 0x82, 0x05, 0x2a, 0x2c, 0xba, 0x01, 0x41, 0x38, 0x41, - 0x92, 0x3a, 0x4e, 0x45, 0x8a, 0xab, 0xb4, 0x5d, 0xb0, 0xe3, 0xe3, 0x00, 0xb0, 0x42, 0x3d, 0x08, - 0x87, 0xe8, 0xb2, 0x47, 0xa0, 0xe1, 0x22, 0xa8, 0x4e, 0x3c, 0xef, 0x19, 0x89, 0x9d, 0xdf, 0xff, - 0xa7, 0xf7, 0xe6, 0x69, 0x2c, 0x8d, 0x77, 0x67, 0x96, 0xcd, 0xe7, 0xc5, 0xe0, 0x74, 0x51, 0x9c, - 0x17, 0xfe, 0x56, 0xbe, 0x38, 0x9d, 0x74, 0x9f, 0xe6, 0xc7, 0xe7, 0xb3, 0x8b, 0xf1, 0x60, 0x52, - 0x9c, 0xec, 0xe7, 0x45, 0x5e, 0xec, 0x97, 0x38, 0xbe, 0x98, 0x96, 0x55, 0x59, 0x94, 0x5f, 0xab, - 0xa6, 0xbd, 0x5f, 0x6d, 0x6f, 0xf7, 0x20, 0xfb, 0x38, 0x99, 0x9d, 0x8c, 0x16, 0x1f, 0x5e, 0x67, - 0x67, 0x67, 0xa3, 0x3c, 0xf3, 0x3b, 0x5e, 0x6b, 0x7a, 0x9c, 0xcd, 0x8f, 0x42, 0x88, 0x9e, 0xe8, - 0xef, 0x0c, 0xd7, 
0x95, 0xcb, 0x15, 0x36, 0x7a, 0xa2, 0xdf, 0x5c, 0xe7, 0xca, 0xe5, 0x1a, 0x9b, - 0x2c, 0xd7, 0x2e, 0x8f, 0xb0, 0xc5, 0xe6, 0x44, 0x2e, 0x8f, 0xd1, 0xec, 0x89, 0x7e, 0x6b, 0x9d, - 0xc7, 0x2e, 0x4f, 0xd0, 0x62, 0x73, 0x12, 0x97, 0x1b, 0x6c, 0xb3, 0x39, 0xc6, 0xe5, 0x29, 0x76, - 0x58, 0x9e, 0xfa, 0xf0, 0xb6, 0x57, 0x1b, 0x2b, 0xdc, 0xed, 0x89, 0x7e, 0x7b, 0x58, 0x95, 0x24, - 0x1a, 0xf7, 0xb8, 0x68, 0x92, 0x08, 0xf7, 0xb9, 0x44, 0x24, 0x09, 0x76, 0xcb, 0xb5, 0xaa, 0x92, - 0xc4, 0xe0, 0x01, 0xef, 0x31, 0x24, 0x16, 0x7e, 0xb9, 0x5a, 0x55, 0x3a, 0x51, 0x0a, 0x9d, 0x9e, - 0xe8, 0x6f, 0x0e, 0xab, 0x92, 0x44, 0xe3, 0x21, 0x3b, 0x47, 0xd1, 0x6e, 0x2a, 0x02, 0xd8, 0x39, - 0x8a, 0x76, 0x53, 0x31, 0x1e, 0xf1, 0x9e, 0x98, 0x24, 0xc5, 0x13, 0x2e, 0x74, 0x3b, 0x5a, 0x22, - 0x60, 0xd3, 0xb4, 0x74, 0x12, 0xa7, 0x78, 0xc6, 0x24, 0xa6, 0x9e, 0x44, 0xe2, 0x39, 0x9b, 0x96, - 0x50, 0x4f, 0x62, 0xf0, 0x92, 0x0b, 0xdd, 0x41, 0x62, 0xf1, 0x8a, 0x0b, 0xdd, 0x81, 0xb1, 0x78, - 0xc3, 0xce, 0x31, 0x24, 0x56, 0xe2, 0x2d, 0x13, 0x4b, 0xe7, 0xd8, 0x10, 0xef, 0xb8, 0x84, 0x7e, - 0xd7, 0x6b, 0xaf, 0x2e, 0x57, 0x4a, 0x1c, 0x95, 0x07, 0xb9, 0x9a, 0x59, 0x88, 0xac, 0x66, 0xbc, - 0x4f, 0x61, 0x5a, 0xfe, 0x24, 0x57, 0x33, 0xd3, 0xc8, 0x6b, 0xa6, 0x99, 0x45, 0x98, 0xd5, 0x66, - 0x46, 0xfe, 0xe3, 0xca, 0x94, 0xc5, 0x67, 0xc1, 0x51, 0x59, 0x86, 0x29, 0xbe, 0x08, 0x3e, 0x55, - 0xa5, 0x84, 0x5a, 0xe2, 0x6b, 0xad, 0x53, 0x4b, 0x86, 0x21, 0xbe, 0xd5, 0x31, 0x24, 0x8c, 0x25, - 0x7e, 0xd6, 0x30, 0xa6, 0x4e, 0x65, 0x42, 0x7c, 0xdf, 0x60, 0xa8, 0x4c, 0xc8, 0x50, 0xe1, 0x47, - 0x1d, 0x15, 0xa1, 0x95, 0xb8, 0xac, 0xa1, 0x95, 0xea, 0x85, 0xd7, 0x3c, 0xbc, 0x7d, 0x7a, 0x7c, - 0xe3, 0x6d, 0xbe, 0x1f, 0x7d, 0xf2, 0x3b, 0x83, 0xdb, 0xc7, 0x67, 0xf0, 0xef, 0x4b, 0xd2, 0xfd, - 0x4f, 0xbe, 0xd7, 0x38, 0xe8, 0x5e, 0xdd, 0x04, 0x8d, 0xeb, 0x9b, 0xa0, 0x71, 0xb5, 0x0c, 0xc4, - 0xf5, 0x32, 0x10, 0xbf, 0x97, 0x81, 0xb8, 0xfc, 0x13, 0x34, 0x0e, 0xc5, 0xb8, 0x55, 0xbe, 0x4d, - 0xfa, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf2, 0xed, 0x6f, 0xb7, 0xdf, 0x04, 0x00, 0x00, -} diff --git a/pkg/net/rpc/warden/internal/benchmark/bench/proto/hello.proto b/pkg/net/rpc/warden/internal/benchmark/bench/proto/hello.proto deleted file mode 100644 index cb40908ca..000000000 --- a/pkg/net/rpc/warden/internal/benchmark/bench/proto/hello.proto +++ /dev/null @@ -1,60 +0,0 @@ -syntax = "proto3"; -package proto; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; - -option optimize_for = SPEED; -option (gogoproto.goproto_enum_prefix_all) = false; -option (gogoproto.goproto_getters_all) = false; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; - -service Hello { - // Sends a greeting - rpc Say (BenchmarkMessage) returns (BenchmarkMessage) {} -} - - -message BenchmarkMessage { - string field1 = 1; - string field9 = 9; - string field18 = 18; - bool field80 = 80; - bool field81 = 81; - int32 field2 = 2; - int32 field3 = 3; - int32 field280 = 280; - int32 field6 = 6; - int64 field22 = 22; - string field4 = 4; - fixed64 field5 = 5; - bool field59 = 59; - string field7 = 7; - int32 field16 = 16; - int32 field130 = 130; - bool field12 = 12; - bool field17 = 17; - bool field13 = 13; - bool field14 = 14; - int32 field104 = 104; - int32 field100 = 100; - int32 field101 = 101; - string field102 = 102; - string field103 = 103; - int32 field29 = 29; - bool field30 = 30; - int32 field60 = 60; - int32 field271 = 271; - int32 field272 = 272; - int32 field150 = 150; - int32 field23 = 23; - bool field24 = 24 ; - int32 
field25 = 25 ; - bool field78 = 78; - int32 field67 = 67; - int32 field68 = 68; - int32 field128 = 128; - string field129 = 129; - int32 field131 = 131; -} \ No newline at end of file diff --git a/pkg/net/rpc/warden/internal/benchmark/bench/server/server.go b/pkg/net/rpc/warden/internal/benchmark/bench/server/server.go deleted file mode 100644 index 755718eed..000000000 --- a/pkg/net/rpc/warden/internal/benchmark/bench/server/server.go +++ /dev/null @@ -1,103 +0,0 @@ -package main - -import ( - "context" - "flag" - "log" - "net" - "net/http" - _ "net/http/pprof" - "sync/atomic" - "time" - - "github.com/go-kratos/kratos/pkg/net/rpc/warden" - "github.com/go-kratos/kratos/pkg/net/rpc/warden/internal/benchmark/bench/proto" - xtime "github.com/go-kratos/kratos/pkg/time" - - "github.com/prometheus/client_golang/prometheus/promhttp" - "google.golang.org/grpc" -) - -const ( - iws = 65535 * 1000 - iwsc = 65535 * 10000 - readBuffer = 32 * 1024 - writeBuffer = 32 * 1024 -) - -var reqNum uint64 - -type Hello struct{} - -func (t *Hello) Say(ctx context.Context, args *proto.BenchmarkMessage) (reply *proto.BenchmarkMessage, err error) { - s := "OK" - var i int32 = 100 - args.Field1 = s - args.Field2 = i - atomic.AddUint64(&reqNum, 1) - return args, nil -} - -var host = flag.String("s", "0.0.0.0:8972", "listened ip and port") -var isWarden = flag.Bool("w", true, "is warden or grpc client") - -func main() { - go func() { - log.Println("run http at :6060") - http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) { - h := promhttp.Handler() - h.ServeHTTP(w, r) - }) - log.Println(http.ListenAndServe("0.0.0.0:6060", nil)) - }() - - flag.Parse() - - go stat() - if *isWarden { - runWarden() - } else { - runGrpc() - } -} - -func runGrpc() { - log.Println("run grpc") - lis, err := net.Listen("tcp", *host) - if err != nil { - log.Fatalf("failed to listen: %v", err) - } - s := grpc.NewServer(grpc.InitialWindowSize(iws), - grpc.InitialConnWindowSize(iwsc), - grpc.ReadBufferSize(readBuffer), - grpc.WriteBufferSize(writeBuffer)) - proto.RegisterHelloServer(s, &Hello{}) - s.Serve(lis) -} - -func runWarden() { - log.Println("run warden") - s := warden.NewServer(&warden.ServerConfig{Timeout: xtime.Duration(time.Second * 3)}, - grpc.InitialWindowSize(iws), - grpc.InitialConnWindowSize(iwsc), - grpc.ReadBufferSize(readBuffer), - grpc.WriteBufferSize(writeBuffer)) - proto.RegisterHelloServer(s.Server(), &Hello{}) - s.Run(*host) -} - -func stat() { - ticker := time.NewTicker(time.Second * 5) - defer ticker.Stop() - var last uint64 - lastTs := uint64(time.Now().UnixNano()) - for { - <-ticker.C - now := atomic.LoadUint64(&reqNum) - nowTs := uint64(time.Now().UnixNano()) - qps := (now - last) * 1e6 / ((nowTs - lastTs) / 1e3) - last = now - lastTs = nowTs - log.Println("qps:", qps) - } -} diff --git a/pkg/net/rpc/warden/internal/benchmark/helloworld/client.sh b/pkg/net/rpc/warden/internal/benchmark/helloworld/client.sh deleted file mode 100755 index 3490e7139..000000000 --- a/pkg/net/rpc/warden/internal/benchmark/helloworld/client.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -go build -o client greeter_client.go -echo size 100 concurrent 30 -./client -s 100 -c 30 -echo size 1000 concurrent 30 -./client -s 1000 -c 30 -echo size 10000 concurrent 30 -./client -s 10000 -c 30 -echo size 100 concurrent 300 -./client -s 100 -c 300 -echo size 1000 concurrent 300 -./client -s 1000 -c 300 -echo size 10000 concurrent 300 -./client -s 10000 -c 300 -rm client \ No newline at end of file diff --git 
a/pkg/net/rpc/warden/internal/benchmark/helloworld/client/greeter_client.go b/pkg/net/rpc/warden/internal/benchmark/helloworld/client/greeter_client.go deleted file mode 100644 index fad9a8d6d..000000000 --- a/pkg/net/rpc/warden/internal/benchmark/helloworld/client/greeter_client.go +++ /dev/null @@ -1,83 +0,0 @@ -package main - -import ( - "context" - "flag" - "fmt" - "math/rand" - "sync" - "sync/atomic" - "time" - - "github.com/go-kratos/kratos/pkg/net/netutil/breaker" - "github.com/go-kratos/kratos/pkg/net/rpc/warden" - pb "github.com/go-kratos/kratos/pkg/net/rpc/warden/internal/proto/testproto" - xtime "github.com/go-kratos/kratos/pkg/time" -) - -var ( - ccf = &warden.ClientConfig{ - Dial: xtime.Duration(time.Second * 10), - Timeout: xtime.Duration(time.Second * 10), - Breaker: &breaker.Config{ - Window: xtime.Duration(3 * time.Second), - Bucket: 10, - Request: 20, - K: 1.5, - }, - } - cli pb.GreeterClient - wg sync.WaitGroup - reqSize int - concurrency int - request int - all int64 -) - -func init() { - flag.IntVar(&reqSize, "s", 10, "request size") - flag.IntVar(&concurrency, "c", 10, "concurrency") - flag.IntVar(&request, "r", 1000, "request per routine") -} - -func main() { - flag.Parse() - name := randSeq(reqSize) - cli = newClient() - for i := 0; i < concurrency; i++ { - wg.Add(1) - go sayHello(&pb.HelloRequest{Name: name}) - } - wg.Wait() - fmt.Printf("per request cost %v\n", all/int64(request*concurrency)) -} - -func sayHello(in *pb.HelloRequest) { - defer wg.Done() - now := time.Now() - for i := 0; i < request; i++ { - cli.SayHello(context.TODO(), in) - } - delta := time.Since(now) - atomic.AddInt64(&all, int64(delta/time.Millisecond)) -} - -var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") - -func randSeq(n int) string { - b := make([]rune, n) - for i := range b { - b[i] = letters[rand.Intn(len(letters))] - } - return string(b) -} - -func newClient() (cli pb.GreeterClient) { - client := warden.NewClient(ccf) - conn, err := client.Dial(context.TODO(), "127.0.0.1:9999") - if err != nil { - return - } - cli = pb.NewGreeterClient(conn) - return -} diff --git a/pkg/net/rpc/warden/internal/benchmark/helloworld/server/greeter_server.go b/pkg/net/rpc/warden/internal/benchmark/helloworld/server/greeter_server.go deleted file mode 100644 index 7e621deb6..000000000 --- a/pkg/net/rpc/warden/internal/benchmark/helloworld/server/greeter_server.go +++ /dev/null @@ -1,49 +0,0 @@ -package main - -import ( - "context" - "net/http" - "time" - - "github.com/go-kratos/kratos/pkg/net/rpc/warden" - pb "github.com/go-kratos/kratos/pkg/net/rpc/warden/internal/proto/testproto" - xtime "github.com/go-kratos/kratos/pkg/time" - - "github.com/prometheus/client_golang/prometheus/promhttp" -) - -var ( - config = &warden.ServerConfig{Timeout: xtime.Duration(time.Second)} -) - -func main() { - newServer() -} - -type hello struct { -} - -func (s *hello) SayHello(c context.Context, in *pb.HelloRequest) (out *pb.HelloReply, err error) { - out = new(pb.HelloReply) - out.Message = in.Name - return -} - -func (s *hello) StreamHello(ss pb.Greeter_StreamHelloServer) error { - return nil -} -func newServer() { - server := warden.NewServer(config) - pb.RegisterGreeterServer(server.Server(), &hello{}) - go func() { - http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) { - h := promhttp.Handler() - h.ServeHTTP(w, r) - }) - http.ListenAndServe("0.0.0.0:9998", nil) - }() - err := server.Run(":9999") - if err != nil { - return - } -} diff --git 
a/pkg/net/rpc/warden/internal/encoding/json/json.go b/pkg/net/rpc/warden/internal/encoding/json/json.go deleted file mode 100644 index dd1f320d5..000000000 --- a/pkg/net/rpc/warden/internal/encoding/json/json.go +++ /dev/null @@ -1,53 +0,0 @@ -package codec - -import ( - "bytes" - "encoding/json" - - "github.com/gogo/protobuf/jsonpb" - "github.com/gogo/protobuf/proto" - "google.golang.org/grpc/encoding" -) - -//Reference https://jbrandhorst.com/post/grpc-json/ -func init() { - encoding.RegisterCodec(JSON{ - Marshaler: jsonpb.Marshaler{ - EmitDefaults: true, - OrigName: true, - }, - }) -} - -// JSON is impl of encoding.Codec -type JSON struct { - jsonpb.Marshaler - jsonpb.Unmarshaler -} - -// Name is name of JSON -func (j JSON) Name() string { - return "json" -} - -// Marshal is json marshal -func (j JSON) Marshal(v interface{}) (out []byte, err error) { - if pm, ok := v.(proto.Message); ok { - b := new(bytes.Buffer) - err := j.Marshaler.Marshal(b, pm) - if err != nil { - return nil, err - } - return b.Bytes(), nil - } - return json.Marshal(v) -} - -// Unmarshal is json unmarshal -func (j JSON) Unmarshal(data []byte, v interface{}) (err error) { - if pm, ok := v.(proto.Message); ok { - b := bytes.NewBuffer(data) - return j.Unmarshaler.Unmarshal(b, pm) - } - return json.Unmarshal(data, v) -} diff --git a/pkg/net/rpc/warden/internal/examples/client/client.go b/pkg/net/rpc/warden/internal/examples/client/client.go deleted file mode 100644 index 54bd46b37..000000000 --- a/pkg/net/rpc/warden/internal/examples/client/client.go +++ /dev/null @@ -1,31 +0,0 @@ -package main - -import ( - "context" - "flag" - "fmt" - - "github.com/go-kratos/kratos/pkg/log" - "github.com/go-kratos/kratos/pkg/net/rpc/warden" - pb "github.com/go-kratos/kratos/pkg/net/rpc/warden/internal/proto/testproto" -) - -// usage: ./client -grpc.target=test.service=127.0.0.1:9000 -func main() { - log.Init(&log.Config{Stdout: true}) - flag.Parse() - conn, err := warden.NewClient(nil).Dial(context.Background(), "direct://default/127.0.0.1:9000") - if err != nil { - panic(err) - } - cli := pb.NewGreeterClient(conn) - normalCall(cli) -} - -func normalCall(cli pb.GreeterClient) { - reply, err := cli.SayHello(context.Background(), &pb.HelloRequest{Name: "tom", Age: 23}) - if err != nil { - panic(err) - } - fmt.Println("get reply:", *reply) -} diff --git a/pkg/net/rpc/warden/internal/examples/grpcDebug/client.go b/pkg/net/rpc/warden/internal/examples/grpcDebug/client.go deleted file mode 100644 index d2f1f232c..000000000 --- a/pkg/net/rpc/warden/internal/examples/grpcDebug/client.go +++ /dev/null @@ -1,191 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "flag" - "fmt" - "io/ioutil" - "math/rand" - "net/http" - "os" - "strings" - - "github.com/gogo/protobuf/jsonpb" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/encoding" -) - -// Reply for test -type Reply struct { - res []byte -} - -type Discovery struct { - HttpClient *http.Client - Nodes []string -} - -var ( - data string - file string - method string - addr string - tlsCert string - tlsServerName string - appID string - env string -) - -//Reference https://jbrandhorst.com/post/grpc-json/ -func init() { - encoding.RegisterCodec(JSON{ - Marshaler: jsonpb.Marshaler{ - EmitDefaults: true, - OrigName: true, - }, - }) - flag.StringVar(&data, "data", `{"name":"longxia","age":19}`, `{"name":"longxia","age":19}`) - flag.StringVar(&file, "file", ``, `./data.json`) - flag.StringVar(&method, "method", "/testproto.Greeter/SayHello", 
`/testproto.Greeter/SayHello`) - flag.StringVar(&addr, "addr", "127.0.0.1:8080", `127.0.0.1:8080`) - flag.StringVar(&tlsCert, "cert", "", `./cert.pem`) - flag.StringVar(&tlsServerName, "server_name", "", `hello_server`) - flag.StringVar(&appID, "appid", "", `appid`) - flag.StringVar(&env, "env", "", `env`) -} - -// Because this example uses JSON as the transport format, it should only be used for debugging or testing; using it in production will degrade performance -// Usage: -// ./grpcDebug -data='{"name":"xia","age":19}' -addr=127.0.0.1:8080 -method=/testproto.Greeter/SayHello -// ./grpcDebug -file=data.json -addr=127.0.0.1:8080 -method=/testproto.Greeter/SayHello -// DEPLOY_ENV=uat ./grpcDebug -appid=main.community.reply-service -method=/reply.service.v1.Reply/ReplyInfoCache -data='{"rp_id"=1493769244}' -func main() { - flag.Parse() - opts := []grpc.DialOption{ - grpc.WithInsecure(), - grpc.WithDefaultCallOptions(grpc.CallContentSubtype(JSON{}.Name())), - } - if tlsCert != "" { - creds, err := credentials.NewClientTLSFromFile(tlsCert, tlsServerName) - if err != nil { - panic(err) - } - opts = append(opts, grpc.WithTransportCredentials(creds)) - } - if file != "" { - content, err := ioutil.ReadFile(file) - if err != nil { - fmt.Printf("ioutil.ReadFile(%s) error(%v)\n", file, err) - os.Exit(1) - } - if len(content) > 0 { - data = string(content) - } - } - if appID != "" { - addr = ipFromDiscovery(appID, env) - } - conn, err := grpc.Dial(addr, opts...) - if err != nil { - panic(err) - } - var reply Reply - err = grpc.Invoke(context.Background(), method, []byte(data), &reply, conn) - if err != nil { - panic(err) - } - fmt.Println(string(reply.res)) -} - -func ipFromDiscovery(appID, env string) string { - d := &Discovery{ - Nodes: []string{"discovery.bilibili.co", "api.bilibili.co"}, - HttpClient: http.DefaultClient, - } - deployEnv := os.Getenv("DEPLOY_ENV") - if deployEnv != "" { - env = deployEnv - } - return d.addr(appID, env, d.nodes()) -} - -func (d *Discovery) nodes() (addrs []string) { - res := new(struct { - Code int `json:"code"` - Data []struct { - Addr string `json:"addr"` - } `json:"data"` - }) - resp, err := d.HttpClient.Get(fmt.Sprintf("http://%s/discovery/nodes", d.Nodes[rand.Intn(len(d.Nodes))])) - if err != nil { - panic(err) - } - defer resp.Body.Close() - if err = json.NewDecoder(resp.Body).Decode(&res); err != nil { - panic(err) - } - for _, data := range res.Data { - addrs = append(addrs, data.Addr) - } - return -} - -func (d *Discovery) addr(appID, env string, nodes []string) (ip string) { - res := new(struct { - Code int `json:"code"` - Message string `json:"message"` - Data map[string]*struct { - ZoneInstances map[string][]*struct { - AppID string `json:"appid"` - Addrs []string `json:"addrs"` - } `json:"zone_instances"` - } `json:"data"` - }) - host, _ := os.Hostname() - resp, err := d.HttpClient.Get(fmt.Sprintf("http://%s/discovery/polls?appid=%s&env=%s&hostname=%s", nodes[rand.Intn(len(nodes))], appID, env, host)) - if err != nil { - panic(err) - } - defer resp.Body.Close() - if err = json.NewDecoder(resp.Body).Decode(&res); err != nil { - panic(err) - } - for _, data := range res.Data { - for _, zoneInstance := range data.ZoneInstances { - for _, instance := range zoneInstance { - if instance.AppID == appID { - for _, addr := range instance.Addrs { - if strings.Contains(addr, "grpc://") { - return strings.Replace(addr, "grpc://", "", -1) - } - } - } - } - } - } - return -} - -// JSON is impl of encoding.Codec -type JSON struct { - jsonpb.Marshaler - jsonpb.Unmarshaler -} - -// Name is name of JSON -func (j JSON) Name() string { - return "json" -} - -// Marshal is json 
marshal -func (j JSON) Marshal(v interface{}) (out []byte, err error) { - return v.([]byte), nil -} - -// Unmarshal is json unmarshal -func (j JSON) Unmarshal(data []byte, v interface{}) (err error) { - v.(*Reply).res = data - return nil -} diff --git a/pkg/net/rpc/warden/internal/examples/grpcDebug/data.json b/pkg/net/rpc/warden/internal/examples/grpcDebug/data.json deleted file mode 100644 index 3e65f07e1..000000000 --- a/pkg/net/rpc/warden/internal/examples/grpcDebug/data.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"xia","age":19} \ No newline at end of file diff --git a/pkg/net/rpc/warden/internal/examples/server/main.go b/pkg/net/rpc/warden/internal/examples/server/main.go deleted file mode 100644 index 3c37abdb8..000000000 --- a/pkg/net/rpc/warden/internal/examples/server/main.go +++ /dev/null @@ -1,104 +0,0 @@ -package main - -import ( - "context" - "fmt" - "io" - "os" - "os/signal" - "syscall" - "time" - - "github.com/go-kratos/kratos/pkg/ecode" - "github.com/go-kratos/kratos/pkg/log" - "github.com/go-kratos/kratos/pkg/net/rpc/warden" - pb "github.com/go-kratos/kratos/pkg/net/rpc/warden/internal/proto/testproto" - xtime "github.com/go-kratos/kratos/pkg/time" - - "google.golang.org/grpc" -) - -type helloServer struct { - addr string -} - -func (s *helloServer) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { - if in.Name == "err_detail_test" { - err, _ := ecode.Error(ecode.AccessDenied, "AccessDenied").WithDetails(&pb.HelloReply{Success: true, Message: "this is test detail"}) - return nil, err - } - return &pb.HelloReply{Message: fmt.Sprintf("hello %s from %s", in.Name, s.addr)}, nil -} - -func (s *helloServer) StreamHello(ss pb.Greeter_StreamHelloServer) error { - for i := 0; i < 3; i++ { - in, err := ss.Recv() - if err == io.EOF { - return nil - } - if err != nil { - return err - } - ret := &pb.HelloReply{Message: "Hello " + in.Name, Success: true} - err = ss.Send(ret) - if err != nil { - return err - } - } - return nil -} - -func runServer(addr string) *warden.Server { - server := warden.NewServer(&warden.ServerConfig{ - //default timeout for each request on the server side - Timeout: xtime.Duration(time.Second), - }) - server.Use(middleware()) - pb.RegisterGreeterServer(server.Server(), &helloServer{addr: addr}) - go func() { - err := server.Run(addr) - if err != nil { - panic("run server failed!" 
+ err.Error()) - } - }() - return server -} - -func main() { - log.Init(&log.Config{Stdout: true}) - server := runServer("0.0.0.0:8081") - signalHandler(server) -} - -//works like a middleware -func middleware() grpc.UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - //log the invoked method - log.Info("method:%s", info.FullMethod) - //call chain - resp, err = handler(ctx, req) - return - } -} - -func signalHandler(s *warden.Server) { - var ( - ch = make(chan os.Signal, 1) - ) - signal.Notify(ch, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT) - for { - si := <-ch - switch si { - case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT: - log.Info("get a signal %s, stop the consume process", si.String()) - ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) - defer cancel() - //gracefully shutdown with timeout - s.Shutdown(ctx) - return - case syscall.SIGHUP: - default: - return - } - } -} diff --git a/pkg/net/rpc/warden/internal/metadata/metadata.go b/pkg/net/rpc/warden/internal/metadata/metadata.go deleted file mode 100644 index e1fe5ba78..000000000 --- a/pkg/net/rpc/warden/internal/metadata/metadata.go +++ /dev/null @@ -1,11 +0,0 @@ -package metadata - -const ( - CPUUsage = "cpu_usage" -) - -// MD is context metadata for balancer and resolver -type MD struct { - Weight uint64 - Color string -} diff --git a/pkg/net/rpc/warden/internal/proto/testproto/hello.pb.go b/pkg/net/rpc/warden/internal/proto/testproto/hello.pb.go deleted file mode 100644 index 9c9679b9e..000000000 --- a/pkg/net/rpc/warden/internal/proto/testproto/hello.pb.go +++ /dev/null @@ -1,642 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: hello.proto - -/* - Package testproto is a generated protocol buffer package. - - It is generated from these files: - hello.proto - - It has these top-level messages: - HelloRequest - HelloReply -*/ -package testproto - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/gogo/protobuf/gogoproto" - -import context "golang.org/x/net/context" -import grpc "google.golang.org/grpc" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// The request message containing the user's name. 
-type HelloRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" validate:"required"` - Age int32 `protobuf:"varint,2,opt,name=age,proto3" json:"age" validate:"min=0"` -} - -func (m *HelloRequest) Reset() { *m = HelloRequest{} } -func (m *HelloRequest) String() string { return proto.CompactTextString(m) } -func (*HelloRequest) ProtoMessage() {} -func (*HelloRequest) Descriptor() ([]byte, []int) { return fileDescriptorHello, []int{0} } - -// The response message containing the greetings -type HelloReply struct { - Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` - Success bool `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` -} - -func (m *HelloReply) Reset() { *m = HelloReply{} } -func (m *HelloReply) String() string { return proto.CompactTextString(m) } -func (*HelloReply) ProtoMessage() {} -func (*HelloReply) Descriptor() ([]byte, []int) { return fileDescriptorHello, []int{1} } - -func init() { - proto.RegisterType((*HelloRequest)(nil), "testproto.HelloRequest") - proto.RegisterType((*HelloReply)(nil), "testproto.HelloReply") -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for Greeter service - -type GreeterClient interface { - // Sends a greeting - SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) - // A bidirectional streaming RPC call recvice HelloRequest return HelloReply - StreamHello(ctx context.Context, opts ...grpc.CallOption) (Greeter_StreamHelloClient, error) -} - -type greeterClient struct { - cc *grpc.ClientConn -} - -func NewGreeterClient(cc *grpc.ClientConn) GreeterClient { - return &greeterClient{cc} -} - -func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) { - out := new(HelloReply) - err := grpc.Invoke(ctx, "/testproto.Greeter/SayHello", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *greeterClient) StreamHello(ctx context.Context, opts ...grpc.CallOption) (Greeter_StreamHelloClient, error) { - stream, err := grpc.NewClientStream(ctx, &_Greeter_serviceDesc.Streams[0], c.cc, "/testproto.Greeter/StreamHello", opts...) 
- if err != nil { - return nil, err - } - x := &greeterStreamHelloClient{stream} - return x, nil -} - -type Greeter_StreamHelloClient interface { - Send(*HelloRequest) error - Recv() (*HelloReply, error) - grpc.ClientStream -} - -type greeterStreamHelloClient struct { - grpc.ClientStream -} - -func (x *greeterStreamHelloClient) Send(m *HelloRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *greeterStreamHelloClient) Recv() (*HelloReply, error) { - m := new(HelloReply) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// Server API for Greeter service - -type GreeterServer interface { - // Sends a greeting - SayHello(context.Context, *HelloRequest) (*HelloReply, error) - // A bidirectional streaming RPC call recvice HelloRequest return HelloReply - StreamHello(Greeter_StreamHelloServer) error -} - -func RegisterGreeterServer(s *grpc.Server, srv GreeterServer) { - s.RegisterService(&_Greeter_serviceDesc, srv) -} - -func _Greeter_SayHello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HelloRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GreeterServer).SayHello(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/testproto.Greeter/SayHello", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GreeterServer).SayHello(ctx, req.(*HelloRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Greeter_StreamHello_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(GreeterServer).StreamHello(&greeterStreamHelloServer{stream}) -} - -type Greeter_StreamHelloServer interface { - Send(*HelloReply) error - Recv() (*HelloRequest, error) - grpc.ServerStream -} - -type greeterStreamHelloServer struct { - grpc.ServerStream -} - -func (x *greeterStreamHelloServer) Send(m *HelloReply) error { - return x.ServerStream.SendMsg(m) -} - -func (x *greeterStreamHelloServer) Recv() (*HelloRequest, error) { - m := new(HelloRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _Greeter_serviceDesc = grpc.ServiceDesc{ - ServiceName: "testproto.Greeter", - HandlerType: (*GreeterServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "SayHello", - Handler: _Greeter_SayHello_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "StreamHello", - Handler: _Greeter_StreamHello_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "hello.proto", -} - -func (m *HelloRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HelloRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintHello(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if m.Age != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintHello(dAtA, i, uint64(m.Age)) - } - return i, nil -} - -func (m *HelloReply) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HelloReply) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Message) 
> 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintHello(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - } - if m.Success { - dAtA[i] = 0x10 - i++ - if m.Success { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func encodeVarintHello(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *HelloRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovHello(uint64(l)) - } - if m.Age != 0 { - n += 1 + sovHello(uint64(m.Age)) - } - return n -} - -func (m *HelloReply) Size() (n int) { - var l int - _ = l - l = len(m.Message) - if l > 0 { - n += 1 + l + sovHello(uint64(l)) - } - if m.Success { - n += 2 - } - return n -} - -func sovHello(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozHello(x uint64) (n int) { - return sovHello(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *HelloRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HelloRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HelloRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHello - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Age", wireType) - } - m.Age = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Age |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipHello(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthHello - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HelloReply) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HelloReply: wiretype end group for non-group") - } - 
if fieldNum <= 0 { - return fmt.Errorf("proto: HelloReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHello - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHello - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Success = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipHello(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthHello - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipHello(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowHello - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowHello - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowHello - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthHello - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowHello - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipHello(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthHello = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowHello = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("hello.proto", fileDescriptorHello) } - -var fileDescriptorHello = []byte{ - // 296 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 
0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x90, 0x3f, 0x4e, 0xc3, 0x30, - 0x14, 0xc6, 0x63, 0xfe, 0xb5, 0x75, 0x19, 0x90, 0x11, 0x22, 0x2a, 0x92, 0x53, 0x79, 0xca, 0xd2, - 0xb4, 0xa2, 0x1b, 0x02, 0x09, 0x85, 0x01, 0xe6, 0xf4, 0x04, 0x4e, 0xfa, 0x48, 0x23, 0x25, 0x75, - 0x6a, 0x3b, 0x48, 0xb9, 0x03, 0x07, 0xe0, 0x48, 0x1d, 0x7b, 0x82, 0x88, 0x86, 0xad, 0x63, 0x4f, - 0x80, 0x62, 0x28, 0x20, 0xb1, 0x75, 0x7b, 0x3f, 0x7f, 0xfa, 0x7e, 0x4f, 0x7e, 0xb8, 0x3b, 0x83, - 0x34, 0x15, 0x5e, 0x2e, 0x85, 0x16, 0xa4, 0xa3, 0x41, 0x69, 0x33, 0xf6, 0x06, 0x71, 0xa2, 0x67, - 0x45, 0xe8, 0x45, 0x22, 0x1b, 0xc6, 0x22, 0x16, 0x43, 0xf3, 0x1c, 0x16, 0xcf, 0x86, 0x0c, 0x98, - 0xe9, 0xab, 0xc9, 0x24, 0x3e, 0x7d, 0x6a, 0x44, 0x01, 0x2c, 0x0a, 0x50, 0x9a, 0x8c, 0xf1, 0xd1, - 0x9c, 0x67, 0x60, 0xa3, 0x3e, 0x72, 0x3b, 0xbe, 0xb3, 0xa9, 0x1c, 0xc3, 0xdb, 0xca, 0x39, 0x7f, - 0xe1, 0x69, 0x32, 0xe5, 0x1a, 0x6e, 0x98, 0x84, 0x45, 0x91, 0x48, 0x98, 0xb2, 0xc0, 0x84, 0x64, - 0x80, 0x0f, 0x79, 0x0c, 0xf6, 0x41, 0x1f, 0xb9, 0xc7, 0xfe, 0xd5, 0xa6, 0x72, 0x1a, 0xdc, 0x56, - 0xce, 0xd9, 0x6f, 0x25, 0x4b, 0xe6, 0x77, 0x23, 0x16, 0x34, 0x01, 0xbb, 0xc7, 0xf8, 0x7b, 0x67, - 0x9e, 0x96, 0xc4, 0xc6, 0xad, 0x0c, 0x94, 0x6a, 0x04, 0x66, 0x69, 0xb0, 0xc3, 0x26, 0x51, 0x45, - 0x14, 0x81, 0x52, 0x46, 0xdd, 0x0e, 0x76, 0x78, 0xfd, 0x8a, 0x70, 0xeb, 0x51, 0x02, 0x68, 0x90, - 0xe4, 0x16, 0xb7, 0x27, 0xbc, 0x34, 0x42, 0x72, 0xe9, 0xfd, 0x1c, 0xc2, 0xfb, 0xfb, 0xad, 0xde, - 0xc5, 0xff, 0x20, 0x4f, 0x4b, 0x66, 0x91, 0x07, 0xdc, 0x9d, 0x68, 0x09, 0x3c, 0xdb, 0x53, 0xe0, - 0xa2, 0x11, 0xf2, 0xed, 0xe5, 0x9a, 0x5a, 0xab, 0x35, 0xb5, 0x96, 0x35, 0x45, 0xab, 0x9a, 0xa2, - 0xf7, 0x9a, 0xa2, 0xb7, 0x0f, 0x6a, 0x85, 0x27, 0xa6, 0x31, 0xfe, 0x0c, 0x00, 0x00, 0xff, 0xff, - 0x13, 0x57, 0x88, 0x03, 0xae, 0x01, 0x00, 0x00, -} diff --git a/pkg/net/rpc/warden/internal/proto/testproto/hello.proto b/pkg/net/rpc/warden/internal/proto/testproto/hello.proto deleted file mode 100644 index 4fd6d24c3..000000000 --- a/pkg/net/rpc/warden/internal/proto/testproto/hello.proto +++ /dev/null @@ -1,33 +0,0 @@ -syntax = "proto3"; - -package testproto; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; - -option (gogoproto.goproto_enum_prefix_all) = false; -option (gogoproto.goproto_getters_all) = false; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.goproto_registration) = true; - -// The greeting service definition. -service Greeter { - // Sends a greeting - rpc SayHello (HelloRequest) returns (HelloReply) {} - - // A bidirectional streaming RPC call recvice HelloRequest return HelloReply - rpc StreamHello(stream HelloRequest) returns (stream HelloReply) {} -} - -// The request message containing the user's name. 
-message HelloRequest { - string name = 1 [(gogoproto.jsontag) = "name", (gogoproto.moretags) = "validate:\"required\""]; - int32 age = 2 [(gogoproto.jsontag) = "age", (gogoproto.moretags) = "validate:\"min=0\""]; -} - -// The response message containing the greetings -message HelloReply { - string message = 1; - bool success = 2; -} diff --git a/pkg/net/rpc/warden/internal/status/status.go b/pkg/net/rpc/warden/internal/status/status.go deleted file mode 100644 index 3e3b02b1e..000000000 --- a/pkg/net/rpc/warden/internal/status/status.go +++ /dev/null @@ -1,120 +0,0 @@ -package status - -import ( - "context" - "strconv" - - "github.com/go-kratos/kratos/pkg/ecode" - - "github.com/golang/protobuf/proto" - "github.com/pkg/errors" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// togRPCCode convert ecode.Codo to gRPC code -func togRPCCode(code ecode.Codes) codes.Code { - switch code.Code() { - case ecode.OK.Code(): - return codes.OK - case ecode.RequestErr.Code(): - return codes.InvalidArgument - case ecode.NothingFound.Code(): - return codes.NotFound - case ecode.Unauthorized.Code(): - return codes.Unauthenticated - case ecode.AccessDenied.Code(): - return codes.PermissionDenied - case ecode.LimitExceed.Code(): - return codes.ResourceExhausted - case ecode.MethodNotAllowed.Code(): - return codes.Unimplemented - case ecode.Deadline.Code(): - return codes.DeadlineExceeded - case ecode.ServiceUnavailable.Code(): - return codes.Unavailable - } - return codes.Unknown -} - -func toECode(gst *status.Status) ecode.Code { - gcode := gst.Code() - switch gcode { - case codes.OK: - return ecode.OK - case codes.InvalidArgument: - return ecode.RequestErr - case codes.NotFound: - return ecode.NothingFound - case codes.PermissionDenied: - return ecode.AccessDenied - case codes.Unauthenticated: - return ecode.Unauthorized - case codes.ResourceExhausted: - return ecode.LimitExceed - case codes.Unimplemented: - return ecode.MethodNotAllowed - case codes.DeadlineExceeded: - return ecode.Deadline - case codes.Unavailable: - return ecode.ServiceUnavailable - case codes.Unknown: - return ecode.String(gst.Message()) - } - return ecode.ServerErr -} - -// FromError convert error for service reply and try to convert it to grpc.Status. -func FromError(svrErr error) (gst *status.Status) { - var err error - svrErr = errors.Cause(svrErr) - if code, ok := svrErr.(ecode.Codes); ok { - // TODO: deal with err - if gst, err = gRPCStatusFromEcode(code); err == nil { - return - } - } - // for some special error convert context.Canceled to ecode.Canceled, - // context.DeadlineExceeded to ecode.DeadlineExceeded only for raw error - // if err be wrapped will not effect. 
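	// Illustrative round trips, consistent with the unit tests below:
	//
	//	FromError(ecode.RequestErr).Message()       // "-400", with gRPC code codes.Unknown
	//	FromError(context.Canceled).Message()       // "-498" (ecode.Canceled)
	//	ToEcode(FromError(ecode.RequestErr)).Code() // -400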
- switch svrErr { - case context.Canceled: - gst, _ = gRPCStatusFromEcode(ecode.Canceled) - case context.DeadlineExceeded: - gst, _ = gRPCStatusFromEcode(ecode.Deadline) - default: - gst, _ = status.FromError(svrErr) - } - return -} - -func gRPCStatusFromEcode(code ecode.Codes) (*status.Status, error) { - var st *ecode.Status - switch v := code.(type) { - case *ecode.Status: - st = v - case ecode.Code: - st = ecode.FromCode(v) - default: - st = ecode.Error(ecode.Code(code.Code()), code.Message()) - for _, detail := range code.Details() { - if msg, ok := detail.(proto.Message); ok { - st.WithDetails(msg) - } - } - } - gst := status.New(codes.Unknown, strconv.Itoa(st.Code())) - return gst.WithDetails(st.Proto()) -} - -// ToEcode convert grpc.status to ecode.Codes -func ToEcode(gst *status.Status) ecode.Codes { - details := gst.Details() - for _, detail := range details { - // convert detail to status only use first detail - if pb, ok := detail.(proto.Message); ok { - return ecode.FromProto(pb) - } - } - return toECode(gst) -} diff --git a/pkg/net/rpc/warden/internal/status/status_test.go b/pkg/net/rpc/warden/internal/status/status_test.go deleted file mode 100644 index 5d996419d..000000000 --- a/pkg/net/rpc/warden/internal/status/status_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package status - -import ( - "context" - "errors" - "fmt" - "testing" - "time" - - "github.com/golang/protobuf/ptypes/timestamp" - pkgerr "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/go-kratos/kratos/pkg/ecode" -) - -func TestCodeConvert(t *testing.T) { - var table = map[codes.Code]ecode.Code{ - codes.OK: ecode.OK, - // codes.Canceled - codes.Unknown: ecode.ServerErr, - codes.InvalidArgument: ecode.RequestErr, - codes.DeadlineExceeded: ecode.Deadline, - codes.NotFound: ecode.NothingFound, - // codes.AlreadyExists - codes.PermissionDenied: ecode.AccessDenied, - codes.ResourceExhausted: ecode.LimitExceed, - // codes.FailedPrecondition - // codes.Aborted - // codes.OutOfRange - codes.Unimplemented: ecode.MethodNotAllowed, - codes.Unavailable: ecode.ServiceUnavailable, - // codes.DataLoss - codes.Unauthenticated: ecode.Unauthorized, - } - for k, v := range table { - assert.Equal(t, toECode(status.New(k, "-500")), v) - } - for k, v := range table { - assert.Equal(t, togRPCCode(v), k, fmt.Sprintf("togRPC code error: %d -> %d", v, k)) - } -} - -func TestNoDetailsConvert(t *testing.T) { - gst := status.New(codes.Unknown, "-2233") - assert.Equal(t, toECode(gst).Code(), -2233) - - gst = status.New(codes.Internal, "") - assert.Equal(t, toECode(gst).Code(), -500) -} - -func TestFromError(t *testing.T) { - t.Run("input general error", func(t *testing.T) { - err := errors.New("general error") - gst := FromError(err) - - assert.Equal(t, codes.Unknown, gst.Code()) - assert.Contains(t, gst.Message(), "general") - }) - t.Run("input wrap error", func(t *testing.T) { - err := pkgerr.Wrap(ecode.RequestErr, "hh") - gst := FromError(err) - - assert.Equal(t, "-400", gst.Message()) - }) - t.Run("input ecode.Code", func(t *testing.T) { - err := ecode.RequestErr - gst := FromError(err) - - //assert.Equal(t, codes.InvalidArgument, gst.Code()) - // NOTE: set all grpc.status as Unknown when error is ecode.Codes for compatible - assert.Equal(t, codes.Unknown, gst.Code()) - // NOTE: gst.Message == str(ecode.Code) for compatible php leagcy code - assert.Equal(t, err.Message(), gst.Message()) - }) - t.Run("input raw Canceled", func(t *testing.T) { - gst 
:= FromError(context.Canceled) - - assert.Equal(t, codes.Unknown, gst.Code()) - assert.Equal(t, "-498", gst.Message()) - }) - t.Run("input raw DeadlineExceeded", func(t *testing.T) { - gst := FromError(context.DeadlineExceeded) - - assert.Equal(t, codes.Unknown, gst.Code()) - assert.Equal(t, "-504", gst.Message()) - }) - t.Run("input ecode.Status", func(t *testing.T) { - m := ×tamp.Timestamp{Seconds: time.Now().Unix()} - err, _ := ecode.Error(ecode.Unauthorized, "unauthorized").WithDetails(m) - gst := FromError(err) - - //assert.Equal(t, codes.Unauthenticated, gst.Code()) - // NOTE: set all grpc.status as Unknown when error is ecode.Codes for compatible - assert.Equal(t, codes.Unknown, gst.Code()) - assert.Len(t, gst.Details(), 1) - details := gst.Details() - assert.IsType(t, err.Proto(), details[0]) - }) -} - -func TestToEcode(t *testing.T) { - t.Run("input general grpc.Status", func(t *testing.T) { - gst := status.New(codes.Unknown, "unknown") - ec := ToEcode(gst) - - assert.Equal(t, int(ecode.ServerErr), ec.Code()) - assert.Equal(t, "-500", ec.Message()) - assert.Len(t, ec.Details(), 0) - }) - t.Run("input encode.Status", func(t *testing.T) { - m := ×tamp.Timestamp{Seconds: time.Now().Unix()} - st, _ := ecode.Errorf(ecode.Unauthorized, "Unauthorized").WithDetails(m) - gst := status.New(codes.InvalidArgument, "requesterr") - gst, _ = gst.WithDetails(st.Proto()) - ec := ToEcode(gst) - - assert.Equal(t, int(ecode.Unauthorized), ec.Code()) - assert.Equal(t, "Unauthorized", ec.Message()) - assert.Len(t, ec.Details(), 1) - assert.IsType(t, m, ec.Details()[0]) - }) -} diff --git a/pkg/net/rpc/warden/logging.go b/pkg/net/rpc/warden/logging.go deleted file mode 100644 index 5c93aea5c..000000000 --- a/pkg/net/rpc/warden/logging.go +++ /dev/null @@ -1,174 +0,0 @@ -package warden - -import ( - "context" - "fmt" - "strconv" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/peer" - - "github.com/go-kratos/kratos/pkg/ecode" - "github.com/go-kratos/kratos/pkg/log" - "github.com/go-kratos/kratos/pkg/net/metadata" -) - -// Warden Log Flag -const ( - // disable all log. - LogFlagDisable = 1 << iota - // disable print args on log. - LogFlagDisableArgs - // disable info level log. - LogFlagDisableInfo -) - -type logOption struct { - grpc.EmptyDialOption - grpc.EmptyCallOption - flag int8 -} - -// WithLogFlag disable client access log. -func WithLogFlag(flag int8) grpc.CallOption { - return logOption{flag: flag} -} - -// WithDialLogFlag set client level log behaviour. -func WithDialLogFlag(flag int8) grpc.DialOption { - return logOption{flag: flag} -} - -func extractLogCallOption(opts []grpc.CallOption) (flag int8) { - for _, opt := range opts { - if logOpt, ok := opt.(logOption); ok { - return logOpt.flag - } - } - return -} - -func extractLogDialOption(opts []grpc.DialOption) (flag int8) { - for _, opt := range opts { - if logOpt, ok := opt.(logOption); ok { - return logOpt.flag - } - } - return -} - -func logFn(code int, dt time.Duration) func(context.Context, ...log.D) { - switch { - case code < 0: - return log.Errorv - case dt >= time.Millisecond*500: - // TODO: slowlog make it configurable. 
- return log.Warnv - case code > 0: - return log.Warnv - } - return log.Infov -} - -// clientLogging warden grpc logging -func clientLogging(dialOptions ...grpc.DialOption) grpc.UnaryClientInterceptor { - defaultFlag := extractLogDialOption(dialOptions) - return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - logFlag := extractLogCallOption(opts) | defaultFlag - - startTime := time.Now() - var peerInfo peer.Peer - opts = append(opts, grpc.Peer(&peerInfo)) - - // invoker requests - err := invoker(ctx, method, req, reply, cc, opts...) - - // after request - code := ecode.Cause(err).Code() - duration := time.Since(startTime) - // monitor - _metricClientReqDur.Observe(int64(duration/time.Millisecond), method) - _metricClientReqCodeTotal.Inc(method, strconv.Itoa(code)) - - if logFlag&LogFlagDisable != 0 { - return err - } - // TODO: find better way to deal with slow log. - if logFlag&LogFlagDisableInfo != 0 && err == nil && duration < 500*time.Millisecond { - return err - } - logFields := make([]log.D, 0, 7) - logFields = append(logFields, log.KVString("path", method)) - logFields = append(logFields, log.KVInt("ret", code)) - logFields = append(logFields, log.KVFloat64("ts", duration.Seconds())) - logFields = append(logFields, log.KVString("source", "grpc-access-log")) - if peerInfo.Addr != nil { - logFields = append(logFields, log.KVString("ip", peerInfo.Addr.String())) - } - if logFlag&LogFlagDisableArgs == 0 { - // TODO: it will panic if someone remove String method from protobuf message struct that auto generate from protoc. - logFields = append(logFields, log.KVString("args", req.(fmt.Stringer).String())) - } - if err != nil { - logFields = append(logFields, log.KVString("error", err.Error()), log.KVString("stack", fmt.Sprintf("%+v", err))) - } - logFn(code, duration)(ctx, logFields...) - return err - } -} - -// serverLogging warden grpc logging -func serverLogging(logFlag int8) grpc.UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - startTime := time.Now() - caller := metadata.String(ctx, metadata.Caller) - if caller == "" { - caller = "no_user" - } - var remoteIP string - if peerInfo, ok := peer.FromContext(ctx); ok { - remoteIP = peerInfo.Addr.String() - } - var quota float64 - if deadline, ok := ctx.Deadline(); ok { - quota = time.Until(deadline).Seconds() - } - - // call server handler - resp, err := handler(ctx, req) - - // after server response - code := ecode.Cause(err).Code() - duration := time.Since(startTime) - // monitor - _metricServerReqDur.Observe(int64(duration/time.Millisecond), info.FullMethod, caller) - _metricServerReqCodeTotal.Inc(info.FullMethod, caller, strconv.Itoa(code)) - - if logFlag&LogFlagDisable != 0 { - return resp, err - } - // TODO: find better way to deal with slow log. - if logFlag&LogFlagDisableInfo != 0 && err == nil && duration < 500*time.Millisecond { - return resp, err - } - logFields := []log.D{ - log.KVString("user", caller), - log.KVString("ip", remoteIP), - log.KVString("path", info.FullMethod), - log.KVInt("ret", code), - log.KVFloat64("ts", duration.Seconds()), - log.KVFloat64("timeout_quota", quota), - log.KVString("source", "grpc-access-log"), - } - if logFlag&LogFlagDisableArgs == 0 { - // TODO: it will panic if someone remove String method from protobuf message struct that auto generate from protoc. 
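		// To keep request args out of the access log, servers construct this
		// interceptor as serverLogging(LogFlagDisableArgs), while clients can
		// pass the per-call option, e.g.
		//
		//	cli.SayHello(ctx, req, warden.WithLogFlag(warden.LogFlagDisableArgs))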
- logFields = append(logFields, log.KVString("args", req.(fmt.Stringer).String())) - } - if err != nil { - logFields = append(logFields, log.KVString("error", err.Error()), log.KVString("stack", fmt.Sprintf("%+v", err))) - } - logFn(code, duration)(ctx, logFields...) - return resp, err - } -} diff --git a/pkg/net/rpc/warden/logging_test.go b/pkg/net/rpc/warden/logging_test.go deleted file mode 100644 index 74ba602df..000000000 --- a/pkg/net/rpc/warden/logging_test.go +++ /dev/null @@ -1,294 +0,0 @@ -package warden - -import ( - "bytes" - "context" - "errors" - "io/ioutil" - "os" - "reflect" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "google.golang.org/grpc" - - "github.com/go-kratos/kratos/pkg/log" -) - -func Test_logFn(t *testing.T) { - type args struct { - code int - dt time.Duration - } - tests := []struct { - name string - args args - want func(context.Context, ...log.D) - }{ - { - name: "ok", - args: args{code: 0, dt: time.Millisecond}, - want: log.Infov, - }, - { - name: "slowlog", - args: args{code: 0, dt: time.Second}, - want: log.Warnv, - }, - { - name: "business error", - args: args{code: 2233, dt: time.Millisecond}, - want: log.Warnv, - }, - { - name: "system error", - args: args{code: -1, dt: 0}, - want: log.Errorv, - }, - { - name: "system error and slowlog", - args: args{code: -1, dt: time.Second}, - want: log.Errorv, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := logFn(tt.args.code, tt.args.dt); reflect.ValueOf(got).Pointer() != reflect.ValueOf(tt.want).Pointer() { - t.Errorf("unexpect log function!") - } - }) - } -} - -func callInterceptor(err error, interceptor grpc.UnaryClientInterceptor, opts ...grpc.CallOption) { - interceptor(context.Background(), - "test-method", - bytes.NewBufferString("test-req"), - "test_reply", - &grpc.ClientConn{}, - func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, opts ...grpc.CallOption) error { - return err - }, opts...) 
-} - -func TestClientLog(t *testing.T) { - stderr, err := ioutil.TempFile(os.TempDir(), "stderr") - if err != nil { - t.Fatal(err) - } - old := os.Stderr - os.Stderr = stderr - t.Logf("capture stderr file: %s", stderr.Name()) - - t.Run("test no option", func(t *testing.T) { - callInterceptor(nil, clientLogging()) - - stderr.Seek(0, os.SEEK_SET) - - data, err := ioutil.ReadAll(stderr) - if err != nil { - t.Error(err) - } - assert.Contains(t, string(data), "test-method") - assert.Contains(t, string(data), "test-req") - assert.Contains(t, string(data), "path") - assert.Contains(t, string(data), "ret") - assert.Contains(t, string(data), "ts") - assert.Contains(t, string(data), "grpc-access-log") - - stderr.Seek(0, os.SEEK_SET) - stderr.Truncate(0) - }) - - t.Run("test disable args", func(t *testing.T) { - callInterceptor(nil, clientLogging(WithDialLogFlag(LogFlagDisableArgs))) - - stderr.Seek(0, os.SEEK_SET) - - data, err := ioutil.ReadAll(stderr) - if err != nil { - t.Error(err) - } - assert.Contains(t, string(data), "test-method") - assert.NotContains(t, string(data), "test-req") - - stderr.Seek(0, os.SEEK_SET) - stderr.Truncate(0) - }) - - t.Run("test disable args and disable info", func(t *testing.T) { - callInterceptor(nil, clientLogging(WithDialLogFlag(LogFlagDisableArgs|LogFlagDisableInfo))) - callInterceptor(errors.New("test-error"), clientLogging(WithDialLogFlag(LogFlagDisableArgs|LogFlagDisableInfo))) - - stderr.Seek(0, os.SEEK_SET) - - data, err := ioutil.ReadAll(stderr) - if err != nil { - t.Error(err) - } - assert.Contains(t, string(data), "test-method") - assert.Contains(t, string(data), "test-error") - assert.NotContains(t, string(data), "INFO") - - stderr.Seek(0, os.SEEK_SET) - stderr.Truncate(0) - }) - - t.Run("test call option", func(t *testing.T) { - callInterceptor(nil, clientLogging(), WithLogFlag(LogFlagDisableArgs)) - - stderr.Seek(0, os.SEEK_SET) - - data, err := ioutil.ReadAll(stderr) - if err != nil { - t.Error(err) - } - assert.Contains(t, string(data), "test-method") - assert.NotContains(t, string(data), "test-req") - - stderr.Seek(0, os.SEEK_SET) - stderr.Truncate(0) - }) - - t.Run("test combine option", func(t *testing.T) { - interceptor := clientLogging(WithDialLogFlag(LogFlagDisableInfo)) - callInterceptor(nil, interceptor, WithLogFlag(LogFlagDisableArgs)) - callInterceptor(errors.New("test-error"), interceptor, WithLogFlag(LogFlagDisableArgs)) - - stderr.Seek(0, os.SEEK_SET) - - data, err := ioutil.ReadAll(stderr) - if err != nil { - t.Error(err) - } - assert.Contains(t, string(data), "test-method") - assert.Contains(t, string(data), "test-error") - assert.NotContains(t, string(data), "INFO") - - stderr.Seek(0, os.SEEK_SET) - stderr.Truncate(0) - }) - t.Run("test no log", func(t *testing.T) { - callInterceptor(errors.New("test error"), clientLogging(WithDialLogFlag(LogFlagDisable))) - stderr.Seek(0, os.SEEK_SET) - - data, err := ioutil.ReadAll(stderr) - if err != nil { - t.Error(err) - } - assert.Empty(t, data) - - stderr.Seek(0, os.SEEK_SET) - stderr.Truncate(0) - }) - - t.Run("test multi flag", func(t *testing.T) { - interceptor := clientLogging(WithDialLogFlag(LogFlagDisableInfo | LogFlagDisableArgs)) - callInterceptor(nil, interceptor) - callInterceptor(errors.New("test-error"), interceptor) - - stderr.Seek(0, os.SEEK_SET) - - data, err := ioutil.ReadAll(stderr) - if err != nil { - t.Error(err) - } - assert.Contains(t, string(data), "test-method") - assert.Contains(t, string(data), "test-error") - assert.NotContains(t, string(data), "INFO") - - 
stderr.Seek(0, os.SEEK_SET) - stderr.Truncate(0) - }) - os.Stderr = old -} - -func callServerInterceptor(err error, interceptor grpc.UnaryServerInterceptor) { - interceptor(context.Background(), - bytes.NewBufferString("test-req"), - &grpc.UnaryServerInfo{ - FullMethod: "test-method", - }, - func(ctx context.Context, req interface{}) (interface{}, error) { return nil, err }) -} - -func TestServerLog(t *testing.T) { - stderr, err := ioutil.TempFile(os.TempDir(), "stderr") - if err != nil { - t.Fatal(err) - } - old := os.Stderr - os.Stderr = stderr - t.Logf("capture stderr file: %s", stderr.Name()) - - t.Run("test no option", func(t *testing.T) { - callServerInterceptor(nil, serverLogging(0)) - - stderr.Seek(0, os.SEEK_SET) - - data, err := ioutil.ReadAll(stderr) - if err != nil { - t.Error(err) - } - assert.Contains(t, string(data), "test-method") - assert.Contains(t, string(data), "test-req") - assert.Contains(t, string(data), "path") - assert.Contains(t, string(data), "ret") - assert.Contains(t, string(data), "ts") - assert.Contains(t, string(data), "grpc-access-log") - - stderr.Seek(0, os.SEEK_SET) - stderr.Truncate(0) - }) - - t.Run("test disable args", func(t *testing.T) { - callServerInterceptor(nil, serverLogging(LogFlagDisableArgs)) - - stderr.Seek(0, os.SEEK_SET) - - data, err := ioutil.ReadAll(stderr) - if err != nil { - t.Error(err) - } - assert.Contains(t, string(data), "test-method") - assert.NotContains(t, string(data), "test-req") - - stderr.Seek(0, os.SEEK_SET) - stderr.Truncate(0) - }) - - t.Run("test no log", func(t *testing.T) { - callServerInterceptor(errors.New("test error"), serverLogging(LogFlagDisable)) - stderr.Seek(0, os.SEEK_SET) - - data, err := ioutil.ReadAll(stderr) - if err != nil { - t.Error(err) - } - assert.Empty(t, data) - - stderr.Seek(0, os.SEEK_SET) - stderr.Truncate(0) - }) - - t.Run("test multi flag", func(t *testing.T) { - interceptor := serverLogging(LogFlagDisableInfo | LogFlagDisableArgs) - callServerInterceptor(nil, interceptor) - callServerInterceptor(errors.New("test-error"), interceptor) - - stderr.Seek(0, os.SEEK_SET) - - data, err := ioutil.ReadAll(stderr) - if err != nil { - t.Error(err) - } - assert.Contains(t, string(data), "test-method") - assert.Contains(t, string(data), "test-error") - assert.NotContains(t, string(data), "INFO") - - stderr.Seek(0, os.SEEK_SET) - stderr.Truncate(0) - }) - os.Stderr = old -} diff --git a/pkg/net/rpc/warden/metrics.go b/pkg/net/rpc/warden/metrics.go deleted file mode 100644 index 3fd09bb3e..000000000 --- a/pkg/net/rpc/warden/metrics.go +++ /dev/null @@ -1,41 +0,0 @@ -package warden - -import "github.com/go-kratos/kratos/pkg/stat/metric" - -const ( - clientNamespace = "grpc_client" - serverNamespace = "grpc_server" -) - -var ( - _metricServerReqDur = metric.NewHistogramVec(&metric.HistogramVecOpts{ - Namespace: serverNamespace, - Subsystem: "requests", - Name: "duration_ms", - Help: "grpc server requests duration(ms).", - Labels: []string{"method", "caller"}, - Buckets: []float64{5, 10, 25, 50, 100, 250, 500, 1000}, - }) - _metricServerReqCodeTotal = metric.NewCounterVec(&metric.CounterVecOpts{ - Namespace: serverNamespace, - Subsystem: "requests", - Name: "code_total", - Help: "grpc server requests code count.", - Labels: []string{"method", "caller", "code"}, - }) - _metricClientReqDur = metric.NewHistogramVec(&metric.HistogramVecOpts{ - Namespace: clientNamespace, - Subsystem: "requests", - Name: "duration_ms", - Help: "grpc client requests duration(ms).", - Labels: []string{"method"}, - Buckets: 
[]float64{5, 10, 25, 50, 100, 250, 500, 1000}, - }) - _metricClientReqCodeTotal = metric.NewCounterVec(&metric.CounterVecOpts{ - Namespace: clientNamespace, - Subsystem: "requests", - Name: "code_total", - Help: "grpc client requests code count.", - Labels: []string{"method", "code"}, - }) -) diff --git a/pkg/net/rpc/warden/ratelimiter/ratelimiter.go b/pkg/net/rpc/warden/ratelimiter/ratelimiter.go deleted file mode 100644 index 4696b5d5f..000000000 --- a/pkg/net/rpc/warden/ratelimiter/ratelimiter.go +++ /dev/null @@ -1,65 +0,0 @@ -package ratelimiter - -import ( - "context" - "sync/atomic" - "time" - - "google.golang.org/grpc" - - "github.com/go-kratos/kratos/pkg/log" - limit "github.com/go-kratos/kratos/pkg/ratelimit" - "github.com/go-kratos/kratos/pkg/ratelimit/bbr" - "github.com/go-kratos/kratos/pkg/stat/metric" -) - -var ( - _metricServerBBR = metric.NewCounterVec(&metric.CounterVecOpts{ - Namespace: "grpc_server", - Subsystem: "", - Name: "bbr_total", - Help: "grpc server bbr total.", - Labels: []string{"url"}, - }) -) - -// RateLimiter bbr middleware. -type RateLimiter struct { - group *bbr.Group - logTime int64 -} - -// New return a ratelimit middleware. -func New(conf *bbr.Config) (s *RateLimiter) { - return &RateLimiter{ - group: bbr.NewGroup(conf), - logTime: time.Now().UnixNano(), - } -} - -func (b *RateLimiter) printStats(fullMethod string, limiter limit.Limiter) { - now := time.Now().UnixNano() - if now-atomic.LoadInt64(&b.logTime) > int64(time.Second*3) { - atomic.StoreInt64(&b.logTime, now) - log.Info("grpc.bbr path:%s stat:%+v", fullMethod, limiter.(*bbr.BBR).Stat()) - } -} - -// Limit is a server interceptor that detects and rejects overloaded traffic. -func (b *RateLimiter) Limit() grpc.UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, args *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - uri := args.FullMethod - limiter := b.group.Get(uri) - done, err := limiter.Allow(ctx) - if err != nil { - _metricServerBBR.Inc(uri) - return - } - defer func() { - done(limit.DoneInfo{Op: limit.Success}) - b.printStats(uri, limiter) - }() - resp, err = handler(ctx, req) - return - } -} diff --git a/pkg/net/rpc/warden/recovery.go b/pkg/net/rpc/warden/recovery.go deleted file mode 100644 index 7fa33144c..000000000 --- a/pkg/net/rpc/warden/recovery.go +++ /dev/null @@ -1,61 +0,0 @@ -package warden - -import ( - "context" - "fmt" - "os" - "runtime" - - "github.com/go-kratos/kratos/pkg/ecode" - "github.com/go-kratos/kratos/pkg/log" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// recovery is a server interceptor that recovers from any panics. -func (s *Server) recovery() grpc.UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, args *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - defer func() { - if rerr := recover(); rerr != nil { - const size = 64 << 10 - buf := make([]byte, size) - rs := runtime.Stack(buf, false) - if rs > size { - rs = size - } - buf = buf[:rs] - pl := fmt.Sprintf("grpc server panic: %v\n%v\n%s\n", req, rerr, buf) - fmt.Fprint(os.Stderr, pl) - log.Error(pl) - err = status.Errorf(codes.Unknown, ecode.ServerErr.Error()) - } - }() - resp, err = handler(ctx, req) - return - } -} - -// recovery return a client interceptor that recovers from any panics. 
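// A minimal sketch of using the RateLimiter defined above outside of warden:
// Limit() returns a plain grpc.UnaryServerInterceptor, so it can guard any
// gRPC server. Passing nil for the bbr.Config is assumed to fall back to the
// bbr package defaults.
//
//	limiter := ratelimiter.New(nil)
//	srv := grpc.NewServer(grpc.UnaryInterceptor(limiter.Limit()))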
-func (c *Client) recovery() grpc.UnaryClientInterceptor { - return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) (err error) { - defer func() { - if rerr := recover(); rerr != nil { - const size = 64 << 10 - buf := make([]byte, size) - rs := runtime.Stack(buf, false) - if rs > size { - rs = size - } - buf = buf[:rs] - pl := fmt.Sprintf("grpc client panic: %v\n%v\n%v\n%s\n", req, reply, rerr, buf) - fmt.Fprintf(os.Stderr, pl) - log.Error(pl) - err = ecode.ServerErr - } - }() - err = invoker(ctx, method, req, reply, cc, opts...) - return - } -} diff --git a/pkg/net/rpc/warden/resolver/README.md b/pkg/net/rpc/warden/resolver/README.md deleted file mode 100644 index 456e74090..000000000 --- a/pkg/net/rpc/warden/resolver/README.md +++ /dev/null @@ -1,5 +0,0 @@ -#### warden/resolver - -##### 项目简介 - -warden 的 服务发现模块,用于从底层的注册中心中获取Server节点列表并返回给GRPC diff --git a/pkg/net/rpc/warden/resolver/direct/README.md b/pkg/net/rpc/warden/resolver/direct/README.md deleted file mode 100644 index 28f5d0ace..000000000 --- a/pkg/net/rpc/warden/resolver/direct/README.md +++ /dev/null @@ -1,6 +0,0 @@ -#### warden/resolver/direct - -##### 项目简介 - -warden 的直连服务模块,用于通过IP地址列表直接连接后端服务 -连接字符串格式: direct://default/192.168.1.1:8080,192.168.1.2:8081 diff --git a/pkg/net/rpc/warden/resolver/direct/direct.go b/pkg/net/rpc/warden/resolver/direct/direct.go deleted file mode 100644 index 6490b93c5..000000000 --- a/pkg/net/rpc/warden/resolver/direct/direct.go +++ /dev/null @@ -1,78 +0,0 @@ -package direct - -import ( - "context" - "fmt" - "strings" - - "github.com/go-kratos/kratos/pkg/conf/env" - "github.com/go-kratos/kratos/pkg/naming" - "github.com/go-kratos/kratos/pkg/net/rpc/warden/resolver" -) - -const ( - // Name is the name of direct resolver - Name = "direct" -) - -var _ naming.Resolver = &Direct{} - -// New return Direct -func New() *Direct { - return &Direct{} -} - -// Build build direct. -func Build(id string) *Direct { - return &Direct{id: id} -} - -// Direct is a resolver for conneting endpoints directly. -// example format: direct://default/192.168.1.1:8080,192.168.1.2:8081 -type Direct struct { - id string -} - -// Build direct build. -func (d *Direct) Build(id string, opt ...naming.BuildOpt) naming.Resolver { - return &Direct{id: id} -} - -// Scheme return the Scheme of Direct -func (d *Direct) Scheme() string { - return Name -} - -// Watch a tree. -func (d *Direct) Watch() <-chan struct{} { - ch := make(chan struct{}, 1) - ch <- struct{}{} - return ch -} - -// Unwatch a tree. -func (d *Direct) Unwatch(id string) { -} - -//Fetch fetch instances. 
-func (d *Direct) Fetch(ctx context.Context) (res *naming.InstancesInfo, found bool) { - var ins []*naming.Instance - addrs := strings.Split(d.id, ",") - for _, addr := range addrs { - ins = append(ins, &naming.Instance{ - Addrs: []string{fmt.Sprintf("%s://%s", resolver.Scheme, addr)}, - }) - } - if len(ins) > 0 { - found = true - } - res = &naming.InstancesInfo{ - Instances: map[string][]*naming.Instance{env.Zone: ins}, - } - return -} - -//Close close Direct -func (d *Direct) Close() error { - return nil -} diff --git a/pkg/net/rpc/warden/resolver/direct/test/direct_test.go b/pkg/net/rpc/warden/resolver/direct/test/direct_test.go deleted file mode 100644 index 690f3415a..000000000 --- a/pkg/net/rpc/warden/resolver/direct/test/direct_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package direct - -import ( - "context" - "fmt" - "os" - "testing" - "time" - - "github.com/go-kratos/kratos/pkg/net/netutil/breaker" - "github.com/go-kratos/kratos/pkg/net/rpc/warden" - pb "github.com/go-kratos/kratos/pkg/net/rpc/warden/internal/proto/testproto" - "github.com/go-kratos/kratos/pkg/net/rpc/warden/resolver" - "github.com/go-kratos/kratos/pkg/net/rpc/warden/resolver/direct" - xtime "github.com/go-kratos/kratos/pkg/time" -) - -type testServer struct { - name string -} - -func (ts *testServer) SayHello(context.Context, *pb.HelloRequest) (*pb.HelloReply, error) { - return &pb.HelloReply{Message: ts.name, Success: true}, nil -} - -func (ts *testServer) StreamHello(ss pb.Greeter_StreamHelloServer) error { - panic("not implement error") -} - -func createServer(name, listen string) *warden.Server { - s := warden.NewServer(&warden.ServerConfig{Timeout: xtime.Duration(time.Second)}) - ts := &testServer{name} - pb.RegisterGreeterServer(s.Server(), ts) - go func() { - if err := s.Run(listen); err != nil { - panic(fmt.Sprintf("run warden server fail! 
err: %s", err)) - } - }() - return s -} - -func TestMain(m *testing.M) { - resolver.Register(direct.New()) - ctx := context.TODO() - s1 := createServer("server1", "127.0.0.1:18001") - s2 := createServer("server2", "127.0.0.1:18002") - defer s1.Shutdown(ctx) - defer s2.Shutdown(ctx) - os.Exit(m.Run()) -} - -func createTestClient(t *testing.T, connStr string) pb.GreeterClient { - client := warden.NewClient(&warden.ClientConfig{ - Dial: xtime.Duration(time.Second * 10), - Timeout: xtime.Duration(time.Second * 10), - Breaker: &breaker.Config{ - Window: xtime.Duration(3 * time.Second), - Bucket: 10, - Request: 20, - K: 1.5, - }, - }) - conn, err := client.Dial(context.TODO(), connStr) - if err != nil { - t.Fatalf("create client fail!err%s", err) - } - return pb.NewGreeterClient(conn) -} - -func TestDirect(t *testing.T) { - cli := createTestClient(t, "direct://default/127.0.0.1:18003,127.0.0.1:18002") - count := 0 - for i := 0; i < 10; i++ { - if resp, err := cli.SayHello(context.TODO(), &pb.HelloRequest{Age: 1, Name: "hello"}); err != nil { - t.Fatalf("TestDirect: SayHello failed!err:=%v", err) - } else { - if resp.Message == "server2" { - count++ - } - } - } - if count != 10 { - t.Fatalf("TestDirect: get server2 times must be 10") - } -} diff --git a/pkg/net/rpc/warden/resolver/resolver.go b/pkg/net/rpc/warden/resolver/resolver.go deleted file mode 100644 index e72d6352a..000000000 --- a/pkg/net/rpc/warden/resolver/resolver.go +++ /dev/null @@ -1,162 +0,0 @@ -package resolver - -import ( - "context" - "net/url" - "strconv" - "strings" - "sync" - - "github.com/go-kratos/kratos/pkg/conf/env" - "github.com/go-kratos/kratos/pkg/log" - "github.com/go-kratos/kratos/pkg/naming" - wmeta "github.com/go-kratos/kratos/pkg/net/rpc/warden/internal/metadata" - - "github.com/pkg/errors" - "google.golang.org/grpc/resolver" -) - -const ( - // Scheme is the scheme of discovery address - Scheme = "grpc" -) - -var ( - _ resolver.Resolver = &Resolver{} - _ resolver.Builder = &Builder{} - mu sync.Mutex -) - -// Register register resolver builder if nil. -func Register(b naming.Builder) { - mu.Lock() - defer mu.Unlock() - if resolver.Get(b.Scheme()) == nil { - resolver.Register(&Builder{b}) - } -} - -// Set override any registered builder -func Set(b naming.Builder) { - mu.Lock() - defer mu.Unlock() - resolver.Register(&Builder{b}) -} - -// Builder is also a resolver builder. -// It's build() function always returns itself. -type Builder struct { - naming.Builder -} - -// Build returns itself for Resolver, because it's both a builder and a resolver. 
-func (b *Builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - var zone = env.Zone - ss := int64(50) - clusters := map[string]struct{}{} - str := strings.SplitN(target.Endpoint, "?", 2) - if len(str) == 0 { - return nil, errors.Errorf("warden resolver: parse target.Endpoint(%s) failed!err:=endpoint is empty", target.Endpoint) - } else if len(str) == 2 { - m, err := url.ParseQuery(str[1]) - if err == nil { - for _, c := range m[naming.MetaCluster] { - clusters[c] = struct{}{} - } - zones := m[naming.MetaZone] - if len(zones) > 0 { - zone = zones[0] - } - if sub, ok := m["subset"]; ok { - if t, err := strconv.ParseInt(sub[0], 10, 64); err == nil { - ss = t - } - } - } - } - r := &Resolver{ - nr: b.Builder.Build(str[0], naming.Filter(Scheme, clusters), naming.ScheduleNode(zone), naming.Subset(int(ss))), - cc: cc, - quit: make(chan struct{}, 1), - zone: zone, - } - go r.updateproc() - return r, nil -} - -// Resolver watches for the updates on the specified target. -// Updates include address updates and service config updates. -type Resolver struct { - nr naming.Resolver - cc resolver.ClientConn - quit chan struct{} - - clusters map[string]struct{} - zone string - subsetSize int64 -} - -// Close is a noop for Resolver. -func (r *Resolver) Close() { - select { - case r.quit <- struct{}{}: - r.nr.Close() - default: - } -} - -// ResolveNow is a noop for Resolver. -func (r *Resolver) ResolveNow(o resolver.ResolveNowOptions) { -} - -func (r *Resolver) updateproc() { - event := r.nr.Watch() - for { - select { - case <-r.quit: - return - case _, ok := <-event: - if !ok { - return - } - } - if ins, ok := r.nr.Fetch(context.Background()); ok { - instances, _ := ins.Instances[r.zone] - if len(instances) == 0 { - for _, value := range ins.Instances { - instances = append(instances, value...) 
- } - } - r.newAddress(instances) - } - } -} - -func (r *Resolver) newAddress(instances []*naming.Instance) { - if len(instances) <= 0 { - return - } - addrs := make([]resolver.Address, 0, len(instances)) - for _, ins := range instances { - var weight int64 - if weight, _ = strconv.ParseInt(ins.Metadata[naming.MetaWeight], 10, 64); weight <= 0 { - weight = 10 - } - var rpc string - for _, a := range ins.Addrs { - u, err := url.Parse(a) - if err == nil && u.Scheme == Scheme { - rpc = u.Host - } - } - addr := resolver.Address{ - Addr: rpc, - Type: resolver.Backend, - ServerName: ins.AppID, - Metadata: wmeta.MD{Weight: uint64(weight), Color: ins.Metadata[naming.MetaColor]}, - } - addrs = append(addrs, addr) - } - log.Info("resolver: finally get %d instances", len(addrs)) - r.cc.NewAddress(addrs) -} diff --git a/pkg/net/rpc/warden/resolver/test/mockdiscovery.go b/pkg/net/rpc/warden/resolver/test/mockdiscovery.go deleted file mode 100644 index 586913049..000000000 --- a/pkg/net/rpc/warden/resolver/test/mockdiscovery.go +++ /dev/null @@ -1,87 +0,0 @@ -package resolver - -import ( - "context" - - "github.com/go-kratos/kratos/pkg/conf/env" - "github.com/go-kratos/kratos/pkg/naming" -) - -type mockDiscoveryBuilder struct { - instances map[string]*naming.Instance - watchch map[string][]*mockDiscoveryResolver -} - -func (mb *mockDiscoveryBuilder) Build(id string, opts ...naming.BuildOpt) naming.Resolver { - mr := &mockDiscoveryResolver{ - d: mb, - watchch: make(chan struct{}, 1), - } - mb.watchch[id] = append(mb.watchch[id], mr) - mr.watchch <- struct{}{} - return mr -} -func (mb *mockDiscoveryBuilder) Scheme() string { - return "mockdiscovery" -} - -type mockDiscoveryResolver struct { - d *mockDiscoveryBuilder - watchch chan struct{} -} - -var _ naming.Resolver = &mockDiscoveryResolver{} - -func (md *mockDiscoveryResolver) Fetch(ctx context.Context) (*naming.InstancesInfo, bool) { - zones := make(map[string][]*naming.Instance) - for _, v := range md.d.instances { - zones[v.Zone] = append(zones[v.Zone], v) - } - return &naming.InstancesInfo{Instances: zones}, len(zones) > 0 -} - -func (md *mockDiscoveryResolver) Watch() <-chan struct{} { - return md.watchch -} - -func (md *mockDiscoveryResolver) Close() error { - close(md.watchch) - return nil -} - -func (md *mockDiscoveryResolver) Scheme() string { - return "mockdiscovery" -} - -func (mb *mockDiscoveryBuilder) registry(appID string, hostname, rpc string, metadata map[string]string) { - ins := &naming.Instance{ - AppID: appID, - Env: "hello=world", - Hostname: hostname, - Addrs: []string{"grpc://" + rpc}, - Version: "1.1", - Zone: env.Zone, - Metadata: metadata, - } - mb.instances[hostname] = ins - if ch, ok := mb.watchch[appID]; ok { - var bullet struct{} - for _, c := range ch { - c.watchch <- bullet - } - } -} - -func (mb *mockDiscoveryBuilder) cancel(hostname string) { - ins, ok := mb.instances[hostname] - if !ok { - return - } - delete(mb.instances, hostname) - if ch, ok := mb.watchch[ins.AppID]; ok { - var bullet struct{} - for _, c := range ch { - c.watchch <- bullet - } - } -} diff --git a/pkg/net/rpc/warden/resolver/test/resovler_test.go b/pkg/net/rpc/warden/resolver/test/resovler_test.go deleted file mode 100644 index 9d763a653..000000000 --- a/pkg/net/rpc/warden/resolver/test/resovler_test.go +++ /dev/null @@ -1,314 +0,0 @@ -package resolver - -import ( - "context" - "fmt" - "os" - "testing" - "time" - - "github.com/go-kratos/kratos/pkg/conf/env" - "github.com/go-kratos/kratos/pkg/naming" - 
"github.com/go-kratos/kratos/pkg/net/netutil/breaker" - "github.com/go-kratos/kratos/pkg/net/rpc/warden" - pb "github.com/go-kratos/kratos/pkg/net/rpc/warden/internal/proto/testproto" - "github.com/go-kratos/kratos/pkg/net/rpc/warden/resolver" - xtime "github.com/go-kratos/kratos/pkg/time" - - "github.com/stretchr/testify/assert" -) - -var testServerMap map[string]*testServer - -func init() { - testServerMap = make(map[string]*testServer) -} - -const testAppID = "main.test" - -type testServer struct { - SayHelloCount int -} - -func resetCount() { - for _, s := range testServerMap { - s.SayHelloCount = 0 - } -} - -func (ts *testServer) SayHello(context.Context, *pb.HelloRequest) (*pb.HelloReply, error) { - ts.SayHelloCount++ - return &pb.HelloReply{Message: "hello", Success: true}, nil -} - -func (ts *testServer) StreamHello(ss pb.Greeter_StreamHelloServer) error { - panic("not implement error") -} - -func createServer(name, listen string) *warden.Server { - s := warden.NewServer(&warden.ServerConfig{Timeout: xtime.Duration(time.Second)}) - ts := &testServer{} - testServerMap[name] = ts - pb.RegisterGreeterServer(s.Server(), ts) - go func() { - if err := s.Run(listen); err != nil { - panic(fmt.Sprintf("run warden server fail! err: %s", err)) - } - }() - return s -} - -func NSayHello(c pb.GreeterClient, n int) func(*testing.T) { - return func(t *testing.T) { - for i := 0; i < n; i++ { - if _, err := c.SayHello(context.TODO(), &pb.HelloRequest{Age: 1, Name: "hello"}); err != nil { - t.Fatalf("call sayhello fail! err: %s", err) - } - } - } -} - -func createTestClient(t *testing.T) pb.GreeterClient { - client := warden.NewClient(&warden.ClientConfig{ - Dial: xtime.Duration(time.Second * 10), - Timeout: xtime.Duration(time.Second * 10), - Breaker: &breaker.Config{ - Window: xtime.Duration(3 * time.Second), - Bucket: 10, - Request: 20, - K: 1.5, - }, - }) - conn, err := client.Dial(context.TODO(), "mockdiscovery://authority/main.test") - if err != nil { - t.Fatalf("create client fail!err%s", err) - } - return pb.NewGreeterClient(conn) -} - -var mockResolver *mockDiscoveryBuilder - -func newMockDiscoveryBuilder() *mockDiscoveryBuilder { - return &mockDiscoveryBuilder{ - instances: make(map[string]*naming.Instance), - watchch: make(map[string][]*mockDiscoveryResolver), - } -} -func TestMain(m *testing.M) { - ctx := context.TODO() - mockResolver = newMockDiscoveryBuilder() - resolver.Set(mockResolver) - s1 := createServer("server1", "127.0.0.1:18081") - s2 := createServer("server2", "127.0.0.1:18082") - s3 := createServer("server3", "127.0.0.1:18083") - s4 := createServer("server4", "127.0.0.1:18084") - s5 := createServer("server5", "127.0.0.1:18085") - defer s1.Shutdown(ctx) - defer s2.Shutdown(ctx) - defer s3.Shutdown(ctx) - defer s4.Shutdown(ctx) - defer s5.Shutdown(ctx) - os.Exit(m.Run()) -} - -func TestAddResolver(t *testing.T) { - mockResolver.registry(testAppID, "server1", "127.0.0.1:18081", map[string]string{}) - c := createTestClient(t) - t.Run("test_say_hello", NSayHello(c, 10)) - assert.Equal(t, 10, testServerMap["server1"].SayHelloCount) - resetCount() -} - -func TestDeleteResolver(t *testing.T) { - mockResolver.registry(testAppID, "server1", "127.0.0.1:18081", map[string]string{}) - mockResolver.registry(testAppID, "server2", "127.0.0.1:18082", map[string]string{}) - c := createTestClient(t) - t.Run("test_say_hello", NSayHello(c, 10)) - assert.Equal(t, 10, testServerMap["server1"].SayHelloCount+testServerMap["server2"].SayHelloCount) - - mockResolver.cancel("server1") - resetCount() - 
time.Sleep(time.Millisecond * 10) - t.Run("test_say_hello", NSayHello(c, 10)) - assert.Equal(t, 0, testServerMap["server1"].SayHelloCount) - - resetCount() -} - -func TestUpdateResolver(t *testing.T) { - mockResolver.registry(testAppID, "server1", "127.0.0.1:18081", map[string]string{}) - mockResolver.registry(testAppID, "server2", "127.0.0.1:18082", map[string]string{}) - - c := createTestClient(t) - t.Run("test_say_hello", NSayHello(c, 10)) - assert.Equal(t, 10, testServerMap["server1"].SayHelloCount+testServerMap["server2"].SayHelloCount) - - mockResolver.registry(testAppID, "server1", "127.0.0.1:18083", map[string]string{}) - mockResolver.registry(testAppID, "server2", "127.0.0.1:18084", map[string]string{}) - resetCount() - time.Sleep(time.Millisecond * 10) - t.Run("test_say_hello", NSayHello(c, 10)) - assert.Equal(t, 0, testServerMap["server1"].SayHelloCount+testServerMap["server2"].SayHelloCount) - assert.Equal(t, 10, testServerMap["server3"].SayHelloCount+testServerMap["server4"].SayHelloCount) - - resetCount() -} - -func TestErrorResolver(t *testing.T) { - mockResolver := newMockDiscoveryBuilder() - resolver.Set(mockResolver) - mockResolver.registry(testAppID, "server1", "127.0.0.1:18081", map[string]string{}) - mockResolver.registry(testAppID, "server6", "127.0.0.1:18086", map[string]string{}) - - c := createTestClient(t) - t.Run("test_say_hello", NSayHello(c, 10)) - assert.Equal(t, 10, testServerMap["server1"].SayHelloCount) - - resetCount() -} - -// FIXME -func testClusterResolver(t *testing.T) { - mockResolver := newMockDiscoveryBuilder() - resolver.Set(mockResolver) - mockResolver.registry(testAppID, "server1", "127.0.0.1:18081", map[string]string{"cluster": "c1"}) - mockResolver.registry(testAppID, "server2", "127.0.0.1:18082", map[string]string{"cluster": "c1"}) - mockResolver.registry(testAppID, "server3", "127.0.0.1:18083", map[string]string{"cluster": "c2"}) - mockResolver.registry(testAppID, "server4", "127.0.0.1:18084", map[string]string{}) - mockResolver.registry(testAppID, "server5", "127.0.0.1:18084", map[string]string{}) - - client := warden.NewClient(&warden.ClientConfig{Clusters: []string{"c1"}}) - conn, err := client.Dial(context.TODO(), "mockdiscovery://authority/main.test?cluster=c2") - if err != nil { - t.Fatalf("create client fail!err%s", err) - } - time.Sleep(time.Millisecond * 10) - cli := pb.NewGreeterClient(conn) - if _, err := cli.SayHello(context.TODO(), &pb.HelloRequest{Age: 1, Name: "hello"}); err != nil { - t.Fatalf("call sayhello fail! err: %s", err) - } - if _, err := cli.SayHello(context.TODO(), &pb.HelloRequest{Age: 1, Name: "hello"}); err != nil { - t.Fatalf("call sayhello fail! err: %s", err) - } - if _, err := cli.SayHello(context.TODO(), &pb.HelloRequest{Age: 1, Name: "hello"}); err != nil { - t.Fatalf("call sayhello fail! 
err: %s", err) - } - assert.Equal(t, 1, testServerMap["server1"].SayHelloCount) - assert.Equal(t, 1, testServerMap["server2"].SayHelloCount) - assert.Equal(t, 1, testServerMap["server3"].SayHelloCount) - - resetCount() -} - -// FIXME -func testNoClusterResolver(t *testing.T) { - mockResolver := newMockDiscoveryBuilder() - resolver.Set(mockResolver) - mockResolver.registry(testAppID, "server1", "127.0.0.1:18081", map[string]string{"cluster": "c1"}) - mockResolver.registry(testAppID, "server2", "127.0.0.1:18082", map[string]string{"cluster": "c1"}) - mockResolver.registry(testAppID, "server3", "127.0.0.1:18083", map[string]string{"cluster": "c2"}) - mockResolver.registry(testAppID, "server4", "127.0.0.1:18084", map[string]string{}) - client := warden.NewClient(&warden.ClientConfig{}) - conn, err := client.Dial(context.TODO(), "mockdiscovery://authority/main.test") - if err != nil { - t.Fatalf("create client fail!err%s", err) - } - time.Sleep(time.Millisecond * 20) - cli := pb.NewGreeterClient(conn) - if _, err := cli.SayHello(context.TODO(), &pb.HelloRequest{Age: 1, Name: "hello"}); err != nil { - t.Fatalf("call sayhello fail! err: %s", err) - } - if _, err := cli.SayHello(context.TODO(), &pb.HelloRequest{Age: 1, Name: "hello"}); err != nil { - t.Fatalf("call sayhello fail! err: %s", err) - } - if _, err := cli.SayHello(context.TODO(), &pb.HelloRequest{Age: 1, Name: "hello"}); err != nil { - t.Fatalf("call sayhello fail! err: %s", err) - } - if _, err := cli.SayHello(context.TODO(), &pb.HelloRequest{Age: 1, Name: "hello"}); err != nil { - t.Fatalf("call sayhello fail! err: %s", err) - } - assert.Equal(t, 1, testServerMap["server1"].SayHelloCount) - assert.Equal(t, 1, testServerMap["server2"].SayHelloCount) - assert.Equal(t, 1, testServerMap["server3"].SayHelloCount) - assert.Equal(t, 1, testServerMap["server4"].SayHelloCount) - - resetCount() -} - -func TestZoneResolver(t *testing.T) { - mockResolver := newMockDiscoveryBuilder() - resolver.Set(mockResolver) - mockResolver.registry(testAppID, "server1", "127.0.0.1:18081", map[string]string{}) - env.Zone = "testsh" - mockResolver.registry(testAppID, "server2", "127.0.0.1:18082", map[string]string{}) - env.Zone = "hhhh" - client := warden.NewClient(&warden.ClientConfig{Zone: "testsh"}) - conn, err := client.Dial(context.TODO(), "mockdiscovery://authority/main.test") - if err != nil { - t.Fatalf("create client fail!err%s", err) - } - time.Sleep(time.Millisecond * 10) - cli := pb.NewGreeterClient(conn) - if _, err := cli.SayHello(context.TODO(), &pb.HelloRequest{Age: 1, Name: "hello"}); err != nil { - t.Fatalf("call sayhello fail! err: %s", err) - } - if _, err := cli.SayHello(context.TODO(), &pb.HelloRequest{Age: 1, Name: "hello"}); err != nil { - t.Fatalf("call sayhello fail! err: %s", err) - } - if _, err := cli.SayHello(context.TODO(), &pb.HelloRequest{Age: 1, Name: "hello"}); err != nil { - t.Fatalf("call sayhello fail! 
err: %s", err) - } - assert.Equal(t, 0, testServerMap["server1"].SayHelloCount) - assert.Equal(t, 3, testServerMap["server2"].SayHelloCount) - - resetCount() -} - -// FIXME -func testSubsetConn(t *testing.T) { - mockResolver := newMockDiscoveryBuilder() - resolver.Set(mockResolver) - mockResolver.registry(testAppID, "server1", "127.0.0.1:18081", map[string]string{}) - mockResolver.registry(testAppID, "server2", "127.0.0.1:18082", map[string]string{}) - mockResolver.registry(testAppID, "server3", "127.0.0.1:18083", map[string]string{}) - mockResolver.registry(testAppID, "server4", "127.0.0.1:18084", map[string]string{}) - mockResolver.registry(testAppID, "server5", "127.0.0.1:18085", map[string]string{}) - - client := warden.NewClient(nil) - conn, err := client.Dial(context.TODO(), "mockdiscovery://authority/main.test?subset=3") - if err != nil { - t.Fatalf("create client fail!err%s", err) - } - time.Sleep(time.Millisecond * 20) - cli := pb.NewGreeterClient(conn) - for i := 0; i < 6; i++ { - if _, err := cli.SayHello(context.TODO(), &pb.HelloRequest{Age: 1, Name: "hello"}); err != nil { - t.Fatalf("call sayhello fail! err: %s", err) - } - } - assert.Equal(t, 2, testServerMap["server2"].SayHelloCount) - assert.Equal(t, 2, testServerMap["server5"].SayHelloCount) - assert.Equal(t, 2, testServerMap["server4"].SayHelloCount) - resetCount() - mockResolver.cancel("server4") - time.Sleep(time.Millisecond * 20) - for i := 0; i < 6; i++ { - if _, err := cli.SayHello(context.TODO(), &pb.HelloRequest{Age: 1, Name: "hello"}); err != nil { - t.Fatalf("call sayhello fail! err: %s", err) - } - } - assert.Equal(t, 2, testServerMap["server5"].SayHelloCount) - assert.Equal(t, 2, testServerMap["server2"].SayHelloCount) - assert.Equal(t, 2, testServerMap["server3"].SayHelloCount) - resetCount() - mockResolver.registry(testAppID, "server4", "127.0.0.1:18084", map[string]string{}) - time.Sleep(time.Millisecond * 20) - for i := 0; i < 6; i++ { - if _, err := cli.SayHello(context.TODO(), &pb.HelloRequest{Age: 1, Name: "hello"}); err != nil { - t.Fatalf("call sayhello fail! 
err: %s", err) - } - } - assert.Equal(t, 2, testServerMap["server2"].SayHelloCount) - assert.Equal(t, 2, testServerMap["server5"].SayHelloCount) - assert.Equal(t, 2, testServerMap["server4"].SayHelloCount) -} diff --git a/pkg/net/rpc/warden/resolver/util.go b/pkg/net/rpc/warden/resolver/util.go deleted file mode 100644 index 73df667c3..000000000 --- a/pkg/net/rpc/warden/resolver/util.go +++ /dev/null @@ -1,16 +0,0 @@ -package resolver - -import ( - "flag" - "fmt" -) - -// RegisterTarget will register grpc discovery mock address flag -func RegisterTarget(target *string, discoveryID string) { - flag.CommandLine.StringVar( - target, - fmt.Sprintf("grpc.%s", discoveryID), - fmt.Sprintf("discovery://default/%s", discoveryID), - fmt.Sprintf("App's grpc target.\n example: -grpc.%s=\"127.0.0.1:9090\"", discoveryID), - ) -} diff --git a/pkg/net/rpc/warden/server.go b/pkg/net/rpc/warden/server.go deleted file mode 100644 index 3d59beb7d..000000000 --- a/pkg/net/rpc/warden/server.go +++ /dev/null @@ -1,362 +0,0 @@ -package warden - -import ( - "context" - "flag" - "fmt" - "math" - "net" - "os" - "sync" - "time" - - "github.com/go-kratos/kratos/pkg/conf/dsn" - "github.com/go-kratos/kratos/pkg/log" - nmd "github.com/go-kratos/kratos/pkg/net/metadata" - "github.com/go-kratos/kratos/pkg/net/rpc/warden/ratelimiter" - "github.com/go-kratos/kratos/pkg/net/trace" - xtime "github.com/go-kratos/kratos/pkg/time" - - //this package is for json format response - _ "github.com/go-kratos/kratos/pkg/net/rpc/warden/internal/encoding/json" - "github.com/go-kratos/kratos/pkg/net/rpc/warden/internal/status" - - "github.com/pkg/errors" - "google.golang.org/grpc" - _ "google.golang.org/grpc/encoding/gzip" // NOTE: use grpc gzip by header grpc-accept-encoding - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/reflection" -) - -var ( - _grpcDSN string - _defaultSerConf = &ServerConfig{ - Network: "tcp", - Addr: "0.0.0.0:9000", - Timeout: xtime.Duration(time.Second), - IdleTimeout: xtime.Duration(time.Second * 180), - MaxLifeTime: xtime.Duration(time.Hour * 2), - ForceCloseWait: xtime.Duration(time.Second * 20), - KeepAliveInterval: xtime.Duration(time.Second * 60), - KeepAliveTimeout: xtime.Duration(time.Second * 20), - } - _abortIndex int8 = math.MaxInt8 / 2 -) - -// ServerConfig is rpc server conf. -type ServerConfig struct { - // Network is grpc listen network,default value is tcp - Network string `dsn:"network"` - // Addr is grpc listen addr,default value is 0.0.0.0:9000 - Addr string `dsn:"address"` - // Timeout is context timeout for per rpc call. - Timeout xtime.Duration `dsn:"query.timeout"` - // IdleTimeout is a duration for the amount of time after which an idle connection would be closed by sending a GoAway. - // Idleness duration is defined since the most recent time the number of outstanding RPCs became zero or the connection establishment. - IdleTimeout xtime.Duration `dsn:"query.idleTimeout"` - // MaxLifeTime is a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway. - // A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms. - MaxLifeTime xtime.Duration `dsn:"query.maxLife"` - // ForceCloseWait is an additive period after MaxLifeTime after which the connection will be forcibly closed. 
- ForceCloseWait xtime.Duration `dsn:"query.closeWait"` - // KeepAliveInterval is after a duration of this time if the server doesn't see any activity it pings the client to see if the transport is still alive. - KeepAliveInterval xtime.Duration `dsn:"query.keepaliveInterval"` - // KeepAliveTimeout is After having pinged for keepalive check, the server waits for a duration of Timeout and if no activity is seen even after that - // the connection is closed. - KeepAliveTimeout xtime.Duration `dsn:"query.keepaliveTimeout"` - // LogFlag to control log behaviour. e.g. LogFlag: warden.LogFlagDisableLog. - // Disable: 1 DisableArgs: 2 DisableInfo: 4 - LogFlag int8 `dsn:"query.logFlag"` -} - -// Server is the framework's server side instance, it contains the GrpcServer, interceptor and interceptors. -// Create an instance of Server, by using NewServer(). -type Server struct { - conf *ServerConfig - mutex sync.RWMutex - - server *grpc.Server - handlers []grpc.UnaryServerInterceptor -} - -// handle return a new unary server interceptor for OpenTracing\Logging\LinkTimeout. -func (s *Server) handle() grpc.UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, args *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - var ( - cancel func() - addr string - ) - s.mutex.RLock() - conf := s.conf - s.mutex.RUnlock() - // get derived timeout from grpc context, - // compare with the warden configured, - // and use the minimum one - timeout := time.Duration(conf.Timeout) - if dl, ok := ctx.Deadline(); ok { - ctimeout := time.Until(dl) - if ctimeout-time.Millisecond*20 > 0 { - ctimeout = ctimeout - time.Millisecond*20 - } - if timeout > ctimeout { - timeout = ctimeout - } - } - ctx, cancel = context.WithTimeout(ctx, timeout) - defer cancel() - - // get grpc metadata(trace & remote_ip & color) - var t trace.Trace - cmd := nmd.MD{} - if gmd, ok := metadata.FromIncomingContext(ctx); ok { - t, _ = trace.Extract(trace.GRPCFormat, gmd) - for key, vals := range gmd { - if nmd.IsIncomingKey(key) { - cmd[key] = vals[0] - } - } - } - if t == nil { - t = trace.New(args.FullMethod) - } else { - t.SetTitle(args.FullMethod) - } - - if pr, ok := peer.FromContext(ctx); ok { - addr = pr.Addr.String() - t.SetTag(trace.String(trace.TagAddress, addr)) - } - defer t.Finish(&err) - - // use common meta data context instead of grpc context - ctx = nmd.NewContext(ctx, cmd) - ctx = trace.NewContext(ctx, t) - - resp, err = handler(ctx, req) - return resp, status.FromError(err).Err() - } -} - -func init() { - addFlag(flag.CommandLine) -} - -func addFlag(fs *flag.FlagSet) { - v := os.Getenv("GRPC") - if v == "" { - v = "tcp://0.0.0.0:9000/?timeout=1s&idle_timeout=60s" - } - fs.StringVar(&_grpcDSN, "grpc", v, "listen grpc dsn, or use GRPC env variable.") - fs.Var(&_grpcTarget, "grpc.target", "usage: -grpc.target=seq.service=127.0.0.1:9000 -grpc.target=fav.service=192.168.10.1:9000") -} - -func parseDSN(rawdsn string) *ServerConfig { - conf := new(ServerConfig) - d, err := dsn.Parse(rawdsn) - if err != nil { - panic(errors.WithMessage(err, fmt.Sprintf("warden: invalid dsn: %s", rawdsn))) - } - if _, err = d.Bind(conf); err != nil { - panic(errors.WithMessage(err, fmt.Sprintf("warden: invalid dsn: %s", rawdsn))) - } - return conf -} - -// NewServer returns a new blank Server instance with a default server interceptor. 
-func NewServer(conf *ServerConfig, opt ...grpc.ServerOption) (s *Server) { - if conf == nil { - if !flag.Parsed() { - fmt.Fprint(os.Stderr, "[warden] please call flag.Parse() before Init warden server, some configure may not effect\n") - } - conf = parseDSN(_grpcDSN) - } else { - fmt.Fprintf(os.Stderr, "[warden] config is Deprecated, argument will be ignored. please use -grpc flag or GRPC env to configure warden server.\n") - } - s = new(Server) - if err := s.SetConfig(conf); err != nil { - panic(errors.Errorf("warden: set config failed!err: %s", err.Error())) - } - keepParam := grpc.KeepaliveParams(keepalive.ServerParameters{ - MaxConnectionIdle: time.Duration(s.conf.IdleTimeout), - MaxConnectionAgeGrace: time.Duration(s.conf.ForceCloseWait), - Time: time.Duration(s.conf.KeepAliveInterval), - Timeout: time.Duration(s.conf.KeepAliveTimeout), - MaxConnectionAge: time.Duration(s.conf.MaxLifeTime), - }) - opt = append(opt, keepParam, grpc.UnaryInterceptor(s.interceptor)) - s.server = grpc.NewServer(opt...) - s.Use(s.recovery(), s.handle(), serverLogging(conf.LogFlag), s.stats(), s.validate()) - s.Use(ratelimiter.New(nil).Limit()) - return -} - -// SetConfig hot reloads server config -func (s *Server) SetConfig(conf *ServerConfig) (err error) { - if conf == nil { - conf = _defaultSerConf - } - if conf.Timeout <= 0 { - conf.Timeout = xtime.Duration(time.Second) - } - if conf.IdleTimeout <= 0 { - conf.IdleTimeout = xtime.Duration(time.Second * 60) - } - if conf.MaxLifeTime <= 0 { - conf.MaxLifeTime = xtime.Duration(time.Hour * 2) - } - if conf.ForceCloseWait <= 0 { - conf.ForceCloseWait = xtime.Duration(time.Second * 20) - } - if conf.KeepAliveInterval <= 0 { - conf.KeepAliveInterval = xtime.Duration(time.Second * 60) - } - if conf.KeepAliveTimeout <= 0 { - conf.KeepAliveTimeout = xtime.Duration(time.Second * 20) - } - if conf.Addr == "" { - conf.Addr = "0.0.0.0:9000" - } - if conf.Network == "" { - conf.Network = "tcp" - } - s.mutex.Lock() - s.conf = conf - s.mutex.Unlock() - return nil -} - -// interceptor is a single interceptor out of a chain of many interceptors. -// Execution is done in left-to-right order, including passing of context. -// For example ChainUnaryServer(one, two, three) will execute one before two before three, and three -// will see context changes of one and two. -func (s *Server) interceptor(ctx context.Context, req interface{}, args *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - var ( - i int - chain grpc.UnaryHandler - ) - - n := len(s.handlers) - if n == 0 { - return handler(ctx, req) - } - - chain = func(ic context.Context, ir interface{}) (interface{}, error) { - if i == n-1 { - return handler(ic, ir) - } - i++ - return s.handlers[i](ic, ir, args, chain) - } - - return s.handlers[0](ctx, req, args, chain) -} - -// Server return the grpc server for registering service. -func (s *Server) Server() *grpc.Server { - return s.server -} - -// Use attachs a global inteceptor to the server. -// For example, this is the right place for a rate limiter or error management inteceptor. 
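A minimal sketch of what Use-based chaining looks like from the caller's side, following the left-to-right execution order described in the interceptor comment above; `logInterceptor` and `authInterceptor` are hypothetical grpc.UnaryServerInterceptor values and `s` is a *warden.Server:

```go
// logInterceptor runs first and wraps authInterceptor, which in turn wraps the handler;
// context values written by logInterceptor are visible to authInterceptor and the handler.
s.Use(logInterceptor, authInterceptor)
```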
-func (s *Server) Use(handlers ...grpc.UnaryServerInterceptor) *Server { - finalSize := len(s.handlers) + len(handlers) - if finalSize >= int(_abortIndex) { - panic("warden: server use too many handlers") - } - mergedHandlers := make([]grpc.UnaryServerInterceptor, finalSize) - copy(mergedHandlers, s.handlers) - copy(mergedHandlers[len(s.handlers):], handlers) - s.handlers = mergedHandlers - return s -} - -// Run create a tcp listener and start goroutine for serving each incoming request. -// Run will return a non-nil error unless Stop or GracefulStop is called. -func (s *Server) Run(addr string) error { - lis, err := net.Listen("tcp", addr) - if err != nil { - err = errors.WithStack(err) - log.Error("failed to listen: %v", err) - return err - } - reflection.Register(s.server) - return s.Serve(lis) -} - -// RunUnix create a unix listener and start goroutine for serving each incoming request. -// RunUnix will return a non-nil error unless Stop or GracefulStop is called. -func (s *Server) RunUnix(file string) error { - lis, err := net.Listen("unix", file) - if err != nil { - err = errors.WithStack(err) - log.Error("failed to listen: %v", err) - return err - } - reflection.Register(s.server) - return s.Serve(lis) -} - -// Start create a new goroutine run server with configured listen addr -// will panic if any error happened -// return server itself -func (s *Server) Start() (*Server, error) { - _, err := s.startWithAddr() - if err != nil { - return nil, err - } - return s, nil -} - -// StartWithAddr create a new goroutine run server with configured listen addr -// will panic if any error happened -// return server itself and the actually listened address (if configured listen -// port is zero, the os will allocate an unused port) -func (s *Server) StartWithAddr() (*Server, net.Addr, error) { - addr, err := s.startWithAddr() - if err != nil { - return nil, nil, err - } - return s, addr, nil -} - -func (s *Server) startWithAddr() (net.Addr, error) { - lis, err := net.Listen(s.conf.Network, s.conf.Addr) - if err != nil { - return nil, err - } - log.Info("warden: start grpc listen addr: %v", lis.Addr()) - reflection.Register(s.server) - go func() { - if err := s.Serve(lis); err != nil { - panic(err) - } - }() - return lis.Addr(), nil -} - -// Serve accepts incoming connections on the listener lis, creating a new -// ServerTransport and service goroutine for each. -// Serve will return a non-nil error unless Stop or GracefulStop is called. -func (s *Server) Serve(lis net.Listener) error { - return s.server.Serve(lis) -} - -// Shutdown stops the server gracefully. It stops the server from -// accepting new connections and RPCs and blocks until all the pending RPCs are -// finished or the context deadline is reached. 
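A short lifecycle sketch assembled from the tests later in this diff (the helloServer type and the pb Greeter registration come from server_test.go below; not a complete program):

```go
s := warden.NewServer(&warden.ServerConfig{Addr: "127.0.0.1:9090", Timeout: xtime.Duration(time.Second)})
pb.RegisterGreeterServer(s.Server(), &helloServer{})
if _, err := s.Start(); err != nil {
	panic(err)
}
// stop accepting new RPCs and wait up to 3s for in-flight ones, as testGracefulShutDown does
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
_ = s.Shutdown(ctx)
```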
-func (s *Server) Shutdown(ctx context.Context) (err error) { - ch := make(chan struct{}) - go func() { - s.server.GracefulStop() - close(ch) - }() - select { - case <-ctx.Done(): - s.server.Stop() - err = ctx.Err() - case <-ch: - } - return -} diff --git a/pkg/net/rpc/warden/server_test.go b/pkg/net/rpc/warden/server_test.go deleted file mode 100644 index 19f09fc9d..000000000 --- a/pkg/net/rpc/warden/server_test.go +++ /dev/null @@ -1,605 +0,0 @@ -package warden - -import ( - "context" - "fmt" - "io" - "math/rand" - "net" - "os" - "strconv" - "strings" - "sync" - "testing" - "time" - - "github.com/go-kratos/kratos/pkg/ecode" - "github.com/go-kratos/kratos/pkg/log" - nmd "github.com/go-kratos/kratos/pkg/net/metadata" - "github.com/go-kratos/kratos/pkg/net/netutil/breaker" - pb "github.com/go-kratos/kratos/pkg/net/rpc/warden/internal/proto/testproto" - xtrace "github.com/go-kratos/kratos/pkg/net/trace" - xtime "github.com/go-kratos/kratos/pkg/time" - - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -const ( - _separator = "\001" - - _testAddr = "127.0.0.1:9090" -) - -var ( - outPut []string - _testOnce sync.Once - server *Server - - clientConfig = ClientConfig{ - Dial: xtime.Duration(time.Second * 10), - Timeout: xtime.Duration(time.Second * 10), - Breaker: &breaker.Config{ - Window: xtime.Duration(3 * time.Second), - Bucket: 10, - K: 1.5, - }, - } - clientConfig2 = ClientConfig{ - Dial: xtime.Duration(time.Second * 10), - Timeout: xtime.Duration(time.Second * 10), - Breaker: &breaker.Config{ - Window: xtime.Duration(3 * time.Second), - Bucket: 10, - Request: 20, - K: 1.5, - }, - Method: map[string]*ClientConfig{`/testproto.Greeter/SayHello`: {Timeout: xtime.Duration(time.Millisecond * 200)}}, - } -) - -type helloServer struct { - t *testing.T -} - -func (s *helloServer) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { - if in.Name == "trace_test" { - t, isok := xtrace.FromContext(ctx) - if !isok { - t = xtrace.New("test title") - s.t.Fatalf("no trace extracted from server context") - } - newCtx := xtrace.NewContext(ctx, t) - if in.Age == 0 { - runClient(newCtx, &clientConfig, s.t, "trace_test", 1) - } - } else if in.Name == "recovery_test" { - panic("test recovery") - } else if in.Name == "graceful_shutdown" { - time.Sleep(time.Second * 3) - } else if in.Name == "timeout_test" { - if in.Age > 10 { - s.t.Fatalf("can not deliver requests over 10 times because of link timeout") - return &pb.HelloReply{Message: "Hello " + in.Name, Success: true}, nil - } - time.Sleep(time.Millisecond * 10) - _, err := runClient(ctx, &clientConfig, s.t, "timeout_test", in.Age+1) - return &pb.HelloReply{Message: "Hello " + in.Name, Success: true}, err - } else if in.Name == "timeout_test2" { - if in.Age > 10 { - s.t.Fatalf("can not deliver requests over 10 times because of link timeout") - return &pb.HelloReply{Message: "Hello " + in.Name, Success: true}, nil - } - time.Sleep(time.Millisecond * 10) - _, err := runClient(ctx, &clientConfig2, s.t, "timeout_test2", in.Age+1) - return &pb.HelloReply{Message: "Hello " + in.Name, Success: true}, err - } else if in.Name == "color_test" { - if in.Age == 0 { - resp, err := runClient(ctx, &clientConfig, s.t, "color_test", in.Age+1) - return resp, err - } - color := nmd.String(ctx, nmd.Color) - return &pb.HelloReply{Message: "Hello " + color, Success: true}, nil - } else if in.Name == "breaker_test" { - if rand.Intn(100) <= 50 { - 
return nil, status.Errorf(codes.ResourceExhausted, "test") - } - return &pb.HelloReply{Message: "Hello " + in.Name, Success: true}, nil - } else if in.Name == "error_detail" { - err, _ := ecode.Error(ecode.Code(123456), "test_error_detail").WithDetails(&pb.HelloReply{Success: true}) - return nil, err - } else if in.Name == "ecode_status" { - reply := &pb.HelloReply{Message: "status", Success: true} - st, _ := ecode.Error(ecode.RequestErr, "RequestErr").WithDetails(reply) - return nil, st - } else if in.Name == "general_error" { - return nil, fmt.Errorf("haha is error") - } else if in.Name == "ecode_code_error" { - return nil, ecode.Conflict - } else if in.Name == "pb_error_error" { - return nil, ecode.Error(ecode.Code(11122), "haha") - } else if in.Name == "ecode_status_error" { - return nil, ecode.Error(ecode.RequestErr, "RequestErr") - } else if in.Name == "test_remote_port" { - if strconv.Itoa(int(in.Age)) != nmd.String(ctx, nmd.RemotePort) { - return nil, fmt.Errorf("error port %d", in.Age) - } - reply := &pb.HelloReply{Message: "status", Success: true} - return reply, nil - } else if in.Name == "time_opt" { - time.Sleep(time.Second) - reply := &pb.HelloReply{Message: "status", Success: true} - return reply, nil - } - - return &pb.HelloReply{Message: "Hello " + in.Name, Success: true}, nil -} - -func (s *helloServer) StreamHello(ss pb.Greeter_StreamHelloServer) error { - for i := 0; i < 3; i++ { - in, err := ss.Recv() - if err == io.EOF { - return nil - } - if err != nil { - return err - } - ret := &pb.HelloReply{Message: "Hello " + in.Name, Success: true} - err = ss.Send(ret) - if err != nil { - return err - } - } - return nil -} - -func runServer(t *testing.T, interceptors ...grpc.UnaryServerInterceptor) func() { - return func() { - server = NewServer(&ServerConfig{Addr: _testAddr, Timeout: xtime.Duration(time.Second)}) - pb.RegisterGreeterServer(server.Server(), &helloServer{t}) - server.Use( - func(ctx context.Context, req interface{}, args *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - outPut = append(outPut, "1") - resp, err := handler(ctx, req) - outPut = append(outPut, "2") - return resp, err - }, - func(ctx context.Context, req interface{}, args *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - outPut = append(outPut, "3") - resp, err := handler(ctx, req) - outPut = append(outPut, "4") - return resp, err - }) - if _, err := server.Start(); err != nil { - t.Fatal(err) - } - } -} - -func runClient(ctx context.Context, cc *ClientConfig, t *testing.T, name string, age int32, interceptors ...grpc.UnaryClientInterceptor) (resp *pb.HelloReply, err error) { - client := NewClient(cc) - client.Use(interceptors...) 
- conn, err := client.Dial(context.Background(), _testAddr) - if err != nil { - panic(fmt.Errorf("did not connect: %v,req: %v %v", err, name, age)) - } - defer conn.Close() - c := pb.NewGreeterClient(conn) - resp, err = c.SayHello(ctx, &pb.HelloRequest{Name: name, Age: age}) - return -} - -func TestMain(t *testing.T) { - log.Init(nil) -} - -func Test_Warden(t *testing.T) { - xtrace.Init(&xtrace.Config{Addr: "127.0.0.1:9982", Timeout: xtime.Duration(time.Second * 3)}) - go _testOnce.Do(runServer(t)) - go runClient(context.Background(), &clientConfig, t, "trace_test", 0) - //testTrace(t, 9982, false) - //testInterceptorChain(t) - testValidation(t) - testServerRecovery(t) - testClientRecovery(t) - testTimeoutOpt(t) - testErrorDetail(t) - testECodeStatus(t) - testColorPass(t) - testRemotePort(t) - testLinkTimeout(t) - testClientConfig(t) - testBreaker(t) - testAllErrorCase(t) - testGracefulShutDown(t) -} - -func testValidation(t *testing.T) { - _, err := runClient(context.Background(), &clientConfig, t, "", 0) - if !ecode.EqualError(ecode.RequestErr, err) { - t.Fatalf("testValidation should return ecode.RequestErr,but is %v", err) - } -} - -func testTimeoutOpt(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*100) - defer cancel() - client := NewClient(&clientConfig) - conn, err := client.Dial(ctx, _testAddr) - if err != nil { - t.Fatalf("did not connect: %v", err) - } - defer conn.Close() - c := pb.NewGreeterClient(conn) - start := time.Now() - _, err = c.SayHello(ctx, &pb.HelloRequest{Name: "time_opt", Age: 0}, WithTimeoutCallOption(time.Millisecond*500)) - if err == nil { - t.Fatalf("recovery must return error") - } - if time.Since(start) < time.Millisecond*400 { - t.Fatalf("client timeout must be greater than 400 Milliseconds;err:=%v", err) - } -} - -func testAllErrorCase(t *testing.T) { - // } else if in.Name == "general_error" { - // return nil, fmt.Errorf("haha is error") - // } else if in.Name == "ecode_code_error" { - // return nil, ecode.CreativeArticleTagErr - // } else if in.Name == "pb_error_error" { - // return nil, &errpb.Error{ErrCode: 11122, ErrMessage: "haha"} - // } else if in.Name == "ecode_status_error" { - // return nil, ecode.Error(ecode.RequestErr, "RequestErr") - // } - ctx := context.Background() - t.Run("general_error", func(t *testing.T) { - _, err := runClient(ctx, &clientConfig, t, "general_error", 0) - assert.Contains(t, err.Error(), "haha") - ec := ecode.Cause(err) - assert.Equal(t, -500, ec.Code()) - // remove this assert in future - assert.Equal(t, "-500", ec.Message()) - }) - t.Run("ecode_code_error", func(t *testing.T) { - _, err := runClient(ctx, &clientConfig, t, "ecode_code_error", 0) - ec := ecode.Cause(err) - assert.Equal(t, ecode.Conflict.Code(), ec.Code()) - // remove this assert in future - assert.Equal(t, "-409", ec.Message()) - }) - t.Run("pb_error_error", func(t *testing.T) { - _, err := runClient(ctx, &clientConfig, t, "pb_error_error", 0) - ec := ecode.Cause(err) - assert.Equal(t, 11122, ec.Code()) - assert.Equal(t, "haha", ec.Message()) - }) - t.Run("ecode_status_error", func(t *testing.T) { - _, err := runClient(ctx, &clientConfig, t, "ecode_status_error", 0) - ec := ecode.Cause(err) - assert.Equal(t, ecode.RequestErr.Code(), ec.Code()) - assert.Equal(t, "RequestErr", ec.Message()) - }) -} - -func testBreaker(t *testing.T) { - client := NewClient(&clientConfig) - conn, err := client.Dial(context.Background(), _testAddr) - if err != nil { - t.Fatalf("did not connect: %v", err) - } - defer conn.Close() - 
c := pb.NewGreeterClient(conn) - for i := 0; i < 1000; i++ { - _, err := c.SayHello(context.Background(), &pb.HelloRequest{Name: "breaker_test"}) - if err != nil { - if ecode.EqualError(ecode.ServiceUnavailable, err) { - return - } - } - } - t.Fatalf("testBreaker failed!No breaker was triggered") -} - -func testColorPass(t *testing.T) { - ctx := nmd.NewContext(context.Background(), nmd.MD{ - nmd.Color: "red", - }) - resp, err := runClient(ctx, &clientConfig, t, "color_test", 0) - if err != nil { - t.Fatalf("testColorPass return error %v", err) - } - if resp == nil || resp.Message != "Hello red" { - t.Fatalf("testColorPass resp.Message must be red,%v", *resp) - } -} - -func testRemotePort(t *testing.T) { - ctx := nmd.NewContext(context.Background(), nmd.MD{ - nmd.RemotePort: "8000", - }) - _, err := runClient(ctx, &clientConfig, t, "test_remote_port", 8000) - if err != nil { - t.Fatalf("testRemotePort return error %v", err) - } -} - -func testLinkTimeout(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*200) - defer cancel() - _, err := runClient(ctx, &clientConfig, t, "timeout_test", 0) - if err == nil { - t.Fatalf("testLinkTimeout must return error") - } - if !ecode.EqualError(ecode.Deadline, err) { - t.Fatalf("testLinkTimeout must return error RPCDeadline,err:%v", err) - } -} -func testClientConfig(t *testing.T) { - _, err := runClient(context.Background(), &clientConfig2, t, "timeout_test2", 0) - if err == nil { - t.Fatalf("testLinkTimeout must return error") - } - if !ecode.EqualError(ecode.Deadline, err) { - t.Fatalf("testLinkTimeout must return error RPCDeadline,err:%v", err) - } -} - -func testGracefulShutDown(t *testing.T) { - wg := sync.WaitGroup{} - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - resp, err := runClient(context.Background(), &clientConfig, t, "graceful_shutdown", 0) - if err != nil { - panic(fmt.Errorf("run graceful_shutdown client return(%v)", err)) - } - if !resp.Success || resp.Message != "Hello graceful_shutdown" { - panic(fmt.Errorf("run graceful_shutdown client return(%v,%v)", err, *resp)) - } - }() - } - go func() { - time.Sleep(time.Second) - ctx, cancel := context.WithTimeout(context.Background(), time.Second*3) - defer cancel() - server.Shutdown(ctx) - }() - wg.Wait() -} - -func testClientRecovery(t *testing.T) { - ctx := context.Background() - client := NewClient(&clientConfig) - client.Use(func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) (ret error) { - invoker(ctx, method, req, reply, cc, opts...) 
- panic("client recovery test") - }) - - conn, err := client.Dial(ctx, _testAddr) - if err != nil { - t.Fatalf("did not connect: %v", err) - } - defer conn.Close() - c := pb.NewGreeterClient(conn) - - _, err = c.SayHello(ctx, &pb.HelloRequest{Name: "other_test", Age: 0}) - if err == nil { - t.Fatalf("recovery must return error") - } - e, ok := errors.Cause(err).(ecode.Codes) - if !ok { - t.Fatalf("recovery must return ecode error") - } - - if !ecode.EqualError(ecode.ServerErr, e) { - t.Fatalf("recovery must return ecode.RPCClientErr") - } -} - -func testServerRecovery(t *testing.T) { - ctx := context.Background() - client := NewClient(&clientConfig) - - conn, err := client.Dial(ctx, _testAddr) - if err != nil { - t.Fatalf("did not connect: %v", err) - } - defer conn.Close() - c := pb.NewGreeterClient(conn) - - _, err = c.SayHello(ctx, &pb.HelloRequest{Name: "recovery_test", Age: 0}) - if err == nil { - t.Fatalf("recovery must return error") - } - e, ok := errors.Cause(err).(ecode.Codes) - if !ok { - t.Fatalf("recovery must return ecode error") - } - - if e.Code() != ecode.ServerErr.Code() { - t.Fatalf("recovery must return ecode.ServerErr") - } -} - -func testInterceptorChain(t *testing.T) { - // NOTE: don't delete this sleep - time.Sleep(time.Millisecond) - if outPut[0] != "1" || outPut[1] != "3" || outPut[2] != "1" || outPut[3] != "3" || outPut[4] != "4" || outPut[5] != "2" || outPut[6] != "4" || outPut[7] != "2" { - t.Fatalf("outPut shoud be [1 3 1 3 4 2 4 2]!") - } -} - -func testErrorDetail(t *testing.T) { - _, err := runClient(context.Background(), &clientConfig2, t, "error_detail", 0) - if err == nil { - t.Fatalf("testErrorDetail must return error") - } - if ec, ok := errors.Cause(err).(ecode.Codes); !ok { - t.Fatalf("testErrorDetail must return ecode error") - } else if ec.Code() != 123456 || ec.Message() != "test_error_detail" || len(ec.Details()) == 0 { - t.Fatalf("testErrorDetail must return code:123456 and message:test_error_detail, code: %d, message: %s, details length: %d", ec.Code(), ec.Message(), len(ec.Details())) - } else if _, ok := ec.Details()[len(ec.Details())-1].(*pb.HelloReply); !ok { - t.Fatalf("expect get pb.HelloReply %#v", ec.Details()[len(ec.Details())-1]) - } -} - -func testECodeStatus(t *testing.T) { - _, err := runClient(context.Background(), &clientConfig2, t, "ecode_status", 0) - if err == nil { - t.Fatalf("testECodeStatus must return error") - } - st, ok := errors.Cause(err).(*ecode.Status) - if !ok { - t.Fatalf("testECodeStatus must return *ecode.Status") - } - if st.Code() != int(ecode.RequestErr) && st.Message() != "RequestErr" { - t.Fatalf("testECodeStatus must return code: -400, message: RequestErr get: code: %d, message: %s", st.Code(), st.Message()) - } - detail := st.Details()[0].(*pb.HelloReply) - if !detail.Success || detail.Message != "status" { - t.Fatalf("wrong detail") - } -} - -func testTrace(t *testing.T, port int, isStream bool) { - listener, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: port}) - if err != nil { - t.Fatalf("listent udp failed, %v", err) - return - } - data := make([]byte, 1024) - strs := make([][]string, 0) - for { - var n int - n, _, err = listener.ReadFromUDP(data) - if err != nil { - t.Fatalf("read from udp faild, %v", err) - } - str := strings.Split(string(data[:n]), _separator) - strs = append(strs, str) - - if len(strs) == 2 { - break - } - } - if len(strs[0]) == 0 || len(strs[1]) == 0 { - t.Fatalf("trace str's length must be greater than 0") - } -} - -func BenchmarkServer(b 
*testing.B) { - server := NewServer(&ServerConfig{Addr: _testAddr, Timeout: xtime.Duration(time.Second)}) - go func() { - pb.RegisterGreeterServer(server.Server(), &helloServer{}) - if _, err := server.Start(); err != nil { - os.Exit(0) - return - } - }() - defer func() { - server.Server().Stop() - }() - client := NewClient(&clientConfig) - conn, err := client.Dial(context.Background(), _testAddr) - if err != nil { - conn.Close() - b.Fatalf("did not connect: %v", err) - } - b.ResetTimer() - b.RunParallel(func(parab *testing.PB) { - for parab.Next() { - c := pb.NewGreeterClient(conn) - resp, err := c.SayHello(context.Background(), &pb.HelloRequest{Name: "benchmark_test", Age: 1}) - if err != nil { - conn.Close() - b.Fatalf("c.SayHello failed: %v,req: %v %v", err, "benchmark", 1) - } - if !resp.Success { - b.Error("response not success!") - } - } - }) - conn.Close() -} - -func TestParseDSN(t *testing.T) { - dsn := "tcp://0.0.0.0:80/?timeout=100ms&idleTimeout=120s&keepaliveInterval=120s&keepaliveTimeout=20s&maxLife=4h&closeWait=3s" - config := parseDSN(dsn) - if config.Network != "tcp" || config.Addr != "0.0.0.0:80" || time.Duration(config.Timeout) != time.Millisecond*100 || - time.Duration(config.IdleTimeout) != time.Second*120 || time.Duration(config.KeepAliveInterval) != time.Second*120 || - time.Duration(config.MaxLifeTime) != time.Hour*4 || time.Duration(config.ForceCloseWait) != time.Second*3 || time.Duration(config.KeepAliveTimeout) != time.Second*20 { - t.Fatalf("parseDSN(%s) not compare config result(%+v)", dsn, config) - } - - dsn = "unix:///temp/warden.sock?timeout=300ms" - config = parseDSN(dsn) - if config.Network != "unix" || config.Addr != "/temp/warden.sock" || time.Duration(config.Timeout) != time.Millisecond*300 { - t.Fatalf("parseDSN(%s) not compare config result(%+v)", dsn, config) - } -} - -type testServer struct { - helloFn func(ctx context.Context, req *pb.HelloRequest) (*pb.HelloReply, error) -} - -func (t *testServer) SayHello(ctx context.Context, req *pb.HelloRequest) (*pb.HelloReply, error) { - return t.helloFn(ctx, req) -} - -func (t *testServer) StreamHello(pb.Greeter_StreamHelloServer) error { panic("not implemented") } - -// NewTestServerClient . 
-func NewTestServerClient(invoker func(ctx context.Context, req *pb.HelloRequest) (*pb.HelloReply, error), svrcfg *ServerConfig, clicfg *ClientConfig) (pb.GreeterClient, func() error) { - srv := NewServer(svrcfg) - pb.RegisterGreeterServer(srv.Server(), &testServer{helloFn: invoker}) - - lis, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - panic(err) - } - ch := make(chan bool, 1) - go func() { - ch <- true - srv.Serve(lis) - }() - <-ch - println(lis.Addr().String()) - conn, err := NewConn(lis.Addr().String()) - if err != nil { - panic(err) - } - return pb.NewGreeterClient(conn), func() error { return srv.Shutdown(context.Background()) } -} - -func TestMetadata(t *testing.T) { - cli, cancel := NewTestServerClient(func(ctx context.Context, req *pb.HelloRequest) (*pb.HelloReply, error) { - assert.Equal(t, "red", nmd.String(ctx, nmd.Color)) - assert.Equal(t, "2.2.3.3", nmd.String(ctx, nmd.RemoteIP)) - assert.Equal(t, "2233", nmd.String(ctx, nmd.RemotePort)) - return &pb.HelloReply{}, nil - }, nil, nil) - defer cancel() - - ctx := nmd.NewContext(context.Background(), nmd.MD{ - nmd.Color: "red", - nmd.RemoteIP: "2.2.3.3", - nmd.RemotePort: "2233", - }) - _, err := cli.SayHello(ctx, &pb.HelloRequest{Name: "test"}) - assert.Nil(t, err) -} - -func TestStartWithAddr(t *testing.T) { - configuredAddr := "127.0.0.1:0" - server = NewServer(&ServerConfig{Addr: configuredAddr, Timeout: xtime.Duration(time.Second)}) - if _, realAddr, err := server.StartWithAddr(); err == nil && realAddr != nil { - assert.NotEqual(t, realAddr.String(), configuredAddr) - } else { - assert.NotNil(t, realAddr) - assert.Nil(t, err) - } -} diff --git a/pkg/net/rpc/warden/stats.go b/pkg/net/rpc/warden/stats.go deleted file mode 100644 index 924ba51f5..000000000 --- a/pkg/net/rpc/warden/stats.go +++ /dev/null @@ -1,25 +0,0 @@ -package warden - -import ( - "context" - "strconv" - - nmd "github.com/go-kratos/kratos/pkg/net/rpc/warden/internal/metadata" - "github.com/go-kratos/kratos/pkg/stat/sys/cpu" - - "google.golang.org/grpc" - gmd "google.golang.org/grpc/metadata" -) - -func (s *Server) stats() grpc.UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, args *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - resp, err = handler(ctx, req) - var cpustat cpu.Stat - cpu.ReadStat(&cpustat) - if cpustat.Usage != 0 { - trailer := gmd.Pairs([]string{nmd.CPUUsage, strconv.FormatInt(int64(cpustat.Usage), 10)}...) - grpc.SetTrailer(ctx, trailer) - } - return - } -} diff --git a/pkg/net/rpc/warden/validate.go b/pkg/net/rpc/warden/validate.go deleted file mode 100644 index 706123b40..000000000 --- a/pkg/net/rpc/warden/validate.go +++ /dev/null @@ -1,37 +0,0 @@ -package warden - -import ( - "context" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "google.golang.org/grpc" - "gopkg.in/go-playground/validator.v9" -) - -var validate = validator.New() - -// Validate return a client interceptor validate incoming request per RPC call. 
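A hedged sketch of registering a custom rule through RegisterValidation (defined just below); the "notblank" tag name and the rule body are illustrative only, and `s` is a *warden.Server:

```go
_ = s.RegisterValidation("notblank", func(fl validator.FieldLevel) bool {
	// reject values that are empty after trimming whitespace
	return strings.TrimSpace(fl.Field().String()) != ""
})
```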
-func (s *Server) validate() grpc.UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, args *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - if err = validate.Struct(req); err != nil { - err = status.Error(codes.InvalidArgument, err.Error()) - return - } - resp, err = handler(ctx, req) - return - } -} - -// RegisterValidation adds a validation Func to a Validate's map of validators denoted by the key -// NOTE: if the key already exists, the previous validation function will be replaced. -// NOTE: this method is not thread-safe; it is intended that these all be registered prior to any validation -func (s *Server) RegisterValidation(key string, fn validator.Func) error { - return validate.RegisterValidation(key, fn) -} - -// GetValidate returns the default validator -func (s *Server) GetValidate() *validator.Validate { - return validate -} diff --git a/pkg/net/trace/README.md b/pkg/net/trace/README.md deleted file mode 100644 index ce87fa101..000000000 --- a/pkg/net/trace/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# net/trace - -## Overview -1. Provides the Trace interface specification -2. Provides the trace implementation of the Tracer interface for business services to integrate with - -## Getting started -1. Startup example - ```go - trace.Init(traceConfig) // traceConfig is a Config object with values set. - ``` -2. Configuration reference - ```toml - [tracer] - network = "unixgram" - addr = "/var/run/dapper-collect/dapper-collect.sock" - ``` - -## Testing -1. Run all test files in the current directory to exercise every feature diff --git a/pkg/net/trace/config.go b/pkg/net/trace/config.go deleted file mode 100644 index 3f46cf9cc..000000000 --- a/pkg/net/trace/config.go +++ /dev/null @@ -1,75 +0,0 @@ -package trace - -import ( - "flag" - "fmt" - "os" - "time" - - "github.com/pkg/errors" - - "github.com/go-kratos/kratos/pkg/conf/dsn" - "github.com/go-kratos/kratos/pkg/conf/env" - xtime "github.com/go-kratos/kratos/pkg/time" -) - -var _traceDSN = "unixgram:///var/run/dapper-collect/dapper-collect.sock" - -func init() { - if v := os.Getenv("TRACE"); v != "" { - _traceDSN = v - } - flag.StringVar(&_traceDSN, "trace", _traceDSN, "trace report dsn, or use TRACE env.") -} - -// Config is the trace reporter config. -type Config struct { - // Report network e.g. unixgram, tcp, udp - Network string `dsn:"network"` - // For TCP and UDP networks, the addr has the form "host:port". - // For Unix networks, the address must be a file system path. - Addr string `dsn:"address"` - // Report timeout - Timeout xtime.Duration `dsn:"query.timeout,200ms"` - // DisableSample - DisableSample bool `dsn:"query.disable_sample"` - // ProtocolVersion - ProtocolVersion int32 `dsn:"query.protocol_version,1"` - // Probability probability sampling - Probability float32 `dsn:"-"` -} - -func parseDSN(rawdsn string) (*Config, error) { - d, err := dsn.Parse(rawdsn) - if err != nil { - return nil, errors.Wrapf(err, "trace: invalid dsn: %s", rawdsn) - } - cfg := new(Config) - if _, err = d.Bind(cfg); err != nil { - return nil, errors.Wrapf(err, "trace: invalid dsn: %s", rawdsn) - } - return cfg, nil -} - -// TracerFromEnvFlag creates a new tracer from the env and flag -func TracerFromEnvFlag() (Tracer, error) { - cfg, err := parseDSN(_traceDSN) - if err != nil { - return nil, err - } - report := newReport(cfg.Network, cfg.Addr, time.Duration(cfg.Timeout), cfg.ProtocolVersion) - return NewTracer(env.AppID, report, cfg.DisableSample), nil -} - -// Init initializes the trace reporter.
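A usage sketch matching the README above: pass nil to pick up the -trace flag / TRACE env DSN, or supply a Config explicitly.

```go
trace.Init(nil) // falls back to the package default DSN unless the -trace flag or TRACE env overrides it

// or configure explicitly
trace.Init(&trace.Config{
	Network: "unixgram",
	Addr:    "/var/run/dapper-collect/dapper-collect.sock",
	Timeout: xtime.Duration(200 * time.Millisecond),
})
```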
-func Init(cfg *Config) { - if cfg == nil { - // paser config from env - var err error - if cfg, err = parseDSN(_traceDSN); err != nil { - panic(fmt.Errorf("parse trace dsn error: %s", err)) - } - } - report := newReport(cfg.Network, cfg.Addr, time.Duration(cfg.Timeout), cfg.ProtocolVersion) - SetGlobalTracer(NewTracer(env.AppID, report, cfg.DisableSample)) -} diff --git a/pkg/net/trace/config_test.go b/pkg/net/trace/config_test.go deleted file mode 100644 index 1d7726806..000000000 --- a/pkg/net/trace/config_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package trace - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestParseDSN(t *testing.T) { - _, err := parseDSN(_traceDSN) - if err != nil { - t.Error(err) - } -} - -func TestTraceFromEnvFlag(t *testing.T) { - _, err := TracerFromEnvFlag() - if err != nil { - t.Error(err) - } -} - -func TestInit(t *testing.T) { - Init(nil) - _, ok := _tracer.(nooptracer) - assert.False(t, ok) - - _tracer = nooptracer{} - - Init(&Config{Network: "unixgram", Addr: "unixgram:///var/run/dapper-collect/dapper-collect.sock"}) - _, ok = _tracer.(nooptracer) - assert.False(t, ok) -} diff --git a/pkg/net/trace/const.go b/pkg/net/trace/const.go deleted file mode 100644 index 616b0fe08..000000000 --- a/pkg/net/trace/const.go +++ /dev/null @@ -1,7 +0,0 @@ -package trace - -// Trace key -const ( - KratosTraceID = "kratos-trace-id" - KratosTraceDebug = "kratos-trace-debug" -) diff --git a/pkg/net/trace/context.go b/pkg/net/trace/context.go deleted file mode 100644 index 50e0be170..000000000 --- a/pkg/net/trace/context.go +++ /dev/null @@ -1,110 +0,0 @@ -package trace - -import ( - "strconv" - "strings" - - "github.com/pkg/errors" -) - -const ( - flagSampled = 0x01 - flagDebug = 0x02 -) - -var ( - errEmptyTracerString = errors.New("trace: cannot convert empty string to spancontext") - errInvalidTracerString = errors.New("trace: string does not match spancontext string format") -) - -// SpanContext implements opentracing.SpanContext -type spanContext struct { - // TraceID represents globally unique ID of the trace. - // Usually generated as a random number. - TraceID uint64 - - // SpanID represents span ID that must be unique within its trace, - // but does not have to be globally unique. - SpanID uint64 - - // ParentID refers to the ID of the parent span. - // Should be 0 if the current span is a root span. - ParentID uint64 - - // Flags is a bitmap containing such bits as 'sampled' and 'debug'. - Flags byte - - // Probability - Probability float32 - - // Level current level - Level int -} - -func (c spanContext) isSampled() bool { - return (c.Flags & flagSampled) == flagSampled -} - -func (c spanContext) isDebug() bool { - return (c.Flags & flagDebug) == flagDebug -} - -// IsValid check spanContext valid -func (c spanContext) IsValid() bool { - return c.TraceID != 0 && c.SpanID != 0 -} - -// emptyContext emptyContext -var emptyContext = spanContext{} - -// String convert spanContext to String -// {TraceID}:{SpanID}:{ParentID}:{flags}:[extend...] 
-// TraceID: uint64 base16 -// SpanID: uint64 base16 -// ParentID: uint64 base16 -// flags: -// - :0 sampled flag -// - :1 debug flag -// extend: -// sample-rate: s-{base16(BigEndian(float32))} -func (c spanContext) String() string { - base := make([]string, 4) - base[0] = strconv.FormatUint(c.TraceID, 16) - base[1] = strconv.FormatUint(c.SpanID, 16) - base[2] = strconv.FormatUint(c.ParentID, 16) - base[3] = strconv.FormatUint(uint64(c.Flags), 16) - return strings.Join(base, ":") -} - -// ContextFromString parse spanContext form string -func contextFromString(value string) (spanContext, error) { - if value == "" { - return emptyContext, errEmptyTracerString - } - items := strings.Split(value, ":") - if len(items) < 4 { - return emptyContext, errInvalidTracerString - } - parseHexUint64 := func(hexs []string) ([]uint64, error) { - rets := make([]uint64, len(hexs)) - var err error - for i, hex := range hexs { - rets[i], err = strconv.ParseUint(hex, 16, 64) - if err != nil { - break - } - } - return rets, err - } - rets, err := parseHexUint64(items[0:4]) - if err != nil { - return emptyContext, errInvalidTracerString - } - sctx := spanContext{ - TraceID: rets[0], - SpanID: rets[1], - ParentID: rets[2], - Flags: byte(rets[3]), - } - return sctx, nil -} diff --git a/pkg/net/trace/context_test.go b/pkg/net/trace/context_test.go deleted file mode 100644 index fb5e02f92..000000000 --- a/pkg/net/trace/context_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package trace - -import ( - "testing" -) - -func TestSpanContext(t *testing.T) { - pctx := &spanContext{ - ParentID: genID(), - SpanID: genID(), - TraceID: genID(), - Flags: flagSampled, - } - if !pctx.isSampled() { - t.Error("expect sampled") - } - value := pctx.String() - t.Logf("bili-trace-id: %s", value) - pctx2, err := contextFromString(value) - if err != nil { - t.Error(err) - } - if pctx2.ParentID != pctx.ParentID || pctx2.SpanID != pctx.SpanID || pctx2.TraceID != pctx.TraceID || pctx2.Flags != pctx.Flags { - t.Errorf("wrong spancontext get %+v -> %+v", pctx, pctx2) - } -} diff --git a/pkg/net/trace/dapper.go b/pkg/net/trace/dapper.go deleted file mode 100644 index 88934b103..000000000 --- a/pkg/net/trace/dapper.go +++ /dev/null @@ -1,189 +0,0 @@ -package trace - -import ( - "log" - "os" - "sync" - "time" -) - -const ( - _maxLevel = 64 - // hard code reset probability at 0.00025, 1/4000 - _probability = 0.00025 -) - -// NewTracer new a tracer. 
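A minimal span lifecycle sketch mirroring TestDapperPropagation further down; mockReport is the in-package test reporter defined in dapper_test.go:

```go
report := &mockReport{}
t1 := NewTracer("service1", report, true)
sp1 := t1.New("opt_1")            // root span
sp2 := sp1.Fork("", "opt_client") // child span for an outgoing call
sp2.Finish(nil)
sp1.Finish(nil)
```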
-func NewTracer(serviceName string, report reporter, disableSample bool) Tracer { - sampler := newSampler(_probability) - - // default internal tags - tags := extendTag() - stdlog := log.New(os.Stderr, "trace", log.LstdFlags) - return &dapper{ - serviceName: serviceName, - disableSample: disableSample, - propagators: map[interface{}]propagator{ - HTTPFormat: httpPropagator{}, - GRPCFormat: grpcPropagator{}, - }, - reporter: report, - sampler: sampler, - tags: tags, - pool: &sync.Pool{New: func() interface{} { return new(Span) }}, - stdlog: stdlog, - } -} - -type dapper struct { - serviceName string - disableSample bool - tags []Tag - reporter reporter - propagators map[interface{}]propagator - pool *sync.Pool - stdlog *log.Logger - sampler sampler -} - -func (d *dapper) New(operationName string, opts ...Option) Trace { - opt := defaultOption - for _, fn := range opts { - fn(&opt) - } - traceID := genID() - var sampled bool - var probability float32 - if d.disableSample { - sampled = true - probability = 1 - } else { - sampled, probability = d.sampler.IsSampled(traceID, operationName) - } - pctx := spanContext{TraceID: traceID} - if sampled { - pctx.Flags = flagSampled - pctx.Probability = probability - } - if opt.Debug { - pctx.Flags |= flagDebug - return d.newSpanWithContext(operationName, pctx).SetTag(TagString(TagSpanKind, "server")).SetTag(TagBool("debug", true)) - } - // for compatibility, temporarily set span.kind on the Span created by New - return d.newSpanWithContext(operationName, pctx).SetTag(TagString(TagSpanKind, "server")) -} - -func (d *dapper) newSpanWithContext(operationName string, pctx spanContext) Trace { - sp := d.getSpan() - // if the span is not sampled just return a span with this context, no need to clear it - //if !pctx.isSampled() { - // sp.context = pctx - // return sp - //} - if pctx.Level > _maxLevel { - // if the span reaches the max level limit return a noopspan - return noopspan{} - } - level := pctx.Level + 1 - nctx := spanContext{ - TraceID: pctx.TraceID, - ParentID: pctx.SpanID, - Flags: pctx.Flags, - Level: level, - } - if pctx.SpanID == 0 { - nctx.SpanID = pctx.TraceID - } else { - nctx.SpanID = genID() - } - sp.operationName = operationName - sp.context = nctx - sp.startTime = time.Now() - sp.tags = append(sp.tags, d.tags...)
- return sp -} - -func (d *dapper) Inject(t Trace, format interface{}, carrier interface{}) error { - // if carrier implement Carrier use direct, ignore format - carr, ok := carrier.(Carrier) - if ok { - t.Visit(carr.Set) - return nil - } - // use Built-in propagators - pp, ok := d.propagators[format] - if !ok { - return ErrUnsupportedFormat - } - carr, err := pp.Inject(carrier) - if err != nil { - return err - } - if t != nil { - t.Visit(carr.Set) - } - return nil -} - -func (d *dapper) Extract(format interface{}, carrier interface{}) (Trace, error) { - sp, err := d.extract(format, carrier) - if err != nil { - return sp, err - } - // 为了兼容临时为 New 的 Span 设置 span.kind - return sp.SetTag(TagString(TagSpanKind, "server")), nil -} - -func (d *dapper) extract(format interface{}, carrier interface{}) (Trace, error) { - // if carrier implement Carrier use direct, ignore format - carr, ok := carrier.(Carrier) - if !ok { - // use Built-in propagators - pp, ok := d.propagators[format] - if !ok { - return nil, ErrUnsupportedFormat - } - var err error - if carr, err = pp.Extract(carrier); err != nil { - return nil, err - } - } - pctx, err := contextFromString(carr.Get(KratosTraceID)) - if err != nil { - return nil, err - } - // NOTE: call SetTitle after extract trace - return d.newSpanWithContext("", pctx), nil -} - -func (d *dapper) Close() error { - return d.reporter.Close() -} - -func (d *dapper) report(sp *Span) { - if sp.context.isSampled() { - if err := d.reporter.WriteSpan(sp); err != nil { - d.stdlog.Printf("marshal trace span error: %s", err) - } - } - d.putSpan(sp) -} - -func (d *dapper) putSpan(sp *Span) { - if len(sp.tags) > 32 { - sp.tags = nil - } - if len(sp.logs) > 32 { - sp.logs = nil - } - d.pool.Put(sp) -} - -func (d *dapper) getSpan() *Span { - sp := d.pool.Get().(*Span) - sp.dapper = d - sp.childs = 0 - sp.tags = sp.tags[:0] - sp.logs = sp.logs[:0] - return sp -} diff --git a/pkg/net/trace/dapper_test.go b/pkg/net/trace/dapper_test.go deleted file mode 100644 index 0ee9cbbf0..000000000 --- a/pkg/net/trace/dapper_test.go +++ /dev/null @@ -1,136 +0,0 @@ -package trace - -import ( - "fmt" - "net/http" - "testing" - - "github.com/stretchr/testify/assert" - "google.golang.org/grpc/metadata" -) - -type mockReport struct { - sps []*Span -} - -func (m *mockReport) WriteSpan(sp *Span) error { - m.sps = append(m.sps, sp) - return nil -} - -func (m *mockReport) Close() error { - return nil -} - -func TestDapperPropagation(t *testing.T) { - t.Run("test HTTP progagation", func(t *testing.T) { - report := &mockReport{} - t1 := NewTracer("service1", report, true) - t2 := NewTracer("service2", report, true) - sp1 := t1.New("opt_1") - sp2 := sp1.Fork("", "opt_client") - header := make(http.Header) - t1.Inject(sp2, HTTPFormat, header) - sp3, err := t2.Extract(HTTPFormat, header) - if err != nil { - t.Fatal(err) - } - sp3.Finish(nil) - sp2.Finish(nil) - sp1.Finish(nil) - - assert.Len(t, report.sps, 3) - assert.Equal(t, report.sps[2].context.ParentID, uint64(0)) - assert.Equal(t, report.sps[0].context.TraceID, report.sps[1].context.TraceID) - assert.Equal(t, report.sps[2].context.TraceID, report.sps[1].context.TraceID) - - assert.Equal(t, report.sps[1].context.ParentID, report.sps[2].context.SpanID) - assert.Equal(t, report.sps[0].context.ParentID, report.sps[1].context.SpanID) - }) - t.Run("test gRPC progagation", func(t *testing.T) { - report := &mockReport{} - t1 := NewTracer("service1", report, true) - t2 := NewTracer("service2", report, true) - sp1 := t1.New("opt_1") - sp2 := sp1.Fork("", 
"opt_client") - md := make(metadata.MD) - t1.Inject(sp2, GRPCFormat, md) - sp3, err := t2.Extract(GRPCFormat, md) - if err != nil { - t.Fatal(err) - } - sp3.Finish(nil) - sp2.Finish(nil) - sp1.Finish(nil) - - assert.Len(t, report.sps, 3) - assert.Equal(t, report.sps[2].context.ParentID, uint64(0)) - assert.Equal(t, report.sps[0].context.TraceID, report.sps[1].context.TraceID) - assert.Equal(t, report.sps[2].context.TraceID, report.sps[1].context.TraceID) - - assert.Equal(t, report.sps[1].context.ParentID, report.sps[2].context.SpanID) - assert.Equal(t, report.sps[0].context.ParentID, report.sps[1].context.SpanID) - }) - t.Run("test normal", func(t *testing.T) { - report := &mockReport{} - t1 := NewTracer("service1", report, true) - sp1 := t1.New("test123") - sp1.Finish(nil) - }) - t.Run("test debug progagation", func(t *testing.T) { - report := &mockReport{} - t1 := NewTracer("service1", report, true) - t2 := NewTracer("service2", report, true) - sp1 := t1.New("opt_1", EnableDebug()) - sp2 := sp1.Fork("", "opt_client") - header := make(http.Header) - t1.Inject(sp2, HTTPFormat, header) - sp3, err := t2.Extract(HTTPFormat, header) - if err != nil { - t.Fatal(err) - } - sp3.Finish(nil) - sp2.Finish(nil) - sp1.Finish(nil) - - assert.Len(t, report.sps, 3) - assert.Equal(t, report.sps[2].context.ParentID, uint64(0)) - assert.Equal(t, report.sps[0].context.TraceID, report.sps[1].context.TraceID) - assert.Equal(t, report.sps[2].context.TraceID, report.sps[1].context.TraceID) - - assert.Equal(t, report.sps[1].context.ParentID, report.sps[2].context.SpanID) - assert.Equal(t, report.sps[0].context.ParentID, report.sps[1].context.SpanID) - }) -} - -func BenchmarkSample(b *testing.B) { - err := fmt.Errorf("test error") - report := &mockReport{} - t1 := NewTracer("service1", report, true) - for i := 0; i < b.N; i++ { - sp1 := t1.New("test_opt1") - sp1.SetTag(TagString("test", "123")) - sp2 := sp1.Fork("", "opt2") - sp3 := sp2.Fork("", "opt3") - sp3.SetTag(TagString("test", "123")) - sp3.Finish(nil) - sp2.Finish(&err) - sp1.Finish(nil) - } -} - -func BenchmarkDisableSample(b *testing.B) { - err := fmt.Errorf("test error") - report := &mockReport{} - t1 := NewTracer("service1", report, true) - for i := 0; i < b.N; i++ { - sp1 := t1.New("test_opt1") - sp1.SetTag(TagString("test", "123")) - sp2 := sp1.Fork("", "opt2") - sp3 := sp2.Fork("", "opt3") - sp3.SetTag(TagString("test", "123")) - sp3.Finish(nil) - sp2.Finish(&err) - sp1.Finish(nil) - } -} diff --git a/pkg/net/trace/jaeger/config.go b/pkg/net/trace/jaeger/config.go deleted file mode 100644 index e5170cc25..000000000 --- a/pkg/net/trace/jaeger/config.go +++ /dev/null @@ -1,33 +0,0 @@ -package jaeger - -import ( - "flag" - "os" - - "github.com/go-kratos/kratos/pkg/conf/env" - "github.com/go-kratos/kratos/pkg/net/trace" -) - -var ( - _jaegerAppID = env.AppID - _jaegerEndpoint = "http://127.0.0.1:9191" -) - -func init() { - if v := os.Getenv("JAEGER_ENDPOINT"); v != "" { - _jaegerEndpoint = v - } - - if v := os.Getenv("JAEGER_APPID"); v != "" { - _jaegerAppID = v - } - - flag.StringVar(&_jaegerEndpoint, "jaeger_endpoint", _jaegerEndpoint, "jaeger report endpoint, or use JAEGER_ENDPOINT env.") - flag.StringVar(&_jaegerAppID, "jaeger_appid", _jaegerAppID, "jaeger report appid, or use JAEGER_APPID env.") -} - -// Init Init -func Init() { - c := &Config{Endpoint: _jaegerEndpoint, BatchSize: 120} - trace.SetGlobalTracer(trace.NewTracer(_jaegerAppID, newReport(c), true)) -} diff --git a/pkg/net/trace/jaeger/http_transport.go 
b/pkg/net/trace/jaeger/http_transport.go deleted file mode 100644 index 17ab38e2b..000000000 --- a/pkg/net/trace/jaeger/http_transport.go +++ /dev/null @@ -1,314 +0,0 @@ -package jaeger - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "time" - - "github.com/opentracing/opentracing-go" - ja "github.com/uber/jaeger-client-go" - "github.com/uber/jaeger-client-go/thrift" - j "github.com/uber/jaeger-client-go/thrift-gen/jaeger" -) - -// Default timeout for http request in seconds -const defaultHTTPTimeout = time.Second * 5 - -// HTTPTransport implements Transport by forwarding spans to a http server. -type HTTPTransport struct { - url string - client *http.Client - batchSize int - spans []*j.Span - process *j.Process - httpCredentials *HTTPBasicAuthCredentials - headers map[string]string -} - -// HTTPBasicAuthCredentials stores credentials for HTTP basic auth. -type HTTPBasicAuthCredentials struct { - username string - password string -} - -// HTTPOption sets a parameter for the HttpCollector -type HTTPOption func(c *HTTPTransport) - -// HTTPTimeout sets maximum timeout for http request. -func HTTPTimeout(duration time.Duration) HTTPOption { - return func(c *HTTPTransport) { c.client.Timeout = duration } -} - -// HTTPBatchSize sets the maximum batch size, after which a collect will be -// triggered. The default batch size is 100 spans. -func HTTPBatchSize(n int) HTTPOption { - return func(c *HTTPTransport) { c.batchSize = n } -} - -// HTTPBasicAuth sets the credentials required to perform HTTP basic auth -func HTTPBasicAuth(username string, password string) HTTPOption { - return func(c *HTTPTransport) { - c.httpCredentials = &HTTPBasicAuthCredentials{username: username, password: password} - } -} - -// HTTPRoundTripper configures the underlying Transport on the *http.Client -// that is used -func HTTPRoundTripper(transport http.RoundTripper) HTTPOption { - return func(c *HTTPTransport) { - c.client.Transport = transport - } -} - -// HTTPHeaders defines the HTTP headers that will be attached to the jaeger client's HTTP request -func HTTPHeaders(headers map[string]string) HTTPOption { - return func(c *HTTPTransport) { - c.headers = headers - } -} - -// NewHTTPTransport returns a new HTTP-backend transport. url should be an http -// url of the collector to handle POST request, typically something like: -// http://hostname:14268/api/traces?format=jaeger.thrift -func NewHTTPTransport(url string, options ...HTTPOption) *HTTPTransport { - c := &HTTPTransport{ - url: url, - client: &http.Client{Timeout: defaultHTTPTimeout}, - batchSize: 100, - spans: []*j.Span{}, - } - - for _, option := range options { - option(c) - } - return c -} - -// Append implements Transport. -func (c *HTTPTransport) Append(span *Span) (int, error) { - if c.process == nil { - process := j.NewProcess() - process.ServiceName = span.ServiceName() - c.process = process - } - jSpan := BuildJaegerThrift(span) - c.spans = append(c.spans, jSpan) - if len(c.spans) >= c.batchSize { - return c.Flush() - } - return 0, nil -} - -// Flush implements Transport. -func (c *HTTPTransport) Flush() (int, error) { - count := len(c.spans) - if count == 0 { - return 0, nil - } - err := c.send(c.spans) - c.spans = c.spans[:0] - return count, err -} - -// Close implements Transport. 
-func (c *HTTPTransport) Close() error { - return nil -} - -func (c *HTTPTransport) send(spans []*j.Span) error { - batch := &j.Batch{ - Spans: spans, - Process: c.process, - } - body, err := serializeThrift(batch) - if err != nil { - return err - } - req, err := http.NewRequest("POST", c.url, body) - if err != nil { - return err - } - req.Header.Set("Content-Type", "application/x-thrift") - for k, v := range c.headers { - req.Header.Set(k, v) - } - - if c.httpCredentials != nil { - req.SetBasicAuth(c.httpCredentials.username, c.httpCredentials.password) - } - - resp, err := c.client.Do(req) - if err != nil { - return err - } - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - if resp.StatusCode >= http.StatusBadRequest { - return fmt.Errorf("error from collector: %d", resp.StatusCode) - } - return nil -} - -func serializeThrift(obj thrift.TStruct) (*bytes.Buffer, error) { - t := thrift.NewTMemoryBuffer() - p := thrift.NewTBinaryProtocolTransport(t) - if err := obj.Write(p); err != nil { - return nil, err - } - return t.Buffer, nil -} - -func BuildJaegerThrift(span *Span) *j.Span { - span.Lock() - defer span.Unlock() - startTime := span.startTime.UnixNano() / 1000 - duration := span.duration.Nanoseconds() / int64(time.Microsecond) - jaegerSpan := &j.Span{ - TraceIdLow: int64(span.context.traceID.Low), - TraceIdHigh: int64(span.context.traceID.High), - SpanId: int64(span.context.spanID), - ParentSpanId: int64(span.context.parentID), - OperationName: span.operationName, - Flags: int32(span.context.samplingState.flags()), - StartTime: startTime, - Duration: duration, - Tags: buildTags(span.tags, 100), - Logs: buildLogs(span.logs), - References: buildReferences(span.references), - } - return jaegerSpan -} - -func stringify(value interface{}) string { - if s, ok := value.(string); ok { - return s - } - return fmt.Sprintf("%+v", value) -} - -func truncateString(value string, maxLength int) string { - // we ignore the problem of utf8 runes possibly being sliced in the middle, - // as it is rather expensive to iterate through each tag just to find rune - // boundaries. 
- if len(value) > maxLength { - return value[:maxLength] - } - return value -} - -func buildTags(tags []Tag, maxTagValueLength int) []*j.Tag { - jTags := make([]*j.Tag, 0, len(tags)) - for _, tag := range tags { - jTag := buildTag(&tag, maxTagValueLength) - jTags = append(jTags, jTag) - } - return jTags -} -func buildTag(tag *Tag, maxTagValueLength int) *j.Tag { - jTag := &j.Tag{Key: tag.key} - switch value := tag.value.(type) { - case string: - vStr := truncateString(value, maxTagValueLength) - jTag.VStr = &vStr - jTag.VType = j.TagType_STRING - case []byte: - if len(value) > maxTagValueLength { - value = value[:maxTagValueLength] - } - jTag.VBinary = value - jTag.VType = j.TagType_BINARY - case int: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case uint: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case int8: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case uint8: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case int16: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case uint16: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case int32: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case uint32: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case int64: - vLong := value - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case uint64: - vLong := int64(value) - jTag.VLong = &vLong - jTag.VType = j.TagType_LONG - case float32: - vDouble := float64(value) - jTag.VDouble = &vDouble - jTag.VType = j.TagType_DOUBLE - case float64: - vDouble := value - jTag.VDouble = &vDouble - jTag.VType = j.TagType_DOUBLE - case bool: - vBool := value - jTag.VBool = &vBool - jTag.VType = j.TagType_BOOL - default: - vStr := truncateString(stringify(value), maxTagValueLength) - jTag.VStr = &vStr - jTag.VType = j.TagType_STRING - } - return jTag -} - -func buildLogs(logs []opentracing.LogRecord) []*j.Log { - jLogs := make([]*j.Log, 0, len(logs)) - for _, log := range logs { - jLog := &j.Log{ - Timestamp: log.Timestamp.UnixNano() / 1000, - Fields: ja.ConvertLogsToJaegerTags(log.Fields), - } - jLogs = append(jLogs, jLog) - } - return jLogs -} - -func buildReferences(references []Reference) []*j.SpanRef { - retMe := make([]*j.SpanRef, 0, len(references)) - for _, ref := range references { - if ref.Type == opentracing.ChildOfRef { - retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_CHILD_OF)) - } else if ref.Type == opentracing.FollowsFromRef { - retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_FOLLOWS_FROM)) - } - } - return retMe -} - -func spanRef(ctx SpanContext, refType j.SpanRefType) *j.SpanRef { - return &j.SpanRef{ - RefType: refType, - TraceIdLow: int64(ctx.traceID.Low), - TraceIdHigh: int64(ctx.traceID.High), - SpanId: int64(ctx.spanID), - } -} diff --git a/pkg/net/trace/jaeger/jaeger.go b/pkg/net/trace/jaeger/jaeger.go deleted file mode 100644 index 5feda289e..000000000 --- a/pkg/net/trace/jaeger/jaeger.go +++ /dev/null @@ -1,49 +0,0 @@ -package jaeger - -import ( - "github.com/go-kratos/kratos/pkg/log" - "github.com/go-kratos/kratos/pkg/net/trace" -) - -type Config struct { - Endpoint string - BatchSize int -} - -type JaegerReporter struct { - transport *HTTPTransport -} - -func newReport(c *Config) *JaegerReporter { - transport := NewHTTPTransport(c.Endpoint) - transport.batchSize = c.BatchSize - return &JaegerReporter{transport: 
transport} -} - -func (r *JaegerReporter) WriteSpan(raw *trace.Span) (err error) { - ctx := raw.Context() - traceID := TraceID{Low: ctx.TraceID} - spanID := SpanID(ctx.SpanID) - parentID := SpanID(ctx.ParentID) - tags := raw.Tags() - log.Info("[info] write span") - span := &Span{ - context: NewSpanContext(traceID, spanID, parentID, true, nil), - operationName: raw.OperationName(), - startTime: raw.StartTime(), - duration: raw.Duration(), - } - - span.serviceName = raw.ServiceName() - - for _, t := range tags { - span.SetTag(t.Key, t.Value) - } - - r.transport.Append(span) - return nil -} - -func (rpt *JaegerReporter) Close() error { - return rpt.transport.Close() -} diff --git a/pkg/net/trace/jaeger/jaeger_test.go b/pkg/net/trace/jaeger/jaeger_test.go deleted file mode 100644 index d082eb8ea..000000000 --- a/pkg/net/trace/jaeger/jaeger_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package jaeger - -import ( - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - - "github.com/go-kratos/kratos/pkg/net/trace" -) - -func TestJaegerReporter(t *testing.T) { - var handler = func(w http.ResponseWriter, r *http.Request) { - if r.Method != "POST" { - t.Errorf("expected 'POST' request, got '%s'", r.Method) - } - - aSpanPayload, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Errorf("unexpected error: %s", err.Error()) - } - - t.Logf("%s\n", aSpanPayload) - } - ht := httptest.NewServer(http.HandlerFunc(handler)) - defer ht.Close() - - c := &Config{ - Endpoint: ht.URL, - BatchSize: 1, - } - - //c.Endpoint = "http://127.0.0.1:14268/api/traces" - - report := newReport(c) - t1 := trace.NewTracer("jaeger_test_1", report, true) - t2 := trace.NewTracer("jaeger_test_2", report, true) - sp1 := t1.New("option_1") - sp2 := sp1.Fork("service3", "opt_client") - sp2.SetLog(trace.Log("log_k", "log_v")) - // inject - header := make(http.Header) - t1.Inject(sp2, trace.HTTPFormat, header) - t.Log(header) - sp3, err := t2.Extract(trace.HTTPFormat, header) - if err != nil { - t.Fatal(err) - } - sp3.Finish(nil) - sp2.Finish(nil) - sp1.Finish(nil) - report.Close() -} diff --git a/pkg/net/trace/jaeger/reference.go b/pkg/net/trace/jaeger/reference.go deleted file mode 100644 index becc01685..000000000 --- a/pkg/net/trace/jaeger/reference.go +++ /dev/null @@ -1,9 +0,0 @@ -package jaeger - -import "github.com/opentracing/opentracing-go" - -// Reference represents a causal reference to other Spans (via their SpanContext). -type Reference struct { - Type opentracing.SpanReferenceType - Context SpanContext -} diff --git a/pkg/net/trace/jaeger/span.go b/pkg/net/trace/jaeger/span.go deleted file mode 100644 index 33dab4ebe..000000000 --- a/pkg/net/trace/jaeger/span.go +++ /dev/null @@ -1,345 +0,0 @@ -package jaeger - -import ( - "sync" - "sync/atomic" - "time" - - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/log" -) - -// Span implements opentracing.Span -type Span struct { - // referenceCounter used to increase the lifetime of - // the object before return it into the pool. - referenceCounter int32 - - serviceName string - - sync.RWMutex - - // TODO: (breaking change) change to use a pointer - context SpanContext - - // The name of the "operation" this span is an instance of. - // Known as a "span name" in some implementations. - operationName string - - // firstInProcess, if true, indicates that this span is the root of the (sub)tree - // of spans in the current process. In other words it's true for the root spans, - // and the ingress spans when the process joins another trace. 
- firstInProcess bool - - // startTime is the timestamp indicating when the span began, with microseconds precision. - startTime time.Time - - // duration returns duration of the span with microseconds precision. - // Zero value means duration is unknown. - duration time.Duration - - // tags attached to this span - tags []Tag - - // The span's "micro-log" - logs []opentracing.LogRecord - - // The number of logs dropped because of MaxLogsPerSpan. - numDroppedLogs int - - // references for this span - references []Reference -} - -// Tag is a simple key value wrapper. -// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead. -type Tag struct { - key string - value interface{} -} - -// NewTag creates a new Tag. -// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead. -func NewTag(key string, value interface{}) Tag { - return Tag{key: key, value: value} -} - -// SetOperationName sets or changes the operation name. -func (s *Span) SetOperationName(operationName string) opentracing.Span { - s.Lock() - s.operationName = operationName - s.Unlock() - return s -} - -// SetTag implements SetTag() of opentracing.Span -func (s *Span) SetTag(key string, value interface{}) opentracing.Span { - return s.setTagInternal(key, value, true) -} - -func (s *Span) setTagInternal(key string, value interface{}, lock bool) opentracing.Span { - if lock { - s.Lock() - defer s.Unlock() - } - s.appendTagNoLocking(key, value) - return s -} - -// SpanContext returns span context -func (s *Span) SpanContext() SpanContext { - s.Lock() - defer s.Unlock() - return s.context -} - -// StartTime returns span start time -func (s *Span) StartTime() time.Time { - s.Lock() - defer s.Unlock() - return s.startTime -} - -// Duration returns span duration -func (s *Span) Duration() time.Duration { - s.Lock() - defer s.Unlock() - return s.duration -} - -// Tags returns tags for span -func (s *Span) Tags() opentracing.Tags { - s.Lock() - defer s.Unlock() - var result = make(opentracing.Tags, len(s.tags)) - for _, tag := range s.tags { - result[tag.key] = tag.value - } - return result -} - -// Logs returns micro logs for span -func (s *Span) Logs() []opentracing.LogRecord { - s.Lock() - defer s.Unlock() - - logs := append([]opentracing.LogRecord(nil), s.logs...) - if s.numDroppedLogs != 0 { - fixLogs(logs, s.numDroppedLogs) - } - - return logs -} - -// References returns references for this span -func (s *Span) References() []opentracing.SpanReference { - s.Lock() - defer s.Unlock() - - if s.references == nil || len(s.references) == 0 { - return nil - } - - result := make([]opentracing.SpanReference, len(s.references)) - for i, r := range s.references { - result[i] = opentracing.SpanReference{Type: r.Type, ReferencedContext: r.Context} - } - return result -} - -func (s *Span) appendTagNoLocking(key string, value interface{}) { - s.tags = append(s.tags, Tag{key: key, value: value}) -} - -// LogFields implements opentracing.Span API -func (s *Span) LogFields(fields ...log.Field) { - s.Lock() - defer s.Unlock() - if !s.context.IsSampled() { - return - } - s.logFieldsNoLocking(fields...) 
-} - -// this function should only be called while holding a Write lock -func (s *Span) logFieldsNoLocking(fields ...log.Field) { - lr := opentracing.LogRecord{ - Fields: fields, - Timestamp: time.Now(), - } - s.appendLogNoLocking(lr) -} - -// LogKV implements opentracing.Span API -func (s *Span) LogKV(alternatingKeyValues ...interface{}) { - s.RLock() - sampled := s.context.IsSampled() - s.RUnlock() - if !sampled { - return - } - fields, err := log.InterleavedKVToFields(alternatingKeyValues...) - if err != nil { - s.LogFields(log.Error(err), log.String("function", "LogKV")) - return - } - s.LogFields(fields...) -} - -// LogEvent implements opentracing.Span API -func (s *Span) LogEvent(event string) { - s.Log(opentracing.LogData{Event: event}) -} - -// LogEventWithPayload implements opentracing.Span API -func (s *Span) LogEventWithPayload(event string, payload interface{}) { - s.Log(opentracing.LogData{Event: event, Payload: payload}) -} - -// Log implements opentracing.Span API -func (s *Span) Log(ld opentracing.LogData) { - s.Lock() - defer s.Unlock() - if s.context.IsSampled() { - s.appendLogNoLocking(ld.ToLogRecord()) - } -} - -// this function should only be called while holding a Write lock -func (s *Span) appendLogNoLocking(lr opentracing.LogRecord) { - maxLogs := 100 - // We have too many logs. We don't touch the first numOld logs; we treat the - // rest as a circular buffer and overwrite the oldest log among those. - numOld := (maxLogs - 1) / 2 - numNew := maxLogs - numOld - s.logs[numOld+s.numDroppedLogs%numNew] = lr - s.numDroppedLogs++ -} - -// rotateLogBuffer rotates the records in the buffer: records 0 to pos-1 move at -// the end (i.e. pos circular left shifts). -func rotateLogBuffer(buf []opentracing.LogRecord, pos int) { - // This algorithm is described in: - // http://www.cplusplus.com/reference/algorithm/rotate - for first, middle, next := 0, pos, pos; first != middle; { - buf[first], buf[next] = buf[next], buf[first] - first++ - next++ - if next == len(buf) { - next = middle - } else if first == middle { - middle = next - } - } -} - -func fixLogs(logs []opentracing.LogRecord, numDroppedLogs int) { - // We dropped some log events, which means that we used part of Logs as a - // circular buffer (see appendLog). De-circularize it. - numOld := (len(logs) - 1) / 2 - numNew := len(logs) - numOld - rotateLogBuffer(logs[numOld:], numDroppedLogs%numNew) - - // Replace the log in the middle (the oldest "new" log) with information - // about the dropped logs. This means that we are effectively dropping one - // more "new" log. - numDropped := numDroppedLogs + 1 - logs[numOld] = opentracing.LogRecord{ - // Keep the timestamp of the last dropped event. 
- Timestamp: logs[numOld].Timestamp, - Fields: []log.Field{ - log.String("event", "dropped Span logs"), - log.Int("dropped_log_count", numDropped), - log.String("component", "jaeger-client"), - }, - } -} - -func (s *Span) fixLogsIfDropped() { - if s.numDroppedLogs == 0 { - return - } - fixLogs(s.logs, s.numDroppedLogs) - s.numDroppedLogs = 0 -} - -// SetBaggageItem implements SetBaggageItem() of opentracing.SpanContext -func (s *Span) SetBaggageItem(key, value string) opentracing.Span { - s.context.baggage[key] = value - return s -} - -// BaggageItem implements BaggageItem() of opentracing.SpanContext -func (s *Span) BaggageItem(key string) string { - s.RLock() - defer s.RUnlock() - return s.context.baggage[key] -} - -// Finish implements opentracing.Span API -// After finishing the Span object it returns back to the allocator unless the reporter retains it again, -// so after that, the Span object should no longer be used because it won't be valid anymore. -func (s *Span) Finish() { - s.FinishWithOptions(opentracing.FinishOptions{}) -} - -// FinishWithOptions implements opentracing.Span API -func (s *Span) FinishWithOptions(options opentracing.FinishOptions) { -} - -// Context implements opentracing.Span API -func (s *Span) Context() opentracing.SpanContext { - s.Lock() - defer s.Unlock() - return s.context -} - -// Tracer implements opentracing.Span API -func (s *Span) Tracer() opentracing.Tracer { - return nil -} - -func (s *Span) String() string { - s.RLock() - defer s.RUnlock() - return s.context.String() -} - -// OperationName allows retrieving current operation name. -func (s *Span) OperationName() string { - s.RLock() - defer s.RUnlock() - return s.operationName -} - -// Retain increases object counter to increase the lifetime of the object -func (s *Span) Retain() *Span { - atomic.AddInt32(&s.referenceCounter, 1) - return s -} - -// Release decrements object counter and return to the -// allocator manager when counter will below zero -func (s *Span) Release() { - -} - -// reset span state and release unused data -func (s *Span) reset() { - s.firstInProcess = false - s.context = emptyContext - s.operationName = "" - s.startTime = time.Time{} - s.duration = 0 - atomic.StoreInt32(&s.referenceCounter, 0) - - // Note: To reuse memory we can save the pointers on the heap - s.tags = s.tags[:0] - s.logs = s.logs[:0] - s.numDroppedLogs = 0 - s.references = s.references[:0] -} - -func (s *Span) ServiceName() string { - return s.serviceName -} diff --git a/pkg/net/trace/jaeger/span_context.go b/pkg/net/trace/jaeger/span_context.go deleted file mode 100644 index d580c2d7f..000000000 --- a/pkg/net/trace/jaeger/span_context.go +++ /dev/null @@ -1,369 +0,0 @@ -package jaeger - -import ( - "errors" - "fmt" - "strconv" - "strings" - "sync" - - "go.uber.org/atomic" -) - -const ( - flagSampled = 1 - flagDebug = 2 - flagFirehose = 8 -) - -var ( - errEmptyTracerStateString = errors.New("Cannot convert empty string to tracer state") - errMalformedTracerStateString = errors.New("String does not match tracer state format") - - emptyContext = SpanContext{} -) - -// TraceID represents unique 128bit identifier of a trace -type TraceID struct { - High, Low uint64 -} - -// SpanID represents unique 64bit identifier of a span -type SpanID uint64 - -// SpanContext represents propagated span identity and state -type SpanContext struct { - // traceID represents globally unique ID of the trace. - // Usually generated as a random number. 
- traceID TraceID - - // spanID represents span ID that must be unique within its trace, - // but does not have to be globally unique. - spanID SpanID - - // parentID refers to the ID of the parent span. - // Should be 0 if the current span is a root span. - parentID SpanID - - // Distributed Context baggage. The is a snapshot in time. - baggage map[string]string - - // debugID can be set to some correlation ID when the context is being - // extracted from a TextMap carrier. - // - // See JaegerDebugHeader in constants.go - debugID string - - // samplingState is shared across all spans - samplingState *samplingState - - // remote indicates that span context represents a remote parent - remote bool -} - -type samplingState struct { - // Span context's state flags that are propagated across processes. Only lower 8 bits are used. - // We use an int32 instead of byte to be able to use CAS operations. - stateFlags atomic.Int32 - - // When state is not final, sampling will be retried on other span write operations, - // like SetOperationName / SetTag, and the spans will remain writable. - final atomic.Bool - - // localRootSpan stores the SpanID of the first span created in this process for a given trace. - localRootSpan SpanID - - // extendedState allows samplers to keep intermediate state. - // The keys and values in this map are completely opaque: interface{} -> interface{}. - extendedState sync.Map -} - -func (s *samplingState) isLocalRootSpan(id SpanID) bool { - return id == s.localRootSpan -} - -func (s *samplingState) setFlag(newFlag int32) { - swapped := false - for !swapped { - old := s.stateFlags.Load() - swapped = s.stateFlags.CAS(old, old|newFlag) - } -} - -func (s *samplingState) unsetFlag(newFlag int32) { - swapped := false - for !swapped { - old := s.stateFlags.Load() - swapped = s.stateFlags.CAS(old, old&^newFlag) - } -} - -func (s *samplingState) setSampled() { - s.setFlag(flagSampled) -} - -func (s *samplingState) unsetSampled() { - s.unsetFlag(flagSampled) -} - -func (s *samplingState) setDebugAndSampled() { - s.setFlag(flagDebug | flagSampled) -} - -func (s *samplingState) setFirehose() { - s.setFlag(flagFirehose) -} - -func (s *samplingState) setFlags(flags byte) { - s.stateFlags.Store(int32(flags)) -} - -func (s *samplingState) setFinal() { - s.final.Store(true) -} - -func (s *samplingState) flags() byte { - return byte(s.stateFlags.Load()) -} - -func (s *samplingState) isSampled() bool { - return s.stateFlags.Load()&flagSampled == flagSampled -} - -func (s *samplingState) isDebug() bool { - return s.stateFlags.Load()&flagDebug == flagDebug -} - -func (s *samplingState) isFirehose() bool { - return s.stateFlags.Load()&flagFirehose == flagFirehose -} - -func (s *samplingState) isFinal() bool { - return s.final.Load() -} - -func (s *samplingState) extendedStateForKey(key interface{}, initValue func() interface{}) interface{} { - if value, ok := s.extendedState.Load(key); ok { - return value - } - value := initValue() - value, _ = s.extendedState.LoadOrStore(key, value) - return value -} - -// ForeachBaggageItem implements ForeachBaggageItem() of opentracing.SpanContext -func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) { - for k, v := range c.baggage { - if !handler(k, v) { - break - } - } -} - -// IsSampled returns whether this trace was chosen for permanent storage -// by the sampling mechanism of the tracer. 
-func (c SpanContext) IsSampled() bool { - return c.samplingState.isSampled() -} - -// IsDebug indicates whether sampling was explicitly requested by the service. -func (c SpanContext) IsDebug() bool { - return c.samplingState.isDebug() -} - -// IsSamplingFinalized indicates whether the sampling decision has been finalized. -func (c SpanContext) IsSamplingFinalized() bool { - return c.samplingState.isFinal() -} - -// IsFirehose indicates whether the firehose flag was set -func (c SpanContext) IsFirehose() bool { - return c.samplingState.isFirehose() -} - -// ExtendedSamplingState returns the custom state object for a given key. If the value for this key does not exist, -// it is initialized via initValue function. This state can be used by samplers (e.g. x.PrioritySampler). -func (c SpanContext) ExtendedSamplingState(key interface{}, initValue func() interface{}) interface{} { - return c.samplingState.extendedStateForKey(key, initValue) -} - -// IsValid indicates whether this context actually represents a valid trace. -func (c SpanContext) IsValid() bool { - return c.traceID.IsValid() && c.spanID != 0 -} - -// SetFirehose enables firehose mode for this trace. -func (c SpanContext) SetFirehose() { - c.samplingState.setFirehose() -} - -func (c SpanContext) String() string { - if c.traceID.High == 0 { - return fmt.Sprintf("%016x:%016x:%016x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load()) - } - return fmt.Sprintf("%016x%016x:%016x:%016x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load()) -} - -// ContextFromString reconstructs the Context encoded in a string -func ContextFromString(value string) (SpanContext, error) { - var context SpanContext - if value == "" { - return emptyContext, errEmptyTracerStateString - } - parts := strings.Split(value, ":") - if len(parts) != 4 { - return emptyContext, errMalformedTracerStateString - } - var err error - if context.traceID, err = TraceIDFromString(parts[0]); err != nil { - return emptyContext, err - } - if context.spanID, err = SpanIDFromString(parts[1]); err != nil { - return emptyContext, err - } - if context.parentID, err = SpanIDFromString(parts[2]); err != nil { - return emptyContext, err - } - flags, err := strconv.ParseUint(parts[3], 10, 8) - if err != nil { - return emptyContext, err - } - context.samplingState = &samplingState{} - context.samplingState.setFlags(byte(flags)) - return context, nil -} - -// TraceID returns the trace ID of this span context -func (c SpanContext) TraceID() TraceID { - return c.traceID -} - -// SpanID returns the span ID of this span context -func (c SpanContext) SpanID() SpanID { - return c.spanID -} - -// ParentID returns the parent span ID of this span context -func (c SpanContext) ParentID() SpanID { - return c.parentID -} - -// Flags returns the bitmap containing such bits as 'sampled' and 'debug'. -func (c SpanContext) Flags() byte { - return c.samplingState.flags() -} - -// NewSpanContext creates a new instance of SpanContext -func NewSpanContext(traceID TraceID, spanID, parentID SpanID, sampled bool, baggage map[string]string) SpanContext { - samplingState := &samplingState{} - if sampled { - samplingState.setSampled() - } - - return SpanContext{ - traceID: traceID, - spanID: spanID, - parentID: parentID, - samplingState: samplingState, - baggage: baggage} -} - -// CopyFrom copies data from ctx into this context, including span identity and baggage. -// TODO This is only used by interop.go. 
Remove once TChannel Go supports OpenTracing. -func (c *SpanContext) CopyFrom(ctx *SpanContext) { - c.traceID = ctx.traceID - c.spanID = ctx.spanID - c.parentID = ctx.parentID - c.samplingState = ctx.samplingState - if l := len(ctx.baggage); l > 0 { - c.baggage = make(map[string]string, l) - for k, v := range ctx.baggage { - c.baggage[k] = v - } - } else { - c.baggage = nil - } -} - -// WithBaggageItem creates a new context with an extra baggage item. -func (c SpanContext) WithBaggageItem(key, value string) SpanContext { - var newBaggage map[string]string - if c.baggage == nil { - newBaggage = map[string]string{key: value} - } else { - newBaggage = make(map[string]string, len(c.baggage)+1) - for k, v := range c.baggage { - newBaggage[k] = v - } - newBaggage[key] = value - } - // Use positional parameters so the compiler will help catch new fields. - return SpanContext{c.traceID, c.spanID, c.parentID, newBaggage, "", c.samplingState, c.remote} -} - -// isDebugIDContainerOnly returns true when the instance of the context is only -// used to return the debug/correlation ID from extract() method. This happens -// in the situation when "jaeger-debug-id" header is passed in the carrier to -// the extract() method, but the request otherwise has no span context in it. -// Previously this would've returned opentracing.ErrSpanContextNotFound from the -// extract method, but now it returns a dummy context with only debugID filled in. -// -// See JaegerDebugHeader in constants.go -// See TextMapPropagator#Extract -func (c *SpanContext) isDebugIDContainerOnly() bool { - return !c.traceID.IsValid() && c.debugID != "" -} - -// ------- TraceID ------- - -func (t TraceID) String() string { - if t.High == 0 { - return fmt.Sprintf("%x", t.Low) - } - return fmt.Sprintf("%x%016x", t.High, t.Low) -} - -// TraceIDFromString creates a TraceID from a hexadecimal string -func TraceIDFromString(s string) (TraceID, error) { - var hi, lo uint64 - var err error - if len(s) > 32 { - return TraceID{}, fmt.Errorf("TraceID cannot be longer than 32 hex characters: %s", s) - } else if len(s) > 16 { - hiLen := len(s) - 16 - if hi, err = strconv.ParseUint(s[0:hiLen], 16, 64); err != nil { - return TraceID{}, err - } - if lo, err = strconv.ParseUint(s[hiLen:], 16, 64); err != nil { - return TraceID{}, err - } - } else { - if lo, err = strconv.ParseUint(s, 16, 64); err != nil { - return TraceID{}, err - } - } - return TraceID{High: hi, Low: lo}, nil -} - -// IsValid checks if the trace ID is valid, i.e. not zero. 
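For orientation, the SpanContext removed here serializes to a colon-separated traceID:spanID:parentID:flags string (String) and parses it back (ContextFromString). A minimal round-trip sketch, using made-up IDs and only the exported helpers being deleted (illustrative only, not code from the repository):

// Illustrative sketch: jaeger.ContextFromString / SpanContext.String round trip
// with invented identifiers.
package main

import (
	"fmt"

	"github.com/go-kratos/kratos/pkg/net/trace/jaeger"
)

func main() {
	ctx, err := jaeger.ContextFromString("00000000000000a1:00000000000000b2:0000000000000000:1")
	if err != nil {
		panic(err)
	}
	fmt.Println(ctx.TraceID(), ctx.SpanID(), ctx.ParentID()) // a1 b2 0
	fmt.Println(ctx.IsSampled())                             // true: the sampled flag bit (1) is set
	fmt.Println(ctx.String())                                // echoes the same four colon-separated fields
}
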
-func (t TraceID) IsValid() bool { - return t.High != 0 || t.Low != 0 -} - -// ------- SpanID ------- - -func (s SpanID) String() string { - return fmt.Sprintf("%x", uint64(s)) -} - -// SpanIDFromString creates a SpanID from a hexadecimal string -func SpanIDFromString(s string) (SpanID, error) { - if len(s) > 16 { - return SpanID(0), fmt.Errorf("SpanID cannot be longer than 16 hex characters: %s", s) - } - id, err := strconv.ParseUint(s, 16, 64) - if err != nil { - return SpanID(0), err - } - return SpanID(id), nil -} diff --git a/pkg/net/trace/marshal.go b/pkg/net/trace/marshal.go deleted file mode 100644 index d92c484da..000000000 --- a/pkg/net/trace/marshal.go +++ /dev/null @@ -1,106 +0,0 @@ -package trace - -import ( - "encoding/binary" - errs "errors" - "fmt" - "math" - "time" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/duration" - "github.com/golang/protobuf/ptypes/timestamp" - - protogen "github.com/go-kratos/kratos/pkg/net/trace/proto" -) - -const protoVersion1 int32 = 1 - -var ( - errSpanVersion = errs.New("trace: marshal not support version") -) - -func marshalSpan(sp *Span, version int32) ([]byte, error) { - if version == protoVersion1 { - return marshalSpanV1(sp) - } - return nil, errSpanVersion -} - -func marshalSpanV1(sp *Span) ([]byte, error) { - protoSpan := new(protogen.Span) - protoSpan.Version = protoVersion1 - protoSpan.ServiceName = sp.dapper.serviceName - protoSpan.OperationName = sp.operationName - protoSpan.TraceId = sp.context.TraceID - protoSpan.SpanId = sp.context.SpanID - protoSpan.ParentId = sp.context.ParentID - protoSpan.SamplingProbability = sp.context.Probability - protoSpan.StartTime = ×tamp.Timestamp{ - Seconds: sp.startTime.Unix(), - Nanos: int32(sp.startTime.Nanosecond()), - } - protoSpan.Duration = &duration.Duration{ - Seconds: int64(sp.duration / time.Second), - Nanos: int32(sp.duration % time.Second), - } - protoSpan.Tags = make([]*protogen.Tag, len(sp.tags)) - for i := range sp.tags { - protoSpan.Tags[i] = toProtoTag(sp.tags[i]) - } - protoSpan.Logs = sp.logs - return proto.Marshal(protoSpan) -} - -func toProtoTag(tag Tag) *protogen.Tag { - ptag := &protogen.Tag{Key: tag.Key} - switch value := tag.Value.(type) { - case string: - ptag.Kind = protogen.Tag_STRING - ptag.Value = []byte(value) - case int: - ptag.Kind = protogen.Tag_INT - ptag.Value = serializeInt64(int64(value)) - case int32: - ptag.Kind = protogen.Tag_INT - ptag.Value = serializeInt64(int64(value)) - case int64: - ptag.Kind = protogen.Tag_INT - ptag.Value = serializeInt64(value) - case bool: - ptag.Kind = protogen.Tag_BOOL - ptag.Value = serializeBool(value) - case float32: - ptag.Kind = protogen.Tag_BOOL - ptag.Value = serializeFloat64(float64(value)) - case float64: - ptag.Kind = protogen.Tag_BOOL - ptag.Value = serializeFloat64(value) - default: - ptag.Kind = protogen.Tag_STRING - ptag.Value = []byte((fmt.Sprintf("%v", tag.Value))) - } - return ptag -} - -func serializeInt64(v int64) []byte { - data := make([]byte, 8) - binary.BigEndian.PutUint64(data, uint64(v)) - return data -} - -func serializeFloat64(v float64) []byte { - data := make([]byte, 8) - binary.BigEndian.PutUint64(data, math.Float64bits(v)) - return data -} - -func serializeBool(v bool) []byte { - data := make([]byte, 1) - if v { - data[0] = byte(1) - } else { - data[0] = byte(0) - } - return data -} diff --git a/pkg/net/trace/marshal_test.go b/pkg/net/trace/marshal_test.go deleted file mode 100644 index 3c4d5831d..000000000 --- a/pkg/net/trace/marshal_test.go +++ /dev/null @@ -1,18 +0,0 
@@ -package trace - -import ( - "testing" -) - -func TestMarshalSpanV1(t *testing.T) { - report := &mockReport{} - t1 := NewTracer("service1", report, true) - sp1 := t1.New("opt_test").(*Span) - sp1.SetLog(Log("hello", "test123")) - sp1.SetTag(TagString("tag1", "hell"), TagBool("booltag", true), TagFloat64("float64tag", 3.14159)) - sp1.Finish(nil) - _, err := marshalSpanV1(sp1) - if err != nil { - t.Error(err) - } -} diff --git a/pkg/net/trace/mocktrace/mocktrace.go b/pkg/net/trace/mocktrace/mocktrace.go deleted file mode 100644 index 4fcf2c561..000000000 --- a/pkg/net/trace/mocktrace/mocktrace.go +++ /dev/null @@ -1,84 +0,0 @@ -package mocktrace - -import ( - "github.com/go-kratos/kratos/pkg/net/trace" -) - -// MockTrace . -type MockTrace struct { - Spans []*MockSpan -} - -// New . -func (m *MockTrace) New(operationName string, opts ...trace.Option) trace.Trace { - span := &MockSpan{OperationName: operationName, MockTrace: m} - m.Spans = append(m.Spans, span) - return span -} - -// Inject . -func (m *MockTrace) Inject(t trace.Trace, format interface{}, carrier interface{}) error { - return nil -} - -// Extract . -func (m *MockTrace) Extract(format interface{}, carrier interface{}) (trace.Trace, error) { - return &MockSpan{}, nil -} - -// MockSpan . -type MockSpan struct { - *MockTrace - OperationName string - FinishErr error - Finished bool - Tags []trace.Tag - Logs []trace.LogField -} - -// Fork . -func (m *MockSpan) Fork(serviceName string, operationName string) trace.Trace { - span := &MockSpan{OperationName: operationName, MockTrace: m.MockTrace} - m.Spans = append(m.Spans, span) - return span -} - -// Follow . -func (m *MockSpan) Follow(serviceName string, operationName string) trace.Trace { - span := &MockSpan{OperationName: operationName, MockTrace: m.MockTrace} - m.Spans = append(m.Spans, span) - return span -} - -// Finish . -func (m *MockSpan) Finish(perr *error) { - if perr != nil { - m.FinishErr = *perr - } - m.Finished = true -} - -// SetTag . -func (m *MockSpan) SetTag(tags ...trace.Tag) trace.Trace { - m.Tags = append(m.Tags, tags...) - return m -} - -// SetLog . -func (m *MockSpan) SetLog(logs ...trace.LogField) trace.Trace { - m.Logs = append(m.Logs, logs...) - return m -} - -// Visit . -func (m *MockSpan) Visit(fn func(k, v string)) {} - -// SetTitle . -func (m *MockSpan) SetTitle(title string) { - m.OperationName = title -} - -// TraceID . -func (m *MockSpan) TraceID() string { - return "" -} diff --git a/pkg/net/trace/mocktrace/mocktrace_test.go b/pkg/net/trace/mocktrace/mocktrace_test.go deleted file mode 100644 index a0021f1ef..000000000 --- a/pkg/net/trace/mocktrace/mocktrace_test.go +++ /dev/null @@ -1,24 +0,0 @@ -// Package mocktrace this ut just make ci happay. 
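The mocktrace recorder removed above was a plain in-memory span collector. As a hedged illustration (this test is invented; only the MockTrace/MockSpan API shown in the deleted file is real), a consumer's test could have asserted on the recorded spans like this:

// Hypothetical consumer test exercising the removed mocktrace recorder.
package sometest

import (
	"testing"

	"github.com/go-kratos/kratos/pkg/net/trace/mocktrace"
)

func TestWithMockTrace(t *testing.T) {
	mt := &mocktrace.MockTrace{}
	root := mt.New("opt_root")
	child := root.Fork("", "opt_child")
	child.Finish(nil)
	root.Finish(nil)
	// Every span created through the mock is recorded on mt.Spans.
	if len(mt.Spans) != 2 {
		t.Fatalf("want 2 recorded spans, got %d", len(mt.Spans))
	}
	for _, sp := range mt.Spans {
		if !sp.Finished {
			t.Fatalf("span %q was not finished", sp.OperationName)
		}
	}
}
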
-package mocktrace - -import ( - "fmt" - "testing" -) - -func TestMockTrace(t *testing.T) { - mocktrace := &MockTrace{} - mocktrace.Inject(nil, nil, nil) - mocktrace.Extract(nil, nil) - - root := mocktrace.New("test") - root.Fork("", "") - root.Follow("", "") - root.Finish(nil) - err := fmt.Errorf("test") - root.Finish(&err) - root.SetTag() - root.SetLog() - root.Visit(func(k, v string) {}) - root.SetTitle("") -} diff --git a/pkg/net/trace/noop.go b/pkg/net/trace/noop.go deleted file mode 100644 index b60a32ba9..000000000 --- a/pkg/net/trace/noop.go +++ /dev/null @@ -1,47 +0,0 @@ -package trace - -var ( - _ Tracer = nooptracer{} -) - -type nooptracer struct{} - -func (n nooptracer) New(title string, opts ...Option) Trace { - return noopspan{} -} - -func (n nooptracer) Inject(t Trace, format interface{}, carrier interface{}) error { - return nil -} - -func (n nooptracer) Extract(format interface{}, carrier interface{}) (Trace, error) { - return noopspan{}, nil -} - -type noopspan struct{} - -func (n noopspan) TraceID() string { return "" } - -func (n noopspan) Fork(string, string) Trace { - return noopspan{} -} - -func (n noopspan) Follow(string, string) Trace { - return noopspan{} -} - -func (n noopspan) Finish(err *error) {} - -func (n noopspan) SetTag(tags ...Tag) Trace { - return noopspan{} -} - -func (n noopspan) SetLog(logs ...LogField) Trace { - return noopspan{} -} - -func (n noopspan) Visit(func(k, v string)) {} - -func (n noopspan) SetTitle(string) {} - -func (n noopspan) String() string { return "" } diff --git a/pkg/net/trace/option.go b/pkg/net/trace/option.go deleted file mode 100644 index f0865c2d7..000000000 --- a/pkg/net/trace/option.go +++ /dev/null @@ -1,17 +0,0 @@ -package trace - -var defaultOption = option{} - -type option struct { - Debug bool -} - -// Option dapper Option -type Option func(*option) - -// EnableDebug enable debug mode -func EnableDebug() Option { - return func(opt *option) { - opt.Debug = true - } -} diff --git a/pkg/net/trace/propagation.go b/pkg/net/trace/propagation.go deleted file mode 100644 index 0eb45bd23..000000000 --- a/pkg/net/trace/propagation.go +++ /dev/null @@ -1,177 +0,0 @@ -package trace - -import ( - errs "errors" - "net/http" - - "google.golang.org/grpc/metadata" -) - -var ( - // ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or - // Tracer.Extract() is not recognized by the Tracer implementation. - ErrUnsupportedFormat = errs.New("trace: Unknown or unsupported Inject/Extract format") - - // ErrTraceNotFound occurs when the `carrier` passed to - // Tracer.Extract() is valid and uncorrupted but has insufficient - // information to extract a Trace. - ErrTraceNotFound = errs.New("trace: Trace not found in Extract carrier") - - // ErrInvalidTrace errors occur when Tracer.Inject() is asked to - // operate on a Trace which it is not prepared to handle (for - // example, since it was created by a different tracer implementation). - ErrInvalidTrace = errs.New("trace: Trace type incompatible with tracer") - - // ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract() - // implementations expect a different type of `carrier` than they are - // given. - ErrInvalidCarrier = errs.New("trace: Invalid Inject/Extract carrier") - - // ErrTraceCorrupted occurs when the `carrier` passed to - // Tracer.Extract() is of the expected type but is corrupted. 
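The propagation code removed below is what carried a span across process boundaries via http.Header or gRPC metadata. A hedged usage sketch based on the package's own deleted tests (service and operation names are invented; nopReporter is a stand-in for a real reporter such as the jaeger one configured earlier in this diff):

// Illustrative only: HTTP propagation with the removed trace package.
package main

import (
	"net/http"

	"github.com/go-kratos/kratos/pkg/net/trace"
)

type nopReporter struct{}

func (nopReporter) WriteSpan(*trace.Span) error { return nil }
func (nopReporter) Close() error                { return nil }

func main() {
	client := trace.NewTracer("service.client", nopReporter{}, true)
	server := trace.NewTracer("service.server", nopReporter{}, true)

	root := client.New("opt_handler")
	rpc := root.Fork("", "opt_call_server")

	// Carry the span across the wire as HTTP headers.
	header := make(http.Header)
	_ = client.Inject(rpc, trace.HTTPFormat, header)

	// On the server side, rebuild the trace from the same headers.
	serverSpan, err := server.Extract(trace.HTTPFormat, header)
	if err != nil {
		panic(err)
	}
	serverSpan.Finish(nil)
	rpc.Finish(nil)
	root.Finish(nil)
}
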
- ErrTraceCorrupted = errs.New("trace: Trace data corrupted in Extract carrier") -) - -// BuiltinFormat is used to demarcate the values within package `trace` -// that are intended for use with the Tracer.Inject() and Tracer.Extract() -// methods. -type BuiltinFormat byte - -// support format list -const ( - // HTTPFormat represents Trace as HTTP header string pairs. - // - // the HTTPFormat format requires that the keys and values - // be valid as HTTP headers as-is (i.e., character casing may be unstable - // and special characters are disallowed in keys, values should be - // URL-escaped, etc). - // - // the carrier must be a `http.Header`. - HTTPFormat BuiltinFormat = iota - // GRPCFormat represents Trace as gRPC metadata. - // - // the carrier must be a `google.golang.org/grpc/metadata.MD`. - GRPCFormat -) - -// Carrier propagator must convert generic interface{} to something this -// implement Carrier interface, Trace can use Carrier to represents itself. -type Carrier interface { - Set(key, val string) - Get(key string) string -} - -// propagator is responsible for injecting and extracting `Trace` instances -// from a format-specific "carrier" -type propagator interface { - Inject(carrier interface{}) (Carrier, error) - Extract(carrier interface{}) (Carrier, error) -} - -type httpPropagator struct{} - -type httpCarrier http.Header - -func (h httpCarrier) Set(key, val string) { - http.Header(h).Set(key, val) -} - -func (h httpCarrier) Get(key string) string { - return http.Header(h).Get(key) -} - -func (httpPropagator) Inject(carrier interface{}) (Carrier, error) { - header, ok := carrier.(http.Header) - if !ok { - return nil, ErrInvalidCarrier - } - if header == nil { - return nil, ErrInvalidTrace - } - return httpCarrier(header), nil -} - -func (httpPropagator) Extract(carrier interface{}) (Carrier, error) { - header, ok := carrier.(http.Header) - if !ok { - return nil, ErrInvalidCarrier - } - if header == nil { - return nil, ErrTraceNotFound - } - return httpCarrier(header), nil -} - -const legacyGRPCKey = "trace" - -type grpcPropagator struct{} - -type grpcCarrier map[string][]string - -func (g grpcCarrier) Get(key string) string { - if v, ok := g[key]; ok && len(v) > 0 { - return v[0] - } - // ts := g[legacyGRPCKey] - // if len(ts) != 8 { - // return "" - // } - // switch key { - // case KeyTraceID: - // return ts[0] - // case KeyTraceSpanID: - // return ts[1] - // case KeyTraceParentID: - // return ts[2] - // case KeyTraceLevel: - // return ts[3] - // case KeyTraceSampled: - // return ts[4] - // case KeyTraceCaller: - // return ts[5] - // } - return "" -} - -func (g grpcCarrier) Set(key, val string) { - // ts := make([]string, 8) - // g[legacyGRPCKey] = ts - // switch key { - // case KeyTraceID: - // ts[0] = val - // case KeyTraceSpanID: - // ts[1] = val - // case KeyTraceParentID: - // ts[2] = val - // case KeyTraceLevel: - // ts[3] = val - // case KeyTraceSampled: - // ts[4] = val - // case KeyTraceCaller: - // ts[5] = val - // default: - g[key] = append(g[key], val) - // } -} - -func (grpcPropagator) Inject(carrier interface{}) (Carrier, error) { - md, ok := carrier.(metadata.MD) - if !ok { - return nil, ErrInvalidCarrier - } - if md == nil { - return nil, ErrInvalidTrace - } - return grpcCarrier(md), nil -} - -func (grpcPropagator) Extract(carrier interface{}) (Carrier, error) { - md, ok := carrier.(metadata.MD) - if !ok { - return nil, ErrInvalidCarrier - } - if md == nil { - return nil, ErrTraceNotFound - } - return grpcCarrier(md), nil -} diff --git 
a/pkg/net/trace/proto/span.pb.go b/pkg/net/trace/proto/span.pb.go deleted file mode 100644 index 55f612fc9..000000000 --- a/pkg/net/trace/proto/span.pb.go +++ /dev/null @@ -1,557 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: proto/span.proto - -package protogen - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import duration "github.com/golang/protobuf/ptypes/duration" -import timestamp "github.com/golang/protobuf/ptypes/timestamp" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Tag_Kind int32 - -const ( - Tag_STRING Tag_Kind = 0 - Tag_INT Tag_Kind = 1 - Tag_BOOL Tag_Kind = 2 - Tag_FLOAT Tag_Kind = 3 -) - -var Tag_Kind_name = map[int32]string{ - 0: "STRING", - 1: "INT", - 2: "BOOL", - 3: "FLOAT", -} -var Tag_Kind_value = map[string]int32{ - "STRING": 0, - "INT": 1, - "BOOL": 2, - "FLOAT": 3, -} - -func (x Tag_Kind) String() string { - return proto.EnumName(Tag_Kind_name, int32(x)) -} -func (Tag_Kind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_span_68a8dae26ef502a2, []int{0, 0} -} - -type Log_Kind int32 - -const ( - Log_STRING Log_Kind = 0 - Log_INT Log_Kind = 1 - Log_BOOL Log_Kind = 2 - Log_FLOAT Log_Kind = 3 -) - -var Log_Kind_name = map[int32]string{ - 0: "STRING", - 1: "INT", - 2: "BOOL", - 3: "FLOAT", -} -var Log_Kind_value = map[string]int32{ - "STRING": 0, - "INT": 1, - "BOOL": 2, - "FLOAT": 3, -} - -func (x Log_Kind) String() string { - return proto.EnumName(Log_Kind_name, int32(x)) -} -func (Log_Kind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_span_68a8dae26ef502a2, []int{2, 0} -} - -type SpanRef_RefType int32 - -const ( - SpanRef_CHILD_OF SpanRef_RefType = 0 - SpanRef_FOLLOWS_FROM SpanRef_RefType = 1 -) - -var SpanRef_RefType_name = map[int32]string{ - 0: "CHILD_OF", - 1: "FOLLOWS_FROM", -} -var SpanRef_RefType_value = map[string]int32{ - "CHILD_OF": 0, - "FOLLOWS_FROM": 1, -} - -func (x SpanRef_RefType) String() string { - return proto.EnumName(SpanRef_RefType_name, int32(x)) -} -func (SpanRef_RefType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_span_68a8dae26ef502a2, []int{3, 0} -} - -type Tag struct { - Key string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` - Kind Tag_Kind `protobuf:"varint,2,opt,name=kind,enum=dapper.trace.Tag_Kind" json:"kind,omitempty"` - Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Tag) Reset() { *m = Tag{} } -func (m *Tag) String() string { return proto.CompactTextString(m) } -func (*Tag) ProtoMessage() {} -func (*Tag) Descriptor() ([]byte, []int) { - return fileDescriptor_span_68a8dae26ef502a2, []int{0} -} -func (m *Tag) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Tag.Unmarshal(m, b) -} -func (m *Tag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Tag.Marshal(b, m, deterministic) -} -func (dst *Tag) XXX_Merge(src proto.Message) { - xxx_messageInfo_Tag.Merge(dst, src) -} -func (m *Tag) XXX_Size() int { - return 
xxx_messageInfo_Tag.Size(m) -} -func (m *Tag) XXX_DiscardUnknown() { - xxx_messageInfo_Tag.DiscardUnknown(m) -} - -var xxx_messageInfo_Tag proto.InternalMessageInfo - -func (m *Tag) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *Tag) GetKind() Tag_Kind { - if m != nil { - return m.Kind - } - return Tag_STRING -} - -func (m *Tag) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type Field struct { - Key string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Field) Reset() { *m = Field{} } -func (m *Field) String() string { return proto.CompactTextString(m) } -func (*Field) ProtoMessage() {} -func (*Field) Descriptor() ([]byte, []int) { - return fileDescriptor_span_68a8dae26ef502a2, []int{1} -} -func (m *Field) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Field.Unmarshal(m, b) -} -func (m *Field) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Field.Marshal(b, m, deterministic) -} -func (dst *Field) XXX_Merge(src proto.Message) { - xxx_messageInfo_Field.Merge(dst, src) -} -func (m *Field) XXX_Size() int { - return xxx_messageInfo_Field.Size(m) -} -func (m *Field) XXX_DiscardUnknown() { - xxx_messageInfo_Field.DiscardUnknown(m) -} - -var xxx_messageInfo_Field proto.InternalMessageInfo - -func (m *Field) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *Field) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type Log struct { - Key string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` - Kind Log_Kind `protobuf:"varint,2,opt,name=kind,enum=dapper.trace.Log_Kind" json:"kind,omitempty"` - Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` - Timestamp int64 `protobuf:"varint,4,opt,name=timestamp" json:"timestamp,omitempty"` - Fields []*Field `protobuf:"bytes,5,rep,name=fields" json:"fields,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Log) Reset() { *m = Log{} } -func (m *Log) String() string { return proto.CompactTextString(m) } -func (*Log) ProtoMessage() {} -func (*Log) Descriptor() ([]byte, []int) { - return fileDescriptor_span_68a8dae26ef502a2, []int{2} -} -func (m *Log) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Log.Unmarshal(m, b) -} -func (m *Log) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Log.Marshal(b, m, deterministic) -} -func (dst *Log) XXX_Merge(src proto.Message) { - xxx_messageInfo_Log.Merge(dst, src) -} -func (m *Log) XXX_Size() int { - return xxx_messageInfo_Log.Size(m) -} -func (m *Log) XXX_DiscardUnknown() { - xxx_messageInfo_Log.DiscardUnknown(m) -} - -var xxx_messageInfo_Log proto.InternalMessageInfo - -func (m *Log) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *Log) GetKind() Log_Kind { - if m != nil { - return m.Kind - } - return Log_STRING -} - -func (m *Log) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func (m *Log) GetTimestamp() int64 { - if m != nil { - return m.Timestamp - } - return 0 -} - -func (m *Log) GetFields() []*Field { - if m != nil { - return m.Fields - } - return nil -} - -// SpanRef describes causal relationship of the current span to 
another span (e.g. 'child-of') -type SpanRef struct { - RefType SpanRef_RefType `protobuf:"varint,1,opt,name=ref_type,json=refType,enum=dapper.trace.SpanRef_RefType" json:"ref_type,omitempty"` - TraceId uint64 `protobuf:"varint,2,opt,name=trace_id,json=traceId" json:"trace_id,omitempty"` - SpanId uint64 `protobuf:"varint,3,opt,name=span_id,json=spanId" json:"span_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SpanRef) Reset() { *m = SpanRef{} } -func (m *SpanRef) String() string { return proto.CompactTextString(m) } -func (*SpanRef) ProtoMessage() {} -func (*SpanRef) Descriptor() ([]byte, []int) { - return fileDescriptor_span_68a8dae26ef502a2, []int{3} -} -func (m *SpanRef) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SpanRef.Unmarshal(m, b) -} -func (m *SpanRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SpanRef.Marshal(b, m, deterministic) -} -func (dst *SpanRef) XXX_Merge(src proto.Message) { - xxx_messageInfo_SpanRef.Merge(dst, src) -} -func (m *SpanRef) XXX_Size() int { - return xxx_messageInfo_SpanRef.Size(m) -} -func (m *SpanRef) XXX_DiscardUnknown() { - xxx_messageInfo_SpanRef.DiscardUnknown(m) -} - -var xxx_messageInfo_SpanRef proto.InternalMessageInfo - -func (m *SpanRef) GetRefType() SpanRef_RefType { - if m != nil { - return m.RefType - } - return SpanRef_CHILD_OF -} - -func (m *SpanRef) GetTraceId() uint64 { - if m != nil { - return m.TraceId - } - return 0 -} - -func (m *SpanRef) GetSpanId() uint64 { - if m != nil { - return m.SpanId - } - return 0 -} - -// Span represents a named unit of work performed by a service. -type Span struct { - Version int32 `protobuf:"varint,99,opt,name=version" json:"version,omitempty"` - ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName" json:"service_name,omitempty"` - OperationName string `protobuf:"bytes,2,opt,name=operation_name,json=operationName" json:"operation_name,omitempty"` - // Deprecated: caller no long required - Caller string `protobuf:"bytes,3,opt,name=caller" json:"caller,omitempty"` - TraceId uint64 `protobuf:"varint,4,opt,name=trace_id,json=traceId" json:"trace_id,omitempty"` - SpanId uint64 `protobuf:"varint,5,opt,name=span_id,json=spanId" json:"span_id,omitempty"` - ParentId uint64 `protobuf:"varint,6,opt,name=parent_id,json=parentId" json:"parent_id,omitempty"` - // Deprecated: level no long required - Level int32 `protobuf:"varint,7,opt,name=level" json:"level,omitempty"` - // Deprecated: use start_time instead instead of start_at - StartAt int64 `protobuf:"varint,8,opt,name=start_at,json=startAt" json:"start_at,omitempty"` - // Deprecated: use duration instead instead of finish_at - FinishAt int64 `protobuf:"varint,9,opt,name=finish_at,json=finishAt" json:"finish_at,omitempty"` - SamplingProbability float32 `protobuf:"fixed32,10,opt,name=sampling_probability,json=samplingProbability" json:"sampling_probability,omitempty"` - Env string `protobuf:"bytes,19,opt,name=env" json:"env,omitempty"` - StartTime *timestamp.Timestamp `protobuf:"bytes,20,opt,name=start_time,json=startTime" json:"start_time,omitempty"` - Duration *duration.Duration `protobuf:"bytes,21,opt,name=duration" json:"duration,omitempty"` - References []*SpanRef `protobuf:"bytes,22,rep,name=references" json:"references,omitempty"` - Tags []*Tag `protobuf:"bytes,11,rep,name=tags" json:"tags,omitempty"` - Logs []*Log `protobuf:"bytes,12,rep,name=logs" json:"logs,omitempty"` - 
XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span) Reset() { *m = Span{} } -func (m *Span) String() string { return proto.CompactTextString(m) } -func (*Span) ProtoMessage() {} -func (*Span) Descriptor() ([]byte, []int) { - return fileDescriptor_span_68a8dae26ef502a2, []int{4} -} -func (m *Span) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span.Unmarshal(m, b) -} -func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span.Marshal(b, m, deterministic) -} -func (dst *Span) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span.Merge(dst, src) -} -func (m *Span) XXX_Size() int { - return xxx_messageInfo_Span.Size(m) -} -func (m *Span) XXX_DiscardUnknown() { - xxx_messageInfo_Span.DiscardUnknown(m) -} - -var xxx_messageInfo_Span proto.InternalMessageInfo - -func (m *Span) GetVersion() int32 { - if m != nil { - return m.Version - } - return 0 -} - -func (m *Span) GetServiceName() string { - if m != nil { - return m.ServiceName - } - return "" -} - -func (m *Span) GetOperationName() string { - if m != nil { - return m.OperationName - } - return "" -} - -func (m *Span) GetCaller() string { - if m != nil { - return m.Caller - } - return "" -} - -func (m *Span) GetTraceId() uint64 { - if m != nil { - return m.TraceId - } - return 0 -} - -func (m *Span) GetSpanId() uint64 { - if m != nil { - return m.SpanId - } - return 0 -} - -func (m *Span) GetParentId() uint64 { - if m != nil { - return m.ParentId - } - return 0 -} - -func (m *Span) GetLevel() int32 { - if m != nil { - return m.Level - } - return 0 -} - -func (m *Span) GetStartAt() int64 { - if m != nil { - return m.StartAt - } - return 0 -} - -func (m *Span) GetFinishAt() int64 { - if m != nil { - return m.FinishAt - } - return 0 -} - -func (m *Span) GetSamplingProbability() float32 { - if m != nil { - return m.SamplingProbability - } - return 0 -} - -func (m *Span) GetEnv() string { - if m != nil { - return m.Env - } - return "" -} - -func (m *Span) GetStartTime() *timestamp.Timestamp { - if m != nil { - return m.StartTime - } - return nil -} - -func (m *Span) GetDuration() *duration.Duration { - if m != nil { - return m.Duration - } - return nil -} - -func (m *Span) GetReferences() []*SpanRef { - if m != nil { - return m.References - } - return nil -} - -func (m *Span) GetTags() []*Tag { - if m != nil { - return m.Tags - } - return nil -} - -func (m *Span) GetLogs() []*Log { - if m != nil { - return m.Logs - } - return nil -} - -func init() { - proto.RegisterType((*Tag)(nil), "dapper.trace.Tag") - proto.RegisterType((*Field)(nil), "dapper.trace.Field") - proto.RegisterType((*Log)(nil), "dapper.trace.Log") - proto.RegisterType((*SpanRef)(nil), "dapper.trace.SpanRef") - proto.RegisterType((*Span)(nil), "dapper.trace.Span") - proto.RegisterEnum("dapper.trace.Tag_Kind", Tag_Kind_name, Tag_Kind_value) - proto.RegisterEnum("dapper.trace.Log_Kind", Log_Kind_name, Log_Kind_value) - proto.RegisterEnum("dapper.trace.SpanRef_RefType", SpanRef_RefType_name, SpanRef_RefType_value) -} - -func init() { proto.RegisterFile("proto/span.proto", fileDescriptor_span_68a8dae26ef502a2) } - -var fileDescriptor_span_68a8dae26ef502a2 = []byte{ - // 669 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xdd, 0x6e, 0xd3, 0x4a, - 0x10, 0xc7, 0xeb, 0xd8, 0x89, 0x9d, 0x49, 0x4e, 0xe5, 0xb3, 0xfd, 0x38, 0xdb, 0x1e, 0x3e, 0x4c, - 0xa4, 0x4a, 0x06, 0x24, 0x07, 0x82, 0x2a, 0xc1, 
0x65, 0x4b, 0x15, 0x88, 0x30, 0x0d, 0xda, 0x46, - 0x42, 0xe2, 0x26, 0xda, 0x24, 0x63, 0x63, 0xd5, 0xb1, 0x2d, 0x7b, 0x1b, 0x29, 0xcf, 0xc0, 0x5b, - 0xf0, 0x50, 0xdc, 0xf1, 0x2e, 0x68, 0xd7, 0x4e, 0x9a, 0xd2, 0x22, 0x04, 0x77, 0x3b, 0xf3, 0xff, - 0xed, 0xce, 0xcc, 0xfa, 0xbf, 0x06, 0x3b, 0xcb, 0x53, 0x91, 0x76, 0x8b, 0x8c, 0x27, 0x9e, 0x5a, - 0x92, 0xf6, 0x8c, 0x67, 0x19, 0xe6, 0x9e, 0xc8, 0xf9, 0x14, 0x0f, 0x1f, 0x86, 0x69, 0x1a, 0xc6, - 0xd8, 0x55, 0xda, 0xe4, 0x2a, 0xe8, 0x8a, 0x68, 0x8e, 0x85, 0xe0, 0xf3, 0xac, 0xc4, 0x0f, 0x1f, - 0xfc, 0x0c, 0xcc, 0xae, 0x72, 0x2e, 0xa2, 0xb4, 0x3a, 0xae, 0xf3, 0x45, 0x03, 0x7d, 0xc4, 0x43, - 0x62, 0x83, 0x7e, 0x89, 0x4b, 0xaa, 0x39, 0x9a, 0xdb, 0x64, 0x72, 0x49, 0x9e, 0x80, 0x71, 0x19, - 0x25, 0x33, 0x5a, 0x73, 0x34, 0x77, 0xbb, 0xb7, 0xef, 0x6d, 0xd6, 0xf5, 0x46, 0x3c, 0xf4, 0xde, - 0x45, 0xc9, 0x8c, 0x29, 0x86, 0xec, 0x42, 0x7d, 0xc1, 0xe3, 0x2b, 0xa4, 0xba, 0xa3, 0xb9, 0x6d, - 0x56, 0x06, 0x9d, 0x67, 0x60, 0x48, 0x86, 0x00, 0x34, 0x2e, 0x46, 0x6c, 0x70, 0xfe, 0xc6, 0xde, - 0x22, 0x26, 0xe8, 0x83, 0xf3, 0x91, 0xad, 0x11, 0x0b, 0x8c, 0xd3, 0xe1, 0xd0, 0xb7, 0x6b, 0xa4, - 0x09, 0xf5, 0xbe, 0x3f, 0x3c, 0x19, 0xd9, 0x7a, 0xa7, 0x0b, 0xf5, 0x7e, 0x84, 0xf1, 0xec, 0x8e, - 0x76, 0xd6, 0x25, 0x6a, 0x9b, 0x25, 0xbe, 0x69, 0xa0, 0xfb, 0xe9, 0x1f, 0xb7, 0xef, 0xa7, 0xbf, - 0x6f, 0x9f, 0xdc, 0x83, 0xe6, 0xfa, 0x36, 0xa9, 0xe1, 0x68, 0xae, 0xce, 0xae, 0x13, 0xe4, 0x29, - 0x34, 0x02, 0xd9, 0x6a, 0x41, 0xeb, 0x8e, 0xee, 0xb6, 0x7a, 0x3b, 0x37, 0x2b, 0xa8, 0x31, 0x58, - 0x85, 0xfc, 0xc5, 0x4d, 0x7c, 0xd5, 0xc0, 0xbc, 0xc8, 0x78, 0xc2, 0x30, 0x20, 0x2f, 0xc1, 0xca, - 0x31, 0x18, 0x8b, 0x65, 0x86, 0x6a, 0xc2, 0xed, 0xde, 0xfd, 0x9b, 0xc5, 0x2a, 0xd0, 0x63, 0x18, - 0x8c, 0x96, 0x19, 0x32, 0x33, 0x2f, 0x17, 0xe4, 0x00, 0x2c, 0x45, 0x8c, 0xa3, 0xf2, 0x22, 0x0c, - 0x66, 0xaa, 0x78, 0x30, 0x23, 0xff, 0x81, 0x29, 0x5d, 0x25, 0x15, 0x5d, 0x29, 0x0d, 0x19, 0x0e, - 0x66, 0x9d, 0xc7, 0x60, 0x56, 0xe7, 0x90, 0x36, 0x58, 0xaf, 0xdf, 0x0e, 0xfc, 0xb3, 0xf1, 0xb0, - 0x6f, 0x6f, 0x11, 0x1b, 0xda, 0xfd, 0xa1, 0xef, 0x0f, 0x3f, 0x5e, 0x8c, 0xfb, 0x6c, 0xf8, 0xde, - 0xd6, 0x3a, 0xdf, 0x0d, 0x30, 0x64, 0x6d, 0x42, 0xc1, 0x5c, 0x60, 0x5e, 0x44, 0x69, 0x42, 0xa7, - 0x8e, 0xe6, 0xd6, 0xd9, 0x2a, 0x24, 0x8f, 0xa0, 0x5d, 0x60, 0xbe, 0x88, 0xa6, 0x38, 0x4e, 0xf8, - 0x1c, 0xab, 0x2f, 0xd4, 0xaa, 0x72, 0xe7, 0x7c, 0x8e, 0xe4, 0x08, 0xb6, 0xd3, 0x0c, 0x4b, 0x57, - 0x96, 0x50, 0x4d, 0x41, 0xff, 0xac, 0xb3, 0x0a, 0xdb, 0x87, 0xc6, 0x94, 0xc7, 0x31, 0xe6, 0xaa, - 0xdf, 0x26, 0xab, 0xa2, 0x1b, 0x33, 0x1a, 0xbf, 0x9c, 0xb1, 0xbe, 0x39, 0x23, 0xf9, 0x1f, 0x9a, - 0x19, 0xcf, 0x31, 0x11, 0x52, 0x6a, 0x28, 0xc9, 0x2a, 0x13, 0x03, 0xe5, 0x86, 0x18, 0x17, 0x18, - 0x53, 0x53, 0x8d, 0x52, 0x06, 0xb2, 0x4c, 0x21, 0x78, 0x2e, 0xc6, 0x5c, 0x50, 0x4b, 0x99, 0xc1, - 0x54, 0xf1, 0x89, 0x90, 0xa7, 0x05, 0x51, 0x12, 0x15, 0x9f, 0xa5, 0xd6, 0x54, 0x9a, 0x55, 0x26, - 0x4e, 0x04, 0x79, 0x0e, 0xbb, 0x05, 0x9f, 0x67, 0x71, 0x94, 0x84, 0xe3, 0x2c, 0x4f, 0x27, 0x7c, - 0x12, 0xc5, 0x91, 0x58, 0x52, 0x70, 0x34, 0xb7, 0xc6, 0x76, 0x56, 0xda, 0x87, 0x6b, 0x49, 0x9a, - 0x19, 0x93, 0x05, 0xdd, 0x29, 0xcd, 0x8c, 0xc9, 0x82, 0xbc, 0x02, 0x28, 0x8b, 0x4b, 0xff, 0xd1, - 0x5d, 0x47, 0x73, 0x5b, 0xbd, 0x43, 0xaf, 0x7c, 0xda, 0xde, 0xea, 0x69, 0x7b, 0xa3, 0x95, 0x39, - 0x59, 0x53, 0xd1, 0x32, 0x26, 0xc7, 0x60, 0xad, 0x9e, 0x3c, 0xdd, 0x53, 0x1b, 0x0f, 0x6e, 0x6d, - 0x3c, 0xab, 0x00, 0xb6, 0x46, 0xc9, 0x31, 0x40, 0x8e, 0x01, 0xe6, 0x98, 0x4c, 0xb1, 0xa0, 0xfb, - 0xca, 0xe2, 0x7b, 0x77, 0xba, 0x8e, 0x6d, 0x80, 0xe4, 0x08, 0x0c, 0xc1, 
0xc3, 0x82, 0xb6, 0xd4, - 0x86, 0x7f, 0x6f, 0xfd, 0x34, 0x98, 0x92, 0x25, 0x16, 0xa7, 0x61, 0x41, 0xdb, 0x77, 0x61, 0x7e, - 0x1a, 0x32, 0x25, 0x9f, 0xc2, 0x27, 0x4b, 0xf5, 0x18, 0x62, 0x32, 0x69, 0xa8, 0xd5, 0x8b, 0x1f, - 0x01, 0x00, 0x00, 0xff, 0xff, 0xfe, 0x7b, 0x57, 0x93, 0x12, 0x05, 0x00, 0x00, -} diff --git a/pkg/net/trace/proto/span.proto b/pkg/net/trace/proto/span.proto deleted file mode 100644 index 6e9e2ed76..000000000 --- a/pkg/net/trace/proto/span.proto +++ /dev/null @@ -1,77 +0,0 @@ -syntax = "proto3"; -package dapper.trace; - -import "google/protobuf/timestamp.proto"; -import "google/protobuf/duration.proto"; - -option go_package = "protogen"; - -message Tag { - enum Kind { - STRING = 0; - INT = 1; - BOOL = 2; - FLOAT = 3; - } - string key = 1; - Kind kind = 2; - bytes value = 3; -} - -message Field { - string key = 1; - bytes value = 2; -} - -message Log { - // Deprecated: Kind no long use - enum Kind { - STRING = 0; - INT = 1; - BOOL = 2; - FLOAT = 3; - } - string key = 1; - // Deprecated: Kind no long use - Kind kind = 2; - // Deprecated: Value no long use - bytes value = 3; - int64 timestamp = 4; - repeated Field fields = 5; -} - -// SpanRef describes causal relationship of the current span to another span (e.g. 'child-of') -message SpanRef { - enum RefType { - CHILD_OF = 0; - FOLLOWS_FROM = 1; - } - RefType ref_type = 1; - uint64 trace_id = 2; - uint64 span_id = 3; -} - -// Span represents a named unit of work performed by a service. -message Span { - int32 version = 99; - string service_name = 1; - string operation_name = 2; - // Deprecated: caller no long required - string caller = 3; - uint64 trace_id = 4; - uint64 span_id = 5; - uint64 parent_id = 6; - // Deprecated: level no long required - int32 level = 7; - // Deprecated: use start_time instead instead of start_at - int64 start_at = 8; - // Deprecated: use duration instead instead of finish_at - int64 finish_at = 9; - float sampling_probability = 10; - string env = 19; - google.protobuf.Timestamp start_time = 20; - google.protobuf.Duration duration = 21; - repeated SpanRef references = 22; - repeated Tag tags = 11; - repeated Log logs = 12; -} diff --git a/pkg/net/trace/report.go b/pkg/net/trace/report.go deleted file mode 100644 index 6bce7ea28..000000000 --- a/pkg/net/trace/report.go +++ /dev/null @@ -1,138 +0,0 @@ -package trace - -import ( - "fmt" - "net" - "os" - "sync" - "time" -) - -const ( - // MaxPackageSize . - _maxPackageSize = 1024 * 32 - // safe udp package size // MaxPackageSize = 508 _dataChSize = 4096) - // max memory usage 1024 * 32 * 4096 -> 128MB - _dataChSize = 4096 - _defaultWriteChannalTimeout = 50 * time.Millisecond - _defaultWriteTimeout = 200 * time.Millisecond -) - -// reporter trace reporter. 
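// An illustrative sketch, not part of the original file: anything that
// satisfies the reporter interface declared below can be handed to the
// tracer. A minimal stderr reporter, relying only on fmt and os (already
// imported by this file) and the exported Span accessors defined elsewhere
// in the package:
type stderrReport struct{}

func (stderrReport) WriteSpan(sp *Span) error {
	_, err := fmt.Fprintf(os.Stderr, "span trace=%s op=%s dur=%v\n",
		sp.TraceID(), sp.OperationName(), sp.Duration())
	return err
}

func (stderrReport) Close() error { return nil }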
-type reporter interface { - WriteSpan(sp *Span) error - Close() error -} - -// newReport with network address -func newReport(network, address string, timeout time.Duration, protocolVersion int32) reporter { - if timeout == 0 { - timeout = _defaultWriteTimeout - } - report := &connReport{ - network: network, - address: address, - dataCh: make(chan []byte, _dataChSize), - done: make(chan struct{}), - timeout: timeout, - version: protocolVersion, - } - go report.daemon() - return report -} - -type connReport struct { - version int32 - rmx sync.RWMutex - closed bool - - network, address string - - dataCh chan []byte - - conn net.Conn - - done chan struct{} - - timeout time.Duration -} - -func (c *connReport) daemon() { - for b := range c.dataCh { - c.send(b) - } - c.done <- struct{}{} -} - -func (c *connReport) WriteSpan(sp *Span) error { - data, err := marshalSpan(sp, c.version) - if err != nil { - return err - } - return c.writePackage(data) -} - -func (c *connReport) writePackage(data []byte) error { - c.rmx.RLock() - defer c.rmx.RUnlock() - if c.closed { - return fmt.Errorf("report already closed") - } - if len(data) > _maxPackageSize { - return fmt.Errorf("package too large length %d > %d", len(data), _maxPackageSize) - } - select { - case c.dataCh <- data: - return nil - case <-time.After(_defaultWriteChannalTimeout): - return fmt.Errorf("write to data channel timeout") - } -} - -func (c *connReport) Close() error { - c.rmx.Lock() - c.closed = true - c.rmx.Unlock() - - t := time.NewTimer(time.Second) - close(c.dataCh) - select { - case <-t.C: - c.closeConn() - return fmt.Errorf("close report timeout force close") - case <-c.done: - return c.closeConn() - } -} - -func (c *connReport) send(data []byte) { - if c.conn == nil { - if err := c.reconnect(); err != nil { - c.Errorf("connect error: %s retry after second", err) - time.Sleep(time.Second) - return - } - } - c.conn.SetWriteDeadline(time.Now().Add(100 * time.Microsecond)) - if _, err := c.conn.Write(data); err != nil { - c.Errorf("write to conn error: %s, close connect", err) - c.conn.Close() - c.conn = nil - } -} - -func (c *connReport) reconnect() (err error) { - c.conn, err = net.DialTimeout(c.network, c.address, c.timeout) - return -} - -func (c *connReport) closeConn() error { - if c.conn != nil { - return c.conn.Close() - } - return nil -} - -func (c *connReport) Errorf(format string, args ...interface{}) { - fmt.Fprintf(os.Stderr, format+"\n", args...) 
-} diff --git a/pkg/net/trace/report_test.go b/pkg/net/trace/report_test.go deleted file mode 100644 index 4d0652921..000000000 --- a/pkg/net/trace/report_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package trace - -import ( - "bytes" - "io" - "log" - "net" - "os" - "testing" - - "github.com/stretchr/testify/assert" -) - -func newServer(w io.Writer, network, address string) (func() error, error) { - lis, err := net.Listen(network, address) - if err != nil { - return nil, err - } - done := make(chan struct{}) - go func() { - conn, err := lis.Accept() - if err != nil { - lis.Close() - log.Fatal(err) - } - io.Copy(w, conn) - conn.Close() - done <- struct{}{} - }() - return func() error { - <-done - return lis.Close() - }, nil -} - -func TestReportTCP(t *testing.T) { - buf := &bytes.Buffer{} - cancel, err := newServer(buf, "tcp", "127.0.0.1:6077") - if err != nil { - t.Fatal(err) - } - report := newReport("tcp", "127.0.0.1:6077", 0, 0).(*connReport) - data := []byte("hello, world") - report.writePackage(data) - if err := report.Close(); err != nil { - t.Error(err) - } - cancel() - assert.Equal(t, data, buf.Bytes(), "receive data") -} - -func newUnixgramServer(w io.Writer, address string) (func() error, error) { - conn, err := net.ListenPacket("unixgram", address) - if err != nil { - return nil, err - } - done := make(chan struct{}) - go func() { - p := make([]byte, 4096) - n, _, err := conn.ReadFrom(p) - if err != nil { - log.Fatal(err) - } - w.Write(p[:n]) - done <- struct{}{} - }() - return func() error { - <-done - return conn.Close() - }, nil -} - -func TestReportUnixgram(t *testing.T) { - os.Remove("/tmp/trace.sock") - buf := &bytes.Buffer{} - cancel, err := newUnixgramServer(buf, "/tmp/trace.sock") - if err != nil { - t.Fatal(err) - } - report := newReport("unixgram", "/tmp/trace.sock", 0, 0).(*connReport) - data := []byte("hello, world") - report.writePackage(data) - if err := report.Close(); err != nil { - t.Error(err) - } - cancel() - assert.Equal(t, data, buf.Bytes(), "receive data") -} diff --git a/pkg/net/trace/sample.go b/pkg/net/trace/sample.go deleted file mode 100644 index 6a7aa92fc..000000000 --- a/pkg/net/trace/sample.go +++ /dev/null @@ -1,67 +0,0 @@ -package trace - -import ( - "math/rand" - "sync/atomic" - "time" -) - -const ( - slotLength = 2048 -) - -var ignoreds = []string{"/metrics", "/ping"} // NOTE: add YOUR URL PATH that want ignore - -func init() { - rand.Seed(time.Now().UnixNano()) -} - -func oneAtTimeHash(s string) (hash uint32) { - b := []byte(s) - for i := range b { - hash += uint32(b[i]) - hash += hash << 10 - hash ^= hash >> 6 - } - hash += hash << 3 - hash ^= hash >> 11 - hash += hash << 15 - return -} - -// sampler decides whether a new trace should be sampled or not. 
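// An illustrative usage sketch, not part of the original file. The
// probabilitySampling implementation below keeps one unix-second slot per
// hashed operation name, so the first request for an operation in any given
// second is always sampled; later requests in that second are sampled with
// probability p.
func exampleIsSampled() (bool, float32) {
	s := newSampler(0.001) // p must be in (0, 1], otherwise newSampler panics
	defer s.Close()
	// First call for this operation name within the current second => (true, 1).
	return s.IsSampled(0, "/api/hello")
}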
-type sampler interface { - IsSampled(traceID uint64, operationName string) (bool, float32) - Close() error -} - -type probabilitySampling struct { - probability float32 - slot [slotLength]int64 -} - -func (p *probabilitySampling) IsSampled(traceID uint64, operationName string) (bool, float32) { - for _, ignored := range ignoreds { - if operationName == ignored { - return false, 0 - } - } - now := time.Now().Unix() - idx := oneAtTimeHash(operationName) % slotLength - old := atomic.LoadInt64(&p.slot[idx]) - if old != now { - atomic.SwapInt64(&p.slot[idx], now) - return true, 1 - } - return rand.Float32() < float32(p.probability), float32(p.probability) -} - -func (p *probabilitySampling) Close() error { return nil } - -// newSampler new probability sampler -func newSampler(probability float32) sampler { - if probability <= 0 || probability > 1 { - panic("probability P ∈ (0, 1]") - } - return &probabilitySampling{probability: probability} -} diff --git a/pkg/net/trace/sample_test.go b/pkg/net/trace/sample_test.go deleted file mode 100644 index 329ec5f16..000000000 --- a/pkg/net/trace/sample_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package trace - -import ( - "testing" -) - -func TestProbabilitySampling(t *testing.T) { - sampler := newSampler(0.001) - t.Run("test one operationName", func(t *testing.T) { - sampled, probability := sampler.IsSampled(0, "test123") - if !sampled || probability != 1 { - t.Errorf("expect sampled and probability == 1 get: %v %f", sampled, probability) - } - }) - t.Run("test probability", func(t *testing.T) { - sampler.IsSampled(0, "test_opt_2") - count := 0 - for i := 0; i < 100000; i++ { - sampled, _ := sampler.IsSampled(0, "test_opt_2") - if sampled { - count++ - } - } - if count < 60 || count > 150 { - t.Errorf("expect count between 60~150 get %d", count) - } - }) -} - -func BenchmarkProbabilitySampling(b *testing.B) { - sampler := newSampler(0.001) - for i := 0; i < b.N; i++ { - sampler.IsSampled(0, "test_opt_xxx") - } -} diff --git a/pkg/net/trace/span.go b/pkg/net/trace/span.go deleted file mode 100644 index fd6699abd..000000000 --- a/pkg/net/trace/span.go +++ /dev/null @@ -1,141 +0,0 @@ -package trace - -import ( - "fmt" - "time" - - protogen "github.com/go-kratos/kratos/pkg/net/trace/proto" -) - -const ( - _maxChilds = 1024 - _maxTags = 128 - _maxLogs = 256 -) - -var _ Trace = &Span{} - -// Span is a trace span. 
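// An illustrative lifecycle sketch, not part of the original file. It assumes
// a concrete tracer has already been installed via SetGlobalTracer (as the
// zipkin sub-package does); the operation names are only examples.
func exampleSpanLifecycle() {
	root := New("HTTP:/api/hello")
	root.SetTag(TagString(TagHTTPMethod, "GET"))

	// Fork a child span for a downstream client call; it inherits the trace id
	// and records this span as its parent.
	child := root.Fork("", "RPC:downstream.Call")

	var err error
	child.Finish(&err) // a nil error attaches no error tag or stack log
	root.Finish(&err)
}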
-type Span struct { - dapper *dapper - context spanContext - operationName string - startTime time.Time - duration time.Duration - tags []Tag - logs []*protogen.Log - childs int -} - -func (s *Span) ServiceName() string { - return s.dapper.serviceName -} - -func (s *Span) OperationName() string { - return s.operationName -} - -func (s *Span) StartTime() time.Time { - return s.startTime -} - -func (s *Span) Duration() time.Duration { - return s.duration -} - -func (s *Span) TraceID() string { - return s.context.String() -} - -func (s *Span) Context() spanContext { - return s.context -} - -func (s *Span) Tags() []Tag { - return s.tags -} - -func (s *Span) Logs() []*protogen.Log { - return s.logs -} - -func (s *Span) Fork(serviceName, operationName string) Trace { - if s.childs > _maxChilds { - // if child span more than max childs set return noopspan - return noopspan{} - } - s.childs++ - // 为了兼容临时为 New 的 Span 设置 span.kind - return s.dapper.newSpanWithContext(operationName, s.context).SetTag(TagString(TagSpanKind, "client")) -} - -func (s *Span) Follow(serviceName, operationName string) Trace { - return s.Fork(serviceName, operationName).SetTag(TagString(TagSpanKind, "producer")) -} - -func (s *Span) Finish(perr *error) { - s.duration = time.Since(s.startTime) - if perr != nil && *perr != nil { - err := *perr - s.SetTag(TagBool(TagError, true)) - s.SetLog(Log(LogMessage, err.Error())) - if err, ok := err.(stackTracer); ok { - s.SetLog(Log(LogStack, fmt.Sprintf("%+v", err.StackTrace()))) - } - } - s.dapper.report(s) -} - -func (s *Span) SetTag(tags ...Tag) Trace { - if !s.context.isSampled() && !s.context.isDebug() { - return s - } - if len(s.tags) < _maxTags { - s.tags = append(s.tags, tags...) - } - if len(s.tags) == _maxTags { - s.tags = append(s.tags, Tag{Key: "trace.error", Value: "too many tags"}) - } - return s -} - -// LogFields is an efficient and type-checked way to record key:value -// NOTE current unsupport -func (s *Span) SetLog(logs ...LogField) Trace { - if !s.context.isSampled() && !s.context.isDebug() { - return s - } - if len(s.logs) < _maxLogs { - s.setLog(logs...) - } - if len(s.logs) == _maxLogs { - s.setLog(LogField{Key: "trace.error", Value: "too many logs"}) - } - return s -} - -func (s *Span) setLog(logs ...LogField) Trace { - protoLog := &protogen.Log{ - Timestamp: time.Now().UnixNano(), - Fields: make([]*protogen.Field, len(logs)), - } - for i := range logs { - protoLog.Fields[i] = &protogen.Field{Key: logs[i].Key, Value: []byte(logs[i].Value)} - } - s.logs = append(s.logs, protoLog) - return s -} - -// Visit visits the k-v pair in trace, calling fn for each. 
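// An illustrative sketch, not part of the original file: Visit is the hook
// injectors use to copy the span's propagation fields into a carrier. With
// the implementation below only the KratosTraceID key is emitted. net/http is
// assumed to be imported by the caller; the usual entry point is trace.Inject
// with an HTTP header carrier rather than calling Visit directly.
func exampleInjectHeader(sp *Span, header http.Header) {
	sp.Visit(func(k, v string) {
		header.Set(k, v)
	})
}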
-func (s *Span) Visit(fn func(k, v string)) { - fn(KratosTraceID, s.context.String()) -} - -// SetTitle reset trace title -func (s *Span) SetTitle(operationName string) { - s.operationName = operationName -} - -func (s *Span) String() string { - return s.context.String() -} diff --git a/pkg/net/trace/span_test.go b/pkg/net/trace/span_test.go deleted file mode 100644 index 8d6b92085..000000000 --- a/pkg/net/trace/span_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package trace - -import ( - "fmt" - "strconv" - "testing" - "time" - - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" -) - -func TestSpan(t *testing.T) { - report := &mockReport{} - t1 := NewTracer("service1", report, true) - t.Run("test span string", func(t *testing.T) { - sp1 := t1.New("testfinish").(*Span) - assert.NotEmpty(t, fmt.Sprint(sp1)) - }) - t.Run("test fork", func(t *testing.T) { - sp1 := t1.New("testfork").(*Span) - sp2 := sp1.Fork("xxx", "opt_2").(*Span) - assert.Equal(t, sp1.context.TraceID, sp2.context.TraceID) - assert.Equal(t, sp1.context.SpanID, sp2.context.ParentID) - t.Run("test max fork", func(t *testing.T) { - sp3 := sp2.Fork("xx", "xxx") - for i := 0; i < 100; i++ { - sp3 = sp3.Fork("", "xxx") - } - assert.Equal(t, noopspan{}, sp3) - }) - t.Run("test max childs", func(t *testing.T) { - sp3 := sp2.Fork("xx", "xxx") - for i := 0; i < 4096; i++ { - sp3.Fork("", "xxx") - } - assert.Equal(t, noopspan{}, sp3.Fork("xx", "xx")) - }) - }) - t.Run("test finish", func(t *testing.T) { - t.Run("test finish ok", func(t *testing.T) { - sp1 := t1.New("testfinish").(*Span) - time.Sleep(time.Millisecond) - sp1.Finish(nil) - assert.True(t, sp1.startTime.Unix() > 0) - assert.True(t, sp1.duration > time.Microsecond) - }) - t.Run("test finish error", func(t *testing.T) { - sp1 := t1.New("testfinish").(*Span) - time.Sleep(time.Millisecond) - err := fmt.Errorf("🍻") - sp1.Finish(&err) - assert.True(t, sp1.startTime.Unix() > 0) - assert.True(t, sp1.duration > time.Microsecond) - errorTag := false - for _, tag := range sp1.tags { - if tag.Key == TagError && tag.Value != nil { - errorTag = true - } - } - assert.True(t, errorTag) - messageLog := false - for _, log := range sp1.logs { - assert.True(t, log.Timestamp != 0) - for _, field := range log.Fields { - if field.Key == LogMessage && len(field.Value) != 0 { - messageLog = true - } - } - } - assert.True(t, messageLog) - }) - t.Run("test finish error stack", func(t *testing.T) { - sp1 := t1.New("testfinish").(*Span) - time.Sleep(time.Millisecond) - err := fmt.Errorf("🍻") - err = errors.WithStack(err) - sp1.Finish(&err) - ok := false - for _, log := range sp1.logs { - for _, field := range log.Fields { - if field.Key == LogStack && len(field.Value) != 0 { - ok = true - } - } - } - assert.True(t, ok, "LogStack set") - }) - t.Run("test too many tags", func(t *testing.T) { - sp1 := t1.New("testfinish").(*Span) - for i := 0; i < 1024; i++ { - sp1.SetTag(Tag{Key: strconv.Itoa(i), Value: "hello"}) - } - assert.Len(t, sp1.tags, _maxTags+1) - assert.Equal(t, sp1.tags[_maxTags].Key, "trace.error") - assert.Equal(t, sp1.tags[_maxTags].Value, "too many tags") - }) - t.Run("test too many logs", func(t *testing.T) { - sp1 := t1.New("testfinish").(*Span) - for i := 0; i < 1024; i++ { - sp1.SetLog(LogField{Key: strconv.Itoa(i), Value: "hello"}) - } - assert.Len(t, sp1.logs, _maxLogs+1) - assert.Equal(t, sp1.logs[_maxLogs].Fields[0].Key, "trace.error") - assert.Equal(t, sp1.logs[_maxLogs].Fields[0].Value, []byte("too many logs")) - }) - }) -} diff --git a/pkg/net/trace/tag.go 
b/pkg/net/trace/tag.go deleted file mode 100644 index 3c7329842..000000000 --- a/pkg/net/trace/tag.go +++ /dev/null @@ -1,182 +0,0 @@ -package trace - -// Standard Span tags https://github.com/opentracing/specification/blob/master/semantic_conventions.md#span-tags-table -const ( - // The software package, framework, library, or module that generated the associated Span. - // E.g., "grpc", "django", "JDBI". - // type string - TagComponent = "component" - - // Database instance name. - // E.g., In java, if the jdbc.url="jdbc:mysql://127.0.0.1:3306/customers", the instance name is "customers". - // type string - TagDBInstance = "db.instance" - - // A database statement for the given database type. - // E.g., for db.type="sql", "SELECT * FROM wuser_table"; for db.type="redis", "SET mykey 'WuValue'". - TagDBStatement = "db.statement" - - // Database type. For any SQL database, "sql". For others, the lower-case database category, - // e.g. "cassandra", "hbase", or "redis". - // type string - TagDBType = "db.type" - - // Username for accessing database. E.g., "readonly_user" or "reporting_user" - // type string - TagDBUser = "db.user" - - // true if and only if the application considers the operation represented by the Span to have failed - // type bool - TagError = "error" - - // HTTP method of the request for the associated Span. E.g., "GET", "POST" - // type string - TagHTTPMethod = "http.method" - - // HTTP response status code for the associated Span. E.g., 200, 503, 404 - // type integer - TagHTTPStatusCode = "http.status_code" - - // URL of the request being handled in this segment of the trace, in standard URI format. - // E.g., "https://domain.net/path/to?resource=here" - // type string - TagHTTPURL = "http.url" - - // An address at which messages can be exchanged. - // E.g. A Kafka record has an associated "topic name" that can be extracted by the instrumented producer or consumer and stored using this tag. - // type string - TagMessageBusDestination = "message_bus.destination" - - // Remote "address", suitable for use in a networking client library. - // This may be a "ip:port", a bare "hostname", a FQDN, or even a JDBC substring like "mysql://prod-db:3306" - // type string - TagPeerAddress = "peer.address" - - // Remote hostname. E.g., "opentracing.io", "internal.dns.name" - // type string - TagPeerHostname = "peer.hostname" - - // Remote IPv4 address as a .-separated tuple. E.g., "127.0.0.1" - // type string - TagPeerIPv4 = "peer.ipv4" - - // Remote IPv6 address as a string of colon-separated 4-char hex tuples. - // E.g., "2001:0db8:85a3:0000:0000:8a2e:0370:7334" - // type string - TagPeerIPv6 = "peer.ipv6" - - // Remote port. E.g., 80 - // type integer - TagPeerPort = "peer.port" - - // Remote service name (for some unspecified definition of "service"). - // E.g., "elasticsearch", "a_custom_microservice", "memcache" - // type string - TagPeerService = "peer.service" - - // If greater than 0, a hint to the Tracer to do its best to capture the trace. - // If 0, a hint to the trace to not-capture the trace. If absent, the Tracer should use its default sampling mechanism. - // type string - TagSamplingPriority = "sampling.priority" - - // Either "client" or "server" for the appropriate roles in an RPC, - // and "producer" or "consumer" for the appropriate roles in a messaging scenario. 
- // type string - TagSpanKind = "span.kind" - - // legacy tag - TagAnnotation = "legacy.annotation" - TagAddress = "legacy.address" - TagComment = "legacy.comment" -) - -// Standard log tags -const ( - // The type or "kind" of an error (only for event="error" logs). E.g., "Exception", "OSError" - // type string - LogErrorKind = "error.kind" - - // For languages that support such a thing (e.g., Java, Python), - // the actual Throwable/Exception/Error object instance itself. - // E.g., A java.lang.UnsupportedOperationException instance, a python exceptions.NameError instance - // type string - LogErrorObject = "error.object" - - // A stable identifier for some notable moment in the lifetime of a Span. For instance, a mutex lock acquisition or release or the sorts of lifetime events in a browser page load described in the Performance.timing specification. E.g., from Zipkin, "cs", "sr", "ss", or "cr". Or, more generally, "initialized" or "timed out". For errors, "error" - // type string - LogEvent = "event" - - // A concise, human-readable, one-line message explaining the event. - // E.g., "Could not connect to backend", "Cache invalidation succeeded" - // type string - LogMessage = "message" - - // A stack trace in platform-conventional format; may or may not pertain to an error. E.g., "File \"example.py\", line 7, in \\ncaller()\nFile \"example.py\", line 5, in caller\ncallee()\nFile \"example.py\", line 2, in callee\nraise Exception(\"Yikes\")\n" - // type string - LogStack = "stack" -) - -// Tag interface -type Tag struct { - Key string - Value interface{} -} - -// TagString new string tag. -func TagString(key string, val string) Tag { - return Tag{Key: key, Value: val} -} - -// TagInt64 new int64 tag. -func TagInt64(key string, val int64) Tag { - return Tag{Key: key, Value: val} -} - -// TagInt new int tag -func TagInt(key string, val int) Tag { - return Tag{Key: key, Value: val} -} - -// TagBool new bool tag -func TagBool(key string, val bool) Tag { - return Tag{Key: key, Value: val} -} - -// TagFloat64 new float64 tag -func TagFloat64(key string, val float64) Tag { - return Tag{Key: key, Value: val} -} - -// TagFloat32 new float64 tag -func TagFloat32(key string, val float32) Tag { - return Tag{Key: key, Value: val} -} - -// String new tag String. -// NOTE: use TagString -func String(key string, val string) Tag { - return TagString(key, val) -} - -// Int new tag Int. -// NOTE: use TagInt -func Int(key string, val int) Tag { - return TagInt(key, val) -} - -// Bool new tagBool -// NOTE: use TagBool -func Bool(key string, val bool) Tag { - return TagBool(key, val) -} - -// Log new log. -func Log(key string, val string) LogField { - return LogField{Key: key, Value: val} -} - -// LogField LogField -type LogField struct { - Key string - Value string -} diff --git a/pkg/net/trace/tag_test.go b/pkg/net/trace/tag_test.go deleted file mode 100644 index 15c5a94c3..000000000 --- a/pkg/net/trace/tag_test.go +++ /dev/null @@ -1 +0,0 @@ -package trace diff --git a/pkg/net/trace/tracer.go b/pkg/net/trace/tracer.go deleted file mode 100644 index 20e71de47..000000000 --- a/pkg/net/trace/tracer.go +++ /dev/null @@ -1,94 +0,0 @@ -package trace - -import ( - "io" -) - -var ( - // global tracer - _tracer Tracer = nooptracer{} -) - -// SetGlobalTracer SetGlobalTracer -func SetGlobalTracer(tracer Tracer) { - _tracer = tracer -} - -// Tracer is a simple, thin interface for Trace creation and propagation. -type Tracer interface { - // New trace instance with given title. 
- New(operationName string, opts ...Option) Trace - // Inject takes the Trace instance and injects it for - // propagation within `carrier`. The actual type of `carrier` depends on - // the value of `format`. - Inject(t Trace, format interface{}, carrier interface{}) error - // Extract returns a Trace instance given `format` and `carrier`. - // return `ErrTraceNotFound` if trace not found. - Extract(format interface{}, carrier interface{}) (Trace, error) -} - -// New trace instance with given operationName. -func New(operationName string, opts ...Option) Trace { - return _tracer.New(operationName, opts...) -} - -// Inject takes the Trace instance and injects it for -// propagation within `carrier`. The actual type of `carrier` depends on -// the value of `format`. -func Inject(t Trace, format interface{}, carrier interface{}) error { - return _tracer.Inject(t, format, carrier) -} - -// Extract returns a Trace instance given `format` and `carrier`. -// return `ErrTraceNotFound` if trace not found. -func Extract(format interface{}, carrier interface{}) (Trace, error) { - return _tracer.Extract(format, carrier) -} - -// Close trace flush data. -func Close() error { - if closer, ok := _tracer.(io.Closer); ok { - return closer.Close() - } - return nil -} - -// Trace trace common interface. -type Trace interface { - // return current trace id. - TraceID() string - // Fork fork a trace with client trace. - Fork(serviceName, operationName string) Trace - - // Follow - Follow(serviceName, operationName string) Trace - - // Finish when trace finish call it. - Finish(err *error) - - // Scan scan trace into info. - // Deprecated: method Scan is deprecated, use Inject instead of Scan - // Scan(ti *Info) - - // Adds a tag to the trace. - // - // If there is a pre-existing tag set for `key`, it is overwritten. - // - // Tag values can be numeric types, strings, or bools. The behavior of - // other tag value types is undefined at the OpenTracing level. If a - // tracing system does not know how to handle a particular value type, it - // may ignore the tag, but shall not panic. - // NOTE current only support legacy tag: TagAnnotation TagAddress TagComment - // other will be ignore - SetTag(tags ...Tag) Trace - - // LogFields is an efficient and type-checked way to record key:value - // NOTE current unsupport - SetLog(logs ...LogField) Trace - - // Visit visits the k-v pair in trace, calling fn for each. 
- Visit(fn func(k, v string)) - - // SetTitle reset trace title - SetTitle(title string) -} diff --git a/pkg/net/trace/util.go b/pkg/net/trace/util.go deleted file mode 100644 index 358cb0dc2..000000000 --- a/pkg/net/trace/util.go +++ /dev/null @@ -1,59 +0,0 @@ -package trace - -import ( - "context" - "encoding/binary" - "math/rand" - "time" - - "github.com/go-kratos/kratos/pkg/conf/env" - "github.com/go-kratos/kratos/pkg/net/ip" - - "github.com/pkg/errors" -) - -var _hostHash byte - -func init() { - rand.Seed(time.Now().UnixNano()) - _hostHash = byte(oneAtTimeHash(env.Hostname)) -} - -func extendTag() (tags []Tag) { - tags = append(tags, - TagString("region", env.Region), - TagString("zone", env.Zone), - TagString("hostname", env.Hostname), - TagString("ip", ip.InternalIP()), - ) - return -} - -func genID() uint64 { - var b [8]byte - // i think this code will not survive to 2106-02-07 - binary.BigEndian.PutUint32(b[4:], uint32(time.Now().Unix())>>8) - b[4] = _hostHash - binary.BigEndian.PutUint32(b[:4], uint32(rand.Int31())) - return binary.BigEndian.Uint64(b[:]) -} - -type stackTracer interface { - StackTrace() errors.StackTrace -} - -type ctxKey string - -var _ctxkey ctxKey = "kratos/pkg/net/trace.trace" - -// FromContext returns the trace bound to the context, if any. -func FromContext(ctx context.Context) (t Trace, ok bool) { - t, ok = ctx.Value(_ctxkey).(Trace) - return -} - -// NewContext new a trace context. -// NOTE: This method is not thread safe. -func NewContext(ctx context.Context, t Trace) context.Context { - return context.WithValue(ctx, _ctxkey, t) -} diff --git a/pkg/net/trace/util_test.go b/pkg/net/trace/util_test.go deleted file mode 100644 index 6d98908a1..000000000 --- a/pkg/net/trace/util_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package trace - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestFromContext(t *testing.T) { - report := &mockReport{} - t1 := NewTracer("service1", report, true) - sp1 := t1.New("test123") - ctx := context.Background() - ctx = NewContext(ctx, sp1) - sp2, ok := FromContext(ctx) - if !ok { - t.Fatal("nothing from context") - } - assert.Equal(t, sp1, sp2) -} diff --git a/pkg/net/trace/zipkin/config.go b/pkg/net/trace/zipkin/config.go deleted file mode 100644 index 31d89371b..000000000 --- a/pkg/net/trace/zipkin/config.go +++ /dev/null @@ -1,30 +0,0 @@ -package zipkin - -import ( - "time" - - "github.com/go-kratos/kratos/pkg/conf/env" - "github.com/go-kratos/kratos/pkg/net/trace" - xtime "github.com/go-kratos/kratos/pkg/time" -) - -// Config config. -// url should be the endpoint to send the spans to, e.g. -// http://localhost:9411/api/v2/spans -type Config struct { - Endpoint string `dsn:"endpoint"` - BatchSize int `dsn:"query.batch_size,100"` - Timeout xtime.Duration `dsn:"query.timeout,200ms"` - DisableSample bool `dsn:"query.disable_sample"` -} - -// Init init trace report. 
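// An illustrative usage sketch, not part of the original file. Init below
// installs a zipkin HTTP reporter as the global tracer; the endpoint value
// comes from the example in the Config comment above, and BatchSize/Timeout
// fall back to 100 and 200ms when left zero.
func exampleZipkinInit() {
	Init(&Config{
		Endpoint: "http://localhost:9411/api/v2/spans",
	})
}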
-func Init(c *Config) { - if c.BatchSize == 0 { - c.BatchSize = 100 - } - if c.Timeout == 0 { - c.Timeout = xtime.Duration(200 * time.Millisecond) - } - trace.SetGlobalTracer(trace.NewTracer(env.AppID, newReport(c), c.DisableSample)) -} diff --git a/pkg/net/trace/zipkin/zipkin.go b/pkg/net/trace/zipkin/zipkin.go deleted file mode 100644 index 59dcd86f1..000000000 --- a/pkg/net/trace/zipkin/zipkin.go +++ /dev/null @@ -1,99 +0,0 @@ -package zipkin - -import ( - "fmt" - "time" - - protogen "github.com/go-kratos/kratos/pkg/net/trace/proto" - - "github.com/openzipkin/zipkin-go/model" - "github.com/openzipkin/zipkin-go/reporter" - "github.com/openzipkin/zipkin-go/reporter/http" - - "github.com/go-kratos/kratos/pkg/net/trace" -) - -type report struct { - rpt reporter.Reporter -} - -func newReport(c *Config) *report { - return &report{ - rpt: http.NewReporter(c.Endpoint, - http.Timeout(time.Duration(c.Timeout)), - http.BatchSize(c.BatchSize), - ), - } -} - -// WriteSpan write a trace span to queue. -func (r *report) WriteSpan(raw *trace.Span) (err error) { - ctx := raw.Context() - traceID := model.TraceID{Low: ctx.TraceID} - spanID := model.ID(ctx.SpanID) - parentID := model.ID(ctx.ParentID) - tags := raw.Tags() - span := model.SpanModel{ - SpanContext: model.SpanContext{ - TraceID: traceID, - ID: spanID, - ParentID: &parentID, - }, - Name: raw.OperationName(), - Timestamp: raw.StartTime(), - Duration: raw.Duration(), - Tags: make(map[string]string, len(tags)), - } - span.LocalEndpoint = &model.Endpoint{ServiceName: raw.ServiceName()} - for _, tag := range tags { - switch tag.Key { - case trace.TagSpanKind: - switch tag.Value.(string) { - case "client": - span.Kind = model.Client - case "server": - span.Kind = model.Server - case "producer": - span.Kind = model.Producer - case "consumer": - span.Kind = model.Consumer - } - default: - v, ok := tag.Value.(string) - if ok { - span.Tags[tag.Key] = v - } else { - span.Tags[tag.Key] = fmt.Sprint(v) - } - } - } - //log save to zipkin annotation - span.Annotations = r.converLogsToAnnotations(raw.Logs()) - r.rpt.Send(span) - return -} - -func (r *report) converLogsToAnnotations(logs []*protogen.Log) (annotations []model.Annotation) { - annotations = make([]model.Annotation, 0, len(annotations)) - for _, lg := range logs { - annotations = append(annotations, r.converLogToAnnotation(lg)...) - } - return annotations -} -func (r *report) converLogToAnnotation(log *protogen.Log) (annotations []model.Annotation) { - annotations = make([]model.Annotation, 0, len(log.Fields)) - for _, field := range log.Fields { - val := string(field.Value) - annotation := model.Annotation{ - Timestamp: time.Unix(0, log.Timestamp), - Value: field.Key + " : " + val, - } - annotations = append(annotations, annotation) - } - return annotations -} - -// Close close the report. 
-func (r *report) Close() error { - return r.rpt.Close() -} diff --git a/pkg/net/trace/zipkin/zipkin_test.go b/pkg/net/trace/zipkin/zipkin_test.go deleted file mode 100644 index 394a74a64..000000000 --- a/pkg/net/trace/zipkin/zipkin_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package zipkin - -import ( - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/go-kratos/kratos/pkg/net/trace" - xtime "github.com/go-kratos/kratos/pkg/time" -) - -func TestZipkin(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method != "POST" { - t.Errorf("expected 'POST' request, got '%s'", r.Method) - } - - aSpanPayload, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Errorf("unexpected error: %s", err.Error()) - } - - t.Logf("%s\n", aSpanPayload) - })) - defer ts.Close() - - c := &Config{ - Endpoint: ts.URL, - Timeout: xtime.Duration(time.Second * 5), - BatchSize: 100, - } - //c.Endpoint = "http://127.0.0.1:9411/api/v2/spans" - report := newReport(c) - t1 := trace.NewTracer("service1", report, true) - t2 := trace.NewTracer("service2", report, true) - sp1 := t1.New("option_1") - sp2 := sp1.Fork("service3", "opt_client") - sp2.SetLog(trace.Log("log_k", "log_v")) - // inject - header := make(http.Header) - t1.Inject(sp2, trace.HTTPFormat, header) - t.Log(header) - sp3, err := t2.Extract(trace.HTTPFormat, header) - if err != nil { - t.Fatal(err) - } - sp3.Finish(nil) - sp2.Finish(nil) - sp1.Finish(nil) - report.Close() -} diff --git a/pkg/ratelimit/README.md b/pkg/ratelimit/README.md deleted file mode 100644 index 5cc494138..000000000 --- a/pkg/ratelimit/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# rate - -# 项目简介 -BBR 限流 - -# 编译环境 - - -# 依赖包 - - -# 编译执行 - - \ No newline at end of file diff --git a/pkg/ratelimit/bbr/bbr.go b/pkg/ratelimit/bbr/bbr.go deleted file mode 100644 index 738336e9d..000000000 --- a/pkg/ratelimit/bbr/bbr.go +++ /dev/null @@ -1,284 +0,0 @@ -package bbr - -import ( - "context" - "math" - "sync/atomic" - "time" - - "github.com/go-kratos/kratos/pkg/container/group" - "github.com/go-kratos/kratos/pkg/ecode" - "github.com/go-kratos/kratos/pkg/log" - limit "github.com/go-kratos/kratos/pkg/ratelimit" - "github.com/go-kratos/kratos/pkg/stat/metric" - - cpustat "github.com/go-kratos/kratos/pkg/stat/sys/cpu" -) - -var ( - cpu int64 - decay = 0.95 - initTime = time.Now() - defaultConf = &Config{ - Window: time.Second * 10, - WinBucket: 100, - CPUThreshold: 800, - } -) - -type cpuGetter func() int64 - -func init() { - go cpuproc() -} - -// cpu = cpuᵗ⁻¹ * decay + cpuᵗ * (1 - decay) -func cpuproc() { - ticker := time.NewTicker(time.Millisecond * 250) - defer func() { - ticker.Stop() - if err := recover(); err != nil { - log.Error("rate.limit.cpuproc() err(%+v)", err) - go cpuproc() - } - }() - - // EMA algorithm: https://blog.csdn.net/m0_38106113/article/details/81542863 - for range ticker.C { - stat := &cpustat.Stat{} - cpustat.ReadStat(stat) - prevCpu := atomic.LoadInt64(&cpu) - curCpu := int64(float64(prevCpu)*decay + float64(stat.Usage)*(1.0-decay)) - atomic.StoreInt64(&cpu, curCpu) - } -} - -// Stats contains the metrics's snapshot of bbr. -type Stat struct { - Cpu int64 - InFlight int64 - MaxInFlight int64 - MinRt int64 - MaxPass int64 -} - -// BBR implements bbr-like limiter. -// It is inspired by sentinel. 
-// https://github.com/alibaba/Sentinel/wiki/%E7%B3%BB%E7%BB%9F%E8%87%AA%E9%80%82%E5%BA%94%E9%99%90%E6%B5%81 -type BBR struct { - cpu cpuGetter - passStat metric.RollingCounter - rtStat metric.RollingCounter - inFlight int64 - winBucketPerSec int64 - bucketDuration time.Duration - winSize int - conf *Config - prevDrop atomic.Value - maxPASSCache atomic.Value - minRtCache atomic.Value -} - -// CounterCache is used to cache maxPASS and minRt result. -// Value of current bucket is not counted in real time. -// Cache time is equal to a bucket duration. -type CounterCache struct { - val int64 - time time.Time -} - -// Config contains configs of bbr limiter. -type Config struct { - Enabled bool - Window time.Duration - WinBucket int - Rule string - Debug bool - CPUThreshold int64 -} - -func (l *BBR) maxPASS() int64 { - passCache := l.maxPASSCache.Load() - if passCache != nil { - ps := passCache.(*CounterCache) - if l.timespan(ps.time) < 1 { - return ps.val - } - } - rawMaxPass := int64(l.passStat.Reduce(func(iterator metric.Iterator) float64 { - var result = 1.0 - for i := 1; iterator.Next() && i < l.conf.WinBucket; i++ { - bucket := iterator.Bucket() - count := 0.0 - for _, p := range bucket.Points { - count += p - } - result = math.Max(result, count) - } - return result - })) - if rawMaxPass == 0 { - rawMaxPass = 1 - } - l.maxPASSCache.Store(&CounterCache{ - val: rawMaxPass, - time: time.Now(), - }) - return rawMaxPass -} - -func (l *BBR) timespan(lastTime time.Time) int { - v := int(time.Since(lastTime) / l.bucketDuration) - if v > -1 { - return v - } - return l.winSize -} - -func (l *BBR) minRT() int64 { - rtCache := l.minRtCache.Load() - if rtCache != nil { - rc := rtCache.(*CounterCache) - if l.timespan(rc.time) < 1 { - return rc.val - } - } - rawMinRT := int64(math.Ceil(l.rtStat.Reduce(func(iterator metric.Iterator) float64 { - var result = math.MaxFloat64 - for i := 1; iterator.Next() && i < l.conf.WinBucket; i++ { - bucket := iterator.Bucket() - if len(bucket.Points) == 0 { - continue - } - total := 0.0 - for _, p := range bucket.Points { - total += p - } - avg := total / float64(bucket.Count) - result = math.Min(result, avg) - } - return result - }))) - if rawMinRT <= 0 { - rawMinRT = 1 - } - l.minRtCache.Store(&CounterCache{ - val: rawMinRT, - time: time.Now(), - }) - return rawMinRT -} - -func (l *BBR) maxFlight() int64 { - return int64(math.Floor(float64(l.maxPASS()*l.minRT()*l.winBucketPerSec)/1000.0 + 0.5)) -} - -func (l *BBR) shouldDrop() bool { - if l.cpu() < l.conf.CPUThreshold { - prevDrop, _ := l.prevDrop.Load().(time.Duration) - if prevDrop == 0 { - return false - } - if time.Since(initTime)-prevDrop <= time.Second { - inFlight := atomic.LoadInt64(&l.inFlight) - return inFlight > 1 && inFlight > l.maxFlight() - } - l.prevDrop.Store(time.Duration(0)) - return false - } - inFlight := atomic.LoadInt64(&l.inFlight) - drop := inFlight > 1 && inFlight > l.maxFlight() - if drop { - prevDrop, _ := l.prevDrop.Load().(time.Duration) - if prevDrop != 0 { - return drop - } - l.prevDrop.Store(time.Since(initTime)) - } - return drop -} - -// Stat tasks a snapshot of the bbr limiter. -func (l *BBR) Stat() Stat { - return Stat{ - Cpu: l.cpu(), - InFlight: atomic.LoadInt64(&l.inFlight), - MinRt: l.minRT(), - MaxPass: l.maxPASS(), - MaxInFlight: l.maxFlight(), - } -} - -// Allow checks all inbound traffic. -// Once overload is detected, it raises ecode.LimitExceed error. 
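// An illustrative usage sketch, not part of the original file. The admission
// ceiling used by shouldDrop is maxFlight = floor(maxPass*minRT*winBucketPerSec/1000 + 0.5);
// with an assumed peak of 100 passes per bucket, a 6ms minimum RT and 10
// buckets per second that allows roughly 6 requests in flight once CPU usage
// crosses the threshold. Callers must invoke the returned done callback so
// the pass and RT statistics stay accurate.
func exampleAllow(ctx context.Context, l limit.Limiter) error {
	done, err := l.Allow(ctx)
	if err != nil {
		return err // overload detected (ecode.LimitExceed): shed this request
	}
	// ... do the protected work here ...
	done(limit.DoneInfo{Op: limit.Success})
	return nil
}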
-func (l *BBR) Allow(ctx context.Context, opts ...limit.AllowOption) (func(info limit.DoneInfo), error) { - allowOpts := limit.DefaultAllowOpts() - for _, opt := range opts { - opt.Apply(&allowOpts) - } - if l.shouldDrop() { - return nil, ecode.LimitExceed - } - atomic.AddInt64(&l.inFlight, 1) - stime := time.Since(initTime) - return func(do limit.DoneInfo) { - rt := int64((time.Since(initTime) - stime) / time.Millisecond) - l.rtStat.Add(rt) - atomic.AddInt64(&l.inFlight, -1) - switch do.Op { - case limit.Success: - l.passStat.Add(1) - return - default: - return - } - }, nil -} - -func newLimiter(conf *Config) limit.Limiter { - if conf == nil { - conf = defaultConf - } - size := conf.WinBucket - bucketDuration := conf.Window / time.Duration(conf.WinBucket) - passStat := metric.NewRollingCounter(metric.RollingCounterOpts{Size: size, BucketDuration: bucketDuration}) - rtStat := metric.NewRollingCounter(metric.RollingCounterOpts{Size: size, BucketDuration: bucketDuration}) - cpu := func() int64 { - return atomic.LoadInt64(&cpu) - } - limiter := &BBR{ - cpu: cpu, - conf: conf, - passStat: passStat, - rtStat: rtStat, - winBucketPerSec: int64(time.Second) / (int64(conf.Window) / int64(conf.WinBucket)), - bucketDuration: bucketDuration, - winSize: conf.WinBucket, - } - return limiter -} - -// Group represents a class of BBRLimiter and forms a namespace in which -// units of BBRLimiter. -type Group struct { - group *group.Group -} - -// NewGroup new a limiter group container, if conf nil use default conf. -func NewGroup(conf *Config) *Group { - if conf == nil { - conf = defaultConf - } - group := group.NewGroup(func() interface{} { - return newLimiter(conf) - }) - return &Group{ - group: group, - } -} - -// Get get a limiter by a specified key, if limiter not exists then make a new one. 
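// An illustrative usage sketch, not part of the original file: one limiter is
// created lazily per key, so callers typically key by URL path or method
// name. The path below is only an example; passing nil to NewGroup falls back
// to defaultConf.
func exampleGroup(ctx context.Context) {
	g := NewGroup(nil)
	l := g.Get("/api/hello")
	if done, err := l.Allow(ctx); err == nil {
		done(limit.DoneInfo{Op: limit.Success})
	}
}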
-func (g *Group) Get(key string) limit.Limiter { - limiter := g.group.Get(key) - return limiter.(limit.Limiter) -} diff --git a/pkg/ratelimit/bbr/bbr_test.go b/pkg/ratelimit/bbr/bbr_test.go deleted file mode 100644 index 93345e5ff..000000000 --- a/pkg/ratelimit/bbr/bbr_test.go +++ /dev/null @@ -1,298 +0,0 @@ -package bbr - -import ( - "context" - "fmt" - "math/rand" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "github.com/go-kratos/kratos/pkg/ratelimit" - "github.com/go-kratos/kratos/pkg/stat/metric" -) - -func confForTest() *Config { - return &Config{ - Window: time.Second, - WinBucket: 10, - CPUThreshold: 800, - } -} - -func warmup(bbr *BBR, count int) { - for i := 0; i < count; i++ { - done, err := bbr.Allow(context.TODO()) - time.Sleep(time.Millisecond * 1) - if err == nil { - done(ratelimit.DoneInfo{Op: ratelimit.Success}) - } - } -} - -func forceAllow(bbr *BBR) { - inflight := bbr.inFlight - bbr.inFlight = bbr.maxPASS() - 1 - done, err := bbr.Allow(context.TODO()) - if err == nil { - done(ratelimit.DoneInfo{Op: ratelimit.Success}) - } - bbr.inFlight = inflight -} - -func TestBBR(t *testing.T) { - cfg := &Config{ - Window: time.Second * 5, - WinBucket: 50, - CPUThreshold: 100, - } - limiter := newLimiter(cfg) - var wg sync.WaitGroup - var drop int64 - for i := 0; i < 100; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for i := 0; i < 300; i++ { - f, err := limiter.Allow(context.TODO()) - if err != nil { - atomic.AddInt64(&drop, 1) - } else { - count := rand.Intn(100) - time.Sleep(time.Millisecond * time.Duration(count)) - f(ratelimit.DoneInfo{Op: ratelimit.Success}) - } - } - }() - } - wg.Wait() - fmt.Println("drop: ", drop) -} - -func TestBBRMaxPass(t *testing.T) { - bucketDuration := time.Millisecond * 100 - bbr := newLimiter(confForTest()).(*BBR) - for i := 1; i <= 10; i++ { - bbr.passStat.Add(int64(i * 100)) - time.Sleep(bucketDuration) - } - assert.Equal(t, int64(1000), bbr.maxPASS()) - - // default max pass is equal to 1. - bbr = newLimiter(confForTest()).(*BBR) - assert.Equal(t, int64(1), bbr.maxPASS()) -} - -func TestBBRMaxPassWithCache(t *testing.T) { - bucketDuration := time.Millisecond * 100 - bbr := newLimiter(confForTest()).(*BBR) - // witch cache, value of latest bucket is not counted instently. - // after a bucket duration time, this bucket will be fullly counted. - for i := 1; i <= 11; i++ { - bbr.passStat.Add(int64(i * 50)) - time.Sleep(bucketDuration / 2) - _ = bbr.maxPASS() - bbr.passStat.Add(int64(i * 50)) - time.Sleep(bucketDuration / 2) - } - bbr.passStat.Add(int64(1)) - assert.Equal(t, int64(1000), bbr.maxPASS()) -} - -func TestBBRMinRt(t *testing.T) { - bucketDuration := time.Millisecond * 100 - bbr := newLimiter(confForTest()).(*BBR) - for i := 0; i < 10; i++ { - for j := i*10 + 1; j <= i*10+10; j++ { - bbr.rtStat.Add(int64(j)) - } - if i != 9 { - time.Sleep(bucketDuration) - } - } - assert.Equal(t, int64(6), bbr.minRT()) - - // default max min rt is equal to maxFloat64. 
- bucketDuration = time.Millisecond * 100 - bbr = newLimiter(confForTest()).(*BBR) - bbr.rtStat = metric.NewRollingCounter(metric.RollingCounterOpts{Size: 10, BucketDuration: bucketDuration}) - assert.Equal(t, int64(1), bbr.minRT()) -} - -func TestBBRMinRtWithCache(t *testing.T) { - bucketDuration := time.Millisecond * 100 - bbr := newLimiter(confForTest()).(*BBR) - for i := 0; i < 10; i++ { - for j := i*10 + 1; j <= i*10+5; j++ { - bbr.rtStat.Add(int64(j)) - } - if i != 9 { - time.Sleep(bucketDuration / 2) - } - _ = bbr.minRT() - for j := i*10 + 6; j <= i*10+10; j++ { - bbr.rtStat.Add(int64(j)) - } - if i != 9 { - time.Sleep(bucketDuration / 2) - } - } - assert.Equal(t, int64(6), bbr.minRT()) -} - -func TestBBRMaxQps(t *testing.T) { - bbr := newLimiter(confForTest()).(*BBR) - bucketDuration := time.Millisecond * 100 - passStat := metric.NewRollingCounter(metric.RollingCounterOpts{Size: 10, BucketDuration: bucketDuration}) - rtStat := metric.NewRollingCounter(metric.RollingCounterOpts{Size: 10, BucketDuration: bucketDuration}) - for i := 0; i < 10; i++ { - passStat.Add(int64((i + 2) * 100)) - for j := i*10 + 1; j <= i*10+10; j++ { - rtStat.Add(int64(j)) - } - if i != 9 { - time.Sleep(bucketDuration) - } - } - bbr.passStat = passStat - bbr.rtStat = rtStat - assert.Equal(t, int64(60), bbr.maxFlight()) -} - -func TestBBRShouldDrop(t *testing.T) { - var cpu int64 - bbr := newLimiter(confForTest()).(*BBR) - bbr.cpu = func() int64 { - return cpu - } - bucketDuration := time.Millisecond * 100 - passStat := metric.NewRollingCounter(metric.RollingCounterOpts{Size: 10, BucketDuration: bucketDuration}) - rtStat := metric.NewRollingCounter(metric.RollingCounterOpts{Size: 10, BucketDuration: bucketDuration}) - for i := 0; i < 10; i++ { - passStat.Add(int64((i + 1) * 100)) - for j := i*10 + 1; j <= i*10+10; j++ { - rtStat.Add(int64(j)) - } - if i != 9 { - time.Sleep(bucketDuration) - } - } - bbr.passStat = passStat - bbr.rtStat = rtStat - // cpu >= 800, inflight < maxQps - cpu = 800 - bbr.inFlight = 50 - assert.Equal(t, false, bbr.shouldDrop()) - - // cpu >= 800, inflight > maxQps - cpu = 800 - bbr.inFlight = 80 - assert.Equal(t, true, bbr.shouldDrop()) - - // cpu < 800, inflight > maxQps, cold duration - cpu = 700 - bbr.inFlight = 80 - assert.Equal(t, true, bbr.shouldDrop()) - - // cpu < 800, inflight > maxQps - time.Sleep(2 * time.Second) - cpu = 700 - bbr.inFlight = 80 - assert.Equal(t, false, bbr.shouldDrop()) -} - -func TestGroup(t *testing.T) { - cfg := &Config{ - Window: time.Second * 5, - WinBucket: 50, - CPUThreshold: 100, - } - group := NewGroup(cfg) - t.Run("get", func(t *testing.T) { - limiter := group.Get("test") - assert.NotNil(t, limiter) - }) -} - -func BenchmarkBBRAllowUnderLowLoad(b *testing.B) { - bbr := newLimiter(confForTest()).(*BBR) - bbr.cpu = func() int64 { - return 500 - } - b.ResetTimer() - for i := 0; i <= b.N; i++ { - done, err := bbr.Allow(context.TODO()) - if err == nil { - done(ratelimit.DoneInfo{Op: ratelimit.Success}) - } - } -} - -func BenchmarkBBRAllowUnderHighLoad(b *testing.B) { - bbr := newLimiter(confForTest()).(*BBR) - bbr.cpu = func() int64 { - return 900 - } - bbr.inFlight = 1 - b.ResetTimer() - for i := 0; i <= b.N; i++ { - if i%10000 == 0 { - maxFlight := bbr.maxFlight() - if maxFlight != 0 { - bbr.inFlight = rand.Int63n(bbr.maxFlight() * 2) - } - } - done, err := bbr.Allow(context.TODO()) - if err == nil { - done(ratelimit.DoneInfo{Op: ratelimit.Success}) - } - } -} - -func BenchmarkBBRShouldDropUnderLowLoad(b *testing.B) { - bbr := 
newLimiter(confForTest()).(*BBR) - bbr.cpu = func() int64 { - return 500 - } - warmup(bbr, 10000) - b.ResetTimer() - for i := 0; i <= b.N; i++ { - bbr.shouldDrop() - } -} - -func BenchmarkBBRShouldDropUnderHighLoad(b *testing.B) { - bbr := newLimiter(confForTest()).(*BBR) - bbr.cpu = func() int64 { - return 900 - } - warmup(bbr, 10000) - bbr.inFlight = 1000 - b.ResetTimer() - for i := 0; i <= b.N; i++ { - bbr.shouldDrop() - if i%10000 == 0 { - forceAllow(bbr) - } - } -} - -func BenchmarkBBRShouldDropUnderUnstableLoad(b *testing.B) { - bbr := newLimiter(confForTest()).(*BBR) - bbr.cpu = func() int64 { - return 500 - } - warmup(bbr, 10000) - bbr.prevDrop.Store(time.Since(initTime)) - bbr.inFlight = 1000 - b.ResetTimer() - for i := 0; i <= b.N; i++ { - bbr.shouldDrop() - if i%100000 == 0 { - forceAllow(bbr) - } - } -} diff --git a/pkg/ratelimit/limiter.go b/pkg/ratelimit/limiter.go deleted file mode 100644 index 79c8e8ab1..000000000 --- a/pkg/ratelimit/limiter.go +++ /dev/null @@ -1,40 +0,0 @@ -package ratelimit - -import ( - "context" -) - -// Op operations type. -type Op int - -const ( - // Success opertion type: success - Success Op = iota - // Ignore opertion type: ignore - Ignore - // Drop opertion type: drop - Drop -) - -type allowOptions struct{} - -// AllowOptions allow options. -type AllowOption interface { - Apply(*allowOptions) -} - -// DoneInfo done info. -type DoneInfo struct { - Err error - Op Op -} - -// DefaultAllowOpts returns the default allow options. -func DefaultAllowOpts() allowOptions { - return allowOptions{} -} - -// Limiter limit interface. -type Limiter interface { - Allow(ctx context.Context, opts ...AllowOption) (func(info DoneInfo), error) -} diff --git a/pkg/stat/README.md b/pkg/stat/README.md deleted file mode 100644 index 676067e2f..000000000 --- a/pkg/stat/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# stat - -## 项目简介 - -数据统计、监控采集等 diff --git a/pkg/stat/metric/counter.go b/pkg/stat/metric/counter.go deleted file mode 100644 index 6015899d2..000000000 --- a/pkg/stat/metric/counter.go +++ /dev/null @@ -1,84 +0,0 @@ -package metric - -import ( - "fmt" - "sync/atomic" - - "github.com/prometheus/client_golang/prometheus" -) - -var _ Metric = &counter{} - -// Counter stores a numerical value that only ever goes up. -type Counter interface { - Metric -} - -// CounterOpts is an alias of Opts. -type CounterOpts Opts - -type counter struct { - val int64 -} - -// NewCounter creates a new Counter based on the CounterOpts. -func NewCounter(opts CounterOpts) Counter { - return &counter{} -} - -func (c *counter) Add(val int64) { - if val < 0 { - panic(fmt.Errorf("stat/metric: cannot decrease in negative value. val: %d", val)) - } - atomic.AddInt64(&c.val, val) -} - -func (c *counter) Value() int64 { - return atomic.LoadInt64(&c.val) -} - -// CounterVecOpts is an alias of VectorOpts. -type CounterVecOpts VectorOpts - -// CounterVec counter vec. -type CounterVec interface { - // Inc increments the counter by 1. Use Add to increment it by arbitrary - // non-negative values. - Inc(labels ...string) - // Add adds the given value to the counter. It panics if the value is < - // 0. - Add(v float64, labels ...string) -} - -// counterVec counter vec. -type promCounterVec struct { - counter *prometheus.CounterVec -} - -// NewCounterVec . 
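// An illustrative usage sketch, not part of the original file. The vector is
// registered against the default prometheus registerer, so a given
// namespace/subsystem/name combination may only be created once per process
// (a second registration panics, as the test exercises). The metric name and
// labels below are examples.
func exampleCounterVec() {
	requests := NewCounterVec(&CounterVecOpts{
		Namespace: "http",
		Subsystem: "server",
		Name:      "requests_total",
		Help:      "Number of HTTP requests handled.",
		Labels:    []string{"method", "path"},
	})
	requests.Inc("GET", "/ping")          // increment by 1
	requests.Add(3, "POST", "/api/hello") // add an arbitrary non-negative value
}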
-func NewCounterVec(cfg *CounterVecOpts) CounterVec { - if cfg == nil { - return nil - } - vec := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: cfg.Namespace, - Subsystem: cfg.Subsystem, - Name: cfg.Name, - Help: cfg.Help, - }, cfg.Labels) - prometheus.MustRegister(vec) - return &promCounterVec{ - counter: vec, - } -} - -// Inc Inc increments the counter by 1. Use Add to increment it by arbitrary. -func (counter *promCounterVec) Inc(labels ...string) { - counter.counter.WithLabelValues(labels...).Inc() -} - -// Add Inc increments the counter by 1. Use Add to increment it by arbitrary. -func (counter *promCounterVec) Add(v float64, labels ...string) { - counter.counter.WithLabelValues(labels...).Add(v) -} diff --git a/pkg/stat/metric/counter_test.go b/pkg/stat/metric/counter_test.go deleted file mode 100644 index 413a61c94..000000000 --- a/pkg/stat/metric/counter_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package metric - -import ( - "math/rand" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestCounter(t *testing.T) { - counter := NewCounter(CounterOpts{}) - count := rand.Intn(100) - for i := 0; i < count; i++ { - counter.Add(1) - } - val := counter.Value() - assert.Equal(t, val, int64(count)) -} - -func TestCounterVec(t *testing.T) { - counterVec := NewCounterVec(&CounterVecOpts{ - Namespace: "test", - Subsystem: "test", - Name: "test", - Help: "this is test metrics.", - Labels: []string{"name", "addr"}, - }) - counterVec.Inc("name1", "127.0.0.1") - assert.Panics(t, func() { - NewCounterVec(&CounterVecOpts{ - Namespace: "test", - Subsystem: "test", - Name: "test", - Help: "this is test metrics.", - Labels: []string{"name", "addr"}, - }) - }, "Expected to panic.") - assert.NotPanics(t, func() { - NewCounterVec(&CounterVecOpts{ - Namespace: "test", - Subsystem: "test", - Name: "test2", - Help: "this is test metrics.", - Labels: []string{"name", "addr"}, - }) - }, "Expected normal. no panic.") -} diff --git a/pkg/stat/metric/gauge.go b/pkg/stat/metric/gauge.go deleted file mode 100644 index de6980a49..000000000 --- a/pkg/stat/metric/gauge.go +++ /dev/null @@ -1,94 +0,0 @@ -package metric - -import ( - "sync/atomic" - - "github.com/prometheus/client_golang/prometheus" -) - -var _ Metric = &gauge{} - -// Gauge stores a numerical value that can be add arbitrarily. -type Gauge interface { - Metric - // Sets sets the value to the given number. - Set(int64) -} - -// GaugeOpts is an alias of Opts. -type GaugeOpts Opts - -type gauge struct { - val int64 -} - -// NewGauge creates a new Gauge based on the GaugeOpts. -func NewGauge(opts GaugeOpts) Gauge { - return &gauge{} -} - -func (g *gauge) Add(val int64) { - atomic.AddInt64(&g.val, val) -} - -func (g *gauge) Set(val int64) { - old := atomic.LoadInt64(&g.val) - atomic.CompareAndSwapInt64(&g.val, old, val) -} - -func (g *gauge) Value() int64 { - return atomic.LoadInt64(&g.val) -} - -// GaugeVecOpts is an alias of VectorOpts. -type GaugeVecOpts VectorOpts - -// GaugeVec gauge vec. -type GaugeVec interface { - // Set sets the Gauge to an arbitrary value. - Set(v float64, labels ...string) - // Inc increments the Gauge by 1. Use Add to increment it by arbitrary - // values. - Inc(labels ...string) - // Add adds the given value to the Gauge. (The value can be negative, - // resulting in a decrease of the Gauge.) - Add(v float64, labels ...string) -} - -// gaugeVec gauge vec. -type promGaugeVec struct { - gauge *prometheus.GaugeVec -} - -// NewGaugeVec . 
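// An illustrative usage sketch, not part of the original file, mirroring the
// counter example: a gauge can move in both directions and can be set to an
// absolute value. The metric name and labels are examples.
func exampleGaugeVec() {
	inFlight := NewGaugeVec(&GaugeVecOpts{
		Namespace: "http",
		Subsystem: "server",
		Name:      "in_flight_requests",
		Help:      "Current number of in-flight requests.",
		Labels:    []string{"path"},
	})
	inFlight.Inc("/ping")
	inFlight.Add(-1, "/ping")
	inFlight.Set(0, "/ping")
}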
-func NewGaugeVec(cfg *GaugeVecOpts) GaugeVec { - if cfg == nil { - return nil - } - vec := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: cfg.Namespace, - Subsystem: cfg.Subsystem, - Name: cfg.Name, - Help: cfg.Help, - }, cfg.Labels) - prometheus.MustRegister(vec) - return &promGaugeVec{ - gauge: vec, - } -} - -// Inc Inc increments the counter by 1. Use Add to increment it by arbitrary. -func (gauge *promGaugeVec) Inc(labels ...string) { - gauge.gauge.WithLabelValues(labels...).Inc() -} - -// Add Inc increments the counter by 1. Use Add to increment it by arbitrary. -func (gauge *promGaugeVec) Add(v float64, labels ...string) { - gauge.gauge.WithLabelValues(labels...).Add(v) -} - -// Set set the given value to the collection. -func (gauge *promGaugeVec) Set(v float64, labels ...string) { - gauge.gauge.WithLabelValues(labels...).Set(v) -} diff --git a/pkg/stat/metric/gauge_test.go b/pkg/stat/metric/gauge_test.go deleted file mode 100644 index b3cb26d22..000000000 --- a/pkg/stat/metric/gauge_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package metric - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestGaugeAdd(t *testing.T) { - gauge := NewGauge(GaugeOpts{}) - gauge.Add(100) - gauge.Add(-50) - val := gauge.Value() - assert.Equal(t, val, int64(50)) -} - -func TestGaugeSet(t *testing.T) { - gauge := NewGauge(GaugeOpts{}) - gauge.Add(100) - gauge.Set(50) - val := gauge.Value() - assert.Equal(t, val, int64(50)) -} diff --git a/pkg/stat/metric/histogram.go b/pkg/stat/metric/histogram.go deleted file mode 100644 index 8a7af582c..000000000 --- a/pkg/stat/metric/histogram.go +++ /dev/null @@ -1,50 +0,0 @@ -package metric - -import ( - "github.com/prometheus/client_golang/prometheus" -) - -// HistogramVecOpts is histogram vector opts. -type HistogramVecOpts struct { - Namespace string - Subsystem string - Name string - Help string - Labels []string - Buckets []float64 -} - -// HistogramVec gauge vec. -type HistogramVec interface { - // Observe adds a single observation to the histogram. - Observe(v int64, labels ...string) -} - -// Histogram prom histogram collection. -type promHistogramVec struct { - histogram *prometheus.HistogramVec -} - -// NewHistogramVec new a histogram vec. -func NewHistogramVec(cfg *HistogramVecOpts) HistogramVec { - if cfg == nil { - return nil - } - vec := prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Namespace: cfg.Namespace, - Subsystem: cfg.Subsystem, - Name: cfg.Name, - Help: cfg.Help, - Buckets: cfg.Buckets, - }, cfg.Labels) - prometheus.MustRegister(vec) - return &promHistogramVec{ - histogram: vec, - } -} - -// Timing adds a single observation to the histogram. -func (histogram *promHistogramVec) Observe(v int64, labels ...string) { - histogram.histogram.WithLabelValues(labels...).Observe(float64(v)) -} diff --git a/pkg/stat/metric/iterator.go b/pkg/stat/metric/iterator.go deleted file mode 100644 index b0cac693d..000000000 --- a/pkg/stat/metric/iterator.go +++ /dev/null @@ -1,26 +0,0 @@ -package metric - -import "fmt" - -// Iterator iterates the buckets within the window. -type Iterator struct { - count int - iteratedCount int - cur *Bucket -} - -// Next returns true util all of the buckets has been iterated. -func (i *Iterator) Next() bool { - return i.count != i.iteratedCount -} - -// Bucket gets current bucket. 
-func (i *Iterator) Bucket() Bucket { - if !(i.Next()) { - panic(fmt.Errorf("stat/metric: iteration out of range iteratedCount: %d count: %d", i.iteratedCount, i.count)) - } - bucket := *i.cur - i.iteratedCount++ - i.cur = i.cur.Next() - return bucket -} diff --git a/pkg/stat/metric/metric.go b/pkg/stat/metric/metric.go deleted file mode 100644 index 9ea08ca72..000000000 --- a/pkg/stat/metric/metric.go +++ /dev/null @@ -1,104 +0,0 @@ -package metric - -import ( - "errors" - "fmt" -) - -// Opts contains the common arguments for creating Metric. -type Opts struct { -} - -// Metric is a sample interface. -// Implementations of Metrics in metric package are Counter, Gauge, -// PointGauge, RollingCounter and RollingGauge. -type Metric interface { - // Add adds the given value to the counter. - Add(int64) - // Value gets the current value. - // If the metric's type is PointGauge, RollingCounter, RollingGauge, - // it returns the sum value within the window. - Value() int64 -} - -// Aggregation contains some common aggregation function. -// Each aggregation can compute summary statistics of window. -type Aggregation interface { - // Min finds the min value within the window. - Min() float64 - // Max finds the max value within the window. - Max() float64 - // Avg computes average value within the window. - Avg() float64 - // Sum computes sum value within the window. - Sum() float64 -} - -// VectorOpts contains the common arguments for creating vec Metric.. -type VectorOpts struct { - Namespace string - Subsystem string - Name string - Help string - Labels []string -} - -const ( - _businessNamespace = "business" - _businessSubsystemCount = "count" - _businessSubSystemGauge = "gauge" - _businessSubSystemHistogram = "histogram" -) - -var ( - _defaultBuckets = []float64{5, 10, 25, 50, 100, 250, 500} -) - -// NewBusinessMetricCount business Metric count vec. -// name or labels should not be empty. -func NewBusinessMetricCount(name string, labels ...string) CounterVec { - if name == "" || len(labels) == 0 { - panic(errors.New("stat:metric business count metric name should not be empty or labels length should be greater than zero")) - } - return NewCounterVec(&CounterVecOpts{ - Namespace: _businessNamespace, - Subsystem: _businessSubsystemCount, - Name: name, - Labels: labels, - Help: fmt.Sprintf("business metric count %s", name), - }) -} - -// NewBusinessMetricGauge business Metric gauge vec. -// name or labels should not be empty. -func NewBusinessMetricGauge(name string, labels ...string) GaugeVec { - if name == "" || len(labels) == 0 { - panic(errors.New("stat:metric business gauge metric name should not be empty or labels length should be greater than zero")) - } - return NewGaugeVec(&GaugeVecOpts{ - Namespace: _businessNamespace, - Subsystem: _businessSubSystemGauge, - Name: name, - Labels: labels, - Help: fmt.Sprintf("business metric gauge %s", name), - }) -} - -// NewBusinessMetricHistogram business Metric histogram vec. -// name or labels should not be empty. 
-func NewBusinessMetricHistogram(name string, buckets []float64, labels ...string) HistogramVec { - if name == "" || len(labels) == 0 { - panic(errors.New("stat:metric business histogram metric name should not be empty or labels length should be greater than zero")) - } - if len(buckets) == 0 { - buckets = _defaultBuckets - } - return NewHistogramVec(&HistogramVecOpts{ - Namespace: _businessNamespace, - Subsystem: _businessSubSystemHistogram, - Name: name, - Labels: labels, - Buckets: buckets, - Help: fmt.Sprintf("business metric histogram %s", name), - }) -} diff --git a/pkg/stat/metric/point_gauge.go b/pkg/stat/metric/point_gauge.go deleted file mode 100644 index 0fc15a622..000000000 --- a/pkg/stat/metric/point_gauge.go +++ /dev/null @@ -1,61 +0,0 @@ -package metric - -var _ Metric = &pointGauge{} -var _ Aggregation = &pointGauge{} - -// PointGauge represents a ring window. -// Every buckets within the window contains one point. -// When the window is full, the earliest point will be overwrite. -type PointGauge interface { - Aggregation - Metric - // Reduce applies the reduction function to all buckets within the window. - Reduce(func(Iterator) float64) float64 -} - -// PointGaugeOpts contains the arguments for creating PointGauge. -type PointGaugeOpts struct { - // Size represents the bucket size within the window. - Size int -} - -type pointGauge struct { - policy *PointPolicy -} - -// NewPointGauge creates a new PointGauge based on PointGaugeOpts. -func NewPointGauge(opts PointGaugeOpts) PointGauge { - window := NewWindow(WindowOpts{Size: opts.Size}) - policy := NewPointPolicy(window) - return &pointGauge{ - policy: policy, - } -} - -func (r *pointGauge) Add(val int64) { - r.policy.Append(float64(val)) -} - -func (r *pointGauge) Reduce(f func(Iterator) float64) float64 { - return r.policy.Reduce(f) -} - -func (r *pointGauge) Avg() float64 { - return r.policy.Reduce(Avg) -} - -func (r *pointGauge) Min() float64 { - return r.policy.Reduce(Min) -} - -func (r *pointGauge) Max() float64 { - return r.policy.Reduce(Max) -} - -func (r *pointGauge) Sum() float64 { - return r.policy.Reduce(Sum) -} - -func (r *pointGauge) Value() int64 { - return int64(r.Sum()) -} diff --git a/pkg/stat/metric/point_gauge_test.go b/pkg/stat/metric/point_gauge_test.go deleted file mode 100644 index fcfee4788..000000000 --- a/pkg/stat/metric/point_gauge_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package metric - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestPointGaugeAdd(t *testing.T) { - opts := PointGaugeOpts{Size: 3} - pointGauge := NewPointGauge(opts) - listBuckets := func() [][]float64 { - buckets := make([][]float64, 0) - pointGauge.Reduce(func(i Iterator) float64 { - for i.Next() { - bucket := i.Bucket() - buckets = append(buckets, bucket.Points) - } - return 0.0 - }) - return buckets - } - assert.Equal(t, [][]float64{{}, {}, {}}, listBuckets(), "Empty Buckets") - pointGauge.Add(1) - assert.Equal(t, [][]float64{{}, {}, {1}}, listBuckets(), "Point 1") - pointGauge.Add(2) - assert.Equal(t, [][]float64{{}, {1}, {2}}, listBuckets(), "Point 1, 2") - pointGauge.Add(3) - assert.Equal(t, [][]float64{{1}, {2}, {3}}, listBuckets(), "Point 1, 2, 3") - pointGauge.Add(4) - assert.Equal(t, [][]float64{{2}, {3}, {4}}, listBuckets(), "Point 2, 3, 4") - pointGauge.Add(5) - assert.Equal(t, [][]float64{{3}, {4}, {5}}, listBuckets(), "Point 3, 4, 5") -} - -func TestPointGaugeReduce(t *testing.T) { - opts := PointGaugeOpts{Size: 10} - pointGauge := NewPointGauge(opts) - for i := 0; i < opts.Size; i++ 
{ - pointGauge.Add(int64(i)) - } - var _ = pointGauge.Reduce(func(i Iterator) float64 { - idx := 0 - for i.Next() { - bucket := i.Bucket() - assert.Equal(t, bucket.Points[0], float64(idx), "validate points of pointGauge") - idx++ - } - return 0.0 - }) - assert.Equal(t, float64(9), pointGauge.Max(), "validate max of pointGauge") - assert.Equal(t, float64(4.5), pointGauge.Avg(), "validate avg of pointGauge") - assert.Equal(t, float64(0), pointGauge.Min(), "validate min of pointGauge") - assert.Equal(t, float64(45), pointGauge.Sum(), "validate sum of pointGauge") -} diff --git a/pkg/stat/metric/point_policy.go b/pkg/stat/metric/point_policy.go deleted file mode 100644 index aae8934e9..000000000 --- a/pkg/stat/metric/point_policy.go +++ /dev/null @@ -1,57 +0,0 @@ -package metric - -import "sync" - -// PointPolicy is a policy of points within the window. -// PointPolicy wraps the window and make it seem like ring-buf. -// When using PointPolicy, every buckets within the windows contains at more one point. -// e.g. [[1], [2], [3]] -type PointPolicy struct { - mu sync.RWMutex - size int - window *Window - offset int -} - -// NewPointPolicy creates a new PointPolicy. -func NewPointPolicy(window *Window) *PointPolicy { - return &PointPolicy{ - window: window, - size: window.Size(), - offset: -1, - } -} - -func (p *PointPolicy) prevOffset() int { - return p.offset -} - -func (p *PointPolicy) nextOffset() int { - return (p.prevOffset() + 1) % p.size -} - -func (p *PointPolicy) updateOffset(offset int) { - p.offset = offset -} - -// Append appends the given points to the window. -func (p *PointPolicy) Append(val float64) { - p.mu.Lock() - defer p.mu.Unlock() - offset := p.nextOffset() - p.window.ResetBucket(offset) - p.window.Append(offset, val) - p.updateOffset(offset) -} - -// Reduce applies the reduction function to all buckets within the window. -func (p *PointPolicy) Reduce(f func(Iterator) float64) float64 { - p.mu.RLock() - defer p.mu.RUnlock() - offset := p.offset + 1 - if offset == p.size { - offset = 0 - } - iterator := p.window.Iterator(offset, p.size) - return f(iterator) -} diff --git a/pkg/stat/metric/reduce.go b/pkg/stat/metric/reduce.go deleted file mode 100644 index 20165c984..000000000 --- a/pkg/stat/metric/reduce.go +++ /dev/null @@ -1,77 +0,0 @@ -package metric - -// Sum the values within the window. -func Sum(iterator Iterator) float64 { - var result = 0.0 - for iterator.Next() { - bucket := iterator.Bucket() - for _, p := range bucket.Points { - result = result + p - } - } - return result -} - -// Avg the values within the window. -func Avg(iterator Iterator) float64 { - var result = 0.0 - var count = 0.0 - for iterator.Next() { - bucket := iterator.Bucket() - for _, p := range bucket.Points { - result = result + p - count = count + 1 - } - } - return result / count -} - -// Min the values within the window. -func Min(iterator Iterator) float64 { - var result = 0.0 - var started = false - for iterator.Next() { - bucket := iterator.Bucket() - for _, p := range bucket.Points { - if !started { - result = p - started = true - continue - } - if p < result { - result = p - } - } - } - return result -} - -// Max the values within the window. -func Max(iterator Iterator) float64 { - var result = 0.0 - var started = false - for iterator.Next() { - bucket := iterator.Bucket() - for _, p := range bucket.Points { - if !started { - result = p - started = true - continue - } - if p > result { - result = p - } - } - } - return result -} - -// Count sums the count value within the window. 
-func Count(iterator Iterator) float64 { - var result int64 - for iterator.Next() { - bucket := iterator.Bucket() - result += bucket.Count - } - return float64(result) -} diff --git a/pkg/stat/metric/reduce_test.go b/pkg/stat/metric/reduce_test.go deleted file mode 100644 index 5165dd293..000000000 --- a/pkg/stat/metric/reduce_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package metric - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestCount(t *testing.T) { - opts := PointGaugeOpts{Size: 10} - pointGauge := NewPointGauge(opts) - for i := 0; i < opts.Size; i++ { - pointGauge.Add(int64(i)) - } - result := pointGauge.Reduce(Count) - assert.Equal(t, float64(10), result, "validate count of pointGauge") -} diff --git a/pkg/stat/metric/rolling_counter.go b/pkg/stat/metric/rolling_counter.go deleted file mode 100644 index e1038439b..000000000 --- a/pkg/stat/metric/rolling_counter.go +++ /dev/null @@ -1,75 +0,0 @@ -package metric - -import ( - "fmt" - "time" -) - -var _ Metric = &rollingCounter{} -var _ Aggregation = &rollingCounter{} - -// RollingCounter represents a ring window based on time duration. -// e.g. [[1], [3], [5]] -type RollingCounter interface { - Metric - Aggregation - Timespan() int - // Reduce applies the reduction function to all buckets within the window. - Reduce(func(Iterator) float64) float64 -} - -// RollingCounterOpts contains the arguments for creating RollingCounter. -type RollingCounterOpts struct { - Size int - BucketDuration time.Duration -} - -type rollingCounter struct { - policy *RollingPolicy -} - -// NewRollingCounter creates a new RollingCounter bases on RollingCounterOpts. -func NewRollingCounter(opts RollingCounterOpts) RollingCounter { - window := NewWindow(WindowOpts{Size: opts.Size}) - policy := NewRollingPolicy(window, RollingPolicyOpts{BucketDuration: opts.BucketDuration}) - return &rollingCounter{ - policy: policy, - } -} - -func (r *rollingCounter) Add(val int64) { - if val < 0 { - panic(fmt.Errorf("stat/metric: cannot decrease in value. 
val: %d", val)) - } - r.policy.Add(float64(val)) -} - -func (r *rollingCounter) Reduce(f func(Iterator) float64) float64 { - return r.policy.Reduce(f) -} - -func (r *rollingCounter) Avg() float64 { - return r.policy.Reduce(Avg) -} - -func (r *rollingCounter) Min() float64 { - return r.policy.Reduce(Min) -} - -func (r *rollingCounter) Max() float64 { - return r.policy.Reduce(Max) -} - -func (r *rollingCounter) Sum() float64 { - return r.policy.Reduce(Sum) -} - -func (r *rollingCounter) Value() int64 { - return int64(r.Sum()) -} - -func (r *rollingCounter) Timespan() int { - r.policy.mu.RLock() - defer r.policy.mu.RUnlock() - return r.policy.timespan() -} diff --git a/pkg/stat/metric/rolling_counter_test.go b/pkg/stat/metric/rolling_counter_test.go deleted file mode 100644 index 82caa52e1..000000000 --- a/pkg/stat/metric/rolling_counter_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package metric - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func TestRollingCounterAdd(t *testing.T) { - size := 3 - bucketDuration := time.Second - opts := RollingCounterOpts{ - Size: size, - BucketDuration: bucketDuration, - } - r := NewRollingCounter(opts) - listBuckets := func() [][]float64 { - buckets := make([][]float64, 0) - r.Reduce(func(i Iterator) float64 { - for i.Next() { - bucket := i.Bucket() - buckets = append(buckets, bucket.Points) - } - return 0.0 - }) - return buckets - } - assert.Equal(t, [][]float64{{}, {}, {}}, listBuckets()) - r.Add(1) - assert.Equal(t, [][]float64{{}, {}, {1}}, listBuckets()) - time.Sleep(time.Second) - r.Add(2) - r.Add(3) - assert.Equal(t, [][]float64{{}, {1}, {5}}, listBuckets()) - time.Sleep(time.Second) - r.Add(4) - r.Add(5) - r.Add(6) - assert.Equal(t, [][]float64{{1}, {5}, {15}}, listBuckets()) - time.Sleep(time.Second) - r.Add(7) - assert.Equal(t, [][]float64{{5}, {15}, {7}}, listBuckets()) -} - -func TestRollingCounterReduce(t *testing.T) { - size := 3 - bucketDuration := time.Second - opts := RollingCounterOpts{ - Size: size, - BucketDuration: bucketDuration, - } - r := NewRollingCounter(opts) - for x := 0; x < size; x = x + 1 { - for i := 0; i <= x; i++ { - r.Add(1) - } - if x < size-1 { - time.Sleep(bucketDuration) - } - } - var result = r.Reduce(func(iterator Iterator) float64 { - var result float64 - for iterator.Next() { - bucket := iterator.Bucket() - result += bucket.Points[0] - } - return result - }) - if result != 6.0 { - t.Fatalf("Validate sum of points. 
result: %f", result) - } -} - -func TestRollingCounterDataRace(t *testing.T) { - size := 3 - bucketDuration := time.Millisecond * 10 - opts := RollingCounterOpts{ - Size: size, - BucketDuration: bucketDuration, - } - r := NewRollingCounter(opts) - var stop = make(chan bool) - go func() { - for { - select { - case <-stop: - return - default: - r.Add(1) - time.Sleep(time.Millisecond * 5) - } - } - }() - go func() { - for { - select { - case <-stop: - return - default: - _ = r.Reduce(func(i Iterator) float64 { - for i.Next() { - bucket := i.Bucket() - for range bucket.Points { - continue - } - } - return 0 - }) - } - } - }() - time.Sleep(time.Second * 3) - close(stop) -} - -func BenchmarkRollingCounterIncr(b *testing.B) { - size := 3 - bucketDuration := time.Millisecond * 100 - opts := RollingCounterOpts{ - Size: size, - BucketDuration: bucketDuration, - } - r := NewRollingCounter(opts) - b.ResetTimer() - for i := 0; i <= b.N; i++ { - r.Add(1) - } -} - -func BenchmarkRollingCounterReduce(b *testing.B) { - size := 3 - bucketDuration := time.Second - opts := RollingCounterOpts{ - Size: size, - BucketDuration: bucketDuration, - } - r := NewRollingCounter(opts) - for i := 0; i <= 10; i++ { - r.Add(1) - time.Sleep(time.Millisecond * 500) - } - b.ResetTimer() - for i := 0; i <= b.N; i++ { - var _ = r.Reduce(func(i Iterator) float64 { - var result float64 - for i.Next() { - bucket := i.Bucket() - if len(bucket.Points) != 0 { - result += bucket.Points[0] - } - } - return result - }) - } -} diff --git a/pkg/stat/metric/rolling_gauge.go b/pkg/stat/metric/rolling_gauge.go deleted file mode 100644 index c065a0228..000000000 --- a/pkg/stat/metric/rolling_gauge.go +++ /dev/null @@ -1,62 +0,0 @@ -package metric - -import "time" - -var _ Metric = &rollingGauge{} -var _ Aggregation = &rollingGauge{} - -// RollingGauge represents a ring window based on time duration. -// e.g. [[1, 2], [1, 2, 3], [1,2, 3, 4]] -type RollingGauge interface { - Metric - Aggregation - // Reduce applies the reduction function to all buckets within the window. - Reduce(func(Iterator) float64) float64 -} - -// RollingGaugeOpts contains the arguments for creating RollingGauge. -type RollingGaugeOpts struct { - Size int - BucketDuration time.Duration -} - -type rollingGauge struct { - policy *RollingPolicy -} - -// NewRollingGauge creates a new RollingGauge baseed on RollingGaugeOpts. 
-func NewRollingGauge(opts RollingGaugeOpts) RollingGauge { - window := NewWindow(WindowOpts{Size: opts.Size}) - policy := NewRollingPolicy(window, RollingPolicyOpts{BucketDuration: opts.BucketDuration}) - return &rollingGauge{ - policy: policy, - } -} - -func (r *rollingGauge) Add(val int64) { - r.policy.Append(float64(val)) -} - -func (r *rollingGauge) Reduce(f func(Iterator) float64) float64 { - return r.policy.Reduce(f) -} - -func (r *rollingGauge) Avg() float64 { - return r.policy.Reduce(Avg) -} - -func (r *rollingGauge) Min() float64 { - return r.policy.Reduce(Min) -} - -func (r *rollingGauge) Max() float64 { - return r.policy.Reduce(Max) -} - -func (r *rollingGauge) Sum() float64 { - return r.policy.Reduce(Sum) -} - -func (r *rollingGauge) Value() int64 { - return int64(r.Sum()) -} diff --git a/pkg/stat/metric/rolling_gauge_test.go b/pkg/stat/metric/rolling_gauge_test.go deleted file mode 100644 index 825ea1551..000000000 --- a/pkg/stat/metric/rolling_gauge_test.go +++ /dev/null @@ -1,192 +0,0 @@ -package metric - -import ( - "math/rand" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func TestRollingGaugeAdd(t *testing.T) { - size := 3 - bucketDuration := time.Second - opts := RollingGaugeOpts{ - Size: size, - BucketDuration: bucketDuration, - } - r := NewRollingGauge(opts) - listBuckets := func() [][]float64 { - buckets := make([][]float64, 0) - r.Reduce(func(i Iterator) float64 { - for i.Next() { - bucket := i.Bucket() - buckets = append(buckets, bucket.Points) - } - return 0.0 - }) - return buckets - } - assert.Equal(t, [][]float64{{}, {}, {}}, listBuckets()) - r.Add(1) - assert.Equal(t, [][]float64{{}, {}, {1}}, listBuckets()) - time.Sleep(time.Second) - r.Add(2) - r.Add(3) - assert.Equal(t, [][]float64{{}, {1}, {2, 3}}, listBuckets()) - time.Sleep(time.Second) - r.Add(4) - r.Add(5) - r.Add(6) - assert.Equal(t, [][]float64{{1}, {2, 3}, {4, 5, 6}}, listBuckets()) - time.Sleep(time.Second) - r.Add(7) - assert.Equal(t, [][]float64{{2, 3}, {4, 5, 6}, {7}}, listBuckets()) -} - -func TestRollingGaugeReset(t *testing.T) { - size := 3 - bucketDuration := time.Second - opts := RollingGaugeOpts{ - Size: size, - BucketDuration: bucketDuration, - } - r := NewRollingGauge(opts) - listBuckets := func() [][]float64 { - buckets := make([][]float64, 0) - r.Reduce(func(i Iterator) float64 { - for i.Next() { - bucket := i.Bucket() - buckets = append(buckets, bucket.Points) - } - return 0.0 - }) - return buckets - } - r.Add(1) - time.Sleep(time.Second) - assert.Equal(t, [][]float64{{}, {1}}, listBuckets()) - time.Sleep(time.Second) - assert.Equal(t, [][]float64{{1}}, listBuckets()) - time.Sleep(time.Second) - assert.Equal(t, [][]float64{}, listBuckets()) - - // cross window - r.Add(1) - time.Sleep(time.Second * 5) - assert.Equal(t, [][]float64{}, listBuckets()) -} - -func TestRollingGaugeReduce(t *testing.T) { - size := 3 - bucketDuration := time.Second - opts := RollingGaugeOpts{ - Size: size, - BucketDuration: bucketDuration, - } - r := NewRollingGauge(opts) - for x := 0; x < size; x = x + 1 { - for i := 0; i <= x; i++ { - r.Add(int64(i)) - } - if x < size-1 { - time.Sleep(bucketDuration) - } - } - var result = r.Reduce(func(i Iterator) float64 { - var result float64 - for i.Next() { - bucket := i.Bucket() - for _, point := range bucket.Points { - result += point - } - } - return result - }) - if result != 4.0 { - t.Fatalf("Validate sum of points. 
result: %f", result) - } -} - -func TestRollingGaugeDataRace(t *testing.T) { - size := 3 - bucketDuration := time.Second - opts := RollingGaugeOpts{ - Size: size, - BucketDuration: bucketDuration, - } - r := NewRollingGauge(opts) - var stop = make(chan bool) - go func() { - for { - select { - case <-stop: - return - default: - r.Add(rand.Int63()) - time.Sleep(time.Millisecond * 5) - } - } - }() - go func() { - for { - select { - case <-stop: - return - default: - _ = r.Reduce(func(i Iterator) float64 { - for i.Next() { - bucket := i.Bucket() - for range bucket.Points { - continue - } - } - return 0 - }) - } - } - }() - time.Sleep(time.Second * 3) - close(stop) -} - -func BenchmarkRollingGaugeIncr(b *testing.B) { - size := 10 - bucketDuration := time.Second - opts := RollingGaugeOpts{ - Size: size, - BucketDuration: bucketDuration, - } - r := NewRollingGauge(opts) - b.ResetTimer() - for i := 0; i <= b.N; i++ { - r.Add(1.0) - } -} - -func BenchmarkRollingGaugeReduce(b *testing.B) { - size := 10 - bucketDuration := time.Second - opts := RollingGaugeOpts{ - Size: size, - BucketDuration: bucketDuration, - } - r := NewRollingGauge(opts) - for i := 0; i <= 10; i++ { - r.Add(1.0) - time.Sleep(time.Millisecond * 500) - } - b.ResetTimer() - for i := 0; i <= b.N; i++ { - var _ = r.Reduce(func(i Iterator) float64 { - var result float64 - for i.Next() { - bucket := i.Bucket() - if len(bucket.Points) != 0 { - result += bucket.Points[0] - } - } - return result - }) - } -} diff --git a/pkg/stat/metric/rolling_policy.go b/pkg/stat/metric/rolling_policy.go deleted file mode 100644 index 13e5516b2..000000000 --- a/pkg/stat/metric/rolling_policy.go +++ /dev/null @@ -1,100 +0,0 @@ -package metric - -import ( - "sync" - "time" -) - -// RollingPolicy is a policy for ring window based on time duration. -// RollingPolicy moves bucket offset with time duration. -// e.g. If the last point is appended one bucket duration ago, -// RollingPolicy will increment current offset. -type RollingPolicy struct { - mu sync.RWMutex - size int - window *Window - offset int - - bucketDuration time.Duration - lastAppendTime time.Time -} - -// RollingPolicyOpts contains the arguments for creating RollingPolicy. -type RollingPolicyOpts struct { - BucketDuration time.Duration -} - -// NewRollingPolicy creates a new RollingPolicy based on the given window and RollingPolicyOpts. -func NewRollingPolicy(window *Window, opts RollingPolicyOpts) *RollingPolicy { - return &RollingPolicy{ - window: window, - size: window.Size(), - offset: 0, - - bucketDuration: opts.BucketDuration, - lastAppendTime: time.Now(), - } -} - -func (r *RollingPolicy) timespan() int { - v := int(time.Since(r.lastAppendTime) / r.bucketDuration) - if v > -1 { // maybe time backwards - return v - } - return r.size -} - -func (r *RollingPolicy) add(f func(offset int, val float64), val float64) { - r.mu.Lock() - timespan := r.timespan() - if timespan > 0 { - r.lastAppendTime = r.lastAppendTime.Add(time.Duration(timespan * int(r.bucketDuration))) - offset := r.offset - // reset the expired buckets - s := offset + 1 - if timespan > r.size { - timespan = r.size - } - e, e1 := s+timespan, 0 // e: reset offset must start from offset+1 - if e > r.size { - e1 = e - r.size - e = r.size - } - for i := s; i < e; i++ { - r.window.ResetBucket(i) - offset = i - } - for i := 0; i < e1; i++ { - r.window.ResetBucket(i) - offset = i - } - r.offset = offset - } - f(r.offset, val) - r.mu.Unlock() -} - -// Append appends the given points to the window. 
-func (r *RollingPolicy) Append(val float64) { - r.add(r.window.Append, val) -} - -// Add adds the given value to the latest point within bucket. -func (r *RollingPolicy) Add(val float64) { - r.add(r.window.Add, val) -} - -// Reduce applies the reduction function to all buckets within the window. -func (r *RollingPolicy) Reduce(f func(Iterator) float64) (val float64) { - r.mu.RLock() - timespan := r.timespan() - if count := r.size - timespan; count > 0 { - offset := r.offset + timespan + 1 - if offset >= r.size { - offset = offset - r.size - } - val = f(r.window.Iterator(offset, count)) - } - r.mu.RUnlock() - return val -} diff --git a/pkg/stat/metric/rolling_policy_test.go b/pkg/stat/metric/rolling_policy_test.go deleted file mode 100644 index 2792605f6..000000000 --- a/pkg/stat/metric/rolling_policy_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package metric - -import ( - "fmt" - "math/rand" - "testing" - "time" -) - -func GetRollingPolicy() *RollingPolicy { - w := NewWindow(WindowOpts{Size: 10}) - return NewRollingPolicy(w, RollingPolicyOpts{BucketDuration: 300 * time.Millisecond}) -} - -func Handler(t *testing.T, table []map[string][]int) { - for _, hm := range table { - var totalTs, lastOffset int - offsetAndPoints := hm["offsetAndPoints"] - timeSleep := hm["timeSleep"] - policy := GetRollingPolicy() - for i, n := range timeSleep { - totalTs += n - time.Sleep(time.Duration(n) * time.Millisecond) - policy.Add(1) - offset, points := offsetAndPoints[2*i], offsetAndPoints[2*i+1] - - if int(policy.window.window[offset].Points[0]) != points { - t.Errorf("error, time since last append: %vms, last offset: %v", totalTs, lastOffset) - } - lastOffset = offset - } - } -} - -func TestRollingPolicy_Add(t *testing.T) { - rand.Seed(time.Now().Unix()) - - // test add after 400ms and 601ms relative to the policy created time - policy := GetRollingPolicy() - time.Sleep(400 * time.Millisecond) - policy.Add(1) - time.Sleep(201 * time.Millisecond) - policy.Add(1) - for _, b := range policy.window.window { - fmt.Println(b.Points) - } - if int(policy.window.window[1].Points[0]) != 1 { - t.Errorf("error, time since last append: %vms, last offset: %v", 300, 0) - } - if int(policy.window.window[2].Points[0]) != 1 { - t.Errorf("error, time since last append: %vms, last offset: %v", 301, 0) - } - - // test func timespan return real span - table := []map[string][]int{ - { - "timeSleep": []int{294, 3200}, - "offsetAndPoints": []int{0, 1, 0, 1}, - }, - { - "timeSleep": []int{305, 3200, 6400}, - "offsetAndPoints": []int{1, 1, 1, 1, 1, 1}, - }, - } - - Handler(t, table) -} diff --git a/pkg/stat/metric/window.go b/pkg/stat/metric/window.go deleted file mode 100644 index e8a0d9844..000000000 --- a/pkg/stat/metric/window.go +++ /dev/null @@ -1,107 +0,0 @@ -package metric - -// Bucket contains multiple float64 points. -type Bucket struct { - Points []float64 - Count int64 - next *Bucket -} - -// Append appends the given value to the bucket. -func (b *Bucket) Append(val float64) { - b.Points = append(b.Points, val) - b.Count++ -} - -// Add adds the given value to the point. -func (b *Bucket) Add(offset int, val float64) { - b.Points[offset] += val - b.Count++ -} - -// Reset empties the bucket. -func (b *Bucket) Reset() { - b.Points = b.Points[:0] - b.Count = 0 -} - -// Next returns the next bucket. -func (b *Bucket) Next() *Bucket { - return b.next -} - -// Window contains multiple buckets. -type Window struct { - window []Bucket - size int -} - -// WindowOpts contains the arguments for creating Window. 
-type WindowOpts struct { - Size int -} - -// NewWindow creates a new Window based on WindowOpts. -func NewWindow(opts WindowOpts) *Window { - buckets := make([]Bucket, opts.Size) - for offset := range buckets { - buckets[offset] = Bucket{Points: make([]float64, 0)} - nextOffset := offset + 1 - if nextOffset == opts.Size { - nextOffset = 0 - } - buckets[offset].next = &buckets[nextOffset] - } - return &Window{window: buckets, size: opts.Size} -} - -// ResetWindow empties all buckets within the window. -func (w *Window) ResetWindow() { - for offset := range w.window { - w.ResetBucket(offset) - } -} - -// ResetBucket empties the bucket based on the given offset. -func (w *Window) ResetBucket(offset int) { - w.window[offset].Reset() -} - -// ResetBuckets empties the buckets based on the given offsets. -func (w *Window) ResetBuckets(offsets []int) { - for _, offset := range offsets { - w.ResetBucket(offset) - } -} - -// Append appends the given value to the bucket where index equals the given offset. -func (w *Window) Append(offset int, val float64) { - w.window[offset].Append(val) -} - -// Add adds the given value to the latest point within bucket where index equals the given offset. -func (w *Window) Add(offset int, val float64) { - if w.window[offset].Count == 0 { - w.window[offset].Append(val) - return - } - w.window[offset].Add(0, val) -} - -// Bucket returns the bucket where index equals the given offset. -func (w *Window) Bucket(offset int) Bucket { - return w.window[offset] -} - -// Size returns the size of the window. -func (w *Window) Size() int { - return w.size -} - -// Iterator returns the bucket iterator. -func (w *Window) Iterator(offset int, count int) Iterator { - return Iterator{ - count: count, - cur: &w.window[offset], - } -} diff --git a/pkg/stat/metric/window_test.go b/pkg/stat/metric/window_test.go deleted file mode 100644 index e49699a12..000000000 --- a/pkg/stat/metric/window_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package metric - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestWindowResetWindow(t *testing.T) { - opts := WindowOpts{Size: 3} - window := NewWindow(opts) - for i := 0; i < opts.Size; i++ { - window.Append(i, 1.0) - } - window.ResetWindow() - for i := 0; i < opts.Size; i++ { - assert.Equal(t, len(window.Bucket(i).Points), 0) - } -} - -func TestWindowResetBucket(t *testing.T) { - opts := WindowOpts{Size: 3} - window := NewWindow(opts) - for i := 0; i < opts.Size; i++ { - window.Append(i, 1.0) - } - window.ResetBucket(1) - assert.Equal(t, len(window.Bucket(1).Points), 0) - assert.Equal(t, window.Bucket(0).Points[0], float64(1.0)) - assert.Equal(t, window.Bucket(2).Points[0], float64(1.0)) -} - -func TestWindowResetBuckets(t *testing.T) { - opts := WindowOpts{Size: 3} - window := NewWindow(opts) - for i := 0; i < opts.Size; i++ { - window.Append(i, 1.0) - } - window.ResetBuckets([]int{0, 1, 2}) - for i := 0; i < opts.Size; i++ { - assert.Equal(t, len(window.Bucket(i).Points), 0) - } -} - -func TestWindowAppend(t *testing.T) { - opts := WindowOpts{Size: 3} - window := NewWindow(opts) - for i := 0; i < opts.Size; i++ { - window.Append(i, 1.0) - } - for i := 0; i < opts.Size; i++ { - assert.Equal(t, window.Bucket(i).Points[0], float64(1.0)) - } -} - -func TestWindowAdd(t *testing.T) { - opts := WindowOpts{Size: 3} - window := NewWindow(opts) - window.Append(0, 1.0) - window.Add(0, 1.0) - assert.Equal(t, window.Bucket(0).Points[0], float64(2.0)) -} - -func TestWindowSize(t *testing.T) { - opts := WindowOpts{Size: 3} - window := 
NewWindow(opts) - assert.Equal(t, window.Size(), 3) -} diff --git a/pkg/stat/sys/cpu/README.md b/pkg/stat/sys/cpu/README.md deleted file mode 100644 index 1028e9e03..000000000 --- a/pkg/stat/sys/cpu/README.md +++ /dev/null @@ -1,7 +0,0 @@ -## stat/sys - -System Information - -## 项目简介 - -获取Linux平台下的系统信息,包括cpu主频、cpu使用率等 diff --git a/pkg/stat/sys/cpu/cgroup.go b/pkg/stat/sys/cpu/cgroup.go deleted file mode 100644 index b297f611f..000000000 --- a/pkg/stat/sys/cpu/cgroup.go +++ /dev/null @@ -1,126 +0,0 @@ -package cpu - -import ( - "bufio" - "fmt" - "io" - "os" - "path" - "strconv" - "strings" -) - -const cgroupRootDir = "/sys/fs/cgroup" - -// cgroup Linux cgroup -type cgroup struct { - cgroupSet map[string]string -} - -// CPUCFSQuotaUs cpu.cfs_quota_us -func (c *cgroup) CPUCFSQuotaUs() (int64, error) { - data, err := readFile(path.Join(c.cgroupSet["cpu"], "cpu.cfs_quota_us")) - if err != nil { - return 0, err - } - return strconv.ParseInt(data, 10, 64) -} - -// CPUCFSPeriodUs cpu.cfs_period_us -func (c *cgroup) CPUCFSPeriodUs() (uint64, error) { - data, err := readFile(path.Join(c.cgroupSet["cpu"], "cpu.cfs_period_us")) - if err != nil { - return 0, err - } - return parseUint(data) -} - -// CPUAcctUsage cpuacct.usage -func (c *cgroup) CPUAcctUsage() (uint64, error) { - data, err := readFile(path.Join(c.cgroupSet["cpuacct"], "cpuacct.usage")) - if err != nil { - return 0, err - } - return parseUint(data) -} - -// CPUAcctUsagePerCPU cpuacct.usage_percpu -func (c *cgroup) CPUAcctUsagePerCPU() ([]uint64, error) { - data, err := readFile(path.Join(c.cgroupSet["cpuacct"], "cpuacct.usage_percpu")) - if err != nil { - return nil, err - } - var usage []uint64 - for _, v := range strings.Fields(string(data)) { - var u uint64 - if u, err = parseUint(v); err != nil { - return nil, err - } - // fix possible_cpu:https://www.ibm.com/support/knowledgecenter/en/linuxonibm/com.ibm.linux.z.lgdd/lgdd_r_posscpusparm.html - if u != 0 { - usage = append(usage, u) - } - } - return usage, nil -} - -// CPUSetCPUs cpuset.cpus -func (c *cgroup) CPUSetCPUs() ([]uint64, error) { - data, err := readFile(path.Join(c.cgroupSet["cpuset"], "cpuset.cpus")) - if err != nil { - return nil, err - } - cpus, err := ParseUintList(data) - if err != nil { - return nil, err - } - var sets []uint64 - for k := range cpus { - sets = append(sets, uint64(k)) - } - return sets, nil -} - -// CurrentcGroup get current process cgroup -func currentcGroup() (*cgroup, error) { - pid := os.Getpid() - cgroupFile := fmt.Sprintf("/proc/%d/cgroup", pid) - cgroupSet := make(map[string]string) - fp, err := os.Open(cgroupFile) - if err != nil { - return nil, err - } - defer fp.Close() - buf := bufio.NewReader(fp) - for { - line, err := buf.ReadString('\n') - if err != nil { - if err == io.EOF { - break - } - return nil, err - } - col := strings.Split(strings.TrimSpace(line), ":") - if len(col) != 3 { - return nil, fmt.Errorf("invalid cgroup format %s", line) - } - dir := col[2] - // When dir is not equal to /, it must be in docker - if dir != "/" { - cgroupSet[col[1]] = path.Join(cgroupRootDir, col[1]) - if strings.Contains(col[1], ",") { - for _, k := range strings.Split(col[1], ",") { - cgroupSet[k] = path.Join(cgroupRootDir, k) - } - } - } else { - cgroupSet[col[1]] = path.Join(cgroupRootDir, col[1], col[2]) - if strings.Contains(col[1], ",") { - for _, k := range strings.Split(col[1], ",") { - cgroupSet[k] = path.Join(cgroupRootDir, k, col[2]) - } - } - } - } - return &cgroup{cgroupSet: cgroupSet}, nil -} diff --git a/pkg/stat/sys/cpu/cgroupCPU.go 
b/pkg/stat/sys/cpu/cgroupCPU.go deleted file mode 100644 index db1372ae9..000000000 --- a/pkg/stat/sys/cpu/cgroupCPU.go +++ /dev/null @@ -1,250 +0,0 @@ -package cpu - -import ( - "bufio" - "fmt" - "os" - "strconv" - "strings" - - "github.com/pkg/errors" - pscpu "github.com/shirou/gopsutil/cpu" -) - -type cgroupCPU struct { - frequency uint64 - quota float64 - cores uint64 - - preSystem uint64 - preTotal uint64 - usage uint64 -} - -func newCgroupCPU() (cpu *cgroupCPU, err error) { - var cores int - cores, err = pscpu.Counts(true) - if err != nil || cores == 0 { - var cpus []uint64 - cpus, err = perCPUUsage() - if err != nil { - err = errors.Errorf("perCPUUsage() failed!err:=%v", err) - return - } - cores = len(cpus) - } - - sets, err := cpuSets() - if err != nil { - err = errors.Errorf("cpuSets() failed!err:=%v", err) - return - } - quota := float64(len(sets)) - cq, err := cpuQuota() - if err == nil && cq != -1 { - var period uint64 - if period, err = cpuPeriod(); err != nil { - err = errors.Errorf("cpuPeriod() failed!err:=%v", err) - return - } - limit := float64(cq) / float64(period) - if limit < quota { - quota = limit - } - } - maxFreq := cpuMaxFreq() - - preSystem, err := systemCPUUsage() - if err != nil { - err = errors.Errorf("systemCPUUsage() failed!err:=%v", err) - return - } - preTotal, err := totalCPUUsage() - if err != nil { - err = errors.Errorf("totalCPUUsage() failed!err:=%v", err) - return - } - cpu = &cgroupCPU{ - frequency: maxFreq, - quota: quota, - cores: uint64(cores), - preSystem: preSystem, - preTotal: preTotal, - } - return -} - -func (cpu *cgroupCPU) Usage() (u uint64, err error) { - var ( - total uint64 - system uint64 - ) - total, err = totalCPUUsage() - if err != nil { - return - } - system, err = systemCPUUsage() - if err != nil { - return - } - if system != cpu.preSystem { - u = uint64(float64((total-cpu.preTotal)*cpu.cores*1e3) / (float64(system-cpu.preSystem) * cpu.quota)) - } - cpu.preSystem = system - cpu.preTotal = total - return -} - -func (cpu *cgroupCPU) Info() Info { - return Info{ - Frequency: cpu.frequency, - Quota: cpu.quota, - } -} - -const nanoSecondsPerSecond = 1e9 - -// ErrNoCFSLimit is no quota limit -var ErrNoCFSLimit = errors.Errorf("no quota limit") - -var clockTicksPerSecond = uint64(getClockTicks()) - -// systemCPUUsage returns the host system's cpu usage in -// nanoseconds. An error is returned if the format of the underlying -// file does not match. -// -// Uses /proc/stat defined by POSIX. Looks for the cpu -// statistics line and then sums up the first seven fields -// provided. See man 5 proc for details on specific field -// information. 
-func systemCPUUsage() (usage uint64, err error) { - var ( - line string - f *os.File - ) - if f, err = os.Open("/proc/stat"); err != nil { - return - } - bufReader := bufio.NewReaderSize(nil, 128) - defer func() { - bufReader.Reset(nil) - f.Close() - }() - bufReader.Reset(f) - for err == nil { - if line, err = bufReader.ReadString('\n'); err != nil { - err = errors.WithStack(err) - return - } - parts := strings.Fields(line) - switch parts[0] { - case "cpu": - if len(parts) < 8 { - err = errors.WithStack(fmt.Errorf("bad format of cpu stats")) - return - } - var totalClockTicks uint64 - for _, i := range parts[1:8] { - var v uint64 - if v, err = strconv.ParseUint(i, 10, 64); err != nil { - err = errors.WithStack(fmt.Errorf("error parsing cpu stats")) - return - } - totalClockTicks += v - } - usage = (totalClockTicks * nanoSecondsPerSecond) / clockTicksPerSecond - return - } - } - err = errors.Errorf("bad stats format") - return -} - -func totalCPUUsage() (usage uint64, err error) { - var cg *cgroup - if cg, err = currentcGroup(); err != nil { - return - } - return cg.CPUAcctUsage() -} - -func perCPUUsage() (usage []uint64, err error) { - var cg *cgroup - if cg, err = currentcGroup(); err != nil { - return - } - return cg.CPUAcctUsagePerCPU() -} - -func cpuSets() (sets []uint64, err error) { - var cg *cgroup - if cg, err = currentcGroup(); err != nil { - return - } - return cg.CPUSetCPUs() -} - -func cpuQuota() (quota int64, err error) { - var cg *cgroup - if cg, err = currentcGroup(); err != nil { - return - } - return cg.CPUCFSQuotaUs() -} - -func cpuPeriod() (peroid uint64, err error) { - var cg *cgroup - if cg, err = currentcGroup(); err != nil { - return - } - return cg.CPUCFSPeriodUs() -} - -func cpuFreq() uint64 { - lines, err := readLines("/proc/cpuinfo") - if err != nil { - return 0 - } - for _, line := range lines { - fields := strings.Split(line, ":") - if len(fields) < 2 { - continue - } - key := strings.TrimSpace(fields[0]) - value := strings.TrimSpace(fields[1]) - if key == "cpu MHz" || key == "clock" { - // treat this as the fallback value, thus we ignore error - if t, err := strconv.ParseFloat(strings.Replace(value, "MHz", "", 1), 64); err == nil { - return uint64(t * 1000.0 * 1000.0) - } - } - } - return 0 -} - -func cpuMaxFreq() uint64 { - feq := cpuFreq() - data, err := readFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq") - if err != nil { - return feq - } - // override the max freq from /proc/cpuinfo - cfeq, err := parseUint(data) - if err == nil { - feq = cfeq - } - return feq -} - -//GetClockTicks get the OS's ticks per second -func getClockTicks() int { - // TODO figure out a better alternative for platforms where we're missing cgo - // - // TODO Windows. This could be implemented using Win32 QueryPerformanceFrequency(). - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms644905(v=vs.85).aspx - // - // An example of its usage can be found here. 
- // https://msdn.microsoft.com/en-us/library/windows/desktop/dn553408(v=vs.85).aspx - - return 100 -} diff --git a/pkg/stat/sys/cpu/cgroup_test.go b/pkg/stat/sys/cpu/cgroup_test.go deleted file mode 100644 index 9fbb1d151..000000000 --- a/pkg/stat/sys/cpu/cgroup_test.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build linux - -package cpu - -import ( - "testing" -) - -func TestCGroup(t *testing.T) { - // TODO -} diff --git a/pkg/stat/sys/cpu/cpu.go b/pkg/stat/sys/cpu/cpu.go deleted file mode 100644 index 92d8a5c78..000000000 --- a/pkg/stat/sys/cpu/cpu.go +++ /dev/null @@ -1,68 +0,0 @@ -package cpu - -import ( - "fmt" - "sync/atomic" - "time" -) - -const ( - interval time.Duration = time.Millisecond * 500 -) - -var ( - stats CPU - usage uint64 -) - -// CPU is cpu stat usage. -type CPU interface { - Usage() (u uint64, e error) - Info() Info -} - -func init() { - var ( - err error - ) - stats, err = newCgroupCPU() - if err != nil { - // fmt.Printf("cgroup cpu init failed(%v),switch to psutil cpu\n", err) - stats, err = newPsutilCPU(interval) - if err != nil { - panic(fmt.Sprintf("cgroup cpu init failed!err:=%v", err)) - } - } - go func() { - ticker := time.NewTicker(interval) - defer ticker.Stop() - for { - <-ticker.C - u, err := stats.Usage() - if err == nil && u != 0 { - atomic.StoreUint64(&usage, u) - } - } - }() -} - -// Stat cpu stat. -type Stat struct { - Usage uint64 // cpu use ratio. -} - -// Info cpu info. -type Info struct { - Frequency uint64 - Quota float64 -} - -// ReadStat read cpu stat. -func ReadStat(stat *Stat) { - stat.Usage = atomic.LoadUint64(&usage) -} - -// GetInfo get cpu info. -func GetInfo() Info { - return stats.Info() -} diff --git a/pkg/stat/sys/cpu/cpu_test.go b/pkg/stat/sys/cpu/cpu_test.go deleted file mode 100644 index 336abe8ff..000000000 --- a/pkg/stat/sys/cpu/cpu_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package cpu - -import ( - "fmt" - "testing" - "time" -) - -func Test_CPUUsage(t *testing.T) { - var stat Stat - ReadStat(&stat) - fmt.Println(stat) - time.Sleep(time.Millisecond * 1000) - for i := 0; i < 6; i++ { - time.Sleep(time.Millisecond * 500) - ReadStat(&stat) - if stat.Usage == 0 { - t.Fatalf("get cpu failed!cpu usage is zero!") - } - fmt.Println(stat) - } -} diff --git a/pkg/stat/sys/cpu/psutilCPU.go b/pkg/stat/sys/cpu/psutilCPU.go deleted file mode 100644 index 8d64f8dcc..000000000 --- a/pkg/stat/sys/cpu/psutilCPU.go +++ /dev/null @@ -1,45 +0,0 @@ -package cpu - -import ( - "time" - - "github.com/shirou/gopsutil/cpu" -) - -type psutilCPU struct { - interval time.Duration -} - -func newPsutilCPU(interval time.Duration) (cpu *psutilCPU, err error) { - cpu = &psutilCPU{interval: interval} - _, err = cpu.Usage() - if err != nil { - return - } - return -} - -func (ps *psutilCPU) Usage() (u uint64, err error) { - var percents []float64 - percents, err = cpu.Percent(ps.interval, false) - if err == nil { - u = uint64(percents[0] * 10) - } - return -} - -func (ps *psutilCPU) Info() (info Info) { - stats, err := cpu.Info() - if err != nil { - return - } - cores, err := cpu.Counts(true) - if err != nil { - return - } - info = Info{ - Frequency: uint64(stats[0].Mhz), - Quota: float64(cores), - } - return -} diff --git a/pkg/stat/sys/cpu/stat_test.go b/pkg/stat/sys/cpu/stat_test.go deleted file mode 100644 index ed2783043..000000000 --- a/pkg/stat/sys/cpu/stat_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package cpu - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func TestStat(t *testing.T) { - time.Sleep(time.Second * 2) - var s Stat - var i 
Info - ReadStat(&s) - i = GetInfo() - - assert.NotZero(t, s.Usage) - assert.NotZero(t, i.Frequency) - assert.NotZero(t, i.Quota) -} diff --git a/pkg/stat/sys/cpu/util.go b/pkg/stat/sys/cpu/util.go deleted file mode 100644 index 25df1f9d1..000000000 --- a/pkg/stat/sys/cpu/util.go +++ /dev/null @@ -1,121 +0,0 @@ -package cpu - -import ( - "bufio" - "io/ioutil" - "os" - "strconv" - "strings" - - "github.com/pkg/errors" -) - -func readFile(path string) (string, error) { - contents, err := ioutil.ReadFile(path) - if err != nil { - return "", errors.Wrapf(err, "os/stat: read file(%s) failed!", path) - } - return strings.TrimSpace(string(contents)), nil -} - -func parseUint(s string) (uint64, error) { - v, err := strconv.ParseUint(s, 10, 64) - if err != nil { - intValue, intErr := strconv.ParseInt(s, 10, 64) - // 1. Handle negative values greater than MinInt64 (and) - // 2. Handle negative values lesser than MinInt64 - if intErr == nil && intValue < 0 { - return 0, nil - } else if intErr != nil && - intErr.(*strconv.NumError).Err == strconv.ErrRange && - intValue < 0 { - return 0, nil - } - return 0, errors.Wrapf(err, "os/stat: parseUint(%s) failed!", s) - } - return v, nil -} - -// ParseUintList parses and validates the specified string as the value -// found in some cgroup file (e.g. cpuset.cpus, cpuset.mems), which could be -// one of the formats below. Note that duplicates are actually allowed in the -// input string. It returns a map[int]bool with available elements from val -// set to true. -// Supported formats: -// 7 -// 1-6 -// 0,3-4,7,8-10 -// 0-0,0,1-7 -// 03,1-3 <- this is gonna get parsed as [1,2,3] -// 3,2,1 -// 0-2,3,1 -func ParseUintList(val string) (map[int]bool, error) { - if val == "" { - return map[int]bool{}, nil - } - - availableInts := make(map[int]bool) - split := strings.Split(val, ",") - errInvalidFormat := errors.Errorf("os/stat: invalid format: %s", val) - for _, r := range split { - if !strings.Contains(r, "-") { - v, err := strconv.Atoi(r) - if err != nil { - return nil, errInvalidFormat - } - availableInts[v] = true - } else { - split := strings.SplitN(r, "-", 2) - min, err := strconv.Atoi(split[0]) - if err != nil { - return nil, errInvalidFormat - } - max, err := strconv.Atoi(split[1]) - if err != nil { - return nil, errInvalidFormat - } - if max < min { - return nil, errInvalidFormat - } - for i := min; i <= max; i++ { - availableInts[i] = true - } - } - } - return availableInts, nil -} - -// ReadLines reads contents from a file and splits them by new lines. -// A convenience wrapper to ReadLinesOffsetN(filename, 0, -1). -func readLines(filename string) ([]string, error) { - return readLinesOffsetN(filename, 0, -1) -} - -// ReadLinesOffsetN reads contents from file and splits them by new line. -// The offset tells at which line number to start. 
-// The count determines the number of lines to read (starting from offset): -// n >= 0: at most n lines -// n < 0: whole file -func readLinesOffsetN(filename string, offset uint, n int) ([]string, error) { - f, err := os.Open(filename) - if err != nil { - return []string{""}, err - } - defer f.Close() - - var ret []string - - r := bufio.NewReader(f) - for i := 0; i < n+int(offset) || n < 0; i++ { - line, err := r.ReadString('\n') - if err != nil { - break - } - if i < int(offset) { - continue - } - ret = append(ret, strings.Trim(line, "\n")) - } - - return ret, nil -} diff --git a/pkg/str/str.go b/pkg/str/str.go deleted file mode 100644 index 8edc95942..000000000 --- a/pkg/str/str.go +++ /dev/null @@ -1,55 +0,0 @@ -package str - -import ( - "bytes" - "strconv" - "strings" - "sync" -) - -var ( - bfPool = sync.Pool{ - New: func() interface{} { - return bytes.NewBuffer([]byte{}) - }, - } -) - -// JoinInts format int64 slice like:n1,n2,n3. -func JoinInts(is []int64) string { - if len(is) == 0 { - return "" - } - if len(is) == 1 { - return strconv.FormatInt(is[0], 10) - } - buf := bfPool.Get().(*bytes.Buffer) - for _, i := range is { - buf.WriteString(strconv.FormatInt(i, 10)) - buf.WriteByte(',') - } - if buf.Len() > 0 { - buf.Truncate(buf.Len() - 1) - } - s := buf.String() - buf.Reset() - bfPool.Put(buf) - return s -} - -// SplitInts split string into int64 slice. -func SplitInts(s string) ([]int64, error) { - if s == "" { - return nil, nil - } - sArr := strings.Split(s, ",") - res := make([]int64, 0, len(sArr)) - for _, sc := range sArr { - i, err := strconv.ParseInt(sc, 10, 64) - if err != nil { - return nil, err - } - res = append(res, i) - } - return res, nil -} diff --git a/pkg/str/str_test.go b/pkg/str/str_test.go deleted file mode 100644 index bd009370c..000000000 --- a/pkg/str/str_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package str - -import ( - "testing" -) - -func TestJoinInts(t *testing.T) { - // test empty slice - is := []int64{} - s := JoinInts(is) - if s != "" { - t.Errorf("input:%v,output:%s,result is incorrect", is, s) - } else { - t.Logf("input:%v,output:%s", is, s) - } - // test len(slice)==1 - is = []int64{1} - s = JoinInts(is) - if s != "1" { - t.Errorf("input:%v,output:%s,result is incorrect", is, s) - } else { - t.Logf("input:%v,output:%s", is, s) - } - // test len(slice)>1 - is = []int64{1, 2, 3} - s = JoinInts(is) - if s != "1,2,3" { - t.Errorf("input:%v,output:%s,result is incorrect", is, s) - } else { - t.Logf("input:%v,output:%s", is, s) - } -} - -func TestSplitInts(t *testing.T) { - // test empty slice - s := "" - is, err := SplitInts(s) - if err != nil || len(is) != 0 { - t.Error(err) - } - // test split int64 - s = "1,2,3" - is, err = SplitInts(s) - if err != nil || len(is) != 3 { - t.Error(err) - } -} - -func BenchmarkJoinInts(b *testing.B) { - is := make([]int64, 10000, 10000) - for i := int64(0); i < 10000; i++ { - is[i] = i - } - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - JoinInts(is) - } - }) -} diff --git a/pkg/sync/errgroup/README.md b/pkg/sync/errgroup/README.md deleted file mode 100644 index d665400da..000000000 --- a/pkg/sync/errgroup/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# errgroup - -提供带recover和并行数的errgroup,err中包含详细堆栈信息 diff --git a/pkg/sync/errgroup/doc.go b/pkg/sync/errgroup/doc.go deleted file mode 100644 index b35cff432..000000000 --- a/pkg/sync/errgroup/doc.go +++ /dev/null @@ -1,47 +0,0 @@ -// Package errgroup provides synchronization, error propagation, and Context -// errgroup 包为一组子任务的 goroutine 提供了 goroutine 
同步,错误取消功能. -// -//errgroup 包含三种常用方式 -// -//1、直接使用 此时不会因为一个任务失败导致所有任务被 cancel: -// g := &errgroup.Group{} -// g.Go(func(ctx context.Context) { -// // NOTE: 此时 ctx 为 context.Background() -// // do something -// }) -// -//2、WithContext 使用 WithContext 时不会因为一个任务失败导致所有任务被 cancel: -// g := errgroup.WithContext(ctx) -// g.Go(func(ctx context.Context) { -// // NOTE: 此时 ctx 为 errgroup.WithContext 传递的 ctx -// // do something -// }) -// -//3、WithCancel 使用 WithCancel 时如果有一个人任务失败会导致所有*未进行或进行中*的任务被 cancel: -// g := errgroup.WithCancel(ctx) -// g.Go(func(ctx context.Context) { -// // NOTE: 此时 ctx 是从 errgroup.WithContext 传递的 ctx 派生出的 ctx -// // do something -// }) -// -//设置最大并行数 GOMAXPROCS 对以上三种使用方式均起效 -//NOTE: 由于 errgroup 实现问题,设定 GOMAXPROCS 的 errgroup 需要立即调用 Wait() 例如: -// -// g := errgroup.WithCancel(ctx) -// g.GOMAXPROCS(2) -// // task1 -// g.Go(func(ctx context.Context) { -// fmt.Println("task1") -// }) -// // task2 -// g.Go(func(ctx context.Context) { -// fmt.Println("task2") -// }) -// // task3 -// g.Go(func(ctx context.Context) { -// fmt.Println("task3") -// }) -// // NOTE: 此时设置的 GOMAXPROCS 为2, 添加了三个任务 task1, task2, task3 此时 task3 是不会运行的! -// // 只有调用了 Wait task3 才有运行的机会 -// g.Wait() // task3 运行 -package errgroup diff --git a/pkg/sync/errgroup/errgroup.go b/pkg/sync/errgroup/errgroup.go deleted file mode 100644 index c795e1141..000000000 --- a/pkg/sync/errgroup/errgroup.go +++ /dev/null @@ -1,119 +0,0 @@ -package errgroup - -import ( - "context" - "fmt" - "runtime" - "sync" -) - -// A Group is a collection of goroutines working on subtasks that are part of -// the same overall task. -// -// A zero Group is valid and does not cancel on error. -type Group struct { - err error - wg sync.WaitGroup - errOnce sync.Once - - workerOnce sync.Once - ch chan func(ctx context.Context) error - chs []func(ctx context.Context) error - - ctx context.Context - cancel func() -} - -// WithContext create a Group. -// given function from Go will receive this context, -func WithContext(ctx context.Context) *Group { - return &Group{ctx: ctx} -} - -// WithCancel create a new Group and an associated Context derived from ctx. -// -// given function from Go will receive context derived from this ctx, -// The derived Context is canceled the first time a function passed to Go -// returns a non-nil error or the first time Wait returns, whichever occurs -// first. -func WithCancel(ctx context.Context) *Group { - ctx, cancel := context.WithCancel(ctx) - return &Group{ctx: ctx, cancel: cancel} -} - -func (g *Group) do(f func(ctx context.Context) error) { - ctx := g.ctx - if ctx == nil { - ctx = context.Background() - } - var err error - defer func() { - if r := recover(); r != nil { - buf := make([]byte, 64<<10) - buf = buf[:runtime.Stack(buf, false)] - err = fmt.Errorf("errgroup: panic recovered: %s\n%s", r, buf) - } - if err != nil { - g.errOnce.Do(func() { - g.err = err - if g.cancel != nil { - g.cancel() - } - }) - } - g.wg.Done() - }() - err = f(ctx) -} - -// GOMAXPROCS set max goroutine to work. -func (g *Group) GOMAXPROCS(n int) { - if n <= 0 { - panic("errgroup: GOMAXPROCS must great than 0") - } - g.workerOnce.Do(func() { - g.ch = make(chan func(context.Context) error, n) - for i := 0; i < n; i++ { - go func() { - for f := range g.ch { - g.do(f) - } - }() - } - }) -} - -// Go calls the given function in a new goroutine. -// -// The first call to return a non-nil error cancels the group; its error will be -// returned by Wait. 
-func (g *Group) Go(f func(ctx context.Context) error) { - g.wg.Add(1) - if g.ch != nil { - select { - case g.ch <- f: - default: - g.chs = append(g.chs, f) - } - return - } - go g.do(f) -} - -// Wait blocks until all function calls from the Go method have returned, then -// returns the first non-nil error (if any) from them. -func (g *Group) Wait() error { - if g.ch != nil { - for _, f := range g.chs { - g.ch <- f - } - } - g.wg.Wait() - if g.ch != nil { - close(g.ch) // let all receiver exit - } - if g.cancel != nil { - g.cancel() - } - return g.err -} diff --git a/pkg/sync/errgroup/errgroup_test.go b/pkg/sync/errgroup/errgroup_test.go deleted file mode 100644 index bb050c160..000000000 --- a/pkg/sync/errgroup/errgroup_test.go +++ /dev/null @@ -1,266 +0,0 @@ -package errgroup - -import ( - "context" - "errors" - "fmt" - "math" - "net/http" - "os" - "testing" - "time" -) - -type ABC struct { - CBA int -} - -func TestNormal(t *testing.T) { - var ( - abcs = make(map[int]*ABC) - g Group - err error - ) - for i := 0; i < 10; i++ { - abcs[i] = &ABC{CBA: i} - } - g.Go(func(context.Context) (err error) { - abcs[1].CBA++ - return - }) - g.Go(func(context.Context) (err error) { - abcs[2].CBA++ - return - }) - if err = g.Wait(); err != nil { - t.Log(err) - } - t.Log(abcs) -} - -func sleep1s(context.Context) error { - time.Sleep(time.Second) - return nil -} - -func TestGOMAXPROCS(t *testing.T) { - // 没有并发数限制 - g := Group{} - now := time.Now() - g.Go(sleep1s) - g.Go(sleep1s) - g.Go(sleep1s) - g.Go(sleep1s) - g.Wait() - sec := math.Round(time.Since(now).Seconds()) - if sec != 1 { - t.FailNow() - } - // 限制并发数 - g2 := Group{} - g2.GOMAXPROCS(2) - now = time.Now() - g2.Go(sleep1s) - g2.Go(sleep1s) - g2.Go(sleep1s) - g2.Go(sleep1s) - g2.Wait() - sec = math.Round(time.Since(now).Seconds()) - if sec != 2 { - t.FailNow() - } - // context canceled - var canceled bool - g3 := WithCancel(context.Background()) - g3.GOMAXPROCS(2) - g3.Go(func(context.Context) error { - return fmt.Errorf("error for testing errgroup context") - }) - g3.Go(func(ctx context.Context) error { - time.Sleep(time.Second) - select { - case <-ctx.Done(): - canceled = true - default: - } - return nil - }) - g3.Wait() - if !canceled { - t.FailNow() - } -} - -func TestRecover(t *testing.T) { - var ( - abcs = make(map[int]*ABC) - g Group - err error - ) - g.Go(func(context.Context) (err error) { - abcs[1].CBA++ - return - }) - g.Go(func(context.Context) (err error) { - abcs[2].CBA++ - return - }) - if err = g.Wait(); err != nil { - t.Logf("error:%+v", err) - return - } - t.FailNow() -} - -func TestRecover2(t *testing.T) { - var ( - g Group - err error - ) - g.Go(func(context.Context) (err error) { - panic("2233") - }) - if err = g.Wait(); err != nil { - t.Logf("error:%+v", err) - return - } - t.FailNow() -} - -var ( - Web = fakeSearch("web") - Image = fakeSearch("image") - Video = fakeSearch("video") -) - -type Result string -type Search func(ctx context.Context, query string) (Result, error) - -func fakeSearch(kind string) Search { - return func(_ context.Context, query string) (Result, error) { - return Result(fmt.Sprintf("%s result for %q", kind, query)), nil - } -} - -// JustErrors illustrates the use of a Group in place of a sync.WaitGroup to -// simplify goroutine counting and error handling. This example is derived from -// the sync.WaitGroup example at https://golang.org/pkg/sync/#example_WaitGroup. 
-func ExampleGroup_justErrors() { - var g Group - var urls = []string{ - "http://www.golang.org/", - "http://www.google.com/", - "http://www.somestupidname.com/", - } - for _, url := range urls { - // Launch a goroutine to fetch the URL. - url := url // https://golang.org/doc/faq#closures_and_goroutines - g.Go(func(context.Context) error { - // Fetch the URL. - resp, err := http.Get(url) - if err == nil { - resp.Body.Close() - } - return err - }) - } - // Wait for all HTTP fetches to complete. - if err := g.Wait(); err == nil { - fmt.Println("Successfully fetched all URLs.") - } -} - -// Parallel illustrates the use of a Group for synchronizing a simple parallel -// task: the "Google Search 2.0" function from -// https://talks.golang.org/2012/concurrency.slide#46, augmented with a Context -// and error-handling. -func ExampleGroup_parallel() { - Google := func(ctx context.Context, query string) ([]Result, error) { - g := WithContext(ctx) - - searches := []Search{Web, Image, Video} - results := make([]Result, len(searches)) - for i, search := range searches { - i, search := i, search // https://golang.org/doc/faq#closures_and_goroutines - g.Go(func(context.Context) error { - result, err := search(ctx, query) - if err == nil { - results[i] = result - } - return err - }) - } - if err := g.Wait(); err != nil { - return nil, err - } - return results, nil - } - - results, err := Google(context.Background(), "golang") - if err != nil { - fmt.Fprintln(os.Stderr, err) - return - } - for _, result := range results { - fmt.Println(result) - } - - // Output: - // web result for "golang" - // image result for "golang" - // video result for "golang" -} - -func TestZeroGroup(t *testing.T) { - err1 := errors.New("errgroup_test: 1") - err2 := errors.New("errgroup_test: 2") - - cases := []struct { - errs []error - }{ - {errs: []error{}}, - {errs: []error{nil}}, - {errs: []error{err1}}, - {errs: []error{err1, nil}}, - {errs: []error{err1, nil, err2}}, - } - - for _, tc := range cases { - var g Group - - var firstErr error - for i, err := range tc.errs { - err := err - g.Go(func(context.Context) error { return err }) - - if firstErr == nil && err != nil { - firstErr = err - } - - if gErr := g.Wait(); gErr != firstErr { - t.Errorf("after g.Go(func() error { return err }) for err in %v\n"+ - "g.Wait() = %v; want %v", tc.errs[:i+1], err, firstErr) - } - } - } -} - -func TestWithCancel(t *testing.T) { - g := WithCancel(context.Background()) - g.Go(func(ctx context.Context) error { - time.Sleep(100 * time.Millisecond) - return fmt.Errorf("boom") - }) - var doneErr error - g.Go(func(ctx context.Context) error { - select { - case <-ctx.Done(): - doneErr = ctx.Err() - } - return doneErr - }) - g.Wait() - if doneErr != context.Canceled { - t.Error("error should be Canceled") - } -} diff --git a/pkg/sync/errgroup/example_test.go b/pkg/sync/errgroup/example_test.go deleted file mode 100644 index e59da4b57..000000000 --- a/pkg/sync/errgroup/example_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package errgroup - -import ( - "context" -) - -func fakeRunTask(ctx context.Context) error { - return nil -} - -func ExampleGroup_group() { - g := Group{} - g.Go(fakeRunTask) - g.Go(fakeRunTask) - if err := g.Wait(); err != nil { - // handle err - } -} - -func ExampleGroup_ctx() { - g := WithContext(context.Background()) - g.Go(fakeRunTask) - g.Go(fakeRunTask) - if err := g.Wait(); err != nil { - // handle err - } -} - -func ExampleGroup_cancel() { - g := WithCancel(context.Background()) - g.Go(fakeRunTask) - g.Go(fakeRunTask) - if err 
:= g.Wait(); err != nil { - // handle err - } -} - -func ExampleGroup_maxproc() { - g := Group{} - // set max concurrency - g.GOMAXPROCS(2) - g.Go(fakeRunTask) - g.Go(fakeRunTask) - if err := g.Wait(); err != nil { - // handle err - } -} diff --git a/pkg/sync/pipeline/CHANGELOG.md b/pkg/sync/pipeline/CHANGELOG.md deleted file mode 100755 index 2120a2356..000000000 --- a/pkg/sync/pipeline/CHANGELOG.md +++ /dev/null @@ -1,9 +0,0 @@ -### pipeline - -#### Version 1.2.0 -> 1. 默认为平滑触发事件 -> 2. 增加metric上报 -#### Version 1.1.0 -> 1. 增加平滑时间的支持 -#### Version 1.0.0 -> 1. 提供聚合方法 内部区分压测流量 diff --git a/pkg/sync/pipeline/README.md b/pkg/sync/pipeline/README.md deleted file mode 100644 index b056c0901..000000000 --- a/pkg/sync/pipeline/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# pkg/sync/pipeline - -提供内存批量聚合工具 diff --git a/pkg/sync/pipeline/fanout/CHANGELOG.md b/pkg/sync/pipeline/fanout/CHANGELOG.md deleted file mode 100755 index c289a6425..000000000 --- a/pkg/sync/pipeline/fanout/CHANGELOG.md +++ /dev/null @@ -1,6 +0,0 @@ -### pipeline/fanout - -#### Version 1.1.0 -> 1. 增加处理速度metric上报 -#### Version 1.0.0 -> 1. library/cache包改为fanout diff --git a/pkg/sync/pipeline/fanout/README.md b/pkg/sync/pipeline/fanout/README.md deleted file mode 100644 index cf88ddd8e..000000000 --- a/pkg/sync/pipeline/fanout/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# pkg/sync/pipeline/fanout - -功能: - -* 支持定义Worker 数量的goroutine,进行消费 -* 内部支持的元数据传递(pkg/net/metadata) - -示例: -```golang -//名称为cache 执行线程为1 buffer长度为1024 -cache := fanout.New("cache", fanout.Worker(1), fanout.Buffer(1024)) -cache.Do(c, func(c context.Context) { SomeFunc(c, args...) }) -cache.Close() -``` \ No newline at end of file diff --git a/pkg/sync/pipeline/fanout/example_test.go b/pkg/sync/pipeline/fanout/example_test.go deleted file mode 100644 index 5de973199..000000000 --- a/pkg/sync/pipeline/fanout/example_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package fanout - -import "context" - -// addCache 加缓存的例子 -func addCache(c context.Context, id, value int) { - // some thing... -} - -func Example() { - // 这里只是举个例子 真正使用的时候 应该用bm/rpc 传过来的context - var c = context.Background() - // 新建一个fanout 对象 名称为cache 名称主要用来上报监控和打日志使用 最好不要重复 - // (可选参数) worker数量为1 表示后台只有1个线程在工作 - // (可选参数) buffer 为1024 表示缓存chan长度为1024 如果chan慢了 再调用Do方法就会报错 设定长度主要为了防止OOM - cache := New("cache", Worker(1), Buffer(1024)) - // 需要异步执行的方法 - // 这里传进来的c里面的meta信息会被复制 超时会忽略 addCache拿到的context已经没有超时信息了 - cache.Do(c, func(c context.Context) { addCache(c, 0, 0) }) - // 程序结束的时候关闭fanout 会等待后台线程完成后返回 - cache.Close() -} diff --git a/pkg/sync/pipeline/fanout/fanout.go b/pkg/sync/pipeline/fanout/fanout.go deleted file mode 100644 index 2253395ce..000000000 --- a/pkg/sync/pipeline/fanout/fanout.go +++ /dev/null @@ -1,150 +0,0 @@ -package fanout - -import ( - "context" - "errors" - "runtime" - "sync" - - "github.com/go-kratos/kratos/pkg/log" - "github.com/go-kratos/kratos/pkg/net/metadata" - "github.com/go-kratos/kratos/pkg/net/trace" -) - -var ( - // ErrFull chan full. 
- ErrFull = errors.New("fanout: chan full") - traceTags = []trace.Tag{ - {Key: trace.TagSpanKind, Value: "background"}, - {Key: trace.TagComponent, Value: "sync/pipeline/fanout"}, - } -) - -type options struct { - worker int - buffer int -} - -// Option fanout option -type Option func(*options) - -// Worker specifies the worker of fanout -func Worker(n int) Option { - if n <= 0 { - panic("fanout: worker should > 0") - } - return func(o *options) { - o.worker = n - } -} - -// Buffer specifies the buffer of fanout -func Buffer(n int) Option { - if n <= 0 { - panic("fanout: buffer should > 0") - } - return func(o *options) { - o.buffer = n - } -} - -type item struct { - f func(c context.Context) - ctx context.Context -} - -// Fanout async consume data from chan. -type Fanout struct { - name string - ch chan item - options *options - waiter sync.WaitGroup - - ctx context.Context - cancel func() -} - -// New new a fanout struct. -func New(name string, opts ...Option) *Fanout { - if name == "" { - name = "anonymous" - } - o := &options{ - worker: 1, - buffer: 1024, - } - for _, op := range opts { - op(o) - } - c := &Fanout{ - ch: make(chan item, o.buffer), - name: name, - options: o, - } - c.ctx, c.cancel = context.WithCancel(context.Background()) - c.waiter.Add(o.worker) - for i := 0; i < o.worker; i++ { - go c.proc() - } - return c -} - -func (c *Fanout) proc() { - defer c.waiter.Done() - for { - select { - case t := <-c.ch: - wrapFunc(t.f)(t.ctx) - _metricChanSize.Set(float64(len(c.ch)), c.name) - _metricCount.Inc(c.name) - case <-c.ctx.Done(): - return - } - } -} - -func wrapFunc(f func(c context.Context)) (res func(context.Context)) { - res = func(ctx context.Context) { - defer func() { - if r := recover(); r != nil { - buf := make([]byte, 64*1024) - buf = buf[:runtime.Stack(buf, false)] - log.Error("panic in fanout proc, err: %s, stack: %s", r, buf) - } - }() - f(ctx) - if tr, ok := trace.FromContext(ctx); ok { - tr.Finish(nil) - } - } - return -} - -// Do save a callback func. -func (c *Fanout) Do(ctx context.Context, f func(ctx context.Context)) (err error) { - if f == nil || c.ctx.Err() != nil { - return c.ctx.Err() - } - nakeCtx := metadata.WithContext(ctx) - if tr, ok := trace.FromContext(ctx); ok { - tr = tr.Fork("", "Fanout:Do").SetTag(traceTags...) 
- nakeCtx = trace.NewContext(nakeCtx, tr) - } - select { - case c.ch <- item{f: f, ctx: nakeCtx}: - default: - err = ErrFull - } - _metricChanSize.Set(float64(len(c.ch)), c.name) - return -} - -// Close close fanout -func (c *Fanout) Close() error { - if err := c.ctx.Err(); err != nil { - return err - } - c.cancel() - c.waiter.Wait() - return nil -} diff --git a/pkg/sync/pipeline/fanout/fanout_test.go b/pkg/sync/pipeline/fanout/fanout_test.go deleted file mode 100644 index ef1df1169..000000000 --- a/pkg/sync/pipeline/fanout/fanout_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package fanout - -import ( - "context" - "testing" - "time" -) - -func TestFanout_Do(t *testing.T) { - ca := New("cache", Worker(1), Buffer(1024)) - var run bool - ca.Do(context.Background(), func(c context.Context) { - run = true - panic("error") - }) - time.Sleep(time.Millisecond * 50) - t.Log("not panic") - if !run { - t.Fatal("expect run be true") - } -} - -func TestFanout_Close(t *testing.T) { - ca := New("cache", Worker(1), Buffer(1024)) - ca.Close() - err := ca.Do(context.Background(), func(c context.Context) {}) - if err == nil { - t.Fatal("expect get err") - } -} diff --git a/pkg/sync/pipeline/fanout/metrics.go b/pkg/sync/pipeline/fanout/metrics.go deleted file mode 100644 index 6a7207326..000000000 --- a/pkg/sync/pipeline/fanout/metrics.go +++ /dev/null @@ -1,27 +0,0 @@ -package fanout - -import ( - "github.com/go-kratos/kratos/pkg/stat/metric" -) - -const ( - _metricNamespace = "sync" - _metricSubSystem = "pipeline_fanout" -) - -var ( - _metricChanSize = metric.NewGaugeVec(&metric.GaugeVecOpts{ - Namespace: _metricNamespace, - Subsystem: _metricSubSystem, - Name: "chan_len", - Help: "sync pipeline fanout current channel size.", - Labels: []string{"name"}, - }) - _metricCount = metric.NewCounterVec(&metric.CounterVecOpts{ - Namespace: _metricNamespace, - Subsystem: _metricSubSystem, - Name: "process_count", - Help: "process count", - Labels: []string{"name"}, - }) -) diff --git a/pkg/sync/pipeline/pipeline.go b/pkg/sync/pipeline/pipeline.go deleted file mode 100644 index 7347e5f6c..000000000 --- a/pkg/sync/pipeline/pipeline.go +++ /dev/null @@ -1,223 +0,0 @@ -package pipeline - -import ( - "context" - "errors" - "strconv" - "sync" - "time" - - "github.com/go-kratos/kratos/pkg/net/metadata" - "github.com/go-kratos/kratos/pkg/stat/metric" - xtime "github.com/go-kratos/kratos/pkg/time" -) - -// ErrFull channel full error -var ErrFull = errors.New("channel full") - -const _metricNamespace = "sync" -const _metricSubSystem = "pipeline" - -var ( - _metricCount = metric.NewCounterVec(&metric.CounterVecOpts{ - Namespace: _metricNamespace, - Subsystem: _metricSubSystem, - Name: "process_count", - Help: "process count", - Labels: []string{"name", "chan"}, - }) - _metricChanLen = metric.NewGaugeVec(&metric.GaugeVecOpts{ - Namespace: _metricNamespace, - Subsystem: _metricSubSystem, - Name: "chan_len", - Help: "channel length", - Labels: []string{"name", "chan"}, - }) -) - -type message struct { - key string - value interface{} -} - -// Pipeline pipeline struct -type Pipeline struct { - Do func(c context.Context, index int, values map[string][]interface{}) - Split func(key string) int - chans []chan *message - mirrorChans []chan *message - config *Config - wait sync.WaitGroup - name string -} - -// Config Pipeline config -type Config struct { - // MaxSize merge size - MaxSize int - // Interval merge interval - Interval xtime.Duration - // Buffer channel size - Buffer int - // Worker channel number - Worker int - // Name use for 
metrics - Name string -} - -func (c *Config) fix() { - if c.MaxSize <= 0 { - c.MaxSize = 1000 - } - if c.Interval <= 0 { - c.Interval = xtime.Duration(time.Second) - } - if c.Buffer <= 0 { - c.Buffer = 1000 - } - if c.Worker <= 0 { - c.Worker = 10 - } - if c.Name == "" { - c.Name = "anonymous" - } -} - -// NewPipeline new pipline -func NewPipeline(config *Config) (res *Pipeline) { - if config == nil { - config = &Config{} - } - config.fix() - res = &Pipeline{ - chans: make([]chan *message, config.Worker), - mirrorChans: make([]chan *message, config.Worker), - config: config, - name: config.Name, - } - for i := 0; i < config.Worker; i++ { - res.chans[i] = make(chan *message, config.Buffer) - res.mirrorChans[i] = make(chan *message, config.Buffer) - } - return -} - -// Start start all mergeproc -func (p *Pipeline) Start() { - if p.Do == nil { - panic("pipeline: do func is nil") - } - if p.Split == nil { - panic("pipeline: split func is nil") - } - var mirror bool - p.wait.Add(len(p.chans) + len(p.mirrorChans)) - for i, ch := range p.chans { - go p.mergeproc(mirror, i, ch) - } - mirror = true - for i, ch := range p.mirrorChans { - go p.mergeproc(mirror, i, ch) - } -} - -// SyncAdd sync add a value to channal, channel shard in split method -func (p *Pipeline) SyncAdd(c context.Context, key string, value interface{}) (err error) { - ch, msg := p.add(c, key, value) - select { - case ch <- msg: - case <-c.Done(): - err = c.Err() - } - return -} - -// Add async add a value to channal, channel shard in split method -func (p *Pipeline) Add(c context.Context, key string, value interface{}) (err error) { - ch, msg := p.add(c, key, value) - select { - case ch <- msg: - default: - err = ErrFull - } - return -} - -func (p *Pipeline) add(c context.Context, key string, value interface{}) (ch chan *message, m *message) { - shard := p.Split(key) % p.config.Worker - if metadata.String(c, metadata.Mirror) != "" { - ch = p.mirrorChans[shard] - } else { - ch = p.chans[shard] - } - m = &message{key: key, value: value} - return -} - -// Close all goroutinue -func (p *Pipeline) Close() (err error) { - for _, ch := range p.chans { - ch <- nil - } - for _, ch := range p.mirrorChans { - ch <- nil - } - p.wait.Wait() - return -} - -func (p *Pipeline) mergeproc(mirror bool, index int, ch <-chan *message) { - defer p.wait.Done() - var ( - m *message - vals = make(map[string][]interface{}, p.config.MaxSize) - closed bool - count int - inteval = p.config.Interval - timeout = false - ) - if index > 0 { - inteval = xtime.Duration(int64(index) * (int64(p.config.Interval) / int64(p.config.Worker))) - } - timer := time.NewTimer(time.Duration(inteval)) - defer timer.Stop() - for { - select { - case m = <-ch: - if m == nil { - closed = true - break - } - count++ - vals[m.key] = append(vals[m.key], m.value) - if count >= p.config.MaxSize { - break - } - continue - case <-timer.C: - timeout = true - } - name := p.name - process := count - if len(vals) > 0 { - ctx := context.Background() - if mirror { - ctx = metadata.NewContext(ctx, metadata.MD{metadata.Mirror: "1"}) - name = "mirror_" + name - } - p.Do(ctx, index, vals) - vals = make(map[string][]interface{}, p.config.MaxSize) - count = 0 - } - _metricChanLen.Set(float64(len(ch)), name, strconv.Itoa(index)) - _metricCount.Add(float64(process), name, strconv.Itoa(index)) - if closed { - return - } - if !timer.Stop() && !timeout { - <-timer.C - timeout = false - } - timer.Reset(time.Duration(p.config.Interval)) - } -} diff --git a/pkg/sync/pipeline/pipeline_test.go 
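A short sketch of how the deleted aggregator was typically wired up, mirroring the shape of pipeline_test.go: assign Do and Split, Start, feed keyed values, and Close on shutdown. The configuration values and key scheme are illustrative only, and the import paths assume the pre-removal layout.

```go
package main

import (
	"context"
	"fmt"
	"strconv"
	"time"

	"github.com/go-kratos/kratos/pkg/sync/pipeline"
	xtime "github.com/go-kratos/kratos/pkg/time"
)

func main() {
	p := pipeline.NewPipeline(&pipeline.Config{
		MaxSize:  3,                                     // flush once 3 values are buffered on a shard
		Interval: xtime.Duration(20 * time.Millisecond), // or flush on this interval
		Buffer:   64,
		Worker:   4,
		Name:     "demo",
	})
	// Do receives the merged values for one worker shard.
	p.Do = func(c context.Context, index int, values map[string][]interface{}) {
		fmt.Printf("worker %d flushed %d keys\n", index, len(values))
	}
	// Split picks the worker shard for a key.
	p.Split = func(key string) int {
		n, _ := strconv.Atoi(key)
		return n
	}
	p.Start()

	for i := 0; i < 10; i++ {
		// Add is non-blocking and returns pipeline.ErrFull when the shard buffer is full.
		_ = p.Add(context.Background(), strconv.Itoa(i), i)
	}
	time.Sleep(100 * time.Millisecond)
	p.Close()
}
```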
b/pkg/sync/pipeline/pipeline_test.go deleted file mode 100644 index ea9d61838..000000000 --- a/pkg/sync/pipeline/pipeline_test.go +++ /dev/null @@ -1,131 +0,0 @@ -package pipeline - -import ( - "context" - "reflect" - "strconv" - "testing" - "time" - - "github.com/go-kratos/kratos/pkg/net/metadata" - xtime "github.com/go-kratos/kratos/pkg/time" -) - -func TestPipeline(t *testing.T) { - conf := &Config{ - MaxSize: 3, - Interval: xtime.Duration(time.Millisecond * 20), - Buffer: 3, - Worker: 10, - } - type recv struct { - mirror string - ch int - values map[string][]interface{} - } - var runs []recv - do := func(c context.Context, ch int, values map[string][]interface{}) { - runs = append(runs, recv{ - mirror: metadata.String(c, metadata.Mirror), - values: values, - ch: ch, - }) - } - split := func(s string) int { - n, _ := strconv.Atoi(s) - return n - } - p := NewPipeline(conf) - p.Do = do - p.Split = split - p.Start() - p.Add(context.Background(), "1", 1) - p.Add(context.Background(), "1", 2) - p.Add(context.Background(), "11", 3) - p.Add(context.Background(), "2", 3) - time.Sleep(time.Millisecond * 60) - mirrorCtx := metadata.NewContext(context.Background(), metadata.MD{metadata.Mirror: "1"}) - p.Add(mirrorCtx, "2", 3) - time.Sleep(time.Millisecond * 60) - p.SyncAdd(mirrorCtx, "5", 5) - time.Sleep(time.Millisecond * 60) - p.Close() - expt := []recv{ - { - mirror: "", - ch: 1, - values: map[string][]interface{}{ - "1": {1, 2}, - "11": {3}, - }, - }, - { - mirror: "", - ch: 2, - values: map[string][]interface{}{ - "2": {3}, - }, - }, - { - mirror: "1", - ch: 2, - values: map[string][]interface{}{ - "2": {3}, - }, - }, - { - mirror: "1", - ch: 5, - values: map[string][]interface{}{ - "5": {5}, - }, - }, - } - if !reflect.DeepEqual(runs, expt) { - t.Errorf("expect get %+v,\n got: %+v", expt, runs) - } -} - -func TestPipelineSmooth(t *testing.T) { - conf := &Config{ - MaxSize: 100, - Interval: xtime.Duration(time.Second), - Buffer: 100, - Worker: 10, - } - type result struct { - index int - ts time.Time - } - var results []result - do := func(c context.Context, index int, values map[string][]interface{}) { - results = append(results, result{ - index: index, - ts: time.Now(), - }) - } - split := func(s string) int { - n, _ := strconv.Atoi(s) - return n - } - p := NewPipeline(conf) - p.Do = do - p.Split = split - p.Start() - for i := 0; i < 10; i++ { - p.Add(context.Background(), strconv.Itoa(i), 1) - } - time.Sleep(time.Millisecond * 1500) - if len(results) != conf.Worker { - t.Errorf("expect results equal worker") - t.FailNow() - } - for i, r := range results { - if i > 0 { - if r.ts.Sub(results[i-1].ts) < time.Millisecond*20 { - t.Errorf("expect runs be smooth") - t.FailNow() - } - } - } -} diff --git a/pkg/testing/lich/README.md b/pkg/testing/lich/README.md deleted file mode 100644 index 541c6c681..000000000 --- a/pkg/testing/lich/README.md +++ /dev/null @@ -1,4 +0,0 @@ -## testing/lich 运行环境构建 -基于 docker-compose 实现跨平台跨语言环境的容器依赖管理方案,以解决运行ut场景下的 (mysql, redis, mc)容器依赖问题。 - -使用说明参见:https://go-kratos.github.io/kratos/#/ut \ No newline at end of file diff --git a/pkg/testing/lich/composer.go b/pkg/testing/lich/composer.go deleted file mode 100644 index 21ac82d07..000000000 --- a/pkg/testing/lich/composer.go +++ /dev/null @@ -1,129 +0,0 @@ -package lich - -import ( - "bytes" - "crypto/md5" - "encoding/json" - "flag" - "fmt" - "os" - "os/exec" - "path/filepath" - "runtime" - "time" - - "github.com/go-kratos/kratos/pkg/log" -) - -var ( - retry int - noDown bool - yamlPath string - pathHash string - 
services map[string]*Container -) - -func init() { - flag.StringVar(&yamlPath, "f", "docker-compose.yaml", "composer yaml path.") - flag.IntVar(&retry, "retry", 5, "number of retries on network failure.") - flag.BoolVar(&noDown, "nodown", false, "containers are not recycled.") -} - -func runCompose(args ...string) (output []byte, err error) { - if _, err = os.Stat(yamlPath); os.IsNotExist(err) { - log.Error("os.Stat(%s) composer yaml is not exist!", yamlPath) - return - } - if yamlPath, err = filepath.Abs(yamlPath); err != nil { - log.Error("filepath.Abs(%s) error(%v)", yamlPath, err) - return - } - pathHash = fmt.Sprintf("%x", md5.Sum([]byte(yamlPath)))[:9] - args = append([]string{"-f", yamlPath, "-p", pathHash}, args...) - if output, err = exec.Command("docker-compose", args...).CombinedOutput(); err != nil { - log.Error("exec.Command(docker-compose) args(%v) stdout(%s) error(%v)", args, string(output), err) - return - } - return -} - -// Setup setup UT related environment dependence for everything. -func Setup() (err error) { - if _, err = runCompose("up", "-d"); err != nil { - return - } - defer func() { - if err != nil { - Teardown() - } - }() - if _, err = getServices(); err != nil { - return - } - _, err = checkServices() - return -} - -// Teardown unsetup all environment dependence. -func Teardown() (err error) { - if !noDown { - _, err = runCompose("down") - } - return -} - -func getServices() (output []byte, err error) { - if output, err = runCompose("config", "--services"); err != nil { - return - } - var eol = []byte("\n") - if output = bytes.TrimSpace(output); runtime.GOOS == "windows" { - eol = []byte("\r\n") - } - services = make(map[string]*Container) - for _, svr := range bytes.Split(output, eol) { - if output, err = runCompose("ps", "-q", string(svr)); err != nil { - return - } - var ( - id = string(bytes.TrimSpace(output)) - args = []string{"inspect", id, "--format", "'{{json .}}'"} - ) - if output, err = exec.Command("docker", args...).CombinedOutput(); err != nil { - log.Error("exec.Command(docker) args(%v) stdout(%s) error(%v)", args, string(output), err) - return - } - if output = bytes.TrimSpace(output); bytes.Equal(output, []byte("")) { - err = fmt.Errorf("service: %s | container: %s fails to launch", svr, id) - log.Error("exec.Command(docker) args(%v) error(%v)", args, err) - return - } - var c = &Container{} - if err = json.Unmarshal(bytes.Trim(output, "'"), c); err != nil { - log.Error("json.Unmarshal(%s) error(%v)", string(output), err) - return - } - services[string(svr)] = c - } - return -} - -func checkServices() (output []byte, err error) { - defer func() { - if err != nil && retry > 0 { - retry-- - getServices() - time.Sleep(time.Second * 5) - output, err = checkServices() - return - } - }() - for svr, c := range services { - if err = c.Healthcheck(); err != nil { - log.Error("healthcheck(%s) error(%v) retrying %d times...", svr, err, retry) - return - } - // TODO About container check and more... - } - return -} diff --git a/pkg/testing/lich/healthcheck.go b/pkg/testing/lich/healthcheck.go deleted file mode 100644 index 68bc758ee..000000000 --- a/pkg/testing/lich/healthcheck.go +++ /dev/null @@ -1,85 +0,0 @@ -package lich - -import ( - "database/sql" - "fmt" - "net" - "strconv" - "strings" - - "github.com/go-kratos/kratos/pkg/log" - // Register go-sql-driver stuff - _ "github.com/go-sql-driver/mysql" -) - -var healthchecks = map[string]func(*Container) error{"mysql": checkMysql, "mariadb": checkMysql} - -// Healthcheck check container health. 
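To make the Setup/Teardown flow above concrete, a hypothetical TestMain for a package whose tests depend on the composed containers. The compose file path is made up; everything else follows the flags registered in init and the exported Setup and Teardown shown above.

```go
package dao

import (
	"flag"
	"os"
	"testing"

	"github.com/go-kratos/kratos/pkg/testing/lich"
)

// TestMain boots the docker-compose services declared in the file passed via
// the -f flag (lich registers -f, -retry and -nodown in its init), runs the
// package tests, then tears the containers down again.
func TestMain(m *testing.M) {
	_ = flag.Set("f", "../../test/docker-compose.yaml") // hypothetical compose file path
	flag.Parse()
	if err := lich.Setup(); err != nil {
		panic(err)
	}
	code := m.Run()
	_ = lich.Teardown() // skipped internally when -nodown is set
	os.Exit(code)
}
```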
-func (c *Container) Healthcheck() (err error) { - if status, health := c.State.Status, c.State.Health.Status; !c.State.Running || (health != "" && health != "healthy") { - err = fmt.Errorf("service: %s | container: %s not running", c.GetImage(), c.GetID()) - log.Error("docker status(%s) health(%s) error(%v)", status, health, err) - return - } - if check, ok := healthchecks[c.GetImage()]; ok { - err = check(c) - return - } - for proto, ports := range c.NetworkSettings.Ports { - if id := c.GetID(); !strings.Contains(proto, "tcp") { - log.Error("container: %s proto(%s) unsupported.", id, proto) - continue - } - for _, publish := range ports { - var ( - ip = net.ParseIP(publish.HostIP) - port, _ = strconv.Atoi(publish.HostPort) - tcpAddr = &net.TCPAddr{IP: ip, Port: port} - tcpConn *net.TCPConn - ) - if tcpConn, err = net.DialTCP("tcp", nil, tcpAddr); err != nil { - log.Error("net.DialTCP(%s:%s) error(%v)", publish.HostIP, publish.HostPort, err) - return - } - tcpConn.Close() - } - } - return -} - -func checkMysql(c *Container) (err error) { - var ip, port, user, passwd string - for _, env := range c.Config.Env { - splits := strings.Split(env, "=") - if strings.Contains(splits[0], "MYSQL_ROOT_PASSWORD") { - user, passwd = "root", splits[1] - continue - } - if strings.Contains(splits[0], "MYSQL_ALLOW_EMPTY_PASSWORD") { - user, passwd = "root", "" - continue - } - if strings.Contains(splits[0], "MYSQL_USER") { - user = splits[1] - continue - } - if strings.Contains(splits[0], "MYSQL_PASSWORD") { - passwd = splits[1] - continue - } - } - var db *sql.DB - if ports, ok := c.NetworkSettings.Ports["3306/tcp"]; ok { - ip, port = ports[0].HostIP, ports[0].HostPort - } - var dsn = fmt.Sprintf("%s:%s@tcp(%s:%s)/", user, passwd, ip, port) - if db, err = sql.Open("mysql", dsn); err != nil { - log.Error("sql.Open(mysql) dsn(%s) error(%v)", dsn, err) - return - } - if err = db.Ping(); err != nil { - log.Error("ping(db) dsn(%s) error(%v)", dsn, err) - } - defer db.Close() - return -} diff --git a/pkg/testing/lich/model.go b/pkg/testing/lich/model.go deleted file mode 100644 index 64653a681..000000000 --- a/pkg/testing/lich/model.go +++ /dev/null @@ -1,88 +0,0 @@ -package lich - -import ( - "strings" - "time" -) - -// Container docker inspect resp. 
-type Container struct { - ID string `json:"Id"` - Created time.Time `json:"Created"` - Path string `json:"Path"` - Args []string `json:"Args"` - State struct { - Status string `json:"Status"` - Running bool `json:"Running"` - Paused bool `json:"Paused"` - Restarting bool `json:"Restarting"` - OOMKilled bool `json:"OOMKilled"` - Dead bool `json:"Dead"` - Pid int `json:"Pid"` - ExitCode int `json:"ExitCode"` - Error string `json:"Error"` - StartedAt time.Time `json:"StartedAt"` - FinishedAt time.Time `json:"FinishedAt"` - Health struct { - Status string `json:"Status"` - FailingStreak int `json:"FailingStreak"` - Log []struct { - Start time.Time `json:"Start"` - End time.Time `json:"End"` - ExitCode int `json:"ExitCode"` - Output string `json:"Output"` - } `json:"Log"` - } `json:"Health"` - } `json:"State"` - Config struct { - Hostname string `json:"Hostname"` - Domainname string `json:"Domainname"` - User string `json:"User"` - Tty bool `json:"Tty"` - OpenStdin bool `json:"OpenStdin"` - StdinOnce bool `json:"StdinOnce"` - Env []string `json:"Env"` - Cmd []string `json:"Cmd"` - Image string `json:"Image"` - WorkingDir string `json:"WorkingDir"` - Entrypoint []string `json:"Entrypoint"` - } `json:"Config"` - Image string `json:"Image"` - ResolvConfPath string `json:"ResolvConfPath"` - HostnamePath string `json:"HostnamePath"` - HostsPath string `json:"HostsPath"` - LogPath string `json:"LogPath"` - Name string `json:"Name"` - RestartCount int `json:"RestartCount"` - Driver string `json:"Driver"` - Platform string `json:"Platform"` - MountLabel string `json:"MountLabel"` - ProcessLabel string `json:"ProcessLabel"` - AppArmorProfile string `json:"AppArmorProfile"` - NetworkSettings struct { - Bridge string `json:"Bridge"` - SandboxID string `json:"SandboxID"` - HairpinMode bool `json:"HairpinMode"` - Ports map[string][]struct { - HostIP string `json:"HostIp"` - HostPort string `json:"HostPort"` - } `json:"Ports"` - } `json:"NetworkSettings"` -} - -// GetImage get image name at container -func (c *Container) GetImage() (image string) { - image = c.Config.Image - if images := strings.Split(image, ":"); len(images) > 0 { - image = images[0] - } - return -} - -// GetID get id at container -func (c *Container) GetID() (id string) { - if id = c.ID; len(id) > 9 { - id = id[0:9] - } - return -} diff --git a/pkg/time/README.md b/pkg/time/README.md deleted file mode 100644 index 6e099f312..000000000 --- a/pkg/time/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# time - -## 项目简介 - -Kratos的时间模块,主要用于mysql时间戳转换、配置文件读取并转换、Context超时时间比较 diff --git a/pkg/time/time.go b/pkg/time/time.go deleted file mode 100644 index cb03e9919..000000000 --- a/pkg/time/time.go +++ /dev/null @@ -1,59 +0,0 @@ -package time - -import ( - "context" - "database/sql/driver" - "strconv" - xtime "time" -) - -// Time be used to MySql timestamp converting. -type Time int64 - -// Scan scan time. -func (jt *Time) Scan(src interface{}) (err error) { - switch sc := src.(type) { - case xtime.Time: - *jt = Time(sc.Unix()) - case string: - var i int64 - i, err = strconv.ParseInt(sc, 10, 64) - *jt = Time(i) - } - return -} - -// Value get time value. -func (jt Time) Value() (driver.Value, error) { - return xtime.Unix(int64(jt), 0), nil -} - -// Time get time. -func (jt Time) Time() xtime.Time { - return xtime.Unix(int64(jt), 0) -} - -// Duration be used toml unmarshal string time, like 1s, 500ms. -type Duration xtime.Duration - -// UnmarshalText unmarshal text to duration. 
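The Time type above exists so MySQL timestamp columns can be scanned straight into a plain int64. A small illustration of the Scan/Value round trip it defines, with no database involved; the values are arbitrary.

```go
package main

import (
	"fmt"
	stdtime "time"

	ktime "github.com/go-kratos/kratos/pkg/time"
)

func main() {
	var created ktime.Time

	// database/sql calls Scan with whatever the driver produced; both a
	// time.Time and a string unix timestamp are accepted by the code above.
	_ = created.Scan(stdtime.Now())
	fmt.Println("as unix seconds:", int64(created))

	_ = created.Scan("1600000000")
	fmt.Println("as time.Time:", created.Time().UTC())

	// Value converts back for writing the column.
	v, _ := created.Value()
	fmt.Println("driver value:", v)
}
```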
-func (d *Duration) UnmarshalText(text []byte) error { - tmp, err := xtime.ParseDuration(string(text)) - if err == nil { - *d = Duration(tmp) - } - return err -} - -// Shrink will decrease the duration by comparing with context's timeout duration -// and return new timeout\context\CancelFunc. -func (d Duration) Shrink(c context.Context) (Duration, context.Context, context.CancelFunc) { - if deadline, ok := c.Deadline(); ok { - if ctimeout := xtime.Until(deadline); ctimeout < xtime.Duration(d) { - // deliver small timeout - return Duration(ctimeout), c, func() {} - } - } - ctx, cancel := context.WithTimeout(c, xtime.Duration(d)) - return d, ctx, cancel -} diff --git a/pkg/time/time_test.go b/pkg/time/time_test.go deleted file mode 100644 index f0c2d6604..000000000 --- a/pkg/time/time_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package time - -import ( - "context" - "testing" - "time" -) - -func TestShrink(t *testing.T) { - var d Duration - err := d.UnmarshalText([]byte("1s")) - if err != nil { - t.Fatalf("TestShrink: d.UnmarshalText failed!err:=%v", err) - } - c := context.Background() - to, ctx, cancel := d.Shrink(c) - defer cancel() - if time.Duration(to) != time.Second { - t.Fatalf("new timeout must be equal 1 second") - } - if deadline, ok := ctx.Deadline(); !ok || time.Until(deadline) > time.Second || time.Until(deadline) < time.Millisecond*500 { - t.Fatalf("ctx deadline must be less than 1s and greater than 500ms") - } -} - -func TestShrinkWithTimeout(t *testing.T) { - var d Duration - err := d.UnmarshalText([]byte("1s")) - if err != nil { - t.Fatalf("TestShrink: d.UnmarshalText failed!err:=%v", err) - } - c, cancel := context.WithTimeout(context.Background(), time.Second*2) - defer cancel() - to, ctx, cancel := d.Shrink(c) - defer cancel() - if time.Duration(to) != time.Second { - t.Fatalf("new timeout must be equal 1 second") - } - if deadline, ok := ctx.Deadline(); !ok || time.Until(deadline) > time.Second || time.Until(deadline) < time.Millisecond*500 { - t.Fatalf("ctx deadline must be less than 1s and greater than 500ms") - } -} - -func TestShrinkWithDeadline(t *testing.T) { - var d Duration - err := d.UnmarshalText([]byte("1s")) - if err != nil { - t.Fatalf("TestShrink: d.UnmarshalText failed!err:=%v", err) - } - c, cancel := context.WithTimeout(context.Background(), time.Millisecond*500) - defer cancel() - to, ctx, cancel := d.Shrink(c) - defer cancel() - if time.Duration(to) >= time.Millisecond*500 { - t.Fatalf("new timeout must be less than 500 ms") - } - if deadline, ok := ctx.Deadline(); !ok || time.Until(deadline) > time.Millisecond*500 || time.Until(deadline) < time.Millisecond*200 { - t.Fatalf("ctx deadline must be less than 500ms and greater than 200ms") - } -} diff --git a/registry/registry.go b/registry/registry.go new file mode 100644 index 000000000..cd74ca694 --- /dev/null +++ b/registry/registry.go @@ -0,0 +1,41 @@ +package registry + +// Registry is service registry. +type Registry interface { + // Register the registration. + Register(service *ServiceInstance) error + // Deregister the registration. + Deregister(service *ServiceInstance) error + // Service return the service instances in memory according to the service name. + Service(name string) ([]*ServiceInstance, error) + // Watch creates a watcher according to the service name. + Watch(name string) (Watcher, error) +} + +// Watcher is service watcher. +type Watcher interface { + // Watch returns services in the following two cases: + // 1.the first time to watch and the service instance list is not empty. 
+ // 2.any service instance changes found. + // if the above two conditions are not met, it will block until context deadline exceeded or canceled + Next() ([]*ServiceInstance, error) + // Close close the watcher. + Close() error +} + +// ServiceInstance is an instance of a service in a discovery system. +type ServiceInstance struct { + // ID is the unique instance ID as registered. + ID string + // Name is the service name as registered. + Name string + // Version is the version of the compiled. + Version string + // Metadata is the kv pair metadata associated with the service instance. + Metadata map[string]string + // Endpoints is endpoint addresses of the service instance. + // schema: + // http://127.0.0.1:8000?isSecure=false + // grpc://127.0.0.1:9000?isSecure=false + Endpoints []string +} diff --git a/third_party/CHANGELOG.md b/third_party/CHANGELOG.md deleted file mode 100644 index 99f2e38bc..000000000 --- a/third_party/CHANGELOG.md +++ /dev/null @@ -1,7 +0,0 @@ -#### Version 1.0.1 - -> 1.gogo.protobuf迁移到third_party/github目录下 - -#### Version 1.0.0 - -> 1.添加 gogo proto diff --git a/third_party/README.md b/third_party/README.md new file mode 100644 index 000000000..005faa2cf --- /dev/null +++ b/third_party/README.md @@ -0,0 +1 @@ +# third_party diff --git a/third_party/github.com/gogo/protobuf/AUTHORS b/third_party/github.com/gogo/protobuf/AUTHORS deleted file mode 100644 index 3d97fc7a2..000000000 --- a/third_party/github.com/gogo/protobuf/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of GoGo authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS file, which -# lists people. For example, employees are listed in CONTRIBUTORS, -# but not in AUTHORS, because the employer holds the copyright. - -# Names should be added to this file as one of -# Organization's name -# Individual's name -# Individual's name - -# Please keep the list sorted. - -Sendgrid, Inc -Vastech SA (PTY) LTD -Walter Schulze diff --git a/third_party/github.com/gogo/protobuf/CONTRIBUTORS b/third_party/github.com/gogo/protobuf/CONTRIBUTORS deleted file mode 100644 index 1b4f6c208..000000000 --- a/third_party/github.com/gogo/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,23 +0,0 @@ -Anton Povarov -Brian Goff -Clayton Coleman -Denis Smirnov -DongYun Kang -Dwayne Schultz -Georg Apitz -Gustav Paul -Johan Brandhorst -John Shahid -John Tuley -Laurent -Patrick Lee -Peter Edge -Roger Johansson -Sam Nguyen -Sergio Arbeo -Stephen J Day -Tamir Duberstein -Todd Eisenberger -Tormod Erevik Lea -Vyacheslav Kim -Walter Schulze diff --git a/third_party/github.com/gogo/protobuf/LICENSE b/third_party/github.com/gogo/protobuf/LICENSE deleted file mode 100644 index f57de90da..000000000 --- a/third_party/github.com/gogo/protobuf/LICENSE +++ /dev/null @@ -1,35 +0,0 @@ -Copyright (c) 2013, The GoGo Authors. All rights reserved. - -Protocol Buffers for Go with Gadgets - -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/third_party/github.com/gogo/protobuf/gogoproto/gogo.proto b/third_party/github.com/gogo/protobuf/gogoproto/gogo.proto deleted file mode 100644 index b80c85653..000000000 --- a/third_party/github.com/gogo/protobuf/gogoproto/gogo.proto +++ /dev/null @@ -1,144 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
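Returning to the registry package added earlier in this change: a toy in-memory implementation makes the interface contract concrete. This is only a sketch; the import path is assumed for this stage of the rewrite, there is no locking, and Watch is stubbed out rather than implemented.

```go
package main

import (
	"fmt"

	// Import path assumed for this stage of the rewrite.
	"github.com/go-kratos/kratos/v2/registry"
)

// memRegistry is a toy, single-goroutine Registry used only to exercise the
// interface; real implementations (consul, etcd, ...) live in their own packages.
type memRegistry struct {
	services map[string][]*registry.ServiceInstance
}

var _ registry.Registry = (*memRegistry)(nil)

func newMemRegistry() *memRegistry {
	return &memRegistry{services: make(map[string][]*registry.ServiceInstance)}
}

func (m *memRegistry) Register(s *registry.ServiceInstance) error {
	m.services[s.Name] = append(m.services[s.Name], s)
	return nil
}

func (m *memRegistry) Deregister(s *registry.ServiceInstance) error {
	kept := m.services[s.Name][:0]
	for _, in := range m.services[s.Name] {
		if in.ID != s.ID {
			kept = append(kept, in)
		}
	}
	m.services[s.Name] = kept
	return nil
}

func (m *memRegistry) Service(name string) ([]*registry.ServiceInstance, error) {
	return m.services[name], nil
}

func (m *memRegistry) Watch(name string) (registry.Watcher, error) {
	return nil, fmt.Errorf("watch is not implemented in this sketch")
}

func main() {
	r := newMemRegistry()
	ins := &registry.ServiceInstance{
		ID:      "1",
		Name:    "helloworld",
		Version: "v2.0.0",
		// Endpoint schema follows the registry.go comment:
		// scheme://host:port?isSecure=false
		Endpoints: []string{"grpc://127.0.0.1:9000?isSecure=false"},
	}
	_ = r.Register(ins)
	instances, _ := r.Service("helloworld")
	fmt.Println("instances:", len(instances))
	_ = r.Deregister(ins)
}
```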
- -syntax = "proto2"; -package gogoproto; - -import "google/protobuf/descriptor.proto"; - -option java_package = "com.google.protobuf"; -option java_outer_classname = "GoGoProtos"; -option go_package = "github.com/gogo/protobuf/gogoproto"; - -extend google.protobuf.EnumOptions { - optional bool goproto_enum_prefix = 62001; - optional bool goproto_enum_stringer = 62021; - optional bool enum_stringer = 62022; - optional string enum_customname = 62023; - optional bool enumdecl = 62024; -} - -extend google.protobuf.EnumValueOptions { - optional string enumvalue_customname = 66001; -} - -extend google.protobuf.FileOptions { - optional bool goproto_getters_all = 63001; - optional bool goproto_enum_prefix_all = 63002; - optional bool goproto_stringer_all = 63003; - optional bool verbose_equal_all = 63004; - optional bool face_all = 63005; - optional bool gostring_all = 63006; - optional bool populate_all = 63007; - optional bool stringer_all = 63008; - optional bool onlyone_all = 63009; - - optional bool equal_all = 63013; - optional bool description_all = 63014; - optional bool testgen_all = 63015; - optional bool benchgen_all = 63016; - optional bool marshaler_all = 63017; - optional bool unmarshaler_all = 63018; - optional bool stable_marshaler_all = 63019; - - optional bool sizer_all = 63020; - - optional bool goproto_enum_stringer_all = 63021; - optional bool enum_stringer_all = 63022; - - optional bool unsafe_marshaler_all = 63023; - optional bool unsafe_unmarshaler_all = 63024; - - optional bool goproto_extensions_map_all = 63025; - optional bool goproto_unrecognized_all = 63026; - optional bool gogoproto_import = 63027; - optional bool protosizer_all = 63028; - optional bool compare_all = 63029; - optional bool typedecl_all = 63030; - optional bool enumdecl_all = 63031; - - optional bool goproto_registration = 63032; - optional bool messagename_all = 63033; - - optional bool goproto_sizecache_all = 63034; - optional bool goproto_unkeyed_all = 63035; -} - -extend google.protobuf.MessageOptions { - optional bool goproto_getters = 64001; - optional bool goproto_stringer = 64003; - optional bool verbose_equal = 64004; - optional bool face = 64005; - optional bool gostring = 64006; - optional bool populate = 64007; - optional bool stringer = 67008; - optional bool onlyone = 64009; - - optional bool equal = 64013; - optional bool description = 64014; - optional bool testgen = 64015; - optional bool benchgen = 64016; - optional bool marshaler = 64017; - optional bool unmarshaler = 64018; - optional bool stable_marshaler = 64019; - - optional bool sizer = 64020; - - optional bool unsafe_marshaler = 64023; - optional bool unsafe_unmarshaler = 64024; - - optional bool goproto_extensions_map = 64025; - optional bool goproto_unrecognized = 64026; - - optional bool protosizer = 64028; - optional bool compare = 64029; - - optional bool typedecl = 64030; - - optional bool messagename = 64033; - - optional bool goproto_sizecache = 64034; - optional bool goproto_unkeyed = 64035; -} - -extend google.protobuf.FieldOptions { - optional bool nullable = 65001; - optional bool embed = 65002; - optional string customtype = 65003; - optional string customname = 65004; - optional string jsontag = 65005; - optional string moretags = 65006; - optional string casttype = 65007; - optional string castkey = 65008; - optional string castvalue = 65009; - - optional bool stdtime = 65010; - optional bool stdduration = 65011; - optional bool wktpointer = 65012; - -} diff --git a/third_party/google/api/README.md 
b/third_party/google/api/README.md deleted file mode 100644 index eafe58802..000000000 --- a/third_party/google/api/README.md +++ /dev/null @@ -1,5 +0,0 @@ -This folder contains the schema of the configuration model for the API services -platform. - -**Note**: Protos under this directory are in Alpha status, and therefore are -subject to breaking changes. diff --git a/third_party/google/api/auth.proto b/third_party/google/api/auth.proto deleted file mode 100644 index 7c5d61666..000000000 --- a/third_party/google/api/auth.proto +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api; - -option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig"; -option java_multiple_files = true; -option java_outer_classname = "AuthProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// `Authentication` defines the authentication configuration for an API. -// -// Example for an API targeted for external use: -// -// name: calendar.googleapis.com -// authentication: -// providers: -// - id: google_calendar_auth -// jwks_uri: https://www.googleapis.com/oauth2/v1/certs -// issuer: https://securetoken.google.com -// rules: -// - selector: "*" -// requirements: -// provider_id: google_calendar_auth -message Authentication { - // A list of authentication rules that apply to individual API methods. - // - // **NOTE:** All service configuration rules follow "last one wins" order. - repeated AuthenticationRule rules = 3; - - // Defines a set of authentication providers that a service supports. - repeated AuthProvider providers = 4; -} - -// Authentication rules for the service. -// -// By default, if a method has any authentication requirements, every request -// must include a valid credential matching one of the requirements. -// It's an error to include more than one kind of credential in a single -// request. -// -// If a method doesn't have any auth requirements, request credentials will be -// ignored. -message AuthenticationRule { - // Selects the methods to which this rule applies. - // - // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. - string selector = 1; - - // The requirements for OAuth credentials. - OAuthRequirements oauth = 2; - - // If true, the service accepts API keys without any other credential. - bool allow_without_credential = 5; - - // Requirements for additional authentication providers. - repeated AuthRequirement requirements = 7; -} - -// Configuration for an authentication provider, including support for -// [JSON Web Token -// (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32). -message AuthProvider { - // The unique identifier of the auth provider. It will be referred to by - // `AuthRequirement.provider_id`. - // - // Example: "bookstore_auth". - string id = 1; - - // Identifies the principal that issued the JWT. 
See - // https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.1 - // Usually a URL or an email address. - // - // Example: https://securetoken.google.com - // Example: 1234567-compute@developer.gserviceaccount.com - string issuer = 2; - - // URL of the provider's public key set to validate signature of the JWT. See - // [OpenID - // Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata). - // Optional if the key set document: - // - can be retrieved from - // [OpenID - // Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html of - // the issuer. - // - can be inferred from the email domain of the issuer (e.g. a Google - // service account). - // - // Example: https://www.googleapis.com/oauth2/v1/certs - string jwks_uri = 3; - - // The list of JWT - // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3). - // that are allowed to access. A JWT containing any of these audiences will - // be accepted. When this setting is absent, only JWTs with audience - // "https://[Service_name][google.api.Service.name]/[API_name][google.protobuf.Api.name]" - // will be accepted. For example, if no audiences are in the setting, - // LibraryService API will only accept JWTs with the following audience - // "https://library-example.googleapis.com/google.example.library.v1.LibraryService". - // - // Example: - // - // audiences: bookstore_android.apps.googleusercontent.com, - // bookstore_web.apps.googleusercontent.com - string audiences = 4; - - // Redirect URL if JWT token is required but not present or is expired. - // Implement authorizationUrl of securityDefinitions in OpenAPI spec. - string authorization_url = 5; -} - -// OAuth scopes are a way to define data and permissions on data. For example, -// there are scopes defined for "Read-only access to Google Calendar" and -// "Access to Cloud Platform". Users can consent to a scope for an application, -// giving it permission to access that data on their behalf. -// -// OAuth scope specifications should be fairly coarse grained; a user will need -// to see and understand the text description of what your scope means. -// -// In most cases: use one or at most two OAuth scopes for an entire family of -// products. If your product has multiple APIs, you should probably be sharing -// the OAuth scope across all of those APIs. -// -// When you need finer grained OAuth consent screens: talk with your product -// management about how developers will use them in practice. -// -// Please note that even though each of the canonical scopes is enough for a -// request to be accepted and passed to the backend, a request can still fail -// due to the backend requiring additional scopes or permissions. -message OAuthRequirements { - // The list of publicly documented OAuth scopes that are allowed access. An - // OAuth token containing any of these scopes will be accepted. - // - // Example: - // - // canonical_scopes: https://www.googleapis.com/auth/calendar, - // https://www.googleapis.com/auth/calendar.read - string canonical_scopes = 1; -} - -// User-defined authentication requirements, including support for -// [JSON Web Token -// (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32). -message AuthRequirement { - // [id][google.api.AuthProvider.id] from authentication provider. 
- // - // Example: - // - // provider_id: bookstore_auth - string provider_id = 1; - - // NOTE: This will be deprecated soon, once AuthProvider.audiences is - // implemented and accepted in all the runtime components. - // - // The list of JWT - // [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3). - // that are allowed to access. A JWT containing any of these audiences will - // be accepted. When this setting is absent, only JWTs with audience - // "https://[Service_name][google.api.Service.name]/[API_name][google.protobuf.Api.name]" - // will be accepted. For example, if no audiences are in the setting, - // LibraryService API will only accept JWTs with the following audience - // "https://library-example.googleapis.com/google.example.library.v1.LibraryService". - // - // Example: - // - // audiences: bookstore_android.apps.googleusercontent.com, - // bookstore_web.apps.googleusercontent.com - string audiences = 2; -} diff --git a/third_party/google/api/backend.proto b/third_party/google/api/backend.proto deleted file mode 100644 index 26a16ef41..000000000 --- a/third_party/google/api/backend.proto +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api; - -option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig"; -option java_multiple_files = true; -option java_outer_classname = "BackendProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// `Backend` defines the backend configuration for a service. -message Backend { - // A list of API backend rules that apply to individual API methods. - // - // **NOTE:** All service configuration rules follow "last one wins" order. - repeated BackendRule rules = 1; -} - -// A backend rule provides configuration for an individual API element. -message BackendRule { - // Path Translation specifies how to combine the backend address with the - // request path in order to produce the appropriate forwarding URL for the - // request. - // - // Path Translation is applicable only to HTTP-based backends. Backends which - // do not accept requests over HTTP/HTTPS should leave `path_translation` - // unspecified. - enum PathTranslation { - PATH_TRANSLATION_UNSPECIFIED = 0; - - // Use the backend address as-is, with no modification to the path. If the - // URL pattern contains variables, the variable names and values will be - // appended to the query string. If a query string parameter and a URL - // pattern variable have the same name, this may result in duplicate keys in - // the query string. 
- // - // # Examples - // - // Given the following operation config: - // - // Method path: /api/company/{cid}/user/{uid} - // Backend address: https://example.cloudfunctions.net/getUser - // - // Requests to the following request paths will call the backend at the - // translated path: - // - // Request path: /api/company/widgetworks/user/johndoe - // Translated: - // https://example.cloudfunctions.net/getUser?cid=widgetworks&uid=johndoe - // - // Request path: /api/company/widgetworks/user/johndoe?timezone=EST - // Translated: - // https://example.cloudfunctions.net/getUser?timezone=EST&cid=widgetworks&uid=johndoe - CONSTANT_ADDRESS = 1; - - // The request path will be appended to the backend address. - // - // # Examples - // - // Given the following operation config: - // - // Method path: /api/company/{cid}/user/{uid} - // Backend address: https://example.appspot.com - // - // Requests to the following request paths will call the backend at the - // translated path: - // - // Request path: /api/company/widgetworks/user/johndoe - // Translated: - // https://example.appspot.com/api/company/widgetworks/user/johndoe - // - // Request path: /api/company/widgetworks/user/johndoe?timezone=EST - // Translated: - // https://example.appspot.com/api/company/widgetworks/user/johndoe?timezone=EST - APPEND_PATH_TO_ADDRESS = 2; - } - - // Selects the methods to which this rule applies. - // - // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. - string selector = 1; - - // The address of the API backend. - string address = 2; - - // The number of seconds to wait for a response from a request. The default - // deadline for gRPC is infinite (no deadline) and HTTP requests is 5 seconds. - double deadline = 3; - - // Minimum deadline in seconds needed for this method. Calls having deadline - // value lower than this will be rejected. - double min_deadline = 4; - - // The number of seconds to wait for the completion of a long running - // operation. The default is no deadline. - double operation_deadline = 5; - - PathTranslation path_translation = 6; - - // Authentication settings used by the backend. - // - // These are typically used to provide service management functionality to - // a backend served on a publicly-routable URL. The `authentication` - // details should match the authentication behavior used by the backend. - // - // For example, specifying `jwt_audience` implies that the backend expects - // authentication via a JWT. - oneof authentication { - // The JWT audience is used when generating a JWT id token for the backend. - string jwt_audience = 7; - } -} diff --git a/third_party/google/api/billing.proto b/third_party/google/api/billing.proto deleted file mode 100644 index 87c11e7ff..000000000 --- a/third_party/google/api/billing.proto +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -syntax = "proto3"; - -package google.api; - -import "google/api/metric.proto"; - -option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig"; -option java_multiple_files = true; -option java_outer_classname = "BillingProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// Billing related configuration of the service. -// -// The following example shows how to configure monitored resources and metrics -// for billing: -// -// monitored_resources: -// - type: library.googleapis.com/branch -// labels: -// - key: /city -// description: The city where the library branch is located in. -// - key: /name -// description: The name of the branch. -// metrics: -// - name: library.googleapis.com/book/borrowed_count -// metric_kind: DELTA -// value_type: INT64 -// billing: -// consumer_destinations: -// - monitored_resource: library.googleapis.com/branch -// metrics: -// - library.googleapis.com/book/borrowed_count -message Billing { - // Configuration of a specific billing destination (Currently only support - // bill against consumer project). - message BillingDestination { - // The monitored resource type. The type must be defined in - // [Service.monitored_resources][google.api.Service.monitored_resources] section. - string monitored_resource = 1; - - // Names of the metrics to report to this billing destination. - // Each name must be defined in [Service.metrics][google.api.Service.metrics] section. - repeated string metrics = 2; - } - - // Billing configurations for sending metrics to the consumer project. - // There can be multiple consumer destinations per service, each one must have - // a different monitored resource type. A metric can be used in at most - // one consumer destination. - repeated BillingDestination consumer_destinations = 8; -} diff --git a/third_party/google/api/client.proto b/third_party/google/api/client.proto deleted file mode 100644 index 56f8664aa..000000000 --- a/third_party/google/api/client.proto +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api; - -import "google/protobuf/descriptor.proto"; - -option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; -option java_multiple_files = true; -option java_outer_classname = "ClientProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -extend google.protobuf.MethodOptions { - // A definition of a client library method signature. - // - // In client libraries, each proto RPC corresponds to one or more methods - // which the end user is able to call, and calls the underlying RPC. - // Normally, this method receives a single argument (a struct or instance - // corresponding to the RPC request object). Defining this field will - // add one or more overloads providing flattened or simpler method signatures - // in some languages. 
- // - // The fields on the method signature are provided as a comma-separated - // string. - // - // For example, the proto RPC and annotation: - // - // rpc CreateSubscription(CreateSubscriptionRequest) - // returns (Subscription) { - // option (google.api.method_signature) = "name,topic"; - // } - // - // Would add the following Java overload (in addition to the method accepting - // the request object): - // - // public final Subscription createSubscription(String name, String topic) - // - // The following backwards-compatibility guidelines apply: - // - // * Adding this annotation to an unannotated method is backwards - // compatible. - // * Adding this annotation to a method which already has existing - // method signature annotations is backwards compatible if and only if - // the new method signature annotation is last in the sequence. - // * Modifying or removing an existing method signature annotation is - // a breaking change. - // * Re-ordering existing method signature annotations is a breaking - // change. - repeated string method_signature = 1051; -} - -extend google.protobuf.ServiceOptions { - // The hostname for this service. - // This should be specified with no prefix or protocol. - // - // Example: - // - // service Foo { - // option (google.api.default_host) = "foo.googleapi.com"; - // ... - // } - string default_host = 1049; - - // OAuth scopes needed for the client. - // - // Example: - // - // service Foo { - // option (google.api.oauth_scopes) = \ - // "https://www.googleapis.com/auth/cloud-platform"; - // ... - // } - // - // If there is more than one scope, use a comma-separated string: - // - // Example: - // - // service Foo { - // option (google.api.oauth_scopes) = \ - // "https://www.googleapis.com/auth/cloud-platform," - // "https://www.googleapis.com/auth/monitoring"; - // ... - // } - string oauth_scopes = 1050; -} diff --git a/third_party/google/api/config_change.proto b/third_party/google/api/config_change.proto deleted file mode 100644 index c36764a5a..000000000 --- a/third_party/google/api/config_change.proto +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api; - -option go_package = "google.golang.org/genproto/googleapis/api/configchange;configchange"; -option java_multiple_files = true; -option java_outer_classname = "ConfigChangeProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// Output generated from semantically comparing two versions of a service -// configuration. -// -// Includes detailed information about a field that have changed with -// applicable advice about potential consequences for the change, such as -// backwards-incompatibility. -message ConfigChange { - // Object hierarchy path to the change, with levels separated by a '.' - // character. For repeated fields, an applicable unique identifier field is - // used for the index (usually selector, name, or id). 
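For a concrete picture of what the `method_signature = "name,topic"` annotation above implies for a generated client, here is a hand-written Go sketch of the equivalent flattened overload. All names (`CreateSubscriptionRequest`, `Subscription`, `client`) are hypothetical placeholders, not generated code from this repository:

```go
package main

import "fmt"

// CreateSubscriptionRequest, Subscription and client are hypothetical
// stand-ins for generated Pub/Sub types; they are not part of this repository.
type CreateSubscriptionRequest struct {
	Name  string
	Topic string
}

type Subscription struct{ Name string }

type client struct{}

// createSubscription stands in for the request-object form of the RPC.
func (c *client) createSubscription(req *CreateSubscriptionRequest) (*Subscription, error) {
	return &Subscription{Name: req.Name}, nil
}

// CreateSubscription is the kind of flattened overload implied by
// option (google.api.method_signature) = "name,topic": the listed request
// fields become positional parameters, and the helper builds the request.
func (c *client) CreateSubscription(name, topic string) (*Subscription, error) {
	return c.createSubscription(&CreateSubscriptionRequest{Name: name, Topic: topic})
}

func main() {
	c := &client{}
	sub, _ := c.CreateSubscription("projects/p/subscriptions/s", "projects/p/topics/t")
	fmt.Println(sub.Name)
}
```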
For maps, the term - // 'key' is used. If the field has no unique identifier, the numeric index - // is used. - // Examples: - // - visibility.rules[selector=="google.LibraryService.ListBooks"].restriction - // - quota.metric_rules[selector=="google"].metric_costs[key=="reads"].value - // - logging.producer_destinations[0] - string element = 1; - - // Value of the changed object in the old Service configuration, - // in JSON format. This field will not be populated if ChangeType == ADDED. - string old_value = 2; - - // Value of the changed object in the new Service configuration, - // in JSON format. This field will not be populated if ChangeType == REMOVED. - string new_value = 3; - - // The type for this change, either ADDED, REMOVED, or MODIFIED. - ChangeType change_type = 4; - - // Collection of advice provided for this change, useful for determining the - // possible impact of this change. - repeated Advice advices = 5; -} - -// Generated advice about this change, used for providing more -// information about how a change will affect the existing service. -message Advice { - // Useful description for why this advice was applied and what actions should - // be taken to mitigate any implied risks. - string description = 2; -} - -// Classifies set of possible modifications to an object in the service -// configuration. -enum ChangeType { - // No value was provided. - CHANGE_TYPE_UNSPECIFIED = 0; - - // The changed object exists in the 'new' service configuration, but not - // in the 'old' service configuration. - ADDED = 1; - - // The changed object exists in the 'old' service configuration, but not - // in the 'new' service configuration. - REMOVED = 2; - - // The changed object exists in both service configurations, but its value - // is different. - MODIFIED = 3; -} diff --git a/third_party/google/api/consumer.proto b/third_party/google/api/consumer.proto deleted file mode 100644 index 0facc2eb1..000000000 --- a/third_party/google/api/consumer.proto +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2016 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api; - -option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig"; -option java_multiple_files = true; -option java_outer_classname = "ConsumerProto"; -option java_package = "com.google.api"; - -// A descriptor for defining project properties for a service. One service may -// have many consumer projects, and the service may want to behave differently -// depending on some properties on the project. For example, a project may be -// associated with a school, or a business, or a government agency, a business -// type property on the project may affect how a service responds to the client. -// This descriptor defines which properties are allowed to be set on a project. -// -// Example: -// -// project_properties: -// properties: -// - name: NO_WATERMARK -// type: BOOL -// description: Allows usage of the API without watermarks. 
-// - name: EXTENDED_TILE_CACHE_PERIOD -// type: INT64 -message ProjectProperties { - // List of per consumer project-specific properties. - repeated Property properties = 1; -} - -// Defines project properties. -// -// API services can define properties that can be assigned to consumer projects -// so that backends can perform response customization without having to make -// additional calls or maintain additional storage. For example, Maps API -// defines properties that controls map tile cache period, or whether to embed a -// watermark in a result. -// -// These values can be set via API producer console. Only API providers can -// define and set these properties. -message Property { - // Supported data type of the property values - enum PropertyType { - // The type is unspecified, and will result in an error. - UNSPECIFIED = 0; - - // The type is `int64`. - INT64 = 1; - - // The type is `bool`. - BOOL = 2; - - // The type is `string`. - STRING = 3; - - // The type is 'double'. - DOUBLE = 4; - } - - // The name of the property (a.k.a key). - string name = 1; - - // The type of this property. - PropertyType type = 2; - - // The description of the property - string description = 3; -} diff --git a/third_party/google/api/context.proto b/third_party/google/api/context.proto deleted file mode 100644 index 2d330f6f2..000000000 --- a/third_party/google/api/context.proto +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api; - -option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig"; -option java_multiple_files = true; -option java_outer_classname = "ContextProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// `Context` defines which contexts an API requests. -// -// Example: -// -// context: -// rules: -// - selector: "*" -// requested: -// - google.rpc.context.ProjectContext -// - google.rpc.context.OriginContext -// -// The above specifies that all methods in the API request -// `google.rpc.context.ProjectContext` and -// `google.rpc.context.OriginContext`. -// -// Available context types are defined in package -// `google.rpc.context`. -// -// This also provides mechanism to whitelist any protobuf message extension that -// can be sent in grpc metadata using “x-goog-ext--bin” and -// “x-goog-ext--jspb” format. For example, list any service -// specific protobuf types that can appear in grpc metadata as follows in your -// yaml file: -// -// Example: -// -// context: -// rules: -// - selector: "google.example.library.v1.LibraryService.CreateBook" -// allowed_request_extensions: -// - google.foo.v1.NewExtension -// allowed_response_extensions: -// - google.foo.v1.NewExtension -// -// You can also specify extension ID instead of fully qualified extension name -// here. -message Context { - // A list of RPC context rules that apply to individual API methods. 
- // - // **NOTE:** All service configuration rules follow "last one wins" order. - repeated ContextRule rules = 1; -} - -// A context rule provides information about the context for an individual API -// element. -message ContextRule { - // Selects the methods to which this rule applies. - // - // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. - string selector = 1; - - // A list of full type names of requested contexts. - repeated string requested = 2; - - // A list of full type names of provided contexts. - repeated string provided = 3; - - // A list of full type names or extension IDs of extensions allowed in grpc - // side channel from client to backend. - repeated string allowed_request_extensions = 4; - - // A list of full type names or extension IDs of extensions allowed in grpc - // side channel from backend to client. - repeated string allowed_response_extensions = 5; -} diff --git a/third_party/google/api/control.proto b/third_party/google/api/control.proto deleted file mode 100644 index e769f9783..000000000 --- a/third_party/google/api/control.proto +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api; - -option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig"; -option java_multiple_files = true; -option java_outer_classname = "ControlProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// Selects and configures the service controller used by the service. The -// service controller handles features like abuse, quota, billing, logging, -// monitoring, etc. -message Control { - // The service control environment to use. If empty, no control plane - // feature (like quota and billing) will be enabled. - string environment = 1; -} diff --git a/third_party/google/api/distribution.proto b/third_party/google/api/distribution.proto deleted file mode 100644 index ee45d9dcd..000000000 --- a/third_party/google/api/distribution.proto +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
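Several of the messages above (Backend.rules, Context.rules, and later Documentation.rules) repeat the note that service configuration rules follow "last one wins" order, with selectors that may end in a whole-component "*" wildcard. A minimal Go sketch of that resolution, under those assumptions and independent of any real runtime, could look like:

```go
package main

import (
	"fmt"
	"strings"
)

// matches reports whether a fully qualified element name matches a selector,
// following the syntax described for DocumentationRule.selector: an exact
// name, "*" for everything, or a trailing ".*" wildcard covering one or more
// whole components. Comma-separated selector lists are not handled here.
func matches(selector, name string) bool {
	if selector == "*" {
		return true
	}
	if strings.HasSuffix(selector, ".*") {
		return strings.HasPrefix(name, strings.TrimSuffix(selector, "*"))
	}
	return selector == name
}

type rule struct{ selector, value string }

// pick applies the "last one wins" ordering the service config comments call
// out: every rule is checked and the final matching one takes effect.
func pick(rules []rule, name string) (rule, bool) {
	var out rule
	found := false
	for _, r := range rules {
		if matches(r.selector, name) {
			out, found = r, true
		}
	}
	return out, found
}

func main() {
	rules := []rule{
		{"*", "default"},
		{"google.example.library.v1.LibraryService.*", "library-wide"},
		{"google.example.library.v1.LibraryService.CreateBook", "create-book"},
	}
	r, _ := pick(rules, "google.example.library.v1.LibraryService.CreateBook")
	fmt.Println(r.value) // create-book: the last matching rule wins
}
```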
-// - -syntax = "proto3"; - -package google.api; - -import "google/protobuf/any.proto"; -import "google/protobuf/timestamp.proto"; - -option go_package = "google.golang.org/genproto/googleapis/api/distribution;distribution"; -option java_multiple_files = true; -option java_outer_classname = "DistributionProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// `Distribution` contains summary statistics for a population of values. It -// optionally contains a histogram representing the distribution of those values -// across a set of buckets. -// -// The summary statistics are the count, mean, sum of the squared deviation from -// the mean, the minimum, and the maximum of the set of population of values. -// The histogram is based on a sequence of buckets and gives a count of values -// that fall into each bucket. The boundaries of the buckets are given either -// explicitly or by formulas for buckets of fixed or exponentially increasing -// widths. -// -// Although it is not forbidden, it is generally a bad idea to include -// non-finite values (infinities or NaNs) in the population of values, as this -// will render the `mean` and `sum_of_squared_deviation` fields meaningless. -message Distribution { - // The range of the population values. - message Range { - // The minimum of the population values. - double min = 1; - - // The maximum of the population values. - double max = 2; - } - - // `BucketOptions` describes the bucket boundaries used to create a histogram - // for the distribution. The buckets can be in a linear sequence, an - // exponential sequence, or each bucket can be specified explicitly. - // `BucketOptions` does not include the number of values in each bucket. - // - // A bucket has an inclusive lower bound and exclusive upper bound for the - // values that are counted for that bucket. The upper bound of a bucket must - // be strictly greater than the lower bound. The sequence of N buckets for a - // distribution consists of an underflow bucket (number 0), zero or more - // finite buckets (number 1 through N - 2) and an overflow bucket (number N - - // 1). The buckets are contiguous: the lower bound of bucket i (i > 0) is the - // same as the upper bound of bucket i - 1. The buckets span the whole range - // of finite values: lower bound of the underflow bucket is -infinity and the - // upper bound of the overflow bucket is +infinity. The finite buckets are - // so-called because both bounds are finite. - message BucketOptions { - // Specifies a linear sequence of buckets that all have the same width - // (except overflow and underflow). Each bucket represents a constant - // absolute uncertainty on the specific value in the bucket. - // - // There are `num_finite_buckets + 2` (= N) buckets. Bucket `i` has the - // following boundaries: - // - // Upper bound (0 <= i < N-1): offset + (width * i). - // Lower bound (1 <= i < N): offset + (width * (i - 1)). - message Linear { - // Must be greater than 0. - int32 num_finite_buckets = 1; - - // Must be greater than 0. - double width = 2; - - // Lower bound of the first bucket. - double offset = 3; - } - - // Specifies an exponential sequence of buckets that have a width that is - // proportional to the value of the lower bound. Each bucket represents a - // constant relative uncertainty on a specific value in the bucket. - // - // There are `num_finite_buckets + 2` (= N) buckets. Bucket `i` has the - // following boundaries: - // - // Upper bound (0 <= i < N-1): scale * (growth_factor ^ i). 
- // Lower bound (1 <= i < N): scale * (growth_factor ^ (i - 1)). - message Exponential { - // Must be greater than 0. - int32 num_finite_buckets = 1; - - // Must be greater than 1. - double growth_factor = 2; - - // Must be greater than 0. - double scale = 3; - } - - // Specifies a set of buckets with arbitrary widths. - // - // There are `size(bounds) + 1` (= N) buckets. Bucket `i` has the following - // boundaries: - // - // Upper bound (0 <= i < N-1): bounds[i] - // Lower bound (1 <= i < N); bounds[i - 1] - // - // The `bounds` field must contain at least one element. If `bounds` has - // only one element, then there are no finite buckets, and that single - // element is the common boundary of the overflow and underflow buckets. - message Explicit { - // The values must be monotonically increasing. - repeated double bounds = 1; - } - - // Exactly one of these three fields must be set. - oneof options { - // The linear bucket. - Linear linear_buckets = 1; - - // The exponential buckets. - Exponential exponential_buckets = 2; - - // The explicit buckets. - Explicit explicit_buckets = 3; - } - } - - // Exemplars are example points that may be used to annotate aggregated - // distribution values. They are metadata that gives information about a - // particular value added to a Distribution bucket, such as a trace ID that - // was active when a value was added. They may contain further information, - // such as a example values and timestamps, origin, etc. - message Exemplar { - // Value of the exemplar point. This value determines to which bucket the - // exemplar belongs. - double value = 1; - - // The observation (sampling) time of the above value. - google.protobuf.Timestamp timestamp = 2; - - // Contextual information about the example value. Examples are: - // - // Trace: type.googleapis.com/google.monitoring.v3.SpanContext - // - // Literal string: type.googleapis.com/google.protobuf.StringValue - // - // Labels dropped during aggregation: - // type.googleapis.com/google.monitoring.v3.DroppedLabels - // - // There may be only a single attachment of any given message type in a - // single exemplar, and this is enforced by the system. - repeated google.protobuf.Any attachments = 3; - } - - // The number of values in the population. Must be non-negative. This value - // must equal the sum of the values in `bucket_counts` if a histogram is - // provided. - int64 count = 1; - - // The arithmetic mean of the values in the population. If `count` is zero - // then this field must be zero. - double mean = 2; - - // The sum of squared deviations from the mean of the values in the - // population. For values x_i this is: - // - // Sum[i=1..n]((x_i - mean)^2) - // - // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition - // describes Welford's method for accumulating this sum in one pass. - // - // If `count` is zero then this field must be zero. - double sum_of_squared_deviation = 3; - - // If specified, contains the range of the population values. The field - // must not be present if the `count` is zero. - Range range = 4; - - // Defines the histogram bucket boundaries. If the distribution does not - // contain a histogram, then omit this field. - BucketOptions bucket_options = 6; - - // The number of values in each bucket of the histogram, as described in - // `bucket_options`. If the distribution does not have a histogram, then omit - // this field. 
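The bucket formulas above determine which histogram bucket a sample falls into. The following Go sketch applies them directly (underflow bucket 0, finite buckets 1..num_finite, overflow bucket last); it illustrates the documented boundary rules only, is not code used anywhere in this repository, and ignores floating-point edge cases at exact boundaries:

```go
package main

import (
	"fmt"
	"math"
)

// Bucket numbering per distribution.proto: bucket 0 is the underflow bucket,
// buckets 1..numFinite are the finite buckets, and the last bucket is the
// overflow bucket. Lower bounds are inclusive, upper bounds exclusive.

func linearBucket(value, offset, width float64, numFinite int) int {
	if value < offset {
		return 0 // underflow
	}
	i := int(math.Floor((value-offset)/width)) + 1
	if i > numFinite {
		return numFinite + 1 // overflow
	}
	return i
}

func exponentialBucket(value, scale, growth float64, numFinite int) int {
	if value < scale {
		return 0 // underflow: the first finite bucket starts at scale
	}
	i := int(math.Floor(math.Log(value/scale)/math.Log(growth))) + 1
	if i > numFinite {
		return numFinite + 1 // overflow
	}
	return i
}

func explicitBucket(value float64, bounds []float64) int {
	i := 0
	for _, b := range bounds {
		if value < b {
			break
		}
		i++
	}
	return i
}

func main() {
	// Linear: num_finite_buckets=3, width=10, offset=0 gives buckets
	// (-inf,0) [0,10) [10,20) [20,30) [30,+inf).
	fmt.Println(linearBucket(25, 0, 10, 3))          // 3
	fmt.Println(exponentialBucket(9, 1, 2, 4))       // 4: the [8,16) bucket
	fmt.Println(explicitBucket(5, []float64{1, 10})) // 1: the [1,10) bucket
}
```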
If there is a histogram, then the sum of the values in - // `bucket_counts` must equal the value in the `count` field of the - // distribution. - // - // If present, `bucket_counts` should contain N values, where N is the number - // of buckets specified in `bucket_options`. If you supply fewer than N - // values, the remaining values are assumed to be 0. - // - // The order of the values in `bucket_counts` follows the bucket numbering - // schemes described for the three bucket types. The first value must be the - // count for the underflow bucket (number 0). The next N-2 values are the - // counts for the finite buckets (number 1 through N-2). The N'th value in - // `bucket_counts` is the count for the overflow bucket (number N-1). - repeated int64 bucket_counts = 7; - - // Must be in increasing order of `value` field. - repeated Exemplar exemplars = 10; -} diff --git a/third_party/google/api/documentation.proto b/third_party/google/api/documentation.proto deleted file mode 100644 index 74cbf67e9..000000000 --- a/third_party/google/api/documentation.proto +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api; - -option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig"; -option java_multiple_files = true; -option java_outer_classname = "DocumentationProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// `Documentation` provides the information for describing a service. -// -// Example: -//
documentation:
-//   summary: >
-//     The Google Calendar API gives access
-//     to most calendar features.
-//   pages:
-//   - name: Overview
-//     content: (== include google/foo/overview.md ==)
-//   - name: Tutorial
-//     content: (== include google/foo/tutorial.md ==)
-//     subpages:
-//     - name: Java
-//       content: (== include google/foo/tutorial_java.md ==)
-//   rules:
-//   - selector: google.calendar.Calendar.Get
-//     description: >
-//       ...
-//   - selector: google.calendar.Calendar.Put
-//     description: >
-//       ...
-//
-// Documentation is provided in markdown syntax. In addition to -// standard markdown features, definition lists, tables and fenced -// code blocks are supported. Section headers can be provided and are -// interpreted relative to the section nesting of the context where -// a documentation fragment is embedded. -// -// Documentation from the IDL is merged with documentation defined -// via the config at normalization time, where documentation provided -// by config rules overrides IDL provided. -// -// A number of constructs specific to the API platform are supported -// in documentation text. -// -// In order to reference a proto element, the following -// notation can be used: -//
[fully.qualified.proto.name][]
-// To override the display text used for the link, this can be used: -//
[display text][fully.qualified.proto.name]
-// Text can be excluded from doc using the following notation: -//
(-- internal comment --)
-// -// A few directives are available in documentation. Note that -// directives must appear on a single line to be properly -// identified. The `include` directive includes a markdown file from -// an external source: -//
(== include path/to/file ==)
-// The `resource_for` directive marks a message to be the resource of -// a collection in REST view. If it is not specified, tools attempt -// to infer the resource from the operations in a collection: -//
(== resource_for v1.shelves.books ==)
-// The directive `suppress_warning` does not directly affect documentation -// and is documented together with service config validation. -message Documentation { - // A short summary of what the service does. Can only be provided by - // plain text. - string summary = 1; - - // The top level pages for the documentation set. - repeated Page pages = 5; - - // A list of documentation rules that apply to individual API elements. - // - // **NOTE:** All service configuration rules follow "last one wins" order. - repeated DocumentationRule rules = 3; - - // The URL to the root of documentation. - string documentation_root_url = 4; - - // Declares a single overview page. For example: - //
documentation:
-  //   summary: ...
-  //   overview: (== include overview.md ==)
-  //
- // This is a shortcut for the following declaration (using pages style): - //
documentation:
-  //   summary: ...
-  //   pages:
-  //   - name: Overview
-  //     content: (== include overview.md ==)
-  //
- // Note: you cannot specify both `overview` field and `pages` field. - string overview = 2; -} - -// A documentation rule provides information about individual API elements. -message DocumentationRule { - // The selector is a comma-separated list of patterns. Each pattern is a - // qualified name of the element which may end in "*", indicating a wildcard. - // Wildcards are only allowed at the end and for a whole component of the - // qualified name, i.e. "foo.*" is ok, but not "foo.b*" or "foo.*.bar". A - // wildcard will match one or more components. To specify a default for all - // applicable elements, the whole pattern "*" is used. - string selector = 1; - - // Description of the selected API(s). - string description = 2; - - // Deprecation description of the selected element(s). It can be provided if - // an element is marked as `deprecated`. - string deprecation_description = 3; -} - -// Represents a documentation page. A page can contain subpages to represent -// nested documentation set structure. -message Page { - // The name of the page. It will be used as an identity of the page to - // generate URI of the page, text of the link to this page in navigation, - // etc. The full page name (start from the root page name to this page - // concatenated with `.`) can be used as reference to the page in your - // documentation. For example: - //
pages:
-  // - name: Tutorial
-  //   content: (== include tutorial.md ==)
-  //   subpages:
-  //   - name: Java
-  //     content: (== include tutorial_java.md ==)
-  //
- // You can reference `Java` page using Markdown reference link syntax: - // `[Java][Tutorial.Java]`. - string name = 1; - - // The Markdown content of the page. You can use (== include {path} - // ==) to include content from a Markdown file. - string content = 2; - - // Subpages of this page. The order of subpages specified here will be - // honored in the generated docset. - repeated Page subpages = 3; -} diff --git a/third_party/google/api/endpoint.proto b/third_party/google/api/endpoint.proto deleted file mode 100644 index 2077334d2..000000000 --- a/third_party/google/api/endpoint.proto +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api; - -option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig"; -option java_multiple_files = true; -option java_outer_classname = "EndpointProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// `Endpoint` describes a network endpoint that serves a set of APIs. -// A service may expose any number of endpoints, and all endpoints share the -// same service configuration, such as quota configuration and monitoring -// configuration. -// -// Example service configuration: -// -// name: library-example.googleapis.com -// endpoints: -// # Below entry makes 'google.example.library.v1.Library' -// # API be served from endpoint address library-example.googleapis.com. -// # It also allows HTTP OPTIONS calls to be passed to the backend, for -// # it to decide whether the subsequent cross-origin request is -// # allowed to proceed. -// - name: library-example.googleapis.com -// allow_cors: true -message Endpoint { - // The canonical name of this endpoint. - string name = 1; - - // DEPRECATED: This field is no longer supported. Instead of using aliases, - // please specify multiple [google.api.Endpoint][google.api.Endpoint] for each of the intended - // aliases. - // - // Additional names that this endpoint will be hosted on. - repeated string aliases = 2 [deprecated = true]; - - // The list of features enabled on this endpoint. - repeated string features = 4; - - // The specification of an Internet routable address of API frontend that will - // handle requests to this [API - // Endpoint](https://cloud.google.com/apis/design/glossary). It should be - // either a valid IPv4 address or a fully-qualified domain name. For example, - // "8.8.8.8" or "myservice.appspot.com". - string target = 101; - - // Allowing - // [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka - // cross-domain traffic, would allow the backends served from this endpoint to - // receive and respond to HTTP OPTIONS requests. The response will be used by - // the browser to determine whether the subsequent cross-origin request is - // allowed to proceed. 
- bool allow_cors = 5; -} diff --git a/third_party/google/api/experimental/authorization_config.proto b/third_party/google/api/experimental/authorization_config.proto deleted file mode 100644 index 4fb24ecdb..000000000 --- a/third_party/google/api/experimental/authorization_config.proto +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api; - -option go_package = "google.golang.org/genproto/googleapis/api;api"; -option java_multiple_files = true; -option java_outer_classname = "AuthorizationConfigProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// Configuration of authorization. -// -// This section determines the authorization provider, if unspecified, then no -// authorization check will be done. -// -// Example: -// -// experimental: -// authorization: -// provider: firebaserules.googleapis.com -message AuthorizationConfig { - // The name of the authorization provider, such as - // firebaserules.googleapis.com. - string provider = 1; -} diff --git a/third_party/google/api/experimental/experimental.proto b/third_party/google/api/experimental/experimental.proto deleted file mode 100644 index 6f40d71f1..000000000 --- a/third_party/google/api/experimental/experimental.proto +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api; - -import "google/api/annotations.proto"; -import "google/api/experimental/authorization_config.proto"; - -option go_package = "google.golang.org/genproto/googleapis/api;api"; -option java_multiple_files = true; -option java_outer_classname = "ExperimentalProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// Experimental service configuration. These configuration options can -// only be used by whitelisted users. -message Experimental { - // Authorization configuration. - AuthorizationConfig authorization = 8; -} diff --git a/third_party/google/api/expr/artman_cel.yaml b/third_party/google/api/expr/artman_cel.yaml deleted file mode 100644 index 89d2d28b2..000000000 --- a/third_party/google/api/expr/artman_cel.yaml +++ /dev/null @@ -1,37 +0,0 @@ -# This file is auto-generated based on service config and could be -# incorrect. The API producer can manually edit it. Remove all the FIXMEs -# before sending this file out for review. 
-common: - api_name: cel - api_version: v1alpha1 - organization_name: google-cloud - proto_deps: - - name: google-common-protos - src_proto_paths: - - v1alpha1 - service_yaml: cel.yaml - gapic_yaml: v1alpha1/cel_gapic.yaml -artifacts: -- name: gapic_config - type: GAPIC_CONFIG -- name: java_gapic - type: GAPIC - language: JAVA -- name: python_gapic - type: GAPIC - language: PYTHON -- name: nodejs_gapic - type: GAPIC - language: NODEJS -- name: php_gapic - type: GAPIC - language: PHP -- name: go_gapic - type: GAPIC - language: GO -- name: ruby_gapic - type: GAPIC - language: RUBY -- name: csharp_gapic - type: GAPIC - language: CSHARP diff --git a/third_party/google/api/expr/cel.yaml b/third_party/google/api/expr/cel.yaml deleted file mode 100644 index bbe7fbde1..000000000 --- a/third_party/google/api/expr/cel.yaml +++ /dev/null @@ -1,61 +0,0 @@ -type: google.api.Service -config_version: 3 -name: cel.googleapis.com -title: Common Expression Language - -apis: -- name: google.api.expr.v1alpha1.ConformanceService -- name: google.api.expr.v1alpha1.CelService - -documentation: - summary: Defines common types for the Common Expression Language. - overview: |- - # Common Expression Language - - The Common Expression Language (CEL) implements common semantics for - expression evaluation, enabling different applications to more easily - interoperate. - - Key Applications - - * Security policy: organization have complex infrastructure and need - common tooling to reason about the system as a whole * Protocols: - expressions are a useful data type and require interoperability across - programming languages and platforms. - - - - Guiding philosophy: - - 1. Keep it small & fast. * CEL evaluates in linear time, is mutation - free, and not Turing-complete. This limitation is a feature of the language - design, which allows the implementation to evaluate orders of magnitude - faster than equivalently sandboxed JavaScript. 2. Make it extensible. * - CEL is designed to be embedded in applications, and allows for extensibility - via its context which allows for functions and data to be provided by the - software that embeds it. 3. Developer-friendly * The language is - approachable to developers. The initial spec was based on the experience of - developing Firebase Rules and usability testing many prior iterations. * - The library itself and accompanying toolings should be easy to adopt by - teams that seek to integrate CEL into their platforms. - - The required components of a system that supports CEL are: - - * The textual representation of an expression as written by a developer. - It is of similar syntax of expressions in C/C++/Java/JavaScript * A binary - representation of an expression. It is an abstract syntax tree (AST). * A - compiler library that converts the textual representation to the binary - representation. This can be done ahead of time (in the control plane) or - just before evaluation (in the data plane). * A context containing one or - more typed variables, often protobuf messages. Most use-case will use - attribute_context.proto * An evaluator library that takes the binary - format in the context and produces a result, usually a Boolean. 
- - Example of boolean conditions and object construction: - - ``` c // Condition account.balance >= transaction.withdrawal || - (account.overdraftProtection && account.overdraftLimit >= - transaction.withdrawal - account.balance) - - // Object construction common.GeoPoint{ latitude: 10.0, longitude: -5.5 } - ``` diff --git a/third_party/google/api/expr/v1alpha1/cel_gapic.yaml b/third_party/google/api/expr/v1alpha1/cel_gapic.yaml deleted file mode 100644 index 68a3fd0a2..000000000 --- a/third_party/google/api/expr/v1alpha1/cel_gapic.yaml +++ /dev/null @@ -1,246 +0,0 @@ -type: com.google.api.codegen.ConfigProto -config_schema_version: 1.0.0 -# The settings of generated code in a specific language. -language_settings: - java: - package_name: com.google.cloud.api.expr.v1alpha1 - python: - package_name: google.cloud.api.expr_v1alpha1.gapic - go: - package_name: cloud.google.com/go/expr/apiv1alpha1 - csharp: - package_name: Google.Api.Expr.V1alpha1 - ruby: - package_name: Google::Cloud::Api::Expr::V1alpha1 - php: - package_name: Google\Cloud\Api\Expr\V1alpha1 - nodejs: - package_name: expr.v1alpha1 -# A list of API interface configurations. -interfaces: - # The fully qualified name of the API interface. -- name: google.api.expr.v1alpha1.CelService - # A list of resource collection configurations. - # Consists of a name_pattern and an entity_name. - # The name_pattern is a pattern to describe the names of the resources of this - # collection, using the platform's conventions for URI patterns. A generator - # may use this to generate methods to compose and decompose such names. The - # pattern should use named placeholders as in `shelves/{shelf}/books/{book}`; - # those will be taken as hints for the parameter names of the generated - # methods. If empty, no name methods are generated. - # The entity_name is the name to be used as a basis for generated methods and - # classes. - collections: [] - # Definition for retryable codes. - retry_codes_def: - - name: idempotent - retry_codes: - - UNAVAILABLE - - name: non_idempotent - retry_codes: [] - # Definition for retry/backoff parameters. - retry_params_def: - - name: default - initial_retry_delay_millis: 100 - retry_delay_multiplier: 1.3 - max_retry_delay_millis: 60000 - initial_rpc_timeout_millis: 20000 - rpc_timeout_multiplier: 1 - max_rpc_timeout_millis: 20000 - total_timeout_millis: 600000 - # A list of method configurations. - # Common properties: - # - # name - The simple name of the method. - # - # flattening - Specifies the configuration for parameter flattening. - # Describes the parameter groups for which a generator should produce method - # overloads which allow a client to directly pass request message fields as - # method parameters. This information may or may not be used, depending on - # the target language. - # Consists of groups, which each represent a list of parameters to be - # flattened. Each parameter listed must be a field of the request message. - # - # required_fields - Fields that are always required for a request to be - # valid. - # - # resource_name_treatment - An enum that specifies how to treat the resource - # name formats defined in the field_name_patterns and - # response_field_name_patterns fields. - # UNSET: default value - # NONE: the collection configs will not be used by the generated code. - # VALIDATE: string fields will be validated by the client against the - # specified resource name formats. - # STATIC_TYPES: the client will use generated types for resource names. 
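The cel.yaml overview above closes with a sample boolean condition over `account` and `transaction`. To make its semantics concrete without pulling in a CEL evaluator, the same predicate written as plain Go (with illustrative `Account`/`Transaction` structs that are not part of this repository) is simply:

```go
package main

import "fmt"

// Plain-Go rendering of the boolean condition quoted in the CEL overview.
// Account and Transaction are illustrative types only; the point is that the
// CEL expression is an ordinary, side-effect-free boolean predicate.
type Account struct {
	Balance             float64
	OverdraftProtection bool
	OverdraftLimit      float64
}

type Transaction struct {
	Withdrawal float64
}

func allowed(a Account, t Transaction) bool {
	return a.Balance >= t.Withdrawal ||
		(a.OverdraftProtection && a.OverdraftLimit >= t.Withdrawal-a.Balance)
}

func main() {
	a := Account{Balance: 100, OverdraftProtection: true, OverdraftLimit: 50}
	fmt.Println(allowed(a, Transaction{Withdrawal: 120})) // true: covered by overdraft
	fmt.Println(allowed(a, Transaction{Withdrawal: 200})) // false
}
```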
- # - # page_streaming - Specifies the configuration for paging. - # Describes information for generating a method which transforms a paging - # list RPC into a stream of resources. - # Consists of a request and a response. - # The request specifies request information of the list method. It defines - # which fields match the paging pattern in the request. The request consists - # of a page_size_field and a token_field. The page_size_field is the name of - # the optional field specifying the maximum number of elements to be - # returned in the response. The token_field is the name of the field in the - # request containing the page token. - # The response specifies response information of the list method. It defines - # which fields match the paging pattern in the response. The response - # consists of a token_field and a resources_field. The token_field is the - # name of the field in the response containing the next page token. The - # resources_field is the name of the field in the response containing the - # list of resources belonging to the page. - # - # retry_codes_name - Specifies the configuration for retryable codes. The - # name must be defined in interfaces.retry_codes_def. - # - # retry_params_name - Specifies the configuration for retry/backoff - # parameters. The name must be defined in interfaces.retry_params_def. - # - # field_name_patterns - Maps the field name of the request type to - # entity_name of interfaces.collections. - # Specifies the string pattern that the field must follow. - # - # timeout_millis - Specifies the default timeout for a non-retrying call. If - # the call is retrying, refer to retry_params_name instead. - methods: - - name: Parse - flattening: - groups: - - parameters: - - cel_source - required_fields: - - cel_source - retry_codes_name: non_idempotent - retry_params_name: default - timeout_millis: 60000 - - name: Check - flattening: - groups: - - parameters: - - parsed_expr - required_fields: - - parsed_expr - retry_codes_name: non_idempotent - retry_params_name: default - timeout_millis: 60000 - - name: Eval - flattening: - groups: - - parameters: - required_fields: - retry_codes_name: non_idempotent - retry_params_name: default - timeout_millis: 60000 - # The fully qualified name of the API interface. -- name: google.api.expr.v1alpha1.ConformanceService - # A list of resource collection configurations. - # Consists of a name_pattern and an entity_name. - # The name_pattern is a pattern to describe the names of the resources of this - # collection, using the platform's conventions for URI patterns. A generator - # may use this to generate methods to compose and decompose such names. The - # pattern should use named placeholders as in `shelves/{shelf}/books/{book}`; - # those will be taken as hints for the parameter names of the generated - # methods. If empty, no name methods are generated. - # The entity_name is the name to be used as a basis for generated methods and - # classes. - collections: [] - # Definition for retryable codes. - retry_codes_def: - - name: idempotent - retry_codes: - - UNAVAILABLE - - name: non_idempotent - retry_codes: [] - # Definition for retry/backoff parameters. - retry_params_def: - - name: default - initial_retry_delay_millis: 100 - retry_delay_multiplier: 1.3 - max_retry_delay_millis: 60000 - initial_rpc_timeout_millis: 20000 - rpc_timeout_multiplier: 1 - max_rpc_timeout_millis: 20000 - total_timeout_millis: 600000 - # A list of method configurations. 
- # Common properties: - # - # name - The simple name of the method. - # - # flattening - Specifies the configuration for parameter flattening. - # Describes the parameter groups for which a generator should produce method - # overloads which allow a client to directly pass request message fields as - # method parameters. This information may or may not be used, depending on - # the target language. - # Consists of groups, which each represent a list of parameters to be - # flattened. Each parameter listed must be a field of the request message. - # - # required_fields - Fields that are always required for a request to be - # valid. - # - # resource_name_treatment - An enum that specifies how to treat the resource - # name formats defined in the field_name_patterns and - # response_field_name_patterns fields. - # UNSET: default value - # NONE: the collection configs will not be used by the generated code. - # VALIDATE: string fields will be validated by the client against the - # specified resource name formats. - # STATIC_TYPES: the client will use generated types for resource names. - # - # page_streaming - Specifies the configuration for paging. - # Describes information for generating a method which transforms a paging - # list RPC into a stream of resources. - # Consists of a request and a response. - # The request specifies request information of the list method. It defines - # which fields match the paging pattern in the request. The request consists - # of a page_size_field and a token_field. The page_size_field is the name of - # the optional field specifying the maximum number of elements to be - # returned in the response. The token_field is the name of the field in the - # request containing the page token. - # The response specifies response information of the list method. It defines - # which fields match the paging pattern in the response. The response - # consists of a token_field and a resources_field. The token_field is the - # name of the field in the response containing the next page token. The - # resources_field is the name of the field in the response containing the - # list of resources belonging to the page. - # - # retry_codes_name - Specifies the configuration for retryable codes. The - # name must be defined in interfaces.retry_codes_def. - # - # retry_params_name - Specifies the configuration for retry/backoff - # parameters. The name must be defined in interfaces.retry_params_def. - # - # field_name_patterns - Maps the field name of the request type to - # entity_name of interfaces.collections. - # Specifies the string pattern that the field must follow. - # - # timeout_millis - Specifies the default timeout for a non-retrying call. If - # the call is retrying, refer to retry_params_name instead. 
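The retry_params_def block above (100 ms initial delay, 1.3 multiplier, 60 s cap, 600 s total budget) describes an exponential backoff profile. One common reading of those fields, sketched in Go purely for illustration (the exact GAPIC generator semantics may differ), is:

```go
package main

import (
	"fmt"
	"time"
)

// backoffSchedule sketches one common interpretation of retry_params_def:
// each retry waits initial*multiplier^n, capped at max, until the total
// timeout budget is exhausted.
func backoffSchedule(initial, max, total time.Duration, multiplier float64) []time.Duration {
	var out []time.Duration
	delay, elapsed := initial, time.Duration(0)
	for elapsed+delay <= total {
		out = append(out, delay)
		elapsed += delay
		next := time.Duration(float64(delay) * multiplier)
		if next > max {
			next = max
		}
		delay = next
	}
	return out
}

func main() {
	// Default profile: 100ms initial, x1.3 growth, 60s cap, 600s budget.
	sched := backoffSchedule(100*time.Millisecond, 60*time.Second, 600*time.Second, 1.3)
	fmt.Println(sched[:5]) // [100ms 130ms 169ms 219.7ms 285.61ms]
}
```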
- methods: - - name: Parse - flattening: - groups: - - parameters: - - cel_source - required_fields: - - cel_source - retry_codes_name: non_idempotent - retry_params_name: default - timeout_millis: 60000 - - name: Check - flattening: - groups: - - parameters: - - parsed_expr - required_fields: - - parsed_expr - retry_codes_name: non_idempotent - retry_params_name: default - timeout_millis: 60000 - - name: Eval - flattening: - groups: - - parameters: - required_fields: - retry_codes_name: non_idempotent - retry_params_name: default - timeout_millis: 60000 diff --git a/third_party/google/api/expr/v1alpha1/cel_service.proto b/third_party/google/api/expr/v1alpha1/cel_service.proto deleted file mode 100644 index 0bf649ff5..000000000 --- a/third_party/google/api/expr/v1alpha1/cel_service.proto +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api.expr.v1alpha1; - -import "google/api/expr/v1alpha1/conformance_service.proto"; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/expr/v1alpha1;expr"; -option java_multiple_files = true; -option java_outer_classname = "CelServiceProto"; -option java_package = "com.google.api.expr.v1alpha1"; - -// Access a CEL implementation from another process or machine. -// A CEL implementation is decomposed as a parser, a static checker, -// and an evaluator. Every CEL implementation is expected to provide -// a server for this API. The API will be used for conformance testing, -// utilities, and execution as a service. -service CelService { - // Transforms CEL source text into a parsed representation. - rpc Parse(ParseRequest) returns (ParseResponse) {} - - // Runs static checks on a parsed CEL representation and return - // an annotated representation, or a set of issues. - rpc Check(CheckRequest) returns (CheckResponse) {} - - // Evaluates a parsed or annotation CEL representation given - // values of external bindings. - rpc Eval(EvalRequest) returns (EvalResponse) {} -} diff --git a/third_party/google/api/expr/v1alpha1/checked.proto b/third_party/google/api/expr/v1alpha1/checked.proto deleted file mode 100644 index 60dd09e20..000000000 --- a/third_party/google/api/expr/v1alpha1/checked.proto +++ /dev/null @@ -1,336 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -syntax = "proto3"; - -package google.api.expr.v1alpha1; - -import "google/api/expr/v1alpha1/syntax.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/struct.proto"; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/expr/v1alpha1;expr"; -option java_multiple_files = true; -option java_outer_classname = "DeclProto"; -option java_package = "com.google.api.expr.v1alpha1"; - -// Protos for representing CEL declarations and typed checked expressions. - -// A CEL expression which has been successfully type checked. -message CheckedExpr { - // A map from expression ids to resolved references. - // - // The following entries are in this table: - // - // - An Ident or Select expression is represented here if it resolves to a - // declaration. For instance, if `a.b.c` is represented by - // `select(select(id(a), b), c)`, and `a.b` resolves to a declaration, - // while `c` is a field selection, then the reference is attached to the - // nested select expression (but not to the id or or the outer select). - // In turn, if `a` resolves to a declaration and `b.c` are field selections, - // the reference is attached to the ident expression. - // - Every Call expression has an entry here, identifying the function being - // called. - // - Every CreateStruct expression for a message has an entry, identifying - // the message. - map reference_map = 2; - - // A map from expression ids to types. - // - // Every expression node which has a type different than DYN has a mapping - // here. If an expression has type DYN, it is omitted from this map to save - // space. - map type_map = 3; - - // The source info derived from input that generated the parsed `expr` and - // any optimizations made during the type-checking pass. - SourceInfo source_info = 5; - - // The checked expression. Semantically equivalent to the parsed `expr`, but - // may have structural differences. - Expr expr = 4; -} - -// Represents a CEL type. -message Type { - // List type with typed elements, e.g. `list`. - message ListType { - // The element type. - Type elem_type = 1; - } - - // Map type with parameterized key and value types, e.g. `map`. - message MapType { - // The type of the key. - Type key_type = 1; - - // The type of the value. - Type value_type = 2; - } - - // Function type with result and arg types. - message FunctionType { - // Result type of the function. - Type result_type = 1; - - // Argument types of the function. - repeated Type arg_types = 2; - } - - // Application defined abstract type. - message AbstractType { - // The fully qualified name of this abstract type. - string name = 1; - - // Parameter types for this abstract type. - repeated Type parameter_types = 2; - } - - // CEL primitive types. - enum PrimitiveType { - // Unspecified type. - PRIMITIVE_TYPE_UNSPECIFIED = 0; - - // Boolean type. - BOOL = 1; - - // Int64 type. - // - // Proto-based integer values are widened to int64. - INT64 = 2; - - // Uint64 type. - // - // Proto-based unsigned integer values are widened to uint64. - UINT64 = 3; - - // Double type. - // - // Proto-based float values are widened to double values. - DOUBLE = 4; - - // String type. - STRING = 5; - - // Bytes type. - BYTES = 6; - } - - // Well-known protobuf types treated with first-class support in CEL. - enum WellKnownType { - // Unspecified type. - WELL_KNOWN_TYPE_UNSPECIFIED = 0; - - // Well-known protobuf.Any type. - // - // Any types are a polymorphic message type. 
During type-checking they are - // treated like `DYN` types, but at runtime they are resolved to a specific - // message type specified at evaluation time. - ANY = 1; - - // Well-known protobuf.Timestamp type, internally referenced as `timestamp`. - TIMESTAMP = 2; - - // Well-known protobuf.Duration type, internally referenced as `duration`. - DURATION = 3; - } - - // The kind of type. - oneof type_kind { - // Dynamic type. - google.protobuf.Empty dyn = 1; - - // Null value. - google.protobuf.NullValue null = 2; - - // Primitive types: `true`, `1u`, `-2.0`, `'string'`, `b'bytes'`. - PrimitiveType primitive = 3; - - // Wrapper of a primitive type, e.g. `google.protobuf.Int64Value`. - PrimitiveType wrapper = 4; - - // Well-known protobuf type such as `google.protobuf.Timestamp`. - WellKnownType well_known = 5; - - // Parameterized list with elements of `list_type`, e.g. `list`. - ListType list_type = 6; - - // Parameterized map with typed keys and values. - MapType map_type = 7; - - // Function type. - FunctionType function = 8; - - // Protocol buffer message type. - // - // The `message_type` string specifies the qualified message type name. For - // example, `google.plus.Profile`. - string message_type = 9; - - // Type param type. - // - // The `type_param` string specifies the type parameter name, e.g. `list` - // would be a `list_type` whose element type was a `type_param` type - // named `E`. - string type_param = 10; - - // Type type. - // - // The `type` value specifies the target type. e.g. int is type with a - // target type of `Primitive.INT`. - Type type = 11; - - // Error type. - // - // During type-checking if an expression is an error, its type is propagated - // as the `ERROR` type. This permits the type-checker to discover other - // errors present in the expression. - google.protobuf.Empty error = 12; - - // Abstract, application defined type. - AbstractType abstract_type = 14; - } -} - -// Represents a declaration of a named value or function. -// -// A declaration is part of the contract between the expression, the agent -// evaluating that expression, and the caller requesting evaluation. -message Decl { - // Identifier declaration which specifies its type and optional `Expr` value. - // - // An identifier without a value is a declaration that must be provided at - // evaluation time. An identifier with a value should resolve to a constant, - // but may be used in conjunction with other identifiers bound at evaluation - // time. - message IdentDecl { - // Required. The type of the identifier. - Type type = 1; - - // The constant value of the identifier. If not specified, the identifier - // must be supplied at evaluation time. - Constant value = 2; - - // Documentation string for the identifier. - string doc = 3; - } - - // Function declaration specifies one or more overloads which indicate the - // function's parameter types and return type, and may optionally specify a - // function definition in terms of CEL expressions. - // - // Functions have no observable side-effects (there may be side-effects like - // logging which are not observable from CEL). - message FunctionDecl { - // An overload indicates a function's parameter types and return type, and - // may optionally include a function body described in terms of - // [Expr][google.api.expr.v1alpha1.Expr] values. - // - // Functions overloads are declared in either a function or method - // call-style. For methods, the `params[0]` is the expected type of the - // target receiver. 
- // - // Overloads must have non-overlapping argument types after erasure of all - // parameterized type variables (similar as type erasure in Java). - message Overload { - // Required. Globally unique overload name of the function which reflects - // the function name and argument types. - // - // This will be used by a [Reference][google.api.expr.v1alpha1.Reference] - // to indicate the `overload_id` that was resolved for the function - // `name`. - string overload_id = 1; - - // List of function parameter [Type][google.api.expr.v1alpha1.Type] - // values. - // - // Param types are disjoint after generic type parameters have been - // replaced with the type `DYN`. Since the `DYN` type is compatible with - // any other type, this means that if `A` is a type parameter, the - // function types `int` and `int` are not disjoint. Likewise, - // `map` is not disjoint from `map`. - // - // When the `result_type` of a function is a generic type param, the - // type param name also appears as the `type` of on at least one params. - repeated Type params = 2; - - // The type param names associated with the function declaration. - // - // For example, `function ex(K key, map map) : V` would yield - // the type params of `K, V`. - repeated string type_params = 3; - - // Required. The result type of the function. For example, the operator - // `string.isEmpty()` would have `result_type` of `kind: BOOL`. - Type result_type = 4; - - // Whether the function is to be used in a method call-style `x.f(...)` - // of a function call-style `f(x, ...)`. - // - // For methods, the first parameter declaration, `params[0]` is the - // expected type of the target receiver. - bool is_instance_function = 5; - - // Documentation string for the overload. - string doc = 6; - } - - // Required. List of function overloads, must contain at least one overload. - repeated Overload overloads = 1; - } - - // The fully qualified name of the declaration. - // - // Declarations are organized in containers and this represents the full path - // to the declaration in its container, as in `google.api.expr.Decl`. - // - // Declarations used as - // [FunctionDecl.Overload][google.api.expr.v1alpha1.Decl.FunctionDecl.Overload] - // parameters may or may not have a name depending on whether the overload is - // function declaration or a function definition containing a result - // [Expr][google.api.expr.v1alpha1.Expr]. - string name = 1; - - // Required. The declaration kind. - oneof decl_kind { - // Identifier declaration. - IdentDecl ident = 2; - - // Function declaration. - FunctionDecl function = 3; - } -} - -// Describes a resolved reference to a declaration. -message Reference { - // The fully qualified name of the declaration. - string name = 1; - - // For references to functions, this is a list of `Overload.overload_id` - // values which match according to typing rules. - // - // If the list has more than one element, overload resolution among the - // presented candidates must happen at runtime because of dynamic types. The - // type checker attempts to narrow down this list as much as possible. - // - // Empty if this is not a reference to a - // [Decl.FunctionDecl][google.api.expr.v1alpha1.Decl.FunctionDecl]. - repeated string overload_id = 3; - - // For references to constants, this may contain the value of the - // constant if known at compile time. 
- Constant value = 4; -} diff --git a/third_party/google/api/expr/v1alpha1/conformance_service.proto b/third_party/google/api/expr/v1alpha1/conformance_service.proto deleted file mode 100644 index 7a9321a0e..000000000 --- a/third_party/google/api/expr/v1alpha1/conformance_service.proto +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api.expr.v1alpha1; - -import "google/api/expr/v1alpha1/checked.proto"; -import "google/api/expr/v1alpha1/eval.proto"; -import "google/api/expr/v1alpha1/syntax.proto"; -import "google/rpc/status.proto"; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/expr/v1alpha1;expr"; -option java_multiple_files = true; -option java_outer_classname = "ConformanceServiceProto"; -option java_package = "com.google.api.expr.v1alpha1"; - -// Access a CEL implementation from another process or machine. -// A CEL implementation is decomposed as a parser, a static checker, -// and an evaluator. Every CEL implementation is expected to provide -// a server for this API. The API will be used for conformance testing -// and other utilities. -service ConformanceService { - // Transforms CEL source text into a parsed representation. - rpc Parse(ParseRequest) returns (ParseResponse) {} - - // Runs static checks on a parsed CEL representation and return - // an annotated representation, or a set of issues. - rpc Check(CheckRequest) returns (CheckResponse) {} - - // Evaluates a parsed or annotation CEL representation given - // values of external bindings. - rpc Eval(EvalRequest) returns (EvalResponse) {} -} - -// Request message for the Parse method. -message ParseRequest { - // Required. Source text in CEL syntax. - string cel_source = 1; - - // Tag for version of CEL syntax, for future use. - string syntax_version = 2; - - // File or resource for source text, used in - // [SourceInfo][google.api.expr.v1alpha1.SourceInfo]. - string source_location = 3; - - // Prevent macro expansion. See "Macros" in Language Defiinition. - bool disable_macros = 4; -} - -// Response message for the Parse method. -message ParseResponse { - // The parsed representation, or unset if parsing failed. - ParsedExpr parsed_expr = 1; - - // Any number of issues with [StatusDetails][] as the details. - repeated google.rpc.Status issues = 2; -} - -// Request message for the Check method. -message CheckRequest { - // Required. The parsed representation of the CEL program. - ParsedExpr parsed_expr = 1; - - // Declarations of types for external variables and functions. - // Required if program uses external variables or functions - // not in the default environment. - repeated Decl type_env = 2; - - // The protocol buffer context. See "Name Resolution" in the - // Language Definition. - string container = 3; - - // If true, use only the declarations in - // [type_env][google.api.expr.v1alpha1.CheckRequest.type_env]. 
If false - // (default), add declarations for the standard definitions to the type - // environment. See "Standard Definitions" in the Language Definition. - bool no_std_env = 4; -} - -// Response message for the Check method. -message CheckResponse { - // The annotated representation, or unset if checking failed. - CheckedExpr checked_expr = 1; - - // Any number of issues with [StatusDetails][] as the details. - repeated google.rpc.Status issues = 2; -} - -// Request message for the Eval method. -message EvalRequest { - // Required. Either the parsed or annotated representation of the CEL program. - oneof expr_kind { - // Evaluate based on the parsed representation. - ParsedExpr parsed_expr = 1; - - // Evaluate based on the checked representation. - CheckedExpr checked_expr = 2; - } - - // Bindings for the external variables. The types SHOULD be compatible - // with the type environment in - // [CheckRequest][google.api.expr.v1alpha1.CheckRequest], if checked. - map bindings = 3; - - // SHOULD be the same container as used in - // [CheckRequest][google.api.expr.v1alpha1.CheckRequest], if checked. - string container = 4; -} - -// Response message for the Eval method. -message EvalResponse { - // The execution result, or unset if execution couldn't start. - ExprValue result = 1; - - // Any number of issues with [StatusDetails][] as the details. - // Note that CEL execution errors are reified into - // [ExprValue][google.api.expr.v1alpha1.ExprValue]. Nevertheless, we'll allow - // out-of-band issues to be raised, which also makes the replies more regular. - repeated google.rpc.Status issues = 2; -} - -// Warnings or errors in service execution are represented by -// [google.rpc.Status][google.rpc.Status] messages, with the following message -// in the details field. -message IssueDetails { - // Severities of issues. - enum Severity { - // An unspecified severity. - SEVERITY_UNSPECIFIED = 0; - - // Deprecation issue for statements and method that may no longer be - // supported or maintained. - DEPRECATION = 1; - - // Warnings such as: unused variables. - WARNING = 2; - - // Errors such as: unmatched curly braces or variable redefinition. - ERROR = 3; - } - - // The severity of the issue. - Severity severity = 1; - - // Position in the source, if known. - SourcePosition position = 2; - - // Expression ID from [Expr][google.api.expr.v1alpha1.Expr], 0 if unknown. - int64 id = 3; -} diff --git a/third_party/google/api/expr/v1alpha1/eval.proto b/third_party/google/api/expr/v1alpha1/eval.proto deleted file mode 100644 index f516ba6bc..000000000 --- a/third_party/google/api/expr/v1alpha1/eval.proto +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -syntax = "proto3"; - -package google.api.expr.v1alpha1; - -import "google/api/expr/v1alpha1/value.proto"; -import "google/rpc/status.proto"; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/expr/v1alpha1;expr"; -option java_multiple_files = true; -option java_outer_classname = "EvalProto"; -option java_package = "com.google.api.expr.v1alpha1"; - -// The state of an evaluation. -// -// Can represent an inital, partial, or completed state of evaluation. -message EvalState { - // A single evalution result. - message Result { - // The id of the expression this result if for. - int64 expr = 1; - - // The index in `values` of the resulting value. - int64 value = 2; - } - - // The unique values referenced in this message. - repeated ExprValue values = 1; - - // An ordered list of results. - // - // Tracks the flow of evaluation through the expression. - // May be sparse. - repeated Result results = 3; -} - -// The value of an evaluated expression. -message ExprValue { - // An expression can resolve to a value, error or unknown. - oneof kind { - // A concrete value. - Value value = 1; - - // The set of errors in the critical path of evalution. - // - // Only errors in the critical path are included. For example, - // `( || true) && ` will only result in ``, - // while ` || ` will result in both `` and - // ``. - // - // Errors cause by the presence of other errors are not included in the - // set. For example `.foo`, `foo()`, and ` + 1` will - // only result in ``. - // - // Multiple errors *might* be included when evaluation could result - // in different errors. For example ` + ` and - // `foo(, )` may result in ``, `` or both. - // The exact subset of errors included for this case is unspecified and - // depends on the implementation details of the evaluator. - ErrorSet error = 2; - - // The set of unknowns in the critical path of evaluation. - // - // Unknown behaves identically to Error with regards to propagation. - // Specifically, only unknowns in the critical path are included, unknowns - // caused by the presence of other unknowns are not included, and multiple - // unknowns *might* be included included when evaluation could result in - // different unknowns. For example: - // - // ( || true) && -> - // || -> - // .foo -> - // foo() -> - // + -> or - // - // Unknown takes precidence over Error in cases where a `Value` can short - // circuit the result: - // - // || -> - // && -> - // - // Errors take precidence in all other cases: - // - // + -> - // foo(, ) -> - UnknownSet unknown = 3; - } -} - -// A set of errors. -// -// The errors included depend on the context. See `ExprValue.error`. -message ErrorSet { - // The errors in the set. - repeated google.rpc.Status errors = 1; -} - -// A set of expressions for which the value is unknown. -// -// The unknowns included depend on the context. See `ExprValue.unknown`. -message UnknownSet { - // The ids of the expressions with unknown values. - repeated int64 exprs = 1; -} diff --git a/third_party/google/api/expr/v1alpha1/explain.proto b/third_party/google/api/expr/v1alpha1/explain.proto deleted file mode 100644 index 089e144a1..000000000 --- a/third_party/google/api/expr/v1alpha1/explain.proto +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api.expr.v1alpha1; - -import "google/api/expr/v1alpha1/value.proto"; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/expr/v1alpha1;expr"; -option java_multiple_files = true; -option java_outer_classname = "ExplainProto"; -option java_package = "com.google.api.expr.v1alpha1"; - -// Values of intermediate expressions produced when evaluating expression. -// Deprecated, use `EvalState` instead. -message Explain { - option deprecated = true; - - // ID and value index of one step. - message ExprStep { - // ID of corresponding Expr node. - int64 id = 1; - - // Index of the value in the values list. - int32 value_index = 2; - } - - // All of the observed values. - // - // The field value_index is an index in the values list. - // Separating values from steps is needed to remove redundant values. - repeated Value values = 1; - - // List of steps. - // - // Repeated evaluations of the same expression generate new ExprStep - // instances. The order of such ExprStep instances matches the order of - // elements returned by Comprehension.iter_range. - repeated ExprStep expr_steps = 2; -} diff --git a/third_party/google/api/expr/v1alpha1/syntax.proto b/third_party/google/api/expr/v1alpha1/syntax.proto deleted file mode 100644 index 4a3cb907a..000000000 --- a/third_party/google/api/expr/v1alpha1/syntax.proto +++ /dev/null @@ -1,322 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api.expr.v1alpha1; - -import "google/protobuf/duration.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/timestamp.proto"; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/expr/v1alpha1;expr"; -option java_multiple_files = true; -option java_outer_classname = "SyntaxProto"; -option java_package = "com.google.api.expr.v1alpha1"; - -// A representation of the abstract syntax of the Common Expression Language. - -// An expression together with source information as returned by the parser. -message ParsedExpr { - // The parsed expression. - Expr expr = 2; - - // The source info derived from input that generated the parsed `expr`. - SourceInfo source_info = 3; -} - -// An abstract representation of a common expression. -// -// Expressions are abstractly represented as a collection of identifiers, -// select statements, function calls, literals, and comprehensions. All -// operators with the exception of the '.' operator are modelled as function -// calls. 
This makes it easy to represent new operators into the existing AST. -// -// All references within expressions must resolve to a -// [Decl][google.api.expr.v1alpha1.Decl] provided at type-check for an -// expression to be valid. A reference may either be a bare identifier `name` or -// a qualified identifier `google.api.name`. References may either refer to a -// value or a function declaration. -// -// For example, the expression `google.api.name.startsWith('expr')` references -// the declaration `google.api.name` within a -// [Expr.Select][google.api.expr.v1alpha1.Expr.Select] expression, and the -// function declaration `startsWith`. -message Expr { - // An identifier expression. e.g. `request`. - message Ident { - // Required. Holds a single, unqualified identifier, possibly preceded by a - // '.'. - // - // Qualified names are represented by the - // [Expr.Select][google.api.expr.v1alpha1.Expr.Select] expression. - string name = 1; - } - - // A field selection expression. e.g. `request.auth`. - message Select { - // Required. The target of the selection expression. - // - // For example, in the select expression `request.auth`, the `request` - // portion of the expression is the `operand`. - Expr operand = 1; - - // Required. The name of the field to select. - // - // For example, in the select expression `request.auth`, the `auth` portion - // of the expression would be the `field`. - string field = 2; - - // Whether the select is to be interpreted as a field presence test. - // - // This results from the macro `has(request.auth)`. - bool test_only = 3; - } - - // A call expression, including calls to predefined functions and operators. - // - // For example, `value == 10`, `size(map_value)`. - message Call { - // The target of an method call-style expression. For example, `x` in - // `x.f()`. - Expr target = 1; - - // Required. The name of the function or method being called. - string function = 2; - - // The arguments. - repeated Expr args = 3; - } - - // A list creation expression. - // - // Lists may either be homogenous, e.g. `[1, 2, 3]`, or heterogenous, e.g. - // `dyn([1, 'hello', 2.0])` - message CreateList { - // The elements part of the list. - repeated Expr elements = 1; - } - - // A map or message creation expression. - // - // Maps are constructed as `{'key_name': 'value'}`. Message construction is - // similar, but prefixed with a type name and composed of field ids: - // `types.MyType{field_id: 'value'}`. - message CreateStruct { - // Represents an entry. - message Entry { - // Required. An id assigned to this node by the parser which is unique - // in a given expression tree. This is used to associate type - // information and other attributes to the node. - int64 id = 1; - - // The `Entry` key kinds. - oneof key_kind { - // The field key for a message creator statement. - string field_key = 2; - - // The key expression for a map creation statement. - Expr map_key = 3; - } - - // Required. The value assigned to the key. - Expr value = 4; - } - - // The type name of the message to be created, empty when creating map - // literals. - string message_name = 1; - - // The entries in the creation expression. - repeated Entry entries = 2; - } - - // A comprehension expression applied to a list or map. - // - // Comprehensions are not part of the core syntax, but enabled with macros. - // A macro matches a specific call signature within a parsed AST and replaces - // the call with an alternate AST block. Macro expansion happens at parse - // time. 
- // - // The following macros are supported within CEL: - // - // Aggregate type macros may be applied to all elements in a list or all keys - // in a map: - // - // * `all`, `exists`, `exists_one` - test a predicate expression against - // the inputs and return `true` if the predicate is satisfied for all, - // any, or only one value `list.all(x, x < 10)`. - // * `filter` - test a predicate expression against the inputs and return - // the subset of elements which satisfy the predicate: - // `payments.filter(p, p > 1000)`. - // * `map` - apply an expression to all elements in the input and return the - // output aggregate type: `[1, 2, 3].map(i, i * i)`. - // - // The `has(m.x)` macro tests whether the property `x` is present in struct - // `m`. The semantics of this macro depend on the type of `m`. For proto2 - // messages `has(m.x)` is defined as 'defined, but not set`. For proto3, the - // macro tests whether the property is set to its default. For map and struct - // types, the macro tests whether the property `x` is defined on `m`. - message Comprehension { - // The name of the iteration variable. - string iter_var = 1; - - // The range over which var iterates. - Expr iter_range = 2; - - // The name of the variable used for accumulation of the result. - string accu_var = 3; - - // The initial value of the accumulator. - Expr accu_init = 4; - - // An expression which can contain iter_var and accu_var. - // - // Returns false when the result has been computed and may be used as - // a hint to short-circuit the remainder of the comprehension. - Expr loop_condition = 5; - - // An expression which can contain iter_var and accu_var. - // - // Computes the next value of accu_var. - Expr loop_step = 6; - - // An expression which can contain accu_var. - // - // Computes the result. - Expr result = 7; - } - - // Required. An id assigned to this node by the parser which is unique in a - // given expression tree. This is used to associate type information and other - // attributes to a node in the parse tree. - int64 id = 2; - - // Required. Variants of expressions. - oneof expr_kind { - // A literal expression. - Constant const_expr = 3; - - // An identifier expression. - Ident ident_expr = 4; - - // A field selection expression, e.g. `request.auth`. - Select select_expr = 5; - - // A call expression, including calls to predefined functions and operators. - Call call_expr = 6; - - // A list creation expression. - CreateList list_expr = 7; - - // A map or message creation expression. - CreateStruct struct_expr = 8; - - // A comprehension expression. - Comprehension comprehension_expr = 9; - } -} - -// Represents a primitive literal. -// -// Named 'Constant' here for backwards compatibility. -// -// This is similar as the primitives supported in the well-known type -// `google.protobuf.Value`, but richer so it can represent CEL's full range of -// primitives. -// -// Lists and structs are not included as constants as these aggregate types may -// contain [Expr][google.api.expr.v1alpha1.Expr] elements which require -// evaluation and are thus not constant. -// -// Examples of literals include: `"hello"`, `b'bytes'`, `1u`, `4.2`, `-2`, -// `true`, `null`. -message Constant { - // Required. The valid constant kinds. - oneof constant_kind { - // null value. - google.protobuf.NullValue null_value = 1; - - // boolean value. - bool bool_value = 2; - - // int64 value. - int64 int64_value = 3; - - // uint64 value. - uint64 uint64_value = 4; - - // double value. 
- double double_value = 5; - - // string value. - string string_value = 6; - - // bytes value. - bytes bytes_value = 7; - - // protobuf.Duration value. - // - // Deprecated: duration is no longer considered a builtin cel type. - google.protobuf.Duration duration_value = 8 [deprecated = true]; - - // protobuf.Timestamp value. - // - // Deprecated: timestamp is no longer considered a builtin cel type. - google.protobuf.Timestamp timestamp_value = 9 [deprecated = true]; - } -} - -// Source information collected at parse time. -message SourceInfo { - // The syntax version of the source, e.g. `cel1`. - string syntax_version = 1; - - // The location name. All position information attached to an expression is - // relative to this location. - // - // The location could be a file, UI element, or similar. For example, - // `acme/app/AnvilPolicy.cel`. - string location = 2; - - // Monotonically increasing list of character offsets where newlines appear. - // - // The line number of a given position is the index `i` where for a given - // `id` the `line_offsets[i] < id_positions[id] < line_offsets[i+1]`. The - // column may be derivd from `id_positions[id] - line_offsets[i]`. - repeated int32 line_offsets = 3; - - // A map from the parse node id (e.g. `Expr.id`) to the character offset - // within source. - map positions = 4; -} - -// A specific position in source. -message SourcePosition { - // The soucre location name (e.g. file name). - string location = 1; - - // The character offset. - int32 offset = 2; - - // The 1-based index of the starting line in the source text - // where the issue occurs, or 0 if unknown. - int32 line = 3; - - // The 0-based index of the starting position within the line of source text - // where the issue occurs. Only meaningful if line is nonzero. - int32 column = 4; -} diff --git a/third_party/google/api/expr/v1alpha1/value.proto b/third_party/google/api/expr/v1alpha1/value.proto deleted file mode 100644 index a0508ed91..000000000 --- a/third_party/google/api/expr/v1alpha1/value.proto +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api.expr.v1alpha1; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/expr/v1alpha1;expr"; -option java_multiple_files = true; -option java_outer_classname = "ValueProto"; -option java_package = "com.google.api.expr.v1alpha1"; - -// Contains representations for CEL runtime values. - -// Represents a CEL value. -// -// This is similar to `google.protobuf.Value`, but can represent CEL's full -// range of values. -message Value { - // Required. The valid kinds of values. - oneof kind { - // Null value. - google.protobuf.NullValue null_value = 1; - - // Boolean value. - bool bool_value = 2; - - // Signed integer value. - int64 int64_value = 3; - - // Unsigned integer value. 
- uint64 uint64_value = 4; - - // Floating point value. - double double_value = 5; - - // UTF-8 string value. - string string_value = 6; - - // Byte string value. - bytes bytes_value = 7; - - // An enum value. - EnumValue enum_value = 9; - - // The proto message backing an object value. - google.protobuf.Any object_value = 10; - - // Map value. - MapValue map_value = 11; - - // List value. - ListValue list_value = 12; - - // Type value. - string type_value = 15; - } -} - -// An enum value. -message EnumValue { - // The fully qualified name of the enum type. - string type = 1; - - // The value of the enum. - int32 value = 2; -} - -// A list. -// -// Wrapped in a message so 'not set' and empty can be differentiated, which is -// required for use in a 'oneof'. -message ListValue { - // The ordered values in the list. - repeated Value values = 1; -} - -// A map. -// -// Wrapped in a message so 'not set' and empty can be differentiated, which is -// required for use in a 'oneof'. -message MapValue { - // An entry in the map. - message Entry { - // The key. - // - // Must be unique with in the map. - // Currently only boolean, int, uint, and string values can be keys. - Value key = 1; - - // The value. - Value value = 2; - } - - // The set of map entries. - // - // CEL has fewer restrictions on keys, so a protobuf map represenation - // cannot be used. - repeated Entry entries = 1; -} diff --git a/third_party/google/api/expr/v1beta1/decl.proto b/third_party/google/api/expr/v1beta1/decl.proto deleted file mode 100644 index 6d079b822..000000000 --- a/third_party/google/api/expr/v1beta1/decl.proto +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api.expr.v1beta1; - -import "google/api/expr/v1beta1/expr.proto"; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/expr/v1beta1;expr"; -option java_multiple_files = true; -option java_outer_classname = "DeclProto"; -option java_package = "com.google.api.expr.v1beta1"; - -// A declaration. -message Decl { - // The id of the declaration. - int32 id = 1; - - // The name of the declaration. - string name = 2; - - // The documentation string for the declaration. - string doc = 3; - - // The kind of declaration. - oneof kind { - // An identifier declaration. - IdentDecl ident = 4; - - // A function declaration. - FunctionDecl function = 5; - } -} - -// The declared type of a variable. -// -// Extends runtime type values with extra information used for type checking -// and dispatching. -message DeclType { - // The expression id of the declared type, if applicable. - int32 id = 1; - - // The type name, e.g. 'int', 'my.type.Type' or 'T' - string type = 2; - - // An ordered list of type parameters, e.g. ``. - // Only applies to a subset of types, e.g. `map`, `list`. - repeated DeclType type_params = 4; -} - -// An identifier declaration. -message IdentDecl { - // Optional type of the identifier. 
- DeclType type = 3; - - // Optional value of the identifier. - Expr value = 4; -} - -// A function declaration. -message FunctionDecl { - // The function arguments. - repeated IdentDecl args = 1; - - // Optional declared return type. - DeclType return_type = 2; - - // If the first argument of the function is the receiver. - bool receiver_function = 3; -} diff --git a/third_party/google/api/expr/v1beta1/eval.proto b/third_party/google/api/expr/v1beta1/eval.proto deleted file mode 100644 index cdbe6ac3f..000000000 --- a/third_party/google/api/expr/v1beta1/eval.proto +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api.expr.v1beta1; - -import "google/api/expr/v1beta1/value.proto"; -import "google/rpc/status.proto"; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/expr/v1beta1;expr"; -option java_multiple_files = true; -option java_outer_classname = "EvalProto"; -option java_package = "com.google.api.expr.v1beta1"; - -// The state of an evaluation. -// -// Can represent an initial, partial, or completed state of evaluation. -message EvalState { - // A single evaluation result. - message Result { - // The expression this result is for. - IdRef expr = 1; - - // The index in `values` of the resulting value. - int32 value = 2; - } - - // The unique values referenced in this message. - repeated ExprValue values = 1; - - // An ordered list of results. - // - // Tracks the flow of evaluation through the expression. - // May be sparse. - repeated Result results = 3; -} - -// The value of an evaluated expression. -message ExprValue { - // An expression can resolve to a value, error or unknown. - oneof kind { - // A concrete value. - Value value = 1; - - // The set of errors in the critical path of evalution. - // - // Only errors in the critical path are included. For example, - // `( || true) && ` will only result in ``, - // while ` || ` will result in both `` and - // ``. - // - // Errors cause by the presence of other errors are not included in the - // set. For example `.foo`, `foo()`, and ` + 1` will - // only result in ``. - // - // Multiple errors *might* be included when evaluation could result - // in different errors. For example ` + ` and - // `foo(, )` may result in ``, `` or both. - // The exact subset of errors included for this case is unspecified and - // depends on the implementation details of the evaluator. - ErrorSet error = 2; - - // The set of unknowns in the critical path of evaluation. - // - // Unknown behaves identically to Error with regards to propagation. - // Specifically, only unknowns in the critical path are included, unknowns - // caused by the presence of other unknowns are not included, and multiple - // unknowns *might* be included included when evaluation could result in - // different unknowns. 
For example: - // - // ( || true) && -> - // || -> - // .foo -> - // foo() -> - // + -> or - // - // Unknown takes precidence over Error in cases where a `Value` can short - // circuit the result: - // - // || -> - // && -> - // - // Errors take precidence in all other cases: - // - // + -> - // foo(, ) -> - UnknownSet unknown = 3; - } -} - -// A set of errors. -// -// The errors included depend on the context. See `ExprValue.error`. -message ErrorSet { - // The errors in the set. - repeated google.rpc.Status errors = 1; -} - -// A set of expressions for which the value is unknown. -// -// The unknowns included depend on the context. See `ExprValue.unknown`. -message UnknownSet { - // The ids of the expressions with unknown values. - repeated IdRef exprs = 1; -} - -// A reference to an expression id. -message IdRef { - // The expression id. - int32 id = 1; -} diff --git a/third_party/google/api/expr/v1beta1/expr.proto b/third_party/google/api/expr/v1beta1/expr.proto deleted file mode 100644 index 93b917f14..000000000 --- a/third_party/google/api/expr/v1beta1/expr.proto +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api.expr.v1beta1; - -import "google/api/expr/v1beta1/source.proto"; -import "google/protobuf/struct.proto"; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/expr/v1beta1;expr"; -option java_multiple_files = true; -option java_outer_classname = "ExprProto"; -option java_package = "com.google.api.expr.v1beta1"; - -// An expression together with source information as returned by the parser. -message ParsedExpr { - // The parsed expression. - Expr expr = 2; - - // The source info derived from input that generated the parsed `expr`. - SourceInfo source_info = 3; - - // The syntax version of the source, e.g. `cel1`. - string syntax_version = 4; -} - -// An abstract representation of a common expression. -// -// Expressions are abstractly represented as a collection of identifiers, -// select statements, function calls, literals, and comprehensions. All -// operators with the exception of the '.' operator are modelled as function -// calls. This makes it easy to represent new operators into the existing AST. -// -// All references within expressions must resolve to a -// [Decl][google.api.expr.v1beta1.Decl] provided at type-check for an expression -// to be valid. A reference may either be a bare identifier `name` or a -// qualified identifier `google.api.name`. References may either refer to a -// value or a function declaration. -// -// For example, the expression `google.api.name.startsWith('expr')` references -// the declaration `google.api.name` within a -// [Expr.Select][google.api.expr.v1beta1.Expr.Select] expression, and the -// function declaration `startsWith`. -message Expr { - // An identifier expression. e.g. `request`. - message Ident { - // Required. 
Holds a single, unqualified identifier, possibly preceded by a - // '.'. - // - // Qualified names are represented by the - // [Expr.Select][google.api.expr.v1beta1.Expr.Select] expression. - string name = 1; - } - - // A field selection expression. e.g. `request.auth`. - message Select { - // Required. The target of the selection expression. - // - // For example, in the select expression `request.auth`, the `request` - // portion of the expression is the `operand`. - Expr operand = 1; - - // Required. The name of the field to select. - // - // For example, in the select expression `request.auth`, the `auth` portion - // of the expression would be the `field`. - string field = 2; - - // Whether the select is to be interpreted as a field presence test. - // - // This results from the macro `has(request.auth)`. - bool test_only = 3; - } - - // A call expression, including calls to predefined functions and operators. - // - // For example, `value == 10`, `size(map_value)`. - message Call { - // The target of an method call-style expression. For example, `x` in - // `x.f()`. - Expr target = 1; - - // Required. The name of the function or method being called. - string function = 2; - - // The arguments. - repeated Expr args = 3; - } - - // A list creation expression. - // - // Lists may either be homogenous, e.g. `[1, 2, 3]`, or heterogenous, e.g. - // `dyn([1, 'hello', 2.0])` - message CreateList { - // The elements part of the list. - repeated Expr elements = 1; - } - - // A map or message creation expression. - // - // Maps are constructed as `{'key_name': 'value'}`. Message construction is - // similar, but prefixed with a type name and composed of field ids: - // `types.MyType{field_id: 'value'}`. - message CreateStruct { - // Represents an entry. - message Entry { - // Required. An id assigned to this node by the parser which is unique - // in a given expression tree. This is used to associate type - // information and other attributes to the node. - int32 id = 1; - - // The `Entry` key kinds. - oneof key_kind { - // The field key for a message creator statement. - string field_key = 2; - - // The key expression for a map creation statement. - Expr map_key = 3; - } - - // Required. The value assigned to the key. - Expr value = 4; - } - - // The type name of the message to be created, empty when creating map - // literals. - string type = 1; - - // The entries in the creation expression. - repeated Entry entries = 2; - } - - // A comprehension expression applied to a list or map. - // - // Comprehensions are not part of the core syntax, but enabled with macros. - // A macro matches a specific call signature within a parsed AST and replaces - // the call with an alternate AST block. Macro expansion happens at parse - // time. - // - // The following macros are supported within CEL: - // - // Aggregate type macros may be applied to all elements in a list or all keys - // in a map: - // - // * `all`, `exists`, `exists_one` - test a predicate expression against - // the inputs and return `true` if the predicate is satisfied for all, - // any, or only one value `list.all(x, x < 10)`. - // * `filter` - test a predicate expression against the inputs and return - // the subset of elements which satisfy the predicate: - // `payments.filter(p, p > 1000)`. - // * `map` - apply an expression to all elements in the input and return the - // output aggregate type: `[1, 2, 3].map(i, i * i)`. - // - // The `has(m.x)` macro tests whether the property `x` is present in struct - // `m`. 
The semantics of this macro depend on the type of `m`. For proto2 - // messages `has(m.x)` is defined as 'defined, but not set`. For proto3, the - // macro tests whether the property is set to its default. For map and struct - // types, the macro tests whether the property `x` is defined on `m`. - message Comprehension { - // The name of the iteration variable. - string iter_var = 1; - - // The range over which var iterates. - Expr iter_range = 2; - - // The name of the variable used for accumulation of the result. - string accu_var = 3; - - // The initial value of the accumulator. - Expr accu_init = 4; - - // An expression which can contain iter_var and accu_var. - // - // Returns false when the result has been computed and may be used as - // a hint to short-circuit the remainder of the comprehension. - Expr loop_condition = 5; - - // An expression which can contain iter_var and accu_var. - // - // Computes the next value of accu_var. - Expr loop_step = 6; - - // An expression which can contain accu_var. - // - // Computes the result. - Expr result = 7; - } - - // Required. An id assigned to this node by the parser which is unique in a - // given expression tree. This is used to associate type information and other - // attributes to a node in the parse tree. - int32 id = 2; - - // Required. Variants of expressions. - oneof expr_kind { - // A literal expression. - Literal literal_expr = 3; - - // An identifier expression. - Ident ident_expr = 4; - - // A field selection expression, e.g. `request.auth`. - Select select_expr = 5; - - // A call expression, including calls to predefined functions and operators. - Call call_expr = 6; - - // A list creation expression. - CreateList list_expr = 7; - - // A map or object creation expression. - CreateStruct struct_expr = 8; - - // A comprehension expression. - Comprehension comprehension_expr = 9; - } -} - -// Represents a primitive literal. -// -// This is similar to the primitives supported in the well-known type -// `google.protobuf.Value`, but richer so it can represent CEL's full range of -// primitives. -// -// Lists and structs are not included as constants as these aggregate types may -// contain [Expr][google.api.expr.v1beta1.Expr] elements which require -// evaluation and are thus not constant. -// -// Examples of literals include: `"hello"`, `b'bytes'`, `1u`, `4.2`, `-2`, -// `true`, `null`. -message Literal { - // Required. The valid constant kinds. - oneof constant_kind { - // null value. - google.protobuf.NullValue null_value = 1; - - // boolean value. - bool bool_value = 2; - - // int64 value. - int64 int64_value = 3; - - // uint64 value. - uint64 uint64_value = 4; - - // double value. - double double_value = 5; - - // string value. - string string_value = 6; - - // bytes value. - bytes bytes_value = 7; - } -} diff --git a/third_party/google/api/expr/v1beta1/source.proto b/third_party/google/api/expr/v1beta1/source.proto deleted file mode 100644 index adaf84d5e..000000000 --- a/third_party/google/api/expr/v1beta1/source.proto +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api.expr.v1beta1; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/expr/v1beta1;expr"; -option java_multiple_files = true; -option java_outer_classname = "SourceProto"; -option java_package = "com.google.api.expr.v1beta1"; - -// Source information collected at parse time. -message SourceInfo { - // The location name. All position information attached to an expression is - // relative to this location. - // - // The location could be a file, UI element, or similar. For example, - // `acme/app/AnvilPolicy.cel`. - string location = 2; - - // Monotonically increasing list of character offsets where newlines appear. - // - // The line number of a given position is the index `i` where for a given - // `id` the `line_offsets[i] < id_positions[id] < line_offsets[i+1]`. The - // column may be derivd from `id_positions[id] - line_offsets[i]`. - repeated int32 line_offsets = 3; - - // A map from the parse node id (e.g. `Expr.id`) to the character offset - // within source. - map positions = 4; -} - -// A specific position in source. -message SourcePosition { - // The soucre location name (e.g. file name). - string location = 1; - - // The character offset. - int32 offset = 2; - - // The 1-based index of the starting line in the source text - // where the issue occurs, or 0 if unknown. - int32 line = 3; - - // The 0-based index of the starting position within the line of source text - // where the issue occurs. Only meaningful if line is nonzer.. - int32 column = 4; -} diff --git a/third_party/google/api/expr/v1beta1/value.proto b/third_party/google/api/expr/v1beta1/value.proto deleted file mode 100644 index a5ae06766..000000000 --- a/third_party/google/api/expr/v1beta1/value.proto +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api.expr.v1beta1; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/expr/v1beta1;expr"; -option java_multiple_files = true; -option java_outer_classname = "ValueProto"; -option java_package = "com.google.api.expr.v1beta1"; - -// Represents a CEL value. -// -// This is similar to `google.protobuf.Value`, but can represent CEL's full -// range of values. -message Value { - // Required. The valid kinds of values. - oneof kind { - // Null value. - google.protobuf.NullValue null_value = 1; - - // Boolean value. 
- bool bool_value = 2; - - // Signed integer value. - int64 int64_value = 3; - - // Unsigned integer value. - uint64 uint64_value = 4; - - // Floating point value. - double double_value = 5; - - // UTF-8 string value. - string string_value = 6; - - // Byte string value. - bytes bytes_value = 7; - - // An enum value. - EnumValue enum_value = 9; - - // The proto message backing an object value. - google.protobuf.Any object_value = 10; - - // Map value. - MapValue map_value = 11; - - // List value. - ListValue list_value = 12; - - // A Type value represented by the fully qualified name of the type. - string type_value = 15; - } -} - -// An enum value. -message EnumValue { - // The fully qualified name of the enum type. - string type = 1; - - // The value of the enum. - int32 value = 2; -} - -// A list. -// -// Wrapped in a message so 'not set' and empty can be differentiated, which is -// required for use in a 'oneof'. -message ListValue { - // The ordered values in the list. - repeated Value values = 1; -} - -// A map. -// -// Wrapped in a message so 'not set' and empty can be differentiated, which is -// required for use in a 'oneof'. -message MapValue { - // An entry in the map. - message Entry { - // The key. - // - // Must be unique with in the map. - // Currently only boolean, int, uint, and string values can be keys. - Value key = 1; - - // The value. - Value value = 2; - } - - // The set of map entries. - // - // CEL has fewer restrictions on keys, so a protobuf map represenation - // cannot be used. - repeated Entry entries = 1; -} diff --git a/third_party/google/api/field_behavior.proto b/third_party/google/api/field_behavior.proto deleted file mode 100644 index eb7f78ef1..000000000 --- a/third_party/google/api/field_behavior.proto +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api; - -import "google/protobuf/descriptor.proto"; - -option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; -option java_multiple_files = true; -option java_outer_classname = "FieldBehaviorProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -extend google.protobuf.FieldOptions { - // A designation of a specific field behavior (required, output only, etc.) - // in protobuf messages. - // - // Examples: - // - // string name = 1 [(google.api.field_behavior) = REQUIRED]; - // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - // google.protobuf.Duration ttl = 1 - // [(google.api.field_behavior) = INPUT_ONLY]; - // google.protobuf.Timestamp expire_time = 1 - // [(google.api.field_behavior) = OUTPUT_ONLY, - // (google.api.field_behavior) = IMMUTABLE]; - repeated google.api.FieldBehavior field_behavior = 1052; -} - -// An indicator of the behavior of a given field (for example, that a field -// is required in requests, or given as output but ignored as input). 
-// This **does not** change the behavior in protocol buffers itself; it only -// denotes the behavior and may affect how API tooling handles the field. -// -// Note: This enum **may** receive new values in the future. -enum FieldBehavior { - // Conventional default for enums. Do not use this. - FIELD_BEHAVIOR_UNSPECIFIED = 0; - - // Specifically denotes a field as optional. - // While all fields in protocol buffers are optional, this may be specified - // for emphasis if appropriate. - OPTIONAL = 1; - - // Denotes a field as required. - // This indicates that the field **must** be provided as part of the request, - // and failure to do so will cause an error (usually `INVALID_ARGUMENT`). - REQUIRED = 2; - - // Denotes a field as output only. - // This indicates that the field is provided in responses, but including the - // field in a request does nothing (the server *must* ignore it and - // *must not* throw an error as a result of the field's presence). - OUTPUT_ONLY = 3; - - // Denotes a field as input only. - // This indicates that the field is provided in requests, and the - // corresponding field is not included in output. - INPUT_ONLY = 4; - - // Denotes a field as immutable. - // This indicates that the field may be set once in a request to create a - // resource, but may not be changed thereafter. - IMMUTABLE = 5; -} diff --git a/third_party/google/api/http.proto b/third_party/google/api/http.proto index b2977f514..69460cf79 100644 --- a/third_party/google/api/http.proto +++ b/third_party/google/api/http.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/third_party/google/api/httpbody.proto b/third_party/google/api/httpbody.proto index 45c1e76b1..1a5bb78be 100644 --- a/third_party/google/api/httpbody.proto +++ b/third_party/google/api/httpbody.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/third_party/google/api/label.proto b/third_party/google/api/label.proto deleted file mode 100644 index 668efd1c6..000000000 --- a/third_party/google/api/label.proto +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -syntax = "proto3"; - -package google.api; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/label;label"; -option java_multiple_files = true; -option java_outer_classname = "LabelProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// A description of a label. -message LabelDescriptor { - // Value types that can be used as label values. - enum ValueType { - // A variable-length string. This is the default. - STRING = 0; - - // Boolean; true or false. - BOOL = 1; - - // A 64-bit signed integer. - INT64 = 2; - } - - // The label key. - string key = 1; - - // The type of data that can be assigned to the label. - ValueType value_type = 2; - - // A human-readable description for the label. - string description = 3; -} diff --git a/third_party/google/api/launch_stage.proto b/third_party/google/api/launch_stage.proto deleted file mode 100644 index 55fd91424..000000000 --- a/third_party/google/api/launch_stage.proto +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api; - -option go_package = "google.golang.org/genproto/googleapis/api;api"; -option java_multiple_files = true; -option java_outer_classname = "LaunchStageProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// The launch stage as defined by [Google Cloud Platform -// Launch Stages](http://cloud.google.com/terms/launch-stages). -enum LaunchStage { - // Do not use this default value. - LAUNCH_STAGE_UNSPECIFIED = 0; - - // Early Access features are limited to a closed group of testers. To use - // these features, you must sign up in advance and sign a Trusted Tester - // agreement (which includes confidentiality provisions). These features may - // be unstable, changed in backward-incompatible ways, and are not - // guaranteed to be released. - EARLY_ACCESS = 1; - - // Alpha is a limited availability test for releases before they are cleared - // for widespread use. By Alpha, all significant design issues are resolved - // and we are in the process of verifying functionality. Alpha customers - // need to apply for access, agree to applicable terms, and have their - // projects whitelisted. Alpha releases don’t have to be feature complete, - // no SLAs are provided, and there are no technical support obligations, but - // they will be far enough along that customers can actually use them in - // test environments or for limited-use tests -- just like they would in - // normal production cases. - ALPHA = 2; - - // Beta is the point at which we are ready to open a release for any - // customer to use. There are no SLA or technical support obligations in a - // Beta release. Products will be complete from a feature perspective, but - // may have some open outstanding issues. Beta releases are suitable for - // limited production use cases. 
- BETA = 3; - - // GA features are open to all developers and are considered stable and - // fully qualified for production use. - GA = 4; - - // Deprecated features are scheduled to be shut down and removed. For more - // information, see the “Deprecation Policy” section of our [Terms of - // Service](https://cloud.google.com/terms/) - // and the [Google Cloud Platform Subject to the Deprecation - // Policy](https://cloud.google.com/terms/deprecation) documentation. - DEPRECATED = 5; -} diff --git a/third_party/google/api/log.proto b/third_party/google/api/log.proto deleted file mode 100644 index 1125e1fe3..000000000 --- a/third_party/google/api/log.proto +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api; - -import "google/api/label.proto"; - -option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig"; -option java_multiple_files = true; -option java_outer_classname = "LogProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// A description of a log type. Example in YAML format: -// -// - name: library.googleapis.com/activity_history -// description: The history of borrowing and returning library items. -// display_name: Activity -// labels: -// - key: /customer_id -// description: Identifier of a library customer -message LogDescriptor { - // The name of the log. It must be less than 512 characters long and can - // include the following characters: upper- and lower-case alphanumeric - // characters [A-Za-z0-9], and punctuation characters including - // slash, underscore, hyphen, period [/_-.]. - string name = 1; - - // The set of labels that are available to describe a specific log entry. - // Runtime requests that contain labels not specified here are - // considered invalid. - repeated LabelDescriptor labels = 2; - - // A human-readable description of this log. This information appears in - // the documentation and can contain details. - string description = 3; - - // The human-readable name for this log. This information appears on - // the user interface and should be concise. - string display_name = 4; -} diff --git a/third_party/google/api/logging.proto b/third_party/google/api/logging.proto deleted file mode 100644 index 9090b2a1c..000000000 --- a/third_party/google/api/logging.proto +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -syntax = "proto3"; - -package google.api; - -option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig"; -option java_multiple_files = true; -option java_outer_classname = "LoggingProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// Logging configuration of the service. -// -// The following example shows how to configure logs to be sent to the -// producer and consumer projects. In the example, the `activity_history` -// log is sent to both the producer and consumer projects, whereas the -// `purchase_history` log is only sent to the producer project. -// -// monitored_resources: -// - type: library.googleapis.com/branch -// labels: -// - key: /city -// description: The city where the library branch is located in. -// - key: /name -// description: The name of the branch. -// logs: -// - name: activity_history -// labels: -// - key: /customer_id -// - name: purchase_history -// logging: -// producer_destinations: -// - monitored_resource: library.googleapis.com/branch -// logs: -// - activity_history -// - purchase_history -// consumer_destinations: -// - monitored_resource: library.googleapis.com/branch -// logs: -// - activity_history -message Logging { - // Configuration of a specific logging destination (the producer project - // or the consumer project). - message LoggingDestination { - // The monitored resource type. The type must be defined in the - // [Service.monitored_resources][google.api.Service.monitored_resources] section. - string monitored_resource = 3; - - // Names of the logs to be sent to this destination. Each name must - // be defined in the [Service.logs][google.api.Service.logs] section. If the log name is - // not a domain scoped name, it will be automatically prefixed with - // the service name followed by "/". - repeated string logs = 1; - } - - // Logging configurations for sending logs to the producer project. - // There can be multiple producer destinations, each one must have a - // different monitored resource type. A log can be used in at most - // one producer destination. - repeated LoggingDestination producer_destinations = 1; - - // Logging configurations for sending logs to the consumer project. - // There can be multiple consumer destinations, each one must have a - // different monitored resource type. A log can be used in at most - // one consumer destination. - repeated LoggingDestination consumer_destinations = 2; -} diff --git a/third_party/google/api/metric.proto b/third_party/google/api/metric.proto deleted file mode 100644 index 4d3c488f2..000000000 --- a/third_party/google/api/metric.proto +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -syntax = "proto3"; - -package google.api; - -import "google/api/label.proto"; -import "google/api/launch_stage.proto"; -import "google/protobuf/duration.proto"; - -option go_package = "google.golang.org/genproto/googleapis/api/metric;metric"; -option java_multiple_files = true; -option java_outer_classname = "MetricProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// Defines a metric type and its schema. Once a metric descriptor is created, -// deleting or altering it stops data collection and makes the metric type's -// existing data unusable. -message MetricDescriptor { - // Additional annotations that can be used to guide the usage of a metric. - message MetricDescriptorMetadata { - // The launch stage of the metric definition. - LaunchStage launch_stage = 1; - - // The sampling period of metric data points. For metrics which are written - // periodically, consecutive data points are stored at this time interval, - // excluding data loss due to errors. Metrics with a higher granularity have - // a smaller sampling period. - google.protobuf.Duration sample_period = 2; - - // The delay of data points caused by ingestion. Data points older than this - // age are guaranteed to be ingested and available to be read, excluding - // data loss due to errors. - google.protobuf.Duration ingest_delay = 3; - } - - // The kind of measurement. It describes how the data is reported. - enum MetricKind { - // Do not use this default value. - METRIC_KIND_UNSPECIFIED = 0; - - // An instantaneous measurement of a value. - GAUGE = 1; - - // The change in a value during a time interval. - DELTA = 2; - - // A value accumulated over a time interval. Cumulative - // measurements in a time series should have the same start time - // and increasing end times, until an event resets the cumulative - // value to zero and sets a new start time for the following - // points. - CUMULATIVE = 3; - } - - // The value type of a metric. - enum ValueType { - // Do not use this default value. - VALUE_TYPE_UNSPECIFIED = 0; - - // The value is a boolean. - // This value type can be used only if the metric kind is `GAUGE`. - BOOL = 1; - - // The value is a signed 64-bit integer. - INT64 = 2; - - // The value is a double precision floating point number. - DOUBLE = 3; - - // The value is a text string. - // This value type can be used only if the metric kind is `GAUGE`. - STRING = 4; - - // The value is a [`Distribution`][google.api.Distribution]. - DISTRIBUTION = 5; - - // The value is money. - MONEY = 6; - } - - // The resource name of the metric descriptor. - string name = 1; - - // The metric type, including its DNS name prefix. The type is not - // URL-encoded. All user-defined metric types have the DNS name - // `custom.googleapis.com` or `external.googleapis.com`. Metric types should - // use a natural hierarchical grouping. For example: - // - // "custom.googleapis.com/invoice/paid/amount" - // "external.googleapis.com/prometheus/up" - // "appengine.googleapis.com/http/server/response_latencies" - string type = 8; - - // The set of labels that can be used to describe a specific - // instance of this metric type. For example, the - // `appengine.googleapis.com/http/server/response_latencies` metric - // type has a label for the HTTP response code, `response_code`, so - // you can look at latencies for successful responses or just - // for responses that failed. - repeated LabelDescriptor labels = 2; - - // Whether the metric records instantaneous values, changes to a value, etc. 
- // Some combinations of `metric_kind` and `value_type` might not be supported. - MetricKind metric_kind = 3; - - // Whether the measurement is an integer, a floating-point number, etc. - // Some combinations of `metric_kind` and `value_type` might not be supported. - ValueType value_type = 4; - - // The unit in which the metric value is reported. It is only applicable - // if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The - // supported units are a subset of [The Unified Code for Units of - // Measure](http://unitsofmeasure.org/ucum.html) standard: - // - // **Basic units (UNIT)** - // - // * `bit` bit - // * `By` byte - // * `s` second - // * `min` minute - // * `h` hour - // * `d` day - // - // **Prefixes (PREFIX)** - // - // * `k` kilo (10**3) - // * `M` mega (10**6) - // * `G` giga (10**9) - // * `T` tera (10**12) - // * `P` peta (10**15) - // * `E` exa (10**18) - // * `Z` zetta (10**21) - // * `Y` yotta (10**24) - // * `m` milli (10**-3) - // * `u` micro (10**-6) - // * `n` nano (10**-9) - // * `p` pico (10**-12) - // * `f` femto (10**-15) - // * `a` atto (10**-18) - // * `z` zepto (10**-21) - // * `y` yocto (10**-24) - // * `Ki` kibi (2**10) - // * `Mi` mebi (2**20) - // * `Gi` gibi (2**30) - // * `Ti` tebi (2**40) - // - // **Grammar** - // - // The grammar also includes these connectors: - // - // * `/` division (as an infix operator, e.g. `1/s`). - // * `.` multiplication (as an infix operator, e.g. `GBy.d`) - // - // The grammar for a unit is as follows: - // - // Expression = Component { "." Component } { "/" Component } ; - // - // Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] - // | Annotation - // | "1" - // ; - // - // Annotation = "{" NAME "}" ; - // - // Notes: - // - // * `Annotation` is just a comment if it follows a `UNIT` and is - // equivalent to `1` if it is used alone. For examples, - // `{requests}/s == 1/s`, `By{transmitted}/s == By/s`. - // * `NAME` is a sequence of non-blank printable ASCII characters not - // containing '{' or '}'. - // * `1` represents dimensionless value 1, such as in `1/s`. - // * `%` represents dimensionless value 1/100, and annotates values giving - // a percentage. - string unit = 5; - - // A detailed description of the metric, which can be used in documentation. - string description = 6; - - // A concise name for the metric, which can be displayed in user interfaces. - // Use sentence case without an ending period, for example "Request count". - // This field is optional but it is recommended to be set for any metrics - // associated with user-visible concepts, such as Quota. - string display_name = 7; - - // Optional. Metadata which can be used to guide usage of the metric. - MetricDescriptorMetadata metadata = 10; -} - -// A specific metric, identified by specifying values for all of the -// labels of a [`MetricDescriptor`][google.api.MetricDescriptor]. -message Metric { - // An existing metric type, see [google.api.MetricDescriptor][google.api.MetricDescriptor]. - // For example, `custom.googleapis.com/invoice/paid/amount`. - string type = 3; - - // The set of label values that uniquely identify this metric. All - // labels listed in the `MetricDescriptor` must be assigned values. - map labels = 2; -} diff --git a/third_party/google/api/monitored_resource.proto b/third_party/google/api/monitored_resource.proto deleted file mode 100644 index a2c4525bb..000000000 --- a/third_party/google/api/monitored_resource.proto +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2019 Google LLC. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api; - -import "google/api/label.proto"; -import "google/protobuf/struct.proto"; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/monitoredres;monitoredres"; -option java_multiple_files = true; -option java_outer_classname = "MonitoredResourceProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// An object that describes the schema of a [MonitoredResource][google.api.MonitoredResource] object using a -// type name and a set of labels. For example, the monitored resource -// descriptor for Google Compute Engine VM instances has a type of -// `"gce_instance"` and specifies the use of the labels `"instance_id"` and -// `"zone"` to identify particular VM instances. -// -// Different APIs can support different monitored resource types. APIs generally -// provide a `list` method that returns the monitored resource descriptors used -// by the API. -message MonitoredResourceDescriptor { - // Optional. The resource name of the monitored resource descriptor: - // `"projects/{project_id}/monitoredResourceDescriptors/{type}"` where - // {type} is the value of the `type` field in this object and - // {project_id} is a project ID that provides API-specific context for - // accessing the type. APIs that do not use project information can use the - // resource name format `"monitoredResourceDescriptors/{type}"`. - string name = 5; - - // Required. The monitored resource type. For example, the type - // `"cloudsql_database"` represents databases in Google Cloud SQL. - // The maximum length of this value is 256 characters. - string type = 1; - - // Optional. A concise name for the monitored resource type that might be - // displayed in user interfaces. It should be a Title Cased Noun Phrase, - // without any article or other determiners. For example, - // `"Google Cloud SQL Database"`. - string display_name = 2; - - // Optional. A detailed description of the monitored resource type that might - // be used in documentation. - string description = 3; - - // Required. A set of labels used to describe instances of this monitored - // resource type. For example, an individual Google Cloud SQL database is - // identified by values for the labels `"database_id"` and `"zone"`. - repeated LabelDescriptor labels = 4; -} - -// An object representing a resource that can be used for monitoring, logging, -// billing, or other purposes. Examples include virtual machine instances, -// databases, and storage devices such as disks. The `type` field identifies a -// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object that describes the resource's -// schema. Information in the `labels` field identifies the actual resource and -// its attributes according to the schema. 
For example, a particular Compute -// Engine VM instance could be represented by the following object, because the -// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] for `"gce_instance"` has labels -// `"instance_id"` and `"zone"`: -// -// { "type": "gce_instance", -// "labels": { "instance_id": "12345678901234", -// "zone": "us-central1-a" }} -message MonitoredResource { - // Required. The monitored resource type. This field must match - // the `type` field of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object. For - // example, the type of a Compute Engine VM instance is `gce_instance`. - string type = 1; - - // Required. Values for all of the labels listed in the associated monitored - // resource descriptor. For example, Compute Engine VM instances use the - // labels `"project_id"`, `"instance_id"`, and `"zone"`. - map labels = 2; -} - -// Auxiliary metadata for a [MonitoredResource][google.api.MonitoredResource] object. -// [MonitoredResource][google.api.MonitoredResource] objects contain the minimum set of information to -// uniquely identify a monitored resource instance. There is some other useful -// auxiliary metadata. Monitoring and Logging use an ingestion -// pipeline to extract metadata for cloud resources of all types, and store -// the metadata in this message. -message MonitoredResourceMetadata { - // Output only. Values for predefined system metadata labels. - // System labels are a kind of metadata extracted by Google, including - // "machine_image", "vpc", "subnet_id", - // "security_group", "name", etc. - // System label values can be only strings, Boolean values, or a list of - // strings. For example: - // - // { "name": "my-test-instance", - // "security_group": ["a", "b", "c"], - // "spot_instance": false } - google.protobuf.Struct system_labels = 1; - - // Output only. A map of user-defined metadata labels. - map user_labels = 2; -} diff --git a/third_party/google/api/monitoring.proto b/third_party/google/api/monitoring.proto deleted file mode 100644 index 07e962d1b..000000000 --- a/third_party/google/api/monitoring.proto +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api; - -option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig"; -option java_multiple_files = true; -option java_outer_classname = "MonitoringProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// Monitoring configuration of the service. -// -// The example below shows how to configure monitored resources and metrics -// for monitoring. In the example, a monitored resource and two metrics are -// defined. The `library.googleapis.com/book/returned_count` metric is sent -// to both producer and consumer projects, whereas the -// `library.googleapis.com/book/overdue_count` metric is only sent to the -// consumer project. 
-// -// monitored_resources: -// - type: library.googleapis.com/branch -// labels: -// - key: /city -// description: The city where the library branch is located in. -// - key: /name -// description: The name of the branch. -// metrics: -// - name: library.googleapis.com/book/returned_count -// metric_kind: DELTA -// value_type: INT64 -// labels: -// - key: /customer_id -// - name: library.googleapis.com/book/overdue_count -// metric_kind: GAUGE -// value_type: INT64 -// labels: -// - key: /customer_id -// monitoring: -// producer_destinations: -// - monitored_resource: library.googleapis.com/branch -// metrics: -// - library.googleapis.com/book/returned_count -// consumer_destinations: -// - monitored_resource: library.googleapis.com/branch -// metrics: -// - library.googleapis.com/book/returned_count -// - library.googleapis.com/book/overdue_count -message Monitoring { - // Configuration of a specific monitoring destination (the producer project - // or the consumer project). - message MonitoringDestination { - // The monitored resource type. The type must be defined in - // [Service.monitored_resources][google.api.Service.monitored_resources] section. - string monitored_resource = 1; - - // Types of the metrics to report to this monitoring destination. - // Each type must be defined in [Service.metrics][google.api.Service.metrics] section. - repeated string metrics = 2; - } - - // Monitoring configurations for sending metrics to the producer project. - // There can be multiple producer destinations. A monitored resouce type may - // appear in multiple monitoring destinations if different aggregations are - // needed for different sets of metrics associated with that monitored - // resource type. A monitored resource and metric pair may only be used once - // in the Monitoring configuration. - repeated MonitoringDestination producer_destinations = 1; - - // Monitoring configurations for sending metrics to the consumer project. - // There can be multiple consumer destinations. A monitored resouce type may - // appear in multiple monitoring destinations if different aggregations are - // needed for different sets of metrics associated with that monitored - // resource type. A monitored resource and metric pair may only be used once - // in the Monitoring configuration. - repeated MonitoringDestination consumer_destinations = 2; -} diff --git a/third_party/google/api/quota.proto b/third_party/google/api/quota.proto deleted file mode 100644 index 2e6e52b66..000000000 --- a/third_party/google/api/quota.proto +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -syntax = "proto3"; - -package google.api; - -option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig"; -option java_multiple_files = true; -option java_outer_classname = "QuotaProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// Quota configuration helps to achieve fairness and budgeting in service -// usage. -// -// The metric based quota configuration works this way: -// - The service configuration defines a set of metrics. -// - For API calls, the quota.metric_rules maps methods to metrics with -// corresponding costs. -// - The quota.limits defines limits on the metrics, which will be used for -// quota checks at runtime. -// -// An example quota configuration in yaml format: -// -// quota: -// limits: -// -// - name: apiWriteQpsPerProject -// metric: library.googleapis.com/write_calls -// unit: "1/min/{project}" # rate limit for consumer projects -// values: -// STANDARD: 10000 -// -// -// # The metric rules bind all methods to the read_calls metric, -// # except for the UpdateBook and DeleteBook methods. These two methods -// # are mapped to the write_calls metric, with the UpdateBook method -// # consuming at twice rate as the DeleteBook method. -// metric_rules: -// - selector: "*" -// metric_costs: -// library.googleapis.com/read_calls: 1 -// - selector: google.example.library.v1.LibraryService.UpdateBook -// metric_costs: -// library.googleapis.com/write_calls: 2 -// - selector: google.example.library.v1.LibraryService.DeleteBook -// metric_costs: -// library.googleapis.com/write_calls: 1 -// -// Corresponding Metric definition: -// -// metrics: -// - name: library.googleapis.com/read_calls -// display_name: Read requests -// metric_kind: DELTA -// value_type: INT64 -// -// - name: library.googleapis.com/write_calls -// display_name: Write requests -// metric_kind: DELTA -// value_type: INT64 -// -// -message Quota { - // List of `QuotaLimit` definitions for the service. - repeated QuotaLimit limits = 3; - - // List of `MetricRule` definitions, each one mapping a selected method to one - // or more metrics. - repeated MetricRule metric_rules = 4; -} - -// Bind API methods to metrics. Binding a method to a metric causes that -// metric's configured quota behaviors to apply to the method call. -message MetricRule { - // Selects the methods to which this rule applies. - // - // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. - string selector = 1; - - // Metrics to update when the selected methods are called, and the associated - // cost applied to each metric. - // - // The key of the map is the metric name, and the values are the amount - // increased for the metric against which the quota limits are defined. - // The value must not be negative. - map metric_costs = 2; -} - -// `QuotaLimit` defines a specific limit that applies over a specified duration -// for a limit type. There can be at most one limit for a duration and limit -// type combination defined within a `QuotaGroup`. -message QuotaLimit { - // Name of the quota limit. - // - // The name must be provided, and it must be unique within the service. The - // name can only include alphanumeric characters as well as '-'. - // - // The maximum length of the limit name is 64 characters. - string name = 6; - - // Optional. User-visible, extended description for this quota limit. 
- // Should be used only when more context is needed to understand this limit - // than provided by the limit's display name (see: `display_name`). - string description = 2; - - // Default number of tokens that can be consumed during the specified - // duration. This is the number of tokens assigned when a client - // application developer activates the service for his/her project. - // - // Specifying a value of 0 will block all requests. This can be used if you - // are provisioning quota to selected consumers and blocking others. - // Similarly, a value of -1 will indicate an unlimited quota. No other - // negative values are allowed. - // - // Used by group-based quotas only. - int64 default_limit = 3; - - // Maximum number of tokens that can be consumed during the specified - // duration. Client application developers can override the default limit up - // to this maximum. If specified, this value cannot be set to a value less - // than the default limit. If not specified, it is set to the default limit. - // - // To allow clients to apply overrides with no upper bound, set this to -1, - // indicating unlimited maximum quota. - // - // Used by group-based quotas only. - int64 max_limit = 4; - - // Free tier value displayed in the Developers Console for this limit. - // The free tier is the number of tokens that will be subtracted from the - // billed amount when billing is enabled. - // This field can only be set on a limit with duration "1d", in a billable - // group; it is invalid on any other limit. If this field is not set, it - // defaults to 0, indicating that there is no free tier for this service. - // - // Used by group-based quotas only. - int64 free_tier = 7; - - // Duration of this limit in textual notation. Example: "100s", "24h", "1d". - // For duration longer than a day, only multiple of days is supported. We - // support only "100s" and "1d" for now. Additional support will be added in - // the future. "0" indicates indefinite duration. - // - // Used by group-based quotas only. - string duration = 5; - - // The name of the metric this quota limit applies to. The quota limits with - // the same metric will be checked together during runtime. The metric must be - // defined within the service config. - string metric = 8; - - // Specify the unit of the quota limit. It uses the same syntax as - // [Metric.unit][]. The supported unit kinds are determined by the quota - // backend system. - // - // Here are some examples: - // * "1/min/{project}" for quota per minute per project. - // - // Note: the order of unit components is insignificant. - // The "1" at the beginning is required to follow the metric unit syntax. - string unit = 9; - - // Tiered limit values. You must specify this as a key:value pair, with an - // integer value that is the maximum number of requests allowed for the - // specified unit. Currently only STANDARD is supported. - map values = 10; - - // User-visible display name for this limit. - // Optional. If not set, the UI will provide a default display name based on - // the quota configuration. This field can be used to override the default - // display name generated from the configuration. - string display_name = 12; -} diff --git a/third_party/google/api/resource.proto b/third_party/google/api/resource.proto deleted file mode 100644 index 54f8aeb78..000000000 --- a/third_party/google/api/resource.proto +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2019 Google LLC. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api; - -import "google/protobuf/descriptor.proto"; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; -option java_multiple_files = true; -option java_outer_classname = "ResourceProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -extend google.protobuf.FieldOptions { - // The fully qualified message name of the type that this field references. - // Marks this as a field referring to a resource in another message. - // - // Example: - // - // message Subscription { - // string topic = 2 [(google.api.resource_reference) = { - // type: "pubsub.googleapis.com/Topic" - // }]; - // } - // - // If the referenced message is in the same proto package, the package - // may be omitted: - // - // message Subscription { - // string topic = 2 - // [(google.api.resource_reference).type = "Topic"]; - // } - // - // Only one of {`resource`, `resource_reference`} may be set. - google.api.ResourceReference resource_reference = 1055; -} - -extend google.protobuf.MessageOptions { - // An annotation describing a resource. - // - // Example: - // - // message Topic { - // option (google.api.resource) = { - // type: "pubsub.googleapis.com/Topic" - // pattern: "projects/{project}/topics/{topic}" - // }; - // } - // - // Only one of {`resource`, `resource_reference`} may be set. - google.api.ResourceDescriptor resource = 1053; -} - -// A simple descriptor of a resource type. -// -// ResourceDescriptor annotates a resource message (either by means of a -// protobuf annotation or use in the service config), and associates the -// resource's schema, the resource type, and the pattern of the resource name. -// -// Example: -// -// message Topic { -// // Indicates this message defines a resource schema. -// // Declares the resource type in the format of {service}/{kind}. -// // For Kubernetes resources, the format is {api group}/{kind}. -// option (google.api.resource) = { -// type: "pubsub.googleapis.com/Topic" -// pattern: "projects/{project}/topics/{topic}" -// }; -// } -// -// Sometimes, resources have multiple patterns, typically because they can -// live under multiple parents. -// -// Example: -// -// message LogEntry { -// option (google.api.resource) = { -// type: "logging.googleapis.com/LogEntry" -// pattern: "projects/{project}/logs/{log}" -// pattern: "organizations/{organization}/logs/{log}" -// pattern: "folders/{folder}/logs/{log}" -// pattern: "billingAccounts/{billing_account}/logs/{log}" -// }; -// } -message ResourceDescriptor { - // A description of the historical or future-looking state of the - // resource pattern. - enum History { - // The "unset" value. - HISTORY_UNSPECIFIED = 0; - - // The resource originally had one pattern and launched as such, and - // additional patterns were added later. 
- ORIGINALLY_SINGLE_PATTERN = 1; - - // The resource has one pattern, but the API owner expects to add more - // later. (This is the inverse of ORIGINALLY_SINGLE_PATTERN, and prevents - // that from being necessary once there are multiple patterns.) - FUTURE_MULTI_PATTERN = 2; - } - - // The full name of the resource type. It must be in the format of - // {service_name}/{resource_type_kind}. The resource type names are - // singular and do not contain version numbers. - // - // For example: `storage.googleapis.com/Bucket` - // - // The value of the resource_type_kind must follow the regular expression - // /[A-Z][a-zA-Z0-9]+/. It must start with upper case character and - // recommended to use PascalCase (UpperCamelCase). The maximum number of - // characters allowed for the resource_type_kind is 100. - string type = 1; - - // Required. The valid pattern or patterns for this resource's names. - // - // Examples: - // - "projects/{project}/topics/{topic}" - // - "projects/{project}/knowledgeBases/{knowledge_base}" - // - // The components in braces correspond to the IDs for each resource in the - // hierarchy. It is expected that, if multiple patterns are provided, - // the same component name (e.g. "project") refers to IDs of the same - // type of resource. - repeated string pattern = 2; - - // Optional. The field on the resource that designates the resource name - // field. If omitted, this is assumed to be "name". - string name_field = 3; - - // Optional. The historical or future-looking state of the resource pattern. - // - // Example: - // // The InspectTemplate message originally only supported resource - // // names with organization, and project was added later. - // message InspectTemplate { - // option (google.api.resource) = { - // type: "dlp.googleapis.com/InspectTemplate" - // pattern: "organizations/{organization}/inspectTemplates/{inspect_template}" - // pattern: "projects/{project}/inspectTemplates/{inspect_template}" - // history: ORIGINALLY_SINGLE_PATTERN - // }; - // } - History history = 4; -} - -// An annotation designating that this field is a reference to a resource -// defined by another message. -message ResourceReference { - // The unified resource type name of the type that this field references. - // Marks this as a field referring to a resource in another message. - // - // Example: - // - // message Subscription { - // string topic = 2 [(google.api.resource_reference) = { - // type = "pubsub.googleapis.com/Topic" - // }]; - // } - string type = 1; - - // The fully-qualified message name of a child of the type that this field - // references. - // - // This is useful for `parent` fields where a resource has more than one - // possible type of parent. - // - // Example: - // - // message ListLogEntriesRequest { - // string parent = 1 [(google.api.resource_reference) = { - // child_type: "logging.googleapis.com/LogEntry" - // }; - // } - // - // If the referenced message is in the same proto package, the service name - // may be omitted: - // - // message ListLogEntriesRequest { - // string parent = 1 - // [(google.api.resource_reference).child_type = "LogEntry"]; - // } - string child_type = 2; -} diff --git a/third_party/google/api/service.proto b/third_party/google/api/service.proto deleted file mode 100644 index 33b69682f..000000000 --- a/third_party/google/api/service.proto +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2019 Google LLC. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api; - -import "google/api/annotations.proto"; -import "google/api/auth.proto"; -import "google/api/backend.proto"; -import "google/api/billing.proto"; -import "google/api/context.proto"; -import "google/api/control.proto"; -import "google/api/documentation.proto"; -import "google/api/endpoint.proto"; -import "google/api/experimental/experimental.proto"; -import "google/api/http.proto"; -import "google/api/label.proto"; -import "google/api/log.proto"; -import "google/api/logging.proto"; -import "google/api/metric.proto"; -import "google/api/monitored_resource.proto"; -import "google/api/monitoring.proto"; -import "google/api/quota.proto"; -import "google/api/source_info.proto"; -import "google/api/system_parameter.proto"; -import "google/api/usage.proto"; -import "google/protobuf/any.proto"; -import "google/protobuf/api.proto"; -import "google/protobuf/type.proto"; -import "google/protobuf/wrappers.proto"; - -option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig"; -option java_multiple_files = true; -option java_outer_classname = "ServiceProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// `Service` is the root object of Google service configuration schema. It -// describes basic information about a service, such as the name and the -// title, and delegates other aspects to sub-sections. Each sub-section is -// either a proto message or a repeated proto message that configures a -// specific aspect, such as auth. See each proto message definition for details. -// -// Example: -// -// type: google.api.Service -// config_version: 3 -// name: calendar.googleapis.com -// title: Google Calendar API -// apis: -// - name: google.calendar.v3.Calendar -// authentication: -// providers: -// - id: google_calendar_auth -// jwks_uri: https://www.googleapis.com/oauth2/v1/certs -// issuer: https://securetoken.google.com -// rules: -// - selector: "*" -// requirements: -// provider_id: google_calendar_auth -message Service { - // The semantic version of the service configuration. The config version - // affects the interpretation of the service configuration. For example, - // certain features are enabled by default for certain config versions. - // The latest config version is `3`. - google.protobuf.UInt32Value config_version = 20; - - // The service name, which is a DNS-like logical identifier for the - // service, such as `calendar.googleapis.com`. The service name - // typically goes through DNS verification to make sure the owner - // of the service also owns the DNS name. - string name = 1; - - // A unique ID for a specific instance of this message, typically assigned - // by the client for tracking purpose. If empty, the server may choose to - // generate one instead. Must be no longer than 60 characters. - string id = 33; - - // The product title for this service. 
- string title = 2; - - // The Google project that owns this service. - string producer_project_id = 22; - - // A list of API interfaces exported by this service. Only the `name` field - // of the [google.protobuf.Api][google.protobuf.Api] needs to be provided by the configuration - // author, as the remaining fields will be derived from the IDL during the - // normalization process. It is an error to specify an API interface here - // which cannot be resolved against the associated IDL files. - repeated google.protobuf.Api apis = 3; - - // A list of all proto message types included in this API service. - // Types referenced directly or indirectly by the `apis` are - // automatically included. Messages which are not referenced but - // shall be included, such as types used by the `google.protobuf.Any` type, - // should be listed here by name. Example: - // - // types: - // - name: google.protobuf.Int32 - repeated google.protobuf.Type types = 4; - - // A list of all enum types included in this API service. Enums - // referenced directly or indirectly by the `apis` are automatically - // included. Enums which are not referenced but shall be included - // should be listed here by name. Example: - // - // enums: - // - name: google.someapi.v1.SomeEnum - repeated google.protobuf.Enum enums = 5; - - // Additional API documentation. - Documentation documentation = 6; - - // API backend configuration. - Backend backend = 8; - - // HTTP configuration. - Http http = 9; - - // Quota configuration. - Quota quota = 10; - - // Auth configuration. - Authentication authentication = 11; - - // Context configuration. - Context context = 12; - - // Configuration controlling usage of this service. - Usage usage = 15; - - // Configuration for network endpoints. If this is empty, then an endpoint - // with the same name as the service is automatically generated to service all - // defined APIs. - repeated Endpoint endpoints = 18; - - // Configuration for the service control plane. - Control control = 21; - - // Defines the logs used by this service. - repeated LogDescriptor logs = 23; - - // Defines the metrics used by this service. - repeated MetricDescriptor metrics = 24; - - // Defines the monitored resources used by this service. This is required - // by the [Service.monitoring][google.api.Service.monitoring] and [Service.logging][google.api.Service.logging] configurations. - repeated MonitoredResourceDescriptor monitored_resources = 25; - - // Billing configuration. - Billing billing = 26; - - // Logging configuration. - Logging logging = 27; - - // Monitoring configuration. - Monitoring monitoring = 28; - - // System parameter configuration. - SystemParameters system_parameters = 29; - - // Output only. The source information for this configuration if available. - SourceInfo source_info = 37; - - // Experimental configuration. 
- Experimental experimental = 101; -} diff --git a/third_party/google/api/serviceconfig.yaml b/third_party/google/api/serviceconfig.yaml deleted file mode 100644 index 6d883d428..000000000 --- a/third_party/google/api/serviceconfig.yaml +++ /dev/null @@ -1,24 +0,0 @@ -type: google.api.Service -config_version: 1 -name: serviceconfig.googleapis.com -title: Service Config API - -types: -- name: google.api.ConfigChange -- name: google.api.Distribution -- name: google.api.DocumentationRule -- name: google.api.HttpBody -- name: google.api.LabelDescriptor -- name: google.api.Metric -- name: google.api.MonitoredResource -- name: google.api.MonitoredResourceDescriptor -- name: google.api.MonitoredResourceMetadata -- name: google.api.ResourceDescriptor -- name: google.api.ResourceReference -- name: google.api.Service - -enums: -- name: google.api.FieldBehavior - -documentation: - summary: Lets you define and config your API service. diff --git a/third_party/google/api/servicecontrol/README.md b/third_party/google/api/servicecontrol/README.md deleted file mode 100644 index 3d9590ee0..000000000 --- a/third_party/google/api/servicecontrol/README.md +++ /dev/null @@ -1,126 +0,0 @@ -Google Service Control provides control plane functionality to managed services, -such as logging, monitoring, and status checks. This page provides an overview -of what it does and how it works. - -## Why use Service Control? - -When you develop a cloud service, you typically start with the business -requirements and the architecture design, then proceed with API definition -and implementation. Before you put your service into production, you -need to deal with many control plane issues: - -* How to control access to your service. -* How to send logging and monitoring data to both consumers and producers. -* How to create and manage dashboards to visualize this data. -* How to automatically scale the control plane components with your service. - -Service Control is a mature and feature-rich control plane provider -that addresses these needs with high efficiency, high scalability, -and high availability. It provides a simple public API that can be accessed -from anywhere using JSON REST and gRPC clients, so when you move your service -from on-premise to a cloud provider, or from one cloud provider to another, -you don't need to change the control plane provider. - -Services built using Google Cloud Endpoints already take advantage of -Service Control. Cloud Endpoints sends logging and monitoring data -through Google Service Control for every request arriving at its -proxy. If you need to report any additional logging and monitoring data for -your Cloud Endpoints service, you can call the Service Control API directly -from your service. - -The Service Control API definition is open sourced and available on -[GitHub](https://github.com/googleapis/googleapis/tree/master/google/api/servicecontrol). -By changing the DNS name, you can easily use alternative implementations of -the Service Control API. - -## Architecture - -Google Service Control works with a set of *managed services* and their -*operations* (activities), *checks* whether an operation is allowed to proceed, -and *reports* completed operations. Behind the scenes, it leverages other -Google Cloud services, such as -[Google Service Management](/service-management), -[Stackdriver Logging](/logging), and [Stackdriver Monitoring](/monitoring), -while hiding their complexity from service producers. It enables service -producers to send telemetry data to their consumers. 
It uses caching, -batching, aggregation, and retries to deliver higher performance and -availability than the individual backend systems it encapsulates. - -
-[Figure 1: Using Google Service Control. Architecture diagram: the overall architecture of a service that uses Google Service Control.]
- -The Service Control API provides two methods: - -* [`services.check`](/service-control/reference/rest/v1/services/check), used for: - * Ensuring valid consumer status - * Validating API keys -* [`services.report`](/service-control/reference/rest/v1/services/report), used for: - * Sending logs to Stackdriver Logging - * Sending metrics to Stackdriver Monitoring - -We’ll look at these in more detail in the rest of this overview. - -## Managed services - -A [managed service](/service-management/reference/rest/v1/services) is -a network service managed by -[Google Service Management](/service-management). Each managed service has a -unique name, such as `example.googleapis.com`, which must be a valid -fully-qualified DNS name, as per RFC 1035. - -For example: - -* Google Cloud Pub/Sub (`pubsub.googleapis.com`) -* Google Cloud Vision (`vision.googleapis.com`) -* Google Cloud Bigtable (`bigtable.googleapis.com`) -* Google Cloud Datastore (`datastore.googleapis.com`) - -Google Service Management manages the lifecycle of each service’s -configuration, which is used to customize Google Service Control's behavior. -Service configurations are also used by Google Cloud Console -for displaying APIs and their settings, enabling/disabling APIs, and more. - -## Operations - -Google Service Control uses the generic concept of an *operation* -to represent the -activities of a managed service, such as API calls and resource usage. Each -operation is associated with a managed service and a specific service -consumer, and has a set of properties that describe the operation, such as -the API method name and resource usage amount. For more information, see the -[Operation definition](/service-control/rest/v1/Operation). - -## Check - -The [`services.check`](/service-control/reference/rest/v1/services/check) -method determines whether an operation should be allowed to proceed -for a managed service. - -For example: - -* Check if the consumer is still active. -* Check if the consumer has enabled the service. -* Check if the API key is still valid. - -By performing multiple checks within a single method call, it provides -better performance, higher reliability, and reduced development cost to -service producers compared to checking with multiple backend systems. - -## Report - -The [`services.report`](/service-control/reference/rest/v1/services/report) -method reports completed operations for -a managed service to backend systems, such as logging and monitoring. The -reported data can be seen in Google API Console and Google Cloud Console, -and retrieved with appropriate APIs, such as the Stackdriver Logging and -Stackdriver Monitoring APIs. - -## Next steps - -* Read our [Getting Started guide](/service-control/getting-started) to find out - how to set up and use the Google Service Control API. diff --git a/third_party/google/api/servicecontrol/v1/check_error.proto b/third_party/google/api/servicecontrol/v1/check_error.proto deleted file mode 100644 index 3395839d8..000000000 --- a/third_party/google/api/servicecontrol/v1/check_error.proto +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api.servicecontrol.v1; - -import "google/api/annotations.proto"; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/servicecontrol/v1;servicecontrol"; -option java_multiple_files = true; -option java_outer_classname = "CheckErrorProto"; -option java_package = "com.google.api.servicecontrol.v1"; - -// Defines the errors to be returned in -// [google.api.servicecontrol.v1.CheckResponse.check_errors][google.api.servicecontrol.v1.CheckResponse.check_errors]. -message CheckError { - // Error codes for Check responses. - enum Code { - // This is never used in `CheckResponse`. - ERROR_CODE_UNSPECIFIED = 0; - - // The consumer's project id was not found. - // Same as [google.rpc.Code.NOT_FOUND][]. - NOT_FOUND = 5; - - // The consumer doesn't have access to the specified resource. - // Same as [google.rpc.Code.PERMISSION_DENIED][]. - PERMISSION_DENIED = 7; - - // Quota check failed. Same as [google.rpc.Code.RESOURCE_EXHAUSTED][]. - RESOURCE_EXHAUSTED = 8; - - // The consumer hasn't activated the service. - SERVICE_NOT_ACTIVATED = 104; - - // The consumer cannot access the service because billing is disabled. - BILLING_DISABLED = 107; - - // The consumer's project has been marked as deleted (soft deletion). - PROJECT_DELETED = 108; - - // The consumer's project number or id does not represent a valid project. - PROJECT_INVALID = 114; - - // The IP address of the consumer is invalid for the specific consumer - // project. - IP_ADDRESS_BLOCKED = 109; - - // The referer address of the consumer request is invalid for the specific - // consumer project. - REFERER_BLOCKED = 110; - - // The client application of the consumer request is invalid for the - // specific consumer project. - CLIENT_APP_BLOCKED = 111; - - // The API targeted by this request is invalid for the specified consumer - // project. - API_TARGET_BLOCKED = 122; - - // The consumer's API key is invalid. - API_KEY_INVALID = 105; - - // The consumer's API Key has expired. - API_KEY_EXPIRED = 112; - - // The consumer's API Key was not found in config record. - API_KEY_NOT_FOUND = 113; - - // The backend server for looking up project id/number is unavailable. - NAMESPACE_LOOKUP_UNAVAILABLE = 300; - - // The backend server for checking service status is unavailable. - SERVICE_STATUS_UNAVAILABLE = 301; - - // The backend server for checking billing status is unavailable. - BILLING_STATUS_UNAVAILABLE = 302; - } - - // The error code. - Code code = 1; - - // Free-form text providing details on the error cause of the error. - string detail = 2; -} diff --git a/third_party/google/api/servicecontrol/v1/distribution.proto b/third_party/google/api/servicecontrol/v1/distribution.proto deleted file mode 100644 index 40b89f5ba..000000000 --- a/third_party/google/api/servicecontrol/v1/distribution.proto +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api.servicecontrol.v1; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/servicecontrol/v1;servicecontrol"; -option java_multiple_files = true; -option java_outer_classname = "DistributionProto"; -option java_package = "com.google.api.servicecontrol.v1"; - -// Distribution represents a frequency distribution of double-valued sample -// points. It contains the size of the population of sample points plus -// additional optional information: -// -// - the arithmetic mean of the samples -// - the minimum and maximum of the samples -// - the sum-squared-deviation of the samples, used to compute variance -// - a histogram of the values of the sample points -message Distribution { - // Describing buckets with constant width. - message LinearBuckets { - // The number of finite buckets. With the underflow and overflow buckets, - // the total number of buckets is `num_finite_buckets` + 2. - // See comments on `bucket_options` for details. - int32 num_finite_buckets = 1; - - // The i'th linear bucket covers the interval - // [offset + (i-1) * width, offset + i * width) - // where i ranges from 1 to num_finite_buckets, inclusive. - // Must be strictly positive. - double width = 2; - - // The i'th linear bucket covers the interval - // [offset + (i-1) * width, offset + i * width) - // where i ranges from 1 to num_finite_buckets, inclusive. - double offset = 3; - } - - // Describing buckets with exponentially growing width. - message ExponentialBuckets { - // The number of finite buckets. With the underflow and overflow buckets, - // the total number of buckets is `num_finite_buckets` + 2. - // See comments on `bucket_options` for details. - int32 num_finite_buckets = 1; - - // The i'th exponential bucket covers the interval - // [scale * growth_factor^(i-1), scale * growth_factor^i) - // where i ranges from 1 to num_finite_buckets inclusive. - // Must be larger than 1.0. - double growth_factor = 2; - - // The i'th exponential bucket covers the interval - // [scale * growth_factor^(i-1), scale * growth_factor^i) - // where i ranges from 1 to num_finite_buckets inclusive. - // Must be > 0. - double scale = 3; - } - - // Describing buckets with arbitrary user-provided width. - message ExplicitBuckets { - // 'bound' is a list of strictly increasing boundaries between - // buckets. Note that a list of length N-1 defines N buckets because - // of fenceposting. See comments on `bucket_options` for details. - // - // The i'th finite bucket covers the interval - // [bound[i-1], bound[i]) - // where i ranges from 1 to bound_size() - 1. Note that there are no - // finite buckets at all if 'bound' only contains a single element; in - // that special case the single bound defines the boundary between the - // underflow and overflow buckets. - // - // bucket number lower bound upper bound - // i == 0 (underflow) -inf bound[i] - // 0 < i < bound_size() bound[i-1] bound[i] - // i == bound_size() (overflow) bound[i-1] +inf - repeated double bounds = 1; - } - - // The total number of samples in the distribution. 
Must be >= 0. - int64 count = 1; - - // The arithmetic mean of the samples in the distribution. If `count` is - // zero then this field must be zero. - double mean = 2; - - // The minimum of the population of values. Ignored if `count` is zero. - double minimum = 3; - - // The maximum of the population of values. Ignored if `count` is zero. - double maximum = 4; - - // The sum of squared deviations from the mean: - // Sum[i=1..count]((x_i - mean)^2) - // where each x_i is a sample values. If `count` is zero then this field - // must be zero, otherwise validation of the request fails. - double sum_of_squared_deviation = 5; - - // The number of samples in each histogram bucket. `bucket_counts` are - // optional. If present, they must sum to the `count` value. - // - // The buckets are defined below in `bucket_option`. There are N buckets. - // `bucket_counts[0]` is the number of samples in the underflow bucket. - // `bucket_counts[1]` to `bucket_counts[N-1]` are the numbers of samples - // in each of the finite buckets. And `bucket_counts[N] is the number - // of samples in the overflow bucket. See the comments of `bucket_option` - // below for more details. - // - // Any suffix of trailing zeros may be omitted. - repeated int64 bucket_counts = 6; - - // Defines the buckets in the histogram. `bucket_option` and `bucket_counts` - // must be both set, or both unset. - // - // Buckets are numbered in the range of [0, N], with a total of N+1 buckets. - // There must be at least two buckets (a single-bucket histogram gives - // no information that isn't already provided by `count`). - // - // The first bucket is the underflow bucket which has a lower bound - // of -inf. The last bucket is the overflow bucket which has an - // upper bound of +inf. All other buckets (if any) are called "finite" - // buckets because they have finite lower and upper bounds. As described - // below, there are three ways to define the finite buckets. - // - // (1) Buckets with constant width. - // (2) Buckets with exponentially growing widths. - // (3) Buckets with arbitrary user-provided widths. - // - // In all cases, the buckets cover the entire real number line (-inf, - // +inf). Bucket upper bounds are exclusive and lower bounds are - // inclusive. The upper bound of the underflow bucket is equal to the - // lower bound of the smallest finite bucket; the lower bound of the - // overflow bucket is equal to the upper bound of the largest finite - // bucket. - oneof bucket_option { - // Buckets with constant width. - LinearBuckets linear_buckets = 7; - - // Buckets with exponentially growing width. - ExponentialBuckets exponential_buckets = 8; - - // Buckets with arbitrary user-provided width. - ExplicitBuckets explicit_buckets = 9; - } -} diff --git a/third_party/google/api/servicecontrol/v1/log_entry.proto b/third_party/google/api/servicecontrol/v1/log_entry.proto deleted file mode 100644 index 50b0fc468..000000000 --- a/third_party/google/api/servicecontrol/v1/log_entry.proto +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
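The exponential bucketing rules in the `Distribution` message above can be made concrete with a small, self-contained sketch: finite bucket `i` covers `[scale * growth_factor^(i-1), scale * growth_factor^i)`, index 0 is the underflow bucket, index `num_finite_buckets + 1` is the overflow bucket, and `bucket_counts` therefore has `num_finite_buckets + 2` entries:

```go
// Self-contained sketch of the exponential bucket index computation.
package servicecontrolexample

import "math"

// exponentialBucketIndex returns the index into bucket_counts for one sample.
func exponentialBucketIndex(sample, scale, growthFactor float64, numFiniteBuckets int) int {
	if sample < scale {
		return 0 // underflow bucket
	}
	// Smallest i with sample < scale*growthFactor^i, i.e. floor(log_g(sample/scale)) + 1.
	i := int(math.Floor(math.Log(sample/scale)/math.Log(growthFactor))) + 1
	if i > numFiniteBuckets {
		return numFiniteBuckets + 1 // overflow bucket
	}
	return i
}

// addSample updates a bucket_counts slice of length numFiniteBuckets+2 in place.
func addSample(bucketCounts []int64, sample, scale, growthFactor float64) {
	idx := exponentialBucketIndex(sample, scale, growthFactor, len(bucketCounts)-2)
	bucketCounts[idx]++
}
```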
-// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api.servicecontrol.v1; - -import "google/api/annotations.proto"; -import "google/logging/type/log_severity.proto"; -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/timestamp.proto"; - -option go_package = "google.golang.org/genproto/googleapis/api/servicecontrol/v1;servicecontrol"; -option java_multiple_files = true; -option java_outer_classname = "LogEntryProto"; -option java_package = "com.google.api.servicecontrol.v1"; - -// An individual log entry. -message LogEntry { - // Required. The log to which this log entry belongs. Examples: `"syslog"`, - // `"book_log"`. - string name = 10; - - // The time the event described by the log entry occurred. If - // omitted, defaults to operation start time. - google.protobuf.Timestamp timestamp = 11; - - // The severity of the log entry. The default value is - // `LogSeverity.DEFAULT`. - google.logging.type.LogSeverity severity = 12; - - // A unique ID for the log entry used for deduplication. If omitted, - // the implementation will generate one based on operation_id. - string insert_id = 4; - - // A set of user-defined (key, value) data that provides additional - // information about the log entry. - map labels = 13; - - // The log entry payload, which can be one of multiple types. - oneof payload { - // The log entry payload, represented as a protocol buffer that is - // expressed as a JSON object. The only accepted type currently is - // [AuditLog][google.cloud.audit.AuditLog]. - google.protobuf.Any proto_payload = 2; - - // The log entry payload, represented as a Unicode string (UTF-8). - string text_payload = 3; - - // The log entry payload, represented as a structure that - // is expressed as a JSON object. - google.protobuf.Struct struct_payload = 6; - } -} diff --git a/third_party/google/api/servicecontrol/v1/metric_value.proto b/third_party/google/api/servicecontrol/v1/metric_value.proto deleted file mode 100644 index 9a62ff698..000000000 --- a/third_party/google/api/servicecontrol/v1/metric_value.proto +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api.servicecontrol.v1; - -import "google/api/annotations.proto"; -import "google/api/servicecontrol/v1/distribution.proto"; -import "google/protobuf/timestamp.proto"; -import "google/type/money.proto"; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/servicecontrol/v1;servicecontrol"; -option java_multiple_files = true; -option java_outer_classname = "MetricValueSetProto"; -option java_package = "com.google.api.servicecontrol.v1"; - -// Represents a single metric value. -message MetricValue { - // The labels describing the metric value. 
- // See comments on - // [google.api.servicecontrol.v1.Operation.labels][google.api.servicecontrol.v1.Operation.labels] - // for the overriding relationship. - map labels = 1; - - // The start of the time period over which this metric value's measurement - // applies. The time period has different semantics for different metric - // types (cumulative, delta, and gauge). See the metric definition - // documentation in the service configuration for details. - google.protobuf.Timestamp start_time = 2; - - // The end of the time period over which this metric value's measurement - // applies. - google.protobuf.Timestamp end_time = 3; - - // The value. The type of value used in the request must - // agree with the metric definition in the service configuration, otherwise - // the MetricValue is rejected. - oneof value { - // A boolean value. - bool bool_value = 4; - - // A signed 64-bit integer value. - int64 int64_value = 5; - - // A double precision floating point value. - double double_value = 6; - - // A text string value. - string string_value = 7; - - // A distribution value. - Distribution distribution_value = 8; - } -} - -// Represents a set of metric values in the same metric. -// Each metric value in the set should have a unique combination of start time, -// end time, and label values. -message MetricValueSet { - // The metric name defined in the service configuration. - string metric_name = 1; - - // The values in this metric. - repeated MetricValue metric_values = 2; -} diff --git a/third_party/google/api/servicecontrol/v1/operation.proto b/third_party/google/api/servicecontrol/v1/operation.proto deleted file mode 100644 index 301f3575c..000000000 --- a/third_party/google/api/servicecontrol/v1/operation.proto +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api.servicecontrol.v1; - -import "google/api/annotations.proto"; -import "google/api/servicecontrol/v1/log_entry.proto"; -import "google/api/servicecontrol/v1/metric_value.proto"; -import "google/protobuf/timestamp.proto"; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/servicecontrol/v1;servicecontrol"; -option java_multiple_files = true; -option java_outer_classname = "OperationProto"; -option java_package = "com.google.api.servicecontrol.v1"; - -// Represents information regarding an operation. -message Operation { - // Defines the importance of the data contained in the operation. - enum Importance { - // The API implementation may cache and aggregate the data. - // The data may be lost when rare and unexpected system failures occur. - LOW = 0; - - // The API implementation doesn't cache and aggregate the data. - // If the method returns successfully, it's guaranteed that the data has - // been persisted in durable storage. - HIGH = 1; - } - - // Identity of the operation. This must be unique within the scope of the - // service that generated the operation. 
If the service calls - // Check() and Report() on the same operation, the two calls should carry - // the same id. - // - // UUID version 4 is recommended, though not required. - // In scenarios where an operation is computed from existing information - // and an idempotent id is desirable for deduplication purpose, UUID version 5 - // is recommended. See RFC 4122 for details. - string operation_id = 1; - - // Fully qualified name of the operation. Reserved for future use. - string operation_name = 2; - - // Identity of the consumer who is using the service. - // This field should be filled in for the operations initiated by a - // consumer, but not for service-initiated operations that are - // not related to a specific consumer. - // - // This can be in one of the following formats: - // project:, - // project_number:, - // api_key:. - string consumer_id = 3; - - // Required. Start time of the operation. - google.protobuf.Timestamp start_time = 4; - - // End time of the operation. - // Required when the operation is used in - // [ServiceController.Report][google.api.servicecontrol.v1.ServiceController.Report], - // but optional when the operation is used in - // [ServiceController.Check][google.api.servicecontrol.v1.ServiceController.Check]. - google.protobuf.Timestamp end_time = 5; - - // Labels describing the operation. Only the following labels are allowed: - // - // - Labels describing monitored resources as defined in - // the service configuration. - // - Default labels of metric values. When specified, labels defined in the - // metric value override these default. - // - The following labels defined by Google Cloud Platform: - // - `cloud.googleapis.com/location` describing the location where the - // operation happened, - // - `servicecontrol.googleapis.com/user_agent` describing the user agent - // of the API request, - // - `servicecontrol.googleapis.com/service_agent` describing the service - // used to handle the API request (e.g. ESP), - // - `servicecontrol.googleapis.com/platform` describing the platform - // where the API is served (e.g. GAE, GCE, GKE). - map labels = 6; - - // Represents information about this operation. Each MetricValueSet - // corresponds to a metric defined in the service configuration. - // The data type used in the MetricValueSet must agree with - // the data type specified in the metric definition. - // - // Within a single operation, it is not allowed to have more than one - // MetricValue instances that have the same metric names and identical - // label value combinations. If a request has such duplicated MetricValue - // instances, the entire request is rejected with - // an invalid argument error. - repeated MetricValueSet metric_value_sets = 7; - - // Represents information to be logged. - repeated LogEntry log_entries = 8; - - // DO NOT USE. This is an experimental field. - Importance importance = 11; -} diff --git a/third_party/google/api/servicecontrol/v1/quota_controller.proto b/third_party/google/api/servicecontrol/v1/quota_controller.proto deleted file mode 100644 index 808a73545..000000000 --- a/third_party/google/api/servicecontrol/v1/quota_controller.proto +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
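A hedged sketch of filling in the `Operation` defined above for a later Report call, using the Go types generated from these protos. Field and oneof wrapper names follow protoc-gen-go conventions and should be checked against the generated package; the metric name, location, and consumer id are placeholders, and `operationID` is expected to be a UUID version 4 as recommended above:

```go
// Sketch only: constructing an Operation with one metric value set.
package servicecontrolexample

import (
	"time"

	servicecontrol "google.golang.org/genproto/googleapis/api/servicecontrol/v1"
	"google.golang.org/protobuf/types/known/timestamppb"
)

// newRequestOperation records a single completed API call for one consumer.
func newRequestOperation(operationID string, start, end time.Time) *servicecontrol.Operation {
	return &servicecontrol.Operation{
		OperationId: operationID,
		ConsumerId:  "project_number:1234567890",
		StartTime:   timestamppb.New(start),
		EndTime:     timestamppb.New(end),
		Labels: map[string]string{
			"cloud.googleapis.com/location": "us-central1",
		},
		// One MetricValueSet per metric defined in the service configuration.
		MetricValueSets: []*servicecontrol.MetricValueSet{{
			MetricName: "library.googleapis.com/request_count", // placeholder metric
			MetricValues: []*servicecontrol.MetricValue{{
				Value: &servicecontrol.MetricValue_Int64Value{Int64Value: 1},
			}},
		}},
	}
}
```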
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api.servicecontrol.v1; - -import "google/api/annotations.proto"; -import "google/api/servicecontrol/v1/metric_value.proto"; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/servicecontrol/v1;servicecontrol"; -option java_multiple_files = true; -option java_outer_classname = "QuotaControllerProto"; -option java_package = "com.google.api.servicecontrol.v1"; - -// [Google Quota Control API](/service-control/overview) -// -// Allows clients to allocate and release quota against a [managed -// service](https://cloud.google.com/service-management/reference/rpc/google.api/servicemanagement.v1#google.api.servicemanagement.v1.ManagedService). -service QuotaController { - // Attempts to allocate quota for the specified consumer. It should be called - // before the operation is executed. - // - // This method requires the `servicemanagement.services.quota` - // permission on the specified service. For more information, see - // [Cloud IAM](https://cloud.google.com/iam). - // - // **NOTE:** The client **must** fail-open on server errors `INTERNAL`, - // `UNKNOWN`, `DEADLINE_EXCEEDED`, and `UNAVAILABLE`. To ensure system - // reliability, the server may inject these errors to prohibit any hard - // dependency on the quota functionality. - rpc AllocateQuota(AllocateQuotaRequest) returns (AllocateQuotaResponse) { - option (google.api.http) = { - post: "/v1/services/{service_name}:allocateQuota" - body: "*" - }; - } -} - -// Request message for the AllocateQuota method. -message AllocateQuotaRequest { - // Name of the service as specified in the service configuration. For example, - // `"pubsub.googleapis.com"`. - // - // See [google.api.Service][google.api.Service] for the definition of a - // service name. - string service_name = 1; - - // Operation that describes the quota allocation. - QuotaOperation allocate_operation = 2; - - // Specifies which version of service configuration should be used to process - // the request. If unspecified or no matching version can be found, the latest - // one will be used. - string service_config_id = 4; -} - -// Represents information regarding a quota operation. -message QuotaOperation { - // Supported quota modes. - enum QuotaMode { - // Guard against implicit default. Must not be used. - UNSPECIFIED = 0; - - // For AllocateQuota request, allocates quota for the amount specified in - // the service configuration or specified using the quota metrics. If the - // amount is higher than the available quota, allocation error will be - // returned and no quota will be allocated. - NORMAL = 1; - - // The operation allocates quota for the amount specified in the service - // configuration or specified using the quota metrics. If the amount is - // higher than the available quota, request does not fail but all available - // quota will be allocated. - BEST_EFFORT = 2; - - // For AllocateQuota request, only checks if there is enough quota - // available and does not change the available quota. No lock is placed on - // the available quota either. 
- CHECK_ONLY = 3; - } - - // Identity of the operation. This is expected to be unique within the scope - // of the service that generated the operation, and guarantees idempotency in - // case of retries. - // - // UUID version 4 is recommended, though not required. In scenarios where an - // operation is computed from existing information and an idempotent id is - // desirable for deduplication purpose, UUID version 5 is recommended. See - // RFC 4122 for details. - string operation_id = 1; - - // Fully qualified name of the API method for which this quota operation is - // requested. This name is used for matching quota rules or metric rules and - // billing status rules defined in service configuration. This field is not - // required if the quota operation is performed on non-API resources. - // - // Example of an RPC method name: - // google.example.library.v1.LibraryService.CreateShelf - string method_name = 2; - - // Identity of the consumer for whom this quota operation is being performed. - // - // This can be in one of the following formats: - // project:, - // project_number:, - // api_key:. - string consumer_id = 3; - - // Labels describing the operation. - map labels = 4; - - // Represents information about this operation. Each MetricValueSet - // corresponds to a metric defined in the service configuration. - // The data type used in the MetricValueSet must agree with - // the data type specified in the metric definition. - // - // Within a single operation, it is not allowed to have more than one - // MetricValue instances that have the same metric names and identical - // label value combinations. If a request has such duplicated MetricValue - // instances, the entire request is rejected with - // an invalid argument error. - repeated MetricValueSet quota_metrics = 5; - - // Quota mode for this operation. - QuotaMode quota_mode = 6; -} - -// Response message for the AllocateQuota method. -message AllocateQuotaResponse { - // The same operation_id value used in the AllocateQuotaRequest. Used for - // logging and diagnostics purposes. - string operation_id = 1; - - // Indicates the decision of the allocate. - repeated QuotaError allocate_errors = 2; - - // Quota metrics to indicate the result of allocation. Depending on the - // request, one or more of the following metrics will be included: - // - // 1. Per quota group or per quota metric incremental usage will be specified - // using the following delta metric : - // "serviceruntime.googleapis.com/api/consumer/quota_used_count" - // - // 2. The quota limit reached condition will be specified using the following - // boolean metric : - // "serviceruntime.googleapis.com/quota/exceeded" - repeated MetricValueSet quota_metrics = 3; - - // ID of the actual config used to process the request. - string service_config_id = 4; -} - -// Represents error information for -// [QuotaOperation][google.api.servicecontrol.v1.QuotaOperation]. -message QuotaError { - // Error codes related to project config validations are deprecated since the - // quota controller methods do not perform these validations. Instead services - // have to call the Check method, without quota_properties field, to perform - // these validations before calling the quota controller methods. These - // methods check only for project deletion to be wipe out compliant. - enum Code { - // This is never used. - UNSPECIFIED = 0; - - // Quota allocation failed. - // Same as [google.rpc.Code.RESOURCE_EXHAUSTED][]. 
- RESOURCE_EXHAUSTED = 8; - - // Consumer cannot access the service because the service requires active - // billing. - BILLING_NOT_ACTIVE = 107; - - // Consumer's project has been marked as deleted (soft deletion). - PROJECT_DELETED = 108; - - // Specified API key is invalid. - API_KEY_INVALID = 105; - - // Specified API Key has expired. - API_KEY_EXPIRED = 112; - } - - // Error code. - Code code = 1; - - // Subject to whom this error applies. See the specific enum for more details - // on this field. For example, "clientip:" or - // "project:". - string subject = 2; - - // Free-form text that provides details on the cause of the error. - string description = 3; -} diff --git a/third_party/google/api/servicecontrol/v1/service_controller.proto b/third_party/google/api/servicecontrol/v1/service_controller.proto deleted file mode 100644 index 6e11bcf0f..000000000 --- a/third_party/google/api/servicecontrol/v1/service_controller.proto +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api.servicecontrol.v1; - -import "google/api/annotations.proto"; -import "google/api/servicecontrol/v1/check_error.proto"; -import "google/api/servicecontrol/v1/operation.proto"; -import "google/rpc/status.proto"; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/servicecontrol/v1;servicecontrol"; -option java_multiple_files = true; -option java_outer_classname = "ServiceControllerProto"; -option java_package = "com.google.api.servicecontrol.v1"; -option objc_class_prefix = "GASC"; - -// [Google Service Control API](/service-control/overview) -// -// Lets clients check and report operations against a [managed -// service](https://cloud.google.com/service-management/reference/rpc/google.api/servicemanagement.v1#google.api.servicemanagement.v1.ManagedService). -service ServiceController { - // Checks an operation with Google Service Control to decide whether - // the given operation should proceed. It should be called before the - // operation is executed. - // - // If feasible, the client should cache the check results and reuse them for - // 60 seconds. In case of server errors, the client can rely on the cached - // results for longer time. - // - // NOTE: the [CheckRequest][google.api.servicecontrol.v1.CheckRequest] has the - // size limit of 64KB. - // - // This method requires the `servicemanagement.services.check` permission - // on the specified service. For more information, see - // [Google Cloud IAM](https://cloud.google.com/iam). - rpc Check(CheckRequest) returns (CheckResponse) { - option (google.api.http) = { - post: "/v1/services/{service_name}:check" - body: "*" - }; - } - - // Reports operation results to Google Service Control, such as logs and - // metrics. It should be called after an operation is completed. - // - // If feasible, the client should aggregate reporting data for up to 5 - // seconds to reduce API traffic. 
Limiting aggregation to 5 seconds is to - // reduce data loss during client crashes. Clients should carefully choose - // the aggregation time window to avoid data loss risk more than 0.01% - // for business and compliance reasons. - // - // NOTE: the [ReportRequest][google.api.servicecontrol.v1.ReportRequest] has - // the size limit of 1MB. - // - // This method requires the `servicemanagement.services.report` permission - // on the specified service. For more information, see - // [Google Cloud IAM](https://cloud.google.com/iam). - rpc Report(ReportRequest) returns (ReportResponse) { - option (google.api.http) = { - post: "/v1/services/{service_name}:report" - body: "*" - }; - } -} - -// Request message for the Check method. -message CheckRequest { - // The service name as specified in its service configuration. For example, - // `"pubsub.googleapis.com"`. - // - // See - // [google.api.Service](https://cloud.google.com/service-management/reference/rpc/google.api#google.api.Service) - // for the definition of a service name. - string service_name = 1; - - // The operation to be checked. - Operation operation = 2; - - // Specifies which version of service configuration should be used to process - // the request. - // - // If unspecified or no matching version can be found, the - // latest one will be used. - string service_config_id = 4; -} - -// Response message for the Check method. -message CheckResponse { - message CheckInfo { - // Consumer info of this check. - ConsumerInfo consumer_info = 2; - } - - // `ConsumerInfo` provides information about the consumer project. - message ConsumerInfo { - // The Google cloud project number, e.g. 1234567890. A value of 0 indicates - // no project number is found. - int64 project_number = 1; - } - - // The same operation_id value used in the - // [CheckRequest][google.api.servicecontrol.v1.CheckRequest]. Used for logging - // and diagnostics purposes. - string operation_id = 1; - - // Indicate the decision of the check. - // - // If no check errors are present, the service should process the operation. - // Otherwise the service should use the list of errors to determine the - // appropriate action. - repeated CheckError check_errors = 2; - - // The actual config id used to process the request. - string service_config_id = 5; - - // Feedback data returned from the server during processing a Check request. - CheckInfo check_info = 6; -} - -// Request message for the Report method. -message ReportRequest { - // The service name as specified in its service configuration. For example, - // `"pubsub.googleapis.com"`. - // - // See - // [google.api.Service](https://cloud.google.com/service-management/reference/rpc/google.api#google.api.Service) - // for the definition of a service name. - string service_name = 1; - - // Operations to be reported. - // - // Typically the service should report one operation per request. - // Putting multiple operations into a single request is allowed, but should - // be used only when multiple operations are natually available at the time - // of the report. - // - // If multiple operations are in a single request, the total request size - // should be no larger than 1MB. See - // [ReportResponse.report_errors][google.api.servicecontrol.v1.ReportResponse.report_errors] - // for partial failure behavior. - repeated Operation operations = 2; - - // Specifies which version of service config should be used to process the - // request. 
- // - // If unspecified or no matching version can be found, the - // latest one will be used. - string service_config_id = 3; -} - -// Response message for the Report method. -message ReportResponse { - // Represents the processing error of one - // [Operation][google.api.servicecontrol.v1.Operation] in the request. - message ReportError { - // The - // [Operation.operation_id][google.api.servicecontrol.v1.Operation.operation_id] - // value from the request. - string operation_id = 1; - - // Details of the error when processing the - // [Operation][google.api.servicecontrol.v1.Operation]. - google.rpc.Status status = 2; - } - - // Partial failures, one for each `Operation` in the request that failed - // processing. There are three possible combinations of the RPC status: - // - // 1. The combination of a successful RPC status and an empty `report_errors` - // list indicates a complete success where all `Operations` in the - // request are processed successfully. - // 2. The combination of a successful RPC status and a non-empty - // `report_errors` list indicates a partial success where some - // `Operations` in the request succeeded. Each - // `Operation` that failed processing has a corresponding item - // in this list. - // 3. A failed RPC status indicates a general non-deterministic failure. - // When this happens, it's impossible to know which of the - // 'Operations' in the request succeeded or failed. - repeated ReportError report_errors = 1; - - // The actual config id used to process the request. - string service_config_id = 2; -} diff --git a/third_party/google/api/servicemanagement/README.md b/third_party/google/api/servicemanagement/README.md deleted file mode 100644 index e3e36df49..000000000 --- a/third_party/google/api/servicemanagement/README.md +++ /dev/null @@ -1,102 +0,0 @@ -Google Service Management manages a set of *services*. Service -Management allows *service producers* to -publish their services on Google Cloud Platform so that they can be discovered -and used by *service consumers*. It also handles the tasks of tracking -service lifecycle and programming various backend systems -- such as -[Stackdriver Logging](https://cloud.google.com/stackdriver), -[Stackdriver Monitoring](https://cloud.google.com/stackdriver) -- to support -the managed services. - -If you are a service producer, you can use the Google Service Management API -and [Google Cloud SDK (gcloud)](/sdk) to publish and manage your services. -Each managed service has a service configuration which declares various aspects -of the service such as its API surface, along with parameters to configure the -supporting backend -systems, such as logging and monitoring. If you build your service using -[Google Cloud Endpoints](https://cloud.google.com/endpoints/), the service -configuration will be handled automatically. - -If you are a service consumer and want to use a managed service, you can use the -Google Service Management API or [Google Cloud Console](https://console.cloud.google.com) -to activate the -service for your [Google developer project](https://developers.google.com/console/help/new/), -then start using its APIs and functions. - -## Managed services - -REST URL: `https://servicemanagement.googleapis.com/v1/services/{service-name}`
-REST schema is defined [here](/service-management/reference/rest/v1/services). - -A managed service refers to a network service managed by -Service Management. Each managed service has a unique name, such as -`example.googleapis.com`, which must be a valid fully-qualified DNS name, as per -RFC 1035. - -A managed service typically provides some REST APIs and/or other -functions to their service consumers, such as mobile apps or cloud services. - -Service producers can use methods, such as -[services.create](/service-management/reference/rest/v1/services/create), -[services.delete](/service-management/reference/rest/v1/services/delete), -[services.undelete](/service-management/reference/rest/v1/services/undelete), -to manipulate their managed services. - -## Service producers - -A service producer is the Google developer project responsible for publishing -and maintaining a managed service. Each managed service is owned by exactly one -service producer. - -## Service consumers - -A service consumer is a Google developer project that has enabled and can -invoke APIs on a managed service. A managed service can have many service -consumers. - -## Service configuration - -REST URL: `https://servicemanagement.googleapis.com/v1/services/{service-name}/configs/{config_id}`
-REST schema is defined [here](/service-management/reference/rest/v1/services.configs). - -Each managed service is described by a service configuration which covers a wide -range of features, including its name, title, RPC API definitions, -REST API definitions, documentation, authentication, and more. - -To change the configuration of a managed service, the service producer needs to -publish an updated service configuration to Service Management. -Service Management keeps a history of published -service configurations, making it possible to easily retrace how a service's -configuration evolved over time. Service configurations can be published using -the -[services.configs.create](/service-management/reference/rest/v1/services.configs/create) -or [services.configs.submit](/service-management/reference/rest/v1/services.configs/submit) -methods. - -Alternatively, `services.configs.submit` allows publishing an -[OpenAPI](https://github.com/OAI/OpenAPI-Specification) specification, formerly -known as the Swagger Specification, which is automatically converted to a -corresponding service configuration. - -## Service rollout - -REST URL: `https://servicemanagement.googleapis.com/v1/services/{service-name}/rollouts/{rollout-id}`
-REST schema is defined [here](/service-management/reference/rest/v1/services.rollouts). - -A `Rollout` defines how Google Service Management should deploy service -configurations to backend systems and how the configurations take effect at -runtime. It lets service producers specify multiple service configuration -versions to be deployed together, and a strategy that indicates how they -should be used. - -Updating a managed service's configuration can be dangerous, as a configuration -error can lead to a service outage. To mitigate risks, Service Management -supports gradual rollout of service configuration changes. This feature gives -service producers time to identity potential issues and rollback service -configuration changes in case of errors, thus minimizing the customer -impact of bad configurations. For example, you could specify that 5% of traffic -uses configuration 1, while the remaining 95% uses configuration 2. - -Service Management keeps a history of rollouts so that service -producers can undo to previous configuration versions. You can rollback a configuration -by initiating a new `Rollout` that clones a previously submitted -rollout record. \ No newline at end of file diff --git a/third_party/google/api/servicemanagement/artman_servicemanagement_v1.yaml b/third_party/google/api/servicemanagement/artman_servicemanagement_v1.yaml deleted file mode 100644 index cfb5fce21..000000000 --- a/third_party/google/api/servicemanagement/artman_servicemanagement_v1.yaml +++ /dev/null @@ -1,34 +0,0 @@ -common: - api_name: servicemanagement - api_version: v1 - organization_name: google-cloud - proto_deps: - - name: google-common-protos - src_proto_paths: - - v1 - service_yaml: servicemanagement_v1.yaml - gapic_yaml: v1/servicemanagement_gapic.yaml -artifacts: -- name: gapic_config - type: GAPIC_CONFIG -- name: java_gapic - type: GAPIC - language: JAVA -- name: python_gapic - type: GAPIC - language: PYTHON -- name: nodejs_gapic - type: GAPIC - language: NODEJS -- name: php_gapic - type: GAPIC - language: PHP -- name: go_gapic - type: GAPIC - language: GO -- name: ruby_gapic - type: GAPIC - language: RUBY -- name: csharp_gapic - type: GAPIC - language: CSHARP diff --git a/third_party/google/api/servicemanagement/servicemanagement_v1.yaml b/third_party/google/api/servicemanagement/servicemanagement_v1.yaml deleted file mode 100644 index b23a788f8..000000000 --- a/third_party/google/api/servicemanagement/servicemanagement_v1.yaml +++ /dev/null @@ -1,233 +0,0 @@ -type: google.api.Service -config_version: 2 -name: servicemanagement.googleapis.com -title: Google Service Management API - -apis: -- name: google.api.servicemanagement.v1.ServiceManager - -types: -- name: google.api.servicemanagement.v1.ConfigSource -- name: google.api.servicemanagement.v1.ConfigRef -- name: google.api.servicemanagement.v1.OperationMetadata -- name: google.api.servicemanagement.v1.Rollout -- name: google.api.servicemanagement.v1.SubmitConfigSourceResponse -- name: google.api.servicemanagement.v1.UndeleteServiceResponse - -documentation: - summary: |- - Google Service Management allows service producers to publish their services - on Google Cloud Platform so that they can be discovered and used by service - consumers. - overview: |- - Google Service Management manages a set of *services*. Service Management - allows *service producers* to publish their services on Google Cloud - Platform so that they can be discovered and used by *service consumers*. 
It - also handles the tasks of tracking service lifecycle and programming various - backend systems -- such as [Stackdriver - Logging](https://cloud.google.com/stackdriver), [Stackdriver - Monitoring](https://cloud.google.com/stackdriver) -- to support the managed - services. - - If you are a service producer, you can use the Google Service Management API - and [Google Cloud SDK (gcloud)](/sdk) to publish and manage your services. - Each managed service has a service configuration which declares various - aspects of the service such as its API surface, along with parameters to - configure the supporting backend systems, such as logging and monitoring. If - you build your service using [Google Cloud - Endpoints](https://cloud.google.com/endpoints/), the service configuration - will be handled automatically. - - If you are a service consumer and want to use a managed service, you can use - the Google Service Management API or [Google Cloud - Console](https://console.cloud.google.com) to activate the service for your - [Google developer project](https://developers.google.com/console/help/new/), - then start using its APIs and functions. - - ## Managed services - - REST URL: - `https://servicemanagement.googleapis.com/v1/services/{service-name}`
- REST schema is defined - [here](/service-management/reference/rest/v1/services). - - A managed service refers to a network service managed by Service Management. - Each managed service has a unique name, such as `example.googleapis.com`, - which must be a valid fully-qualified DNS name, as per RFC 1035. - - A managed service typically provides some REST APIs and/or other functions - to their service consumers, such as mobile apps or cloud services. - - Service producers can use methods, such as - [services.create](/service-management/reference/rest/v1/services/create), - [services.delete](/service-management/reference/rest/v1/services/delete), - [services.undelete](/service-management/reference/rest/v1/services/undelete), - to manipulate their managed services. - - ## Service producers - - A service producer is the Google developer project responsible for - publishing and maintaining a managed service. Each managed service is owned - by exactly one service producer. - - ## Service consumers - - A service consumer is a Google developer project that has enabled and can - invoke APIs on a managed service. A managed service can have many service - consumers. - - ## Service configuration - - REST URL: - `https://servicemanagement.googleapis.com/v1/services/{service-name}/configs/{config_id}` -
REST schema is defined - [here](/service-management/reference/rest/v1/services.configs). - - Each managed service is described by a service configuration which covers a - wide range of features, including its name, title, RPC API definitions, REST - API definitions, documentation, authentication, and more. - - To change the configuration of a managed service, the service producer needs - to publish an updated service configuration to Service Management. Service - Management keeps a history of published service configurations, making it - possible to easily retrace how a service's configuration evolved over time. - Service configurations can be published using the - [services.configs.create](/service-management/reference/rest/v1/services.configs/create) - or - [services.configs.submit](/service-management/reference/rest/v1/services.configs/submit) - methods. - - Alternatively, `services.configs.submit` allows publishing an - [OpenAPI](https://github.com/OAI/OpenAPI-Specification) specification, - formerly known as the Swagger Specification, which is automatically - converted to a corresponding service configuration. - - ## Service rollout - - REST URL: - `https://servicemanagement.googleapis.com/v1/services/{service-name}/rollouts/{rollout-id}` -
REST schema is defined - [here](/service-management/reference/rest/v1/services.rollouts). - - A `Rollout` defines how Google Service Management should deploy service - configurations to backend systems and how the configurations take effect at - runtime. It lets service producers specify multiple service configuration - versions to be deployed together, and a strategy that indicates how they - should be used. - - Updating a managed service's configuration can be dangerous, as a - configuration error can lead to a service outage. To mitigate risks, Service - Management supports gradual rollout of service configuration changes. This - feature gives service producers time to identity potential issues and - rollback service configuration changes in case of errors, thus minimizing - the customer impact of bad configurations. For example, you could specify - that 5% of traffic uses configuration 1, while the remaining 95% uses - configuration 2. - - Service Management keeps a history of rollouts so that service producers can - undo to previous configuration versions. You can rollback a configuration by - initiating a new `Rollout` that clones a previously submitted rollout - record. - rules: - - selector: google.longrunning.Operations.ListOperations - description: Lists service operations that match the specified filter in the request. - -backend: - rules: - - selector: google.longrunning.Operations.ListOperations - deadline: 10.0 - - selector: google.longrunning.Operations.GetOperation - deadline: 10.0 - - selector: google.api.servicemanagement.v1.ServiceManager.ListServices - deadline: 10.0 - - selector: google.api.servicemanagement.v1.ServiceManager.GetService - deadline: 10.0 - - selector: google.api.servicemanagement.v1.ServiceManager.CreateService - deadline: 20.0 - - selector: google.api.servicemanagement.v1.ServiceManager.DeleteService - deadline: 10.0 - - selector: google.api.servicemanagement.v1.ServiceManager.UndeleteService - deadline: 10.0 - - selector: google.api.servicemanagement.v1.ServiceManager.ListServiceConfigs - deadline: 10.0 - - selector: google.api.servicemanagement.v1.ServiceManager.GetServiceConfig - deadline: 10.0 - - selector: google.api.servicemanagement.v1.ServiceManager.CreateServiceConfig - deadline: 20.0 - - selector: google.api.servicemanagement.v1.ServiceManager.SubmitConfigSource - deadline: 20.0 - - selector: google.api.servicemanagement.v1.ServiceManager.ListServiceRollouts - deadline: 10.0 - - selector: google.api.servicemanagement.v1.ServiceManager.GetServiceRollout - deadline: 10.0 - - selector: google.api.servicemanagement.v1.ServiceManager.CreateServiceRollout - deadline: 10.0 - - selector: google.api.servicemanagement.v1.ServiceManager.GenerateConfigReport - deadline: 10.0 - - selector: google.api.servicemanagement.v1.ServiceManager.EnableService - deadline: 10.0 - - selector: google.api.servicemanagement.v1.ServiceManager.DisableService - deadline: 10.0 - - selector: google.iam.v1.IAMPolicy.SetIamPolicy - deadline: 10.0 - - selector: google.iam.v1.IAMPolicy.GetIamPolicy - deadline: 10.0 - - selector: google.iam.v1.IAMPolicy.TestIamPermissions - deadline: 10.0 - -http: - rules: - - selector: google.longrunning.Operations.ListOperations - get: /v1/operations - - - selector: google.iam.v1.IAMPolicy.SetIamPolicy - post: '/v1/{resource=services/*}:setIamPolicy' - body: '*' - additional_bindings: - - post: '/v1/{resource=services/*/consumers/*}:setIamPolicy' - body: '*' - - - selector: google.iam.v1.IAMPolicy.GetIamPolicy - post: 
'/v1/{resource=services/*}:getIamPolicy' - body: '*' - additional_bindings: - - post: '/v1/{resource=services/*/consumers/*}:getIamPolicy' - body: '*' - - - selector: google.iam.v1.IAMPolicy.TestIamPermissions - post: '/v1/{resource=services/*}:testIamPermissions' - body: '*' - additional_bindings: - - post: '/v1/{resource=services/*/consumers/*}:testIamPermissions' - body: '*' - - -authentication: - rules: - - selector: '*' - oauth: - canonical_scopes: |- - https://www.googleapis.com/auth/cloud-platform, - https://www.googleapis.com/auth/service.management - - selector: |- - google.api.servicemanagement.v1.ServiceManager.GetService, - google.api.servicemanagement.v1.ServiceManager.GetServiceConfig, - google.api.servicemanagement.v1.ServiceManager.GetServiceRollout, - google.api.servicemanagement.v1.ServiceManager.ListServiceConfigs, - google.api.servicemanagement.v1.ServiceManager.ListServiceRollouts, - google.api.servicemanagement.v1.ServiceManager.ListServices - oauth: - canonical_scopes: |- - https://www.googleapis.com/auth/cloud-platform, - https://www.googleapis.com/auth/cloud-platform.read-only, - https://www.googleapis.com/auth/service.management, - https://www.googleapis.com/auth/service.management.readonly - - selector: |- - google.iam.v1.IAMPolicy.GetIamPolicy, - google.iam.v1.IAMPolicy.TestIamPermissions - oauth: - canonical_scopes: |- - https://www.googleapis.com/auth/cloud-platform, - https://www.googleapis.com/auth/cloud-platform.read-only, - https://www.googleapis.com/auth/service.management, - https://www.googleapis.com/auth/service.management.readonly diff --git a/third_party/google/api/servicemanagement/v1/resources.proto b/third_party/google/api/servicemanagement/v1/resources.proto deleted file mode 100644 index 1c924849c..000000000 --- a/third_party/google/api/servicemanagement/v1/resources.proto +++ /dev/null @@ -1,299 +0,0 @@ -// Copyright 2018 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api.servicemanagement.v1; - -import "google/api/annotations.proto"; -import "google/api/config_change.proto"; -import "google/api/metric.proto"; -import "google/api/service.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/any.proto"; -import "google/protobuf/field_mask.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/timestamp.proto"; -import "google/rpc/status.proto"; - -option csharp_namespace = "Google.Cloud.ServiceManagement.V1"; -option go_package = "google.golang.org/genproto/googleapis/api/servicemanagement/v1;servicemanagement"; -option java_multiple_files = true; -option java_outer_classname = "ResourcesProto"; -option java_package = "com.google.api.servicemanagement.v1"; -option objc_class_prefix = "GASM"; -option php_namespace = "Google\\Cloud\\ServiceManagement\\V1"; - -// The full representation of a Service that is managed by -// Google Service Management. -message ManagedService { - // The name of the service. 
See the [overview](/service-management/overview) - // for naming requirements. - string service_name = 2; - - // ID of the project that produces and owns this service. - string producer_project_id = 3; -} - -// The metadata associated with a long running operation resource. -message OperationMetadata { - // Represents the status of one operation step. - message Step { - // The short description of the step. - string description = 2; - - // The status code. - Status status = 4; - } - - // Code describes the status of the operation (or one of its steps). - enum Status { - // Unspecifed code. - STATUS_UNSPECIFIED = 0; - - // The operation or step has completed without errors. - DONE = 1; - - // The operation or step has not started yet. - NOT_STARTED = 2; - - // The operation or step is in progress. - IN_PROGRESS = 3; - - // The operation or step has completed with errors. If the operation is - // rollbackable, the rollback completed with errors too. - FAILED = 4; - - // The operation or step has completed with cancellation. - CANCELLED = 5; - } - - // The full name of the resources that this operation is directly - // associated with. - repeated string resource_names = 1; - - // Detailed status information for each step. The order is undetermined. - repeated Step steps = 2; - - // Percentage of completion of this operation, ranging from 0 to 100. - int32 progress_percentage = 3; - - // The start time of the operation. - google.protobuf.Timestamp start_time = 4; -} - -// Represents a diagnostic message (error or warning) -message Diagnostic { - // The kind of diagnostic information possible. - enum Kind { - // Warnings and errors - WARNING = 0; - - // Only errors - ERROR = 1; - } - - // File name and line number of the error or warning. - string location = 1; - - // The kind of diagnostic information provided. - Kind kind = 2; - - // Message describing the error or warning. - string message = 3; -} - -// Represents a source file which is used to generate the service configuration -// defined by `google.api.Service`. -message ConfigSource { - // A unique ID for a specific instance of this message, typically assigned - // by the client for tracking purpose. If empty, the server may choose to - // generate one instead. - string id = 5; - - // Set of source configuration files that are used to generate a service - // configuration (`google.api.Service`). - repeated ConfigFile files = 2; -} - -// Generic specification of a source configuration file -message ConfigFile { - enum FileType { - // Unknown file type. - FILE_TYPE_UNSPECIFIED = 0; - - // YAML-specification of service. - SERVICE_CONFIG_YAML = 1; - - // OpenAPI specification, serialized in JSON. - OPEN_API_JSON = 2; - - // OpenAPI specification, serialized in YAML. - OPEN_API_YAML = 3; - - // FileDescriptorSet, generated by protoc. - // - // To generate, use protoc with imports and source info included. - // For an example test.proto file, the following command would put the value - // in a new file named out.pb. - // - // $protoc --include_imports --include_source_info test.proto -o out.pb - FILE_DESCRIPTOR_SET_PROTO = 4; - - // Uncompiled Proto file. Used for storage and display purposes only, - // currently server-side compilation is not supported. Should match the - // inputs to 'protoc' command used to generated FILE_DESCRIPTOR_SET_PROTO. A - // file of this type can only be included if at least one file of type - // FILE_DESCRIPTOR_SET_PROTO is included. 
- PROTO_FILE = 6; - } - - // The file name of the configuration file (full or relative path). - string file_path = 1; - - // The bytes that constitute the file. - bytes file_contents = 3; - - // The type of configuration file this represents. - FileType file_type = 4; -} - -// Represents a service configuration with its name and id. -message ConfigRef { - // Resource name of a service config. It must have the following - // format: "services/{service name}/configs/{config id}". - string name = 1; -} - -// Change report associated with a particular service configuration. -// -// It contains a list of ConfigChanges based on the comparison between -// two service configurations. -message ChangeReport { - // List of changes between two service configurations. - // The changes will be alphabetically sorted based on the identifier - // of each change. - // A ConfigChange identifier is a dot separated path to the configuration. - // Example: visibility.rules[selector='LibraryService.CreateBook'].restriction - repeated google.api.ConfigChange config_changes = 1; -} - -// A rollout resource that defines how service configuration versions are pushed -// to control plane systems. Typically, you create a new version of the -// service config, and then create a Rollout to push the service config. -message Rollout { - // Strategy that specifies how clients of Google Service Controller want to - // send traffic to use different config versions. This is generally - // used by API proxy to split traffic based on your configured precentage for - // each config version. - // - // One example of how to gradually rollout a new service configuration using - // this - // strategy: - // Day 1 - // - // Rollout { - // id: "example.googleapis.com/rollout_20160206" - // traffic_percent_strategy { - // percentages: { - // "example.googleapis.com/20160201": 70.00 - // "example.googleapis.com/20160206": 30.00 - // } - // } - // } - // - // Day 2 - // - // Rollout { - // id: "example.googleapis.com/rollout_20160207" - // traffic_percent_strategy: { - // percentages: { - // "example.googleapis.com/20160206": 100.00 - // } - // } - // } - message TrafficPercentStrategy { - // Maps service configuration IDs to their corresponding traffic percentage. - // Key is the service configuration ID, Value is the traffic percentage - // which must be greater than 0.0 and the sum must equal to 100.0. - map percentages = 1; - } - - // Strategy used to delete a service. This strategy is a placeholder only - // used by the system generated rollout to delete a service. - message DeleteServiceStrategy {} - - // Status of a Rollout. - enum RolloutStatus { - // No status specified. - ROLLOUT_STATUS_UNSPECIFIED = 0; - - // The Rollout is in progress. - IN_PROGRESS = 1; - - // The Rollout has completed successfully. - SUCCESS = 2; - - // The Rollout has been cancelled. This can happen if you have overlapping - // Rollout pushes, and the previous ones will be cancelled. - CANCELLED = 3; - - // The Rollout has failed and the rollback attempt has failed too. - FAILED = 4; - - // The Rollout has not started yet and is pending for execution. - PENDING = 5; - - // The Rollout has failed and rolled back to the previous successful - // Rollout. - FAILED_ROLLED_BACK = 6; - } - - // Optional unique identifier of this Rollout. Only lower case letters, digits - // and '-' are allowed. - // - // If not specified by client, the server will generate one. The generated id - // will have the form of , where "date" is the create - // date in ISO 8601 format. 
"revision number" is a monotonically increasing - // positive number that is reset every day for each service. - // An example of the generated rollout_id is '2016-02-16r1' - string rollout_id = 1; - - // Creation time of the rollout. Readonly. - google.protobuf.Timestamp create_time = 2; - - // The user who created the Rollout. Readonly. - string created_by = 3; - - // The status of this rollout. Readonly. In case of a failed rollout, - // the system will automatically rollback to the current Rollout - // version. Readonly. - RolloutStatus status = 4; - - // Strategy that defines which versions of service configurations should be - // pushed - // and how they should be used at runtime. - oneof strategy { - // Google Service Control selects service configurations based on - // traffic percentage. - TrafficPercentStrategy traffic_percent_strategy = 5; - - // The strategy associated with a rollout to delete a `ManagedService`. - // Readonly. - DeleteServiceStrategy delete_service_strategy = 200; - } - - // The name of the service associated with this Rollout. - string service_name = 8; -} diff --git a/third_party/google/api/servicemanagement/v1/servicemanagement_gapic.yaml b/third_party/google/api/servicemanagement/v1/servicemanagement_gapic.yaml deleted file mode 100644 index ade4187d3..000000000 --- a/third_party/google/api/servicemanagement/v1/servicemanagement_gapic.yaml +++ /dev/null @@ -1,300 +0,0 @@ -type: com.google.api.codegen.ConfigProto -config_schema_version: 1.0.0 -# The settings of generated code in a specific language. -language_settings: - java: - package_name: com.google.cloud.api.servicemanagement.v1 - python: - package_name: google.cloud.api.servicemanagement_v1.gapic - go: - package_name: cloud.google.com/go/api/servicemanagement/apiv1 - csharp: - package_name: Google.Api.Servicemanagement.V1 - ruby: - package_name: Google::Cloud::Api::Servicemanagement::V1 - php: - package_name: Google\Cloud\Api\Servicemanagement\V1 - nodejs: - package_name: servicemanagement.v1 -# A list of API interface configurations. -interfaces: - # The fully qualified name of the API interface. -- name: google.api.servicemanagement.v1.ServiceManager - # A list of resource collection configurations. - # Consists of a name_pattern and an entity_name. - # The name_pattern is a pattern to describe the names of the resources of this - # collection, using the platform's conventions for URI patterns. A generator - # may use this to generate methods to compose and decompose such names. The - # pattern should use named placeholders as in `shelves/{shelf}/books/{book}`; - # those will be taken as hints for the parameter names of the generated - # methods. If empty, no name methods are generated. - # The entity_name is the name to be used as a basis for generated methods and - # classes. - smoke_test: - method: ListServices - init_fields: - - producer_project_id=$PROJECT_ID - collections: [] - # Definition for retryable codes. - retry_codes_def: - - name: idempotent - retry_codes: - - UNAVAILABLE - - name: non_idempotent - retry_codes: [] - # Definition for retry/backoff parameters. - retry_params_def: - - name: default - initial_retry_delay_millis: 100 - retry_delay_multiplier: 1.3 - max_retry_delay_millis: 60000 - initial_rpc_timeout_millis: 20000 - rpc_timeout_multiplier: 1 - max_rpc_timeout_millis: 20000 - total_timeout_millis: 600000 - # A list of method configurations. - # Common properties: - # - # name - The simple name of the method. 
- # - # flattening - Specifies the configuration for parameter flattening. - # Describes the parameter groups for which a generator should produce method - # overloads which allow a client to directly pass request message fields as - # method parameters. This information may or may not be used, depending on - # the target language. - # Consists of groups, which each represent a list of parameters to be - # flattened. Each parameter listed must be a field of the request message. - # - # required_fields - Fields that are always required for a request to be - # valid. - # - # resource_name_treatment - An enum that specifies how to treat the resource - # name formats defined in the field_name_patterns and - # response_field_name_patterns fields. - # UNSET: default value - # NONE: the collection configs will not be used by the generated code. - # VALIDATE: string fields will be validated by the client against the - # specified resource name formats. - # STATIC_TYPES: the client will use generated types for resource names. - # - # page_streaming - Specifies the configuration for paging. - # Describes information for generating a method which transforms a paging - # list RPC into a stream of resources. - # Consists of a request and a response. - # The request specifies request information of the list method. It defines - # which fields match the paging pattern in the request. The request consists - # of a page_size_field and a token_field. The page_size_field is the name of - # the optional field specifying the maximum number of elements to be - # returned in the response. The token_field is the name of the field in the - # request containing the page token. - # The response specifies response information of the list method. It defines - # which fields match the paging pattern in the response. The response - # consists of a token_field and a resources_field. The token_field is the - # name of the field in the response containing the next page token. The - # resources_field is the name of the field in the response containing the - # list of resources belonging to the page. - # - # retry_codes_name - Specifies the configuration for retryable codes. The - # name must be defined in interfaces.retry_codes_def. - # - # retry_params_name - Specifies the configuration for retry/backoff - # parameters. The name must be defined in interfaces.retry_params_def. - # - # field_name_patterns - Maps the field name of the request type to - # entity_name of interfaces.collections. - # Specifies the string pattern that the field must follow. - # - # timeout_millis - Specifies the default timeout for a non-retrying call. If - # the call is retrying, refer to retry_params_name instead. 
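  # Illustration only (not from the original file): as a rough worked example of the
  # "default" retry_params_def defined above, a retrying call would wait roughly
  # 100ms, 130ms, 169ms, 220ms, ... between attempts (each delay multiplied by 1.3),
  # never waiting more than 60000ms, with every RPC attempt bounded by 20000ms
  # (rpc_timeout_multiplier is 1, so the per-attempt timeout stays constant) and the
  # whole retried call bounded by total_timeout_millis = 600000ms.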
- methods: - - name: ListServices - flattening: - groups: - - parameters: - - producer_project_id - - consumer_id - required_fields: - page_streaming: - request: - page_size_field: page_size - token_field: page_token - response: - token_field: next_page_token - resources_field: services - retry_codes_name: idempotent - retry_params_name: default - timeout_millis: 10000 - - name: GetService - flattening: - groups: - - parameters: - - service_name - required_fields: - - service_name - retry_codes_name: idempotent - retry_params_name: default - timeout_millis: 10000 - - name: CreateService - flattening: - groups: - - parameters: - - service - required_fields: - - service - retry_codes_name: non_idempotent - retry_params_name: default - timeout_millis: 20000 - - name: DeleteService - flattening: - groups: - - parameters: - - service_name - required_fields: - - service_name - retry_codes_name: idempotent - retry_params_name: default - timeout_millis: 60000 - - name: UndeleteService - flattening: - groups: - - parameters: - - service_name - required_fields: - - service_name - retry_codes_name: non_idempotent - retry_params_name: default - # REVIEW: Could this operation take a long time? - timeout_millis: 60000 - - name: ListServiceConfigs - flattening: - groups: - - parameters: - - service_name - required_fields: - - service_name - page_streaming: - request: - page_size_field: page_size - token_field: page_token - response: - token_field: next_page_token - resources_field: service_configs - retry_codes_name: idempotent - retry_params_name: default - timeout_millis: 10000 - - name: GetServiceConfig - flattening: - groups: - - parameters: - - service_name - - config_id - - view - required_fields: - - service_name - - config_id - retry_codes_name: idempotent - retry_params_name: default - timeout_millis: 10000 - - name: CreateServiceConfig - flattening: - groups: - - parameters: - - service_name - - service_config - required_fields: - - service_name - - service_config - retry_codes_name: non_idempotent - retry_params_name: default - timeout_millis: 20000 - - name: SubmitConfigSource - flattening: - groups: - - parameters: - - service_name - - config_source - - validate_only - required_fields: - - service_name - - config_source - retry_codes_name: non_idempotent - retry_params_name: default - timeout_millis: 10000 - - name: ListServiceRollouts - flattening: - groups: - - parameters: - - service_name - - filter - required_fields: - - service_name - page_streaming: - request: - page_size_field: page_size - token_field: page_token - response: - token_field: next_page_token - resources_field: rollouts - retry_codes_name: idempotent - retry_params_name: default - timeout_millis: 10000 - - name: GetServiceRollout - flattening: - groups: - - parameters: - - service_name - - rollout_id - required_fields: - - service_name - - rollout_id - retry_codes_name: idempotent - retry_params_name: default - timeout_millis: 10000 - - name: CreateServiceRollout - flattening: - groups: - - parameters: - - service_name - - rollout - required_fields: - - service_name - - rollout - retry_codes_name: non_idempotent - retry_params_name: default - timeout_millis: 10000 - - name: GenerateConfigReport - flattening: - groups: - - parameters: - - new_config - - old_config - required_fields: - - new_config - - old_config - retry_codes_name: non_idempotent - retry_params_name: default - timeout_millis: 10000 - - name: EnableService - flattening: - groups: - - parameters: - - service_name - - consumer_id - required_fields: - - 
service_name - - consumer_id - retry_codes_name: idempotent - retry_params_name: default - timeout_millis: 10000 - - name: DisableService - flattening: - groups: - - parameters: - - service_name - - consumer_id - required_fields: - - service_name - - consumer_id - retry_codes_name: idempotent - retry_params_name: default - timeout_millis: 10000 diff --git a/third_party/google/api/servicemanagement/v1/servicemanager.proto b/third_party/google/api/servicemanagement/v1/servicemanager.proto deleted file mode 100644 index 02d506665..000000000 --- a/third_party/google/api/servicemanagement/v1/servicemanager.proto +++ /dev/null @@ -1,503 +0,0 @@ -// Copyright 2018 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api.servicemanagement.v1; - -import "google/api/annotations.proto"; -import "google/api/service.proto"; -import "google/api/servicemanagement/v1/resources.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/any.proto"; -import "google/protobuf/field_mask.proto"; -import "google/protobuf/struct.proto"; -import "google/rpc/status.proto"; - -option csharp_namespace = "Google.Cloud.ServiceManagement.V1"; -option go_package = "google.golang.org/genproto/googleapis/api/servicemanagement/v1;servicemanagement"; -option java_multiple_files = true; -option java_outer_classname = "ServiceManagerProto"; -option java_package = "com.google.api.servicemanagement.v1"; -option objc_class_prefix = "GASM"; -option php_namespace = "Google\\Cloud\\ServiceManagement\\V1"; - -// [Google Service Management API](/service-management/overview) -service ServiceManager { - // Lists managed services. - // - // Returns all public services. For authenticated users, also returns all - // services the calling user has "servicemanagement.services.get" permission - // for. - // - // **BETA:** If the caller specifies the `consumer_id`, it returns only the - // services enabled on the consumer. The `consumer_id` must have the format - // of "project:{PROJECT-ID}". - rpc ListServices(ListServicesRequest) returns (ListServicesResponse) { - option (google.api.http) = { - get: "/v1/services" - }; - } - - // Gets a managed service. Authentication is required unless the service is - // public. - rpc GetService(GetServiceRequest) returns (ManagedService) { - option (google.api.http) = { - get: "/v1/services/{service_name}" - }; - } - - // Creates a new managed service. - // Please note one producer project can own no more than 20 services. - // - // Operation - rpc CreateService(CreateServiceRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/services" - body: "service" - }; - } - - // Deletes a managed service. This method will change the service to the - // `Soft-Delete` state for 30 days. Within this period, service producers may - // call - // [UndeleteService][google.api.servicemanagement.v1.ServiceManager.UndeleteService] - // to restore the service. 
After 30 days, the service will be permanently - // deleted. - // - // Operation - rpc DeleteService(DeleteServiceRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - delete: "/v1/services/{service_name}" - }; - } - - // Revives a previously deleted managed service. The method restores the - // service using the configuration at the time the service was deleted. - // The target service must exist and must have been deleted within the - // last 30 days. - // - // Operation - rpc UndeleteService(UndeleteServiceRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/services/{service_name}:undelete" - }; - } - - // Lists the history of the service configuration for a managed service, - // from the newest to the oldest. - rpc ListServiceConfigs(ListServiceConfigsRequest) - returns (ListServiceConfigsResponse) { - option (google.api.http) = { - get: "/v1/services/{service_name}/configs" - }; - } - - // Gets a service configuration (version) for a managed service. - rpc GetServiceConfig(GetServiceConfigRequest) returns (google.api.Service) { - option (google.api.http) = { - get: "/v1/services/{service_name}/configs/{config_id}" - additional_bindings { get: "/v1/services/{service_name}/config" } - }; - } - - // Creates a new service configuration (version) for a managed service. - // This method only stores the service configuration. To roll out the service - // configuration to backend systems please call - // [CreateServiceRollout][google.api.servicemanagement.v1.ServiceManager.CreateServiceRollout]. - // - // Only the 100 most recent service configurations and ones referenced by - // existing rollouts are kept for each service. The rest will be deleted - // eventually. - rpc CreateServiceConfig(CreateServiceConfigRequest) - returns (google.api.Service) { - option (google.api.http) = { - post: "/v1/services/{service_name}/configs" - body: "service_config" - }; - } - - // Creates a new service configuration (version) for a managed service based - // on - // user-supplied configuration source files (for example: OpenAPI - // Specification). This method stores the source configurations as well as the - // generated service configuration. To rollout the service configuration to - // other services, - // please call - // [CreateServiceRollout][google.api.servicemanagement.v1.ServiceManager.CreateServiceRollout]. - // - // Only the 100 most recent configuration sources and ones referenced by - // existing service configurtions are kept for each service. The rest will be - // deleted eventually. - // - // Operation - rpc SubmitConfigSource(SubmitConfigSourceRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/services/{service_name}/configs:submit" - body: "*" - }; - } - - // Lists the history of the service configuration rollouts for a managed - // service, from the newest to the oldest. - rpc ListServiceRollouts(ListServiceRolloutsRequest) - returns (ListServiceRolloutsResponse) { - option (google.api.http) = { - get: "/v1/services/{service_name}/rollouts" - }; - } - - // Gets a service configuration - // [rollout][google.api.servicemanagement.v1.Rollout]. - rpc GetServiceRollout(GetServiceRolloutRequest) returns (Rollout) { - option (google.api.http) = { - get: "/v1/services/{service_name}/rollouts/{rollout_id}" - }; - } - - // Creates a new service configuration rollout. 
Based on rollout, the - // Google Service Management will roll out the service configurations to - // different backend services. For example, the logging configuration will be - // pushed to Google Cloud Logging. - // - // Please note that any previous pending and running Rollouts and associated - // Operations will be automatically cancelled so that the latest Rollout will - // not be blocked by previous Rollouts. - // - // Only the 100 most recent (in any state) and the last 10 successful (if not - // already part of the set of 100 most recent) rollouts are kept for each - // service. The rest will be deleted eventually. - // - // Operation - rpc CreateServiceRollout(CreateServiceRolloutRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/services/{service_name}/rollouts" - body: "rollout" - }; - } - - // Generates and returns a report (errors, warnings and changes from - // existing configurations) associated with - // GenerateConfigReportRequest.new_value - // - // If GenerateConfigReportRequest.old_value is specified, - // GenerateConfigReportRequest will contain a single ChangeReport based on the - // comparison between GenerateConfigReportRequest.new_value and - // GenerateConfigReportRequest.old_value. - // If GenerateConfigReportRequest.old_value is not specified, this method - // will compare GenerateConfigReportRequest.new_value with the last pushed - // service configuration. - rpc GenerateConfigReport(GenerateConfigReportRequest) - returns (GenerateConfigReportResponse) { - option (google.api.http) = { - post: "/v1/services:generateConfigReport" - body: "*" - }; - } - - // Enables a [service][google.api.servicemanagement.v1.ManagedService] for a - // project, so it can be used for the project. See [Cloud Auth - // Guide](https://cloud.google.com/docs/authentication) for more information. - // - // Operation - rpc EnableService(EnableServiceRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/services/{service_name}:enable" - body: "*" - }; - } - - // Disables a [service][google.api.servicemanagement.v1.ManagedService] for a - // project, so it can no longer be be used for the project. It prevents - // accidental usage that may cause unexpected billing charges or security - // leaks. - // - // Operation - rpc DisableService(DisableServiceRequest) - returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v1/services/{service_name}:disable" - body: "*" - }; - } -} - -// Request message for `ListServices` method. -message ListServicesRequest { - // Include services produced by the specified project. - string producer_project_id = 1; - - // Requested size of the next page of data. - int32 page_size = 5; - - // Token identifying which result to start with; returned by a previous list - // call. - string page_token = 6; - - // Include services consumed by the specified consumer. - // - // The Google Service Management implementation accepts the following - // forms: - // - project: - string consumer_id = 7; -} - -// Response message for `ListServices` method. -message ListServicesResponse { - // The returned services will only have the name field set. - repeated ManagedService services = 1; - - // Token that can be passed to `ListServices` to resume a paginated query. - string next_page_token = 2; -} - -// Request message for `GetService` method. -message GetServiceRequest { - // The name of the service. See the `ServiceManager` overview for naming - // requirements. 
For example: `example.googleapis.com`. - string service_name = 1; -} - -// Request message for CreateService method. -message CreateServiceRequest { - // Initial values for the service resource. - ManagedService service = 1; -} - -// Request message for DeleteService method. -message DeleteServiceRequest { - // The name of the service. See the [overview](/service-management/overview) - // for naming requirements. For example: `example.googleapis.com`. - string service_name = 1; -} - -// Request message for UndeleteService method. -message UndeleteServiceRequest { - // The name of the service. See the [overview](/service-management/overview) - // for naming requirements. For example: `example.googleapis.com`. - string service_name = 1; -} - -// Response message for UndeleteService method. -message UndeleteServiceResponse { - // Revived service resource. - ManagedService service = 1; -} - -// Request message for GetServiceConfig method. -message GetServiceConfigRequest { - enum ConfigView { - // Server response includes all fields except SourceInfo. - BASIC = 0; - - // Server response includes all fields including SourceInfo. - // SourceFiles are of type 'google.api.servicemanagement.v1.ConfigFile' - // and are only available for configs created using the - // SubmitConfigSource method. - FULL = 1; - } - - // The name of the service. See the [overview](/service-management/overview) - // for naming requirements. For example: `example.googleapis.com`. - string service_name = 1; - - // The id of the service configuration resource. - string config_id = 2; - - // Specifies which parts of the Service Config should be returned in the - // response. - ConfigView view = 3; -} - -// Request message for ListServiceConfigs method. -message ListServiceConfigsRequest { - // The name of the service. See the [overview](/service-management/overview) - // for naming requirements. For example: `example.googleapis.com`. - string service_name = 1; - - // The token of the page to retrieve. - string page_token = 2; - - // The max number of items to include in the response list. - int32 page_size = 3; -} - -// Response message for ListServiceConfigs method. -message ListServiceConfigsResponse { - // The list of service configuration resources. - repeated google.api.Service service_configs = 1; - - // The token of the next page of results. - string next_page_token = 2; -} - -// Request message for CreateServiceConfig method. -message CreateServiceConfigRequest { - // The name of the service. See the [overview](/service-management/overview) - // for naming requirements. For example: `example.googleapis.com`. - string service_name = 1; - - // The service configuration resource. - google.api.Service service_config = 2; -} - -// Request message for SubmitConfigSource method. -message SubmitConfigSourceRequest { - // The name of the service. See the [overview](/service-management/overview) - // for naming requirements. For example: `example.googleapis.com`. - string service_name = 1; - - // The source configuration for the service. - ConfigSource config_source = 2; - - // Optional. If set, this will result in the generation of a - // `google.api.Service` configuration based on the `ConfigSource` provided, - // but the generated config and the sources will NOT be persisted. - bool validate_only = 3; -} - -// Response message for SubmitConfigSource method. -message SubmitConfigSourceResponse { - // The generated service configuration. 
- google.api.Service service_config = 1; -} - -// Request message for 'CreateServiceRollout' -message CreateServiceRolloutRequest { - // The name of the service. See the [overview](/service-management/overview) - // for naming requirements. For example: `example.googleapis.com`. - string service_name = 1; - - // The rollout resource. The `service_name` field is output only. - Rollout rollout = 2; -} - -// Request message for 'ListServiceRollouts' -message ListServiceRolloutsRequest { - // The name of the service. See the [overview](/service-management/overview) - // for naming requirements. For example: `example.googleapis.com`. - string service_name = 1; - - // The token of the page to retrieve. - string page_token = 2; - - // The max number of items to include in the response list. - int32 page_size = 3; - - // Use `filter` to return subset of rollouts. - // The following filters are supported: - // -- To limit the results to only those in - // [status](google.api.servicemanagement.v1.RolloutStatus) 'SUCCESS', - // use filter='status=SUCCESS' - // -- To limit the results to those in - // [status](google.api.servicemanagement.v1.RolloutStatus) 'CANCELLED' - // or 'FAILED', use filter='status=CANCELLED OR status=FAILED' - string filter = 4; -} - -// Response message for ListServiceRollouts method. -message ListServiceRolloutsResponse { - // The list of rollout resources. - repeated Rollout rollouts = 1; - - // The token of the next page of results. - string next_page_token = 2; -} - -// Request message for GetServiceRollout method. -message GetServiceRolloutRequest { - // The name of the service. See the [overview](/service-management/overview) - // for naming requirements. For example: `example.googleapis.com`. - string service_name = 1; - - // The id of the rollout resource. - string rollout_id = 2; -} - -// Request message for EnableService method. -message EnableServiceRequest { - // Name of the service to enable. Specifying an unknown service name will - // cause the request to fail. - string service_name = 1; - - // The identity of consumer resource which service enablement will be - // applied to. - // - // The Google Service Management implementation accepts the following - // forms: - // - "project:" - // - // Note: this is made compatible with - // google.api.servicecontrol.v1.Operation.consumer_id. - string consumer_id = 2; -} - -// Request message for DisableService method. -message DisableServiceRequest { - // Name of the service to disable. Specifying an unknown service name - // will cause the request to fail. - string service_name = 1; - - // The identity of consumer resource which service disablement will be - // applied to. - // - // The Google Service Management implementation accepts the following - // forms: - // - "project:" - // - // Note: this is made compatible with - // google.api.servicecontrol.v1.Operation.consumer_id. - string consumer_id = 2; -} - -// Request message for GenerateConfigReport method. -message GenerateConfigReportRequest { - // Service configuration for which we want to generate the report. - // For this version of API, the supported types are - // [google.api.servicemanagement.v1.ConfigRef][google.api.servicemanagement.v1.ConfigRef], - // [google.api.servicemanagement.v1.ConfigSource][google.api.servicemanagement.v1.ConfigSource], - // and [google.api.Service][google.api.Service] - google.protobuf.Any new_config = 1; - - // Service configuration against which the comparison will be done. 
- // For this version of API, the supported types are - // [google.api.servicemanagement.v1.ConfigRef][google.api.servicemanagement.v1.ConfigRef], - // [google.api.servicemanagement.v1.ConfigSource][google.api.servicemanagement.v1.ConfigSource], - // and [google.api.Service][google.api.Service] - google.protobuf.Any old_config = 2; -} - -// Response message for GenerateConfigReport method. -message GenerateConfigReportResponse { - // Name of the service this report belongs to. - string service_name = 1; - - // ID of the service configuration this report belongs to. - string id = 2; - - // list of ChangeReport, each corresponding to comparison between two - // service configurations. - repeated ChangeReport change_reports = 3; - - // Errors / Linter warnings associated with the service definition this - // report - // belongs to. - repeated Diagnostic diagnostics = 4; -} diff --git a/third_party/google/api/source_info.proto b/third_party/google/api/source_info.proto deleted file mode 100644 index 5954143de..000000000 --- a/third_party/google/api/source_info.proto +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api; - -import "google/protobuf/any.proto"; - -option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig"; -option java_multiple_files = true; -option java_outer_classname = "SourceInfoProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// Source information used to create a Service Config -message SourceInfo { - // All files used during config generation. - repeated google.protobuf.Any source_files = 1; -} diff --git a/third_party/google/api/system_parameter.proto b/third_party/google/api/system_parameter.proto deleted file mode 100644 index 740a5538b..000000000 --- a/third_party/google/api/system_parameter.proto +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api; - -option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig"; -option java_multiple_files = true; -option java_outer_classname = "SystemParameterProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// ### System parameter configuration -// -// A system parameter is a special kind of parameter defined by the API -// system, not by an individual API. 
It is typically mapped to an HTTP header -// and/or a URL query parameter. This configuration specifies which methods -// change the names of the system parameters. -message SystemParameters { - // Define system parameters. - // - // The parameters defined here will override the default parameters - // implemented by the system. If this field is missing from the service - // config, default system parameters will be used. Default system parameters - // and names is implementation-dependent. - // - // Example: define api key for all methods - // - // system_parameters - // rules: - // - selector: "*" - // parameters: - // - name: api_key - // url_query_parameter: api_key - // - // - // Example: define 2 api key names for a specific method. - // - // system_parameters - // rules: - // - selector: "/ListShelves" - // parameters: - // - name: api_key - // http_header: Api-Key1 - // - name: api_key - // http_header: Api-Key2 - // - // **NOTE:** All service configuration rules follow "last one wins" order. - repeated SystemParameterRule rules = 1; -} - -// Define a system parameter rule mapping system parameter definitions to -// methods. -message SystemParameterRule { - // Selects the methods to which this rule applies. Use '*' to indicate all - // methods in all APIs. - // - // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. - string selector = 1; - - // Define parameters. Multiple names may be defined for a parameter. - // For a given method call, only one of them should be used. If multiple - // names are used the behavior is implementation-dependent. - // If none of the specified names are present the behavior is - // parameter-dependent. - repeated SystemParameter parameters = 2; -} - -// Define a parameter's name and location. The parameter may be passed as either -// an HTTP header or a URL query parameter, and if both are passed the behavior -// is implementation-dependent. -message SystemParameter { - // Define the name of the parameter, such as "api_key" . It is case sensitive. - string name = 1; - - // Define the HTTP header name to use for the parameter. It is case - // insensitive. - string http_header = 2; - - // Define the URL query parameter name to use for the parameter. It is case - // sensitive. - string url_query_parameter = 3; -} diff --git a/third_party/google/api/usage.proto b/third_party/google/api/usage.proto deleted file mode 100644 index 6ab4e408c..000000000 --- a/third_party/google/api/usage.proto +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.api; - -option go_package = "google.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig"; -option java_multiple_files = true; -option java_outer_classname = "UsageProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// Configuration controlling usage of a service. 
-message Usage { - // Requirements that must be satisfied before a consumer project can use the - // service. Each requirement is of the form /; - // for example 'serviceusage.googleapis.com/billing-enabled'. - repeated string requirements = 1; - - // A list of usage rules that apply to individual API methods. - // - // **NOTE:** All service configuration rules follow "last one wins" order. - repeated UsageRule rules = 6; - - // The full resource name of a channel used for sending notifications to the - // service producer. - // - // Google Service Management currently only supports - // [Google Cloud Pub/Sub](https://cloud.google.com/pubsub) as a notification - // channel. To use Google Cloud Pub/Sub as the channel, this must be the name - // of a Cloud Pub/Sub topic that uses the Cloud Pub/Sub topic name format - // documented in https://cloud.google.com/pubsub/docs/overview. - string producer_notification_channel = 7; -} - -// Usage configuration rules for the service. -// -// NOTE: Under development. -// -// -// Use this rule to configure unregistered calls for the service. Unregistered -// calls are calls that do not contain consumer project identity. -// (Example: calls that do not contain an API key). -// By default, API methods do not allow unregistered calls, and each method call -// must be identified by a consumer project identity. Use this rule to -// allow/disallow unregistered calls. -// -// Example of an API that wants to allow unregistered calls for entire service. -// -// usage: -// rules: -// - selector: "*" -// allow_unregistered_calls: true -// -// Example of a method that wants to allow unregistered calls. -// -// usage: -// rules: -// - selector: "google.example.library.v1.LibraryService.CreateBook" -// allow_unregistered_calls: true -message UsageRule { - // Selects the methods to which this rule applies. Use '*' to indicate all - // methods in all APIs. - // - // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. - string selector = 1; - - // If true, the selected method allows unregistered calls, e.g. calls - // that don't identify any user or application. - bool allow_unregistered_calls = 2; - - // If true, the selected method should skip service control and the control - // plane features, such as quota and billing, will not be available. - // This flag is used by Google Cloud Endpoints to bypass checks for internal - // methods, such as service health check methods. - bool skip_service_control = 3; -} diff --git a/tool/kratos-gen-bts/README.md b/tool/kratos-gen-bts/README.md deleted file mode 100644 index 82f6b0af2..000000000 --- a/tool/kratos-gen-bts/README.md +++ /dev/null @@ -1,48 +0,0 @@ -#### genbts - -> 缓存代码生成 - -##### 项目简介 - -从缓存中获取数据 如果miss则调用回源函数从数据源获取 然后塞入缓存 - -支持以下功能: - -- 单飞限制回源并发 防止打爆数据源 -- 空缓存 防止缓存穿透 -- 分批获取数据 降低延时 -- 默认异步加缓存 可选同步加缓存 -- prometheus回源比监控 -- 多行注释生成代码 -- 支持分页(限单key模板) -- 自定义注释 -- 支持忽略参数 - -##### 使用方式: -1. 在dao package中 增加注解 //go:generate kratos tool genbts 定义bts接口 声明需要的方法 -2. 在dao 文件夹中执行 go generate命令 将会生成相应的缓存代码 -3. 调用生成的XXX方法 -4. 示例见testdata/dao.go - -要求: -dao里面需要有cache对象 代码会调用d.cache来新增缓存 -需要实现代码中所需的方法 每一个缓存方法都需要实现以下方法: -从缓存中获取数据 名称为Cache+方法名 函数定义和声明一致 -从数据源(db/api/...)获取数据 名称为Raw+方法 函数定义和声明一致 -存入缓存方法 名称为AddCache+方法名 函数定义为 func AddCache方法名(c context.Context, ...) 
(error) - -##### 注解参数: -| 参数名称 | 默认值 | 说明 | 示例 | -| ---------------- | ------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| -nullcache | | 空指针对象(存正常业务不会出现的内容 id的话像是-1这样的) | &Demo{ID:-1} 或-1 或"null" | -| -check_null_code | | 开启空缓存并且value为指针对象时必填 用于判断是否是空缓存 $来指代对象名 | `-check_null_code=$!=nil&&$.ID==-1 或 $ == -1` | -| -cache_err |continue| 缓存出错的时候的行为 continue: 继续执行 break: 抛出错误 方法返回|break| -| -batch | | (限多key模板) 批量获取数据 每组大小 | 100 | -| -max_group | | (限多key模板)批量获取数据 最大组数量 | 10 | -| -batch_err | break | (限多key模板)批量获取数据回源错误的时候 降级继续请求(continue)还是直接返回(break) | break 或 continue | -| -singleflight | false | 是否开启单飞(开启后生成函数会多一个单飞名称参数 生成的代码会调用d.cacheSFNAME方法获取单飞的key) | true | -| -sync | false | 是否同步增加缓存 | false | -| -paging | false | (限单key模板)分页 数据源应返回2个值 第一个为对外数据 第二个为全量数据 用于新增缓存 | false | -| -ignores | | 用于依赖的三个方法参数和主方法参数不一致的情况. 忽略方法的某些参数 用\|分隔方法逗号分隔参数 | pn,ps\|pn\|origin 表示"缓存获取"方法忽略pn,ps两个参数 回源方法忽略pn参数 加缓存方法忽略origin参数 | -| -custom_method | false | 自定义方法名 \|分隔 缓存获取方法名\|回源方法名\|增加缓存方法名 | d.mc.AddDemo\|d.mysql.Demo\|d.mc.AddDemo | -| -struct_name | dao | 所属结构体名称 | Dao| \ No newline at end of file diff --git a/tool/kratos-gen-bts/header_template.go b/tool/kratos-gen-bts/header_template.go deleted file mode 100644 index 613e886b7..000000000 --- a/tool/kratos-gen-bts/header_template.go +++ /dev/null @@ -1,33 +0,0 @@ -package main - -var _headerTemplate = ` -// Code generated by kratos tool genbts. DO NOT EDIT. - -NEWLINE -/* - Package {{.PkgName}} is a generated cache proxy package. - It is generated from: - ARGS -*/ -NEWLINE - -package {{.PkgName}} - -import ( - "context" - {{if .EnableBatch }}"sync"{{end}} -NEWLINE - "github.com/go-kratos/kratos/pkg/cache" - {{if .EnableBatch }}"github.com/go-kratos/kratos/pkg/sync/errgroup"{{end}} - {{.ImportPackage}} -NEWLINE - {{if .EnableSingleFlight}} "golang.org/x/sync/singleflight" {{end}} -) - -{{if .UseBTS}} -var _ _bts -{{end }} -{{if .EnableSingleFlight}} -var cacheSingleFlights = [SFCOUNT]*singleflight.Group{SFINIT} -{{end }} -` diff --git a/tool/kratos-gen-bts/main.go b/tool/kratos-gen-bts/main.go deleted file mode 100644 index f1bd7f624..000000000 --- a/tool/kratos-gen-bts/main.go +++ /dev/null @@ -1,507 +0,0 @@ -package main - -import ( - "bytes" - "flag" - "fmt" - "go/ast" - "io/ioutil" - "log" - "os" - "path/filepath" - "regexp" - "runtime" - "strconv" - "strings" - "text/template" - - "github.com/go-kratos/kratos/tool/pkg" -) - -var ( - // arguments - singleFlight = flag.Bool("singleflight", false, "enable singleflight") - nullCache = flag.String("nullcache", "", "null cache") - checkNullCode = flag.String("check_null_code", "", "check null code") - cacheErr = flag.String("cache_err", "continue", "cache err to continue or break") - batchSize = flag.Int("batch", 0, "batch size") - batchErr = flag.String("batch_err", "break", "batch err to continue or break") - maxGroup = flag.Int("max_group", 0, "max group size") - sync = flag.Bool("sync", false, "add cache in sync way.") - paging = flag.Bool("paging", false, "use paging in single template") - ignores = flag.String("ignores", "", "ignore params") - customMethod = flag.String("custom_method", "", "自定义方法名 |分隔: 缓存|回源|增加缓存") - structName = flag.String("struct_name", "dao", "struct name") - - numberTypes = []string{"int", "int8", "int16", "int32", "int64", "float32", "float64", "uint", "uint8", "uint16", "uint32", "uint64"} - simpleTypes = []string{"int", "int8", "int16", "int32", "int64", "float32", "float64", "uint", 
"uint8", "uint16", "uint32", "uint64", "bool", "string", "[]byte"} - optionNames = []string{"singleflight", "nullcache", "check_null_code", "batch", "max_group", "sync", "paging", "ignores", "batch_err", "custom_method", "cache_err", "struct_name"} - optionNamesMap = map[string]bool{} - interfaceName string -) - -const ( - _multiTpl = 1 - _singleTpl = 2 - _noneTpl = 3 -) - -func resetFlag() { - *singleFlight = false - *nullCache = "" - *checkNullCode = "" - *batchSize = 0 - *maxGroup = 0 - *sync = false - *paging = false - *batchErr = "break" - *cacheErr = "continue" - *ignores = "" - *customMethod = "" - *structName = "dao" -} - -// options options -type options struct { - name string - keyType string - valueType string - cacheFunc string - rawFunc string - addCacheFunc string - template int - SimpleValue bool - NumberValue bool - GoValue bool - ZeroValue string - ImportPackage string - importPackages []string - Args string - PkgName string - EnableSingleFlight bool - NullCache string - EnableNullCache bool - GroupSize int - MaxGroup int - EnableBatch bool - BatchErrBreak bool - Sync bool - CheckNullCode string - ExtraArgsType string - ExtraArgs string - ExtraCacheArgs string - ExtraRawArgs string - ExtraAddCacheArgs string - EnablePaging bool - Comment string - CustomMethod string - IDName string - CacheErrContinue bool - StructName string - hasDec bool - UseBTS bool -} - -func getOptions(opt *options, comment string) { - os.Args = []string{os.Args[0]} - if regexp.MustCompile(`\s+//\s*bts:.+`).Match([]byte(comment)) { - args := strings.Split(pkg.RegexpReplace(`//\s*bts:(?P.+)`, comment, "$arg"), " ") - for _, arg := range args { - arg = strings.TrimSpace(arg) - if arg != "" { - // validate option name - argName := pkg.RegexpReplace(`-(?P[\w_-]+)=.+`, arg, "$name") - if !optionNamesMap[argName] { - log.Fatalf("选项:%s 不存在 请检查拼写\n", argName) - } - os.Args = append(os.Args, arg) - } - } - opt.hasDec = true - } - resetFlag() - flag.Parse() - opt.EnableSingleFlight = *singleFlight - opt.NullCache = *nullCache - opt.EnablePaging = *paging - opt.EnableNullCache = *nullCache != "" - opt.EnableBatch = (*batchSize != 0) && (*maxGroup != 0) - opt.BatchErrBreak = *batchErr == "break" - opt.Sync = *sync - opt.CheckNullCode = *checkNullCode - opt.GroupSize = *batchSize - opt.MaxGroup = *maxGroup - opt.CustomMethod = *customMethod - opt.CacheErrContinue = *cacheErr == "continue" - opt.StructName = *structName -} - -func processList(s *pkg.Source, list *ast.Field) (opt options) { - fset := s.Fset - src := s.Src - lines := strings.Split(src, "\n") - opt = options{name: list.Names[0].Name, Args: s.GetDef(interfaceName), importPackages: s.Packages(list)} - // get comment - line := fset.Position(list.Pos()).Line - 3 - if len(lines)-1 >= line { - comment := lines[line] - opt.Comment = pkg.RegexpReplace(`\s+//(?P.+)`, comment, "$name") - opt.Comment = strings.TrimSpace(opt.Comment) - } - // get options - line = fset.Position(list.Pos()).Line - 2 - comment := lines[line] - getOptions(&opt, comment) - if !opt.hasDec { - log.Printf("%s: 无声明 忽略此方法\n", opt.name) - return - } - // get func - params := list.Type.(*ast.FuncType).Params.List - if len(params) == 0 { - log.Fatalln(opt.name + "参数不足") - } - for _, p := range params { - if len(p.Names) > 1 { - log.Fatalln(opt.name + "不支持省略类型 请写全声明中的字段类型名称") - } - } - if s.ExprString(params[0].Type) != "context.Context" { - log.Fatalln("第一个参数必须为context") - } - if len(params) == 1 { - opt.template = _noneTpl - } else { - opt.IDName = params[1].Names[0].Name - if _, ok := 
params[1].Type.(*ast.ArrayType); ok { - opt.template = _multiTpl - } else { - opt.template = _singleTpl - // get key - opt.keyType = s.ExprString(params[1].Type) - } - } - if len(params) > 2 { - var args []string - var allArgs []string - for _, pa := range params[2:] { - paType := s.ExprString(pa.Type) - if len(pa.Names) == 0 { - args = append(args, paType) - allArgs = append(allArgs, paType) - continue - } - var names []string - for _, name := range pa.Names { - names = append(names, name.Name) - } - allArgs = append(allArgs, strings.Join(names, ",")+" "+paType) - args = append(args, names...) - } - opt.ExtraArgs = strings.Join(args, ",") - opt.ExtraArgsType = strings.Join(allArgs, ",") - argsMap := make(map[string]bool) - for _, arg := range args { - argsMap[arg] = true - } - ignoreCache := make(map[string]bool) - ignoreRaw := make(map[string]bool) - ignoreAddCache := make(map[string]bool) - ignoreArray := [3]map[string]bool{ignoreCache, ignoreRaw, ignoreAddCache} - if *ignores != "" { - is := strings.Split(*ignores, "|") - if len(is) > 3 { - log.Fatalln("ignores参数错误") - } - for i := range is { - if len(is) > i { - for _, s := range strings.Split(is[i], ",") { - ignoreArray[i][s] = true - } - } - } - } - var as []string - for _, arg := range args { - if !ignoreCache[arg] { - as = append(as, arg) - } - } - opt.ExtraCacheArgs = strings.Join(as, ",") - as = []string{} - for _, arg := range args { - if !ignoreRaw[arg] { - as = append(as, arg) - } - } - opt.ExtraRawArgs = strings.Join(as, ",") - as = []string{} - for _, arg := range args { - if !ignoreAddCache[arg] { - as = append(as, arg) - } - } - opt.ExtraAddCacheArgs = strings.Join(as, ",") - if opt.ExtraAddCacheArgs != "" { - opt.ExtraAddCacheArgs = "," + opt.ExtraAddCacheArgs - } - if opt.ExtraRawArgs != "" { - opt.ExtraRawArgs = "," + opt.ExtraRawArgs - } - if opt.ExtraCacheArgs != "" { - opt.ExtraCacheArgs = "," + opt.ExtraCacheArgs - } - if opt.ExtraArgs != "" { - opt.ExtraArgs = "," + opt.ExtraArgs - } - if opt.ExtraArgsType != "" { - opt.ExtraArgsType = "," + opt.ExtraArgsType - } - } - // get k v from results - results := list.Type.(*ast.FuncType).Results.List - if len(results) != 2 { - log.Fatalln(opt.name + ": 参数个数不对") - } - if s.ExprString(results[1].Type) != "error" { - log.Fatalln(opt.name + ": 最后返回值参数需为error") - } - if opt.template == _multiTpl { - p, ok := results[0].Type.(*ast.MapType) - if !ok { - log.Fatalln(opt.name + ": 批量获取方法 返回值类型需为map类型") - } - opt.keyType = s.ExprString(p.Key) - opt.valueType = s.ExprString(p.Value) - } else { - opt.valueType = s.ExprString(results[0].Type) - } - for _, t := range numberTypes { - if t == opt.valueType { - opt.NumberValue = true - break - } - } - opt.ZeroValue = "nil" - for _, t := range simpleTypes { - if t == opt.valueType { - opt.SimpleValue = true - opt.ZeroValue = zeroValue(t) - break - } - } - if !opt.SimpleValue { - for _, t := range []string{"[]", "map"} { - if strings.HasPrefix(opt.valueType, t) { - opt.GoValue = true - break - } - } - } - upperName := strings.ToUpper(opt.name[0:1]) + opt.name[1:] - opt.cacheFunc = fmt.Sprintf("d.Cache%s", upperName) - opt.rawFunc = fmt.Sprintf("d.Raw%s", upperName) - opt.addCacheFunc = fmt.Sprintf("d.AddCache%s", upperName) - if opt.CustomMethod != "" { - arrs := strings.Split(opt.CustomMethod, "|") - if len(arrs) > 0 && arrs[0] != "" { - opt.cacheFunc = arrs[0] - } - if len(arrs) > 1 && arrs[1] != "" { - opt.rawFunc = arrs[1] - } - if len(arrs) > 2 && arrs[2] != "" { - opt.addCacheFunc = arrs[2] - } - } - return -} - -// parse parse 
options -func parse(s *pkg.Source) (opts []*options) { - var c *ast.Object - for _, name := range []string{"_bts", "Dao"} { - c = s.F.Scope.Lookup(name) - if (c == nil) || (c.Kind != ast.Typ) { - c = nil - continue - } - interfaceName = name - break - } - if c == nil { - log.Fatalln("无法找到缓存声明") - } - lists := c.Decl.(*ast.TypeSpec).Type.(*ast.InterfaceType).Methods.List - for _, list := range lists { - opt := processList(s, list) - if opt.hasDec { - opt.Check() - opts = append(opts, &opt) - } - } - return -} - -func (option *options) Check() { - if !option.SimpleValue && !strings.Contains(option.valueType, "*") && !strings.Contains(option.valueType, "[]") && !strings.Contains(option.valueType, "map") { - log.Fatalf("%s: 值类型只能为基本类型/slice/map/指针类型\n", option.name) - } - if option.EnableSingleFlight && option.EnableBatch { - log.Fatalf("%s: 单飞和批量获取不能同时开启\n", option.name) - } - if option.template != _singleTpl && option.EnablePaging { - log.Fatalf("%s: 分页只能用在单key模板中\n", option.name) - } - if option.SimpleValue && !option.EnableNullCache { - if !((option.template == _multiTpl) && option.NumberValue) { - log.Fatalf("%s: 值为基本类型时需开启空缓存 防止缓存零值穿透\n", option.name) - } - } - if option.EnableNullCache { - if !option.SimpleValue && option.CheckNullCode == "" { - log.Fatalf("%s: 缺少-check_null_code参数\n", option.name) - } - if option.SimpleValue && option.NullCache == option.ZeroValue { - log.Fatalf("%s: %s 不能作为空缓存值 \n", option.name, option.NullCache) - } - if strings.Contains(option.CheckNullCode, "len") && strings.Contains(strings.Replace(option.CheckNullCode, " ", "", -1), "==0") { - // -check_null_code=len($)==0 这种无效 - log.Fatalf("%s: -check_null_code=%s 错误 会有无意义的赋值\n", option.name, option.CheckNullCode) - } - } -} - -func genHeader(opts []*options) (src string) { - option := options{PkgName: os.Getenv("GOPACKAGE")} - option.UseBTS = interfaceName == "_bts" - var sfCount int - var packages, sfInit []string - packagesMap := map[string]bool{`"context"`: true} - for _, opt := range opts { - if opt.EnableSingleFlight { - option.EnableSingleFlight = true - sfCount++ - } - if opt.EnableBatch { - option.EnableBatch = true - } - if len(opt.importPackages) > 0 { - for _, pkg := range opt.importPackages { - if !packagesMap[pkg] { - packages = append(packages, pkg) - packagesMap[pkg] = true - } - } - } - if opt.Args != "" { - option.Args = opt.Args - } - } - option.ImportPackage = strings.Join(packages, "\n") - for i := 0; i < sfCount; i++ { - sfInit = append(sfInit, "{}") - } - src = _headerTemplate - src = strings.Replace(src, "SFCOUNT", strconv.Itoa(sfCount), -1) - t := template.Must(template.New("header").Parse(src)) - var buffer bytes.Buffer - err := t.Execute(&buffer, option) - if err != nil { - log.Fatalf("execute template: %s", err) - } - // Format the output. 
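	// The executed template is post-processed below: template indentation (tabs) is
	// stripped and runs of newlines are collapsed to one, after which the placeholder
	// tokens are expanded -- NEWLINE becomes an intentional blank line that survives
	// the collapse, ARGS is replaced by the interface definition captured from the
	// source, and SFINIT by the comma-joined "{}" initializers of the singleflight
	// group array (one entry per method that enables -singleflight).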
- src = strings.Replace(buffer.String(), "\t", "", -1) - src = regexp.MustCompile("\n+").ReplaceAllString(src, "\n") - src = strings.Replace(src, "NEWLINE", "", -1) - src = strings.Replace(src, "ARGS", option.Args, -1) - src = strings.Replace(src, "SFINIT", strings.Join(sfInit, ","), -1) - return -} - -func genBody(opts []*options) (res string) { - sfnum := -1 - for _, option := range opts { - var nullCodeVar, src string - if option.template == _multiTpl { - src = _multiTemplate - nullCodeVar = "v" - } else if option.template == _singleTpl { - src = _singleTemplate - nullCodeVar = "res" - } else { - src = _noneTemplate - nullCodeVar = "res" - } - if option.template != _noneTpl { - src = strings.Replace(src, "KEY", option.keyType, -1) - } - if option.CheckNullCode != "" { - option.CheckNullCode = strings.Replace(option.CheckNullCode, "$", nullCodeVar, -1) - } - if option.EnableSingleFlight { - sfnum++ - } - src = strings.Replace(src, "NAME", option.name, -1) - src = strings.Replace(src, "VALUE", option.valueType, -1) - src = strings.Replace(src, "ADDCACHEFUNC", option.addCacheFunc, -1) - src = strings.Replace(src, "CACHEFUNC", option.cacheFunc, -1) - src = strings.Replace(src, "RAWFUNC", option.rawFunc, -1) - src = strings.Replace(src, "GROUPSIZE", strconv.Itoa(option.GroupSize), -1) - src = strings.Replace(src, "MAXGROUP", strconv.Itoa(option.MaxGroup), -1) - src = strings.Replace(src, "SFNUM", strconv.Itoa(sfnum), -1) - t := template.Must(template.New("cache").Parse(src)) - var buffer bytes.Buffer - err := t.Execute(&buffer, option) - if err != nil { - log.Fatalf("execute template: %s", err) - } - // Format the output. - src = strings.Replace(buffer.String(), "\t", "", -1) - src = regexp.MustCompile("\n+").ReplaceAllString(src, "\n") - res = res + "\n" + src - } - return -} - -func zeroValue(t string) string { - switch t { - case "bool": - return "false" - case "string": - return "\"\"" - case "[]byte": - return "nil" - default: - return "0" - } -} - -func init() { - for _, name := range optionNames { - optionNamesMap[name] = true - } -} - -func main() { - log.SetFlags(0) - defer func() { - if err := recover(); err != nil { - buf := make([]byte, 64*1024) - buf = buf[:runtime.Stack(buf, false)] - log.Fatalf("程序解析失败, err: %+v stack: %s", err, buf) - } - }() - options := parse(pkg.NewSource(pkg.SourceText())) - header := genHeader(options) - body := genBody(options) - code := pkg.FormatCode(header + "\n" + body) - // Write to file. 
- dir := filepath.Dir(".") - outputName := filepath.Join(dir, "dao.bts.go") - err := ioutil.WriteFile(outputName, []byte(code), 0644) - if err != nil { - log.Fatalf("写入文件失败: %s", err) - } - log.Println("dao.bts.go: 生成成功") -} diff --git a/tool/kratos-gen-bts/multi_template.go b/tool/kratos-gen-bts/multi_template.go deleted file mode 100644 index 7034f00af..000000000 --- a/tool/kratos-gen-bts/multi_template.go +++ /dev/null @@ -1,130 +0,0 @@ -package main - -var _multiTemplate = ` -// NAME {{or .Comment "get data from cache if miss will call source method, then add to cache."}} -func (d *{{.StructName}}) NAME(c context.Context, {{.IDName}} []KEY{{.ExtraArgsType}}) (res map[KEY]VALUE, err error) { - if len({{.IDName}}) == 0 { - return - } - addCache := true - if res, err = CACHEFUNC(c, {{.IDName}} {{.ExtraCacheArgs}});err != nil { - {{if .CacheErrContinue}} - addCache = false - res = nil - err = nil - {{else}} - return - {{end}} - } - var miss []KEY - for _, key := range {{.IDName}} { - {{if .GoValue}} - if (res == nil) || (len(res[key]) == 0) { - {{else}} - {{if .NumberValue}} - if _, ok := res[key]; !ok { - {{else}} - if (res == nil) || (res[key] == {{.ZeroValue}}) { - {{end}} - {{end}} - miss = append(miss, key) - } - } - cache.MetricHits.Add(float64(len({{.IDName}}) - len(miss)), "bts:NAME") - {{if .EnableNullCache}} - for k, v := range res { - {{if .SimpleValue}} if v == {{.NullCache}} { {{else}} if {{.CheckNullCode}} { {{end}} - delete(res, k) - } - } - {{end}} - missLen := len(miss) - if missLen == 0 { - return - } - {{if .EnableBatch}} - missData := make(map[KEY]VALUE, missLen) - {{else}} - var missData map[KEY]VALUE - {{end}} - {{if .EnableSingleFlight}} - var rr interface{} - sf := d.cacheSFNAME({{.IDName}} {{.ExtraArgs}}) - rr, err, _ = cacheSingleFlights[SFNUM].Do(sf, func() (r interface{}, e error) { - cache.MetricMisses.Add(float64(len(miss)), "bts:NAME") - r, e = RAWFUNC(c, miss {{.ExtraRawArgs}}) - return - }) - missData = rr.(map[KEY]VALUE) - {{else}} - {{if .EnableBatch}} - cache.MetricMisses.Add(float64(missLen), "bts:NAME") - var mutex sync.Mutex - {{if .BatchErrBreak}} - group := errgroup.WithCancel(c) - {{else}} - group := errgroup.WithContext(c) - {{end}} - if missLen > MAXGROUP { - group.GOMAXPROCS(MAXGROUP) - } - var run = func(ms []KEY) { - group.Go(func(ctx context.Context) (err error) { - data, err := RAWFUNC(ctx, ms {{.ExtraRawArgs}}) - mutex.Lock() - for k, v := range data { - missData[k] = v - } - mutex.Unlock() - return - }) - } - var ( - i int - n = missLen/GROUPSIZE - ) - for i=0; i< n; i++{ - run(miss[i*GROUPSIZE:(i+1)*GROUPSIZE]) - } - if len(miss[i*GROUPSIZE:]) > 0 { - run(miss[i*GROUPSIZE:]) - } - err = group.Wait() - {{else}} - cache.MetricMisses.Add(float64(len(miss)), "bts:NAME") - missData, err = RAWFUNC(c, miss {{.ExtraRawArgs}}) - {{end}} - {{end}} - if res == nil { - res = make(map[KEY]VALUE, len({{.IDName}})) - } - for k, v := range missData { - res[k] = v - } - if err != nil { - return - } - {{if .EnableNullCache}} - for _, key := range miss { - {{if .GoValue}} - if len(res[key]) == 0 { - {{else}} - if res[key] == {{.ZeroValue}} { - {{end}} - missData[key] = {{.NullCache}} - } - } - {{end}} - if !addCache { - return - } - {{if .Sync}} - ADDCACHEFUNC(c, missData {{.ExtraAddCacheArgs}}) - {{else}} - d.cache.Do(c, func(c context.Context) { - ADDCACHEFUNC(c, missData {{.ExtraAddCacheArgs}}) - }) - {{end}} - return -} -` diff --git a/tool/kratos-gen-bts/none_template.go b/tool/kratos-gen-bts/none_template.go deleted file mode 100644 index 
90c4063d0..000000000 --- a/tool/kratos-gen-bts/none_template.go +++ /dev/null @@ -1,69 +0,0 @@ -package main - -var _noneTemplate = ` -// NAME {{or .Comment "get data from cache if miss will call source method, then add to cache."}} -func (d *{{.StructName}}) NAME(c context.Context) (res VALUE, err error) { - addCache := true - res, err = CACHEFUNC(c) - if err != nil { - {{if .CacheErrContinue}} - addCache = false - err = nil - {{else}} - return - {{end}} - } - {{if .EnableNullCache}} - defer func() { - {{if .SimpleValue}} if res == {{.NullCache}} { {{else}} if {{.CheckNullCode}} { {{end}} - res = {{.ZeroValue}} - } - }() - {{end}} - {{if .GoValue}} - if len(res) != 0 { - {{else}} - if res != {{.ZeroValue}} { - {{end}} - cache.MetricHits.Inc("bts:NAME") - return - } - {{if .EnableSingleFlight}} - var rr interface{} - sf := d.cacheSFNAME() - rr, err, _ = cacheSingleFlights[SFNUM].Do(sf, func() (r interface{}, e error) { - cache.MetricMisses.Inc("bts:NAME") - r, e = RAWFUNC(c) - return - }) - res = rr.(VALUE) - {{else}} - cache.MetricMisses.Inc("bts:NAME") - res, err = RAWFUNC(c) - {{end}} - if err != nil { - return - } - var miss = res - {{if .EnableNullCache}} - {{if .GoValue}} - if len(miss) == 0 { - {{else}} - if miss == {{.ZeroValue}} { - {{end}} - miss = {{.NullCache}} - } - {{end}} - if !addCache { - return - } - {{if .Sync}} - ADDCACHEFUNC(c, miss) - {{else}} - d.cache.Do(c, func(c context.Context) { - ADDCACHEFUNC(c, miss) - }) - {{end}} - return -} -` diff --git a/tool/kratos-gen-bts/single_template.go b/tool/kratos-gen-bts/single_template.go deleted file mode 100644 index 0ec2d4473..000000000 --- a/tool/kratos-gen-bts/single_template.go +++ /dev/null @@ -1,90 +0,0 @@ -package main - -var _singleTemplate = ` -// NAME {{or .Comment "get data from cache if miss will call source method, then add to cache."}} -func (d *{{.StructName}}) NAME(c context.Context, {{.IDName}} KEY{{.ExtraArgsType}}) (res VALUE, err error) { - addCache := true - res, err = CACHEFUNC(c, {{.IDName}} {{.ExtraCacheArgs}}) - if err != nil { - {{if .CacheErrContinue}} - addCache = false - err = nil - {{else}} - return - {{end}} - } - {{if .EnableNullCache}} - defer func() { - {{if .SimpleValue}} if res == {{.NullCache}} { {{else}} if {{.CheckNullCode}} { {{end}} - res = {{.ZeroValue}} - } - }() - {{end}} - {{if .GoValue}} - if len(res) != 0 { - {{else}} - if res != {{.ZeroValue}} { - {{end}} - cache.MetricHits.Inc("bts:NAME") - return - } - {{if .EnablePaging}} - var miss VALUE - {{end}} - {{if .EnableSingleFlight}} - var rr interface{} - sf := d.cacheSFNAME({{.IDName}} {{.ExtraArgs}}) - rr, err, _ = cacheSingleFlights[SFNUM].Do(sf, func() (r interface{}, e error) { - cache.MetricMisses.Inc("bts:NAME") - {{if .EnablePaging}} - var rrs [2]interface{} - rrs[0], rrs[1], e = RAWFUNC(c, {{.IDName}} {{.ExtraRawArgs}}) - r = rrs - {{else}} - r, e = RAWFUNC(c, {{.IDName}} {{.ExtraRawArgs}}) - {{end}} - return - }) - {{if .EnablePaging}} - res = rr.([2]interface{})[0].(VALUE) - miss = rr.([2]interface{})[1].(VALUE) - {{else}} - res = rr.(VALUE) - {{end}} - {{else}} - cache.MetricMisses.Inc("bts:NAME") - {{if .EnablePaging}} - res, miss, err = RAWFUNC(c, {{.IDName}} {{.ExtraRawArgs}}) - {{else}} - res, err = RAWFUNC(c, {{.IDName}} {{.ExtraRawArgs}}) - {{end}} - {{end}} - if err != nil { - return - } - {{if .EnablePaging}} - {{else}} - miss := res - {{end}} - {{if .EnableNullCache}} - {{if .GoValue}} - if len(miss) == 0 { - {{else}} - if miss == {{.ZeroValue}} { - {{end}} - miss = {{.NullCache}} - } - {{end}} - if !addCache { 
- return - } - {{if .Sync}} - ADDCACHEFUNC(c, {{.IDName}}, miss {{.ExtraAddCacheArgs}}) - {{else}} - d.cache.Do(c, func(c context.Context) { - ADDCACHEFUNC(c, {{.IDName}}, miss {{.ExtraAddCacheArgs}}) - }) - {{end}} - return -} -` diff --git a/tool/kratos-gen-bts/testdata/dao.bts.go b/tool/kratos-gen-bts/testdata/dao.bts.go deleted file mode 100644 index cba448576..000000000 --- a/tool/kratos-gen-bts/testdata/dao.bts.go +++ /dev/null @@ -1,283 +0,0 @@ -// Code generated by kratos tool genbts. DO NOT EDIT. - -/* - Package testdata is a generated cache proxy package. - It is generated from: - type _bts interface { - // bts: -batch=2 -max_group=20 -batch_err=break -nullcache=&Demo{ID:-1} -check_null_code=$.ID==-1 - Demos(c context.Context, keys []int64) (map[int64]*Demo, error) - // bts: -batch=2 -max_group=20 -batch_err=continue -nullcache=&Demo{ID:-1} -check_null_code=$.ID==-1 - Demos1(c context.Context, keys []int64) (map[int64]*Demo, error) - // bts: -sync=true -nullcache=&Demo{ID:-1} -check_null_code=$.ID==-1 - Demo(c context.Context, key int64) (*Demo, error) - // bts: -paging=true - Demo1(c context.Context, key int64, pn int, ps int) (*Demo, error) - // bts: -nullcache=&Demo{ID:-1} -check_null_code=$.ID==-1 - None(c context.Context) (*Demo, error) - } -*/ - -package testdata - -import ( - "context" - "sync" - - "github.com/go-kratos/kratos/pkg/cache" - "github.com/go-kratos/kratos/pkg/sync/errgroup" -) - -var _ _bts - -// Demos get data from cache if miss will call source method, then add to cache. -func (d *dao) Demos(c context.Context, keys []int64) (res map[int64]*Demo, err error) { - if len(keys) == 0 { - return - } - addCache := true - if res, err = d.CacheDemos(c, keys); err != nil { - addCache = false - res = nil - err = nil - } - var miss []int64 - for _, key := range keys { - if (res == nil) || (res[key] == nil) { - miss = append(miss, key) - } - } - cache.MetricHits.Add(float64(len(keys)-len(miss)), "bts:Demos") - for k, v := range res { - if v.ID == -1 { - delete(res, k) - } - } - missLen := len(miss) - if missLen == 0 { - return - } - missData := make(map[int64]*Demo, missLen) - cache.MetricMisses.Add(float64(missLen), "bts:Demos") - var mutex sync.Mutex - group := errgroup.WithCancel(c) - if missLen > 20 { - group.GOMAXPROCS(20) - } - var run = func(ms []int64) { - group.Go(func(ctx context.Context) (err error) { - data, err := d.RawDemos(ctx, ms) - mutex.Lock() - for k, v := range data { - missData[k] = v - } - mutex.Unlock() - return - }) - } - var ( - i int - n = missLen / 2 - ) - for i = 0; i < n; i++ { - run(miss[i*2 : (i+1)*2]) - } - if len(miss[i*2:]) > 0 { - run(miss[i*2:]) - } - err = group.Wait() - if res == nil { - res = make(map[int64]*Demo, len(keys)) - } - for k, v := range missData { - res[k] = v - } - if err != nil { - return - } - for _, key := range miss { - if res[key] == nil { - missData[key] = &Demo{ID: -1} - } - } - if !addCache { - return - } - d.cache.Do(c, func(c context.Context) { - d.AddCacheDemos(c, missData) - }) - return -} - -// Demos1 get data from cache if miss will call source method, then add to cache. 
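The generated Demos above fans the missed keys out in fixed-size groups (-batch=2) with at most -max_group=20 concurrent fetches, merging each group's partial map into the result under a mutex. A stand-alone sketch of that shape, using golang.org/x/sync/errgroup as a stand-in for kratos' pkg/sync/errgroup; the names and the raw fetch are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

// fetchBatched splits the missed keys into fixed-size groups, fetches each
// group concurrently with a bounded number of workers, and merges the
// partial maps under a mutex.
func fetchBatched(ctx context.Context, keys []int64, groupSize, maxGroup int,
	raw func(context.Context, []int64) (map[int64]string, error)) (map[int64]string, error) {

	res := make(map[int64]string, len(keys))
	var mu sync.Mutex

	g, gctx := errgroup.WithContext(ctx)
	g.SetLimit(maxGroup) // bound the number of in-flight groups
	for i := 0; i < len(keys); i += groupSize {
		end := i + groupSize
		if end > len(keys) {
			end = len(keys)
		}
		chunk := keys[i:end]
		g.Go(func() error {
			data, err := raw(gctx, chunk)
			if err != nil {
				return err
			}
			mu.Lock()
			for k, v := range data {
				res[k] = v
			}
			mu.Unlock()
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		return nil, err
	}
	return res, nil
}

func main() {
	raw := func(_ context.Context, ks []int64) (map[int64]string, error) {
		out := make(map[int64]string, len(ks))
		for _, k := range ks {
			out[k] = fmt.Sprintf("row-%d", k)
		}
		return out, nil
	}
	res, err := fetchBatched(context.Background(), []int64{1, 2, 3, 4, 5, 6}, 2, 20, raw)
	fmt.Println(res, err)
}
```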
-func (d *dao) Demos1(c context.Context, keys []int64) (res map[int64]*Demo, err error) { - if len(keys) == 0 { - return - } - addCache := true - if res, err = d.CacheDemos1(c, keys); err != nil { - addCache = false - res = nil - err = nil - } - var miss []int64 - for _, key := range keys { - if (res == nil) || (res[key] == nil) { - miss = append(miss, key) - } - } - cache.MetricHits.Add(float64(len(keys)-len(miss)), "bts:Demos1") - for k, v := range res { - if v.ID == -1 { - delete(res, k) - } - } - missLen := len(miss) - if missLen == 0 { - return - } - missData := make(map[int64]*Demo, missLen) - cache.MetricMisses.Add(float64(missLen), "bts:Demos1") - var mutex sync.Mutex - group := errgroup.WithContext(c) - if missLen > 20 { - group.GOMAXPROCS(20) - } - var run = func(ms []int64) { - group.Go(func(ctx context.Context) (err error) { - data, err := d.RawDemos1(ctx, ms) - mutex.Lock() - for k, v := range data { - missData[k] = v - } - mutex.Unlock() - return - }) - } - var ( - i int - n = missLen / 2 - ) - for i = 0; i < n; i++ { - run(miss[i*2 : (i+1)*2]) - } - if len(miss[i*2:]) > 0 { - run(miss[i*2:]) - } - err = group.Wait() - if res == nil { - res = make(map[int64]*Demo, len(keys)) - } - for k, v := range missData { - res[k] = v - } - if err != nil { - return - } - for _, key := range miss { - if res[key] == nil { - missData[key] = &Demo{ID: -1} - } - } - if !addCache { - return - } - d.cache.Do(c, func(c context.Context) { - d.AddCacheDemos1(c, missData) - }) - return -} - -// Demo get data from cache if miss will call source method, then add to cache. -func (d *dao) Demo(c context.Context, key int64) (res *Demo, err error) { - addCache := true - res, err = d.CacheDemo(c, key) - if err != nil { - addCache = false - err = nil - } - defer func() { - if res.ID == -1 { - res = nil - } - }() - if res != nil { - cache.MetricHits.Inc("bts:Demo") - return - } - cache.MetricMisses.Inc("bts:Demo") - res, err = d.RawDemo(c, key) - if err != nil { - return - } - miss := res - if miss == nil { - miss = &Demo{ID: -1} - } - if !addCache { - return - } - d.AddCacheDemo(c, key, miss) - return -} - -// Demo1 get data from cache if miss will call source method, then add to cache. -func (d *dao) Demo1(c context.Context, key int64, pn int, ps int) (res *Demo, err error) { - addCache := true - res, err = d.CacheDemo1(c, key, pn, ps) - if err != nil { - addCache = false - err = nil - } - if res != nil { - cache.MetricHits.Inc("bts:Demo1") - return - } - var miss *Demo - cache.MetricMisses.Inc("bts:Demo1") - res, miss, err = d.RawDemo1(c, key, pn, ps) - if err != nil { - return - } - if !addCache { - return - } - d.cache.Do(c, func(c context.Context) { - d.AddCacheDemo1(c, key, miss, pn, ps) - }) - return -} - -// None get data from cache if miss will call source method, then add to cache. 
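The generated Demo above also shows what -nullcache=&Demo{ID:-1} and -check_null_code=$.ID==-1 expand to: an absent row is cached as a sentinel value and stripped again on the way out, so repeated lookups of a missing key stop hitting the source. A hand-written miniature of the same idea, with in-memory maps standing in for memcache and the database:

```go
package main

import (
	"context"
	"fmt"
)

// Demo stands in for the testdata type; ID == -1 is the null sentinel.
type Demo struct{ ID int64 }

type store struct {
	cache map[int64]*Demo // stands in for memcache
	db    map[int64]*Demo // stands in for the database
}

// Get is a hand-written miniature of the generated single-key flow: read the
// cache, strip the null sentinel, fall back to the source on a miss, and
// cache the sentinel when the row does not exist.
func (s *store) Get(_ context.Context, key int64) (*Demo, error) {
	if v, ok := s.cache[key]; ok {
		if v.ID == -1 {
			return nil, nil // cached absence: do not hit the source again
		}
		return v, nil
	}
	v := s.db[key] // RawDemo / the database query
	miss := v
	if miss == nil {
		miss = &Demo{ID: -1} // remember the absence
	}
	s.cache[key] = miss // AddCacheDemo
	return v, nil
}

func main() {
	s := &store{cache: map[int64]*Demo{}, db: map[int64]*Demo{1: {ID: 1}}}
	fmt.Println(s.Get(context.Background(), 1)) // loaded from the source, then cached
	fmt.Println(s.Get(context.Background(), 2)) // absent: sentinel written to the cache
	fmt.Println(s.Get(context.Background(), 2)) // absence now served from the cache
}
```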
-func (d *dao) None(c context.Context) (res *Demo, err error) { - addCache := true - res, err = d.CacheNone(c) - if err != nil { - addCache = false - err = nil - } - defer func() { - if res.ID == -1 { - res = nil - } - }() - if res != nil { - cache.MetricHits.Inc("bts:None") - return - } - cache.MetricMisses.Inc("bts:None") - res, err = d.RawNone(c) - if err != nil { - return - } - var miss = res - if miss == nil { - miss = &Demo{ID: -1} - } - if !addCache { - return - } - d.cache.Do(c, func(c context.Context) { - d.AddCacheNone(c, miss) - }) - return -} diff --git a/tool/kratos-gen-bts/testdata/dao.go b/tool/kratos-gen-bts/testdata/dao.go deleted file mode 100644 index 1958f8424..000000000 --- a/tool/kratos-gen-bts/testdata/dao.go +++ /dev/null @@ -1,37 +0,0 @@ -package testdata - -import ( - "context" - - "github.com/go-kratos/kratos/pkg/sync/pipeline/fanout" -) - -// Demo test struct -type Demo struct { - ID int64 - Title string -} - -// Dao . -type dao struct { - cache *fanout.Fanout -} - -// New . -func New() *dao { - return &dao{cache: fanout.New("cache")} -} - -//go:generate kratos tool genbts -type _bts interface { - // bts: -batch=2 -max_group=20 -batch_err=break -nullcache=&Demo{ID:-1} -check_null_code=$.ID==-1 - Demos(c context.Context, keys []int64) (map[int64]*Demo, error) - // bts: -batch=2 -max_group=20 -batch_err=continue -nullcache=&Demo{ID:-1} -check_null_code=$.ID==-1 - Demos1(c context.Context, keys []int64) (map[int64]*Demo, error) - // bts: -sync=true -nullcache=&Demo{ID:-1} -check_null_code=$.ID==-1 - Demo(c context.Context, key int64) (*Demo, error) - // bts: -paging=true - Demo1(c context.Context, key int64, pn int, ps int) (*Demo, error) - // bts: -nullcache=&Demo{ID:-1} -check_null_code=$.ID==-1 - None(c context.Context) (*Demo, error) -} diff --git a/tool/kratos-gen-bts/testdata/multi.go b/tool/kratos-gen-bts/testdata/multi.go deleted file mode 100644 index 4685d37ef..000000000 --- a/tool/kratos-gen-bts/testdata/multi.go +++ /dev/null @@ -1,48 +0,0 @@ -package testdata - -import ( - "context" -) - -// mock test -var ( - _multiCacheFunc func(c context.Context, keys []int64) (map[int64]*Demo, error) - _multiRawFunc func(c context.Context, keys []int64) (map[int64]*Demo, error) - _multiAddCacheFunc func(c context.Context, values map[int64]*Demo) error -) - -// CacheDemos . -func (d *dao) CacheDemos(c context.Context, keys []int64) (map[int64]*Demo, error) { - // get data from cache - return _multiCacheFunc(c, keys) -} - -// RawDemos . -func (d *dao) RawDemos(c context.Context, keys []int64) (map[int64]*Demo, error) { - // get data from db - return _multiRawFunc(c, keys) -} - -// AddCacheDemos . -func (d *dao) AddCacheDemos(c context.Context, values map[int64]*Demo) error { - // add to cache - return _multiAddCacheFunc(c, values) -} - -// CacheDemos1 . -func (d *dao) CacheDemos1(c context.Context, keys []int64) (map[int64]*Demo, error) { - // get data from cache - return _multiCacheFunc(c, keys) -} - -// RawDemos . -func (d *dao) RawDemos1(c context.Context, keys []int64) (map[int64]*Demo, error) { - // get data from db - return _multiRawFunc(c, keys) -} - -// AddCacheDemos . 
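Besides batching, the templates earlier carry EnableSingleFlight branches that collapse concurrent misses for the same key into one back-to-source call through a shared group (cacheSingleFlights[SFNUM].Do), whose Do signature matches golang.org/x/sync/singleflight. The sketch below uses that package directly to show the effect; the key format and rawDemo are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/singleflight"
)

var group singleflight.Group

// loadDemo collapses overlapping misses for the same key into one
// back-to-source call; rawDemo stands in for the RAWFUNC the templates call.
func loadDemo(ctx context.Context, id int64) (string, error) {
	v, err, _ := group.Do(fmt.Sprintf("Demo:%d", id), func() (interface{}, error) {
		return rawDemo(ctx, id)
	})
	if err != nil {
		return "", err
	}
	return v.(string), nil
}

func rawDemo(_ context.Context, id int64) (string, error) {
	fmt.Println("back to source for", id) // overlapping callers share this call
	return fmt.Sprintf("demo-%d", id), nil
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			res, _ := loadDemo(context.Background(), 1)
			_ = res
		}()
	}
	wg.Wait()
}
```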
-func (d *dao) AddCacheDemos1(c context.Context, values map[int64]*Demo) error { - // add to cache - return _multiAddCacheFunc(c, values) -} diff --git a/tool/kratos-gen-bts/testdata/multi_test.go b/tool/kratos-gen-bts/testdata/multi_test.go deleted file mode 100644 index 78319acd3..000000000 --- a/tool/kratos-gen-bts/testdata/multi_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package testdata - -import ( - "context" - "errors" - "testing" -) - -func TestMultiCache(t *testing.T) { - id := int64(1) - d := New() - meta := map[int64]*Demo{id: {ID: id}} - getsFromCache := func(c context.Context, keys []int64) (map[int64]*Demo, error) { return meta, nil } - notGetsFromCache := func(c context.Context, keys []int64) (map[int64]*Demo, error) { return nil, errors.New("err") } - // 缓存返回了部分数据 - partFromCache := func(c context.Context, keys []int64) (map[int64]*Demo, error) { return meta, errors.New("err") } - getsFromSource := func(c context.Context, keys []int64) (map[int64]*Demo, error) { return meta, nil } - notGetsFromSource := func(c context.Context, keys []int64) (map[int64]*Demo, error) { - return meta, errors.New("err") - } - addToCache := func(c context.Context, values map[int64]*Demo) error { return nil } - // gets from cache - _multiCacheFunc = getsFromCache - _multiRawFunc = notGetsFromSource - _multiAddCacheFunc = addToCache - res, err := d.Demos(context.TODO(), []int64{id}) - if err != nil { - t.Fatalf("err should be nil, get: %v", err) - } - if res[1].ID != 1 { - t.Fatalf("id should be 1") - } - // get from source - _multiCacheFunc = notGetsFromCache - _multiRawFunc = getsFromSource - res, err = d.Demos(context.TODO(), []int64{1, 2, 3, 4, 5, 6}) - if err != nil { - t.Fatalf("err should be nil, get: %v", err) - } - if res[1].ID != 1 { - t.Fatalf("id should be 1") - } - // 缓存失败 返回部分数据 回源也失败的情况 - _multiCacheFunc = partFromCache - _multiRawFunc = notGetsFromSource - res, err = d.Demos(context.TODO(), []int64{id}) - if err == nil { - t.Fatalf("err should be nil, get: %v", err) - } - if res[1].ID != 1 { - t.Fatalf("id should be 1") - } - // with null cache - nullCache := &Demo{ID: -1} - getNullFromCache := func(c context.Context, keys []int64) (map[int64]*Demo, error) { - return map[int64]*Demo{id: nullCache}, nil - } - _multiCacheFunc = getNullFromCache - _multiRawFunc = notGetsFromSource - res, err = d.Demos(context.TODO(), []int64{id}) - if err != nil { - t.Fatalf("err should be nil, get: %v", err) - } - if res[id] != nil { - t.Fatalf("res should be nil") - } -} diff --git a/tool/kratos-gen-bts/testdata/none.go b/tool/kratos-gen-bts/testdata/none.go deleted file mode 100644 index c3202613a..000000000 --- a/tool/kratos-gen-bts/testdata/none.go +++ /dev/null @@ -1,30 +0,0 @@ -package testdata - -import ( - "context" -) - -// mock test -var ( - _noneCacheFunc func(c context.Context) (*Demo, error) - _noneRawFunc func(c context.Context) (*Demo, error) - _noneAddCacheFunc func(c context.Context, value *Demo) error -) - -// CacheNone . -func (d *dao) CacheNone(c context.Context) (*Demo, error) { - // get data from cache - return _noneCacheFunc(c) -} - -// RawNone . -func (d *dao) RawNone(c context.Context) (*Demo, error) { - // get data from db - return _noneRawFunc(c) -} - -// AddCacheNone . 
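The tests above exercise the generated code without a real memcache or database: the dao's Cache*/Raw*/AddCache* methods delegate to package-level function variables, and each test swaps in hit, miss or error behaviour. A trimmed, self-contained version of that seam; the package and names are illustrative:

```go
package seam

import (
	"context"
	"errors"
	"testing"
)

// fetchRow is the swappable seam: production code calls through it, and each
// test below replaces it with hit or error behaviour.
var fetchRow = func(ctx context.Context, id int64) (string, error) {
	return "", errors.New("not wired up")
}

func loadRow(ctx context.Context, id int64) (string, error) {
	return fetchRow(ctx, id)
}

func TestLoadRow(t *testing.T) {
	// the "source" returns data
	fetchRow = func(context.Context, int64) (string, error) { return "ok", nil }
	if v, err := loadRow(context.TODO(), 1); err != nil || v != "ok" {
		t.Fatalf("want ok, got %q err %v", v, err)
	}
	// the "source" is down
	fetchRow = func(context.Context, int64) (string, error) { return "", errors.New("down") }
	if _, err := loadRow(context.TODO(), 1); err == nil {
		t.Fatal("want an error when the source is down")
	}
}
```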
-func (d *dao) AddCacheNone(c context.Context, value *Demo) error { - // add to cache - return _noneAddCacheFunc(c, value) -} diff --git a/tool/kratos-gen-bts/testdata/none_test.go b/tool/kratos-gen-bts/testdata/none_test.go deleted file mode 100644 index 6ace25d75..000000000 --- a/tool/kratos-gen-bts/testdata/none_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package testdata - -import ( - "context" - "errors" - "testing" -) - -func TestNoneCache(t *testing.T) { - d := New() - meta := &Demo{ID: 1} - getFromCache := func(c context.Context) (*Demo, error) { return meta, nil } - notGetFromCache := func(c context.Context) (*Demo, error) { return nil, errors.New("err") } - getFromSource := func(c context.Context) (*Demo, error) { return meta, nil } - notGetFromSource := func(c context.Context) (*Demo, error) { return meta, errors.New("err") } - addToCache := func(c context.Context, values *Demo) error { return nil } - // get from cache - _noneCacheFunc = getFromCache - _noneRawFunc = notGetFromSource - _noneAddCacheFunc = addToCache - res, err := d.None(context.TODO()) - if err != nil { - t.Fatalf("err should be nil, get: %v", err) - } - if res.ID != 1 { - t.Fatalf("id should be 1") - } - // get from source - _noneCacheFunc = notGetFromCache - _noneRawFunc = getFromSource - res, err = d.None(context.TODO()) - if err != nil { - t.Fatalf("err should be nil, get: %v", err) - } - if res.ID != 1 { - t.Fatalf("id should be 1") - } - // with null cache - nullCache := &Demo{ID: -1} - getNullFromCache := func(c context.Context) (*Demo, error) { return nullCache, nil } - _noneCacheFunc = getNullFromCache - _noneRawFunc = notGetFromSource - res, err = d.None(context.TODO()) - if err != nil { - t.Fatalf("err should be nil, get: %v", err) - } - if res != nil { - t.Fatalf("res should be nil") - } -} diff --git a/tool/kratos-gen-bts/testdata/single.go b/tool/kratos-gen-bts/testdata/single.go deleted file mode 100644 index fe390216a..000000000 --- a/tool/kratos-gen-bts/testdata/single.go +++ /dev/null @@ -1,48 +0,0 @@ -package testdata - -import ( - "context" -) - -// mock test -var ( - _singleCacheFunc func(c context.Context, key int64) (*Demo, error) - _singleRawFunc func(c context.Context, key int64) (*Demo, error) - _singleAddCacheFunc func(c context.Context, key int64, value *Demo) error -) - -// CacheDemo . -func (d *dao) CacheDemo(c context.Context, key int64) (*Demo, error) { - // get data from cache - return _singleCacheFunc(c, key) -} - -// RawDemo . -func (d *dao) RawDemo(c context.Context, key int64) (*Demo, error) { - // get data from db - return _singleRawFunc(c, key) -} - -// AddCacheDemo . -func (d *dao) AddCacheDemo(c context.Context, key int64, value *Demo) error { - // add to cache - return _singleAddCacheFunc(c, key, value) -} - -// CacheDemo1 . -func (d *dao) CacheDemo1(c context.Context, key int64, pn, ps int) (*Demo, error) { - // get data from cache - return nil, nil -} - -// RawDemo1 . -func (d *dao) RawDemo1(c context.Context, key int64, pn, ps int) (*Demo, *Demo, error) { - // get data from db - return nil, nil, nil -} - -// AddCacheDemo1 . 
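RawDemo1 above returns two values because of -paging=true: the result handed back to the caller and the value that is written to the cache (the generated Demo1 caches miss and returns res). A small hand-written version of that flow; the key layout and types are illustrative:

```go
package main

import (
	"context"
	"fmt"
)

// Demo stands in for the testdata type.
type Demo struct{ ID int64 }

// cache is an illustrative stand-in for memcache.
var cache = map[string]*Demo{}

func cacheDemo1(_ context.Context, key int64, pn, ps int) (*Demo, error) {
	return cache[fmt.Sprintf("%d_%d_%d", key, pn, ps)], nil
}

// rawDemo1 returns two values, as -paging=true requires: the result for the
// caller and the value that should be written back to the cache.
func rawDemo1(_ context.Context, key int64, pn, ps int) (res, miss *Demo, err error) {
	d := &Demo{ID: key}
	return d, d, nil
}

func addCacheDemo1(_ context.Context, key int64, val *Demo, pn, ps int) error {
	cache[fmt.Sprintf("%d_%d_%d", key, pn, ps)] = val
	return nil
}

// demo1 follows the read-through shape the generator emits for -paging=true.
func demo1(c context.Context, key int64, pn, ps int) (*Demo, error) {
	res, err := cacheDemo1(c, key, pn, ps)
	if err == nil && res != nil {
		return res, nil // cache hit
	}
	res, miss, err := rawDemo1(c, key, pn, ps)
	if err != nil {
		return nil, err
	}
	_ = addCacheDemo1(c, key, miss, pn, ps) // cache the second value, return the first
	return res, nil
}

func main() {
	fmt.Println(demo1(context.Background(), 7, 1, 20))
	fmt.Println(demo1(context.Background(), 7, 1, 20)) // second call hits the cache
}
```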
-func (d *dao) AddCacheDemo1(c context.Context, key int64, value *Demo, pn, ps int) error { - // add to cache - return nil -} diff --git a/tool/kratos-gen-bts/testdata/single_test.go b/tool/kratos-gen-bts/testdata/single_test.go deleted file mode 100644 index e74973d2c..000000000 --- a/tool/kratos-gen-bts/testdata/single_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package testdata - -import ( - "context" - "errors" - "testing" -) - -func TestSingleCache(t *testing.T) { - d := New() - meta := &Demo{ID: 1} - getFromCache := func(c context.Context, id int64) (*Demo, error) { return meta, nil } - notGetFromCache := func(c context.Context, id int64) (*Demo, error) { return nil, errors.New("err") } - getFromSource := func(c context.Context, id int64) (*Demo, error) { return meta, nil } - notGetFromSource := func(c context.Context, id int64) (*Demo, error) { return meta, errors.New("err") } - addToCache := func(c context.Context, id int64, values *Demo) error { return nil } - // get from cache - _singleCacheFunc = getFromCache - _singleRawFunc = notGetFromSource - _singleAddCacheFunc = addToCache - res, err := d.Demo(context.TODO(), 1) - if err != nil { - t.Fatalf("err should be nil, get: %v", err) - } - if res.ID != 1 { - t.Fatalf("id should be 1") - } - // get from source - _singleCacheFunc = notGetFromCache - _singleRawFunc = getFromSource - res, err = d.Demo(context.TODO(), 1) - if err != nil { - t.Fatalf("err should be nil, get: %v", err) - } - if res.ID != 1 { - t.Fatalf("id should be 1") - } - // with null cache - nullCache := &Demo{ID: -1} - getNullFromCache := func(c context.Context, id int64) (*Demo, error) { return nullCache, nil } - _singleCacheFunc = getNullFromCache - _singleRawFunc = notGetFromSource - res, err = d.Demo(context.TODO(), 1) - if err != nil { - t.Fatalf("err should be nil, get: %v", err) - } - if res != nil { - t.Fatalf("res should be nil") - } -} diff --git a/tool/kratos-gen-mc/README.md b/tool/kratos-gen-mc/README.md deleted file mode 100644 index 0bc8c89ff..000000000 --- a/tool/kratos-gen-mc/README.md +++ /dev/null @@ -1,45 +0,0 @@ - -#### genmc - -> mc缓存代码生成 - -##### 项目简介 - -自动生成memcached缓存代码 和缓存回源工具kratos-gen-bts配合使用 体验更佳 -支持以下功能: -- 常用mc命令(get/set/add/replace/delete) -- 多种数据存储格式(json/pb/raw/gob/gzip) -- 常用值类型自动转换(int/bool/float...) -- 自定义缓存名称和过期时间 -- 记录pkg/error错误栈 -- 记录日志trace id -- prometheus错误监控 -- 自定义参数个数 -- 自定义注释 - -##### 使用方式: -1. dao.go文件中新增 _mc interface -2. 在dao 文件夹中执行 go generate命令 将会生成相应的缓存代码 -3. 示例见testdata/dao.go - -##### 注意: -类型会根据前缀进行猜测 -set / add 对应mc方法Set -replace 对应mc方法 Replace -del 对应mc方法 Delete -get / cache对应mc方法Get -mc Add方法需要用注解 -type=only_add单独指定 - -#### 注解参数: -| 名称 | 默认值 | 可用范围 | 说明 | 可选值 | 示例 | -| ----------- | ------------------- | ---------------- | ------------------------------------------------------------ | ---------------------------- | -------------------------- | -| encode | 根据值类型raw或json | set/add/replace | 数据存储的格式 | json/pb/raw/gob/gzip | json 或 json\|gzip 或gob等 | -| type | 前缀推断 | 全部 | mc方法 set/get/delete... 
| get/set/del/replace/only_add | get 或 replace 等 | -| key | 根据方法名称生成 | 全部 | 缓存key名称 | - | demoKey | -| expire | 根据方法名称生成 | 全部 | 缓存过期时间 | - | d.demoExpire | -| batch | | get(限多key模板) | 批量获取数据 每组大小 | - | 100 | -| max_group | | get(限多key模板) | 批量获取数据 最大组数量 | - | 10 | -| batch_err | break | get(限多key模板) | 批量获取数据回源错误的时候 降级继续请求(continue)还是直接返回(break) | break 或 continue | continue | -| struct_name | dao | 全部 | 用户自定义Dao结构体名称 | | MemcacheDao | -|check_null_code||add/set|(和null_expire配套使用)判断是否是空缓存的代码 用于为空缓存独立设定过期时间||$.ID==-1 或者 $=="-1"等| -|null_expire|300(5分钟)|add/set|(和check_null_code配套使用)空缓存的过期时间||d.nullExpire| \ No newline at end of file diff --git a/tool/kratos-gen-mc/header_template.go b/tool/kratos-gen-mc/header_template.go deleted file mode 100644 index 5c6cf763e..000000000 --- a/tool/kratos-gen-mc/header_template.go +++ /dev/null @@ -1,29 +0,0 @@ -package main - -var _headerTemplate = ` -// Code generated by kratos tool genmc. DO NOT EDIT. - -NEWLINE -/* - Package {{.PkgName}} is a generated mc cache package. - It is generated from: - ARGS -*/ -NEWLINE - -package {{.PkgName}} - -import ( - "context" - "fmt" - {{if .UseStrConv}}"strconv"{{end}} - {{if .EnableBatch }}"sync"{{end}} -NEWLINE - {{if .UseMemcached }}"github.com/go-kratos/kratos/pkg/cache/memcache"{{end}} - {{if .EnableBatch }}"github.com/go-kratos/kratos/pkg/sync/errgroup"{{end}} - "github.com/go-kratos/kratos/pkg/log" - {{.ImportPackage}} -) - -var _ _mc -` diff --git a/tool/kratos-gen-mc/main.go b/tool/kratos-gen-mc/main.go deleted file mode 100644 index 8a4f30002..000000000 --- a/tool/kratos-gen-mc/main.go +++ /dev/null @@ -1,570 +0,0 @@ -package main - -import ( - "bytes" - "flag" - "go/ast" - "io/ioutil" - "log" - "os" - "path/filepath" - "regexp" - "runtime" - "strconv" - "strings" - "text/template" - - common "github.com/go-kratos/kratos/tool/pkg" -) - -var ( - encode = flag.String("encode", "", "encode type: json/pb/raw/gob/gzip") - mcType = flag.String("type", "", "type: get/set/del/replace/only_add") - key = flag.String("key", "", "key name method") - expire = flag.String("expire", "", "expire time code") - structName = flag.String("struct_name", "dao", "struct name") - batchSize = flag.Int("batch", 0, "batch size") - batchErr = flag.String("batch_err", "break", "batch err to continue or break") - maxGroup = flag.Int("max_group", 0, "max group size") - checkNullCode = flag.String("check_null_code", "", "check null code") - nullExpire = flag.String("null_expire", "", "null cache expire time code") - - mcValidTypes = []string{"set", "replace", "del", "get", "only_add"} - mcValidPrefix = []string{"set", "replace", "del", "get", "cache", "add"} - optionNamesMap = map[string]bool{"batch": true, "max_group": true, "encode": true, "type": true, "key": true, "expire": true, "batch_err": true, "struct_name": true, "check_null_code": true, "null_expire": true} - simpleTypes = []string{"int", "int8", "int16", "int32", "int64", "float32", "float64", "uint", "uint8", "uint16", "uint32", "uint64", "bool", "string", "[]byte"} - lenTypes = []string{"[]", "map"} -) - -const ( - _interfaceName = "_mc" - _multiTpl = 1 - _singleTpl = 2 - _noneTpl = 3 - _typeGet = "get" - _typeSet = "set" - _typeDel = "del" - _typeReplace = "replace" - _typeAdd = "only_add" -) - -func resetFlag() { - *encode = "" - *mcType = "" - *batchSize = 0 - *maxGroup = 0 - *batchErr = "break" - *checkNullCode = "" - *nullExpire = "" - *structName = "dao" -} - -// options options -type options struct { - name string - keyType string - ValueType string - template int - 
SimpleValue bool - // int float 类型 - GetSimpleValue bool - // string, []byte类型 - GetDirectValue bool - ConvertValue2Bytes string - ConvertBytes2Value string - GoValue bool - ImportPackage string - importPackages []string - Args string - PkgName string - ExtraArgsType string - ExtraArgs string - MCType string - KeyMethod string - ExpireCode string - Encode string - UseMemcached bool - OriginValueType string - UseStrConv bool - Comment string - GroupSize int - MaxGroup int - EnableBatch bool - BatchErrBreak bool - LenType bool - PointType bool - StructName string - CheckNullCode string - ExpireNullCode string - EnableNullCode bool -} - -func getOptions(opt *options, comment string) { - os.Args = []string{os.Args[0]} - if regexp.MustCompile(`\s+//\s*mc:.+`).Match([]byte(comment)) { - args := strings.Split(common.RegexpReplace(`//\s*mc:(?P.+)`, comment, "$arg"), " ") - for _, arg := range args { - arg = strings.TrimSpace(arg) - if arg != "" { - // validate option name - argName := common.RegexpReplace(`-(?P[\w_-]+)=.+`, arg, "$name") - if !optionNamesMap[argName] { - log.Fatalf("选项:%s 不存在 请检查拼写\n", argName) - } - os.Args = append(os.Args, arg) - } - } - } - resetFlag() - flag.Parse() - if *mcType != "" { - opt.MCType = *mcType - } - if *key != "" { - opt.KeyMethod = *key - } - if *expire != "" { - opt.ExpireCode = *expire - } - opt.EnableBatch = (*batchSize != 0) && (*maxGroup != 0) - opt.BatchErrBreak = *batchErr == "break" - opt.GroupSize = *batchSize - opt.MaxGroup = *maxGroup - opt.StructName = *structName - opt.CheckNullCode = *checkNullCode - if *nullExpire != "" { - opt.ExpireNullCode = *nullExpire - } - if opt.CheckNullCode != "" { - opt.EnableNullCode = true - } -} - -func getTypeFromPrefix(opt *options, params []*ast.Field, s *common.Source) { - if opt.MCType == "" { - for _, t := range mcValidPrefix { - if strings.HasPrefix(strings.ToLower(opt.name), t) { - if t == "add" { - t = _typeSet - } - opt.MCType = t - break - } - } - if opt.MCType == "" { - log.Fatalln(opt.name + "请指定方法类型(type=get/set/del...)") - } - } - if opt.MCType == "cache" { - opt.MCType = _typeGet - } - if len(params) == 0 { - log.Fatalln(opt.name + "参数不足") - } - for _, p := range params { - if len(p.Names) > 1 { - log.Fatalln(opt.name + "不支持省略类型 请写全声明中的字段类型名称") - } - } - if s.ExprString(params[0].Type) != "context.Context" { - log.Fatalln(opt.name + "第一个参数必须为context") - } - for _, param := range params { - if len(param.Names) > 1 { - log.Fatalln(opt.name + "不支持省略类型") - } - } -} - -func processList(s *common.Source, list *ast.Field) (opt options) { - src := s.Src - fset := s.Fset - lines := strings.Split(src, "\n") - opt = options{Args: s.GetDef(_interfaceName), UseMemcached: true, importPackages: s.Packages(list)} - opt.name = list.Names[0].Name - opt.KeyMethod = "key" + opt.name - opt.ExpireCode = "d.mc" + opt.name + "Expire" - opt.ExpireNullCode = "300" // 默认5分钟 - // get comment - line := fset.Position(list.Pos()).Line - 3 - if len(lines)-1 >= line { - comment := lines[line] - opt.Comment = common.RegexpReplace(`\s+//(?P.+)`, comment, "$name") - opt.Comment = strings.TrimSpace(opt.Comment) - } - // get options - line = fset.Position(list.Pos()).Line - 2 - comment := lines[line] - getOptions(&opt, comment) - // get type from prefix - params := list.Type.(*ast.FuncType).Params.List - getTypeFromPrefix(&opt, params, s) - // get template - if len(params) == 1 { - opt.template = _noneTpl - } else if (len(params) == 2) && (opt.MCType == _typeSet || opt.MCType == _typeAdd || opt.MCType == _typeReplace) { - if _, ok := 
params[1].Type.(*ast.MapType); ok { - opt.template = _multiTpl - } else { - opt.template = _noneTpl - } - } else { - if _, ok := params[1].Type.(*ast.ArrayType); ok { - opt.template = _multiTpl - } else if _, ok := params[1].Type.(*ast.MapType); ok { - opt.template = _multiTpl - } else { - opt.template = _singleTpl - } - } - // extra args - if len(params) > 2 { - args := []string{""} - allArgs := []string{""} - var pos = 2 - if (opt.MCType == _typeAdd) || (opt.MCType == _typeSet) || (opt.MCType == _typeReplace) { - pos = 3 - } - if opt.template == _multiTpl && opt.MCType == _typeSet { - pos = 2 - } - for _, pa := range params[pos:] { - paType := s.ExprString(pa.Type) - if len(pa.Names) == 0 { - args = append(args, paType) - allArgs = append(allArgs, paType) - continue - } - var names []string - for _, name := range pa.Names { - names = append(names, name.Name) - } - allArgs = append(allArgs, strings.Join(names, ",")+" "+paType) - args = append(args, strings.Join(names, ",")) - } - if len(args) > 1 { - opt.ExtraArgs = strings.Join(args, ",") - opt.ExtraArgsType = strings.Join(allArgs, ",") - } - } - results := list.Type.(*ast.FuncType).Results.List - getKeyValueType(&opt, params, results, s) - return -} - -func getKeyValueType(opt *options, params, results []*ast.Field, s *common.Source) { - // check - if s.ExprString(results[len(results)-1].Type) != "error" { - log.Fatalln("最后返回值参数需为error") - } - for _, res := range results { - if len(res.Names) > 1 { - log.Fatalln(opt.name + "返回值不支持省略类型") - } - } - if opt.MCType == _typeGet { - if len(results) != 2 { - log.Fatalln("参数个数不对") - } - } - // get key type and value type - if (opt.MCType == _typeAdd) || (opt.MCType == _typeSet) || (opt.MCType == _typeReplace) { - if opt.template == _multiTpl { - p, ok := params[1].Type.(*ast.MapType) - if !ok { - log.Fatalf("%s: 参数类型错误 批量设置数据时类型需为map类型\n", opt.name) - } - opt.keyType = s.ExprString(p.Key) - opt.ValueType = s.ExprString(p.Value) - } else if opt.template == _singleTpl { - opt.keyType = s.ExprString(params[1].Type) - opt.ValueType = s.ExprString(params[2].Type) - } else { - opt.ValueType = s.ExprString(params[1].Type) - } - } - if opt.MCType == _typeGet { - if opt.template == _multiTpl { - if p, ok := results[0].Type.(*ast.MapType); ok { - opt.keyType = s.ExprString(p.Key) - opt.ValueType = s.ExprString(p.Value) - } else { - log.Fatalf("%s: 返回值类型错误 批量获取数据时返回值需为map类型\n", opt.name) - } - } else if opt.template == _singleTpl { - opt.keyType = s.ExprString(params[1].Type) - opt.ValueType = s.ExprString(results[0].Type) - } else { - opt.ValueType = s.ExprString(results[0].Type) - } - } - if opt.MCType == _typeDel { - if opt.template == _multiTpl { - p, ok := params[1].Type.(*ast.ArrayType) - if !ok { - log.Fatalf("%s: 类型错误 参数需为[]类型\n", opt.name) - } - opt.keyType = s.ExprString(p.Elt) - } else if opt.template == _singleTpl { - opt.keyType = s.ExprString(params[1].Type) - } - } - for _, t := range simpleTypes { - if t == opt.ValueType { - opt.SimpleValue = true - opt.GetSimpleValue = true - opt.ConvertValue2Bytes = convertValue2Bytes(t) - opt.ConvertBytes2Value = convertBytes2Value(t) - break - } - } - if opt.ValueType == "string" { - opt.LenType = true - } else { - for _, t := range lenTypes { - if strings.HasPrefix(opt.ValueType, t) { - opt.LenType = true - break - } - } - } - if opt.SimpleValue && (opt.ValueType == "[]byte" || opt.ValueType == "string") { - opt.GetSimpleValue = false - opt.GetDirectValue = true - } - if opt.MCType == _typeGet && opt.template == _multiTpl { - opt.UseMemcached = false - 
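processList and getKeyValueType read everything they need from the method signature itself: go/ast provides the parameter and result types, which decide between the multi/single/none templates and fill KEY and VALUE. A self-contained sketch of that kind of walk over a tiny _mc interface (a simplified stand-in for the tool's parsing helpers):

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"log"
)

// A miniature dao file: the generator parses it with comments and inspects
// the _mc interface's method signatures.
const src = `package dao

import "context"

type Demo struct{ ID int64 }

type _mc interface {
	// mc: -key=demoKey
	CacheDemos(c context.Context, keys []int64) (map[int64]*Demo, error)
}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "dao.go", src, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}
	for _, decl := range f.Decls {
		gd, ok := decl.(*ast.GenDecl)
		if !ok || gd.Tok != token.TYPE {
			continue
		}
		for _, spec := range gd.Specs {
			ts := spec.(*ast.TypeSpec)
			iface, ok := ts.Type.(*ast.InterfaceType)
			if !ok || ts.Name.Name != "_mc" {
				continue
			}
			for _, m := range iface.Methods.List {
				ft := m.Type.(*ast.FuncType)
				fmt.Println("method:", m.Names[0].Name)
				for _, p := range ft.Params.List {
					fmt.Println("  param:", types.ExprString(p.Type))
				}
				for _, r := range ft.Results.List {
					fmt.Println("  result:", types.ExprString(r.Type))
				}
			}
		}
	}
}
```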
} - if strings.HasPrefix(opt.ValueType, "*") { - opt.PointType = true - opt.OriginValueType = strings.Replace(opt.ValueType, "*", "", 1) - } else { - opt.OriginValueType = opt.ValueType - } - if *encode != "" { - var flags []string - for _, f := range strings.Split(*encode, "|") { - switch f { - case "gob": - flags = append(flags, "memcache.FlagGOB") - case "json": - flags = append(flags, "memcache.FlagJSON") - case "raw": - flags = append(flags, "memcache.FlagRAW") - case "pb": - flags = append(flags, "memcache.FlagProtobuf") - case "gzip": - flags = append(flags, "memcache.FlagGzip") - default: - log.Fatalf("%s: encode类型无效\n", opt.name) - } - } - opt.Encode = strings.Join(flags, " | ") - } else { - if opt.SimpleValue { - opt.Encode = "memcache.FlagRAW" - } else { - opt.Encode = "memcache.FlagJSON" - } - } -} - -func parse(s *common.Source) (opts []*options) { - c := s.F.Scope.Lookup(_interfaceName) - if (c == nil) || (c.Kind != ast.Typ) { - log.Fatalln("无法找到缓存声明") - } - lists := c.Decl.(*ast.TypeSpec).Type.(*ast.InterfaceType).Methods.List - for _, list := range lists { - opt := processList(s, list) - opt.Check() - opts = append(opts, &opt) - } - return -} - -func (option *options) Check() { - var valid bool - for _, x := range mcValidTypes { - if x == option.MCType { - valid = true - break - } - } - if !valid { - log.Fatalf("%s: 类型错误 不支持%s类型\n", option.name, option.MCType) - } - if (option.MCType != _typeDel) && !option.SimpleValue && !strings.Contains(option.ValueType, "*") && !strings.Contains(option.ValueType, "[]") && !strings.Contains(option.ValueType, "map") { - log.Fatalf("%s: 值类型只能为基本类型/slice/map/指针类型\n", option.name) - } -} - -func genHeader(opts []*options) (src string) { - option := options{PkgName: os.Getenv("GOPACKAGE"), UseMemcached: false} - var packages []string - packagesMap := map[string]bool{`"context"`: true} - for _, opt := range opts { - if len(opt.importPackages) > 0 { - for _, pkg := range opt.importPackages { - if !packagesMap[pkg] { - packages = append(packages, pkg) - packagesMap[pkg] = true - } - } - } - if opt.Args != "" { - option.Args = opt.Args - } - if opt.UseMemcached { - option.UseMemcached = true - } - if opt.SimpleValue && !opt.GetDirectValue { - option.UseStrConv = true - } - if opt.EnableBatch { - option.EnableBatch = true - } - } - option.ImportPackage = strings.Join(packages, "\n") - src = _headerTemplate - t := template.Must(template.New("header").Parse(src)) - var buffer bytes.Buffer - err := t.Execute(&buffer, option) - if err != nil { - log.Fatalf("execute template: %s", err) - } - // Format the output. 
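getOptions above re-parses the `// mc:` annotation by splitting it into words, pushing them into os.Args and calling flag.Parse again, so each method's options reuse the standard flag package. The same idea with a local FlagSet instead of mutating os.Args; only three of the options are shown:

```go
package main

import (
	"flag"
	"fmt"
	"regexp"
	"strings"
)

// parseMcOptions turns a "// mc: -key=... -expire=..." comment back into
// command-line style flags and parses them with a dedicated FlagSet.
func parseMcOptions(comment string) (key, expire string, batch int) {
	fs := flag.NewFlagSet("mc", flag.ContinueOnError)
	fs.StringVar(&key, "key", "", "key name method")
	fs.StringVar(&expire, "expire", "", "expire time code")
	fs.IntVar(&batch, "batch", 0, "batch size")

	m := regexp.MustCompile(`//\s*mc:(.+)`).FindStringSubmatch(comment)
	if m == nil {
		return
	}
	_ = fs.Parse(strings.Fields(m[1]))
	return
}

func main() {
	k, e, b := parseMcOptions("\t// mc: -key=demoKey -expire=d.demoExpire -batch=100")
	fmt.Println(k, e, b) // demoKey d.demoExpire 100
}
```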
- src = strings.Replace(buffer.String(), "\t", "", -1) - src = regexp.MustCompile("\n+").ReplaceAllString(src, "\n") - src = strings.Replace(src, "NEWLINE", "", -1) - src = strings.Replace(src, "ARGS", option.Args, -1) - return -} - -func getNewTemplate(option *options) (src string) { - if option.template == _multiTpl { - switch option.MCType { - case _typeGet: - src = _multiGetTemplate - case _typeSet: - src = _multiSetTemplate - case _typeReplace: - src = _multiReplaceTemplate - case _typeDel: - src = _multiDelTemplate - case _typeAdd: - src = _multiAddTemplate - } - } else if option.template == _singleTpl { - switch option.MCType { - case _typeGet: - src = _singleGetTemplate - case _typeSet: - src = _singleSetTemplate - case _typeReplace: - src = _singleReplaceTemplate - case _typeDel: - src = _singleDelTemplate - case _typeAdd: - src = _singleAddTemplate - } - } else { - switch option.MCType { - case _typeGet: - src = _noneGetTemplate - case _typeSet: - src = _noneSetTemplate - case _typeReplace: - src = _noneReplaceTemplate - case _typeDel: - src = _noneDelTemplate - case _typeAdd: - src = _noneAddTemplate - } - } - return -} - -func genBody(opts []*options) (res string) { - for _, option := range opts { - src := getNewTemplate(option) - src = strings.Replace(src, "KEY", option.keyType, -1) - src = strings.Replace(src, "NAME", option.name, -1) - src = strings.Replace(src, "VALUE", option.ValueType, -1) - src = strings.Replace(src, "GROUPSIZE", strconv.Itoa(option.GroupSize), -1) - src = strings.Replace(src, "MAXGROUP", strconv.Itoa(option.MaxGroup), -1) - if option.EnableNullCode { - option.CheckNullCode = strings.Replace(option.CheckNullCode, "$", "val", -1) - } - t := template.Must(template.New("cache").Parse(src)) - var buffer bytes.Buffer - err := t.Execute(&buffer, option) - if err != nil { - log.Fatalf("execute template: %s", err) - } - // Format the output. - src = strings.Replace(buffer.String(), "\t", "", -1) - src = regexp.MustCompile("\n+").ReplaceAllString(src, "\n") - res = res + "\n" + src - } - return -} - -func main() { - log.SetFlags(0) - defer func() { - if err := recover(); err != nil { - buf := make([]byte, 64*1024) - buf = buf[:runtime.Stack(buf, false)] - log.Fatalf("程序解析失败, err: %+v stack: %s", err, buf) - } - }() - options := parse(common.NewSource(common.SourceText())) - header := genHeader(options) - body := genBody(options) - code := common.FormatCode(header + "\n" + body) - // Write to file. 
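For simple value types the generator stores data with memcache.FlagRAW and round-trips it through strconv, which is what the convertValue2Bytes / convertBytes2Value helpers further below emit into the generated code (note that the standard library spells the unsigned variants strconv.FormatUint and strconv.ParseUint). A minimal hand-written round-trip of the same encodings:

```go
package main

import (
	"fmt"
	"strconv"
)

// FlagRAW storage keeps simple values as plain bytes; these helpers are
// hand-written counterparts of the expressions the generator emits.
func int64ToBytes(v int64) []byte   { return []byte(strconv.FormatInt(v, 10)) }
func uint64ToBytes(v uint64) []byte { return []byte(strconv.FormatUint(v, 10)) }
func boolToBytes(v bool) []byte     { return []byte(strconv.FormatBool(v)) }

func bytesToInt64(b []byte) (int64, error)   { return strconv.ParseInt(string(b), 10, 64) }
func bytesToUint64(b []byte) (uint64, error) { return strconv.ParseUint(string(b), 10, 64) }
func bytesToBool(b []byte) (bool, error)     { return strconv.ParseBool(string(b)) }

func main() {
	b := int64ToBytes(42)
	v, err := bytesToInt64(b)
	fmt.Println(string(b), v, err) // 42 42 <nil>

	ub := uint64ToBytes(7)
	uv, err := bytesToUint64(ub)
	fmt.Println(string(ub), uv, err) // 7 7 <nil>
}
```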
- dir := filepath.Dir(".") - outputName := filepath.Join(dir, "mc.cache.go") - err := ioutil.WriteFile(outputName, []byte(code), 0644) - if err != nil { - log.Fatalf("写入文件失败: %s", err) - } - log.Println("mc.cache.go: 生成成功") -} - -func convertValue2Bytes(t string) string { - switch t { - case "int", "int8", "int16", "int32", "int64": - return "[]byte(strconv.FormatInt(int64(val), 10))" - case "uint", "uint8", "uint16", "uint32", "uint64": - return "[]byte(strconv.FormatUInt(val, 10))" - case "bool": - return "[]byte(strconv.FormatBool(val))" - case "float32": - return "[]byte(strconv.FormatFloat(val, 'E', -1, 32))" - case "float64": - return "[]byte(strconv.FormatFloat(val, 'E', -1, 64))" - case "string": - return "[]byte(val)" - case "[]byte": - return "val" - } - return "" -} - -func convertBytes2Value(t string) string { - switch t { - case "int", "int8", "int16", "int32", "int64": - return "strconv.ParseInt(v, 10, 64)" - case "uint", "uint8", "uint16", "uint32", "uint64": - return "strconv.ParseUInt(v, 10, 64)" - case "bool": - return "strconv.ParseBool(v)" - case "float32": - return "float32(strconv.ParseFloat(v, 32))" - case "float64": - return "strconv.ParseFloat(v, 64)" - } - return "" -} diff --git a/tool/kratos-gen-mc/multi_template.go b/tool/kratos-gen-mc/multi_template.go deleted file mode 100644 index db02358cc..000000000 --- a/tool/kratos-gen-mc/multi_template.go +++ /dev/null @@ -1,205 +0,0 @@ -package main - -import ( - "strings" -) - -var _multiGetTemplate = ` -// NAME {{or .Comment "get data from mc"}} -func (d *{{.StructName}}) NAME(c context.Context, ids []KEY {{.ExtraArgsType}}) (res map[KEY]VALUE, err error) { - l := len(ids) - if l == 0 { - return - } - {{if .EnableBatch}} - mutex := sync.Mutex{} - for i:=0;i < l; i+= GROUPSIZE * MAXGROUP { - var subKeys []KEY - {{if .BatchErrBreak}} - group, ctx := errgroup.WithContext(c) - {{else}} - group := &errgroup.Group{} - ctx := c - {{end}} - if (i + GROUPSIZE * MAXGROUP) > l { - subKeys = ids[i:] - } else { - subKeys = ids[i : i+GROUPSIZE * MAXGROUP] - } - subLen := len(subKeys) - for j:=0; j< subLen; j += GROUPSIZE { - var ks []KEY - if (j+GROUPSIZE) > subLen { - ks = subKeys[j:] - } else { - ks = subKeys[j:j+GROUPSIZE] - } - group.Go(func() (err error) { - keysMap := make(map[string]KEY, len(ks)) - keys := make([]string, 0, len(ks)) - for _, id := range ks { - key := {{.KeyMethod}}(id{{.ExtraArgs}}) - keysMap[key] = id - keys = append(keys, key) - } - replies, err := d.mc.GetMulti(c, keys) - if err != nil { - log.Errorv(ctx, log.KV("NAME", fmt.Sprintf("%+v", err)), log.KV("keys", keys)) - return - } - for _, key := range replies.Keys() { - {{if .GetSimpleValue}} - var v string - err = replies.Scan(key, &v) - {{else}} - var v VALUE - {{if .GetDirectValue}} - err = replies.Scan(key, &v) - {{else}} - {{if .PointType}} - v = &{{.OriginValueType}}{} - err = replies.Scan(key, v) - {{else}} - v = {{.OriginValueType}}{} - err = replies.Scan(key, &v) - {{end}} - {{end}} - {{end}} - if err != nil { - log.Errorv(ctx, log.KV("NAME", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - {{if .GetSimpleValue}} - r, err := {{.ConvertBytes2Value}} - if err != nil { - log.Errorv(ctx, log.KV("NAME", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return res, err - } - mutex.Lock() - if res == nil { - res = make(map[KEY]VALUE, len(keys)) - } - res[keysMap[key]] = {{.ValueType}}(r) - mutex.Unlock() - {{else}} - mutex.Lock() - if res == nil { - res = make(map[KEY]VALUE, len(keys)) - } - res[keysMap[key]] = v - mutex.Unlock() - {{end}} - 
} - return - }) - } - err1 := group.Wait() - if err1 != nil { - err = err1 - {{if .BatchErrBreak}} - break - {{end}} - } - } - {{else}} - keysMap := make(map[string]KEY, l) - keys := make([]string, 0, l) - for _, id := range ids { - key := {{.KeyMethod}}(id{{.ExtraArgs}}) - keysMap[key] = id - keys = append(keys, key) - } - replies, err := d.mc.GetMulti(c, keys) - if err != nil { - log.Errorv(c, log.KV("NAME", fmt.Sprintf("%+v", err)), log.KV("keys", keys)) - return - } - for _, key := range replies.Keys() { - {{if .GetSimpleValue}} - var v string - err = replies.Scan(key, &v) - {{else}} - {{if .PointType}} - v := &{{.OriginValueType}}{} - err = replies.Scan(key, v) - {{else}} - v := {{.OriginValueType}}{} - err = replies.Scan(key, &v) - {{end}} - {{end}} - if err != nil { - log.Errorv(c, log.KV("NAME", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - {{if .GetSimpleValue}} - r, err := {{.ConvertBytes2Value}} - if err != nil { - log.Errorv(c, log.KV("NAME", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return res, err - } - if res == nil { - res = make(map[KEY]VALUE, len(keys)) - } - res[keysMap[key]] = {{.ValueType}}(r) - {{else}} - if res == nil { - res = make(map[KEY]VALUE, len(keys)) - } - res[keysMap[key]] = v - {{end}} - } - {{end}} - return -} -` - -var _multiSetTemplate = ` -// NAME {{or .Comment "Set data to mc"}} -func (d *{{.StructName}}) NAME(c context.Context, values map[KEY]VALUE {{.ExtraArgsType}}) (err error) { - if len(values) == 0 { - return - } - for id, val := range values { - key := {{.KeyMethod}}(id{{.ExtraArgs}}) - {{if .SimpleValue}} - bs := {{.ConvertValue2Bytes}} - item := &memcache.Item{Key: key, Value: bs, Expiration: {{.ExpireCode}}, Flags: {{.Encode}}} - {{else}} - item := &memcache.Item{Key: key, Object: val, Expiration: {{.ExpireCode}}, Flags: {{.Encode}}} - {{end}} - {{if .EnableNullCode}} - if {{.CheckNullCode}} { - item.Expiration = {{.ExpireNullCode}} - } - {{end}} - if err = d.mc.Set(c, item); err != nil { - log.Errorv(c, log.KV("NAME", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - } - return -} -` -var _multiAddTemplate = strings.Replace(_multiSetTemplate, "Set", "Add", -1) -var _multiReplaceTemplate = strings.Replace(_multiSetTemplate, "Set", "Replace", -1) - -var _multiDelTemplate = ` -// NAME {{or .Comment "delete data from mc"}} -func (d *{{.StructName}}) NAME(c context.Context, ids []KEY {{.ExtraArgsType}}) (err error) { - if len(ids) == 0 { - return - } - for _, id := range ids { - key := {{.KeyMethod}}(id{{.ExtraArgs}}) - if err = d.mc.Delete(c, key); err != nil { - if err == memcache.ErrNotFound { - err = nil - continue - } - log.Errorv(c, log.KV("NAME", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - } - return -} -` diff --git a/tool/kratos-gen-mc/none_template.go b/tool/kratos-gen-mc/none_template.go deleted file mode 100644 index 65ec1de63..000000000 --- a/tool/kratos-gen-mc/none_template.go +++ /dev/null @@ -1,104 +0,0 @@ -package main - -import ( - "strings" -) - -var _noneGetTemplate = ` -// NAME {{or .Comment "get data from mc"}} -func (d *{{.StructName}}) NAME(c context.Context) (res VALUE, err error) { - key := {{.KeyMethod}}() - {{if .GetSimpleValue}} - var v string - err = d.mc.Get(c, key).Scan(&v) - {{else}} - {{if .GetDirectValue}} - err = d.mc.Get(c, key).Scan(&res) - {{else}} - {{if .PointType}} - res = &{{.OriginValueType}}{} - if err = d.mc.Get(c, key).Scan(res); err != nil { - res = nil - if err == memcache.ErrNotFound { - err = nil - return - } - } - {{else}} - err = d.mc.Get(c, 
key).Scan(&res) - {{end}} - {{end}} - {{end}} - if err != nil { - {{if .PointType}} - {{else}} - if err == memcache.ErrNotFound { - err = nil - return - } - {{end}} - log.Errorv(c, log.KV("NAME", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - {{if .GetSimpleValue}} - r, err := {{.ConvertBytes2Value}} - if err != nil { - log.Errorv(c, log.KV("NAME", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - res = {{.ValueType}}(r) - {{end}} - return -} -` - -var _noneSetTemplate = ` -// NAME {{or .Comment "Set data to mc"}} -func (d *{{.StructName}}) NAME(c context.Context, val VALUE) (err error) { - {{if .PointType}} - if val == nil { - return - } - {{end}} - {{if .LenType}} - if len(val) == 0 { - return - } - {{end}} - key := {{.KeyMethod}}() - {{if .SimpleValue}} - bs := {{.ConvertValue2Bytes}} - item := &memcache.Item{Key: key, Value: bs, Expiration: {{.ExpireCode}}, Flags: {{.Encode}}} - {{else}} - item := &memcache.Item{Key: key, Object: val, Expiration: {{.ExpireCode}}, Flags: {{.Encode}}} - {{end}} - {{if .EnableNullCode}} - if {{.CheckNullCode}} { - item.Expiration = {{.ExpireNullCode}} - } - {{end}} - if err = d.mc.Set(c, item); err != nil { - log.Errorv(c, log.KV("NAME", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - return -} -` -var _noneAddTemplate = strings.Replace(_noneSetTemplate, "Set", "Add", -1) -var _noneReplaceTemplate = strings.Replace(_noneSetTemplate, "Set", "Replace", -1) - -var _noneDelTemplate = ` -// NAME {{or .Comment "delete data from mc"}} -func (d *{{.StructName}}) NAME(c context.Context) (err error) { - key := {{.KeyMethod}}() - if err = d.mc.Delete(c, key); err != nil { - if err == memcache.ErrNotFound { - err = nil - return - } - log.Errorv(c, log.KV("NAME", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - return -} -` diff --git a/tool/kratos-gen-mc/single_template.go b/tool/kratos-gen-mc/single_template.go deleted file mode 100644 index 850a4ba12..000000000 --- a/tool/kratos-gen-mc/single_template.go +++ /dev/null @@ -1,103 +0,0 @@ -package main - -import ( - "strings" -) - -var _singleGetTemplate = ` -// NAME {{or .Comment "get data from mc"}} -func (d *{{.StructName}}) NAME(c context.Context, id KEY {{.ExtraArgsType}}) (res VALUE, err error) { - key := {{.KeyMethod}}(id{{.ExtraArgs}}) - {{if .GetSimpleValue}} - var v string - err = d.mc.Get(c, key).Scan(&v) - {{else}} - {{if .GetDirectValue}} - err = d.mc.Get(c, key).Scan(&res) - {{else}} - {{if .PointType}} - res = &{{.OriginValueType}}{} - if err = d.mc.Get(c, key).Scan(res); err != nil { - res = nil - if err == memcache.ErrNotFound { - err = nil - } - } - {{else}} - err = d.mc.Get(c, key).Scan(&res) - {{end}} - {{end}} - {{end}} - if err != nil { - {{if .PointType}} - {{else}} - if err == memcache.ErrNotFound { - err = nil - return - } - {{end}} - log.Errorv(c, log.KV("NAME", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - {{if .GetSimpleValue}} - r, err := {{.ConvertBytes2Value}} - if err != nil { - log.Errorv(c, log.KV("NAME", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - res = {{.ValueType}}(r) - {{end}} - return -} -` - -var _singleSetTemplate = ` -// NAME {{or .Comment "Set data to mc"}} -func (d *{{.StructName}}) NAME(c context.Context, id KEY, val VALUE {{.ExtraArgsType}}) (err error) { - {{if .PointType}} - if val == nil { - return - } - {{end}} - {{if .LenType}} - if len(val) == 0 { - return - } - {{end}} - key := {{.KeyMethod}}(id{{.ExtraArgs}}) - {{if .SimpleValue}} - bs := {{.ConvertValue2Bytes}} - item := 
&memcache.Item{Key: key, Value: bs, Expiration: {{.ExpireCode}}, Flags: {{.Encode}}} - {{else}} - item := &memcache.Item{Key: key, Object: val, Expiration: {{.ExpireCode}}, Flags: {{.Encode}}} - {{end}} - {{if .EnableNullCode}} - if {{.CheckNullCode}} { - item.Expiration = {{.ExpireNullCode}} - } - {{end}} - if err = d.mc.Set(c, item); err != nil { - log.Errorv(c, log.KV("NAME", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - return -} -` -var _singleAddTemplate = strings.Replace(_singleSetTemplate, "Set", "Add", -1) -var _singleReplaceTemplate = strings.Replace(_singleSetTemplate, "Set", "Replace", -1) - -var _singleDelTemplate = ` -// NAME {{or .Comment "delete data from mc"}} -func (d *{{.StructName}}) NAME(c context.Context, id KEY {{.ExtraArgsType}}) (err error) { - key := {{.KeyMethod}}(id{{.ExtraArgs}}) - if err = d.mc.Delete(c, key); err != nil { - if err == memcache.ErrNotFound { - err = nil - return - } - log.Errorv(c, log.KV("NAME", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - return -} -` diff --git a/tool/kratos-gen-mc/testdata/dao.go b/tool/kratos-gen-mc/testdata/dao.go deleted file mode 100644 index a9a0e9d8e..000000000 --- a/tool/kratos-gen-mc/testdata/dao.go +++ /dev/null @@ -1,93 +0,0 @@ -package testdata - -import ( - "context" - "fmt" - "time" - - "github.com/go-kratos/kratos/pkg/cache/memcache" - "github.com/go-kratos/kratos/pkg/container/pool" - xtime "github.com/go-kratos/kratos/pkg/time" -) - -// Dao . -type Dao struct { - mc *memcache.Memcache - demoExpire int32 -} - -// New new dao -func New() (d *Dao) { - cfg := &memcache.Config{ - Config: &pool.Config{ - Active: 10, - Idle: 5, - IdleTimeout: xtime.Duration(time.Second), - }, - Name: "test", - Proto: "tcp", - // Addr: "172.16.33.54:11214", - Addr: "127.0.0.1:11211", - DialTimeout: xtime.Duration(time.Second), - ReadTimeout: xtime.Duration(time.Second), - WriteTimeout: xtime.Duration(time.Second), - } - d = &Dao{ - mc: memcache.New(cfg), - demoExpire: int32(5), - } - return -} - -//go:generate kratos tool genmc -type _mc interface { - // mc: -key=demoKey - CacheDemos(c context.Context, keys []int64) (map[int64]*Demo, error) - // mc: -key=demoKey - CacheDemo(c context.Context, key int64) (*Demo, error) - // mc: -key=keyMid - CacheDemo1(c context.Context, key int64, mid int64) (*Demo, error) - // mc: -key=noneKey - CacheNone(c context.Context) (*Demo, error) - // mc: -key=demoKey - CacheString(c context.Context, key int64) (string, error) - - // mc: -key=demoKey -expire=d.demoExpire -encode=json - AddCacheDemos(c context.Context, values map[int64]*Demo) error - // mc: -key=demo2Key -expire=d.demoExpire -encode=json - AddCacheDemos2(c context.Context, values map[int64]*Demo, tp int64) error - // 这里也支持自定义注释 会替换默认的注释 - // mc: -key=demoKey -expire=d.demoExpire -encode=json|gzip - AddCacheDemo(c context.Context, key int64, value *Demo) error - // mc: -key=keyMid -expire=d.demoExpire -encode=gob - AddCacheDemo1(c context.Context, key int64, value *Demo, mid int64) error - // mc: -key=noneKey - AddCacheNone(c context.Context, value *Demo) error - // mc: -key=demoKey -expire=d.demoExpire - AddCacheString(c context.Context, key int64, value string) error - - // mc: -key=demoKey - DelCacheDemos(c context.Context, keys []int64) error - // mc: -key=demoKey - DelCacheDemo(c context.Context, key int64) error - // mc: -key=keyMid - DelCacheDemo1(c context.Context, key int64, mid int64) error - // mc: -key=noneKey - DelCacheNone(c context.Context) error -} - -func demoKey(id int64) string { - return 
fmt.Sprintf("art_%d", id) -} - -func demo2Key(id, tp int64) string { - return fmt.Sprintf("art_%d_%d", id, tp) -} - -func keyMid(id, mid int64) string { - return fmt.Sprintf("art_%d_%d", id, mid) -} - -func noneKey() string { - return "none" -} diff --git a/tool/kratos-gen-mc/testdata/dao_test.go b/tool/kratos-gen-mc/testdata/dao_test.go deleted file mode 100644 index 0bf8a0d59..000000000 --- a/tool/kratos-gen-mc/testdata/dao_test.go +++ /dev/null @@ -1,116 +0,0 @@ -package testdata - -import ( - "context" - "testing" -) - -func TestDemo(t *testing.T) { - d := New() - c := context.TODO() - art := &Demo{ID: 1, Title: "title"} - err := d.AddCacheDemo(c, art.ID, art) - if err != nil { - t.Errorf("err should be nil, get: %v", err) - t.FailNow() - } - art1, err := d.CacheDemo(c, art.ID) - if err != nil { - t.Errorf("err should be nil, get: %v", err) - t.FailNow() - } - if (art1.ID != art.ID) || (art.Title != art1.Title) { - t.Error("art not equal") - t.FailNow() - } - err = d.DelCacheDemo(c, art.ID) - if err != nil { - t.Errorf("err should be nil, get: %v", err) - t.FailNow() - } - art1, err = d.CacheDemo(c, art.ID) - if (art1 != nil) || (err != nil) { - t.Errorf("art %v, err: %v", art1, err) - t.FailNow() - } -} - -func TestNone(t *testing.T) { - d := New() - c := context.TODO() - art := &Demo{ID: 1, Title: "title"} - err := d.AddCacheNone(c, art) - if err != nil { - t.Errorf("err should be nil, get: %v", err) - t.FailNow() - } - art1, err := d.CacheNone(c) - if err != nil { - t.Errorf("err should be nil, get: %v", err) - t.FailNow() - } - if (art1.ID != art.ID) || (art.Title != art1.Title) { - t.Error("art not equal") - t.FailNow() - } - err = d.DelCacheNone(c) - if err != nil { - t.Errorf("err should be nil, get: %v", err) - t.FailNow() - } - art1, err = d.CacheNone(c) - if (art1 != nil) || (err != nil) { - t.Errorf("art %v, err: %v", art1, err) - t.FailNow() - } -} - -func TestDemos(t *testing.T) { - d := New() - c := context.TODO() - art1 := &Demo{ID: 1, Title: "title"} - art2 := &Demo{ID: 2, Title: "title"} - err := d.AddCacheDemos(c, map[int64]*Demo{1: art1, 2: art2}) - if err != nil { - t.Errorf("err should be nil, get: %v", err) - t.FailNow() - } - arts, err := d.CacheDemos(c, []int64{art1.ID, art2.ID}) - if err != nil { - t.Errorf("err should be nil, get: %v", err) - t.FailNow() - } - if (arts[1].Title != art1.Title) || (arts[2].Title != art2.Title) { - t.Error("art not equal") - t.FailNow() - } - err = d.DelCacheDemos(c, []int64{art1.ID, art2.ID}) - if err != nil { - t.Errorf("err should be nil, get: %v", err) - t.FailNow() - } - arts, err = d.CacheDemos(c, []int64{art1.ID, art2.ID}) - if (arts != nil) || (err != nil) { - t.Errorf("art %v, err: %v", art1, err) - t.FailNow() - } -} - -func TestString(t *testing.T) { - d := New() - c := context.TODO() - err := d.AddCacheString(c, 1, "abc") - if err != nil { - t.Errorf("err should be nil, get: %v", err) - t.FailNow() - } - res, err := d.CacheString(c, 1) - if err != nil { - t.Errorf("err should be nil, get: %v", err) - t.FailNow() - } - if res != "abc" { - t.Error("res wrong") - t.FailNow() - } -} diff --git a/tool/kratos-gen-mc/testdata/mc.cache.go b/tool/kratos-gen-mc/testdata/mc.cache.go deleted file mode 100644 index bfde3c273..000000000 --- a/tool/kratos-gen-mc/testdata/mc.cache.go +++ /dev/null @@ -1,305 +0,0 @@ -// Code generated by kratos tool genmc. DO NOT EDIT. - -/* - Package testdata is a generated mc cache package. 
- It is generated from: - type _mc interface { - // mc: -key=demoKey - CacheDemos(c context.Context, keys []int64) (map[int64]*Demo, error) - // mc: -key=demoKey - CacheDemo(c context.Context, key int64) (*Demo, error) - // mc: -key=keyMid - CacheDemo1(c context.Context, key int64, mid int64) (*Demo, error) - // mc: -key=noneKey - CacheNone(c context.Context) (*Demo, error) - // mc: -key=demoKey - CacheString(c context.Context, key int64) (string, error) - - // mc: -key=demoKey -expire=d.demoExpire -encode=json - AddCacheDemos(c context.Context, values map[int64]*Demo) error - // mc: -key=demo2Key -expire=d.demoExpire -encode=json - AddCacheDemos2(c context.Context, values map[int64]*Demo, tp int64) error - // 这里也支持自定义注释 会替换默认的注释 - // mc: -key=demoKey -expire=d.demoExpire -encode=json|gzip - AddCacheDemo(c context.Context, key int64, value *Demo) error - // mc: -key=keyMid -expire=d.demoExpire -encode=gob - AddCacheDemo1(c context.Context, key int64, value *Demo, mid int64) error - // mc: -key=noneKey - AddCacheNone(c context.Context, value *Demo) error - // mc: -key=demoKey -expire=d.demoExpire - AddCacheString(c context.Context, key int64, value string) error - - // mc: -key=demoKey - DelCacheDemos(c context.Context, keys []int64) error - // mc: -key=demoKey - DelCacheDemo(c context.Context, key int64) error - // mc: -key=keyMid - DelCacheDemo1(c context.Context, key int64, mid int64) error - // mc: -key=noneKey - DelCacheNone(c context.Context) error - } -*/ - -package testdata - -import ( - "context" - "fmt" - - "github.com/go-kratos/kratos/pkg/cache/memcache" - "github.com/go-kratos/kratos/pkg/log" -) - -var ( - _ _mc -) - -// CacheDemos get data from mc -func (d *Dao) CacheDemos(c context.Context, ids []int64) (res map[int64]*Demo, err error) { - l := len(ids) - if l == 0 { - return - } - keysMap := make(map[string]int64, l) - keys := make([]string, 0, l) - for _, id := range ids { - key := demoKey(id) - keysMap[key] = id - keys = append(keys, key) - } - replies, err := d.mc.GetMulti(c, keys) - if err != nil { - log.Errorv(c, log.KV("CacheDemos", fmt.Sprintf("%+v", err)), log.KV("keys", keys)) - return - } - for _, key := range replies.Keys() { - v := &Demo{} - err = replies.Scan(key, v) - if err != nil { - log.Errorv(c, log.KV("CacheDemos", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - if res == nil { - res = make(map[int64]*Demo, len(keys)) - } - res[keysMap[key]] = v - } - return -} - -// CacheDemo get data from mc -func (d *Dao) CacheDemo(c context.Context, id int64) (res *Demo, err error) { - key := demoKey(id) - res = &Demo{} - if err = d.mc.Get(c, key).Scan(res); err != nil { - res = nil - if err == memcache.ErrNotFound { - err = nil - } - } - if err != nil { - log.Errorv(c, log.KV("CacheDemo", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - return -} - -// CacheDemo1 get data from mc -func (d *Dao) CacheDemo1(c context.Context, id int64, mid int64) (res *Demo, err error) { - key := keyMid(id, mid) - res = &Demo{} - if err = d.mc.Get(c, key).Scan(res); err != nil { - res = nil - if err == memcache.ErrNotFound { - err = nil - } - } - if err != nil { - log.Errorv(c, log.KV("CacheDemo1", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - return -} - -// CacheNone get data from mc -func (d *Dao) CacheNone(c context.Context) (res *Demo, err error) { - key := noneKey() - res = &Demo{} - if err = d.mc.Get(c, key).Scan(res); err != nil { - res = nil - if err == memcache.ErrNotFound { - err = nil - return - } - } - if err != nil { - log.Errorv(c, 
log.KV("CacheNone", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - return -} - -// CacheString get data from mc -func (d *Dao) CacheString(c context.Context, id int64) (res string, err error) { - key := demoKey(id) - err = d.mc.Get(c, key).Scan(&res) - if err != nil { - if err == memcache.ErrNotFound { - err = nil - return - } - log.Errorv(c, log.KV("CacheString", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - return -} - -// AddCacheDemos Set data to mc -func (d *Dao) AddCacheDemos(c context.Context, values map[int64]*Demo) (err error) { - if len(values) == 0 { - return - } - for id, val := range values { - key := demoKey(id) - item := &memcache.Item{Key: key, Object: val, Expiration: d.demoExpire, Flags: memcache.FlagJSON} - if err = d.mc.Set(c, item); err != nil { - log.Errorv(c, log.KV("AddCacheDemos", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - } - return -} - -// AddCacheDemos2 Set data to mc -func (d *Dao) AddCacheDemos2(c context.Context, values map[int64]*Demo, tp int64) (err error) { - if len(values) == 0 { - return - } - for id, val := range values { - key := demo2Key(id, tp) - item := &memcache.Item{Key: key, Object: val, Expiration: d.demoExpire, Flags: memcache.FlagJSON} - if err = d.mc.Set(c, item); err != nil { - log.Errorv(c, log.KV("AddCacheDemos2", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - } - return -} - -// AddCacheDemo 这里也支持自定义注释 会替换默认的注释 -func (d *Dao) AddCacheDemo(c context.Context, id int64, val *Demo) (err error) { - if val == nil { - return - } - key := demoKey(id) - item := &memcache.Item{Key: key, Object: val, Expiration: d.demoExpire, Flags: memcache.FlagJSON | memcache.FlagGzip} - if err = d.mc.Set(c, item); err != nil { - log.Errorv(c, log.KV("AddCacheDemo", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - return -} - -// AddCacheDemo1 Set data to mc -func (d *Dao) AddCacheDemo1(c context.Context, id int64, val *Demo, mid int64) (err error) { - if val == nil { - return - } - key := keyMid(id, mid) - item := &memcache.Item{Key: key, Object: val, Expiration: d.demoExpire, Flags: memcache.FlagGOB} - if err = d.mc.Set(c, item); err != nil { - log.Errorv(c, log.KV("AddCacheDemo1", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - return -} - -// AddCacheNone Set data to mc -func (d *Dao) AddCacheNone(c context.Context, val *Demo) (err error) { - if val == nil { - return - } - key := noneKey() - item := &memcache.Item{Key: key, Object: val, Expiration: d.demoExpire, Flags: memcache.FlagJSON} - if err = d.mc.Set(c, item); err != nil { - log.Errorv(c, log.KV("AddCacheNone", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - return -} - -// AddCacheString Set data to mc -func (d *Dao) AddCacheString(c context.Context, id int64, val string) (err error) { - if len(val) == 0 { - return - } - key := demoKey(id) - bs := []byte(val) - item := &memcache.Item{Key: key, Value: bs, Expiration: d.demoExpire, Flags: memcache.FlagRAW} - if err = d.mc.Set(c, item); err != nil { - log.Errorv(c, log.KV("AddCacheString", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - return -} - -// DelCacheDemos delete data from mc -func (d *Dao) DelCacheDemos(c context.Context, ids []int64) (err error) { - if len(ids) == 0 { - return - } - for _, id := range ids { - key := demoKey(id) - if err = d.mc.Delete(c, key); err != nil { - if err == memcache.ErrNotFound { - err = nil - continue - } - log.Errorv(c, log.KV("DelCacheDemos", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - 
} - return -} - -// DelCacheDemo delete data from mc -func (d *Dao) DelCacheDemo(c context.Context, id int64) (err error) { - key := demoKey(id) - if err = d.mc.Delete(c, key); err != nil { - if err == memcache.ErrNotFound { - err = nil - return - } - log.Errorv(c, log.KV("DelCacheDemo", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - return -} - -// DelCacheDemo1 delete data from mc -func (d *Dao) DelCacheDemo1(c context.Context, id int64, mid int64) (err error) { - key := keyMid(id, mid) - if err = d.mc.Delete(c, key); err != nil { - if err == memcache.ErrNotFound { - err = nil - return - } - log.Errorv(c, log.KV("DelCacheDemo1", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - return -} - -// DelCacheNone delete data from mc -func (d *Dao) DelCacheNone(c context.Context) (err error) { - key := noneKey() - if err = d.mc.Delete(c, key); err != nil { - if err == memcache.ErrNotFound { - err = nil - return - } - log.Errorv(c, log.KV("DelCacheNone", fmt.Sprintf("%+v", err)), log.KV("key", key)) - return - } - return -} diff --git a/tool/kratos-gen-mc/testdata/model.pb.go b/tool/kratos-gen-mc/testdata/model.pb.go deleted file mode 100644 index fdf2ccfff..000000000 --- a/tool/kratos-gen-mc/testdata/model.pb.go +++ /dev/null @@ -1,328 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: model.proto - -/* - Package model is a generated protocol buffer package. - - It is generated from these files: - model.proto - - It has these top-level messages: - Demo -*/ -package testdata - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/gogo/protobuf/gogoproto" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Demo struct { - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"id"` - Title string `protobuf:"bytes,3,opt,name=Title,proto3" json:"title"` -} - -func (m *Demo) Reset() { *m = Demo{} } -func (m *Demo) String() string { return proto.CompactTextString(m) } -func (*Demo) ProtoMessage() {} -func (*Demo) Descriptor() ([]byte, []int) { return fileDescriptorModel, []int{0} } - -func init() { - proto.RegisterType((*Demo)(nil), "model.Demo") -} -func (m *Demo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Demo) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintModel(dAtA, i, uint64(m.ID)) - } - if len(m.Title) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintModel(dAtA, i, uint64(len(m.Title))) - i += copy(dAtA[i:], m.Title) - } - return i, nil -} - -func encodeVarintModel(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Demo) Size() (n int) { - var l int - _ = l - if m.ID != 0 { - n += 1 + sovModel(uint64(m.ID)) - } - l = len(m.Title) - if l > 0 { - n += 1 + l + sovModel(uint64(l)) - } - return n -} - -func sovModel(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozModel(x uint64) (n int) { - return sovModel(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Demo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowModel - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Demo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Demo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowModel - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Title", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowModel - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthModel - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Title = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipModel(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthModel - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - 
if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipModel(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowModel - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowModel - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowModel - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthModel - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowModel - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipModel(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthModel = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowModel = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("model.proto", fileDescriptorModel) } - -var fileDescriptorModel = []byte{ - // 166 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xce, 0xcd, 0x4f, 0x49, - 0xcd, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x05, 0x73, 0xa4, 0x74, 0xd3, 0x33, 0x4b, - 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xd3, 0xf3, 0xd3, 0xf3, 0xf5, 0xc1, 0xb2, 0x49, - 0xa5, 0x69, 0x60, 0x1e, 0x98, 0x03, 0x66, 0x41, 0x74, 0x29, 0x39, 0x71, 0xb1, 0x3b, 0x16, 0x95, - 0x64, 0x26, 0xe7, 0xa4, 0x0a, 0x89, 0x71, 0x31, 0x79, 0xba, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x30, - 0x3b, 0xb1, 0xbd, 0xba, 0x27, 0xcf, 0x94, 0x99, 0x12, 0xc4, 0xe4, 0xe9, 0x22, 0x24, 0xcf, 0xc5, - 0x1a, 0x92, 0x59, 0x92, 0x93, 0x2a, 0xc1, 0xac, 0xc0, 0xa8, 0xc1, 0xe9, 0xc4, 0xf9, 0xea, 0x9e, - 0x3c, 0x6b, 0x09, 0x48, 0x20, 0x08, 0x22, 0xee, 0x24, 0x71, 0xe2, 0xa1, 0x1c, 0xc3, 0x85, 0x87, - 0x72, 0x0c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8c, - 0xc7, 0x72, 0x0c, 0x49, 0x6c, 0x60, 0x4b, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x11, 0xa6, - 0xfa, 0x1c, 0xa9, 0x00, 0x00, 0x00, -} diff --git a/tool/kratos-gen-mc/testdata/model.proto b/tool/kratos-gen-mc/testdata/model.proto deleted file mode 100644 index fdc58a5cc..000000000 --- a/tool/kratos-gen-mc/testdata/model.proto +++ /dev/null @@ -1,14 +0,0 @@ -syntax = "proto3"; -package testdata; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; - -option (gogoproto.goproto_enum_prefix_all) = false; -option 
(gogoproto.goproto_getters_all) = false; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; - -message Demo { - int64 ID = 1 [(gogoproto.jsontag) = "id"]; - string Title = 3 [(gogoproto.jsontag) = "title"]; -} \ No newline at end of file diff --git a/tool/kratos-gen-project/main-packr.go b/tool/kratos-gen-project/main-packr.go deleted file mode 100644 index 6766d9531..000000000 --- a/tool/kratos-gen-project/main-packr.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !skippackr -// Code generated by github.com/gobuffalo/packr/v2. DO NOT EDIT. - -// You can use the "packr clean" command to clean up this, -// and any other packr generated files. -package main - -import _ "github.com/go-kratos/kratos/tool/kratos-gen-project/packrd" diff --git a/tool/kratos-gen-project/main.go b/tool/kratos-gen-project/main.go deleted file mode 100644 index 10686c885..000000000 --- a/tool/kratos-gen-project/main.go +++ /dev/null @@ -1,75 +0,0 @@ -package main - -import ( - "os" - "strings" - - "github.com/urfave/cli/v2" -) - -var appHelpTemplate = `{{if .Usage}}{{.Usage}}{{end}} - -USAGE: - kratos new {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} - -VERSION: - {{.Version}}{{end}}{{end}}{{if .Description}} - -DESCRIPTION: - {{.Description}}{{end}}{{if len .Authors}} - -AUTHOR{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}: - {{range $index, $author := .Authors}}{{if $index}} - {{end}}{{$author}}{{end}}{{end}}{{if .VisibleCommands}} - -OPTIONS: - {{range $index, $option := .VisibleFlags}}{{if $index}} - {{end}}{{$option}}{{end}}{{end}}{{if .Copyright}} - -COPYRIGHT: - {{.Copyright}}{{end}} -` - -func main() { - app := cli.NewApp() - app.Name = "" - app.Usage = "kratos new project creation tool" - app.UsageText = "project-name [options]" - app.HideVersion = true - app.CustomAppHelpTemplate = appHelpTemplate - app.Flags = []cli.Flag{ - &cli.StringFlag{ - Name: "d", - Value: "", - Usage: "specify the directory in which to create the project", - Destination: &p.path, - }, - &cli.BoolFlag{ - Name: "http", - Usage: "use http only, without grpc", - Destination: &p.onlyHTTP, - }, - &cli.BoolFlag{ - Name: "grpc", - Usage: "use grpc only, without http", - Destination: &p.onlyGRPC, - }, - &cli.BoolFlag{ - Name: "proto", - Usage: "deprecated flag, has no effect", - Destination: &p.none, - Hidden: true, - }, - } - if len(os.Args) < 2 || strings.HasPrefix(os.Args[1], "-") { - app.Run([]string{"-h"}) - return - } - p.Name = os.Args[1] - app.Action = runNew - args := append([]string{os.Args[0]}, os.Args[2:]...) 
- err := app.Run(args) - if err != nil { - panic(err) - } -} diff --git a/tool/kratos-gen-project/new.go b/tool/kratos-gen-project/new.go deleted file mode 100644 index c62349866..000000000 --- a/tool/kratos-gen-project/new.go +++ /dev/null @@ -1,61 +0,0 @@ -package main - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - - common "github.com/go-kratos/kratos/tool/pkg" - - "github.com/urfave/cli/v2" -) - -func runNew(ctx *cli.Context) (err error) { - if p.onlyGRPC && p.onlyHTTP { - p.onlyGRPC = false - p.onlyHTTP = false - } - if p.path != "" { - if p.path, err = filepath.Abs(p.path); err != nil { - return - } - p.path = filepath.Join(p.path, p.Name) - } else { - pwd, _ := os.Getwd() - p.path = filepath.Join(pwd, p.Name) - } - p.ModPrefix = strings.ReplaceAll(modPath(p.path), "\\", "/") - // create a project - if err := create(); err != nil { - return err - } - fmt.Printf("Project: %s\n", p.Name) - fmt.Printf("OnlyGRPC: %t\n", p.onlyGRPC) - fmt.Printf("OnlyHTTP: %t\n", p.onlyHTTP) - fmt.Printf("Directory: %s\n\n", p.path) - fmt.Println("Project created successfully.") - return nil -} - -func modPath(p string) string { - dir := filepath.Dir(p) - for { - if _, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil { - content, _ := ioutil.ReadFile(filepath.Join(dir, "go.mod")) - mod := common.RegexpReplace(`module\s+(?P<name>[\S]+)`, string(content), "$name") - name := strings.TrimPrefix(filepath.Dir(p), dir) - name = strings.TrimPrefix(name, string(os.PathSeparator)) - if name == "" { - return fmt.Sprintf("%s/", mod) - } - return fmt.Sprintf("%s/%s/", mod, name) - } - parent := filepath.Dir(dir) - if dir == parent { - return "" - } - dir = parent - } -} diff --git a/tool/kratos-gen-project/packrd/packed-packr.go b/tool/kratos-gen-project/packrd/packed-packr.go deleted file mode 100644 index 4ab920e93..000000000 --- a/tool/kratos-gen-project/packrd/packed-packr.go +++ /dev/null @@ -1,230 +0,0 @@ -// +build !skippackr -// Code generated by github.com/gobuffalo/packr/v2. DO NOT EDIT. - -// You can use the "packr2 clean" command to clean up this, -// and any other packr generated files. 
-package packrd - -import ( - "github.com/gobuffalo/packr/v2" - "github.com/gobuffalo/packr/v2/file/resolver" -) - -var _ = func() error { - const gk = "23f256f8ae67626d12a89a2afb9fd55c" - g := packr.New(gk, "") - hgr, err := resolver.NewHexGzip(map[string]string{ - "01947c6dd922af83263605f0243198da": "1f8b08000000000000ff8c51cb8e1331103cdb5fd1cc616547c1be202e2807362b7121e1fd013d7ead158f9d783a246834ff8e1c061471401cac96dc5d55dd554734070c0e2c16cee3702c954070d69992c95da9e39c75d3a476c57eaccec7eb3c4f93dae3e0e659c74cae664c7a28d6a58eb32e447a3ef7ca944187f2f25091caa897723c046d4af6fa88096dccff336f91b0c7d1e9f1943a2e39f7e76c60ef2e4f8f4282b03dacc653524f8f6b301e5a53c835b85adb2b55c2c4d977aced22667c8036bc2dd9c7d03e089655d4d70fbbf79c49cea2bfa1377f3aef1c89cef68aca903aa9bee501ebf88c493c18926f6ec32f3690636a52ac3a3ad7ccd97cc764e817c9364597e99ea3a98a07e3c33f896c0f9bdbe27b77d9fdf8f2698170663c6c969b61b2bddaa6323a2167fe1b3d2f7e090b2b8b45c267bcbcad144d72c2d015968c9b23adae215a88995ebf9220b012ac6eb9aa05f2b7af5a4370042d22f0b50c60fb3be59f010000ffff90ef4f8158020000", - "07b6987724e01017288e376e48d717a4": "1f8b08000000000000ff2a484cce4e4c4f55c8cd4f49cde1e2d2d757f02e4a2cc92f56c848cdc9c957c80673f4b84a2a0b526132c52545a5c9250ad55c9c1e6035c525459979e95cb55c10558e452599c939a948ca3c5d1432f34acc4cb8389df3f34a52f34a605a381d4b4b32f28be02600020000ffffcb8696638d000000", - "0800ba3c38cf5e9bf163d71215211839": "1f8b08000000000000ff94924d6fdb300c86cfd2af200c2c90324fbe17c865ed801dda2c6856ecacc8b423d49654594e1a18feef83fcd1a6cbba8f8325987a45be8f4827d5a32c111af407ad90525d3beb03304a12654dc0e790509214754828256e0749d7893b9b6f3c16fab9efbb4eac658d7d9f49a7a3f2bd636d027a23ab2c9736ea4a1df6ed4e285b67a5fdf4e865b04d366deeb1cc943545e66425736d62e9b7172a69cacc791becae2d32174e0e9b0c6b174e17b96d596176d41e13ca293d480f1b6f0f3a470f2b8871b1c6e316035be3311d039fb5c999c123733b7183b5dda23fa0e729c4d8723b3e15e79cd22c83e9777e4141a399d768f0ad0ad0512215c072021277d251924b0bb9b4e2465ada0fb9d6788c3540ced9409a1c3c86d61b418bd6a82861f97c8d036b603694822a206a184f01bd8f9ff53cd66e60058b49d65142a4ba0258cc66be7fbbbbedfa9492e8e80af294929e1255c00a1a715dd9062989f95630dff82183dab3443a57692583b646045b57490a8d908a53325a9ea8b6f2f415abca42e99d821c6b3bd89c80ce08f88b94a9f00cd3fc89eb714fc1e3132cdd4e0c927b7ce2c03cbaea04cba1f5e24b5c7f651f15aba17767324e495107b1f1da848225fbc1e0872619aa0c43fb1ec6c3fd2decea7fe378b8bffd5f94d760e3de63599c8b624387bc265cc10492c0c7178eb19d97b0adaffe0cbcd1a6041797b047f0d8d8d6c701bfc48dcadf73e29bde706017ad7a418b95617176dcf5719ee2a8cff9f9e46c184a50c3fa376f8396c51a3dfd190000ffff35636a08ee040000", - "09569e3bf810225e525835d677053b1b": "1f8b08000000000000ff5256702c2dc9c82fe25256084a2dcb4c2d4f2de202040000ffffbe75c21514000000", - "0a6bc3e72bc7017e18eb65cbe3145be8": "1f8b08000000000000ff9490b16eeb300c45e7f02b084ff67b86b4774bb70e0d32f407149991d9d8a240d34901c3ff5e38e9d031dd84ab7301dee33dfe3fcd3c74786325ce9f140dbcc78f9ef0915b4838860b4d38cd4a683de164f30979c22c76870c39df3fce9cc3f0e8398012e22524c28e01782ca28635ecaa6571efd21d95cefcb5aecbe20e61a475f59c8d3487c17741aa67b889f4ca919e66497dd212ffc2f766a502d85589ad9f4f2ecae893481ac86fbe2a6800bc4ff29228930623bc683099d0ee3ee13ce7886f996d5f4add60fd6f5f4a8b5b5a372d92aa68830bec4ac81cebade25e377b7517c41d55aedc91b6f8b3f457b21de60e746b719bf4781de8b62fa5696085ef000000ffffe36b7bafd7010000", - "0a985fe02d21f9d997ee7c01e1f60f82": 
"1f8b08000000000000ff9452c18adb30103d6bbe626a285890b5e935740fa5b4dd1c36a44d722e5a79ac0c912523c97417e37f2f52c2b2813db4175b9a79cfefcd1b8f4a9f95211c143b001e461f12d620aade2a5381a87cbc3cdbc8c6299b2ff1256a65cb31f1401580a8e6b979f4dd2e50cfcfcb32cfcd560db42c2dbb44c129db769cf186d3697a6ab41f5ae3efce41251fdbeb6b3c9b567bd7b7a3b2aa63f72f78eb4d0512a09f9c2e33d4126710d97cb35321522d41586f9a8de3543bb612db163b7a9a0c66d01a73b3e3703f8f2a9d16101df5144af5abf56ff9bdafabd7b93026155225415ccd5e042488df2bd499f87d727a851402aeefb1e3d2ff328e19c27da97fb847c736db15a372ac6b0a41825840e8cc19d4996a7d520e7d6cf625fc157e92202e8b68b63e71ff52eb155ef7d1ec373f1e8ebb9bfbcfe3e67053387cfbf57853d86c0f1244ef433112b3f2e73b0de2cdd486122abcc8e2c758ad3036fb14d8995aca4cfac3499f30962f6815e9bf1dac4108f11a5bce48bc9b3a3d73095d88fce3357b4b34d6972369efbad20a94a6e0deb1f270dc65a18e7a35d9b4bec12e39f805fe060000ffff29d8de170f030000", - "0babbb2bdef8b27731b8c6e569f74aa9": "1f8b08000000000000ff5256702c2dc9c82fe25256084a2dcb4c2d4f2de202040000ffffbe75c21514000000", - "0c5ba5fa8837f2def63c05cc79c53394": "1f8b08000000000000ff248eb16eeb300c45e7f02b2e3249ef05d6de31f5da0e457e80b61959b52d1a120d0f45ffbdb03391e0bd073c21e07fb7a579c09e8aa4fc2dbd5108788c82d7dd3862e1492aea5604360aaa6d1d5245563b4b8694cfe09932cf2fae215ab99f380a0656a2b4ac5a0c8e2ed7986cdcbaa6d72544d5384b387e5fc9138510f52d4a96c226980a9b5698ea7cead173cb3db2ec0fa9d6b23a0ff76f60bde1089cbf414ad1e2f143979573eadd4135f7c3c765d9dba3fa297b7b3fc7970ca99edbc7bbf7f44b7f010000ffff3c96d9e80d010000", - "17e45e0f3497be1d7b615abf452cafa3": "1f8b08000000000000ff4ccc41cac2400c05e0f5e414610e5092fefc28822bafe04e5c0c4d16814ea74ca35e5fa22ebacbfb1e2fb7cb6cbaf81dd252aae219b3686d19d2da9bb7c83ead195211e991783c0c34d0c027e691399ac9ed19cb7f826432c7c90449accc57abda1efe1912d52d43ea5a64c7e38f5fdd5c77fef77544c4f8b96b8eb46578070000ffff6f600d93b7000000", - "1a41024e60169dfd892e57d36264a7e3": "1f8b08000000000000ff2a484cce4e4c4f55c8cd4f49cde1e2d2d757f02e4a2cc92f56c848cdc9c957c80673f4b84a2a0b526132c52545a5c9250ad55c9c1e6035c525459979e95cb55c10558e452599c939a948ca3c5d1432f34acc4cb8389df3f34a52f34a605a381d4b4b32f28be02600020000ffffcb8696638d000000", - "1dc54a258d1b22873bd8a065ef5f5136": "1f8b08000000000000ff4ccc41ca02310c05e075738ad2030c497f7e14c1955770272eca248bc0743a74a25e5fa22ebacbfb1e2fb7cba2b2da1dc25aaac4734c2cb525085b6fd63cdbbc250885b97ba27c9870c2894e99329137b3e9d397ff084179f19310026b59ae5aa53dec3344ac7b82d0a5f0c0f9c7afae2683ff7d3dc618fde7d01c714ff00e0000ffff7dc54fc8b7000000", - "21390808875e3972b5fb30ef533f7595": "1f8b08000000000000ff8a0e4e2d2a4b2d8ae552505050484c492952b0555032d003432b0b03030325b04c49666e6a7e690948d2b058890b100000ffffb7699cdc36000000", - "240e2e99283e50af1f153a929e3d1116": "1f8b08000000000000ff64cdc14ec3300cc6f1337e0a1fe150274e18092fc13b646d3013cd3cb264429af6ee889603d54e3ef8f7d75774ea73c6eb95de52c9b71b802832b107a8f9ab1f6ac64740449443fbe87b1ab518d1e1b3a6a667b31e2ce9dc72bd67a2e654b5e9bebfe385c911ff9a878d99d35136ca935bd5f222ad62becd3137bc58b2640767f9d5beb8c871e79f77c394d88731c4c053f8eb54e64cff72a9a771d98fc4f004f0130000ffff1f9ac7b6f4000000", - "28edc15b141434022bafc70e865e28c3": 
"1f8b08000000000000ff8c51cb8e1331103cdb5fd1cc616547c1be202e2807362b7121e1fd013d7ead158f9d783a246834ff8e1c061471401cac96dc5d55dd554734070c0e2c16cee3702c954070d69992c95da9e39c75d3a476c57eaccec7eb3c4f93dae3e0e659c74cae664c7a28d6a58eb32e447a3ef7ca944187f2f25091caa897723c046d4af6fa88096dccff336f91b0c7d1e9f1943a2e39f7e76c60ef2e4f8f4282b03dacc653524f8f6b301e5a53c835b85adb2b55c2c4d977aced22667c8036bc2dd9c7d03e089655d4d70fbbf79c49cea2bfa1377f3aef1c89cef68aca903aa9bee501ebf88c493c18926f6ec32f3690636a52ac3a3ad7ccd97cc764e817c9364597e99ea3a98a07e3c33f896c0f9bdbe27b77d9fdf8f2698170663c6c969b61b2bddaa6323a2167fe1b3d2f7e090b2b8b45c267bcbcad144d72c2d015968c9b23adae215a88995ebf9220b012ac6eb9aa05f2b7af5a4370042d22f0b50c60fb3be59f010000ffff90ef4f8158020000", - "2b96476f98cc5a0058f5fdd12ab432e1": "1f8b08000000000000ff5cccc10ac2300cc6f173f214230f505a058782275fc19b78084b0e81761d5dd4d7970ac2d8f1ff83ef7bdcb2e9ec4f84998b0ed781444b2584a555afbd7d5a0881455aaf7418430c31a4cbe9389e09c124f7598a083cb9bdff21c6f96e45ebcb7fbb95109ab2ecedd3cc758ffd746b7125fc060000ffffe2756aadaa000000", - "2bec6c6ed25a4c6dcdb30f32376e8814": "1f8b08000000000000ff94924d6fdb300c86cfd2af200c2c90324fbe17c865ed801dda2c6856ecacc8b423d49654594e1a18feef83fcd1a6cbba8f8325987a45be8f4827d5a32c111af407ad90525d3beb03304a12654dc0e790509214754828256e0749d7893b9b6f3c16fab9efbb4eac658d7d9f49a7a3f2bd636d027a23ab2c9736ea4a1df6ed4e285b67a5fdf4e865b04d366deeb1cc943545e66425736d62e9b7172a69cacc791becae2d32174e0e9b0c6b174e17b96d596176d41e13ca293d480f1b6f0f3a470f2b8871b1c6e316035be3311d039fb5c999c123733b7183b5dda23fa0e729c4d8723b3e15e79cd22c83e9777e4141a399d768f0ad0ad0512215c072021277d251924b0bb9b4e2465ada0fb9d6788c3540ced9409a1c3c86d61b418bd6a82861f97c8d036b603694822a206a184f01bd8f9ff53cd66e60058b49d65142a4ba0258cc66be7fbbbbedfa9492e8e80af294929e1255c00a1a715dd9062989f95630dff82183dab3443a57692583b646045b57490a8d908a53325a9ea8b6f2f415abca42e99d821c6b3bd89c80ce08f88b94a9f00cd3fc89eb714fc1e3132cdd4e0c927b7ce2c03cbaea04cba1f5e24b5c7f651f15aba17767324e495107b1f1da848225fbc1e0872619aa0c43fb1ec6c3fd2decea7fe378b8bffd5f94d760e3de63599c8b624387bc265cc10492c0c7178eb19d97b0adaffe0cbcd1a6041797b047f0d8d8d6c701bfc48dcadf73e29bde706017ad7a418b95617176dcf5719ee2a8cff9f9e46c184a50c3fa376f8396c51a3dfd190000ffff35636a08ee040000", - "2c5e0d8471a941cbc1b0532422997425": "1f8b08000000000000ff8c90316ec3300c45e7f0148427bb35a4bd5bba756890a117506c4666638b024527050cdfbd70d2a163d6c7f781cfef3dbe9e661e7bbcb112a76fea0cbcc7af81f0c12d449cc2850a9659096d202c369f900b26b1bb64c8e97e38730ae323e70072e82e2112f60cc0531635ac61572d8bfb94fea874e69f755d16770813adabe764a4298cbe0f523de315d22b77f4b44bea07b35c01ecaac836cc27d7c9e4a3481cc96fff57d000781fe52d52220d4678d16052d0eefbc0794e1d7e24b67dce7583f5cb3ee716375a372d92aa68830bec7248dcd55bc4bd6f6bd47d107754b9724fdae25ff37f642be60e746bf140b77dce4d03eb6f000000ffff14b8957d9c010000", - "2d07c6785bf36355d22a324ee7064328": "1f8b08000000000000ffbc91bd4a04311485ebe429428a416118333fa82c04052dc546bbc5229b5cd9c0fc7973e32ac3bcbb44071dabadb4ca395fc2e1836c6f5a0f3d3d71669c43a1852cab8b4215aa283775adce25672ef4894f3100ce9b6934211c0674f335d9f1e4f7ebd3b3c919323b1360be22dfc1104997214330eef1a71ed013acfa68307c764d18216b07abef066bdacceed30de948cf97ddaec9d329394b73b70ff7428bed11adeaffb4f2232af59fabe48b8f4cbf69c9bf82d0a2529c79d7a6582e71d9155a367bc9d94b047cff6642564a754172066f6057b85e30a1e957b8f9c21f010000ffff57d7944148020000", - "2e6074098bbcae09bb1bbbaf5c9c65d7": 
"1f8b08000000000000ff8c90416bdc301085cf9a5f31ab931436f2bdc187a609edc2064a937b50e4b157d8968c24275bc2fef7328b5a96b69482f19899379fe7bdc5bad10e849d8d007e5e622aa840481743a1639120643fd9816bccfc2e948b0f83041072f0e5b0be1817e76688d763b225e6a696651c1a1743df2c76b29d0ff23ff495dd4cde1d246880579bb0c32b3e8e3f5d39628bf534736bdd38a4b8864e69807e0d0e9f289707eb839af1aac2cc83c677106cc23c5251ecac975b94c634c69c7f29f5e5fcb761d3453752ba76715e6226f3ddced3af85af3665521a44e7b37d9968efdd013fb418b3f94c85c2ab9277bbc78fb7fbfbe7fdeed317a971d3a294207c8f9bcb9d7710dca394789dfdf331eba2f4cdb9b96931f8e92c138b0dde294a49831027e087c36119a51413fc24b558b337bbe0cb5f5097a44a713d7292ecc9f7d86dd1f5db0a0bf4c6f9ded9a8f4cdbf49890afb98cdb73530caf515f887e967ac769fc8a62ebe9de52710319bfba32f2a51d170821f010000ffffa3cee358a7020000", - "2f0ce61d7311ba883ba12633891e37a5": "1f8b08000000000000ff549131aed4301086ebcc29064b4809cadafd4a5b2c9be6356f57c0058c7762ac4d3c96336141e815749414f420710cae8378d74089173d515933feff6fc6bf937517eb096d0a10c6c459b0864a398e421f4441a5fa511440a57c9077f35bed78349e37976c8527733bd2c59b48627272e66af39962b130fb81b4e7c146af397be373720a1a0063709fd25d871a1cc7496ed50ed59b6377dce2beeb707f3ae15da756f13d5d0f43a02818e98a0b06dd5a433f47f7745dbbdee38bb2832ead03c73ef81639c9845aebc5acbb60876392c0b1c1baa3918bb645ca9973839fa02a7cdceef086fb6f48e169ad1ba89c5b7d8bb498567c7d0b51bfb4eee233cff15c372df6a3e8d72987287dadce6172fc9ef2c7ad3167eaed3c88793ea9b6c4d13450857e453fdb610cc3b2569549e61c97729d0ad503fcebddd3f5e92db5734dbbc8e0618df0f1dbf73f5fbea27f753ae0ef5f3f1f7f7c06633c6f3d45ca5608cb67a2300f98320b3bdc6cd6a86d0a7aedc0df000000ffff6187772e30020000", - "344d3e90de055e3f730ec93c8d518442": "1f8b08000000000000ffa492516bdb3010c7dff3290ebde42572e384259b200f657d5cc9d614c6564250e55b7cd4d279d2c550d8871f72dd242bdda0ece5eefcf3dfe7fbeb04d0614cc4c1809a174b350248183b7298cc0800a0ba7fca00e4ed1e0df8c7f4b331ef8ac5805b8e929e351acaf97cba30390c0843479183c72027d5f5b7cd974fbb9bf5fa76f7f972b3f9babeb95a4566390a6ebfaf2e13d98b4d6dc3beb634bce8b839783cfb5d612e2a760f183506898f2d53104d81a4ba2faa41e5d87b1b2a03770300186bed6a1bad138c3aa1e86c1ae3ea203fde8f277fc8b869ac108773c9ee10c871853b47e341bc1d728db691dad5e81eccb18f60120377eae3f5959a80ea8fd0569e8282896a29ec33d5ba8d2cecb859896bd5f6f43579e48318984dd31152108c9d6d0c942718512261caca11c0e80955945eacb067afadaf5fe062befc60727883a3bea3760da9c1cff69541cfc73f7afaf7f41ebdb3aec6eae5257ce67f73312b676569ca1cdf60035dcd3927b19272f14b4d005470b92e67cb625a4c8bb27fc89dffdbe5ef000000ffff82fe973e7e030000", - "34564973d460772fafff3026df19a783": "1f8b08000000000000ff8a0e4e2d2a4b2d8ae552505050484c492952b0555032d003432b4b03030325b04c49666e6a7e690948d2b058890b100000ffffb9f9177936000000", - "360acf295547cde8b9a14a8d71f86bde": "1f8b08000000000000ff8a0e4e2d2a4b2d8ae552505050484c492952b0555032d003432b0b03030325b04c49666e6a7e690948d2b058890b100000ffffb7699cdc36000000", - "3deb9e59e02e8c5bc6ba59a66141508f": "1f8b08000000000000ff8a0e4e2d2a4b2d8ae552505050484c492952b0555032d003432b0b03030325b04c49666e6a7e690948d2b058890b100000ffffb7699cdc36000000", - "3fcb8456a23b643f9cd2b5df95e3e497": "1f8b08000000000000ffe2525608c9c82c56c82c56485408f1f7f55148c94f2ecd4dcd2bd15370cacfcfade34a49cdcd77ad28c82c4a55b055503232c950e202040000ffffc36ed62935000000", - "450e380f3fbd9366ee660538f5f4eb47": "1f8b08000000000000ffe2525608c9c82c56c82c56485408f1f7f55148c94f2ecd4dcd2bd15370cacfcfade34a49cdcd77ad28c82c4a55b055503232c950e202040000ffffc36ed62935000000", - "45b4b7a3d09ea9195265478d55919d04": 
"1f8b08000000000000ff5256567049cdcde7e2525656562833d433d033e032d45378b2a3ebf9aefd4fbbe6bf68de5b5151c105080000ffff79fbc41327000000", - "47c5a1821e7054758d52c8004a7033de": "1f8b08000000000000ff9c90314bc3401cc5f77c8aff96045c0ae2521c62724a30b994f46ee8943bd3030f1b2bc9298e6eba281d8a1d74d04941280e226a07bf4c9bda6f21676b23bab9dc71efeefddebd7f9a0bae04b4b9e23bbc10b09773d52d92b6c8ba75e3f0b760b831720802e26c040818cf954c3ba2606019004cb619c87d65d56a3650dcf4b730f2004704300d02702889121fbb310a1126e046e1d76e8e5f46b3fed0f7cc15cd5052750483239ea7bb3cb7d656ed8ab0b44c7ae7d3bbc7b9215332130cf45a289e1d54cf3db4e9d080804be3186192103f444de2840d8830d086a78bfcbd5b8694d72793dec5f87d58f65fcbc1f36cf0340f4cff1158fdfcec6a327afb816bc47ee8c42dd8462db0f4046dadea1393c7c9a29cb568691bf6376add2c2f4fa70f371fb7f766ddf80c0000ffff632acdfbc6010000", - "47f7fee79587764108735a0e37a3f1cb": "1f8b08000000000000ff9452c18adb30103d6bbe626a285890b5e935740fa5b4dd1c36a44d722e5a79ac0c912523c97417e37f2f52c2b2813db4175b9a79cfefcd1b8f4a9f95211c143b001e461f12d620aade2a5381a87cbc3cdbc8c6299b2ff1256a65cb31f1401580a8e6b979f4dd2e50cfcfcb32cfcd560db42c2dbb44c129db769cf186d3697a6ab41f5ae3efce41251fdbeb6b3c9b567bd7b7a3b2aa63f72f78eb4d0512a09f9c2e33d4126710d97cb35321522d41586f9a8de3543bb612db163b7a9a0c66d01a73b3e3703f8f2a9d16101df5144af5abf56ff9bdafabd7b93026155225415ccd5e042488df2bd499f87d727a851402aeefb1e3d2ff328e19c27da97fb847c736db15a372ac6b0a41825840e8cc19d4996a7d520e7d6cf625fc157e92202e8b68b63e71ff52eb155ef7d1ec373f1e8ebb9bfbcfe3e67053387cfbf57853d86c0f1244ef433112b3f2e73b0de2cdd486122abcc8e2c758ad3036fb14d8995aca4cfac3499f30962f6815e9bf1dac4108f11a5bce48bc9b3a3d73095d88fce3357b4b34d6972369efbad20a94a6e0deb1f270dc65a18e7a35d9b4bec12e39f805fe060000ffff29d8de170f030000", - "4bafcbfe7d0faa99ecff3bd27c22d83f": "1f8b08000000000000ff5256567049cdcde7e2525656562833d433d033e032d45378b2a3ebf9aefd4fbbe6bf68de5b5151c105080000ffff79fbc41327000000", - "4f6064f875a8245d8fccaa9a7fa8b27c": "1f8b08000000000000ff5492b18ed4301086ebcc530c96901294d8fd4a5b2c9be69adb15f0023eaf63ac4d3c96336141e80a3a4a0a7a90780c5e0771af81622f5a5d15cde4ffbf19ff76d4e6ac9d451d3df8295262aca1128602db8f2ca012c3c402a012cef3fbe5411a9a94a3ee9c34d3acae9f78762a5856291a75d1e96443b110b9d14a47a30e4e5272caa5680434004ae12ec6bb1e25180a335fab2d8a7787feb0c15ddfe3ee78c4bb5e64f1bdbdec476f0363b0175c3168720dc312cced776d0687afca0eb2b4f61406ef5aa4c8334a2957b3ecbd1e0f913d8506ebde4e54b42dda942835f819aac2c7cd16afb867430a4f4ad940654cf6add262caf8fa1aa27cadcdd9255ac2a96e5a1c26966f63f281875a9cfc6ce8834d9f364a9deca09791d5cb59b4258ea681ca0f19fd628bc18feb5a55b2bca4b096792a548ff0bf776f2fb7b3d4c634ed2a83c71ce1d3f71f7fbf7e43f7e6b8c73fbf7f3dfdfc024a39da381b6cd26cb15c2632d188311193c1aecb5177ddc3b43e1199dbf02f0000ffff98f7b62e35020000", - "50c9e9a3056e1c5519a3545096c6cd2e": "1f8b08000000000000ff4ccc41ca02310c05e075738ad2030c497f7e14c1955770272eca248bc0743a74a25e5fa22ebacbfb1e2fb7cba2b2da1dc25aaac4734c2cb525085b6fd63cdbbc250885b97ba27c9870c2894e99329137b3e9d397ff084179f19310026b59ae5aa53dec3344ac7b82d0a5f0c0f9c7afae2683ff7d3dc618fde7d01c714ff00e0000ffff7dc54fc8b7000000", - "519277b78f6f840309ab0251f7b54419": "1f8b08000000000000ff8a0e4e2d2a4b2d8ae552505050484c492952b0555032d003432b4b03030325b04c49666e6a7e690948d2b058890b100000ffffb9f9177936000000", - "56df8426de024acea28c177fdbae2d79": 
"1f8b08000000000000ff8c90416bdc301085cf9a5f31ab931436f2bdc187a609edc2064a937b50e4b157d8968c24275bc2fef7328b5a96b69482f19899379fe7bdc5bad10e849d8d007e5e622aa840481743a1639120643fd9816bccfc2e948b0f83041072f0e5b0be1817e76688d763b225e6a696651c1a1743df2c76b29d0ff23ff495dd4cde1d246880579bb0c32b3e8e3f5d39628bf534736bdd38a4b8864e69807e0d0e9f289707eb839af1aac2cc83c677106cc23c5251ecac975b94c634c69c7f29f5e5fcb761d3453752ba76715e6226f3ddced3af85af3665521a44e7b37d9968efdd013fb418b3f94c85c2ab9277bbc78fb7fbfbe7fdeed317a971d3a294207c8f9bcb9d7710dca394789dfdf331eba2f4cdb9b96931f8e92c138b0dde294a49831027e087c36119a51413fc24b558b337bbe0cb5f5097a44a713d7292ecc9f7d86dd1f5db0a0bf4c6f9ded9a8f4cdbf49890afb98cdb73530caf515f887e967ac769fc8a62ebe9de52710319bfba32f2a51d170821f010000ffffa3cee358a7020000", - "5700a76799052e1b831d6826b3f89cc6": "1f8b08000000000000ff9c544d8fdb36103d93bf62220486b4b049a42d7a50a143b36a8b005d77d1b4e780224732b114a95054ecc0d07f2f48c9bb7151a4467c10c9e1703ede7be341c827d12128e128d5fde07c809c924c3a1bf014324ab2a07bcc2825d9f9cc1e9c7af4d8ead33c9fcf6c2f7a9c67ae6d406f85e1bd5368e2934e87c3d430e97adeb9dd9317c18d7c5d86a78e4b210fc87becd3e6f6171e951e6f7277b6e5833042697b8bbf12413462443e7ebca981f1b3957cd0031a6d91b7c2ba2962758a58c1ffbe7e46f4cad17506f9517bcc6841e927e1e1d1bb4f5aa1870aa29dedf1f81e43bec7e316f678acdfa6e5cf084ada3ddc179472deb9b2438b5e0484252b04e70c74689b3052cea1162e320e89b95648a4e1f380c9fc6c823325f7c68d9817943c6adbe5329c60d505bb5fd60272f41ed07be70b4a3887268c25ecec644ca2acda2451b09f7dd0d2e0f95d5deedeccb09307944f1fa2db07e91456af5f55569bcde6357b5757d5ee0d25eb8b5cfe3be716b48a65fef84301f9dd55f8eda59239e2905a54c2b1a5bb781a839f6488ada9069e7f77e347c3eab794247d5d8ce9c012ba94f4f20bf78b70d9c3baa1242d70b72881fd9a164a14f6ee97d3a03dc682bfff6ead6b8f47b07804916a125681c73079cb683b5919af737f957f0bbdfc8fb45b50cda5f802721509dc826c2186c98b84c68a486c79491233d7c2e53e068d11125a29f1e5e69b72dfa9af268f7a966db73270a684d42fe0a4b961f5e445d0ce523253a2db14a082758ad96f18f24c0c83d13279b1e07a9315ec6fdb0b3f1e84f9eb8f87dff38d6cbbe2a7f4f45505569b987bed3cc55550c1460917cdaa294135dbe4a0f458828ffb5e9611194a16524b58498db464cb1f56116f5fc82d1776f3ab2e72d976eca5c70238a4fbf7289d5531c24c896ca102c5d29c5d085a45926c20d3371c103c8e6ef2125791ac8817b0ce68d2345b38ba8ced1228ce2e0cf1f3b530374df8972ad286cef49f000000ffff72c22a543f060000", - "596e234fc3c0d58dd019eff083dc005f": "1f8b08000000000000ff8c545d6bdb30147db67ec59da0452aa9bd87b1878c3c94ac94b2252b74dbcb184595af55117d18e5a64d08feef438e937e6c747d926cdf73cebde71edc2abd5006a1569131ebdb9808042bb88e81704d9c15bcf1c4192bf8765bce627d95b0b1ebaedb6ecbb9f2d875950d84292857f958a3cb0863e96e755beae82b134f1749515c56c3d12e4ca595bec3caa3ef2f6f42c4d054ad72aab6e12df52e1ace24635565e2d860c0a40861570014a30383c16b469b16e1c66be887689446d8b2a2aac0eb319c2e703359e0e62c119ce6ca894162c534377d96c86a8742c3e05439dd9d23b07566fbf8418238e92d2987e211604a31c97f0be0bab509277559a38fe7fd032bceeafaad72235089e0b9a2048129bd22cb8acfe890f6fcbdd67f667a42d831d6ac8286393ecca64282f01a4ef66b2d67c36504ba815c2764efc080ce46dfab94d356e8c6c001378da1b126bf2518565e7eff36fbca0ac90adbf41493c3970b24c10f588ade7159fe085ea5e59d72e25893fcd443de4d205897558b84b44a8115dd133e4d3baaa9b318e82947d616c7ba31af12790d13781c628e0f3b0c2b740393617ed87a5d4e5d5ca2901ddbc3f7368a1a4e6a15255cd9606653a169fd7213cffccf2d1cfaaf4bafcb6ba48c1ac1f1a1914b42bffd829b31f0d606c347f053b9158ee1d7efdb0da1e06d0c86cb11f49153646318c3fbeeef595d34e579d615f9e7107aadabcbf985dc75238eee25eff72b7b3f5eccb68b9b78ccd192920d2633ef2aa1f1545eb7c9066a0457896e8e6a9e839763f6270000ffffb4a95a77a6040000", - 
"59b3f5316758be0d8bdfb8150516578a": "1f8b08000000000000ff5492b18ed4301086ebcc530c96901294d8fd4a5b2c9be69adb15f0023eaf63ac4d3c96336141e80a3a4a0a7a90780c5e0771af81622f5a5d15cde4ffbf19ff76d4e6ac9d451d3df8295262aca1128602db8f2ca012c3c402a012cef3fbe5411a9a94a3ee9c34d3acae9f78762a5856291a75d1e96443b110b9d14a47a30e4e5272caa5680434004ae12ec6bb1e25180a335fab2d8a7787feb0c15ddfe3ee78c4bb5e64f1bdbdec476f0363b0175c3168720dc312cced776d0687afca0eb2b4f61406ef5aa4c8334a2957b3ecbd1e0f913d8506ebde4e54b42dda942835f819aac2c7cd16afb867430a4f4ad940654cf6add262caf8fa1aa27cadcdd9255ac2a96e5a1c26966f63f281875a9cfc6ce8834d9f364a9deca09791d5cb59b4258ea681ca0f19fd628bc18feb5a55b2bca4b096792a548ff0bf776f2fb7b3d4c634ed2a83c71ce1d3f71f7fbf7e43f7e6b8c73fbf7f3dfdfc024a39da381b6cd26cb15c2632d188311193c1aecb5177ddc3b43e1199dbf02f0000ffff98f7b62e35020000", - "5ba5532a198294fc750ba2f01006d39b": "1f8b08000000000000ff4ccccf4ac0300c06f073fb14a587a120b3dd86caa02878f5e84d446a1b59b15d679afae7eda53a7497e4fb7e813cdcc6002b3d72e6cb2a8c909833cd6ddc90db4ef470d9ab5ef57ad6e3a82e4ecf5fd1522e4f1e52bea690205732ba7408d6dfffd70f0c0487be592c3fdd1056e86276e62e3b1b3bb7b40b994a2f57e9793a6b5b72661d857710460c8ab3e0638b7a8ffb5b61e4b448cede2ae0d79f093928958ae40c3ec11d78dc99d0ae079e7ef93b0000fffff662d40406010000", - "5f24168e1abae155c721ef6fa09bc429": "1f8b08000000000000ff8c914daedb201485c7b08a5b0f22a852584095512b75d2a4557f1640c83541b1015d885dc9f2de2b6ca7cd9b3cbd0142e25cbe730e24636fc621384a9673dfa748050467e90ccd34a963bc7c276cfd9f799e2675323dceb336c9379cb3c6f972bd9f958dbd76f1c38d4c89596f5bba396d636875329db9f8d0bc613e60d194ac1e0d5d30345c72ae359c7084802398252364a40149f1f61e6cd5441e2ca4b3fa8c7dfcb96812c498e1fd4a51ebd91e90a8ae481226ce0643b525b3ad8317839f6268bdab4a812dbafaf5edf89533c9996f17cce19ff2058b686a2c5562df3552fd0ebda17c359dd8d9223f2ee3ef0e107c575d1961b953e06c7e62d9b26256ff6746f5153bdbba57416386c3a3c309c715b35eabdfa87ea0f3b920fd7f2031e6adad907bc8839595b2dff254b1182a42f287cdccff060000ffff732d9b0b27020000", - "66133ffef1d9468dc3772e2c76e5a8d9": "1f8b08000000000000ff8c525fabda30147f4e3ec5596097e4d2a51f60f8e42e32984ea67b1e599ac4609a53d2e804f1bb8f34451c838b0fe5c0e9f9fd6d07a58fca19e81452eafb0153064e09d318b3b964462961cee7c3e9b7d4d8b70e3f1d93ca38b6f3188eaed54a1f4c9b4ce747f6cc3946db0e2aa8cec767ee033a4605a5f614356ccc9f1f45880be0095e2751396d1ad016ca0d170d9894ca8349c09592b34a2514d1d641452c315aefca2ac36c45aed54089a0c4db09beb8bf5899cc59c565ec0313f267ec551a0f2af0179dc5e7e9fec302a20f458e24934f2952727b20d3b9f22c8337313f72ecbfafbff1176dddbb4485a37ab857306128d1161673f06b92cb80a3e1a2202af83637c73b78ed140ad8fae82a81ce17983f7469a44c01fcdfeebc855fcd9ca193d5c1172cd006d8ee6dcf1a60838f6e9a181dfb3f454027df0a212fbf55943b93f9f6eb6625aa0cff78166c921035e9ecfb6f000000ffffd9fe30409b020000", - "67cc9f9db028b8b028595a334de05c39": 
"1f8b08000000000000ff9c544d8fdb36103d93bf62220486b4b049a42d7a50a143b36a8b005d77d1b4e780224732b114a95054ecc0d07f2f48c9bb7151a4467c10c9e1703ede7be341c827d12128e128d5fde07c809c924c3a1bf014324ab2a07bcc2825d9f9cc1e9c7af4d8ead33c9fcf6c2f7a9c67ae6d406f85e1bd5368e2934e87c3d430e97adeb9dd9317c18d7c5d86a78e4b210fc87becd3e6f6171e951e6f7277b6e5833042697b8bbf12413462443e7ebca981f1b3957cd0031a6d91b7c2ba2962758a58c1ffbe7e46f4cad17506f9517bcc6841e927e1e1d1bb4f5aa1870aa29dedf1f81e43bec7e316f678acdfa6e5cf084ada3ddc179472deb9b2438b5e0484252b04e70c74689b3052cea1162e320e89b95648a4e1f380c9fc6c823325f7c68d9817943c6adbe5329c60d505bb5fd60272f41ed07be70b4a3887268c25ecec644ca2acda2451b09f7dd0d2e0f95d5deedeccb09307944f1fa2db07e91456af5f55569bcde6357b5757d5ee0d25eb8b5cfe3be716b48a65fef84301f9dd55f8eda59239e2905a54c2b1a5bb781a839f6488ada9069e7f77e347c3eab794247d5d8ce9c012ba94f4f20bf78b70d9c3baa1242d70b72881fd9a164a14f6ee97d3a03dc682bfff6ead6b8f47b07804916a125681c73079cb683b5919af737f957f0bbdfc8fb45b50cda5f802721509dc826c2186c98b84c68a486c79491233d7c2e53e068d11125a29f1e5e69b72dfa9af268f7a966db73270a684d42fe0a4b961f5e445d0ce523253a2db14a082758ad96f18f24c0c83d13279b1e07a9315ec6fdb0b3f1e84f9eb8f87dff38d6cbbe2a7f4f45505569b987bed3cc55550c1460917cdaa294135dbe4a0f458828ffb5e9611194a16524b58498db464cb1f56116f5fc82d1776f3ab2e72d976eca5c70238a4fbf7289d5531c24c896ca102c5d29c5d085a45926c20d3371c103c8e6ef2125791ac8817b0ce68d2345b38ba8ced1228ce2e0cf1f3b530374df8972ad286cef49f000000ffff72c22a543f060000", - "69622533133ad8df518fe01a940713ec": "1f8b08000000000000ffe2525608c9c82c56c82c56485408f1f7f55148c94f2ecd4dcd2bd15370cacfcfade34a49cdcd77ad28c82c4a55b055503232c950e202040000ffffc36ed62935000000", - "70005e6633581fbd609bfec1957cd292": "1f8b08000000000000ffe2525608c9c82c56c82c56485408f1f7f55148c94f2ecd4dcd2bd15370cacfcfade34a49cdcd77ad28c82c4a55b055503232c950e202040000ffffc36ed62935000000", - "710a08c10a56dd632085fe2723c2fdca": "1f8b08000000000000ff8c90416bdc301085cf9a5f31ab931436f2bdc187a609edc2064a937b50e4b157d8968c24275bc2fef7328b5a96b69482f19899379fe7bdc5bad10e849d8d007e5e622aa840481743a1639120643fd9816bccfc2e948b0f83041072f0e5b0be1817e76688d763b225e6a696651c1a1743df2c76b29d0ff23ff495dd4cde1d246880579bb0c32b3e8e3f5d39628bf534736bdd38a4b8864e69807e0d0e9f289707eb839af1aac2cc83c677106cc23c5251ecac975b94c634c69c7f29f5e5fcb761d3453752ba76715e6226f3ddced3af85af3665521a44e7b37d9968efdd013fb418b3f94c85c2ab9277bbc78fb7fbfbe7fdeed317a971d3a294207c8f9bcb9d7710dca394789dfdf331eba2f4cdb9b96931f8e92c138b0dde294a49831027e087c36119a51413fc24b558b337bbe0cb5f5097a44a713d7292ecc9f7d86dd1f5db0a0bf4c6f9ded9a8f4cdbf49890afb98cdb73530caf515f887e967ac769fc8a62ebe9de52710319bfba32f2a51d170821f010000ffffa3cee358a7020000", - "75fe549e4c6ec53880d899e12f303d4d": "1f8b08000000000000ff64cdc14ec3300cc6f1337e0a1fe150274e18092fc13b646d3013cd3cb264429af6ee889603d54e3ef8f7d75774ea73c6eb95de52c9b71b802832b107a8f9ab1f6ac64740449443fbe87b1ab518d1e1b3a6a667b31e2ce9dc72bd67a2e654b5e9bebfe385c911ff9a878d99d35136ca935bd5f222ad62becd3137bc58b2640767f9d5beb8c871e79f77c394d88731c4c053f8eb54e64cff72a9a771d98fc4f004f0130000ffff1f9ac7b6f4000000", - "79906f2027d86895882c07b014541636": "1f8b08000000000000ff2a2d4e55c82e4a2cc92f8e4f49cdcdb7e6e2f2f40b760d0a51f0f40bf157482c2ac94cce492dd648c84c49d0514828c92cc9494dd0540873f409750d56d030d45150078ba96b5a7301020000ffffb60f97194b000000", - "7a430917a7aa07186808d95369458c4e": 
"1f8b08000000000000ff4ccc41ca02310c05e075738ad2030c497f7e14c1955770272eca248bc0743a74a25e5fa22ebacbfb1e2fb7cba2b2da1dc25aaac4734c2cb525085b6fd63cdbbc250885b97ba27c9870c2894e99329137b3e9d397ff084179f19310026b59ae5aa53dec3344ac7b82d0a5f0c0f9c7afae2683ff7d3dc618fde7d01c714ff00e0000ffff7dc54fc8b7000000", - "7b2c3c96383cfe733a24c5ae20b892e8": "1f8b08000000000000ff8a0e4e2d2a4b2d8ae552505050484c492952b0555032d003432b0b03030325b04c49666e6a7e690948d2b058890b100000ffffb7699cdc36000000", - "7b856fed41dcc4df47597de661eb9042": "1f8b08000000000000ff4ccc41cac2400c05e0f5e414610e5092fefc28822bafe04e5c0c4d16814ea74ca35e5fa22ebacbfb1e2fb7cb6cbaf81dd252aae219b3686d19d2da9bb7c83ead195211e991783c0c34d0c027e691399ac9ed19cb7f826432c7c90449accc57abda1efe1912d52d43ea5a64c7e38f5fdd5c77fef77544c4f8b96b8eb46578070000ffff6f600d93b7000000", - "7ba5a6706dc47731b76a9fccdc393d28": "1f8b08000000000000ff5cccc10ac2300cc6f173f214230f505a058782275fc19b78084b0e81761d5dd4d7970ac2d8f1ff83ef7bdcb2e9ec4f84998b0ed781444b2584a555afbd7d5a0881455aaf7418430c31a4cbe9389e09c124f7598a083cb9bdff21c6f96e45ebcb7fbb95109ab2ecedd3cc758ffd746b7125fc060000ffffe2756aadaa000000", - "7c8bcba5a2dfe4aa3bb18e6ee7c91140": "1f8b08000000000000ffbc91bd4a04311485ebe429428a416118333fa82c04052dc546bbc5229b5cd9c0fc7973e32ac3bcbb44071dabadb4ca395fc2e1836c6f5a0f3d3d71669c43a1852cab8b4215aa283775adce25672ef4894f3100ce9b6934211c0674f335d9f1e4f7ebd3b3c919323b1360be22dfc1104997214330eef1a71ed013acfa68307c764d18216b07abef066bdacceed30de948cf97ddaec9d329394b73b70ff7428bed11adeaffb4f2232af59fabe48b8f4cbf69c9bf82d0a2529c79d7a6582e71d9155a367bc9d94b047cff6642564a754172066f6057b85e30a1e957b8f9c21f010000ffff57d7944148020000", - "7d1ce2614230661551a2feabde69da0a": "1f8b08000000000000ff94924d4b1b411cc6effb29feecc504cc2eb6b7040fc50a164a1bac3d9512c6e49f71eaeece3833591a246063c1b6d8c616517c39d8628b976aa160b156fc30c926f15b94996455a81e7a49e6e5799edfcc33ebfb901c6e774ede5e7c3ee9ed1cc2bde203e86dbf0621b9e6d0dd58e99c1e43d23aea9c7e4dd656bb9bc7dd56ab7ffe03e8747102924fab30353353342ec7f707a6d95a75e84b5acdfed272deec400ee6b4162aeffb158c31e002a5f228e73440afcc43df5acb3cc8cdd6aa5594ca77543dd2e4258c836bf7eeba05c761a1e052834b999eabcd5a23e594fb29d8ceecc48e3c3b740b573e0bbc92632874fd361511cc2751c435d18c47ea52666f4acaf38422241fcf92b5f79db3f3defa012c122158a5e12dc62815e351c334d9dd3849feb44661b806c9d997e45b13e2b15188ef80e73969540543ee2994312ba3178f0d388f1ecf4ce6a1bbbb94ac7de81ffd4adeec5d6cedf7cfb73abf77ba9b7bbdf50343f87970b1f20e32ede66eaedddccdb65fedb797bf3b0e17e6dc407929458c834b04730be956e6b22e6ff85fa2a8354a552241908571a8924061c17186e782fb1872587400a42843914534933e62daa937693acd82445d939182db048561c813529fc220e019fb3b8d0bff6f7d3afdf026f7704989ac3d31c0d5ad6d2a11cc331fa5b9e740004051e7c1f5e725d15ce5cc93f88ad44b7326cab59a86a1370a4ec3714254cad49ab26d8ad292451422129abec7e0d9b592432e5113aa0c71a4ca6598778dce859804ac4234e65d890b3526b1e28e3cbf89a1c475c8048f3446fa1fce0bc5234da8c594079a41dcdf000000ffff1ddc747cee030000", - "8853489c373c278aef6338d0962fd746": "1f8b08000000000000ffe2525608c9c82c56c82c56485408f1f7f55148c94f2ecd4dcd2bd15370cacfcfade34a49cdcd77ad28c82c4a55b055503232c950e202040000ffffc36ed62935000000", - "91045d262e11302068a4d78ac8a2912e": 
"1f8b08000000000000ffa492516bdb3010c7dff3290ebde42572e384259b200f657d5cc9d614c6564250e55b7cd4d279d2c550d8871f72dd242bdda0ece5eefcf3dfe7fbeb04d0614cc4c1809a174b350248183b7298cc0800a0ba7fca00e4ed1e0df8c7f4b331ef8ac5805b8e929e351acaf97cba30390c0843479183c72027d5f5b7cd974fbb9bf5fa76f7f972b3f9babeb95a4566390a6ebfaf2e13d98b4d6dc3beb634bce8b839783cfb5d612e2a760f183506898f2d53104d81a4ba2faa41e5d87b1b2a03770300186bed6a1bad138c3aa1e86c1ae3ea203fde8f277fc8b869ac108773c9ee10c871853b47e341bc1d728db691dad5e81eccb18f60120377eae3f5959a80ea8fd0569e8282896a29ec33d5ba8d2cecb859896bd5f6f43579e48318984dd31152108c9d6d0c942718512261caca11c0e80955945eacb067afadaf5fe062befc60727883a3bea3760da9c1cff69541cfc73f7afaf7f41ebdb3aec6eae5257ce67f73312b676569ca1cdf60035dcd3927b19272f14b4d005470b92e67cb625a4c8bb27fc89dffdbe5ef000000ffff82fe973e7e030000", - "91350508efec72331fa05cfb386fe8d3": "1f8b08000000000000ff8c545d6bdb30147db67ec59da0452aa9bd87b1878c3c94ac94b2252b74dbcb184595af55117d18e5a64d08feef438e937e6c747d926cdf73cebde71edc2abd5006a1569131ebdb9808042bb88e81704d9c15bcf1c4192bf8765bce627d95b0b1ebaedb6ecbb9f2d875950d84292857f958a3cb0863e96e755beae82b134f1749515c56c3d12e4ca595bec3caa3ef2f6f42c4d054ad72aab6e12df52e1ace24635565e2d860c0a40861570014a30383c16b469b16e1c66be887689446d8b2a2aac0eb319c2e703359e0e62c119ce6ca894162c534377d96c86a8742c3e05439dd9d23b07566fbf8418238e92d2987e211604a31c97f0be0bab509277559a38fe7fd032bceeafaad72235089e0b9a2048129bd22cb8acfe890f6fcbdd67f667a42d831d6ac8286393ecca64282f01a4ef66b2d67c36504ba815c2764efc080ce46dfab94d356e8c6c001378da1b126bf2518565e7eff36fbca0ac90adbf41493c3970b24c10f588ade7159fe085ea5e59d72e25893fcd443de4d205897558b84b44a8115dd133e4d3baaa9b318e82947d616c7ba31af12790d13781c628e0f3b0c2b740393617ed87a5d4e5d5ca2901ddbc3f7368a1a4e6a15255cd9606653a169fd7213cffccf2d1cfaaf4bafcb6ba48c1ac1f1a1914b42bffd829b31f0d606c347f053b9158ee1d7efdb0da1e06d0c86cb11f49153646318c3fbeeef595d34e579d615f9e7107aadabcbf985dc75238eee25eff72b7b3f5eccb68b9b78ccd192920d2633ef2aa1f1545eb7c9066a0457896e8e6a9e839763f6270000ffffb4a95a77a6040000", - "991f8014a5f12bbf1f53265c7d62afcb": "1f8b08000000000000ff84914fcbdb3c10c4cfda4fb1afe12d7670a543e9c5c587149edefa5048a16745dec822b624e4b59362fcdd8b9cf4cfa1d08358586646bf61a336576d093b07e0c618126309a230c133ddb90051b01ba90010c5bacacfa1fb92e8e2eedbb6aef2558fb46dca79a6e4f5a0264a8b330fb175dccf6769c2a86c787b4d9ac3a49e235ead1a822d409c47fca7d213ab9e39aaf3a03b1af5c4940aa80094b2a1b1e42969267c38904318f0e612017f8f84c71871e2341bc615c4b4183c3c29e5e93141e4703c9c47f9e2adf3041bc065f6065fe9768cb1fc9ba9c6fe0f4785a58e110fc7186b344398e853f6e790b2aa9152ca2fa46a0591852dbe39c6b882c8400d4e8ba941ec180df635880dc4ef94f69993f985e17b8d467b4303362d3eaf24bf39eebfba91c2cce5cfdd476dae3685d97799e1ddfb43bea33c9109beab400877d9c19a167b79ea67eec2cd9786efd5877dff5f8bde0dfba7620856bee4026591214f69f9e578142bff5faa622f9a93b70cba3396d55e2611cfc9c3063f020000ffff936887516d020000", - "9a2d5bf0f7319185c80da50bc1e9bb25": 
"1f8b08000000000000ff9c90314bc3401cc5f77c8aff96045c0ae2521c62724a30b994f46ee8943bd3030f1b2bc9298e6eba281d8a1d74d04941280e226a07bf4c9bda6f21676b23bab9dc71efeefddebd7f9a0bae04b4b9e23bbc10b09773d52d92b6c8ba75e3f0b760b831720802e26c040818cf954c3ba2606019004cb619c87d65d56a3650dcf4b730f2004704300d02702889121fbb310a1126e046e1d76e8e5f46b3fed0f7cc15cd5052750483239ea7bb3cb7d656ed8ab0b44c7ae7d3bbc7b9215332130cf45a289e1d54cf3db4e9d080804be3186192103f444de2840d8830d086a78bfcbd5b8694d72793dec5f87d58f65fcbc1f36cf0340f4cff1158fdfcec6a327afb816bc47ee8c42dd8462db0f4046dadea1393c7c9a29cb568691bf6376add2c2f4fa70f371fb7f766ddf80c0000ffff632acdfbc6010000", - "9b5deb2555f0ec914474cefd5111dc33": "1f8b08000000000000ff9452c18adb30103d6bbe626a285890b5e935740fa5b4dd1c36a44d722e5a79ac0c912523c97417e37f2f52c2b2813db4175b9a79cfefcd1b8f4a9f95211c143b001e461f12d620aade2a5381a87cbc3cdbc8c6299b2ff1256a65cb31f1401580a8e6b979f4dd2e50cfcfcb32cfcd560db42c2dbb44c129db769cf186d3697a6ab41f5ae3efce41251fdbeb6b3c9b567bd7b7a3b2aa63f72f78eb4d0512a09f9c2e33d4126710d97cb35321522d41586f9a8de3543bb612db163b7a9a0c66d01a73b3e3703f8f2a9d16101df5144af5abf56ff9bdafabd7b93026155225415ccd5e042488df2bd499f87d727a851402aeefb1e3d2ff328e19c27da97fb847c736db15a372ac6b0a41825840e8cc19d4996a7d520e7d6cf625fc157e92202e8b68b63e71ff52eb155ef7d1ec373f1e8ebb9bfbcfe3e67053387cfbf57853d86c0f1244ef433112b3f2e73b0de2cdd486122abcc8e2c758ad3036fb14d8995aca4cfac3499f30962f6815e9bf1dac4108f11a5bce48bc9b3a3d73095d88fce3357b4b34d6972369efbad20a94a6e0deb1f270dc65a18e7a35d9b4bec12e39f805fe060000ffff29d8de170f030000", - "a9cbe2e1020ca7be3617cd6d83fe88f5": "1f8b08000000000000ff5256702c2dc9c82fe25256084a2dcb4c2d4f2de202040000ffffbe75c21514000000", - "ad071804417ba1d9b2afe916414fa2e0": "1f8b08000000000000ffa492516bdb3010c7dff3290ebde42572e384259b200f657d5cc9d614c6564250e55b7cd4d279d2c550d8871f72dd242bdda0ece5eefcf3dfe7fbeb04d0614cc4c1809a174b350248183b7298cc0800a0ba7fca00e4ed1e0df8c7f4b331ef8ac5805b8e929e351acaf97cba30390c0843479183c72027d5f5b7cd974fbb9bf5fa76f7f972b3f9babeb95a4566390a6ebfaf2e13d98b4d6dc3beb634bce8b839783cfb5d612e2a760f183506898f2d53104d81a4ba2faa41e5d87b1b2a03770300186bed6a1bad138c3aa1e86c1ae3ea203fde8f277fc8b869ac108773c9ee10c871853b47e341bc1d728db691dad5e81eccb18f60120377eae3f5959a80ea8fd0569e8282896a29ec33d5ba8d2cecb859896bd5f6f43579e48318984dd31152108c9d6d0c942718512261caca11c0e80955945eacb067afadaf5fe062befc60727883a3bea3760da9c1cff69541cfc73f7afaf7f41ebdb3aec6eae5257ce67f73312b676569ca1cdf60035dcd3927b19272f14b4d005470b92e67cb625a4c8bb27fc89dffdbe5ef000000ffff82fe973e7e030000", - "ae6a3278e56d79251f55ea74dcd489a8": 
"1f8b08000000000000ff94924f4b5b4d14c6f7f3290e7763027a2fbeef2ec145b18285d2066b57a584313919a7bdf7ce3833090d12b0b1605b6c638b28fe59d8628b9b6aa160b156fc30c94de2b72833c955a1bae8269999f33ce737e7991b04901c6e774ede5c7c3ae9ed1cc29dc23de86dbf02a98411d0dd58e99c1e43d23aea9c7e49d656bb9bc7dd56ab7ffe1dd84c6112928fab303d3b5bb02e120403d35cb532f425ad667f6939672b3006f3c6489d0b8232d630141295f699102c44bf24a2c0594b221c9bab562aa87440743d36f4054c80e76aff7b794278248532e0316ee6ab73cec80413410a763bb7712bdf2dbdfc95cf01afe4184953bf4d45250f681c0b430d17b1be94b94969e9396508c987b364ed5de7ecbcb77e008b544a5e6ef88b35549a8bb86193ec6e9c24bf5ba3303c83e4ec73f2b509b5f151a8fd07be4fd256658c84af51d57809fddaf880f3e0e1ec540ebabb4bc9dafbfed1cfe4f5dec5d67eff7cabf36ba7bbb9d75b3fb0841f07172b6f21d36eee8eb59bbbd9f6cbfdf6f2374284b4f706268a2962023c2ab9974f4b99cbb8fce17f91a131a8749186611626a042438d794286f782bb18095824004a96a0c06396491f31cdd49fb2996641a1a9aa58c36d82fcb0c9235a9fc6301419f73b830bff6e7d3c73ff26f7f048cbacbb31c0d5d4ae2b95dcb71fa59d732000606872e005f62d024debc579dbc373c586c536f2a44148845adb3c53a8b36ba378cc20a6910d7a1c9e5c4b37120a0d65daa2462a424539cfea3ca8d19097a9c19ca770a1ca1596bd91a73731b4bc0e9914b1c1d8fcc579a6456c287398d2403368f7270000ffff07b2c244e7030000", - "b264df098d6769dab4f670828910fa60": "1f8b08000000000000ff52567049cdcde7e252565678b970e7f3d9eb9eaf6b78b2bb9bcb508f0b100000ffff24c85b041b000000", - "b2ab5cb21b5a72490b36cbd46cdfc102": "1f8b08000000000000ff52567049cdcde7e252565678b970e7f3d9eb9eaf6b78b2bb9bcb508f0b100000ffff24c85b041b000000", - "bac80cf86d8252f4a488b221da8cfe80": "1f8b08000000000000ffbc91bd4a04311485ebe429428a416118333fa82c04052dc546bbc5229b5cd9c0fc7973e32ac3bcbb44071dabadb4ca395fc2e1836c6f5a0f3d3d71669c43a1852cab8b4215aa283775adce25672ef4894f3100ce9b6934211c0674f335d9f1e4f7ebd3b3c919323b1360be22dfc1104997214330eef1a71ed013acfa68307c764d18216b07abef066bdacceed30de948cf97ddaec9d329394b73b70ff7428bed11adeaffb4f2232af59fabe48b8f4cbf69c9bf82d0a2529c79d7a6582e71d9155a367bc9d94b047cff6642564a754172066f6057b85e30a1e957b8f9c21f010000ffff57d7944148020000", - "bb9c567981605c082029d83839523a6c": "1f8b08000000000000ff94924d4b1b411cc6effb29feecc504cc2eb6b7040fc50a164a1bac3d9512c6e49f71eaeece3833591a246063c1b6d8c616517c39d8628b976aa160b156fc30c926f15b94996455a81e7a49e6e5799edfcc33ebfb901c6e774ede5e7c3ee9ed1cc2bde203e86dbf0621b9e6d0dd58e99c1e43d23aea9c7e4dd656bb9bc7dd56ab7ffe03e8747102924fab30353353342ec7f707a6d95a75e84b5acdfed272deec400ee6b4162aeffb158c31e002a5f228e73440afcc43df5acb3cc8cdd6aa5594ca77543dd2e4258c836bf7eeba05c761a1e052834b999eabcd5a23e594fb29d8ceecc48e3c3b740b573e0bbc92632874fd361511cc2751c435d18c47ea52666f4acaf38422241fcf92b5f79db3f3defa012c122158a5e12dc62815e351c334d9dd3849feb44661b806c9d997e45b13e2b15188ef80e73969540543ee2994312ba3178f0d388f1ecf4ce6a1bbbb94ac7de81ffd4adeec5d6cedf7cfb73abf77ba9b7bbdf50343f87970b1f20e32ede66eaedddccdb65fedb797bf3b0e17e6dc407929458c834b04730be956e6b22e6ff85fa2a8354a552241908571a8924061c17186e782fb1872587400a42843914534933e62daa937693acd82445d939182db048561c813529fc220e019fb3b8d0bff6f7d3afdf026f7704989ac3d31c0d5ad6d2a11cc331fa5b9e740004051e7c1f5e725d15ce5cc93f88ad44b7326cab59a86a1370a4ec3714254cad49ab26d8ad292451422129abec7e0d9b592432e5113aa0c71a4ca6598778dce859804ac4234e65d890b3526b1e28e3cbf89a1c475c8048f3446fa1fce0bc5234da8c594079a41dcdf000000ffff1ddc747cee030000", - "bcd84391b0e45f9d8641de9b0f6bdc98": 
"1f8b08000000000000ff2a2d4e55c82e4a2cc92f8e4f49cdcdb7e6e2f2f40b760d0a51f0f40bf157482c2ac94cce492dd648c84c49d0514828c92cc9494dd0540873f409750d56d030d45150078ba96b5a7301020000ffffb60f97194b000000", - "c00dc165bbf20a017bf129c26a1f8e7d": "1f8b08000000000000ff8a0e4e2d2a4b2d8ae552505050484c492952b0555032d003432b4b03030325b04c49666e6a7e690948d2b058890b100000ffffb9f9177936000000", - "c1d37027fe90bb3a8ed6d3857637d326": "1f8b08000000000000ff8c534d6edb3c105d8ba71813f802ea834bed5378d126818bb6f9419ca0cb82924734618a54a99163c0f0317a83163d40d73d4e7a8e8294edba08fab3304c7186efcd7b7c6c55b5541a6141d432669ad60702c132ee908ab8c919cbda12f866232ffdfc26606dd6dbed6623af5483db6da15ac359f6dbb27184c1295b347e8e36766a438bbe94956f0aed9f2d8322df15bbbf76a98bcabbba68955573e3fea5df7acd595636f0d7cebda4a2b46a8e8dea08036739632b15a05b55d096f21c1b3fc3b0c2c05851c0153e80c307505036d0a57dc9eade55b122ba5f4fe420d069e310fe2f1b79919663c010e2cf871c362c8b54826559556b281b391c3cf3ae363aee12ec94cbbbebcbb72ccb5966ea04313954a64882471d927c63792eef5da342b750569c54943f4feda309386323631690fae058b63dc2aa688019f88f3122af38a96afd47a0e8d6043a96ed044fa29873ac556f69c01c306276e42d6a13bd8e4ebdbcdc9571674f17253a43b7bea7c376ceb261cee153ce48051239db8fb065c3251c1f3c323d398df2c6382d5ae3e2181a4e2780721a7cdf0a5e1c02ca7396456d5a4e2fee042fbac8c4c7b0f00f773ed1e651f09e30a2898ad689eccc3bc235253653c3fbe1ae4f27314b037945eb71b4efa997d66b7911532178c41c1222fe5be53ca1e4290c6bf9a2f481de195acc4851df8974edc33afa682abc776aa58c55a5c5fda44501b8564d6b116a1fd2d386801f7aec0816cacded21c43f558aea89a465947292dead7c935e511cfc155aeb4f814fbd554ec3e3a72fdfbf7e7cfcfc0d46a3111fa77054f2f5ecfa4a2c07e96cfb230000ffffb57309ca64040000", - "c5dc8e7e6833a138a5738b8d0eaa4040": "1f8b08000000000000ff9c90314bc3401cc5f77c8aff96045c0ae2521c62724a30b994f46ee8943bd3030f1b2bc9298e6eba281d8a1d74d04941280e226a07bf4c9bda6f21676b23bab9dc71efeefddebd7f9a0bae04b4b9e23bbc10b09773d52d92b6c8ba75e3f0b760b831720802e26c040818cf954c3ba2606019004cb619c87d65d56a3650dcf4b730f2004704300d02702889121fbb310a1126e046e1d76e8e5f46b3fed0f7cc15cd5052750483239ea7bb3cb7d656ed8ab0b44c7ae7d3bbc7b9215332130cf45a289e1d54cf3db4e9d080804be3186192103f444de2840d8830d086a78bfcbd5b8694d72793dec5f87d58f65fcbc1f36cf0340f4cff1158fdfcec6a327afb816bc47ee8c42dd8462db0f4046dadea1393c7c9a29cb568691bf6376add2c2f4fa70f371fb7f766ddf80c0000ffff632acdfbc6010000", - "c84c3b26a959752d7eb926d02ce46ddd": 
"1f8b08000000000000ff9c544d8fdb36103d93bf62220486b4b049a42d7a50a143b36a8b005d77d1b4e780224732b114a95054ecc0d07f2f48c9bb7151a4467c10c9e1703ede7be341c827d12128e128d5fde07c809c924c3a1bf014324ab2a07bcc2825d9f9cc1e9c7af4d8ead33c9fcf6c2f7a9c67ae6d406f85e1bd5368e2934e87c3d430e97adeb9dd9317c18d7c5d86a78e4b210fc87becd3e6f6171e951e6f7277b6e5833042697b8bbf12413462443e7ebca981f1b3957cd0031a6d91b7c2ba2962758a58c1ffbe7e46f4cad17506f9517bcc6841e927e1e1d1bb4f5aa1870aa29dedf1f81e43bec7e316f678acdfa6e5cf084ada3ddc179472deb9b2438b5e0484252b04e70c74689b3052cea1162e320e89b95648a4e1f380c9fc6c823325f7c68d9817943c6adbe5329c60d505bb5fd60272f41ed07be70b4a3887268c25ecec644ca2acda2451b09f7dd0d2e0f95d5deedeccb09307944f1fa2db07e91456af5f55569bcde6357b5757d5ee0d25eb8b5cfe3be716b48a65fef84301f9dd55f8eda59239e2905a54c2b1a5bb781a839f6488ada9069e7f77e347c3eab794247d5d8ce9c012ba94f4f20bf78b70d9c3baa1242d70b72881fd9a164a14f6ee97d3a03dc682bfff6ead6b8f47b07804916a125681c73079cb683b5919af737f957f0bbdfc8fb45b50cda5f802721509dc826c2186c98b84c68a486c79491233d7c2e53e068d11125a29f1e5e69b72dfa9af268f7a966db73270a684d42fe0a4b961f5e445d0ce523253a2db14a082758ad96f18f24c0c83d13279b1e07a9315ec6fdb0b3f1e84f9eb8f87dff38d6cbbe2a7f4f45505569b987bed3cc55550c1460917cdaa294135dbe4a0f458828ffb5e9611194a16524b58498db464cb1f56116f5fc82d1776f3ab2e72d976eca5c70238a4fbf7289d5531c24c896ca102c5d29c5d085a45926c20d3371c103c8e6ef2125791ac8817b0ce68d2345b38ba8ced1228ce2e0cf1f3b530374df8972ad286cef49f000000ffff72c22a543f060000", - "ca7acd4efcb673fe4e3505be171752b7": "1f8b08000000000000ff8c9141ebd43010c5cf994f31169476a9ed41bc547a58416f2ec20a9e633a9b0ddb26613aedae947e7749bb2a8887ff210c0cefbdfc1e13b5b9694bd8390037c4c08239a8cc042ff4900c54266ea00c4065cb527d09dd57a68b7bacebb254273dd0bad6ce0bb1d77d3d12cfceec62ebe43afda84c186a1bdede584b18ebe788375bf7c1662f9079929aa3a9ef9a3bf2191400756d4363c9136b21dcb52821f478774c203f23e131461c852723b8801a678387275c75de2728cbd1e0610fdeb6c4b0025c266ff044f7638cf9ff8c25da7f5c05e63a463c1c632cd1f461a4cf292305e54589c49c5ee0620195842dbe39c6b8804a600d8eb329416d380dda12d40aea6f4afbcc493d94914789467b433d362d3e8f547d7772fde6060a93e4bf771fb5b9590e93ef12c3bbf78774c6ea4c26f8ae00a5dc65036b5ab4d5f93a4917ee3e37f2283e6cfb572d7ad76f9faa3ed8ea532a906709f2ccf31fc75e2c7f3d17d9563425af097463cc8bad0c934cec61855f010000ffffbbe7f8956c020000", - "d11e25ce5264ce1ed0f0863d28fa7113": "1f8b08000000000000ff8c525fabda30147f4e3ec5596097e4d2a51f60f8e42e32984ea67b1e599ac4609a53d2e804f1bb8f34451c838b0fe5c0e9f9fd6d07a58fca19e81452eafb0153064e09d318b3b964462961cee7c3e9b7d4d8b70e3f1d93ca38b6f3188eaed54a1f4c9b4ce747f6cc3946db0e2aa8cec767ee033a4605a5f614356ccc9f1f45880be0095e2751396d1ad016ca0d170d9894ca8349c09592b34a2514d1d641452c315aefca2ac36c45aed54089a0c4db09beb8bf5899cc59c565ec0313f267ec551a0f2af0179dc5e7e9fec302a20f458e24934f2952727b20d3b9f22c8337313f72ecbfafbff1176dddbb4485a37ab857306128d1161673f06b92cb80a3e1a2202af83637c73b78ed140ad8fae82a81ce17983f7469a44c01fcdfeebc855fcd9ca193d5c1172cd006d8ee6dcf1a60838f6e9a181dfb3f454027df0a212fbf55943b93f9f6eb6625aa0cff78166c921035e9ecfb6f000000ffffd9fe30409b020000", - "d27273567e8a0cdf9c0f35717b89ff8d": 
"1f8b08000000000000ff248eb16eeb300c45e7f02b2e3249ef05d6de31f5da0e457e80b61959b52d1a120d0f45ffbdb03391e0bd073c21e07fb7a579c09e8aa4fc2dbd5108788c82d7dd3862e1492aea5604360aaa6d1d5245563b4b8694cfe09932cf2fae215ab99f380a0656a2b4ac5a0c8e2ed7986cdcbaa6d72544d5384b387e5fc9138510f52d4a96c226980a9b5698ea7cead173cb3db2ec0fa9d6b23a0ff76f60bde1089cbf414ad1e2f143979573eadd4135f7c3c765d9dba3fa297b7b3fc7970ca99edbc7bbf7f44b7f010000ffff3c96d9e80d010000", - "d391eec375a18e4524decc911a55cb37": "1f8b08000000000000ff64cdc14ec3300cc6f1337e0a1fe150274e18092fc13b646d3013cd3cb264429af6ee889603d54e3ef8f7d75774ea73c6eb95de52c9b71b802832b107a8f9ab1f6ac64740449443fbe87b1ab518d1e1b3a6a667b31e2ce9dc72bd67a2e654b5e9bebfe385c911ff9a878d99d35136ca935bd5f222ad62becd3137bc58b2640767f9d5beb8c871e79f77c394d88731c4c053f8eb54e64cff72a9a771d98fc4f004f0130000ffff1f9ac7b6f4000000", - "d48e95472ef22ab5c6e49c918ff4c7f7": "1f8b08000000000000ff5cccc10ac2300cc6f173f214230f505a058782275fc19b78084b0e81761d5dd4d7970ac2d8f1ff83ef7bdcb2e9ec4f84998b0ed781444b2584a555afbd7d5a0881455aaf7418430c31a44b3a1dc7332198e4be4b118127b7f73fc438dfad687df96fb812425396bd7d9ab9eeb19f6e2dae84df000000ffffd60a1b01ab000000", - "d6bf2490901d7994259b59036e5ee787": "1f8b08000000000000ff8a0e4e2d2a4b2d8ae552505050484c492952b0555032d003432b4b03030325b04c49666e6a7e690948d2b058890b100000ffffb9f9177936000000", - "d9e24a8fbdaa656b04d082a11a16fab2": "1f8b08000000000000ff8c525fabda30147f4e3ec5596097e4d2a51f60f8e42e32984ea67b1e599ac4609a53d2e804f1bb8f34451c838b0fe5c0e9f9fd6d07a58fca19e81452eafb0153064e09d318b3b964462961cee7c3e9b7d4d8b70e3f1d93ca38b6f3188eaed54a1f4c9b4ce747f6cc3946db0e2aa8cec767ee033a4605a5f614356ccc9f1f45880be0095e2751396d1ad016ca0d170d9894ca8349c09592b34a2514d1d641452c315aefca2ac36c45aed54089a0c4db09beb8bf5899cc59c565ec0313f267ec551a0f2af0179dc5e7e9fec302a20f458e24934f2952727b20d3b9f22c8337313f72ecbfafbff1176dddbb4485a37ab857306128d1161673f06b92cb80a3e1a2202af83637c73b78ed140ad8fae82a81ce17983f7469a44c01fcdfeebc855fcd9ca193d5c1172cd006d8ee6dcf1a60838f6e9a181dfb3f454027df0a212fbf55943b93f9f6eb6625aa0cff78166c921035e9ecfb6f000000ffffd9fe30409b020000", - "dc9ddbcd9cdbb28fa4632260727f292c": "1f8b08000000000000ff248eb16eeb300c45e7f02b2e3249ef05d6de31f5da0e457e80b61959b52d1a120d0f45ffbdb03391e0bd073c21e07fb7a579c09e8aa4fc2dbd5108788c82d7dd3862e1492aea5604360aaa6d1d5245563b4b8694cfe09932cf2fae215ab99f380a0656a2b4ac5a0c8e2ed7986cdcbaa6d72544d5384b387e5fc9138510f52d4a96c226980a9b5698ea7cead173cb3db2ec0fa9d6b23a0ff76f60bde1089cbf414ad1e2f143979573eadd4135f7c3c765d9dba3fa297b7b3fc7970ca99edbc7bbf7f44b7f010000ffff3c96d9e80d010000", - "ddd2d8d002a9cac8b4c670cb20f96f3c": "1f8b08000000000000ff52567049cdcde7e252565678b970e7f3d9eb9eaf6b78b2bb9bcb508f0b100000ffff24c85b041b000000", - "deae3975927265bbe2e86537858be5d7": "1f8b08000000000000ff8c51cb8e1331103cdb5fd1cc616547c1be202e2807362b7121e1fd013d7ead158f9d783a246834ff8e1c061471401cac96dc5d55dd554734070c0e2c16cee3702c954070d69992c95da9e39c75d3a476c57eaccec7eb3c4f93dae3e0e659c74cae664c7a28d6a58eb32e447a3ef7ca944187f2f25091caa897723c046d4af6fa88096dccff336f91b0c7d1e9f1943a2e39f7e76c60ef2e4f8f4282b03dacc653524f8f6b301e5a53c835b85adb2b55c2c4d977aced22667c8036bc2dd9c7d03e089655d4d70fbbf79c49cea2bfa1377f3aef1c89cef68aca903aa9bee501ebf88c493c18926f6ec32f3690636a52ac3a3ad7ccd97cc764e817c9364597e99ea3a98a07e3c33f896c0f9bdbe27b77d9fdf8f2698170663c6c969b61b2bddaa6323a2167fe1b3d2f7e090b2b8b45c267bcbcad144d72c2d015968c9b23adae215a88995ebf9220b012ac6eb9aa05f2b7af5a4370042d22f0b50c60fb3be59f010000ffff90ef4f8158020000", - 
"e141e2657bd9e59d6741ab94b976c8d9": "1f8b08000000000000ff4ccccf4ac0300c06f073fb14a587a120b3dd86caa02878f5e84d446a1b59b15d679afae7eda53a7497e4fb7e813cdcc6002b3d72e6cb2a8c909833cd6ddc90db4ef470d9ab5ef57ad6e3a82e4ecf5fd1522e4f1e52bea690205732ba7408d6dfffd70f0c0487be592c3fdd1056e86276e62e3b1b3bb7b40b994a2f57e9793a6b5b72661d857710460c8ab3e0638b7a8ffb5b61e4b448cede2ae0d79f093928958ae40c3ec11d78dc99d0ae079e7ef93b0000fffff662d40406010000", - "e14aca05bd4c126f7220c63941266e17": "1f8b08000000000000ff5256567049cdcde7e2525656562833d433d033e032d45378b2a3ebf9aefd4fbbe6bf68de5b5151c105080000ffff79fbc41327000000", - "e2903d19bafc045ba9a6890ea9a43a52": "1f8b08000000000000ffe2525608c9c82c56c82c56485408f1f7f55148c94f2ecd4dcd2bd15370cacfcfade34a49cdcd77ad28c82c4a55b055503232c950e202040000ffffc36ed62935000000", - "e37cb3fefed2bf7aa681a848a63af049": "1f8b08000000000000ff4ccc41cac2400c05e0f5e414610e5092fefc28822bafe04e5c0c4d16814ea74ca35e5fa22ebacbfb1e2fb7cb6cbaf81dd252aae219b3686d19d2da9bb7c83ead195211e991783c0c34d0c027e691399ac9ed19cb7f826432c7c90449accc57abda1efe1912d52d43ea5a64c7e38f5fdd5c77fef77544c4f8b96b8eb46578070000ffff6f600d93b7000000", - "e4ce7202d112d82a5e014831a14d1ba5": "1f8b08000000000000ff2a484cce4e4c4f55c8cd4f49cde1e2d2d757f02e4a2cc92f56c848cdc9c957c80673f4b84a2a0b526132c52545a5c9250ad55c9c1e6035c525459979e95cb55c10558e452599c939a948ca3c5d1432f34acc4cb8389df3f34a52f34a605a381d4b4b32f28be02600020000ffffcb8696638d000000", - "e5e2572a852f6474077b82fb7793df25": "1f8b08000000000000ff8c534d6edb3c105d8ba71813f802ea834bed5378d126818bb6f9419ca0cb82924734618a54a99163c0f0317a83163d40d73d4e7a8e8294edba08fab3304c7186efcd7b7c6c55b5541a6141d432669ad60702c132ee908ab8c919cbda12f866232ffdfc26606dd6dbed6623af5483db6da15ac359f6dbb27184c1295b347e8e36766a438bbe94956f0aed9f2d8322df15bbbf76a98bcabbba68955573e3fea5df7acd595636f0d7cebda4a2b46a8e8dea08036739632b15a05b55d096f21c1b3fc3b0c2c05851c0153e80c307505036d0a57dc9eade55b122ba5f4fe420d069e310fe2f1b79919663c010e2cf871c362c8b54826559556b281b391c3cf3ae363aee12ec94cbbbebcbb72ccb5966ea04313954a64882471d927c63792eef5da342b750569c54943f4feda309386323631690fae058b63dc2aa688019f88f3122af38a96afd47a0e8d6043a96ed044fa29873ac556f69c01c306276e42d6a13bd8e4ebdbcdc9571674f17253a43b7bea7c376ceb261cee153ce48051239db8fb065c3251c1f3c323d398df2c6382d5ae3e2181a4e2780721a7cdf0a5e1c02ca7396456d5a4e2fee042fbac8c4c7b0f00f773ed1e651f09e30a2898ad689eccc3bc235253653c3fbe1ae4f27314b037945eb71b4efa997d66b7911532178c41c1222fe5be53ca1e4290c6bf9a2f481de195acc4851df8974edc33afa682abc776aa58c55a5c5fda44501b8564d6b116a1fd2d386801f7aec0816cacded21c43f558aea89a465947292dead7c935e511cfc155aeb4f814fbd554ec3e3a72fdfbf7e7cfcfc0d46a3111fa77054f2f5ecfa4a2c07e96cfb230000ffffb57309ca64040000", - "ece08322e1d8a72e1c014ac64ace18a9": 
"1f8b08000000000000ff94924f8bdb3010c5cf9a4f3135b4d8c1b50fa517971c52d8deba1452e859916765115b12e3719262fcdd8becf42f856e0f466678f39bf78689da9cb5256c1d801b6260c11c54668217ba49062a13375006a0b279ae3e86f613d393bb2dcb3c578f7aa065a99d1762affb7a24be38b389ad936e3a55260cb50dafcfac258cf5fd89675bf7c166a04e03fe53e949ea4e24d6a75eb734e85188b3674c487d1c4d7dd5dc92cfa000a86b1b1a4b9e580be1a64509a1c7ab6302f91a090f31e2283c19c119d47831b8bbe7aa8edb0b2ad9c1dd69a81ebc759e40598e0677dba455460c0bc0d3e40d3ed2f51063fe375289dd2f9812ed1f8c02731d23ee0e319668fa30d287444cd8bc289198d317b8984125e11e5f1d629c4125df0d8e1753825add36d8a5dfe4b3415b825a40fd04eeefc8945819b99568b437d463b3c7fb25545f9c749fdd406192fc7bedbd3667cb61f26db2f3e6ed2edd4a7524137c5b8052ee69f5d8ecd156c76e92365c7d6ee456bc5beb2ff6e85dbf0e557db0d543ca9267c9e4912f3f3ab68cf9cb4b91ad991379f98ddefd073d6de339f46d0379b1ae8a4926f6b0c0b7000000fffffef7822e2f030000", - "ef3624e258287c266b0982f7c486d27b": "1f8b08000000000000ff94924d6fdb300c86cfd2af200c2c90324fbe17c865ed801dda2c6856ecacc8b423d49654594e1a18feef83fcd1a6cbba8f8325987a45be8f4827d5a32c111af407ad90525d3beb03304a12654dc0e790509214754828256e0749d7893b9b6f3c16fab9efbb4eac658d7d9f49a7a3f2bd636d027a23ab2c9736ea4a1df6ed4e285b67a5fdf4e865b04d366deeb1cc943545e66425736d62e9b7172a69cacc791becae2d32174e0e9b0c6b174e17b96d596176d41e13ca293d480f1b6f0f3a470f2b8871b1c6e316035be3311d039fb5c999c123733b7183b5dda23fa0e729c4d8723b3e15e79cd22c83e9777e4141a399d768f0ad0ad0512215c072021277d251924b0bb9b4e2465ada0fb9d6788c3540ced9409a1c3c86d61b418bd6a82861f97c8d036b603694822a206a184f01bd8f9ff53cd66e60058b49d65142a4ba0258cc66be7fbbbbedfa9492e8e80af294929e1255c00a1a715dd9062989f95630dff82183dab3443a57692583b646045b57490a8d908a53325a9ea8b6f2f415abca42e99d821c6b3bd89c80ce08f88b94a9f00cd3fc89eb714fc1e3132cdd4e0c927b7ce2c03cbaea04cba1f5e24b5c7f651f15aba17767324e495107b1f1da848225fbc1e0872619aa0c43fb1ec6c3fd2decea7fe378b8bffd5f94d760e3de63599c8b624387bc265cc10492c0c7178eb19d97b0adaffe0cbcd1a6041797b047f0d8d8d6c701bfc48dcadf73e29bde706017ad7a418b95617176dcf5719ee2a8cff9f9e46c184a50c3fa376f8396c51a3dfd190000ffff35636a08ee040000", - "f00e9b617b63f2dc20a592cf7aeb0f8a": "1f8b08000000000000ff8c90316ec3300c45e7f0148427bb35a4bd5bba756890a11750644661638b024d27050cdfbdb0d3a163d6c7f781cfef3dbe9e26ee3bbcb312e76f8a06dee3d785f0c12d241cc295461c2725b40be168d30979c42cb649869cb7c39973e81f39075042bc8644d831000f45d4b0865d35cfee53baa3d2997f96659edd210cb42c9eb391e6d0fb2e48f58c3792de38d2d32ea94f5a6205b0ab12db653ab928834f22a927bffe5f4103e07d92b744993418e15583c988b6ed03e72947fcc86cfb52ea06eb977d292daeb46e5a2455d10667d8959039d66bc4bdaf6bd45d107754b97147dae25ff37f642de60e746ff140f77d294d030bfc060000ffffe672418b9d010000", - "f02c908885e29d26bfb4de9c33c5e4d1": 
"1f8b08000000000000ff8c545d6bdb30147db67ec59da0452aa9bd87b1878c3c94ac94b2252b74dbcb184595af55117d18e5a64d08feef438e937e6c747d926cdf73cebde71edc2abd5006a1569131ebdb9808042bb88e81704d9c15bcf1c4192bf8765bce627d95b0b1ebaedb6ecbb9f2d875950d84292857f958a3cb0863e96e755beae82b134f1749515c56c3d12e4ca595bec3caa3ef2f6f42c4d054ad72aab6e12df52e1ace24635565e2d860c0a40861570014a30383c16b469b16e1c66be887689446d8b2a2aac0eb319c2e703359e0e62c119ce6ca894162c534377d96c86a8742c3e05439dd9d23b07566fbf8418238e92d2987e211604a31c97f0be0bab509277559a38fe7fd032bceeafaad72235089e0b9a2048129bd22cb8acfe890f6fcbdd67f667a42d831d6ac8286393ecca64282f01a4ef66b2d67c36504ba815c2764efc080ce46dfab94d356e8c6c001378da1b126bf2518565e7eff36fbca0ac90adbf41493c3970b24c10f588ade7159fe085ea5e59d72e25893fcd443de4d205897558b84b44a8115dd133e4d3baaa9b318e82947d616c7ba31af12790d13781c628e0f3b0c2b740393617ed87a5d4e5d5ca2901ddbc3f7368a1a4e6a15255cd9606653a169fd7213cffccf2d1cfaaf4bafcb6ba48c1ac1f1a1914b42bffd829b31f0d606c347f053b9158ee1d7efdb0da1e06d0c86cb11f49153646318c3fbeeef595d34e579d615f9e7107aadabcbf985dc75238eee25eff72b7b3f5eccb68b9b78ccd192920d2633ef2aa1f1545eb7c9066a0457896e8e6a9e839763f6270000ffffb4a95a77a6040000", - "f33c13a2bed49bfb098a354f3fb3d263": "1f8b08000000000000ff4ccccf4ac0300c06f073fb14a587a120b3dd86caa02878f5e84d446a1b59b15d679afae7eda53a7497e4fb7e813cdcc6002b3d72e6cb2a8c909833cd6ddc90db4ef470d9ab5ef57ad6e3a82e4ecf5fd1522e4f1e52bea690205732ba7408d6dfffd70f0c0487be592c3fdd1056e86276e62e3b1b3bb7b40b994a2f57e9793a6b5b72661d857710460c8ab3e0638b7a8ffb5b61e4b448cede2ae0d79f093928958ae40c3ec11d78dc99d0ae079e7ef93b0000fffff662d40406010000", - "f44350500991f17cec6420d8300b4f3d": "1f8b08000000000000ff5cccc10ac2300cc6f173f214230f505a058782275fc19b78084b0e81761d5dd4d7970ac2d8f1ff83ef7bdcb2e9ec4f84998b0ed781444b2584a555afbd7d5a0881455aaf7418430c31a44b3a1dc7332198e4be4b118127b7f73fc438dfad687df96fb812425396bd7d9ab9eeb19f6e2dae84df000000ffffd60a1b01ab000000", - "f5322c592d97a1cdc37376bd6c2bda44": "1f8b08000000000000ff2a2d4e55c82e4a2cc92f8e4f49cdcdb7e6e2f2f40b760d0a51f0f40bf157482c2ac94cce492dd648c84c49d0514828c92cc9494dd0540873f409750d56d030d45150078ba96b5a7301020000ffffb60f97194b000000", - "f8007e4822e340ca304d913fcd9d783b": "1f8b08000000000000ff8c914daedb201485c7b08a5b0f22a852584095512b75d2a4557f1640c83541b1015d885dc9f2de2b6ca7cd9b3cbd0142e25cbe730e24636fc621384a9673dfa748050467e90ccd34a963bc7c276cfd9f799e2675323dceb336c9379cb3c6f972bd9f958dbd76f1c38d4c89596f5bba396d636875329db9f8d0bc613e60d194ac1e0d5d30345c72ae359c7084802398252364a40149f1f61e6cd5441e2ca4b3fa8c7dfcb96812c498e1fd4a51ebd91e90a8ae481226ce0643b525b3ad8317839f6268bdab4a812dbafaf5edf89533c9996f17cce19ff2058b686a2c5562df3552fd0ebda17c359dd8d9223f2ee3ef0e107c575d1961b953e06c7e62d9b26256ff6746f5153bdbba57416386c3a3c309c715b35eabdfa87ea0f3b920fd7f2031e6adad907bc8839595b2dff254b1182a42f287cdccff060000ffff732d9b0b27020000", - "fd07d10d1e374aa9cdc1a9b1189e2f05": "1f8b08000000000000ff5cccc10ac2300cc6f173f214230f505a058782275fc19b78084b0e81761d5dd4d7970ac2d8f1ff83ef7bdcb2e9ec4f84998b0ed781444b2584a555afbd7d5a0881455aaf7418430c31a4cbe9389e09c124f7598a083cb9bdff21c6f96e45ebcb7fbb95109ab2ecedd3cc758ffd746b7125fc060000ffffe2756aadaa000000", - "fda1b2ac9058723473a3ae92bef45575": "1f8b08000000000000ff5cccc10ac2300cc6f173f214230f505a058782275fc19b78084b0e81761d5dd4d7970ac2d8f1ff83ef7bdcb2e9ec4f84998b0ed781444b2584a555afbd7d5a0881455aaf7418430c31a44b3a1dc7332198e4be4b118127b7f73fc438dfad687df96fb812425396bd7d9ab9eeb19f6e2dae84df000000ffffd60a1b01ab000000", - }) - if err != nil { - panic(err) 
- } - g.DefaultResolver = hgr - - func() { - b := packr.New("all", "./templates/all") - b.SetResolver("CHANGELOG.md", packr.Pointer{ForwardBox: gk, ForwardPath: "4bafcbfe7d0faa99ecff3bd27c22d83f"}) - b.SetResolver("OWNERS", packr.Pointer{ForwardBox: gk, ForwardPath: "09569e3bf810225e525835d677053b1b"}) - b.SetResolver("README.md", packr.Pointer{ForwardBox: gk, ForwardPath: "b2ab5cb21b5a72490b36cbd46cdfc102"}) - b.SetResolver("api/api.proto", packr.Pointer{ForwardBox: gk, ForwardPath: "bb9c567981605c082029d83839523a6c"}) - b.SetResolver("api/client.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "4f6064f875a8245d8fccaa9a7fa8b27c"}) - b.SetResolver("cmd/main.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "9b5deb2555f0ec914474cefd5111dc33"}) - b.SetResolver("configs/application.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "70005e6633581fbd609bfec1957cd292"}) - b.SetResolver("configs/db.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "2d07c6785bf36355d22a324ee7064328"}) - b.SetResolver("configs/grpc.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "c00dc165bbf20a017bf129c26a1f8e7d"}) - b.SetResolver("configs/http.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "360acf295547cde8b9a14a8d71f86bde"}) - b.SetResolver("configs/memcache.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "e37cb3fefed2bf7aa681a848a63af049"}) - b.SetResolver("configs/redis.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "2b96476f98cc5a0058f5fdd12ab432e1"}) - b.SetResolver("go.mod.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "75fe549e4c6ec53880d899e12f303d4d"}) - b.SetResolver("internal/dao/dao.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "67cc9f9db028b8b028595a334de05c39"}) - b.SetResolver("internal/dao/dao_test.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "56df8426de024acea28c177fdbae2d79"}) - b.SetResolver("internal/dao/db.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "deae3975927265bbe2e86537858be5d7"}) - b.SetResolver("internal/dao/mc.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "91350508efec72331fa05cfb386fe8d3"}) - b.SetResolver("internal/dao/redis.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "66133ffef1d9468dc3772e2c76e5a8d9"}) - b.SetResolver("internal/dao/wire.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "d27273567e8a0cdf9c0f35717b89ff8d"}) - b.SetResolver("internal/di/app.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "ece08322e1d8a72e1c014ac64ace18a9"}) - b.SetResolver("internal/di/wire.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "0a6bc3e72bc7017e18eb65cbe3145be8"}) - b.SetResolver("internal/model/model.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "07b6987724e01017288e376e48d717a4"}) - b.SetResolver("internal/server/grpc/server.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "5f24168e1abae155c721ef6fa09bc429"}) - b.SetResolver("internal/server/http/server.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "c1d37027fe90bb3a8ed6d3857637d326"}) - b.SetResolver("internal/service/service.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "2bec6c6ed25a4c6dcdb30f32376e8814"}) - b.SetResolver("test/0_db.sql", packr.Pointer{ForwardBox: gk, ForwardPath: "9a2d5bf0f7319185c80da50bc1e9bb25"}) - b.SetResolver("test/1_data.sql", packr.Pointer{ForwardBox: gk, ForwardPath: "f5322c592d97a1cdc37376bd6c2bda44"}) - b.SetResolver("test/application.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "8853489c373c278aef6338d0962fd746"}) - b.SetResolver("test/db.toml", packr.Pointer{ForwardBox: gk, ForwardPath: 
"f33c13a2bed49bfb098a354f3fb3d263"}) - b.SetResolver("test/docker-compose.yaml", packr.Pointer{ForwardBox: gk, ForwardPath: "344d3e90de055e3f730ec93c8d518442"}) - b.SetResolver("test/grpc.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "519277b78f6f840309ab0251f7b54419"}) - b.SetResolver("test/http.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "21390808875e3972b5fb30ef533f7595"}) - b.SetResolver("test/memcache.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "50c9e9a3056e1c5519a3545096c6cd2e"}) - b.SetResolver("test/redis.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "d48e95472ef22ab5c6e49c918ff4c7f7"}) - }() - - - func() { - b := packr.New("grpc", "./templates/grpc") - b.SetResolver("CHANGELOG.md", packr.Pointer{ForwardBox: gk, ForwardPath: "45b4b7a3d09ea9195265478d55919d04"}) - b.SetResolver("OWNERS", packr.Pointer{ForwardBox: gk, ForwardPath: "0babbb2bdef8b27731b8c6e569f74aa9"}) - b.SetResolver("README.md", packr.Pointer{ForwardBox: gk, ForwardPath: "b264df098d6769dab4f670828910fa60"}) - b.SetResolver("api/api.proto", packr.Pointer{ForwardBox: gk, ForwardPath: "7d1ce2614230661551a2feabde69da0a"}) - b.SetResolver("api/client.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "2f0ce61d7311ba883ba12633891e37a5"}) - b.SetResolver("cmd/main.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "0a985fe02d21f9d997ee7c01e1f60f82"}) - b.SetResolver("configs/application.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "e2903d19bafc045ba9a6890ea9a43a52"}) - b.SetResolver("configs/db.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "bac80cf86d8252f4a488b221da8cfe80"}) - b.SetResolver("configs/grpc.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "34564973d460772fafff3026df19a783"}) - b.SetResolver("configs/memcache.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "17e45e0f3497be1d7b615abf452cafa3"}) - b.SetResolver("configs/redis.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "fd07d10d1e374aa9cdc1a9b1189e2f05"}) - b.SetResolver("go.mod.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "240e2e99283e50af1f153a929e3d1116"}) - b.SetResolver("internal/dao/dao.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "5700a76799052e1b831d6826b3f89cc6"}) - b.SetResolver("internal/dao/dao_test.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "2e6074098bbcae09bb1bbbaf5c9c65d7"}) - b.SetResolver("internal/dao/db.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "01947c6dd922af83263605f0243198da"}) - b.SetResolver("internal/dao/mc.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "f02c908885e29d26bfb4de9c33c5e4d1"}) - b.SetResolver("internal/dao/redis.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "d11e25ce5264ce1ed0f0863d28fa7113"}) - b.SetResolver("internal/dao/wire.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "0c5ba5fa8837f2def63c05cc79c53394"}) - b.SetResolver("internal/di/app.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "ca7acd4efcb673fe4e3505be171752b7"}) - b.SetResolver("internal/di/wire.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "f00e9b617b63f2dc20a592cf7aeb0f8a"}) - b.SetResolver("internal/model/model.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "1a41024e60169dfd892e57d36264a7e3"}) - b.SetResolver("internal/server/grpc/server.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "f8007e4822e340ca304d913fcd9d783b"}) - b.SetResolver("internal/service/service.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "ef3624e258287c266b0982f7c486d27b"}) - b.SetResolver("test/0_db.sql", packr.Pointer{ForwardBox: gk, ForwardPath: 
"47c5a1821e7054758d52c8004a7033de"}) - b.SetResolver("test/1_data.sql", packr.Pointer{ForwardBox: gk, ForwardPath: "79906f2027d86895882c07b014541636"}) - b.SetResolver("test/application.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "69622533133ad8df518fe01a940713ec"}) - b.SetResolver("test/db.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "5ba5532a198294fc750ba2f01006d39b"}) - b.SetResolver("test/docker-compose.yaml", packr.Pointer{ForwardBox: gk, ForwardPath: "ad071804417ba1d9b2afe916414fa2e0"}) - b.SetResolver("test/grpc.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "d6bf2490901d7994259b59036e5ee787"}) - b.SetResolver("test/memcache.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "7a430917a7aa07186808d95369458c4e"}) - b.SetResolver("test/redis.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "fda1b2ac9058723473a3ae92bef45575"}) - }() - - - func() { - b := packr.New("http", "./templates/http") - b.SetResolver("CHANGELOG.md", packr.Pointer{ForwardBox: gk, ForwardPath: "e14aca05bd4c126f7220c63941266e17"}) - b.SetResolver("OWNERS", packr.Pointer{ForwardBox: gk, ForwardPath: "a9cbe2e1020ca7be3617cd6d83fe88f5"}) - b.SetResolver("README.md", packr.Pointer{ForwardBox: gk, ForwardPath: "ddd2d8d002a9cac8b4c670cb20f96f3c"}) - b.SetResolver("api/api.proto", packr.Pointer{ForwardBox: gk, ForwardPath: "ae6a3278e56d79251f55ea74dcd489a8"}) - b.SetResolver("api/client.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "59b3f5316758be0d8bdfb8150516578a"}) - b.SetResolver("cmd/main.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "47f7fee79587764108735a0e37a3f1cb"}) - b.SetResolver("configs/application.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "450e380f3fbd9366ee660538f5f4eb47"}) - b.SetResolver("configs/db.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "7c8bcba5a2dfe4aa3bb18e6ee7c91140"}) - b.SetResolver("configs/http.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "3deb9e59e02e8c5bc6ba59a66141508f"}) - b.SetResolver("configs/memcache.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "7b856fed41dcc4df47597de661eb9042"}) - b.SetResolver("configs/redis.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "7ba5a6706dc47731b76a9fccdc393d28"}) - b.SetResolver("go.mod.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "d391eec375a18e4524decc911a55cb37"}) - b.SetResolver("internal/dao/dao.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "c84c3b26a959752d7eb926d02ce46ddd"}) - b.SetResolver("internal/dao/dao_test.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "710a08c10a56dd632085fe2723c2fdca"}) - b.SetResolver("internal/dao/db.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "28edc15b141434022bafc70e865e28c3"}) - b.SetResolver("internal/dao/mc.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "596e234fc3c0d58dd019eff083dc005f"}) - b.SetResolver("internal/dao/redis.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "d9e24a8fbdaa656b04d082a11a16fab2"}) - b.SetResolver("internal/dao/wire.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "dc9ddbcd9cdbb28fa4632260727f292c"}) - b.SetResolver("internal/di/app.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "991f8014a5f12bbf1f53265c7d62afcb"}) - b.SetResolver("internal/di/wire.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "2c5e0d8471a941cbc1b0532422997425"}) - b.SetResolver("internal/model/model.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "e4ce7202d112d82a5e014831a14d1ba5"}) - b.SetResolver("internal/server/http/server.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: 
"e5e2572a852f6474077b82fb7793df25"}) - b.SetResolver("internal/service/service.go.tmpl", packr.Pointer{ForwardBox: gk, ForwardPath: "0800ba3c38cf5e9bf163d71215211839"}) - b.SetResolver("test/0_db.sql", packr.Pointer{ForwardBox: gk, ForwardPath: "c5dc8e7e6833a138a5738b8d0eaa4040"}) - b.SetResolver("test/1_data.sql", packr.Pointer{ForwardBox: gk, ForwardPath: "bcd84391b0e45f9d8641de9b0f6bdc98"}) - b.SetResolver("test/application.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "3fcb8456a23b643f9cd2b5df95e3e497"}) - b.SetResolver("test/db.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "e141e2657bd9e59d6741ab94b976c8d9"}) - b.SetResolver("test/docker-compose.yaml", packr.Pointer{ForwardBox: gk, ForwardPath: "91045d262e11302068a4d78ac8a2912e"}) - b.SetResolver("test/http.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "7b2c3c96383cfe733a24c5ae20b892e8"}) - b.SetResolver("test/memcache.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "1dc54a258d1b22873bd8a065ef5f5136"}) - b.SetResolver("test/redis.toml", packr.Pointer{ForwardBox: gk, ForwardPath: "f44350500991f17cec6420d8300b4f3d"}) - }() - - return nil -}() diff --git a/tool/kratos-gen-project/project.go b/tool/kratos-gen-project/project.go deleted file mode 100644 index b9025d180..000000000 --- a/tool/kratos-gen-project/project.go +++ /dev/null @@ -1,96 +0,0 @@ -package main - -import ( - "bytes" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strings" - "text/template" - - "github.com/gobuffalo/packr/v2" -) - -// project project config -type project struct { - // project name - Name string - // mod prefix - ModPrefix string - // project dir - path string - none bool - onlyGRPC bool - onlyHTTP bool -} - -var p project - -//go:generate packr2 -func create() (err error) { - box := packr.New("all", "./templates/all") - if p.onlyHTTP { - box = packr.New("http", "./templates/http") - } else if p.onlyGRPC { - box = packr.New("grpc", "./templates/grpc") - } - if err = os.MkdirAll(p.path, 0755); err != nil { - return - } - for _, name := range box.List() { - if p.ModPrefix != "" && name == "go.mod.tmpl" { - continue - } - tmpl, _ := box.FindString(name) - i := strings.LastIndex(name, string(os.PathSeparator)) - if i > 0 { - dir := name[:i] - if err = os.MkdirAll(filepath.Join(p.path, dir), 0755); err != nil { - return - } - } - if strings.HasSuffix(name, ".tmpl") { - name = strings.TrimSuffix(name, ".tmpl") - } - if err = write(filepath.Join(p.path, name), tmpl); err != nil { - return - } - } - - if err = generate("./..."); err != nil { - return - } - if err = generate("./internal/dao/wire.go"); err != nil { - return - } - return -} - -func generate(path string) error { - cmd := exec.Command("go", "generate", "-x", path) - cmd.Dir = p.path - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - return cmd.Run() -} - -func write(path, tpl string) (err error) { - data, err := parse(tpl) - if err != nil { - return - } - return ioutil.WriteFile(path, data, 0644) -} - -func parse(s string) ([]byte, error) { - t, err := template.New("").Parse(s) - if err != nil { - return nil, err - } - var buf bytes.Buffer - if err = t.Execute(&buf, p); err != nil { - return nil, err - } - return buf.Bytes(), nil -} diff --git a/tool/kratos-gen-project/templates/all/CHANGELOG.md b/tool/kratos-gen-project/templates/all/CHANGELOG.md deleted file mode 100644 index c39acc0e2..000000000 --- a/tool/kratos-gen-project/templates/all/CHANGELOG.md +++ /dev/null @@ -1,4 +0,0 @@ -## Demo - -### v1.0.0 -1. 
Launched feature xxx diff --git a/tool/kratos-gen-project/templates/all/OWNERS b/tool/kratos-gen-project/templates/all/OWNERS deleted file mode 100644 index c1e28c554..000000000 --- a/tool/kratos-gen-project/templates/all/OWNERS +++ /dev/null @@ -1,2 +0,0 @@ -# Author -# Reviewer diff --git a/tool/kratos-gen-project/templates/all/README.md b/tool/kratos-gen-project/templates/all/README.md deleted file mode 100644 index e43f93fc3..000000000 --- a/tool/kratos-gen-project/templates/all/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# Demo - -## Project Introduction -1. diff --git a/tool/kratos-gen-project/templates/all/api/api.proto b/tool/kratos-gen-project/templates/all/api/api.proto deleted file mode 100644 index 0ba83f9f5..000000000 --- a/tool/kratos-gen-project/templates/all/api/api.proto +++ /dev/null @@ -1,34 +0,0 @@ -// The proto file that defines the project API; it can describe both gRPC and HTTP APIs -// protobuf file references: -// - https://developers.google.com/protocol-buffers/ -syntax = "proto3"; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -import "google/protobuf/empty.proto"; -import "google/api/annotations.proto"; - -// The package name uses the {appid}.{version} form, where version looks like v1, v2 .. -package demo.service.v1; - -// NOTE: please delete these useless comments when you are done (゜-゜)つロ - -option go_package = "api"; -option (gogoproto.goproto_getters_all) = false; - -service Demo { - rpc Ping(.google.protobuf.Empty) returns (.google.protobuf.Empty); - rpc SayHello(HelloReq) returns (.google.protobuf.Empty); - rpc SayHelloURL(HelloReq) returns (HelloResp) { - option (google.api.http) = { - get: "/kratos-demo/say_hello" - }; - }; -} - -message HelloReq { - string name = 1 [(gogoproto.moretags) = 'form:"name" validate:"required"']; -} - -message HelloResp { - string Content = 1 [(gogoproto.jsontag) = 'content']; -} diff --git a/tool/kratos-gen-project/templates/all/api/client.go.tmpl b/tool/kratos-gen-project/templates/all/api/client.go.tmpl deleted file mode 100644 index f3a3b2083..000000000 --- a/tool/kratos-gen-project/templates/all/api/client.go.tmpl +++ /dev/null @@ -1,25 +0,0 @@ -package api -import ( - "context" - "fmt" - - "github.com/go-kratos/kratos/pkg/net/rpc/warden" - - "google.golang.org/grpc" -) - -// AppID . -const AppID = "TODO: ADD APP ID" - -// NewClient new grpc client -func NewClient(cfg *warden.ClientConfig, opts ...grpc.DialOption) (DemoClient, error) { - client := warden.NewClient(cfg, opts...)
- cc, err := client.Dial(context.Background(), fmt.Sprintf("discovery://default/%s", AppID)) - if err != nil { - return nil, err - } - return NewDemoClient(cc), nil -} - -// 生成 gRPC 代码 -//go:generate kratos tool protoc --grpc --bm api.proto diff --git a/tool/kratos-gen-project/templates/all/cmd/main.go.tmpl b/tool/kratos-gen-project/templates/all/cmd/main.go.tmpl deleted file mode 100644 index de1aefec2..000000000 --- a/tool/kratos-gen-project/templates/all/cmd/main.go.tmpl +++ /dev/null @@ -1,41 +0,0 @@ -package main - -import ( - "flag" - "os" - "os/signal" - "syscall" - "time" - - "{{.ModPrefix}}{{.Name}}/internal/di" - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/log" -) - -func main() { - flag.Parse() - log.Init(nil) // debug flag: log.dir={path} - defer log.Close() - log.Info("{{.Name}} start") - paladin.Init() - _, closeFunc, err := di.InitApp() - if err != nil { - panic(err) - } - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT) - for { - s := <-c - log.Info("get a signal %s", s.String()) - switch s { - case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT: - closeFunc() - log.Info("{{.Name}} exit") - time.Sleep(time.Second) - return - case syscall.SIGHUP: - default: - return - } - } -} diff --git a/tool/kratos-gen-project/templates/all/configs/application.toml b/tool/kratos-gen-project/templates/all/configs/application.toml deleted file mode 100644 index a42ca6e63..000000000 --- a/tool/kratos-gen-project/templates/all/configs/application.toml +++ /dev/null @@ -1,3 +0,0 @@ - -# This is a TOML document. Boom~ -demoExpire = "24h" diff --git a/tool/kratos-gen-project/templates/all/configs/db.toml b/tool/kratos-gen-project/templates/all/configs/db.toml deleted file mode 100644 index 840bd2e78..000000000 --- a/tool/kratos-gen-project/templates/all/configs/db.toml +++ /dev/null @@ -1,10 +0,0 @@ -[Client] - addr = "127.0.0.1:3306" - dsn = "{user}:{password}@tcp(127.0.0.1:3306)/{database}?timeout=1s&readTimeout=1s&writeTimeout=1s&parseTime=true&loc=Local&charset=utf8mb4,utf8" - readDSN = ["{user}:{password}@tcp(127.0.0.2:3306)/{database}?timeout=1s&readTimeout=1s&writeTimeout=1s&parseTime=true&loc=Local&charset=utf8mb4,utf8","{user}:{password}@tcp(127.0.0.3:3306)/{database}?timeout=1s&readTimeout=1s&writeTimeout=1s&parseTime=true&loc=Local&charset=utf8,utf8mb4"] - active = 20 - idle = 10 - idleTimeout ="4h" - queryTimeout = "200ms" - execTimeout = "300ms" - tranTimeout = "400ms" diff --git a/tool/kratos-gen-project/templates/all/configs/grpc.toml b/tool/kratos-gen-project/templates/all/configs/grpc.toml deleted file mode 100644 index 40339cecc..000000000 --- a/tool/kratos-gen-project/templates/all/configs/grpc.toml +++ /dev/null @@ -1,3 +0,0 @@ -[Server] - addr = "0.0.0.0:9000" - timeout = "1s" diff --git a/tool/kratos-gen-project/templates/all/configs/http.toml b/tool/kratos-gen-project/templates/all/configs/http.toml deleted file mode 100644 index 951a03197..000000000 --- a/tool/kratos-gen-project/templates/all/configs/http.toml +++ /dev/null @@ -1,3 +0,0 @@ -[Server] - addr = "0.0.0.0:8000" - timeout = "1s" diff --git a/tool/kratos-gen-project/templates/all/configs/memcache.toml b/tool/kratos-gen-project/templates/all/configs/memcache.toml deleted file mode 100644 index 0f2b900d3..000000000 --- a/tool/kratos-gen-project/templates/all/configs/memcache.toml +++ /dev/null @@ -1,10 +0,0 @@ -[Client] - name = "demo" - proto = "tcp" - addr = "127.0.0.1:11211" - active = 50 - idle = 10 - 
dialTimeout = "100ms" - readTimeout = "200ms" - writeTimeout = "300ms" - idleTimeout = "80s" diff --git a/tool/kratos-gen-project/templates/all/configs/redis.toml b/tool/kratos-gen-project/templates/all/configs/redis.toml deleted file mode 100644 index d07950de0..000000000 --- a/tool/kratos-gen-project/templates/all/configs/redis.toml +++ /dev/null @@ -1,10 +0,0 @@ -[Client] - name = "demo" - proto = "tcp" - addr = "127.0.0.1:6379" - idle = 10 - active = 10 - dialTimeout = "1s" - readTimeout = "1s" - writeTimeout = "1s" - idleTimeout = "10s" diff --git a/tool/kratos-gen-project/templates/all/go.mod.tmpl b/tool/kratos-gen-project/templates/all/go.mod.tmpl deleted file mode 100644 index fc8719d34..000000000 --- a/tool/kratos-gen-project/templates/all/go.mod.tmpl +++ /dev/null @@ -1,12 +0,0 @@ -module {{.Name}} - -go 1.13 - -require ( - github.com/go-kratos/kratos master - github.com/gogo/protobuf v1.2.1 - github.com/golang/protobuf v1.3.2 - golang.org/x/net v0.0.0-20190628185345-da137c7871d7 - google.golang.org/grpc v1.28.1 -) - diff --git a/tool/kratos-gen-project/templates/all/internal/dao/dao.go.tmpl b/tool/kratos-gen-project/templates/all/internal/dao/dao.go.tmpl deleted file mode 100644 index 81ea0b540..000000000 --- a/tool/kratos-gen-project/templates/all/internal/dao/dao.go.tmpl +++ /dev/null @@ -1,69 +0,0 @@ -package dao - -import ( - "context" - "time" - - "{{.ModPrefix}}{{.Name}}/internal/model" - "github.com/go-kratos/kratos/pkg/cache/memcache" - "github.com/go-kratos/kratos/pkg/cache/redis" - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/database/sql" - "github.com/go-kratos/kratos/pkg/sync/pipeline/fanout" - xtime "github.com/go-kratos/kratos/pkg/time" - - "github.com/google/wire" -) - -var Provider = wire.NewSet(New, NewDB, NewRedis, NewMC) - -//go:generate kratos tool genbts -// Dao dao interface -type Dao interface { - Close() - Ping(ctx context.Context) (err error) - // bts: -nullcache=&model.Article{ID:-1} -check_null_code=$!=nil&&$.ID==-1 - RawArticle(c context.Context, id int64) (*model.Article, error) -} - -// dao dao. -type dao struct { - db *sql.DB - redis *redis.Redis - mc *memcache.Memcache - cache *fanout.Fanout - demoExpire int32 -} - -// New new a dao and return. -func New(r *redis.Redis, mc *memcache.Memcache, db *sql.DB) (d Dao, cf func(), err error) { - return newDao(r, mc, db) -} - -func newDao(r *redis.Redis, mc *memcache.Memcache, db *sql.DB) (d *dao, cf func(), err error) { - var cfg struct{ - DemoExpire xtime.Duration - } - if err = paladin.Get("application.toml").UnmarshalTOML(&cfg); err != nil { - return - } - d = &dao{ - db: db, - redis: r, - mc: mc, - cache: fanout.New("cache"), - demoExpire: int32(time.Duration(cfg.DemoExpire) / time.Second), - } - cf = d.Close - return -} - -// Close close the resource. -func (d *dao) Close() { - d.cache.Close() -} - -// Ping ping the resource. 
-func (d *dao) Ping(ctx context.Context) (err error) { - return nil -} diff --git a/tool/kratos-gen-project/templates/all/internal/dao/dao_test.go.tmpl b/tool/kratos-gen-project/templates/all/internal/dao/dao_test.go.tmpl deleted file mode 100644 index 90849e012..000000000 --- a/tool/kratos-gen-project/templates/all/internal/dao/dao_test.go.tmpl +++ /dev/null @@ -1,40 +0,0 @@ -package dao - -import ( - "context" - "flag" - "os" - "testing" - - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/testing/lich" -) - -var d *dao -var ctx = context.Background() - -func TestMain(m *testing.M) { - flag.Set("conf", "../../test") - flag.Set("f", "../../test/docker-compose.yaml") - flag.Parse() - disableLich := os.Getenv("DISABLE_LICH") != "" - if !disableLich { - if err := lich.Setup(); err != nil { - panic(err) - } - } - var err error - if err = paladin.Init(); err != nil { - panic(err) - } - var cf func() - if d, cf, err = newTestDao();err != nil { - panic(err) - } - ret := m.Run() - cf() - if !disableLich { - _ = lich.Teardown() - } - os.Exit(ret) -} diff --git a/tool/kratos-gen-project/templates/all/internal/dao/db.go.tmpl b/tool/kratos-gen-project/templates/all/internal/dao/db.go.tmpl deleted file mode 100644 index b3bde3064..000000000 --- a/tool/kratos-gen-project/templates/all/internal/dao/db.go.tmpl +++ /dev/null @@ -1,30 +0,0 @@ -package dao - -import ( - "context" - - "{{.ModPrefix}}{{.Name}}/internal/model" - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/database/sql" -) - -func NewDB() (db *sql.DB, cf func(), err error) { - var ( - cfg sql.Config - ct paladin.TOML - ) - if err = paladin.Get("db.toml").Unmarshal(&ct); err != nil { - return - } - if err = ct.Get("Client").UnmarshalTOML(&cfg); err != nil { - return - } - db = sql.NewMySQL(&cfg) - cf = func() {db.Close()} - return -} - -func (d *dao) RawArticle(ctx context.Context, id int64) (art *model.Article, err error) { - // get data from db - return -} diff --git a/tool/kratos-gen-project/templates/all/internal/dao/mc.go.tmpl b/tool/kratos-gen-project/templates/all/internal/dao/mc.go.tmpl deleted file mode 100644 index 44cfdf13b..000000000 --- a/tool/kratos-gen-project/templates/all/internal/dao/mc.go.tmpl +++ /dev/null @@ -1,48 +0,0 @@ -package dao - -import ( - "context" - "fmt" - - "{{.ModPrefix}}{{.Name}}/internal/model" - "github.com/go-kratos/kratos/pkg/cache/memcache" - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/log" -) - -//go:generate kratos tool genmc -type _mc interface { - // mc: -key=keyArt -type=get - CacheArticle(c context.Context, id int64) (*model.Article, error) - // mc: -key=keyArt -expire=d.demoExpire - AddCacheArticle(c context.Context, id int64, art *model.Article) (err error) - // mc: -key=keyArt - DeleteArticleCache(c context.Context, id int64) (err error) -} - -func NewMC() (mc *memcache.Memcache, cf func(), err error) { - var ( - cfg memcache.Config - ct paladin.TOML - ) - if err = paladin.Get("memcache.toml").Unmarshal(&ct); err != nil { - return - } - if err = ct.Get("Client").UnmarshalTOML(&cfg); err != nil { - return - } - mc = memcache.New(&cfg) - cf = func() {mc.Close()} - return -} - -func (d *dao) PingMC(ctx context.Context) (err error) { - if err = d.mc.Set(ctx, &memcache.Item{Key: "ping", Value: []byte("pong"), Expiration: 0}); err != nil { - log.Error("conn.Set(PING) error(%v)", err) - } - return -} - -func keyArt(id int64) string { - return fmt.Sprintf("art_%d", id) -} diff --git 
a/tool/kratos-gen-project/templates/all/internal/dao/redis.go.tmpl b/tool/kratos-gen-project/templates/all/internal/dao/redis.go.tmpl deleted file mode 100644 index e3c962eee..000000000 --- a/tool/kratos-gen-project/templates/all/internal/dao/redis.go.tmpl +++ /dev/null @@ -1,32 +0,0 @@ -package dao - -import ( - "context" - - "github.com/go-kratos/kratos/pkg/cache/redis" - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/log" -) - -func NewRedis() (r *redis.Redis, cf func(), err error) { - var ( - cfg redis.Config - ct paladin.Map - ) - if err = paladin.Get("redis.toml").Unmarshal(&ct); err != nil { - return - } - if err = ct.Get("Client").UnmarshalTOML(&cfg); err != nil { - return - } - r = redis.NewRedis(&cfg) - cf = func(){r.Close()} - return -} - -func (d *dao) PingRedis(ctx context.Context) (err error) { - if _, err = d.redis.Do(ctx, "SET", "ping", "pong"); err != nil { - log.Error("conn.Set(PING) error(%v)", err) - } - return -} \ No newline at end of file diff --git a/tool/kratos-gen-project/templates/all/internal/dao/wire.go.tmpl b/tool/kratos-gen-project/templates/all/internal/dao/wire.go.tmpl deleted file mode 100644 index a00b2d39a..000000000 --- a/tool/kratos-gen-project/templates/all/internal/dao/wire.go.tmpl +++ /dev/null @@ -1,13 +0,0 @@ -// +build wireinject -// The build tag makes sure the stub is not built in the final build. - -package dao - -import ( - "github.com/google/wire" -) - -//go:generate kratos tool wire -func newTestDao() (*dao, func(), error) { - panic(wire.Build(newDao, NewDB, NewRedis, NewMC)) -} diff --git a/tool/kratos-gen-project/templates/all/internal/di/app.go.tmpl b/tool/kratos-gen-project/templates/all/internal/di/app.go.tmpl deleted file mode 100644 index 14e2d2a71..000000000 --- a/tool/kratos-gen-project/templates/all/internal/di/app.go.tmpl +++ /dev/null @@ -1,38 +0,0 @@ -package di - -import ( - "context" - "time" - - "{{.ModPrefix}}{{.Name}}/internal/service" - - "github.com/go-kratos/kratos/pkg/log" - bm "github.com/go-kratos/kratos/pkg/net/http/blademaster" - "github.com/go-kratos/kratos/pkg/net/rpc/warden" -) - -//go:generate kratos tool wire -type App struct { - svc *service.Service - http *bm.Engine - grpc *warden.Server -} - -func NewApp(svc *service.Service, h *bm.Engine, g *warden.Server) (app *App, closeFunc func(), err error){ - app = &App{ - svc: svc, - http: h, - grpc: g, - } - closeFunc = func() { - ctx, cancel := context.WithTimeout(context.Background(), 35*time.Second) - if err := g.Shutdown(ctx); err != nil { - log.Error("grpcSrv.Shutdown error(%v)", err) - } - if err := h.Shutdown(ctx); err != nil { - log.Error("httpSrv.Shutdown error(%v)", err) - } - cancel() - } - return -} diff --git a/tool/kratos-gen-project/templates/all/internal/di/wire.go.tmpl b/tool/kratos-gen-project/templates/all/internal/di/wire.go.tmpl deleted file mode 100644 index 9ef04b19b..000000000 --- a/tool/kratos-gen-project/templates/all/internal/di/wire.go.tmpl +++ /dev/null @@ -1,18 +0,0 @@ -// +build wireinject -// The build tag makes sure the stub is not built in the final build. 
- -package di - -import ( - "{{.ModPrefix}}{{.Name}}/internal/dao" - "{{.ModPrefix}}{{.Name}}/internal/service" - "{{.ModPrefix}}{{.Name}}/internal/server/grpc" - "{{.ModPrefix}}{{.Name}}/internal/server/http" - - "github.com/google/wire" -) - -//go:generate kratos t wire -func InitApp() (*App, func(), error) { - panic(wire.Build(dao.Provider, service.Provider, http.New, grpc.New, NewApp)) -} diff --git a/tool/kratos-gen-project/templates/all/internal/model/model.go.tmpl b/tool/kratos-gen-project/templates/all/internal/model/model.go.tmpl deleted file mode 100644 index b3fcf7985..000000000 --- a/tool/kratos-gen-project/templates/all/internal/model/model.go.tmpl +++ /dev/null @@ -1,12 +0,0 @@ -package model - -// Kratos hello kratos. -type Kratos struct { - Hello string -} - -type Article struct { - ID int64 - Content string - Author string -} \ No newline at end of file diff --git a/tool/kratos-gen-project/templates/all/internal/server/grpc/server.go.tmpl b/tool/kratos-gen-project/templates/all/internal/server/grpc/server.go.tmpl deleted file mode 100644 index 4db767635..000000000 --- a/tool/kratos-gen-project/templates/all/internal/server/grpc/server.go.tmpl +++ /dev/null @@ -1,26 +0,0 @@ -package grpc - -import ( - pb "{{.ModPrefix}}{{.Name}}/api" - - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/net/rpc/warden" -) - -// New new a grpc server. -func New(svc pb.DemoServer) (ws *warden.Server, err error) { - var ( - cfg warden.ServerConfig - ct paladin.TOML - ) - if err = paladin.Get("grpc.toml").Unmarshal(&ct); err != nil { - return - } - if err = ct.Get("Server").UnmarshalTOML(&cfg); err != nil { - return - } - ws = warden.NewServer(&cfg) - pb.RegisterDemoServer(ws.Server(), svc) - ws, err = ws.Start() - return -} diff --git a/tool/kratos-gen-project/templates/all/internal/server/http/server.go.tmpl b/tool/kratos-gen-project/templates/all/internal/server/http/server.go.tmpl deleted file mode 100644 index 3bec93a28..000000000 --- a/tool/kratos-gen-project/templates/all/internal/server/http/server.go.tmpl +++ /dev/null @@ -1,56 +0,0 @@ -package http - -import ( - "net/http" - - pb "{{.ModPrefix}}{{.Name}}/api" - "{{.ModPrefix}}{{.Name}}/internal/model" - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/log" - bm "github.com/go-kratos/kratos/pkg/net/http/blademaster" -) - -var svc pb.DemoServer - -// New new a bm server. -func New(s pb.DemoServer) (engine *bm.Engine, err error) { - var ( - cfg bm.ServerConfig - ct paladin.TOML - ) - if err = paladin.Get("http.toml").Unmarshal(&ct); err != nil { - return - } - if err = ct.Get("Server").UnmarshalTOML(&cfg); err != nil { - return - } - svc = s - engine = bm.DefaultServer(&cfg) - pb.RegisterDemoBMServer(engine, s) - initRouter(engine) - err = engine.Start() - return -} - -func initRouter(e *bm.Engine) { - e.Ping(ping) - g := e.Group("/{{.Name}}") - { - g.GET("/start", howToStart) - } -} - -func ping(ctx *bm.Context) { - if _, err := svc.Ping(ctx, nil); err != nil { - log.Error("ping error(%v)", err) - ctx.AbortWithStatus(http.StatusServiceUnavailable) - } -} - -// example for http request handler. 
-func howToStart(c *bm.Context) { - k := &model.Kratos{ - Hello: "Golang 大法好 !!!", - } - c.JSON(k, nil) -} \ No newline at end of file diff --git a/tool/kratos-gen-project/templates/all/internal/service/service.go.tmpl b/tool/kratos-gen-project/templates/all/internal/service/service.go.tmpl deleted file mode 100644 index f3750cc69..000000000 --- a/tool/kratos-gen-project/templates/all/internal/service/service.go.tmpl +++ /dev/null @@ -1,57 +0,0 @@ -package service - -import ( - "context" - "fmt" - - pb "{{.ModPrefix}}{{.Name}}/api" - "{{.ModPrefix}}{{.Name}}/internal/dao" - "github.com/go-kratos/kratos/pkg/conf/paladin" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/google/wire" -) - -var Provider = wire.NewSet(New, wire.Bind(new(pb.DemoServer), new(*Service))) - -// Service service. -type Service struct { - ac *paladin.Map - dao dao.Dao -} - -// New new a service and return. -func New(d dao.Dao) (s *Service, cf func(), err error) { - s = &Service{ - ac: &paladin.TOML{}, - dao: d, - } - cf = s.Close - err = paladin.Watch("application.toml", s.ac) - return -} - -// SayHello grpc demo func. -func (s *Service) SayHello(ctx context.Context, req *pb.HelloReq) (reply *empty.Empty, err error) { - reply = new(empty.Empty) - fmt.Printf("hello %s", req.Name) - return -} - -// SayHelloURL bm demo func. -func (s *Service) SayHelloURL(ctx context.Context, req *pb.HelloReq) (reply *pb.HelloResp, err error) { - reply = &pb.HelloResp{ - Content: "hello " + req.Name, - } - fmt.Printf("hello url %s", req.Name) - return -} - -// Ping ping the resource. -func (s *Service) Ping(ctx context.Context, e *empty.Empty) (*empty.Empty, error) { - return &empty.Empty{}, s.dao.Ping(ctx) -} - -// Close close the resource. -func (s *Service) Close() { -} diff --git a/tool/kratos-gen-project/templates/all/test/0_db.sql b/tool/kratos-gen-project/templates/all/test/0_db.sql deleted file mode 100644 index e2cbccc19..000000000 --- a/tool/kratos-gen-project/templates/all/test/0_db.sql +++ /dev/null @@ -1,11 +0,0 @@ -create database kratos_demo; -use kratos_demo; - -CREATE TABLE `articles` ( - `id` int(11) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT '主键ID', - `title` varchar(64) NOT NULL COMMENT '名称', - `mtime` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '最后修改时间', - `ctime` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - PRIMARY KEY (`id`), - KEY `ix_mtime` (`mtime`) -) COMMENT='文章表'; diff --git a/tool/kratos-gen-project/templates/all/test/1_data.sql b/tool/kratos-gen-project/templates/all/test/1_data.sql deleted file mode 100644 index c7cb57167..000000000 --- a/tool/kratos-gen-project/templates/all/test/1_data.sql +++ /dev/null @@ -1,3 +0,0 @@ -use kratos_demo; - -INSERT INTO articles(`id`, `title`) VALUES (1, 'title'); diff --git a/tool/kratos-gen-project/templates/all/test/application.toml b/tool/kratos-gen-project/templates/all/test/application.toml deleted file mode 100644 index a42ca6e63..000000000 --- a/tool/kratos-gen-project/templates/all/test/application.toml +++ /dev/null @@ -1,3 +0,0 @@ - -# This is a TOML document. 
Boom~ -demoExpire = "24h" diff --git a/tool/kratos-gen-project/templates/all/test/db.toml b/tool/kratos-gen-project/templates/all/test/db.toml deleted file mode 100644 index 9c7bd8618..000000000 --- a/tool/kratos-gen-project/templates/all/test/db.toml +++ /dev/null @@ -1,8 +0,0 @@ -[Client] - dsn = "root:root@tcp(127.0.0.1:13306)/kratos_demo?timeout=1s&readTimeout=1s&writeTimeout=1s&parseTime=true&loc=Local&charset=utf8mb4,utf8" - active = 20 - idle = 10 - idleTimeout ="4h" - queryTimeout = "200ms" - execTimeout = "300ms" - tranTimeout = "400ms" diff --git a/tool/kratos-gen-project/templates/all/test/docker-compose.yaml b/tool/kratos-gen-project/templates/all/test/docker-compose.yaml deleted file mode 100644 index b044d4247..000000000 --- a/tool/kratos-gen-project/templates/all/test/docker-compose.yaml +++ /dev/null @@ -1,40 +0,0 @@ - version: "3.7" - services: - db: - image: mysql:5.6 - ports: - - 13306:3306 - environment: - - MYSQL_ROOT_PASSWORD=root - - TZ=Asia/Shanghai - volumes: - - .:/docker-entrypoint-initdb.d - command: [ - '--character-set-server=utf8', - '--collation-server=utf8_unicode_ci' - ] - healthcheck: - test: ["CMD", "mysqladmin" ,"ping", "--protocol=tcp"] - timeout: 20s - interval: 1s - retries: 20 - - redis: - image: redis - ports: - - 16379:6379 - healthcheck: - test: ["CMD", "redis-cli","ping"] - interval: 20s - timeout: 1s - retries: 20 - - memcached: - image: memcached - ports: - - 21211:11211 - healthcheck: - test: ["CMD", "echo", "stats", "|", "nc", "127.0.0.1", "11211"] - interval: 20s - timeout: 1s - retries: 20 diff --git a/tool/kratos-gen-project/templates/all/test/grpc.toml b/tool/kratos-gen-project/templates/all/test/grpc.toml deleted file mode 100644 index 40339cecc..000000000 --- a/tool/kratos-gen-project/templates/all/test/grpc.toml +++ /dev/null @@ -1,3 +0,0 @@ -[Server] - addr = "0.0.0.0:9000" - timeout = "1s" diff --git a/tool/kratos-gen-project/templates/all/test/http.toml b/tool/kratos-gen-project/templates/all/test/http.toml deleted file mode 100644 index 951a03197..000000000 --- a/tool/kratos-gen-project/templates/all/test/http.toml +++ /dev/null @@ -1,3 +0,0 @@ -[Server] - addr = "0.0.0.0:8000" - timeout = "1s" diff --git a/tool/kratos-gen-project/templates/all/test/memcache.toml b/tool/kratos-gen-project/templates/all/test/memcache.toml deleted file mode 100644 index ad2ce9e09..000000000 --- a/tool/kratos-gen-project/templates/all/test/memcache.toml +++ /dev/null @@ -1,10 +0,0 @@ -[Client] - name = "demo" - proto = "tcp" - addr = "127.0.0.1:21211" - active = 50 - idle = 10 - dialTimeout = "100ms" - readTimeout = "200ms" - writeTimeout = "300ms" - idleTimeout = "80s" diff --git a/tool/kratos-gen-project/templates/all/test/redis.toml b/tool/kratos-gen-project/templates/all/test/redis.toml deleted file mode 100644 index 371f243da..000000000 --- a/tool/kratos-gen-project/templates/all/test/redis.toml +++ /dev/null @@ -1,10 +0,0 @@ -[Client] - name = "demo" - proto = "tcp" - addr = "127.0.0.1:16379" - idle = 10 - active = 10 - dialTimeout = "1s" - readTimeout = "1s" - writeTimeout = "1s" - idleTimeout = "10s" diff --git a/tool/kratos-gen-project/templates/grpc/CHANGELOG.md b/tool/kratos-gen-project/templates/grpc/CHANGELOG.md deleted file mode 100644 index c39acc0e2..000000000 --- a/tool/kratos-gen-project/templates/grpc/CHANGELOG.md +++ /dev/null @@ -1,4 +0,0 @@ -## Demo - -### v1.0.0 -1. 
Launched feature xxx diff --git a/tool/kratos-gen-project/templates/grpc/OWNERS b/tool/kratos-gen-project/templates/grpc/OWNERS deleted file mode 100644 index c1e28c554..000000000 --- a/tool/kratos-gen-project/templates/grpc/OWNERS +++ /dev/null @@ -1,2 +0,0 @@ -# Author -# Reviewer diff --git a/tool/kratos-gen-project/templates/grpc/README.md b/tool/kratos-gen-project/templates/grpc/README.md deleted file mode 100644 index e43f93fc3..000000000 --- a/tool/kratos-gen-project/templates/grpc/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# Demo - -## Project Introduction -1. diff --git a/tool/kratos-gen-project/templates/grpc/api/api.proto b/tool/kratos-gen-project/templates/grpc/api/api.proto deleted file mode 100644 index 0ba83f9f5..000000000 --- a/tool/kratos-gen-project/templates/grpc/api/api.proto +++ /dev/null @@ -1,34 +0,0 @@ -// The proto file that defines the project API; it can describe both gRPC and HTTP APIs -// protobuf file references: -// - https://developers.google.com/protocol-buffers/ -syntax = "proto3"; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -import "google/protobuf/empty.proto"; -import "google/api/annotations.proto"; - -// The package name uses the {appid}.{version} form, where version looks like v1, v2 .. -package demo.service.v1; - -// NOTE: please delete these useless comments when you are done (゜-゜)つロ - -option go_package = "api"; -option (gogoproto.goproto_getters_all) = false; - -service Demo { - rpc Ping(.google.protobuf.Empty) returns (.google.protobuf.Empty); - rpc SayHello(HelloReq) returns (.google.protobuf.Empty); - rpc SayHelloURL(HelloReq) returns (HelloResp) { - option (google.api.http) = { - get: "/kratos-demo/say_hello" - }; - }; -} - -message HelloReq { - string name = 1 [(gogoproto.moretags) = 'form:"name" validate:"required"']; -} - -message HelloResp { - string Content = 1 [(gogoproto.jsontag) = 'content']; -} diff --git a/tool/kratos-gen-project/templates/grpc/api/client.go.tmpl b/tool/kratos-gen-project/templates/grpc/api/client.go.tmpl deleted file mode 100644 index 854cc383a..000000000 --- a/tool/kratos-gen-project/templates/grpc/api/client.go.tmpl +++ /dev/null @@ -1,25 +0,0 @@ -package api -import ( - "context" - "fmt" - - "github.com/go-kratos/kratos/pkg/net/rpc/warden" - - "google.golang.org/grpc" -) - -// AppID . -const AppID = "TODO: ADD APP ID" - -// NewClient new grpc client -func NewClient(cfg *warden.ClientConfig, opts ...grpc.DialOption) (DemoClient, error) { - client := warden.NewClient(cfg, opts...)
- cc, err := client.Dial(context.Background(), fmt.Sprintf("discovery://default/%s", AppID)) - if err != nil { - return nil, err - } - return NewDemoClient(cc), nil -} - -// 生成 gRPC 代码 -//go:generate kratos tool protoc --grpc api.proto diff --git a/tool/kratos-gen-project/templates/grpc/cmd/main.go.tmpl b/tool/kratos-gen-project/templates/grpc/cmd/main.go.tmpl deleted file mode 100644 index de1aefec2..000000000 --- a/tool/kratos-gen-project/templates/grpc/cmd/main.go.tmpl +++ /dev/null @@ -1,41 +0,0 @@ -package main - -import ( - "flag" - "os" - "os/signal" - "syscall" - "time" - - "{{.ModPrefix}}{{.Name}}/internal/di" - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/log" -) - -func main() { - flag.Parse() - log.Init(nil) // debug flag: log.dir={path} - defer log.Close() - log.Info("{{.Name}} start") - paladin.Init() - _, closeFunc, err := di.InitApp() - if err != nil { - panic(err) - } - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT) - for { - s := <-c - log.Info("get a signal %s", s.String()) - switch s { - case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT: - closeFunc() - log.Info("{{.Name}} exit") - time.Sleep(time.Second) - return - case syscall.SIGHUP: - default: - return - } - } -} diff --git a/tool/kratos-gen-project/templates/grpc/configs/application.toml b/tool/kratos-gen-project/templates/grpc/configs/application.toml deleted file mode 100644 index a42ca6e63..000000000 --- a/tool/kratos-gen-project/templates/grpc/configs/application.toml +++ /dev/null @@ -1,3 +0,0 @@ - -# This is a TOML document. Boom~ -demoExpire = "24h" diff --git a/tool/kratos-gen-project/templates/grpc/configs/db.toml b/tool/kratos-gen-project/templates/grpc/configs/db.toml deleted file mode 100644 index 840bd2e78..000000000 --- a/tool/kratos-gen-project/templates/grpc/configs/db.toml +++ /dev/null @@ -1,10 +0,0 @@ -[Client] - addr = "127.0.0.1:3306" - dsn = "{user}:{password}@tcp(127.0.0.1:3306)/{database}?timeout=1s&readTimeout=1s&writeTimeout=1s&parseTime=true&loc=Local&charset=utf8mb4,utf8" - readDSN = ["{user}:{password}@tcp(127.0.0.2:3306)/{database}?timeout=1s&readTimeout=1s&writeTimeout=1s&parseTime=true&loc=Local&charset=utf8mb4,utf8","{user}:{password}@tcp(127.0.0.3:3306)/{database}?timeout=1s&readTimeout=1s&writeTimeout=1s&parseTime=true&loc=Local&charset=utf8,utf8mb4"] - active = 20 - idle = 10 - idleTimeout ="4h" - queryTimeout = "200ms" - execTimeout = "300ms" - tranTimeout = "400ms" diff --git a/tool/kratos-gen-project/templates/grpc/configs/grpc.toml b/tool/kratos-gen-project/templates/grpc/configs/grpc.toml deleted file mode 100644 index 40339cecc..000000000 --- a/tool/kratos-gen-project/templates/grpc/configs/grpc.toml +++ /dev/null @@ -1,3 +0,0 @@ -[Server] - addr = "0.0.0.0:9000" - timeout = "1s" diff --git a/tool/kratos-gen-project/templates/grpc/configs/memcache.toml b/tool/kratos-gen-project/templates/grpc/configs/memcache.toml deleted file mode 100644 index 0f2b900d3..000000000 --- a/tool/kratos-gen-project/templates/grpc/configs/memcache.toml +++ /dev/null @@ -1,10 +0,0 @@ -[Client] - name = "demo" - proto = "tcp" - addr = "127.0.0.1:11211" - active = 50 - idle = 10 - dialTimeout = "100ms" - readTimeout = "200ms" - writeTimeout = "300ms" - idleTimeout = "80s" diff --git a/tool/kratos-gen-project/templates/grpc/configs/redis.toml b/tool/kratos-gen-project/templates/grpc/configs/redis.toml deleted file mode 100644 index d07950de0..000000000 --- 
a/tool/kratos-gen-project/templates/grpc/configs/redis.toml +++ /dev/null @@ -1,10 +0,0 @@ -[Client] - name = "demo" - proto = "tcp" - addr = "127.0.0.1:6379" - idle = 10 - active = 10 - dialTimeout = "1s" - readTimeout = "1s" - writeTimeout = "1s" - idleTimeout = "10s" diff --git a/tool/kratos-gen-project/templates/grpc/go.mod.tmpl b/tool/kratos-gen-project/templates/grpc/go.mod.tmpl deleted file mode 100644 index fc8719d34..000000000 --- a/tool/kratos-gen-project/templates/grpc/go.mod.tmpl +++ /dev/null @@ -1,12 +0,0 @@ -module {{.Name}} - -go 1.13 - -require ( - github.com/go-kratos/kratos master - github.com/gogo/protobuf v1.2.1 - github.com/golang/protobuf v1.3.2 - golang.org/x/net v0.0.0-20190628185345-da137c7871d7 - google.golang.org/grpc v1.28.1 -) - diff --git a/tool/kratos-gen-project/templates/grpc/internal/dao/dao.go.tmpl b/tool/kratos-gen-project/templates/grpc/internal/dao/dao.go.tmpl deleted file mode 100644 index 81ea0b540..000000000 --- a/tool/kratos-gen-project/templates/grpc/internal/dao/dao.go.tmpl +++ /dev/null @@ -1,69 +0,0 @@ -package dao - -import ( - "context" - "time" - - "{{.ModPrefix}}{{.Name}}/internal/model" - "github.com/go-kratos/kratos/pkg/cache/memcache" - "github.com/go-kratos/kratos/pkg/cache/redis" - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/database/sql" - "github.com/go-kratos/kratos/pkg/sync/pipeline/fanout" - xtime "github.com/go-kratos/kratos/pkg/time" - - "github.com/google/wire" -) - -var Provider = wire.NewSet(New, NewDB, NewRedis, NewMC) - -//go:generate kratos tool genbts -// Dao dao interface -type Dao interface { - Close() - Ping(ctx context.Context) (err error) - // bts: -nullcache=&model.Article{ID:-1} -check_null_code=$!=nil&&$.ID==-1 - RawArticle(c context.Context, id int64) (*model.Article, error) -} - -// dao dao. -type dao struct { - db *sql.DB - redis *redis.Redis - mc *memcache.Memcache - cache *fanout.Fanout - demoExpire int32 -} - -// New new a dao and return. -func New(r *redis.Redis, mc *memcache.Memcache, db *sql.DB) (d Dao, cf func(), err error) { - return newDao(r, mc, db) -} - -func newDao(r *redis.Redis, mc *memcache.Memcache, db *sql.DB) (d *dao, cf func(), err error) { - var cfg struct{ - DemoExpire xtime.Duration - } - if err = paladin.Get("application.toml").UnmarshalTOML(&cfg); err != nil { - return - } - d = &dao{ - db: db, - redis: r, - mc: mc, - cache: fanout.New("cache"), - demoExpire: int32(time.Duration(cfg.DemoExpire) / time.Second), - } - cf = d.Close - return -} - -// Close close the resource. -func (d *dao) Close() { - d.cache.Close() -} - -// Ping ping the resource. 
-func (d *dao) Ping(ctx context.Context) (err error) { - return nil -} diff --git a/tool/kratos-gen-project/templates/grpc/internal/dao/dao_test.go.tmpl b/tool/kratos-gen-project/templates/grpc/internal/dao/dao_test.go.tmpl deleted file mode 100644 index 90849e012..000000000 --- a/tool/kratos-gen-project/templates/grpc/internal/dao/dao_test.go.tmpl +++ /dev/null @@ -1,40 +0,0 @@ -package dao - -import ( - "context" - "flag" - "os" - "testing" - - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/testing/lich" -) - -var d *dao -var ctx = context.Background() - -func TestMain(m *testing.M) { - flag.Set("conf", "../../test") - flag.Set("f", "../../test/docker-compose.yaml") - flag.Parse() - disableLich := os.Getenv("DISABLE_LICH") != "" - if !disableLich { - if err := lich.Setup(); err != nil { - panic(err) - } - } - var err error - if err = paladin.Init(); err != nil { - panic(err) - } - var cf func() - if d, cf, err = newTestDao();err != nil { - panic(err) - } - ret := m.Run() - cf() - if !disableLich { - _ = lich.Teardown() - } - os.Exit(ret) -} diff --git a/tool/kratos-gen-project/templates/grpc/internal/dao/db.go.tmpl b/tool/kratos-gen-project/templates/grpc/internal/dao/db.go.tmpl deleted file mode 100644 index b3bde3064..000000000 --- a/tool/kratos-gen-project/templates/grpc/internal/dao/db.go.tmpl +++ /dev/null @@ -1,30 +0,0 @@ -package dao - -import ( - "context" - - "{{.ModPrefix}}{{.Name}}/internal/model" - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/database/sql" -) - -func NewDB() (db *sql.DB, cf func(), err error) { - var ( - cfg sql.Config - ct paladin.TOML - ) - if err = paladin.Get("db.toml").Unmarshal(&ct); err != nil { - return - } - if err = ct.Get("Client").UnmarshalTOML(&cfg); err != nil { - return - } - db = sql.NewMySQL(&cfg) - cf = func() {db.Close()} - return -} - -func (d *dao) RawArticle(ctx context.Context, id int64) (art *model.Article, err error) { - // get data from db - return -} diff --git a/tool/kratos-gen-project/templates/grpc/internal/dao/mc.go.tmpl b/tool/kratos-gen-project/templates/grpc/internal/dao/mc.go.tmpl deleted file mode 100644 index 44cfdf13b..000000000 --- a/tool/kratos-gen-project/templates/grpc/internal/dao/mc.go.tmpl +++ /dev/null @@ -1,48 +0,0 @@ -package dao - -import ( - "context" - "fmt" - - "{{.ModPrefix}}{{.Name}}/internal/model" - "github.com/go-kratos/kratos/pkg/cache/memcache" - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/log" -) - -//go:generate kratos tool genmc -type _mc interface { - // mc: -key=keyArt -type=get - CacheArticle(c context.Context, id int64) (*model.Article, error) - // mc: -key=keyArt -expire=d.demoExpire - AddCacheArticle(c context.Context, id int64, art *model.Article) (err error) - // mc: -key=keyArt - DeleteArticleCache(c context.Context, id int64) (err error) -} - -func NewMC() (mc *memcache.Memcache, cf func(), err error) { - var ( - cfg memcache.Config - ct paladin.TOML - ) - if err = paladin.Get("memcache.toml").Unmarshal(&ct); err != nil { - return - } - if err = ct.Get("Client").UnmarshalTOML(&cfg); err != nil { - return - } - mc = memcache.New(&cfg) - cf = func() {mc.Close()} - return -} - -func (d *dao) PingMC(ctx context.Context) (err error) { - if err = d.mc.Set(ctx, &memcache.Item{Key: "ping", Value: []byte("pong"), Expiration: 0}); err != nil { - log.Error("conn.Set(PING) error(%v)", err) - } - return -} - -func keyArt(id int64) string { - return fmt.Sprintf("art_%d", id) -} diff --git 
a/tool/kratos-gen-project/templates/grpc/internal/dao/redis.go.tmpl b/tool/kratos-gen-project/templates/grpc/internal/dao/redis.go.tmpl deleted file mode 100644 index e3c962eee..000000000 --- a/tool/kratos-gen-project/templates/grpc/internal/dao/redis.go.tmpl +++ /dev/null @@ -1,32 +0,0 @@ -package dao - -import ( - "context" - - "github.com/go-kratos/kratos/pkg/cache/redis" - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/log" -) - -func NewRedis() (r *redis.Redis, cf func(), err error) { - var ( - cfg redis.Config - ct paladin.Map - ) - if err = paladin.Get("redis.toml").Unmarshal(&ct); err != nil { - return - } - if err = ct.Get("Client").UnmarshalTOML(&cfg); err != nil { - return - } - r = redis.NewRedis(&cfg) - cf = func(){r.Close()} - return -} - -func (d *dao) PingRedis(ctx context.Context) (err error) { - if _, err = d.redis.Do(ctx, "SET", "ping", "pong"); err != nil { - log.Error("conn.Set(PING) error(%v)", err) - } - return -} \ No newline at end of file diff --git a/tool/kratos-gen-project/templates/grpc/internal/dao/wire.go.tmpl b/tool/kratos-gen-project/templates/grpc/internal/dao/wire.go.tmpl deleted file mode 100644 index a00b2d39a..000000000 --- a/tool/kratos-gen-project/templates/grpc/internal/dao/wire.go.tmpl +++ /dev/null @@ -1,13 +0,0 @@ -// +build wireinject -// The build tag makes sure the stub is not built in the final build. - -package dao - -import ( - "github.com/google/wire" -) - -//go:generate kratos tool wire -func newTestDao() (*dao, func(), error) { - panic(wire.Build(newDao, NewDB, NewRedis, NewMC)) -} diff --git a/tool/kratos-gen-project/templates/grpc/internal/di/app.go.tmpl b/tool/kratos-gen-project/templates/grpc/internal/di/app.go.tmpl deleted file mode 100644 index f660e4f15..000000000 --- a/tool/kratos-gen-project/templates/grpc/internal/di/app.go.tmpl +++ /dev/null @@ -1,32 +0,0 @@ -package di - -import ( - "context" - "time" - - "{{.ModPrefix}}{{.Name}}/internal/service" - - "github.com/go-kratos/kratos/pkg/log" - "github.com/go-kratos/kratos/pkg/net/rpc/warden" -) - -//go:generate kratos tool wire -type App struct { - svc *service.Service - grpc *warden.Server -} - -func NewApp(svc *service.Service, g *warden.Server) (app *App, closeFunc func(), err error){ - app = &App{ - svc: svc, - grpc: g, - } - closeFunc = func() { - ctx, cancel := context.WithTimeout(context.Background(), 35*time.Second) - if err := g.Shutdown(ctx); err != nil { - log.Error("grpcSrv.Shutdown error(%v)", err) - } - cancel() - } - return -} diff --git a/tool/kratos-gen-project/templates/grpc/internal/di/wire.go.tmpl b/tool/kratos-gen-project/templates/grpc/internal/di/wire.go.tmpl deleted file mode 100644 index bbed6327c..000000000 --- a/tool/kratos-gen-project/templates/grpc/internal/di/wire.go.tmpl +++ /dev/null @@ -1,17 +0,0 @@ -// +build wireinject -// The build tag makes sure the stub is not built in the final build. 
- -package di - -import ( - "{{.ModPrefix}}{{.Name}}/internal/dao" - "{{.ModPrefix}}{{.Name}}/internal/service" - "{{.ModPrefix}}{{.Name}}/internal/server/grpc" - - "github.com/google/wire" -) - -//go:generate kratos t wire -func InitApp() (*App, func(), error) { - panic(wire.Build(dao.Provider, service.Provider, grpc.New, NewApp)) -} diff --git a/tool/kratos-gen-project/templates/grpc/internal/model/model.go.tmpl b/tool/kratos-gen-project/templates/grpc/internal/model/model.go.tmpl deleted file mode 100644 index b3fcf7985..000000000 --- a/tool/kratos-gen-project/templates/grpc/internal/model/model.go.tmpl +++ /dev/null @@ -1,12 +0,0 @@ -package model - -// Kratos hello kratos. -type Kratos struct { - Hello string -} - -type Article struct { - ID int64 - Content string - Author string -} \ No newline at end of file diff --git a/tool/kratos-gen-project/templates/grpc/internal/server/grpc/server.go.tmpl b/tool/kratos-gen-project/templates/grpc/internal/server/grpc/server.go.tmpl deleted file mode 100644 index 4db767635..000000000 --- a/tool/kratos-gen-project/templates/grpc/internal/server/grpc/server.go.tmpl +++ /dev/null @@ -1,26 +0,0 @@ -package grpc - -import ( - pb "{{.ModPrefix}}{{.Name}}/api" - - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/net/rpc/warden" -) - -// New new a grpc server. -func New(svc pb.DemoServer) (ws *warden.Server, err error) { - var ( - cfg warden.ServerConfig - ct paladin.TOML - ) - if err = paladin.Get("grpc.toml").Unmarshal(&ct); err != nil { - return - } - if err = ct.Get("Server").UnmarshalTOML(&cfg); err != nil { - return - } - ws = warden.NewServer(&cfg) - pb.RegisterDemoServer(ws.Server(), svc) - ws, err = ws.Start() - return -} diff --git a/tool/kratos-gen-project/templates/grpc/internal/service/service.go.tmpl b/tool/kratos-gen-project/templates/grpc/internal/service/service.go.tmpl deleted file mode 100644 index f3750cc69..000000000 --- a/tool/kratos-gen-project/templates/grpc/internal/service/service.go.tmpl +++ /dev/null @@ -1,57 +0,0 @@ -package service - -import ( - "context" - "fmt" - - pb "{{.ModPrefix}}{{.Name}}/api" - "{{.ModPrefix}}{{.Name}}/internal/dao" - "github.com/go-kratos/kratos/pkg/conf/paladin" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/google/wire" -) - -var Provider = wire.NewSet(New, wire.Bind(new(pb.DemoServer), new(*Service))) - -// Service service. -type Service struct { - ac *paladin.Map - dao dao.Dao -} - -// New new a service and return. -func New(d dao.Dao) (s *Service, cf func(), err error) { - s = &Service{ - ac: &paladin.TOML{}, - dao: d, - } - cf = s.Close - err = paladin.Watch("application.toml", s.ac) - return -} - -// SayHello grpc demo func. -func (s *Service) SayHello(ctx context.Context, req *pb.HelloReq) (reply *empty.Empty, err error) { - reply = new(empty.Empty) - fmt.Printf("hello %s", req.Name) - return -} - -// SayHelloURL bm demo func. -func (s *Service) SayHelloURL(ctx context.Context, req *pb.HelloReq) (reply *pb.HelloResp, err error) { - reply = &pb.HelloResp{ - Content: "hello " + req.Name, - } - fmt.Printf("hello url %s", req.Name) - return -} - -// Ping ping the resource. -func (s *Service) Ping(ctx context.Context, e *empty.Empty) (*empty.Empty, error) { - return &empty.Empty{}, s.dao.Ping(ctx) -} - -// Close close the resource. 
-func (s *Service) Close() { -} diff --git a/tool/kratos-gen-project/templates/grpc/test/0_db.sql b/tool/kratos-gen-project/templates/grpc/test/0_db.sql deleted file mode 100644 index e2cbccc19..000000000 --- a/tool/kratos-gen-project/templates/grpc/test/0_db.sql +++ /dev/null @@ -1,11 +0,0 @@ -create database kratos_demo; -use kratos_demo; - -CREATE TABLE `articles` ( - `id` int(11) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT '主键ID', - `title` varchar(64) NOT NULL COMMENT '名称', - `mtime` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '最后修改时间', - `ctime` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - PRIMARY KEY (`id`), - KEY `ix_mtime` (`mtime`) -) COMMENT='文章表'; diff --git a/tool/kratos-gen-project/templates/grpc/test/1_data.sql b/tool/kratos-gen-project/templates/grpc/test/1_data.sql deleted file mode 100644 index c7cb57167..000000000 --- a/tool/kratos-gen-project/templates/grpc/test/1_data.sql +++ /dev/null @@ -1,3 +0,0 @@ -use kratos_demo; - -INSERT INTO articles(`id`, `title`) VALUES (1, 'title'); diff --git a/tool/kratos-gen-project/templates/grpc/test/application.toml b/tool/kratos-gen-project/templates/grpc/test/application.toml deleted file mode 100644 index a42ca6e63..000000000 --- a/tool/kratos-gen-project/templates/grpc/test/application.toml +++ /dev/null @@ -1,3 +0,0 @@ - -# This is a TOML document. Boom~ -demoExpire = "24h" diff --git a/tool/kratos-gen-project/templates/grpc/test/db.toml b/tool/kratos-gen-project/templates/grpc/test/db.toml deleted file mode 100644 index 9c7bd8618..000000000 --- a/tool/kratos-gen-project/templates/grpc/test/db.toml +++ /dev/null @@ -1,8 +0,0 @@ -[Client] - dsn = "root:root@tcp(127.0.0.1:13306)/kratos_demo?timeout=1s&readTimeout=1s&writeTimeout=1s&parseTime=true&loc=Local&charset=utf8mb4,utf8" - active = 20 - idle = 10 - idleTimeout ="4h" - queryTimeout = "200ms" - execTimeout = "300ms" - tranTimeout = "400ms" diff --git a/tool/kratos-gen-project/templates/grpc/test/docker-compose.yaml b/tool/kratos-gen-project/templates/grpc/test/docker-compose.yaml deleted file mode 100644 index b044d4247..000000000 --- a/tool/kratos-gen-project/templates/grpc/test/docker-compose.yaml +++ /dev/null @@ -1,40 +0,0 @@ - version: "3.7" - services: - db: - image: mysql:5.6 - ports: - - 13306:3306 - environment: - - MYSQL_ROOT_PASSWORD=root - - TZ=Asia/Shanghai - volumes: - - .:/docker-entrypoint-initdb.d - command: [ - '--character-set-server=utf8', - '--collation-server=utf8_unicode_ci' - ] - healthcheck: - test: ["CMD", "mysqladmin" ,"ping", "--protocol=tcp"] - timeout: 20s - interval: 1s - retries: 20 - - redis: - image: redis - ports: - - 16379:6379 - healthcheck: - test: ["CMD", "redis-cli","ping"] - interval: 20s - timeout: 1s - retries: 20 - - memcached: - image: memcached - ports: - - 21211:11211 - healthcheck: - test: ["CMD", "echo", "stats", "|", "nc", "127.0.0.1", "11211"] - interval: 20s - timeout: 1s - retries: 20 diff --git a/tool/kratos-gen-project/templates/grpc/test/grpc.toml b/tool/kratos-gen-project/templates/grpc/test/grpc.toml deleted file mode 100644 index 40339cecc..000000000 --- a/tool/kratos-gen-project/templates/grpc/test/grpc.toml +++ /dev/null @@ -1,3 +0,0 @@ -[Server] - addr = "0.0.0.0:9000" - timeout = "1s" diff --git a/tool/kratos-gen-project/templates/grpc/test/memcache.toml b/tool/kratos-gen-project/templates/grpc/test/memcache.toml deleted file mode 100644 index ad2ce9e09..000000000 --- a/tool/kratos-gen-project/templates/grpc/test/memcache.toml +++ /dev/null @@ -1,10 +0,0 @@ 
-[Client] - name = "demo" - proto = "tcp" - addr = "127.0.0.1:21211" - active = 50 - idle = 10 - dialTimeout = "100ms" - readTimeout = "200ms" - writeTimeout = "300ms" - idleTimeout = "80s" diff --git a/tool/kratos-gen-project/templates/grpc/test/redis.toml b/tool/kratos-gen-project/templates/grpc/test/redis.toml deleted file mode 100644 index 371f243da..000000000 --- a/tool/kratos-gen-project/templates/grpc/test/redis.toml +++ /dev/null @@ -1,10 +0,0 @@ -[Client] - name = "demo" - proto = "tcp" - addr = "127.0.0.1:16379" - idle = 10 - active = 10 - dialTimeout = "1s" - readTimeout = "1s" - writeTimeout = "1s" - idleTimeout = "10s" diff --git a/tool/kratos-gen-project/templates/http/CHANGELOG.md b/tool/kratos-gen-project/templates/http/CHANGELOG.md deleted file mode 100644 index c39acc0e2..000000000 --- a/tool/kratos-gen-project/templates/http/CHANGELOG.md +++ /dev/null @@ -1,4 +0,0 @@ -## Demo - -### v1.0.0 -1. 上线功能xxx diff --git a/tool/kratos-gen-project/templates/http/OWNERS b/tool/kratos-gen-project/templates/http/OWNERS deleted file mode 100644 index c1e28c554..000000000 --- a/tool/kratos-gen-project/templates/http/OWNERS +++ /dev/null @@ -1,2 +0,0 @@ -# Author -# Reviewer diff --git a/tool/kratos-gen-project/templates/http/README.md b/tool/kratos-gen-project/templates/http/README.md deleted file mode 100644 index e43f93fc3..000000000 --- a/tool/kratos-gen-project/templates/http/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# Demo - -## 项目简介 -1. diff --git a/tool/kratos-gen-project/templates/http/api/api.proto b/tool/kratos-gen-project/templates/http/api/api.proto deleted file mode 100644 index ed1664f5e..000000000 --- a/tool/kratos-gen-project/templates/http/api/api.proto +++ /dev/null @@ -1,34 +0,0 @@ -// 定义项目 API 的 proto 文件 可以同时描述 gRPC 和 HTTP API -// protobuf 文件参考: -// - https://developers.google.com/protocol-buffers/ -syntax = "proto3"; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -import "google/protobuf/empty.proto"; -import "google/api/annotations.proto"; - -// package 命名使用 {appid}.{version} 的方式, version 形如 v1, v2 .. -package demo.service.v1; - -// NOTE: 最后请删除这些无用的注释 (゜-゜)つロ - -option go_package = "api"; -option (gogoproto.goproto_getters_all) = false; - -service Demo { - rpc Ping(.google.protobuf.Empty) returns (.google.protobuf.Empty); - rpc SayHello(HelloReq) returns (.google.protobuf.Empty); - rpc SayHelloURL(HelloReq) returns (HelloResp) { - option (google.api.http) = { - get: "/demo/say_hello" - }; - }; -} - -message HelloReq { - string name = 1 [(gogoproto.moretags) = 'form:"name" validate:"required"']; -} - -message HelloResp { - string Content = 1 [(gogoproto.jsontag) = 'content']; -} diff --git a/tool/kratos-gen-project/templates/http/api/client.go.tmpl b/tool/kratos-gen-project/templates/http/api/client.go.tmpl deleted file mode 100644 index f3a3b2083..000000000 --- a/tool/kratos-gen-project/templates/http/api/client.go.tmpl +++ /dev/null @@ -1,25 +0,0 @@ -package api -import ( - "context" - "fmt" - - "github.com/go-kratos/kratos/pkg/net/rpc/warden" - - "google.golang.org/grpc" -) - -// AppID . -const AppID = "TODO: ADD APP ID" - -// NewClient new grpc client -func NewClient(cfg *warden.ClientConfig, opts ...grpc.DialOption) (DemoClient, error) { - client := warden.NewClient(cfg, opts...) 
- cc, err := client.Dial(context.Background(), fmt.Sprintf("discovery://default/%s", AppID)) - if err != nil { - return nil, err - } - return NewDemoClient(cc), nil -} - -// 生成 gRPC 代码 -//go:generate kratos tool protoc --grpc --bm api.proto diff --git a/tool/kratos-gen-project/templates/http/cmd/main.go.tmpl b/tool/kratos-gen-project/templates/http/cmd/main.go.tmpl deleted file mode 100644 index de1aefec2..000000000 --- a/tool/kratos-gen-project/templates/http/cmd/main.go.tmpl +++ /dev/null @@ -1,41 +0,0 @@ -package main - -import ( - "flag" - "os" - "os/signal" - "syscall" - "time" - - "{{.ModPrefix}}{{.Name}}/internal/di" - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/log" -) - -func main() { - flag.Parse() - log.Init(nil) // debug flag: log.dir={path} - defer log.Close() - log.Info("{{.Name}} start") - paladin.Init() - _, closeFunc, err := di.InitApp() - if err != nil { - panic(err) - } - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT) - for { - s := <-c - log.Info("get a signal %s", s.String()) - switch s { - case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT: - closeFunc() - log.Info("{{.Name}} exit") - time.Sleep(time.Second) - return - case syscall.SIGHUP: - default: - return - } - } -} diff --git a/tool/kratos-gen-project/templates/http/configs/application.toml b/tool/kratos-gen-project/templates/http/configs/application.toml deleted file mode 100644 index a42ca6e63..000000000 --- a/tool/kratos-gen-project/templates/http/configs/application.toml +++ /dev/null @@ -1,3 +0,0 @@ - -# This is a TOML document. Boom~ -demoExpire = "24h" diff --git a/tool/kratos-gen-project/templates/http/configs/db.toml b/tool/kratos-gen-project/templates/http/configs/db.toml deleted file mode 100644 index 840bd2e78..000000000 --- a/tool/kratos-gen-project/templates/http/configs/db.toml +++ /dev/null @@ -1,10 +0,0 @@ -[Client] - addr = "127.0.0.1:3306" - dsn = "{user}:{password}@tcp(127.0.0.1:3306)/{database}?timeout=1s&readTimeout=1s&writeTimeout=1s&parseTime=true&loc=Local&charset=utf8mb4,utf8" - readDSN = ["{user}:{password}@tcp(127.0.0.2:3306)/{database}?timeout=1s&readTimeout=1s&writeTimeout=1s&parseTime=true&loc=Local&charset=utf8mb4,utf8","{user}:{password}@tcp(127.0.0.3:3306)/{database}?timeout=1s&readTimeout=1s&writeTimeout=1s&parseTime=true&loc=Local&charset=utf8,utf8mb4"] - active = 20 - idle = 10 - idleTimeout ="4h" - queryTimeout = "200ms" - execTimeout = "300ms" - tranTimeout = "400ms" diff --git a/tool/kratos-gen-project/templates/http/configs/http.toml b/tool/kratos-gen-project/templates/http/configs/http.toml deleted file mode 100644 index 951a03197..000000000 --- a/tool/kratos-gen-project/templates/http/configs/http.toml +++ /dev/null @@ -1,3 +0,0 @@ -[Server] - addr = "0.0.0.0:8000" - timeout = "1s" diff --git a/tool/kratos-gen-project/templates/http/configs/memcache.toml b/tool/kratos-gen-project/templates/http/configs/memcache.toml deleted file mode 100644 index 0f2b900d3..000000000 --- a/tool/kratos-gen-project/templates/http/configs/memcache.toml +++ /dev/null @@ -1,10 +0,0 @@ -[Client] - name = "demo" - proto = "tcp" - addr = "127.0.0.1:11211" - active = 50 - idle = 10 - dialTimeout = "100ms" - readTimeout = "200ms" - writeTimeout = "300ms" - idleTimeout = "80s" diff --git a/tool/kratos-gen-project/templates/http/configs/redis.toml b/tool/kratos-gen-project/templates/http/configs/redis.toml deleted file mode 100644 index d07950de0..000000000 --- 
a/tool/kratos-gen-project/templates/http/configs/redis.toml +++ /dev/null @@ -1,10 +0,0 @@ -[Client] - name = "demo" - proto = "tcp" - addr = "127.0.0.1:6379" - idle = 10 - active = 10 - dialTimeout = "1s" - readTimeout = "1s" - writeTimeout = "1s" - idleTimeout = "10s" diff --git a/tool/kratos-gen-project/templates/http/go.mod.tmpl b/tool/kratos-gen-project/templates/http/go.mod.tmpl deleted file mode 100644 index fc8719d34..000000000 --- a/tool/kratos-gen-project/templates/http/go.mod.tmpl +++ /dev/null @@ -1,12 +0,0 @@ -module {{.Name}} - -go 1.13 - -require ( - github.com/go-kratos/kratos master - github.com/gogo/protobuf v1.2.1 - github.com/golang/protobuf v1.3.2 - golang.org/x/net v0.0.0-20190628185345-da137c7871d7 - google.golang.org/grpc v1.28.1 -) - diff --git a/tool/kratos-gen-project/templates/http/internal/dao/dao.go.tmpl b/tool/kratos-gen-project/templates/http/internal/dao/dao.go.tmpl deleted file mode 100644 index 81ea0b540..000000000 --- a/tool/kratos-gen-project/templates/http/internal/dao/dao.go.tmpl +++ /dev/null @@ -1,69 +0,0 @@ -package dao - -import ( - "context" - "time" - - "{{.ModPrefix}}{{.Name}}/internal/model" - "github.com/go-kratos/kratos/pkg/cache/memcache" - "github.com/go-kratos/kratos/pkg/cache/redis" - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/database/sql" - "github.com/go-kratos/kratos/pkg/sync/pipeline/fanout" - xtime "github.com/go-kratos/kratos/pkg/time" - - "github.com/google/wire" -) - -var Provider = wire.NewSet(New, NewDB, NewRedis, NewMC) - -//go:generate kratos tool genbts -// Dao dao interface -type Dao interface { - Close() - Ping(ctx context.Context) (err error) - // bts: -nullcache=&model.Article{ID:-1} -check_null_code=$!=nil&&$.ID==-1 - RawArticle(c context.Context, id int64) (*model.Article, error) -} - -// dao dao. -type dao struct { - db *sql.DB - redis *redis.Redis - mc *memcache.Memcache - cache *fanout.Fanout - demoExpire int32 -} - -// New new a dao and return. -func New(r *redis.Redis, mc *memcache.Memcache, db *sql.DB) (d Dao, cf func(), err error) { - return newDao(r, mc, db) -} - -func newDao(r *redis.Redis, mc *memcache.Memcache, db *sql.DB) (d *dao, cf func(), err error) { - var cfg struct{ - DemoExpire xtime.Duration - } - if err = paladin.Get("application.toml").UnmarshalTOML(&cfg); err != nil { - return - } - d = &dao{ - db: db, - redis: r, - mc: mc, - cache: fanout.New("cache"), - demoExpire: int32(time.Duration(cfg.DemoExpire) / time.Second), - } - cf = d.Close - return -} - -// Close close the resource. -func (d *dao) Close() { - d.cache.Close() -} - -// Ping ping the resource. 
-func (d *dao) Ping(ctx context.Context) (err error) { - return nil -} diff --git a/tool/kratos-gen-project/templates/http/internal/dao/dao_test.go.tmpl b/tool/kratos-gen-project/templates/http/internal/dao/dao_test.go.tmpl deleted file mode 100644 index 90849e012..000000000 --- a/tool/kratos-gen-project/templates/http/internal/dao/dao_test.go.tmpl +++ /dev/null @@ -1,40 +0,0 @@ -package dao - -import ( - "context" - "flag" - "os" - "testing" - - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/testing/lich" -) - -var d *dao -var ctx = context.Background() - -func TestMain(m *testing.M) { - flag.Set("conf", "../../test") - flag.Set("f", "../../test/docker-compose.yaml") - flag.Parse() - disableLich := os.Getenv("DISABLE_LICH") != "" - if !disableLich { - if err := lich.Setup(); err != nil { - panic(err) - } - } - var err error - if err = paladin.Init(); err != nil { - panic(err) - } - var cf func() - if d, cf, err = newTestDao();err != nil { - panic(err) - } - ret := m.Run() - cf() - if !disableLich { - _ = lich.Teardown() - } - os.Exit(ret) -} diff --git a/tool/kratos-gen-project/templates/http/internal/dao/db.go.tmpl b/tool/kratos-gen-project/templates/http/internal/dao/db.go.tmpl deleted file mode 100644 index b3bde3064..000000000 --- a/tool/kratos-gen-project/templates/http/internal/dao/db.go.tmpl +++ /dev/null @@ -1,30 +0,0 @@ -package dao - -import ( - "context" - - "{{.ModPrefix}}{{.Name}}/internal/model" - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/database/sql" -) - -func NewDB() (db *sql.DB, cf func(), err error) { - var ( - cfg sql.Config - ct paladin.TOML - ) - if err = paladin.Get("db.toml").Unmarshal(&ct); err != nil { - return - } - if err = ct.Get("Client").UnmarshalTOML(&cfg); err != nil { - return - } - db = sql.NewMySQL(&cfg) - cf = func() {db.Close()} - return -} - -func (d *dao) RawArticle(ctx context.Context, id int64) (art *model.Article, err error) { - // get data from db - return -} diff --git a/tool/kratos-gen-project/templates/http/internal/dao/mc.go.tmpl b/tool/kratos-gen-project/templates/http/internal/dao/mc.go.tmpl deleted file mode 100644 index 44cfdf13b..000000000 --- a/tool/kratos-gen-project/templates/http/internal/dao/mc.go.tmpl +++ /dev/null @@ -1,48 +0,0 @@ -package dao - -import ( - "context" - "fmt" - - "{{.ModPrefix}}{{.Name}}/internal/model" - "github.com/go-kratos/kratos/pkg/cache/memcache" - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/log" -) - -//go:generate kratos tool genmc -type _mc interface { - // mc: -key=keyArt -type=get - CacheArticle(c context.Context, id int64) (*model.Article, error) - // mc: -key=keyArt -expire=d.demoExpire - AddCacheArticle(c context.Context, id int64, art *model.Article) (err error) - // mc: -key=keyArt - DeleteArticleCache(c context.Context, id int64) (err error) -} - -func NewMC() (mc *memcache.Memcache, cf func(), err error) { - var ( - cfg memcache.Config - ct paladin.TOML - ) - if err = paladin.Get("memcache.toml").Unmarshal(&ct); err != nil { - return - } - if err = ct.Get("Client").UnmarshalTOML(&cfg); err != nil { - return - } - mc = memcache.New(&cfg) - cf = func() {mc.Close()} - return -} - -func (d *dao) PingMC(ctx context.Context) (err error) { - if err = d.mc.Set(ctx, &memcache.Item{Key: "ping", Value: []byte("pong"), Expiration: 0}); err != nil { - log.Error("conn.Set(PING) error(%v)", err) - } - return -} - -func keyArt(id int64) string { - return fmt.Sprintf("art_%d", id) -} diff --git 
a/tool/kratos-gen-project/templates/http/internal/dao/redis.go.tmpl b/tool/kratos-gen-project/templates/http/internal/dao/redis.go.tmpl deleted file mode 100644 index e3c962eee..000000000 --- a/tool/kratos-gen-project/templates/http/internal/dao/redis.go.tmpl +++ /dev/null @@ -1,32 +0,0 @@ -package dao - -import ( - "context" - - "github.com/go-kratos/kratos/pkg/cache/redis" - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/log" -) - -func NewRedis() (r *redis.Redis, cf func(), err error) { - var ( - cfg redis.Config - ct paladin.Map - ) - if err = paladin.Get("redis.toml").Unmarshal(&ct); err != nil { - return - } - if err = ct.Get("Client").UnmarshalTOML(&cfg); err != nil { - return - } - r = redis.NewRedis(&cfg) - cf = func(){r.Close()} - return -} - -func (d *dao) PingRedis(ctx context.Context) (err error) { - if _, err = d.redis.Do(ctx, "SET", "ping", "pong"); err != nil { - log.Error("conn.Set(PING) error(%v)", err) - } - return -} \ No newline at end of file diff --git a/tool/kratos-gen-project/templates/http/internal/dao/wire.go.tmpl b/tool/kratos-gen-project/templates/http/internal/dao/wire.go.tmpl deleted file mode 100644 index a00b2d39a..000000000 --- a/tool/kratos-gen-project/templates/http/internal/dao/wire.go.tmpl +++ /dev/null @@ -1,13 +0,0 @@ -// +build wireinject -// The build tag makes sure the stub is not built in the final build. - -package dao - -import ( - "github.com/google/wire" -) - -//go:generate kratos tool wire -func newTestDao() (*dao, func(), error) { - panic(wire.Build(newDao, NewDB, NewRedis, NewMC)) -} diff --git a/tool/kratos-gen-project/templates/http/internal/di/app.go.tmpl b/tool/kratos-gen-project/templates/http/internal/di/app.go.tmpl deleted file mode 100644 index b78ce0d37..000000000 --- a/tool/kratos-gen-project/templates/http/internal/di/app.go.tmpl +++ /dev/null @@ -1,32 +0,0 @@ -package di - -import ( - "context" - "time" - - "{{.ModPrefix}}{{.Name}}/internal/service" - - "github.com/go-kratos/kratos/pkg/log" - bm "github.com/go-kratos/kratos/pkg/net/http/blademaster" -) - -//go:generate kratos tool wire -type App struct { - svc *service.Service - http *bm.Engine -} - -func NewApp(svc *service.Service, h *bm.Engine) (app *App, closeFunc func(), err error){ - app = &App{ - svc: svc, - http: h, - } - closeFunc = func() { - ctx, cancel := context.WithTimeout(context.Background(), 35*time.Second) - if err := h.Shutdown(ctx); err != nil { - log.Error("httpSrv.Shutdown error(%v)", err) - } - cancel() - } - return -} diff --git a/tool/kratos-gen-project/templates/http/internal/di/wire.go.tmpl b/tool/kratos-gen-project/templates/http/internal/di/wire.go.tmpl deleted file mode 100644 index 7533269a0..000000000 --- a/tool/kratos-gen-project/templates/http/internal/di/wire.go.tmpl +++ /dev/null @@ -1,17 +0,0 @@ -// +build wireinject -// The build tag makes sure the stub is not built in the final build. 
- -package di - -import ( - "{{.ModPrefix}}{{.Name}}/internal/dao" - "{{.ModPrefix}}{{.Name}}/internal/service" - "{{.ModPrefix}}{{.Name}}/internal/server/http" - - "github.com/google/wire" -) - -//go:generate kratos t wire -func InitApp() (*App, func(), error) { - panic(wire.Build(dao.Provider, service.Provider, http.New, NewApp)) -} \ No newline at end of file diff --git a/tool/kratos-gen-project/templates/http/internal/model/model.go.tmpl b/tool/kratos-gen-project/templates/http/internal/model/model.go.tmpl deleted file mode 100644 index b3fcf7985..000000000 --- a/tool/kratos-gen-project/templates/http/internal/model/model.go.tmpl +++ /dev/null @@ -1,12 +0,0 @@ -package model - -// Kratos hello kratos. -type Kratos struct { - Hello string -} - -type Article struct { - ID int64 - Content string - Author string -} \ No newline at end of file diff --git a/tool/kratos-gen-project/templates/http/internal/server/http/server.go.tmpl b/tool/kratos-gen-project/templates/http/internal/server/http/server.go.tmpl deleted file mode 100644 index 3bec93a28..000000000 --- a/tool/kratos-gen-project/templates/http/internal/server/http/server.go.tmpl +++ /dev/null @@ -1,56 +0,0 @@ -package http - -import ( - "net/http" - - pb "{{.ModPrefix}}{{.Name}}/api" - "{{.ModPrefix}}{{.Name}}/internal/model" - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/log" - bm "github.com/go-kratos/kratos/pkg/net/http/blademaster" -) - -var svc pb.DemoServer - -// New new a bm server. -func New(s pb.DemoServer) (engine *bm.Engine, err error) { - var ( - cfg bm.ServerConfig - ct paladin.TOML - ) - if err = paladin.Get("http.toml").Unmarshal(&ct); err != nil { - return - } - if err = ct.Get("Server").UnmarshalTOML(&cfg); err != nil { - return - } - svc = s - engine = bm.DefaultServer(&cfg) - pb.RegisterDemoBMServer(engine, s) - initRouter(engine) - err = engine.Start() - return -} - -func initRouter(e *bm.Engine) { - e.Ping(ping) - g := e.Group("/{{.Name}}") - { - g.GET("/start", howToStart) - } -} - -func ping(ctx *bm.Context) { - if _, err := svc.Ping(ctx, nil); err != nil { - log.Error("ping error(%v)", err) - ctx.AbortWithStatus(http.StatusServiceUnavailable) - } -} - -// example for http request handler. -func howToStart(c *bm.Context) { - k := &model.Kratos{ - Hello: "Golang 大法好 !!!", - } - c.JSON(k, nil) -} \ No newline at end of file diff --git a/tool/kratos-gen-project/templates/http/internal/service/service.go.tmpl b/tool/kratos-gen-project/templates/http/internal/service/service.go.tmpl deleted file mode 100644 index f3750cc69..000000000 --- a/tool/kratos-gen-project/templates/http/internal/service/service.go.tmpl +++ /dev/null @@ -1,57 +0,0 @@ -package service - -import ( - "context" - "fmt" - - pb "{{.ModPrefix}}{{.Name}}/api" - "{{.ModPrefix}}{{.Name}}/internal/dao" - "github.com/go-kratos/kratos/pkg/conf/paladin" - - "github.com/golang/protobuf/ptypes/empty" - "github.com/google/wire" -) - -var Provider = wire.NewSet(New, wire.Bind(new(pb.DemoServer), new(*Service))) - -// Service service. -type Service struct { - ac *paladin.Map - dao dao.Dao -} - -// New new a service and return. -func New(d dao.Dao) (s *Service, cf func(), err error) { - s = &Service{ - ac: &paladin.TOML{}, - dao: d, - } - cf = s.Close - err = paladin.Watch("application.toml", s.ac) - return -} - -// SayHello grpc demo func. 
-func (s *Service) SayHello(ctx context.Context, req *pb.HelloReq) (reply *empty.Empty, err error) {
- reply = new(empty.Empty)
- fmt.Printf("hello %s", req.Name)
- return
-}
-
-// SayHelloURL bm demo func.
-func (s *Service) SayHelloURL(ctx context.Context, req *pb.HelloReq) (reply *pb.HelloResp, err error) {
- reply = &pb.HelloResp{
- Content: "hello " + req.Name,
- }
- fmt.Printf("hello url %s", req.Name)
- return
-}
-
-// Ping ping the resource.
-func (s *Service) Ping(ctx context.Context, e *empty.Empty) (*empty.Empty, error) {
- return &empty.Empty{}, s.dao.Ping(ctx)
-}
-
-// Close close the resource.
-func (s *Service) Close() {
-}
diff --git a/tool/kratos-gen-project/templates/http/test/0_db.sql b/tool/kratos-gen-project/templates/http/test/0_db.sql
deleted file mode 100644
index e2cbccc19..000000000
--- a/tool/kratos-gen-project/templates/http/test/0_db.sql
+++ /dev/null
@@ -1,11 +0,0 @@
-create database kratos_demo;
-use kratos_demo;
-
-CREATE TABLE `articles` (
- `id` int(11) UNSIGNED NOT NULL AUTO_INCREMENT COMMENT 'primary key ID',
- `title` varchar(64) NOT NULL COMMENT 'title',
- `mtime` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'last modified time',
- `ctime` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'creation time',
- PRIMARY KEY (`id`),
- KEY `ix_mtime` (`mtime`)
-) COMMENT='articles table';
diff --git a/tool/kratos-gen-project/templates/http/test/1_data.sql b/tool/kratos-gen-project/templates/http/test/1_data.sql
deleted file mode 100644
index c7cb57167..000000000
--- a/tool/kratos-gen-project/templates/http/test/1_data.sql
+++ /dev/null
@@ -1,3 +0,0 @@
-use kratos_demo;
-
-INSERT INTO articles(`id`, `title`) VALUES (1, 'title');
diff --git a/tool/kratos-gen-project/templates/http/test/application.toml b/tool/kratos-gen-project/templates/http/test/application.toml
deleted file mode 100644
index a42ca6e63..000000000
--- a/tool/kratos-gen-project/templates/http/test/application.toml
+++ /dev/null
@@ -1,3 +0,0 @@
-
-# This is a TOML document.
Boom~ -demoExpire = "24h" diff --git a/tool/kratos-gen-project/templates/http/test/db.toml b/tool/kratos-gen-project/templates/http/test/db.toml deleted file mode 100644 index 9c7bd8618..000000000 --- a/tool/kratos-gen-project/templates/http/test/db.toml +++ /dev/null @@ -1,8 +0,0 @@ -[Client] - dsn = "root:root@tcp(127.0.0.1:13306)/kratos_demo?timeout=1s&readTimeout=1s&writeTimeout=1s&parseTime=true&loc=Local&charset=utf8mb4,utf8" - active = 20 - idle = 10 - idleTimeout ="4h" - queryTimeout = "200ms" - execTimeout = "300ms" - tranTimeout = "400ms" diff --git a/tool/kratos-gen-project/templates/http/test/docker-compose.yaml b/tool/kratos-gen-project/templates/http/test/docker-compose.yaml deleted file mode 100644 index b044d4247..000000000 --- a/tool/kratos-gen-project/templates/http/test/docker-compose.yaml +++ /dev/null @@ -1,40 +0,0 @@ - version: "3.7" - services: - db: - image: mysql:5.6 - ports: - - 13306:3306 - environment: - - MYSQL_ROOT_PASSWORD=root - - TZ=Asia/Shanghai - volumes: - - .:/docker-entrypoint-initdb.d - command: [ - '--character-set-server=utf8', - '--collation-server=utf8_unicode_ci' - ] - healthcheck: - test: ["CMD", "mysqladmin" ,"ping", "--protocol=tcp"] - timeout: 20s - interval: 1s - retries: 20 - - redis: - image: redis - ports: - - 16379:6379 - healthcheck: - test: ["CMD", "redis-cli","ping"] - interval: 20s - timeout: 1s - retries: 20 - - memcached: - image: memcached - ports: - - 21211:11211 - healthcheck: - test: ["CMD", "echo", "stats", "|", "nc", "127.0.0.1", "11211"] - interval: 20s - timeout: 1s - retries: 20 diff --git a/tool/kratos-gen-project/templates/http/test/http.toml b/tool/kratos-gen-project/templates/http/test/http.toml deleted file mode 100644 index 951a03197..000000000 --- a/tool/kratos-gen-project/templates/http/test/http.toml +++ /dev/null @@ -1,3 +0,0 @@ -[Server] - addr = "0.0.0.0:8000" - timeout = "1s" diff --git a/tool/kratos-gen-project/templates/http/test/memcache.toml b/tool/kratos-gen-project/templates/http/test/memcache.toml deleted file mode 100644 index ad2ce9e09..000000000 --- a/tool/kratos-gen-project/templates/http/test/memcache.toml +++ /dev/null @@ -1,10 +0,0 @@ -[Client] - name = "demo" - proto = "tcp" - addr = "127.0.0.1:21211" - active = 50 - idle = 10 - dialTimeout = "100ms" - readTimeout = "200ms" - writeTimeout = "300ms" - idleTimeout = "80s" diff --git a/tool/kratos-gen-project/templates/http/test/redis.toml b/tool/kratos-gen-project/templates/http/test/redis.toml deleted file mode 100644 index 371f243da..000000000 --- a/tool/kratos-gen-project/templates/http/test/redis.toml +++ /dev/null @@ -1,10 +0,0 @@ -[Client] - name = "demo" - proto = "tcp" - addr = "127.0.0.1:16379" - idle = 10 - active = 10 - dialTimeout = "1s" - readTimeout = "1s" - writeTimeout = "1s" - idleTimeout = "10s" diff --git a/tool/kratos-gen-project/testdata/test_in_gomod.sh b/tool/kratos-gen-project/testdata/test_in_gomod.sh deleted file mode 100755 index b2fb82a96..000000000 --- a/tool/kratos-gen-project/testdata/test_in_gomod.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bash -set -e - -dir=`pwd` - -cd $dir -rm -rf ./a -kratos new a -cd ./a/cmd && go build -if [ $? -ne 0 ]; then - echo "Failed: all" - exit 1 -else - rm -rf ../../a -fi - -cd $dir -rm -rf ./b -kratos new b --grpc -cd ./b/cmd && go build -if [ $? -ne 0 ];then - echo "Failed: --grpc" - exit 1 -else - rm -rf ../../b -fi - -cd $dir -rm -rf ./c -kratos new c --http -cd ./c/cmd && go build -if [ $? 
-ne 0 ]; then - echo "Failed: --http" - exit 1 -else - rm -rf ../../c -fi diff --git a/tool/kratos-gen-project/testdata/test_not_in_gomod.sh b/tool/kratos-gen-project/testdata/test_not_in_gomod.sh deleted file mode 100755 index 2d05a131a..000000000 --- a/tool/kratos-gen-project/testdata/test_not_in_gomod.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env bash -set -e - -dir=/tmp/test-kratos -rm -rf $dir -mkdir $dir - -cd $dir -rm -rf ./a -kratos new a -cd ./a/cmd && go build -if [ $? -ne 0 ]; then - echo "Failed: all" - exit 1 -else - rm -rf ../../a -fi - -cd $dir -rm -rf ./b -kratos new b --grpc -cd ./b/cmd && go build -if [ $? -ne 0 ];then - echo "Failed: --grpc" - exit 1 -else - rm -rf ../../b -fi - -cd $dir -rm -rf ./c -kratos new c --http -cd ./c/cmd && go build -if [ $? -ne 0 ]; then - echo "Failed: --http" - exit 1 -else - rm -rf ../../c -fi - -rm -rf $dir diff --git a/tool/kratos-protoc/bm.go b/tool/kratos-protoc/bm.go deleted file mode 100644 index 284e713da..000000000 --- a/tool/kratos-protoc/bm.go +++ /dev/null @@ -1,23 +0,0 @@ -package main - -import ( - "os/exec" -) - -const ( - _getBMGen = "go get -u github.com/go-kratos/kratos/tool/protobuf/protoc-gen-bm" - _bmProtoc = "protoc --proto_path=%s --proto_path=%s --proto_path=%s --bm_out=:." -) - -func installBMGen() error { - if _, err := exec.LookPath("protoc-gen-bm"); err != nil { - if err := goget(_getBMGen); err != nil { - return err - } - } - return nil -} - -func genBM(files []string) error { - return generate(_bmProtoc, files) -} diff --git a/tool/kratos-protoc/ecode.go b/tool/kratos-protoc/ecode.go deleted file mode 100644 index eed3cf367..000000000 --- a/tool/kratos-protoc/ecode.go +++ /dev/null @@ -1,28 +0,0 @@ -package main - -import ( - "os/exec" -) - -const ( - _getEcodeGen = "go get -u github.com/go-kratos/kratos/tool/protobuf/protoc-gen-ecode" - _ecodeProtoc = "protoc --proto_path=%s --proto_path=%s --proto_path=%s --ecode_out=" + - "Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types," + - "Mgoogle/protobuf/duration.proto=github.com/gogo/protobuf/types," + - "Mgoogle/protobuf/struct.proto=github.com/gogo/protobuf/types," + - "Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types," + - "Mgoogle/protobuf/wrappers.proto=github.com/gogo/protobuf/types:." -) - -func installEcodeGen() error { - if _, err := exec.LookPath("protoc-gen-ecode"); err != nil { - if err := goget(_getEcodeGen); err != nil { - return err - } - } - return nil -} - -func genEcode(files []string) error { - return generate(_ecodeProtoc, files) -} diff --git a/tool/kratos-protoc/grpc.go b/tool/kratos-protoc/grpc.go deleted file mode 100644 index f81cdcd12..000000000 --- a/tool/kratos-protoc/grpc.go +++ /dev/null @@ -1,28 +0,0 @@ -package main - -import ( - "os/exec" -) - -const ( - _getGRPCGen = "go get -u github.com/gogo/protobuf/protoc-gen-gofast" - _grpcProtoc = "protoc --proto_path=%s --proto_path=%s --proto_path=%s --gofast_out=plugins=grpc," + - "Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types," + - "Mgoogle/protobuf/duration.proto=github.com/gogo/protobuf/types," + - "Mgoogle/protobuf/struct.proto=github.com/gogo/protobuf/types," + - "Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types," + - "Mgoogle/protobuf/wrappers.proto=github.com/gogo/protobuf/types:." 
-) - -func installGRPCGen() error { - if _, err := exec.LookPath("protoc-gen-gofast"); err != nil { - if err := goget(_getGRPCGen); err != nil { - return err - } - } - return nil -} - -func genGRPC(files []string) error { - return generate(_grpcProtoc, files) -} diff --git a/tool/kratos-protoc/main.go b/tool/kratos-protoc/main.go deleted file mode 100644 index 981d9857c..000000000 --- a/tool/kratos-protoc/main.go +++ /dev/null @@ -1,42 +0,0 @@ -package main - -import ( - "log" - "os" - - "github.com/urfave/cli/v2" -) - -func main() { - app := cli.NewApp() - app.Name = "protc" - app.Usage = "protobuf生成工具" - app.Flags = []cli.Flag{ - &cli.BoolFlag{ - Name: "bm", - Usage: "whether to use BM for generation", - Destination: &withBM, - }, - &cli.BoolFlag{ - Name: "grpc", - Usage: "whether to use gRPC for generation", - Destination: &withGRPC, - }, - &cli.BoolFlag{ - Name: "swagger", - Usage: "whether to use swagger for generation", - Destination: &withSwagger, - }, - &cli.BoolFlag{ - Name: "ecode", - Usage: "whether to use ecode for generation", - Destination: &withEcode, - }, - } - app.Action = func(c *cli.Context) error { - return protocAction(c) - } - if err := app.Run(os.Args); err != nil { - log.Fatal(err) - } -} diff --git a/tool/kratos-protoc/protoc.go b/tool/kratos-protoc/protoc.go deleted file mode 100644 index 91e3a460d..000000000 --- a/tool/kratos-protoc/protoc.go +++ /dev/null @@ -1,181 +0,0 @@ -package main - -import ( - "errors" - "fmt" - "go/build" - "io/ioutil" - "log" - "os" - "os/exec" - "path" - "path/filepath" - "runtime" - "strings" - - "github.com/urfave/cli/v2" -) - -var ( - withBM bool - withGRPC bool - withSwagger bool - withEcode bool -) - -func protocAction(ctx *cli.Context) (err error) { - if err = checkProtoc(); err != nil { - return err - } - files := ctx.Args().Slice() - if len(files) == 0 { - files, _ = filepath.Glob("*.proto") - } - if !withGRPC && !withBM && !withSwagger && !withEcode { - withBM = true - withGRPC = true - withSwagger = true - withEcode = true - } - if withBM { - if err = installBMGen(); err != nil { - return - } - if err = genBM(files); err != nil { - return - } - } - if withGRPC { - if err = installGRPCGen(); err != nil { - return err - } - if err = genGRPC(files); err != nil { - return - } - } - if withSwagger { - if err = installSwaggerGen(); err != nil { - return - } - if err = genSwagger(files); err != nil { - return - } - } - if withEcode { - if err = installEcodeGen(); err != nil { - return - } - if err = genEcode(files); err != nil { - return - } - } - log.Printf("generate %s success.\n", strings.Join(files, " ")) - return nil -} - -func checkProtoc() error { - if _, err := exec.LookPath("protoc"); err != nil { - switch runtime.GOOS { - case "darwin": - fmt.Println("brew install protobuf") - cmd := exec.Command("brew", "install", "protobuf") - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err = cmd.Run(); err != nil { - return err - } - case "linux": - fmt.Println("snap install --classic protobuf") - cmd := exec.Command("snap", "install", "--classic", "protobuf") - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err = cmd.Run(); err != nil { - return err - } - default: - return errors.New("您还没安装protobuf,请进行手动安装:https://github.com/protocolbuffers/protobuf/releases") - } - } - return nil -} - -func generate(protoc string, files []string) error { - pwd, _ := os.Getwd() - gosrc := path.Join(gopath(), "src") - ext, err := latestKratos() - if err != nil { - return err - } - line := fmt.Sprintf(protoc, gosrc, ext, pwd) - 
log.Println(line, strings.Join(files, " ")) - args := strings.Split(line, " ") - args = append(args, files...) - cmd := exec.Command(args[0], args[1:]...) - cmd.Dir = pwd - cmd.Env = os.Environ() - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - return cmd.Run() -} - -func goget(url string) error { - args := strings.Split(url, " ") - cmd := exec.Command(args[0], args[1:]...) - cmd.Env = os.Environ() - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - log.Println(url) - return cmd.Run() -} - -func latestKratos() (string, error) { - gopath := gopath() - ext := path.Join(gopath, "src/github.com/go-kratos/kratos/third_party") - if _, err := os.Stat(ext); !os.IsNotExist(err) { - return ext, nil - } - ext = path.Join(gopath, "src/kratos/third_party") - if _, err := os.Stat(ext); !os.IsNotExist(err) { - return ext, nil - } - baseMod := path.Join(gopath, "pkg/mod/github.com/go-kratos") - files, err := ioutil.ReadDir(baseMod) - if err != nil { - return "", err - } - for i := len(files) - 1; i >= 0; i-- { - if strings.HasPrefix(files[i].Name(), "kratos@") { - return path.Join(baseMod, files[i].Name(), "third_party"), nil - } - } - return "", errors.New("not found kratos package") -} - -func gopath() (gp string) { - gopaths := strings.Split(os.Getenv("GOPATH"), string(filepath.ListSeparator)) - - if len(gopaths) == 1 && gopaths[0] != "" { - return gopaths[0] - } - pwd, err := os.Getwd() - if err != nil { - return - } - abspwd, err := filepath.Abs(pwd) - if err != nil { - return - } - for _, gopath := range gopaths { - if gopath == "" { - continue - } - absgp, err := filepath.Abs(gopath) - if err != nil { - return - } - if strings.HasPrefix(abspwd, absgp) { - return absgp - } - } - return build.Default.GOPATH -} diff --git a/tool/kratos-protoc/swagger.go b/tool/kratos-protoc/swagger.go deleted file mode 100644 index b0f529a5a..000000000 --- a/tool/kratos-protoc/swagger.go +++ /dev/null @@ -1,28 +0,0 @@ -package main - -import ( - "os/exec" -) - -const ( - _getSwaggerGen = "go get -u github.com/go-kratos/kratos/tool/protobuf/protoc-gen-bswagger" - _swaggerProtoc = "protoc --proto_path=%s --proto_path=%s --proto_path=%s --bswagger_out=" + - "Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types," + - "Mgoogle/protobuf/duration.proto=github.com/gogo/protobuf/types," + - "Mgoogle/protobuf/struct.proto=github.com/gogo/protobuf/types," + - "Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types," + - "Mgoogle/protobuf/wrappers.proto=github.com/gogo/protobuf/types:." -) - -func installSwaggerGen() error { - if _, err := exec.LookPath("protoc-gen-bswagger"); err != nil { - if err := goget(_getSwaggerGen); err != nil { - return err - } - } - return nil -} - -func genSwagger(files []string) error { - return generate(_swaggerProtoc, files) -} diff --git a/tool/kratos/README.MD b/tool/kratos/README.MD deleted file mode 100644 index b5e6da240..000000000 --- a/tool/kratos/README.MD +++ /dev/null @@ -1,14 +0,0 @@ -# kratos - -## 项目简介 -kratos 工具 - -## 安装 - -`go get -u github.com/go-kratos/kratos/tool/kratos` - -## 使用说明 - -### 参数 - -kratos -h diff --git a/tool/kratos/build.go b/tool/kratos/build.go deleted file mode 100644 index f0e8ec291..000000000 --- a/tool/kratos/build.go +++ /dev/null @@ -1,47 +0,0 @@ -package main - -import ( - "fmt" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - - "github.com/urfave/cli/v2" -) - -func buildAction(c *cli.Context) error { - base, err := os.Getwd() - if err != nil { - panic(err) - } - args := append([]string{"build"}, c.Args().Slice()...) 
- cmd := exec.Command("go", args...) - cmd.Dir = buildDir(base, "cmd", 5) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - fmt.Printf("directory: %s\n", cmd.Dir) - fmt.Printf("kratos: %s\n", Version) - if err := cmd.Run(); err != nil { - panic(err) - } - fmt.Println("build success.") - return nil -} - -func buildDir(base string, cmd string, n int) string { - dirs, err := ioutil.ReadDir(base) - if err != nil { - panic(err) - } - for _, d := range dirs { - if d.IsDir() && d.Name() == cmd { - return path.Join(base, cmd) - } - } - if n <= 1 { - return base - } - return buildDir(filepath.Dir(base), cmd, n-1) -} diff --git a/tool/kratos/env.go b/tool/kratos/env.go deleted file mode 100644 index 8447076fc..000000000 --- a/tool/kratos/env.go +++ /dev/null @@ -1,78 +0,0 @@ -package main - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" -) - -var envCache struct { - once sync.Once - m map[string]string -} - -// EnvFile returns the name of the Go environment configuration file. -func EnvFile() (string, error) { - if file := os.Getenv("GOENV"); file != "" { - if file == "off" { - return "", fmt.Errorf("GOENV=off") - } - return file, nil - } - dir, err := os.UserConfigDir() - if err != nil { - return "", err - } - if dir == "" { - return "", fmt.Errorf("missing user-config dir") - } - return filepath.Join(dir, "go/env"), nil -} - -func initEnvCache() { - envCache.m = make(map[string]string) - file, _ := EnvFile() - if file == "" { - return - } - data, err := ioutil.ReadFile(file) - if err != nil { - return - } - - for len(data) > 0 { - // Get next line. - line := data - i := bytes.IndexByte(data, '\n') - if i >= 0 { - line, data = line[:i], data[i+1:] - } else { - data = nil - } - - i = bytes.IndexByte(line, '=') - if i < 0 || line[0] < 'A' || 'Z' < line[0] { - // Line is missing = (or empty) or a comment or not a valid env name. Ignore. - // (This should not happen, since the file should be maintained almost - // exclusively by "go env -w", but better to silently ignore than to make - // the go command unusable just because somehow the env file has - // gotten corrupted.) - continue - } - key, val := line[:i], line[i+1:] - envCache.m[string(key)] = string(val) - } -} - -// Getenv gets the value from env or configuration. 
-func Getenv(key string) string { - val := os.Getenv(key) - if val != "" { - return val - } - envCache.once.Do(initEnvCache) - return envCache.m[key] -} diff --git a/tool/kratos/main.go b/tool/kratos/main.go deleted file mode 100644 index a6d87bcac..000000000 --- a/tool/kratos/main.go +++ /dev/null @@ -1,65 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/urfave/cli/v2" -) - -func main() { - app := cli.NewApp() - app.Name = "kratos" - app.Usage = "kratos工具集" - app.Version = Version - app.Commands = []*cli.Command{ - { - Name: "new", - Aliases: []string{"n"}, - Usage: "创建新项目", - Action: runNew, - SkipFlagParsing: true, - }, - { - Name: "build", - Aliases: []string{"b"}, - Usage: "kratos build", - Action: buildAction, - }, - { - Name: "run", - Aliases: []string{"r"}, - Usage: "kratos run", - Action: runAction, - }, - { - Name: "tool", - Aliases: []string{"t"}, - Usage: "kratos tool", - Action: toolAction, - SkipFlagParsing: true, - }, - { - Name: "version", - Aliases: []string{"v"}, - Usage: "kratos version", - Action: func(c *cli.Context) error { - fmt.Println(getVersion()) - return nil - }, - }, - { - Name: "self-upgrade", - Usage: "kratos self-upgrade", - Action: upgradeAction, - }, - } - err := app.Run(os.Args) - if err != nil { - panic(err) - } -} - -func runNew(ctx *cli.Context) error { - return installAndRun("genproject", ctx.Args().Slice()) -} diff --git a/tool/kratos/run.go b/tool/kratos/run.go deleted file mode 100644 index e0ae3be99..000000000 --- a/tool/kratos/run.go +++ /dev/null @@ -1,28 +0,0 @@ -package main - -import ( - "os" - "os/exec" - "path" - "path/filepath" - - "github.com/urfave/cli/v2" -) - -func runAction(c *cli.Context) error { - base, err := os.Getwd() - if err != nil { - panic(err) - } - dir := buildDir(base, "cmd", 5) - conf := path.Join(filepath.Dir(dir), "configs") - args := append([]string{"run", "main.go", "-conf", conf}, c.Args().Slice()...) - cmd := exec.Command("go", args...) - cmd.Dir = dir - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - panic(err) - } - return nil -} diff --git a/tool/kratos/tool.go b/tool/kratos/tool.go deleted file mode 100644 index 0c38224c2..000000000 --- a/tool/kratos/tool.go +++ /dev/null @@ -1,284 +0,0 @@ -package main - -import ( - "fmt" - "go/build" - "os" - "os/exec" - "path" - "path/filepath" - "runtime" - "sort" - "strings" - "time" - - "github.com/fatih/color" - "github.com/urfave/cli/v2" -) - -const ( - toolDoc = "https://go-kratos.github.io/kratos/#/kratos-tool" -) - -// Tool is kratos tool. 
-type Tool struct { - Name string `json:"name"` - Alias string `json:"alias"` - BuildTime time.Time `json:"build_time"` - Install string `json:"install"` - Requirements []string `json:"requirements"` - Dir string `json:"dir"` - Summary string `json:"summary"` - Platform []string `json:"platform"` - Author string `json:"author"` - URL string `json:"url"` - Hidden bool `json:"hidden"` - requires []*Tool -} - -func toolAction(c *cli.Context) (err error) { - if c.NArg() == 0 { - sort.Slice(toolIndexs, func(i, j int) bool { return toolIndexs[i].BuildTime.After(toolIndexs[j].BuildTime) }) - for _, t := range toolIndexs { - if t.Hidden { - continue - } - updateTime := t.BuildTime.Format("2006/01/02") - fmt.Printf("%s%s: %s Author(%s) [%s]\n", color.HiMagentaString(t.Name), getNotice(t), color.HiCyanString(t.Summary), t.Author, updateTime) - } - fmt.Println("\n安装工具: kratos tool install demo") - fmt.Println("执行工具: kratos tool demo") - fmt.Println("安装全部工具: kratos tool install all") - fmt.Println("全部升级: kratos tool upgrade all") - fmt.Println("\n详细文档:", toolDoc) - return - } - commond := c.Args().First() - switch commond { - case "upgrade": - upgradeAll() - return - case "install": - name := c.Args().Get(1) - if name == "all" { - installAll() - } else { - install(name) - } - return - case "check_install": - if e := checkInstall(c.Args().Get(1)); e != nil { - fmt.Fprintf(os.Stderr, fmt.Sprintf("%v\n", e)) - } - return - } - if e := installAndRun(commond, c.Args().Slice()[1:]); e != nil { - fmt.Fprintf(os.Stderr, fmt.Sprintf("%v\n", e)) - } - return -} - -func installAndRun(name string, args []string) (err error) { - for _, t := range toolList() { - if name == t.Name { - if !t.installed() || t.needUpdated() { - t.install() - } - pwd, _ := os.Getwd() - err = runTool(t.Name, pwd, t.toolPath(), args) - return - } - } - return fmt.Errorf("找不到%s", name) -} - -func checkInstall(name string) (err error) { - for _, t := range toolList() { - if name == t.Name { - if !t.installed() || t.needUpdated() { - t.install() - } - return - } - } - return fmt.Errorf("找不到%s", name) -} - -func upgradeAction(c *cli.Context) error { - install("kratos") - return nil -} - -func install(name string) { - if name == "" { - fmt.Fprintf(os.Stderr, color.HiRedString("请填写要安装的工具名称\n")) - return - } - for _, t := range toolList() { - if name == t.Name { - t.install() - return - } - } - fmt.Fprintf(os.Stderr, color.HiRedString("安装失败 找不到 %s\n", name)) - return -} - -func installAll() { - for _, t := range toolList() { - if t.Install != "" { - t.install() - } - } -} - -func upgradeAll() { - for _, t := range toolList() { - if t.needUpdated() { - t.install() - } - } -} - -func toolList() (tools []*Tool) { - return toolIndexs -} - -func getNotice(t *Tool) (notice string) { - if !t.supportOS() || t.Install == "" { - return - } - notice = color.HiGreenString("(未安装)") - if t.installed() { - notice = color.HiBlueString("(已安装)") - if t.needUpdated() { - notice = color.RedString("(有更新)") - } - } - return -} - -func (t Tool) needUpdated() bool { - for _, r := range t.requires { - if r.needUpdated() { - return true - } - } - if !t.supportOS() || t.Install == "" { - return false - } - if f, err := os.Stat(t.toolPath()); err == nil { - if t.BuildTime.After(f.ModTime()) { - return true - } - } - return false -} - -func (t Tool) toolPath() string { - name := t.Alias - if name == "" { - name = t.Name - } - gobin := Getenv("GOBIN") - if runtime.GOOS == "windows" { - name += ".exe" - } - if gobin != "" { - return filepath.Join(gobin, name) - } - return 
filepath.Join(gopath(), "bin", name) -} - -func (t Tool) installed() bool { - _, err := os.Stat(t.toolPath()) - return err == nil -} - -func (t Tool) supportOS() bool { - for _, p := range t.Platform { - if strings.ToLower(p) == runtime.GOOS { - return true - } - } - return false -} - -func (t Tool) install() { - if t.Install == "" { - fmt.Fprintf(os.Stderr, color.RedString("%s: 自动安装失败详情请查看文档:%s\n", t.Name, toolDoc)) - return - } - fmt.Println(t.Install) - cmds := strings.Split(t.Install, " ") - if len(cmds) > 0 { - if err := runTool(t.Name, path.Dir(t.toolPath()), cmds[0], cmds[1:]); err == nil { - color.Green("%s: 安装成功!", t.Name) - } - } -} - -func (t Tool) updated() bool { - if !t.supportOS() || t.Install == "" { - return false - } - if f, err := os.Stat(t.toolPath()); err == nil { - if t.BuildTime.After(f.ModTime()) { - return true - } - } - return false -} - -func gopath() (gp string) { - gopaths := strings.Split(Getenv("GOPATH"), string(filepath.ListSeparator)) - - if len(gopaths) == 1 && gopaths[0] != "" { - return gopaths[0] - } - pwd, err := os.Getwd() - if err != nil { - return - } - abspwd, err := filepath.Abs(pwd) - if err != nil { - return - } - for _, gopath := range gopaths { - if gopath == "" { - continue - } - absgp, err := filepath.Abs(gopath) - if err != nil { - return - } - if strings.HasPrefix(abspwd, absgp) { - return absgp - } - } - return build.Default.GOPATH -} - -func runTool(name, dir, cmd string, args []string) (err error) { - toolCmd := &exec.Cmd{ - Path: cmd, - Args: append([]string{cmd}, args...), - Dir: dir, - Stdin: os.Stdin, - Stdout: os.Stdout, - Stderr: os.Stderr, - Env: os.Environ(), - } - if filepath.Base(cmd) == cmd { - var lp string - if lp, err = exec.LookPath(cmd); err == nil { - toolCmd.Path = lp - } - } - if err = toolCmd.Run(); err != nil { - if e, ok := err.(*exec.ExitError); !ok || !e.Exited() { - fmt.Fprintf(os.Stderr, "运行 %s 出错: %v\n", name, err) - } - } - return -} diff --git a/tool/kratos/tool_index.go b/tool/kratos/tool_index.go deleted file mode 100644 index 46b1f9d7a..000000000 --- a/tool/kratos/tool_index.go +++ /dev/null @@ -1,88 +0,0 @@ -package main - -import "time" - -var toolIndexs = []*Tool{ - { - Name: "kratos", - Alias: "kratos", - BuildTime: time.Date(2020, 3, 31, 0, 0, 0, 0, time.Local), - Install: "go get -u github.com/go-kratos/kratos/tool/kratos@" + Version, - Summary: "Kratos工具集本体", - Platform: []string{"darwin", "linux", "windows"}, - Author: "kratos", - Hidden: true, - }, - { - Name: "protoc", - Alias: "kratos-protoc", - BuildTime: time.Date(2020, 3, 31, 0, 0, 0, 0, time.Local), - Install: "go get -u github.com/go-kratos/kratos/tool/kratos-protoc@" + Version, - Summary: "快速方便生成pb.go的protoc封装,windows、Linux请先安装protoc工具", - Platform: []string{"darwin", "linux", "windows"}, - Author: "kratos", - }, - { - Name: "genbts", - Alias: "kratos-gen-bts", - BuildTime: time.Date(2020, 3, 31, 0, 0, 0, 0, time.Local), - Install: "go get -u github.com/go-kratos/kratos/tool/kratos-gen-bts@" + Version, - Summary: "缓存回源逻辑代码生成器", - Platform: []string{"darwin", "linux", "windows"}, - Author: "kratos", - }, - { - Name: "genmc", - Alias: "kratos-gen-mc", - BuildTime: time.Date(2020, 3, 31, 0, 0, 0, 0, time.Local), - Install: "go get -u github.com/go-kratos/kratos/tool/kratos-gen-mc@" + Version, - Summary: "mc缓存代码生成", - Platform: []string{"darwin", "linux", "windows"}, - Author: "kratos", - }, - { - Name: "genproject", - Alias: "kratos-gen-project", - Install: "go get -u github.com/go-kratos/kratos/tool/kratos-gen-project@" + Version, - 
BuildTime: time.Date(2020, 3, 31, 0, 0, 0, 0, time.Local), - Platform: []string{"darwin", "linux", "windows"}, - Hidden: true, - Requirements: []string{"wire"}, - }, - { - Name: "testgen", - Alias: "testgen", - BuildTime: time.Date(2020, 3, 31, 0, 0, 0, 0, time.Local), - Install: "go get -u github.com/go-kratos/kratos/tool/testgen@" + Version, - Summary: "测试代码生成", - Platform: []string{"darwin", "linux", "windows"}, - Author: "kratos", - }, - { - Name: "testcli", - Alias: "testcli", - BuildTime: time.Date(2020, 3, 31, 0, 0, 0, 0, time.Local), - Install: "go get -u github.com/go-kratos/kratos/tool/testcli@" + Version, - Summary: "测试代码运行", - Platform: []string{"darwin", "linux", "windows"}, - Author: "kratos", - }, - // third party - { - Name: "wire", - Alias: "wire", - BuildTime: time.Date(2020, 3, 31, 0, 0, 0, 0, time.Local), - Install: "go get -u github.com/google/wire/cmd/wire", - Platform: []string{"darwin", "linux", "windows"}, - Hidden: true, - }, - { - Name: "swagger", - Alias: "swagger", - BuildTime: time.Date(2020, 3, 31, 0, 0, 0, 0, time.Local), - Install: "go get -u github.com/go-swagger/go-swagger/cmd/swagger", - Summary: "swagger api文档", - Platform: []string{"darwin", "linux", "windows"}, - Author: "goswagger.io", - }, -} diff --git a/tool/kratos/version.go b/tool/kratos/version.go deleted file mode 100644 index b6120fb02..000000000 --- a/tool/kratos/version.go +++ /dev/null @@ -1,44 +0,0 @@ -package main - -import ( - "bytes" - "runtime" - "text/template" -) - -var ( - // Version is version - Version = "v0.6.0" - // BuildTime is BuildTime - BuildTime = "2020/12/4" -) - -// VersionOptions include version -type VersionOptions struct { - GitCommit string - Version string - BuildTime string - GoVersion string - Os string - Arch string -} - -var versionTemplate = ` Version: {{.Version}} - Go version: {{.GoVersion}} - Built: {{.BuildTime}} - OS/Arch: {{.Os}}/{{.Arch}} - ` - -func getVersion() string { - var doc bytes.Buffer - vo := VersionOptions{ - Version: Version, - BuildTime: BuildTime, - GoVersion: runtime.Version(), - Os: runtime.GOOS, - Arch: runtime.GOARCH, - } - tmpl, _ := template.New("version").Parse(versionTemplate) - tmpl.Execute(&doc, vo) - return doc.String() -} diff --git a/tool/pkg/common.go b/tool/pkg/common.go deleted file mode 100644 index 0941203f1..000000000 --- a/tool/pkg/common.go +++ /dev/null @@ -1,151 +0,0 @@ -package pkg - -import ( - "fmt" - "go/ast" - "go/format" - "go/parser" - "go/token" - "io/ioutil" - "log" - "os" - "regexp" - "strings" -) - -// Source source -type Source struct { - Fset *token.FileSet - Src string - F *ast.File -} - -// NewSource new source -func NewSource(src string) *Source { - s := &Source{ - Fset: token.NewFileSet(), - Src: src, - } - f, err := parser.ParseFile(s.Fset, "", src, 0) - if err != nil { - log.Fatal("无法解析源文件") - } - s.F = f - return s -} - -// ExprString expr string -func (s *Source) ExprString(typ ast.Expr) string { - fset := s.Fset - s1 := fset.Position(typ.Pos()).Offset - s2 := fset.Position(typ.End()).Offset - return s.Src[s1:s2] -} - -// pkgPath package path -func (s *Source) pkgPath(name string) (res string) { - for _, im := range s.F.Imports { - if im.Name != nil && im.Name.Name == name { - return im.Path.Value - } - } - for _, im := range s.F.Imports { - if strings.HasSuffix(im.Path.Value, name+"\"") { - return im.Path.Value - } - } - return -} - -// GetDef get define code -func (s *Source) GetDef(name string) string { - c := s.F.Scope.Lookup(name).Decl.(*ast.TypeSpec).Type.(*ast.InterfaceType) - s1 := 
s.Fset.Position(c.Pos()).Offset - s2 := s.Fset.Position(c.End()).Offset - line := s.Fset.Position(c.Pos()).Line - lines := []string{strings.Split(s.Src, "\n")[line-1]} - for _, l := range strings.Split(s.Src[s1:s2], "\n")[1:] { - lines = append(lines, "\t"+l) - } - return strings.Join(lines, "\n") -} - -// RegexpReplace replace regexp -func RegexpReplace(reg, src, temp string) string { - result := []byte{} - pattern := regexp.MustCompile(reg) - for _, submatches := range pattern.FindAllStringSubmatchIndex(src, -1) { - result = pattern.ExpandString(result, temp, src, submatches) - } - return string(result) -} - -// formatPackage format package -func formatPackage(name, path string) (res string) { - if path != "" { - if strings.HasSuffix(path, name+"\"") { - res = path - return - } - res = fmt.Sprintf("%s %s", name, path) - } - return -} - -// SourceText get source file text -func SourceText() string { - file := os.Getenv("GOFILE") - data, err := ioutil.ReadFile(file) - if err != nil { - log.Fatal("请使用go generate执行", file) - } - return string(data) -} - -// FormatCode format code -func FormatCode(source string) string { - src, err := format.Source([]byte(source)) - if err != nil { - // Should never happen, but can arise when developing this code. - // The user can compile the output to see the error. - log.Printf("warning: 输出文件不合法: %s", err) - log.Printf("warning: 详细错误请编译查看") - return source - } - return string(src) -} - -// Packages get import packages -func (s *Source) Packages(f *ast.Field) (res []string) { - fs := f.Type.(*ast.FuncType).Params.List - if f.Type.(*ast.FuncType).Results != nil { - fs = append(fs, f.Type.(*ast.FuncType).Results.List...) - } - var types []string - resMap := make(map[string]bool) - for _, field := range fs { - if p, ok := field.Type.(*ast.MapType); ok { - types = append(types, s.ExprString(p.Key)) - types = append(types, s.ExprString(p.Value)) - } else if p, ok := field.Type.(*ast.ArrayType); ok { - types = append(types, s.ExprString(p.Elt)) - } else { - types = append(types, s.ExprString(field.Type)) - } - } - - for _, t := range types { - name := RegexpReplace(`(?P\w+)\.\w+`, t, "$pkg") - if name == "" { - continue - } - pkg := formatPackage(name, s.pkgPath(name)) - if !resMap[pkg] { - resMap[pkg] = true - } - } - for pkg := range resMap { - res = append(res, pkg) - } - return -} diff --git a/tool/protobuf/pkg/extensions/gogoproto/gogo.pb.go b/tool/protobuf/pkg/extensions/gogoproto/gogo.pb.go deleted file mode 100644 index 7819be39d..000000000 --- a/tool/protobuf/pkg/extensions/gogoproto/gogo.pb.go +++ /dev/null @@ -1,818 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: gogo.proto - -package gogoproto // import "github.com/go-kratos/kratos/tool/protobuf/pkg/extensions/gogoproto" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 62001, - Name: "gogoproto.goproto_enum_prefix", - Tag: "varint,62001,opt,name=goproto_enum_prefix,json=goprotoEnumPrefix", - Filename: "gogo.proto", -} - -var E_GoprotoEnumStringer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 62021, - Name: "gogoproto.goproto_enum_stringer", - Tag: "varint,62021,opt,name=goproto_enum_stringer,json=goprotoEnumStringer", - Filename: "gogo.proto", -} - -var E_EnumStringer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 62022, - Name: "gogoproto.enum_stringer", - Tag: "varint,62022,opt,name=enum_stringer,json=enumStringer", - Filename: "gogo.proto", -} - -var E_EnumCustomname = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumOptions)(nil), - ExtensionType: (*string)(nil), - Field: 62023, - Name: "gogoproto.enum_customname", - Tag: "bytes,62023,opt,name=enum_customname,json=enumCustomname", - Filename: "gogo.proto", -} - -var E_Enumdecl = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 62024, - Name: "gogoproto.enumdecl", - Tag: "varint,62024,opt,name=enumdecl", - Filename: "gogo.proto", -} - -var E_EnumvalueCustomname = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumValueOptions)(nil), - ExtensionType: (*string)(nil), - Field: 66001, - Name: "gogoproto.enumvalue_customname", - Tag: "bytes,66001,opt,name=enumvalue_customname,json=enumvalueCustomname", - Filename: "gogo.proto", -} - -var E_GoprotoGettersAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63001, - Name: "gogoproto.goproto_getters_all", - Tag: "varint,63001,opt,name=goproto_getters_all,json=goprotoGettersAll", - Filename: "gogo.proto", -} - -var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63002, - Name: "gogoproto.goproto_enum_prefix_all", - Tag: "varint,63002,opt,name=goproto_enum_prefix_all,json=goprotoEnumPrefixAll", - Filename: "gogo.proto", -} - -var E_GoprotoStringerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63003, - Name: "gogoproto.goproto_stringer_all", - Tag: "varint,63003,opt,name=goproto_stringer_all,json=goprotoStringerAll", - Filename: "gogo.proto", -} - -var E_VerboseEqualAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63004, - Name: "gogoproto.verbose_equal_all", - Tag: "varint,63004,opt,name=verbose_equal_all,json=verboseEqualAll", - Filename: "gogo.proto", -} - -var E_FaceAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63005, - Name: "gogoproto.face_all", - Tag: "varint,63005,opt,name=face_all,json=faceAll", - Filename: "gogo.proto", -} - -var E_GostringAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63006, - Name: "gogoproto.gostring_all", - Tag: "varint,63006,opt,name=gostring_all,json=gostringAll", - Filename: "gogo.proto", -} - -var E_PopulateAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: 
(*bool)(nil), - Field: 63007, - Name: "gogoproto.populate_all", - Tag: "varint,63007,opt,name=populate_all,json=populateAll", - Filename: "gogo.proto", -} - -var E_StringerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63008, - Name: "gogoproto.stringer_all", - Tag: "varint,63008,opt,name=stringer_all,json=stringerAll", - Filename: "gogo.proto", -} - -var E_OnlyoneAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63009, - Name: "gogoproto.onlyone_all", - Tag: "varint,63009,opt,name=onlyone_all,json=onlyoneAll", - Filename: "gogo.proto", -} - -var E_EqualAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63013, - Name: "gogoproto.equal_all", - Tag: "varint,63013,opt,name=equal_all,json=equalAll", - Filename: "gogo.proto", -} - -var E_DescriptionAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63014, - Name: "gogoproto.description_all", - Tag: "varint,63014,opt,name=description_all,json=descriptionAll", - Filename: "gogo.proto", -} - -var E_TestgenAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63015, - Name: "gogoproto.testgen_all", - Tag: "varint,63015,opt,name=testgen_all,json=testgenAll", - Filename: "gogo.proto", -} - -var E_BenchgenAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63016, - Name: "gogoproto.benchgen_all", - Tag: "varint,63016,opt,name=benchgen_all,json=benchgenAll", - Filename: "gogo.proto", -} - -var E_MarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63017, - Name: "gogoproto.marshaler_all", - Tag: "varint,63017,opt,name=marshaler_all,json=marshalerAll", - Filename: "gogo.proto", -} - -var E_UnmarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63018, - Name: "gogoproto.unmarshaler_all", - Tag: "varint,63018,opt,name=unmarshaler_all,json=unmarshalerAll", - Filename: "gogo.proto", -} - -var E_StableMarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63019, - Name: "gogoproto.stable_marshaler_all", - Tag: "varint,63019,opt,name=stable_marshaler_all,json=stableMarshalerAll", - Filename: "gogo.proto", -} - -var E_SizerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63020, - Name: "gogoproto.sizer_all", - Tag: "varint,63020,opt,name=sizer_all,json=sizerAll", - Filename: "gogo.proto", -} - -var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63021, - Name: "gogoproto.goproto_enum_stringer_all", - Tag: "varint,63021,opt,name=goproto_enum_stringer_all,json=goprotoEnumStringerAll", - Filename: "gogo.proto", -} - -var E_EnumStringerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63022, - Name: "gogoproto.enum_stringer_all", - Tag: "varint,63022,opt,name=enum_stringer_all,json=enumStringerAll", - Filename: "gogo.proto", -} - -var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: 
(*bool)(nil), - Field: 63023, - Name: "gogoproto.unsafe_marshaler_all", - Tag: "varint,63023,opt,name=unsafe_marshaler_all,json=unsafeMarshalerAll", - Filename: "gogo.proto", -} - -var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63024, - Name: "gogoproto.unsafe_unmarshaler_all", - Tag: "varint,63024,opt,name=unsafe_unmarshaler_all,json=unsafeUnmarshalerAll", - Filename: "gogo.proto", -} - -var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63025, - Name: "gogoproto.goproto_extensions_map_all", - Tag: "varint,63025,opt,name=goproto_extensions_map_all,json=goprotoExtensionsMapAll", - Filename: "gogo.proto", -} - -var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63026, - Name: "gogoproto.goproto_unrecognized_all", - Tag: "varint,63026,opt,name=goproto_unrecognized_all,json=goprotoUnrecognizedAll", - Filename: "gogo.proto", -} - -var E_GogoprotoImport = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63027, - Name: "gogoproto.gogoproto_import", - Tag: "varint,63027,opt,name=gogoproto_import,json=gogoprotoImport", - Filename: "gogo.proto", -} - -var E_ProtosizerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63028, - Name: "gogoproto.protosizer_all", - Tag: "varint,63028,opt,name=protosizer_all,json=protosizerAll", - Filename: "gogo.proto", -} - -var E_CompareAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63029, - Name: "gogoproto.compare_all", - Tag: "varint,63029,opt,name=compare_all,json=compareAll", - Filename: "gogo.proto", -} - -var E_TypedeclAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63030, - Name: "gogoproto.typedecl_all", - Tag: "varint,63030,opt,name=typedecl_all,json=typedeclAll", - Filename: "gogo.proto", -} - -var E_EnumdeclAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63031, - Name: "gogoproto.enumdecl_all", - Tag: "varint,63031,opt,name=enumdecl_all,json=enumdeclAll", - Filename: "gogo.proto", -} - -var E_GoprotoRegistration = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63032, - Name: "gogoproto.goproto_registration", - Tag: "varint,63032,opt,name=goproto_registration,json=goprotoRegistration", - Filename: "gogo.proto", -} - -var E_MessagenameAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63033, - Name: "gogoproto.messagename_all", - Tag: "varint,63033,opt,name=messagename_all,json=messagenameAll", - Filename: "gogo.proto", -} - -var E_GoprotoGetters = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64001, - Name: "gogoproto.goproto_getters", - Tag: "varint,64001,opt,name=goproto_getters,json=goprotoGetters", - Filename: "gogo.proto", -} - -var E_GoprotoStringer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64003, - Name: "gogoproto.goproto_stringer", - Tag: 
"varint,64003,opt,name=goproto_stringer,json=goprotoStringer", - Filename: "gogo.proto", -} - -var E_VerboseEqual = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64004, - Name: "gogoproto.verbose_equal", - Tag: "varint,64004,opt,name=verbose_equal,json=verboseEqual", - Filename: "gogo.proto", -} - -var E_Face = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64005, - Name: "gogoproto.face", - Tag: "varint,64005,opt,name=face", - Filename: "gogo.proto", -} - -var E_Gostring = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64006, - Name: "gogoproto.gostring", - Tag: "varint,64006,opt,name=gostring", - Filename: "gogo.proto", -} - -var E_Populate = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64007, - Name: "gogoproto.populate", - Tag: "varint,64007,opt,name=populate", - Filename: "gogo.proto", -} - -var E_Stringer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 67008, - Name: "gogoproto.stringer", - Tag: "varint,67008,opt,name=stringer", - Filename: "gogo.proto", -} - -var E_Onlyone = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64009, - Name: "gogoproto.onlyone", - Tag: "varint,64009,opt,name=onlyone", - Filename: "gogo.proto", -} - -var E_Equal = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64013, - Name: "gogoproto.equal", - Tag: "varint,64013,opt,name=equal", - Filename: "gogo.proto", -} - -var E_Description = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64014, - Name: "gogoproto.description", - Tag: "varint,64014,opt,name=description", - Filename: "gogo.proto", -} - -var E_Testgen = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64015, - Name: "gogoproto.testgen", - Tag: "varint,64015,opt,name=testgen", - Filename: "gogo.proto", -} - -var E_Benchgen = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64016, - Name: "gogoproto.benchgen", - Tag: "varint,64016,opt,name=benchgen", - Filename: "gogo.proto", -} - -var E_Marshaler = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64017, - Name: "gogoproto.marshaler", - Tag: "varint,64017,opt,name=marshaler", - Filename: "gogo.proto", -} - -var E_Unmarshaler = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64018, - Name: "gogoproto.unmarshaler", - Tag: "varint,64018,opt,name=unmarshaler", - Filename: "gogo.proto", -} - -var E_StableMarshaler = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64019, - Name: "gogoproto.stable_marshaler", - Tag: "varint,64019,opt,name=stable_marshaler,json=stableMarshaler", - Filename: "gogo.proto", -} - -var E_Sizer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64020, - Name: "gogoproto.sizer", - Tag: "varint,64020,opt,name=sizer", - Filename: "gogo.proto", -} - -var E_UnsafeMarshaler = 
&proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64023, - Name: "gogoproto.unsafe_marshaler", - Tag: "varint,64023,opt,name=unsafe_marshaler,json=unsafeMarshaler", - Filename: "gogo.proto", -} - -var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64024, - Name: "gogoproto.unsafe_unmarshaler", - Tag: "varint,64024,opt,name=unsafe_unmarshaler,json=unsafeUnmarshaler", - Filename: "gogo.proto", -} - -var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64025, - Name: "gogoproto.goproto_extensions_map", - Tag: "varint,64025,opt,name=goproto_extensions_map,json=goprotoExtensionsMap", - Filename: "gogo.proto", -} - -var E_GoprotoUnrecognized = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64026, - Name: "gogoproto.goproto_unrecognized", - Tag: "varint,64026,opt,name=goproto_unrecognized,json=goprotoUnrecognized", - Filename: "gogo.proto", -} - -var E_Protosizer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64028, - Name: "gogoproto.protosizer", - Tag: "varint,64028,opt,name=protosizer", - Filename: "gogo.proto", -} - -var E_Compare = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64029, - Name: "gogoproto.compare", - Tag: "varint,64029,opt,name=compare", - Filename: "gogo.proto", -} - -var E_Typedecl = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64030, - Name: "gogoproto.typedecl", - Tag: "varint,64030,opt,name=typedecl", - Filename: "gogo.proto", -} - -var E_Messagename = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64033, - Name: "gogoproto.messagename", - Tag: "varint,64033,opt,name=messagename", - Filename: "gogo.proto", -} - -var E_Nullable = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 65001, - Name: "gogoproto.nullable", - Tag: "varint,65001,opt,name=nullable", - Filename: "gogo.proto", -} - -var E_Embed = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 65002, - Name: "gogoproto.embed", - Tag: "varint,65002,opt,name=embed", - Filename: "gogo.proto", -} - -var E_Customtype = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65003, - Name: "gogoproto.customtype", - Tag: "bytes,65003,opt,name=customtype", - Filename: "gogo.proto", -} - -var E_Customname = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65004, - Name: "gogoproto.customname", - Tag: "bytes,65004,opt,name=customname", - Filename: "gogo.proto", -} - -var E_Jsontag = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65005, - Name: "gogoproto.jsontag", - Tag: "bytes,65005,opt,name=jsontag", - Filename: "gogo.proto", -} - -var E_Moretags = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65006, - Name: "gogoproto.moretags", - Tag: "bytes,65006,opt,name=moretags", - Filename: 
"gogo.proto", -} - -var E_Casttype = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65007, - Name: "gogoproto.casttype", - Tag: "bytes,65007,opt,name=casttype", - Filename: "gogo.proto", -} - -var E_Castkey = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65008, - Name: "gogoproto.castkey", - Tag: "bytes,65008,opt,name=castkey", - Filename: "gogo.proto", -} - -var E_Castvalue = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65009, - Name: "gogoproto.castvalue", - Tag: "bytes,65009,opt,name=castvalue", - Filename: "gogo.proto", -} - -var E_Stdtime = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 65010, - Name: "gogoproto.stdtime", - Tag: "varint,65010,opt,name=stdtime", - Filename: "gogo.proto", -} - -var E_Stdduration = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 65011, - Name: "gogoproto.stdduration", - Tag: "varint,65011,opt,name=stdduration", - Filename: "gogo.proto", -} - -func init() { - proto.RegisterExtension(E_GoprotoEnumPrefix) - proto.RegisterExtension(E_GoprotoEnumStringer) - proto.RegisterExtension(E_EnumStringer) - proto.RegisterExtension(E_EnumCustomname) - proto.RegisterExtension(E_Enumdecl) - proto.RegisterExtension(E_EnumvalueCustomname) - proto.RegisterExtension(E_GoprotoGettersAll) - proto.RegisterExtension(E_GoprotoEnumPrefixAll) - proto.RegisterExtension(E_GoprotoStringerAll) - proto.RegisterExtension(E_VerboseEqualAll) - proto.RegisterExtension(E_FaceAll) - proto.RegisterExtension(E_GostringAll) - proto.RegisterExtension(E_PopulateAll) - proto.RegisterExtension(E_StringerAll) - proto.RegisterExtension(E_OnlyoneAll) - proto.RegisterExtension(E_EqualAll) - proto.RegisterExtension(E_DescriptionAll) - proto.RegisterExtension(E_TestgenAll) - proto.RegisterExtension(E_BenchgenAll) - proto.RegisterExtension(E_MarshalerAll) - proto.RegisterExtension(E_UnmarshalerAll) - proto.RegisterExtension(E_StableMarshalerAll) - proto.RegisterExtension(E_SizerAll) - proto.RegisterExtension(E_GoprotoEnumStringerAll) - proto.RegisterExtension(E_EnumStringerAll) - proto.RegisterExtension(E_UnsafeMarshalerAll) - proto.RegisterExtension(E_UnsafeUnmarshalerAll) - proto.RegisterExtension(E_GoprotoExtensionsMapAll) - proto.RegisterExtension(E_GoprotoUnrecognizedAll) - proto.RegisterExtension(E_GogoprotoImport) - proto.RegisterExtension(E_ProtosizerAll) - proto.RegisterExtension(E_CompareAll) - proto.RegisterExtension(E_TypedeclAll) - proto.RegisterExtension(E_EnumdeclAll) - proto.RegisterExtension(E_GoprotoRegistration) - proto.RegisterExtension(E_MessagenameAll) - proto.RegisterExtension(E_GoprotoGetters) - proto.RegisterExtension(E_GoprotoStringer) - proto.RegisterExtension(E_VerboseEqual) - proto.RegisterExtension(E_Face) - proto.RegisterExtension(E_Gostring) - proto.RegisterExtension(E_Populate) - proto.RegisterExtension(E_Stringer) - proto.RegisterExtension(E_Onlyone) - proto.RegisterExtension(E_Equal) - proto.RegisterExtension(E_Description) - proto.RegisterExtension(E_Testgen) - proto.RegisterExtension(E_Benchgen) - proto.RegisterExtension(E_Marshaler) - proto.RegisterExtension(E_Unmarshaler) - proto.RegisterExtension(E_StableMarshaler) - proto.RegisterExtension(E_Sizer) - proto.RegisterExtension(E_UnsafeMarshaler) - proto.RegisterExtension(E_UnsafeUnmarshaler) - 
proto.RegisterExtension(E_GoprotoExtensionsMap) - proto.RegisterExtension(E_GoprotoUnrecognized) - proto.RegisterExtension(E_Protosizer) - proto.RegisterExtension(E_Compare) - proto.RegisterExtension(E_Typedecl) - proto.RegisterExtension(E_Messagename) - proto.RegisterExtension(E_Nullable) - proto.RegisterExtension(E_Embed) - proto.RegisterExtension(E_Customtype) - proto.RegisterExtension(E_Customname) - proto.RegisterExtension(E_Jsontag) - proto.RegisterExtension(E_Moretags) - proto.RegisterExtension(E_Casttype) - proto.RegisterExtension(E_Castkey) - proto.RegisterExtension(E_Castvalue) - proto.RegisterExtension(E_Stdtime) - proto.RegisterExtension(E_Stdduration) -} - -func init() { proto.RegisterFile("gogo.proto", fileDescriptor_gogo_e935c22a8aa82c87) } - -var fileDescriptor_gogo_e935c22a8aa82c87 = []byte{ - // 1260 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0xc9, 0x6f, 0x1c, 0x45, - 0x17, 0xc0, 0xf5, 0xe9, 0x4b, 0x14, 0xcf, 0xf3, 0x86, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xb8, 0x71, - 0xc1, 0x73, 0x40, 0x11, 0x4a, 0x59, 0x91, 0xe5, 0x58, 0x8e, 0x15, 0x84, 0xc1, 0x98, 0xd8, 0x6c, - 0x87, 0x51, 0xcf, 0x4c, 0xb9, 0x33, 0xa4, 0xbb, 0xab, 0xe9, 0xae, 0x8e, 0xe2, 0xdc, 0x50, 0x58, - 0x84, 0x10, 0x3b, 0x12, 0x24, 0x24, 0x81, 0x1c, 0xd8, 0xd7, 0xb0, 0x73, 0xe3, 0xc2, 0x72, 0xe5, - 0x7f, 0xe0, 0x02, 0x98, 0xdd, 0x37, 0x5f, 0xd0, 0xeb, 0x7e, 0xaf, 0xa7, 0x66, 0x3c, 0x52, 0xd5, - 0xdc, 0xda, 0x76, 0xfd, 0x7e, 0xae, 0x7e, 0xaf, 0xea, 0xbd, 0x37, 0x03, 0xe0, 0x2b, 0x5f, 0xcd, - 0xc4, 0x89, 0xd2, 0xaa, 0x5a, 0xc1, 0xe7, 0xfc, 0xf1, 0xc0, 0x41, 0x5f, 0x29, 0x3f, 0x90, 0xb5, - 0xfc, 0xa7, 0x46, 0xb6, 0x51, 0x6b, 0xc9, 0xb4, 0x99, 0xb4, 0x63, 0xad, 0x92, 0x62, 0xb1, 0xb8, - 0x0b, 0x26, 0x69, 0x71, 0x5d, 0x46, 0x59, 0x58, 0x8f, 0x13, 0xb9, 0xd1, 0x3e, 0x53, 0xbd, 0x61, - 0xa6, 0x20, 0x67, 0x98, 0x9c, 0x59, 0x8c, 0xb2, 0xf0, 0xee, 0x58, 0xb7, 0x55, 0x94, 0xee, 0xbf, - 0xfa, 0xf3, 0xff, 0x0f, 0xfe, 0xef, 0x96, 0xa1, 0xd5, 0x09, 0x42, 0xf1, 0x6f, 0x2b, 0x39, 0x28, - 0x56, 0xe1, 0xda, 0x2e, 0x5f, 0xaa, 0x93, 0x76, 0xe4, 0xcb, 0xc4, 0x62, 0xfc, 0x8e, 0x8c, 0x93, - 0x86, 0xf1, 0x5e, 0x42, 0xc5, 0x02, 0x8c, 0x0e, 0xe2, 0xfa, 0x9e, 0x5c, 0x23, 0xd2, 0x94, 0x2c, - 0xc1, 0x78, 0x2e, 0x69, 0x66, 0xa9, 0x56, 0x61, 0xe4, 0x85, 0xd2, 0xa2, 0xf9, 0x21, 0xd7, 0x54, - 0x56, 0xc7, 0x10, 0x5b, 0x28, 0x29, 0x21, 0x60, 0x08, 0x7f, 0xd3, 0x92, 0xcd, 0xc0, 0x62, 0xf8, - 0x91, 0x36, 0x52, 0xae, 0x17, 0xeb, 0x30, 0x85, 0xcf, 0xa7, 0xbd, 0x20, 0x93, 0xe6, 0x4e, 0x6e, - 0xee, 0xeb, 0x59, 0xc7, 0x65, 0x2c, 0xfb, 0xe9, 0xdc, 0x9e, 0x7c, 0x3b, 0x93, 0xa5, 0xc0, 0xd8, - 0x93, 0x91, 0x45, 0x5f, 0x6a, 0x2d, 0x93, 0xb4, 0xee, 0x05, 0xfd, 0xb6, 0x77, 0xac, 0x1d, 0x94, - 0xc6, 0xf3, 0x5b, 0xdd, 0x59, 0x5c, 0x2a, 0xc8, 0xf9, 0x20, 0x10, 0x6b, 0x70, 0x5d, 0x9f, 0x53, - 0xe1, 0xe0, 0xbc, 0x40, 0xce, 0xa9, 0x5d, 0x27, 0x03, 0xb5, 0x2b, 0xc0, 0xbf, 0x2f, 0x73, 0xe9, - 0xe0, 0x7c, 0x8d, 0x9c, 0x55, 0x62, 0x39, 0xa5, 0x68, 0xbc, 0x03, 0x26, 0x4e, 0xcb, 0xa4, 0xa1, - 0x52, 0x59, 0x97, 0x8f, 0x64, 0x5e, 0xe0, 0xa0, 0xbb, 0x48, 0xba, 0x71, 0x02, 0x17, 0x91, 0x43, - 0xd7, 0x61, 0x18, 0xda, 0xf0, 0x9a, 0xd2, 0x41, 0x71, 0x89, 0x14, 0xfb, 0x70, 0x3d, 0xa2, 0xf3, - 0x30, 0xe2, 0xab, 0xe2, 0x95, 0x1c, 0xf0, 0xcb, 0x84, 0x0f, 0x33, 0x43, 0x8a, 0x58, 0xc5, 0x59, - 0xe0, 0x69, 0x97, 0x1d, 0xbc, 0xce, 0x0a, 0x66, 0x48, 0x31, 0x40, 0x58, 0xdf, 0x60, 0x45, 0x6a, - 0xc4, 0x73, 0x0e, 0x86, 0x55, 0x14, 0x6c, 0xaa, 0xc8, 0x65, 0x13, 0x57, 0xc8, 0x00, 0x84, 0xa0, - 0x60, 0x16, 0x2a, 0xae, 0x89, 0x78, 
0x73, 0x8b, 0xaf, 0x07, 0x67, 0x60, 0x09, 0xc6, 0xb9, 0x40, - 0xb5, 0x55, 0xe4, 0xa0, 0x78, 0x8b, 0x14, 0x63, 0x06, 0x46, 0xaf, 0xa1, 0x65, 0xaa, 0x7d, 0xe9, - 0x22, 0x79, 0x9b, 0x5f, 0x83, 0x10, 0x0a, 0x65, 0x43, 0x46, 0xcd, 0x93, 0x6e, 0x86, 0x77, 0x38, - 0x94, 0xcc, 0xa0, 0x62, 0x01, 0x46, 0x43, 0x2f, 0x49, 0x4f, 0x7a, 0x81, 0x53, 0x3a, 0xde, 0x25, - 0xc7, 0x48, 0x09, 0x51, 0x44, 0xb2, 0x68, 0x10, 0xcd, 0x7b, 0x1c, 0x11, 0x03, 0xa3, 0xab, 0x97, - 0x6a, 0xaf, 0x11, 0xc8, 0xfa, 0x20, 0xb6, 0xf7, 0xf9, 0xea, 0x15, 0xec, 0xb2, 0x69, 0x9c, 0x85, - 0x4a, 0xda, 0x3e, 0xeb, 0xa4, 0xf9, 0x80, 0x33, 0x9d, 0x03, 0x08, 0x3f, 0x00, 0xd7, 0xf7, 0x6d, - 0x13, 0x0e, 0xb2, 0x0f, 0x49, 0x36, 0xdd, 0xa7, 0x55, 0x50, 0x49, 0x18, 0x54, 0xf9, 0x11, 0x97, - 0x04, 0xd9, 0xe3, 0x5a, 0x81, 0xa9, 0x2c, 0x4a, 0xbd, 0x8d, 0xc1, 0xa2, 0xf6, 0x31, 0x47, 0xad, - 0x60, 0xbb, 0xa2, 0x76, 0x02, 0xa6, 0xc9, 0x38, 0x58, 0x5e, 0x3f, 0xe1, 0xc2, 0x5a, 0xd0, 0x6b, - 0xdd, 0xd9, 0x7d, 0x08, 0x0e, 0x94, 0xe1, 0x3c, 0xa3, 0x65, 0x94, 0x22, 0x53, 0x0f, 0xbd, 0xd8, - 0xc1, 0x7c, 0x95, 0xcc, 0x5c, 0xf1, 0x17, 0x4b, 0xc1, 0xb2, 0x17, 0xa3, 0xfc, 0x7e, 0xd8, 0xcf, - 0xf2, 0x2c, 0x4a, 0x64, 0x53, 0xf9, 0x51, 0xfb, 0xac, 0x6c, 0x39, 0xa8, 0x3f, 0xed, 0x49, 0xd5, - 0x9a, 0x81, 0xa3, 0xf9, 0x38, 0x5c, 0x53, 0xce, 0x2a, 0xf5, 0x76, 0x18, 0xab, 0x44, 0x5b, 0x8c, - 0x9f, 0x71, 0xa6, 0x4a, 0xee, 0x78, 0x8e, 0x89, 0x45, 0x18, 0xcb, 0x7f, 0x74, 0x3d, 0x92, 0x9f, - 0x93, 0x68, 0xb4, 0x43, 0x51, 0xe1, 0x68, 0xaa, 0x30, 0xf6, 0x12, 0x97, 0xfa, 0xf7, 0x05, 0x17, - 0x0e, 0x42, 0xa8, 0x70, 0xe8, 0xcd, 0x58, 0x62, 0xb7, 0x77, 0x30, 0x7c, 0xc9, 0x85, 0x83, 0x19, - 0x52, 0xf0, 0xc0, 0xe0, 0xa0, 0xf8, 0x8a, 0x15, 0xcc, 0xa0, 0xe2, 0x9e, 0x4e, 0xa3, 0x4d, 0xa4, - 0xdf, 0x4e, 0x75, 0xe2, 0xe1, 0x6a, 0x8b, 0xea, 0xeb, 0xad, 0xee, 0x21, 0x6c, 0xd5, 0x40, 0xb1, - 0x12, 0x85, 0x32, 0x4d, 0x3d, 0x5f, 0xe2, 0xc4, 0xe1, 0xb0, 0xb1, 0x6f, 0xb8, 0x12, 0x19, 0x58, - 0x71, 0x3f, 0xc7, 0x7b, 0x66, 0x95, 0xea, 0x4d, 0xbb, 0x44, 0xcb, 0x05, 0xc3, 0xae, 0x47, 0xb7, - 0xc9, 0xd5, 0x3d, 0xaa, 0x88, 0x3b, 0xf1, 0x00, 0x75, 0x0f, 0x14, 0x76, 0xd9, 0xb9, 0xed, 0xf2, - 0x0c, 0x75, 0xcd, 0x13, 0xe2, 0x18, 0x8c, 0x76, 0x0d, 0x13, 0x76, 0xd5, 0x63, 0xa4, 0x1a, 0x31, - 0x67, 0x09, 0x71, 0x08, 0xf6, 0xe0, 0x60, 0x60, 0xc7, 0x1f, 0x27, 0x3c, 0x5f, 0x2e, 0x8e, 0xc0, - 0x10, 0x0f, 0x04, 0x76, 0xf4, 0x09, 0x42, 0x4b, 0x04, 0x71, 0x1e, 0x06, 0xec, 0xf8, 0x93, 0x8c, - 0x33, 0x82, 0xb8, 0x7b, 0x08, 0xbf, 0x7d, 0x7a, 0x0f, 0x15, 0x74, 0x8e, 0xdd, 0x2c, 0xec, 0xa3, - 0x29, 0xc0, 0x4e, 0x3f, 0x45, 0xff, 0x9c, 0x09, 0x71, 0x3b, 0xec, 0x75, 0x0c, 0xf8, 0x33, 0x84, - 0x16, 0xeb, 0xc5, 0x02, 0x0c, 0x1b, 0x9d, 0xdf, 0x8e, 0x3f, 0x4b, 0xb8, 0x49, 0xe1, 0xd6, 0xa9, - 0xf3, 0xdb, 0x05, 0xcf, 0xf1, 0xd6, 0x89, 0xc0, 0xb0, 0x71, 0xd3, 0xb7, 0xd3, 0xcf, 0x73, 0xd4, - 0x19, 0x11, 0x73, 0x50, 0x29, 0x0b, 0xb9, 0x9d, 0x7f, 0x81, 0xf8, 0x0e, 0x83, 0x11, 0x30, 0x1a, - 0x89, 0x5d, 0xf1, 0x22, 0x47, 0xc0, 0xa0, 0xf0, 0x1a, 0xf5, 0x0e, 0x07, 0x76, 0xd3, 0x4b, 0x7c, - 0x8d, 0x7a, 0x66, 0x03, 0xcc, 0x66, 0x5e, 0x4f, 0xed, 0x8a, 0x97, 0x39, 0x9b, 0xf9, 0x7a, 0xdc, - 0x46, 0x6f, 0xb7, 0xb5, 0x3b, 0x5e, 0xe1, 0x6d, 0xf4, 0x34, 0x5b, 0xb1, 0x02, 0xd5, 0xdd, 0x9d, - 0xd6, 0xee, 0x7b, 0x95, 0x7c, 0x13, 0xbb, 0x1a, 0xad, 0xb8, 0x0f, 0xa6, 0xfb, 0x77, 0x59, 0xbb, - 0xf5, 0xfc, 0x76, 0xcf, 0xe7, 0x22, 0xb3, 0xc9, 0x8a, 0x13, 0x9d, 0x72, 0x6d, 0x76, 0x58, 0xbb, - 0xf6, 0xc2, 0x76, 0x77, 0xc5, 0x36, 0x1b, 0xac, 0x98, 0x07, 0xe8, 0x34, 0x37, 0xbb, 0xeb, 0x22, - 0xb9, 0x0c, 0x08, 0xaf, 0x06, 0xf5, 0x36, 0x3b, 0x7f, 0x89, 
0xaf, 0x06, 0x11, 0x78, 0x35, 0xb8, - 0xad, 0xd9, 0xe9, 0xcb, 0x7c, 0x35, 0x18, 0xc1, 0x93, 0x6d, 0x74, 0x0e, 0xbb, 0xe1, 0x0a, 0x9f, - 0x6c, 0x83, 0x12, 0xb3, 0x30, 0x14, 0x65, 0x41, 0x80, 0x07, 0xb4, 0x7a, 0x63, 0x9f, 0x76, 0x25, - 0x83, 0x16, 0xf3, 0xbf, 0xec, 0xd0, 0x0e, 0x18, 0x10, 0x87, 0x60, 0xaf, 0x0c, 0x1b, 0xb2, 0x65, - 0x23, 0x7f, 0xdd, 0xe1, 0xa2, 0x84, 0xab, 0xc5, 0x1c, 0x40, 0xf1, 0xd1, 0x1e, 0x5f, 0xc5, 0xc6, - 0xfe, 0xb6, 0x53, 0x7c, 0xcb, 0x60, 0x20, 0x1d, 0x41, 0xfe, 0xe2, 0x16, 0xc1, 0x56, 0xb7, 0x20, - 0x7f, 0xeb, 0xc3, 0xb0, 0xef, 0xe1, 0x54, 0x45, 0xda, 0xf3, 0x6d, 0xf4, 0xef, 0x44, 0xf3, 0x7a, - 0x0c, 0x58, 0xa8, 0x12, 0xa9, 0x3d, 0x3f, 0xb5, 0xb1, 0x7f, 0x10, 0x5b, 0x02, 0x08, 0x37, 0xbd, - 0x54, 0xbb, 0xbc, 0xf7, 0x9f, 0x0c, 0x33, 0x80, 0x9b, 0xc6, 0xe7, 0x53, 0x72, 0xd3, 0xc6, 0xfe, - 0xc5, 0x9b, 0xa6, 0xf5, 0xe2, 0x08, 0x54, 0xf0, 0x31, 0xff, 0x56, 0xc4, 0x06, 0xff, 0x4d, 0x70, - 0x87, 0xc0, 0xff, 0x9c, 0xea, 0x96, 0x6e, 0xdb, 0x83, 0xfd, 0x0f, 0x65, 0x9a, 0xd7, 0x8b, 0x79, - 0x18, 0x4e, 0x75, 0xab, 0x95, 0xd1, 0x7c, 0x65, 0xc1, 0xff, 0xdd, 0x29, 0x3f, 0x72, 0x97, 0xcc, - 0xd1, 0x75, 0x98, 0x6c, 0xaa, 0xb0, 0x17, 0x3c, 0x0a, 0x4b, 0x6a, 0x49, 0xad, 0xe4, 0x57, 0xf1, - 0xc1, 0xdb, 0x7c, 0x75, 0x6b, 0x53, 0x85, 0xa1, 0x8a, 0x6a, 0x5e, 0x1c, 0xd7, 0xb4, 0x52, 0x41, - 0xad, 0x11, 0xe6, 0x4b, 0x6b, 0xf1, 0x29, 0xbf, 0xd6, 0xa9, 0x46, 0xb5, 0x72, 0x2e, 0xfe, 0x2f, - 0x00, 0x00, 0xff, 0xff, 0x97, 0xb1, 0x98, 0x88, 0x13, 0x14, 0x00, 0x00, -} diff --git a/tool/protobuf/pkg/extensions/gogoproto/gogo.pb.golden b/tool/protobuf/pkg/extensions/gogoproto/gogo.pb.golden deleted file mode 100644 index f6502e4b9..000000000 --- a/tool/protobuf/pkg/extensions/gogoproto/gogo.pb.golden +++ /dev/null @@ -1,45 +0,0 @@ -// Code generated by protoc-gen-go. -// source: gogo.proto -// DO NOT EDIT! - -package gogoproto - -import proto "github.com/gogo/protobuf/proto" -import json "encoding/json" -import math "math" -import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" - -// Reference proto, json, and math imports to suppress error if they are not otherwise used. -var _ = proto.Marshal -var _ = &json.SyntaxError{} -var _ = math.Inf - -var E_Nullable = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 51235, - Name: "gogoproto.nullable", - Tag: "varint,51235,opt,name=nullable", -} - -var E_Embed = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 51236, - Name: "gogoproto.embed", - Tag: "varint,51236,opt,name=embed", -} - -var E_Customtype = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 51237, - Name: "gogoproto.customtype", - Tag: "bytes,51237,opt,name=customtype", -} - -func init() { - proto.RegisterExtension(E_Nullable) - proto.RegisterExtension(E_Embed) - proto.RegisterExtension(E_Customtype) -} diff --git a/tool/protobuf/pkg/extensions/gogoproto/gogo.proto b/tool/protobuf/pkg/extensions/gogoproto/gogo.proto deleted file mode 100644 index d7a8b3709..000000000 --- a/tool/protobuf/pkg/extensions/gogoproto/gogo.proto +++ /dev/null @@ -1,136 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
-// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto2"; -package gogoproto; - -import "google/protobuf/descriptor.proto"; - -option java_package = "com.google.protobuf"; -option java_outer_classname = "GoGoProtos"; -option go_package = "github.com/go-kratos/kratos/tool/protobuf/pkg/extensions/gogoproto"; - -extend google.protobuf.EnumOptions { - optional bool goproto_enum_prefix = 62001; - optional bool goproto_enum_stringer = 62021; - optional bool enum_stringer = 62022; - optional string enum_customname = 62023; - optional bool enumdecl = 62024; -} - -extend google.protobuf.EnumValueOptions { - optional string enumvalue_customname = 66001; -} - -extend google.protobuf.FileOptions { - optional bool goproto_getters_all = 63001; - optional bool goproto_enum_prefix_all = 63002; - optional bool goproto_stringer_all = 63003; - optional bool verbose_equal_all = 63004; - optional bool face_all = 63005; - optional bool gostring_all = 63006; - optional bool populate_all = 63007; - optional bool stringer_all = 63008; - optional bool onlyone_all = 63009; - - optional bool equal_all = 63013; - optional bool description_all = 63014; - optional bool testgen_all = 63015; - optional bool benchgen_all = 63016; - optional bool marshaler_all = 63017; - optional bool unmarshaler_all = 63018; - optional bool stable_marshaler_all = 63019; - - optional bool sizer_all = 63020; - - optional bool goproto_enum_stringer_all = 63021; - optional bool enum_stringer_all = 63022; - - optional bool unsafe_marshaler_all = 63023; - optional bool unsafe_unmarshaler_all = 63024; - - optional bool goproto_extensions_map_all = 63025; - optional bool goproto_unrecognized_all = 63026; - optional bool gogoproto_import = 63027; - optional bool protosizer_all = 63028; - optional bool compare_all = 63029; - optional bool typedecl_all = 63030; - optional bool enumdecl_all = 63031; - - optional bool goproto_registration = 63032; - optional bool messagename_all = 63033; -} - -extend google.protobuf.MessageOptions { - optional bool goproto_getters = 64001; - optional bool goproto_stringer = 64003; - optional bool verbose_equal = 64004; - optional bool face = 64005; - optional bool gostring = 64006; - optional bool populate = 64007; - 
optional bool stringer = 67008; - optional bool onlyone = 64009; - - optional bool equal = 64013; - optional bool description = 64014; - optional bool testgen = 64015; - optional bool benchgen = 64016; - optional bool marshaler = 64017; - optional bool unmarshaler = 64018; - optional bool stable_marshaler = 64019; - - optional bool sizer = 64020; - - optional bool unsafe_marshaler = 64023; - optional bool unsafe_unmarshaler = 64024; - - optional bool goproto_extensions_map = 64025; - optional bool goproto_unrecognized = 64026; - - optional bool protosizer = 64028; - optional bool compare = 64029; - - optional bool typedecl = 64030; - - optional bool messagename = 64033; -} - -extend google.protobuf.FieldOptions { - optional bool nullable = 65001; - optional bool embed = 65002; - optional string customtype = 65003; - optional string customname = 65004; - optional string jsontag = 65005; - optional string moretags = 65006; - optional string casttype = 65007; - optional string castkey = 65008; - optional string castvalue = 65009; - - optional bool stdtime = 65010; - optional bool stdduration = 65011; -} diff --git a/tool/protobuf/pkg/extensions/google/api/annotations.proto b/tool/protobuf/pkg/extensions/google/api/annotations.proto deleted file mode 100644 index 85c361b47..000000000 --- a/tool/protobuf/pkg/extensions/google/api/annotations.proto +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api; - -import "google/api/http.proto"; -import "google/protobuf/descriptor.proto"; - -option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; -option java_multiple_files = true; -option java_outer_classname = "AnnotationsProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -extend google.protobuf.MethodOptions { - // See `HttpRule`. - HttpRule http = 72295728; -} diff --git a/tool/protobuf/pkg/extensions/google/api/http.proto b/tool/protobuf/pkg/extensions/google/api/http.proto deleted file mode 100644 index 2bd3a19bf..000000000 --- a/tool/protobuf/pkg/extensions/google/api/http.proto +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package google.api; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; -option java_multiple_files = true; -option java_outer_classname = "HttpProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - - -// Defines the HTTP configuration for an API service. It contains a list of -// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method -// to one or more HTTP REST API methods. -message Http { - // A list of HTTP configuration rules that apply to individual API methods. - // - // **NOTE:** All service configuration rules follow "last one wins" order. - repeated HttpRule rules = 1; - - // When set to true, URL path parameters will be fully URI-decoded except in - // cases of single segment matches in reserved expansion, where "%2F" will be - // left encoded. - // - // The default behavior is to not decode RFC 6570 reserved characters in multi - // segment matches. - bool fully_decode_reserved_expansion = 2; -} - -// `HttpRule` defines the mapping of an RPC method to one or more HTTP -// REST API methods. The mapping specifies how different portions of the RPC -// request message are mapped to URL path, URL query parameters, and -// HTTP request body. The mapping is typically specified as an -// `google.api.http` annotation on the RPC method, -// see "google/api/annotations.proto" for details. -// -// The mapping consists of a field specifying the path template and -// method kind. The path template can refer to fields in the request -// message, as in the example below which describes a REST GET -// operation on a resource collection of messages: -// -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}"; -// } -// } -// message GetMessageRequest { -// message SubMessage { -// string subfield = 1; -// } -// string message_id = 1; // mapped to the URL -// SubMessage sub = 2; // `sub.subfield` is url-mapped -// } -// message Message { -// string text = 1; // content of the resource -// } -// -// The same http annotation can alternatively be expressed inside the -// `GRPC API Configuration` YAML file. -// -// http: -// rules: -// - selector: .Messaging.GetMessage -// get: /v1/messages/{message_id}/{sub.subfield} -// -// This definition enables an automatic, bidirectional mapping of HTTP -// JSON to RPC. Example: -// -// HTTP | RPC -// -----|----- -// `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))` -// -// In general, not only fields but also field paths can be referenced -// from a path pattern. Fields mapped to the path pattern cannot be -// repeated and must have a primitive (non-message) type. -// -// Any fields in the request message which are not bound by the path -// pattern automatically become (optional) HTTP query -// parameters.
Assume the following definition of the request message: -// -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http).get = "/v1/messages/{message_id}"; -// } -// } -// message GetMessageRequest { -// message SubMessage { -// string subfield = 1; -// } -// string message_id = 1; // mapped to the URL -// int64 revision = 2; // becomes a parameter -// SubMessage sub = 3; // `sub.subfield` becomes a parameter -// } -// -// -// This enables a HTTP JSON to RPC mapping as below: -// -// HTTP | RPC -// -----|----- -// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))` -// -// Note that fields which are mapped to HTTP parameters must have a -// primitive type or a repeated primitive type. Message types are not -// allowed. In the case of a repeated type, the parameter can be -// repeated in the URL, as in `...?param=A¶m=B`. -// -// For HTTP method kinds which allow a request body, the `body` field -// specifies the mapping. Consider a REST update method on the -// message resource collection: -// -// -// service Messaging { -// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { -// option (google.api.http) = { -// put: "/v1/messages/{message_id}" -// body: "message" -// }; -// } -// } -// message UpdateMessageRequest { -// string message_id = 1; // mapped to the URL -// Message message = 2; // mapped to the body -// } -// -// -// The following HTTP JSON to RPC mapping is enabled, where the -// representation of the JSON in the request body is determined by -// protos JSON encoding: -// -// HTTP | RPC -// -----|----- -// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })` -// -// The special name `*` can be used in the body mapping to define that -// every field not bound by the path template should be mapped to the -// request body. This enables the following alternative definition of -// the update method: -// -// service Messaging { -// rpc UpdateMessage(Message) returns (Message) { -// option (google.api.http) = { -// put: "/v1/messages/{message_id}" -// body: "*" -// }; -// } -// } -// message Message { -// string message_id = 1; -// string text = 2; -// } -// -// -// The following HTTP JSON to RPC mapping is enabled: -// -// HTTP | RPC -// -----|----- -// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")` -// -// Note that when using `*` in the body mapping, it is not possible to -// have HTTP parameters, as all fields not bound by the path end in -// the body. This makes this option more rarely used in practice of -// defining REST APIs. The common usage of `*` is in custom methods -// which don't use the URL at all for transferring data. -// -// It is possible to define multiple HTTP methods for one RPC by using -// the `additional_bindings` option. 
Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/messages/{message_id}" -// additional_bindings { -// get: "/v1/users/{user_id}/messages/{message_id}" -// } -// }; -// } -// } -// message GetMessageRequest { -// string message_id = 1; -// string user_id = 2; -// } -// -// -// This enables the following two alternative HTTP JSON to RPC -// mappings: -// -// HTTP | RPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")` -// -// # Rules for HTTP mapping -// -// The rules for mapping HTTP path, query parameters, and body fields -// to the request message are as follows: -// -// 1. The `body` field specifies either `*` or a field path, or is -// omitted. If omitted, it indicates there is no HTTP request body. -// 2. Leaf fields (recursive expansion of nested messages in the -// request) can be classified into three types: -// (a) Matched in the URL template. -// (b) Covered by body (if body is `*`, everything except (a) fields; -// else everything under the body field) -// (c) All other fields. -// 3. URL query parameters found in the HTTP request are mapped to (c) fields. -// 4. Any body sent with an HTTP request can contain only (b) fields. -// -// The syntax of the path template is as follows: -// -// Template = "/" Segments [ Verb ] ; -// Segments = Segment { "/" Segment } ; -// Segment = "*" | "**" | LITERAL | Variable ; -// Variable = "{" FieldPath [ "=" Segments ] "}" ; -// FieldPath = IDENT { "." IDENT } ; -// Verb = ":" LITERAL ; -// -// The syntax `*` matches a single path segment. The syntax `**` matches zero -// or more path segments, which must be the last part of the path except the -// `Verb`. The syntax `LITERAL` matches literal text in the path. -// -// The syntax `Variable` matches part of the URL path as specified by its -// template. A variable template must not contain other variables. If a variable -// matches a single path segment, its template may be omitted, e.g. `{var}` -// is equivalent to `{var=*}`. -// -// If a variable contains exactly one path segment, such as `"{var}"` or -// `"{var=*}"`, when such a variable is expanded into a URL path, all characters -// except `[-_.~0-9a-zA-Z]` are percent-encoded. Such variables show up in the -// Discovery Document as `{var}`. -// -// If a variable contains one or more path segments, such as `"{var=foo/*}"` -// or `"{var=**}"`, when such a variable is expanded into a URL path, all -// characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. Such variables -// show up in the Discovery Document as `{+var}`. -// -// NOTE: While the single segment variable matches the semantics of -// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 -// Simple String Expansion, the multi segment variable **does not** match -// RFC 6570 Reserved Expansion. The reason is that the Reserved Expansion -// does not expand special characters like `?` and `#`, which would lead -// to invalid URLs. -// -// NOTE: the field paths in variables and in the `body` must not refer to -// repeated fields or map fields. -message HttpRule { - // Selects methods to which this rule applies. - // - // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. - string selector = 1; - - // Determines the URL pattern is matched by this rules. This pattern can be - // used with any of the {get|put|post|delete|patch} methods. 
A custom method - // can be defined using the 'custom' field. - oneof pattern { - // Used for listing and getting information about resources. - string get = 2; - - // Used for updating a resource. - string put = 3; - - // Used for creating a resource. - string post = 4; - - // Used for deleting a resource. - string delete = 5; - - // Used for updating a resource. - string patch = 6; - - // The custom pattern is used for specifying an HTTP method that is not - // included in the `pattern` field, such as HEAD, or "*" to leave the - // HTTP method unspecified for this rule. The wild-card rule is useful - // for services that provide content to Web (HTML) clients. - CustomHttpPattern custom = 8; - } - - // The name of the request field whose value is mapped to the HTTP body, or - // `*` for mapping all fields not captured by the path pattern to the HTTP - // body. NOTE: the referred field must not be a repeated field and must be - // present at the top-level of request message type. - string body = 7; - - // Optional. The name of the response field whose value is mapped to the HTTP - // body of response. Other response fields are ignored. When - // not set, the response message will be used as HTTP body of response. - string response_body = 12; - - // Additional HTTP bindings for the selector. Nested bindings must - // not contain an `additional_bindings` field themselves (that is, - // the nesting may only be one level deep). - repeated HttpRule additional_bindings = 11; -} - -// A custom pattern is used for defining custom HTTP verb. -message CustomHttpPattern { - // The name of this custom HTTP verb. - string kind = 1; - - // The path matched by this custom verb. - string path = 2; -} diff --git a/tool/protobuf/pkg/gen/main.go b/tool/protobuf/pkg/gen/main.go deleted file mode 100644 index 071ba2ae2..000000000 --- a/tool/protobuf/pkg/gen/main.go +++ /dev/null @@ -1,92 +0,0 @@ -package gen - -import ( - "io" - "io/ioutil" - "log" - "os" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/protoc-gen-go/descriptor" - plugin "github.com/golang/protobuf/protoc-gen-go/plugin" -) - -// Generator ... -type Generator interface { - Generate(in *plugin.CodeGeneratorRequest) *plugin.CodeGeneratorResponse -} - -// Main ... -func Main(g Generator) { - req := readGenRequest() - resp := g.Generate(req) - writeResponse(os.Stdout, resp) -} - -// FilesToGenerate ... 
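The `Generator` interface and `gen.Main` above capture the whole protoc-plugin contract: read a `CodeGeneratorRequest` from stdin, return a `CodeGeneratorResponse` on stdout. A minimal sketch of a plugin binary wired through this package; `echoGen` and its `files.txt` output are hypothetical, not the real protoc-gen-bm entrypoint:

```go
package main

import (
	"github.com/golang/protobuf/proto"
	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"

	"github.com/go-kratos/kratos/tool/protobuf/pkg/gen"
)

type echoGen struct{}

// Generate emits a single text file listing the proto files protoc asked us to handle.
func (echoGen) Generate(in *plugin.CodeGeneratorRequest) *plugin.CodeGeneratorResponse {
	resp := new(plugin.CodeGeneratorResponse)
	content := ""
	for _, f := range gen.FilesToGenerate(in) {
		content += f.GetName() + "\n"
	}
	resp.File = append(resp.File, &plugin.CodeGeneratorResponse_File{
		Name:    proto.String("files.txt"),
		Content: proto.String(content),
	})
	return resp
}

func main() {
	gen.Main(echoGen{})
}
```

Invoked roughly as `protoc --plugin=protoc-gen-echo=./echo --echo_out=. api.proto`, protoc would feed the request on stdin and write the returned file to disk.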
-func FilesToGenerate(req *plugin.CodeGeneratorRequest) []*descriptor.FileDescriptorProto { - genFiles := make([]*descriptor.FileDescriptorProto, 0) -Outer: - for _, name := range req.FileToGenerate { - for _, f := range req.ProtoFile { - if f.GetName() == name { - genFiles = append(genFiles, f) - continue Outer - } - } - Fail("could not find file named", name) - } - - return genFiles -} - -func readGenRequest() *plugin.CodeGeneratorRequest { - data, err := ioutil.ReadAll(os.Stdin) - if err != nil { - Error(err, "reading input") - } - - req := new(plugin.CodeGeneratorRequest) - if err = proto.Unmarshal(data, req); err != nil { - Error(err, "parsing input proto") - } - - if len(req.FileToGenerate) == 0 { - Fail("no files to generate") - } - - return req -} - -func writeResponse(w io.Writer, resp *plugin.CodeGeneratorResponse) { - data, err := proto.Marshal(resp) - if err != nil { - Error(err, "marshaling response") - } - _, err = w.Write(data) - if err != nil { - Error(err, "writing response") - } -} - -// Fail log and exit -func Fail(msgs ...string) { - s := strings.Join(msgs, " ") - log.Print("error:", s) - os.Exit(1) -} - -// Fail log and exit -func Info(msgs ...string) { - s := strings.Join(msgs, " ") - log.Print("info:", s) - os.Exit(1) -} - -// Error log and exit -func Error(err error, msgs ...string) { - s := strings.Join(msgs, " ") + ":" + err.Error() - log.Print("error:", s) - os.Exit(1) -} diff --git a/tool/protobuf/pkg/generator/command_line.go b/tool/protobuf/pkg/generator/command_line.go deleted file mode 100644 index 551a558de..000000000 --- a/tool/protobuf/pkg/generator/command_line.go +++ /dev/null @@ -1,71 +0,0 @@ -package generator - -import ( - "fmt" - "strings" -) - -type ParamsBase struct { - ImportPrefix string // String to prefix to imported package file names. - ImportMap map[string]string // Mapping from .proto file name to import path. - //Tpl bool // generate service implementation template - ExplicitHTTP bool // Only generate for method that add http option -} - -type GeneratorParamsInterface interface { - GetBase() *ParamsBase - SetParam(key string, value string) error -} - -type BasicParam struct{ ParamsBase } - -func (b *BasicParam) GetBase() *ParamsBase { - return &b.ParamsBase -} -func (b *BasicParam) SetParam(key string, value string) error { - return nil -} - -func ParseGeneratorParams(parameter string, result GeneratorParamsInterface) error { - ps := make(map[string]string) - for _, p := range strings.Split(parameter, ",") { - if p == "" { - continue - } - i := strings.Index(p, "=") - if i < 0 { - return fmt.Errorf("invalid parameter %q: expected format of parameter to be k=v", p) - } - k := p[0:i] - v := p[i+1:] - if v == "" { - return fmt.Errorf("invalid parameter %q: expected format of parameter to be k=v", k) - } - ps[k] = v - } - - if result.GetBase().ImportMap == nil { - result.GetBase().ImportMap = map[string]string{} - } - for k, v := range ps { - switch { - case k == "explicit_http": - if v == "true" || v == "1" { - result.GetBase().ExplicitHTTP = true - } - case k == "import_prefix": - result.GetBase().ImportPrefix = v - // Support import map 'M' prefix per https://github.com/golang/protobuf/blob/6fb5325/protoc-gen-go/generator/generator.go#L497. - case len(k) > 0 && k[0] == 'M': - result.GetBase().ImportMap[k[1:]] = v // 1 is the length of 'M'. - case len(k) > 0 && strings.HasPrefix(k, "go_import_mapping@"): - result.GetBase().ImportMap[k[18:]] = v // 18 is the length of 'go_import_mapping@'. 
- default: - e := result.SetParam(k, v) - if e != nil { - return e - } - } - } - return nil -} diff --git a/tool/protobuf/pkg/generator/generator.go b/tool/protobuf/pkg/generator/generator.go deleted file mode 100644 index 3a228053d..000000000 --- a/tool/protobuf/pkg/generator/generator.go +++ /dev/null @@ -1,318 +0,0 @@ -package generator - -import ( - "bufio" - "bytes" - "fmt" - "go/parser" - "go/printer" - "go/token" - "path" - "strconv" - "strings" - - "github.com/golang/protobuf/protoc-gen-go/descriptor" - plugin "github.com/golang/protobuf/protoc-gen-go/plugin" - "github.com/pkg/errors" - - "github.com/go-kratos/kratos/tool/protobuf/pkg/gen" - "github.com/go-kratos/kratos/tool/protobuf/pkg/naming" - "github.com/go-kratos/kratos/tool/protobuf/pkg/typemap" - "github.com/go-kratos/kratos/tool/protobuf/pkg/utils" -) - -const Version = "v0.1" - -var GoModuleImportPath = "github.com/go-kratos/kratos" -var GoModuleDirName = "github.com/go-kratos/kratos" - -type Base struct { - Reg *typemap.Registry - - // Map to record whether we've built each package - // pkgName => alias name - pkgs map[string]string - pkgNamesInUse map[string]bool - - ImportPrefix string // String to prefix to imported package file names. - importMap map[string]string // Mapping from .proto file name to import path. - - // Package naming: - GenPkgName string // Name of the package that we're generating - PackageName string // Name of the proto file package - fileToGoPackageName map[*descriptor.FileDescriptorProto]string - - // List of files that were inputs to the generator. We need to hold this in - // the struct so we can write a header for the file that lists its inputs. - GenFiles []*descriptor.FileDescriptorProto - - // Output buffer that holds the bytes we want to write out for a single file. - // Gets reset after working on a file. - Output *bytes.Buffer - - // key: pkgName - // value: importPath - Deps map[string]string - - Params *ParamsBase - - httpInfoCache map[string]*HTTPInfo -} - -// RegisterPackageName name is the go package name or proto pkg name -// return go pkg alias -func (t *Base) RegisterPackageName(name string) (alias string) { - alias = name - i := 1 - for t.pkgNamesInUse[alias] { - alias = name + strconv.Itoa(i) - i++ - } - t.pkgNamesInUse[alias] = true - t.pkgs[name] = alias - return alias -} - -func (t *Base) Setup(in *plugin.CodeGeneratorRequest, paramsOpt ...GeneratorParamsInterface) { - t.httpInfoCache = make(map[string]*HTTPInfo) - t.pkgs = make(map[string]string) - t.pkgNamesInUse = make(map[string]bool) - t.importMap = make(map[string]string) - t.Deps = make(map[string]string) - t.fileToGoPackageName = make(map[*descriptor.FileDescriptorProto]string) - t.Output = bytes.NewBuffer(nil) - - var params GeneratorParamsInterface - if len(paramsOpt) > 0 { - params = paramsOpt[0] - } else { - params = &BasicParam{} - } - err := ParseGeneratorParams(in.GetParameter(), params) - if err != nil { - gen.Fail("could not parse parameters", err.Error()) - } - t.Params = params.GetBase() - t.ImportPrefix = params.GetBase().ImportPrefix - t.importMap = params.GetBase().ImportMap - - t.GenFiles = gen.FilesToGenerate(in) - - // Collect information on types. - t.Reg = typemap.New(in.ProtoFile) - t.RegisterPackageName("context") - t.RegisterPackageName("ioutil") - t.RegisterPackageName("proto") - // Time to figure out package names of objects defined in protobuf. First, - // we'll figure out the name for the package we're generating. 
- genPkgName, err := DeduceGenPkgName(t.GenFiles) - if err != nil { - gen.Fail(err.Error()) - } - t.GenPkgName = genPkgName - // Next, we need to pick names for all the files that are dependencies. - if len(in.ProtoFile) > 0 { - t.PackageName = t.GenFiles[0].GetPackage() - } - - for _, f := range in.ProtoFile { - if fileDescSliceContains(t.GenFiles, f) { - // This is a file we are generating. It gets the shared package name. - t.fileToGoPackageName[f] = t.GenPkgName - } else { - // This is a dependency. Use its package name. - name := f.GetPackage() - if name == "" { - name = utils.BaseName(f.GetName()) - } - name = utils.CleanIdentifier(name) - alias := t.RegisterPackageName(name) - t.fileToGoPackageName[f] = alias - } - } - - for _, f := range t.GenFiles { - deps := t.DeduceDeps(f) - for k, v := range deps { - t.Deps[k] = v - } - } -} - -func (t *Base) DeduceDeps(file *descriptor.FileDescriptorProto) map[string]string { - deps := make(map[string]string) // Map of package name to quoted import path. - ourImportPath := path.Dir(naming.GoFileName(file, "")) - for _, s := range file.Service { - for _, m := range s.Method { - defs := []*typemap.MessageDefinition{ - t.Reg.MethodInputDefinition(m), - t.Reg.MethodOutputDefinition(m), - } - for _, def := range defs { - if def.File.GetPackage() == t.PackageName { - continue - } - // By default, import path is the dirname of the Go filename. - importPath := path.Dir(naming.GoFileName(def.File, "")) - if importPath == ourImportPath { - continue - } - importPath = t.SubstituteImportPath(importPath, def.File.GetName()) - importPath = t.ImportPrefix + importPath - pkg := t.GoPackageNameForProtoFile(def.File) - deps[pkg] = strconv.Quote(importPath) - } - } - } - return deps -} - -// DeduceGenPkgName figures out the go package name to use for generated code. -// Will try to use the explicit go_package setting in a file (if set, must be -// consistent in all files). If no files have go_package set, then use the -// protobuf package name (must be consistent in all files) -func DeduceGenPkgName(genFiles []*descriptor.FileDescriptorProto) (string, error) { - var genPkgName string - for _, f := range genFiles { - name, explicit := naming.GoPackageName(f) - if explicit { - name = utils.CleanIdentifier(name) - if genPkgName != "" && genPkgName != name { - // Make sure they're all set consistently. - return "", errors.Errorf("files have conflicting go_package settings, must be the same: %q and %q", genPkgName, name) - } - genPkgName = name - } - } - if genPkgName != "" { - return genPkgName, nil - } - - // If there is no explicit setting, then check the implicit package name - // (derived from the protobuf package name) of the files and make sure it's - // consistent. - for _, f := range genFiles { - name, _ := naming.GoPackageName(f) - name = utils.CleanIdentifier(name) - if genPkgName != "" && genPkgName != name { - return "", errors.Errorf("files have conflicting package names, must be the same or overridden with go_package: %q and %q", genPkgName, name) - } - genPkgName = name - } - - // All the files have the same name, so we're good. - return genPkgName, nil -} - -func (t *Base) GoPackageNameForProtoFile(file *descriptor.FileDescriptorProto) string { - return t.fileToGoPackageName[file] -} - -func fileDescSliceContains(slice []*descriptor.FileDescriptorProto, f *descriptor.FileDescriptorProto) bool { - for _, sf := range slice { - if f == sf { - return true - } - } - return false -} - -// P forwards to g.gen.P, which prints output. 
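`DeduceGenPkgName` is the piece that decides the generated package name from `go_package` (or, failing that, from the proto package). A small sketch of the expected behaviour, assuming the `protoc-gen-go` descriptor types already imported above; the file name and the `github.com/acme/...` import path are made up:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/protoc-gen-go/descriptor"

	"github.com/go-kratos/kratos/tool/protobuf/pkg/generator"
)

func main() {
	api := &descriptor.FileDescriptorProto{
		Name:    proto.String("api/api.proto"),
		Package: proto.String("demo.service.v1"),
		Options: &descriptor.FileOptions{
			// The part after ";" names the Go package; the part before it is the import path.
			GoPackage: proto.String("github.com/acme/demo/api;api"),
		},
	}
	name, err := generator.DeduceGenPkgName([]*descriptor.FileDescriptorProto{api})
	fmt.Println(name, err) // should print: api <nil>
}
```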
-func (t *Base) P(args ...string) { - for _, v := range args { - t.Output.WriteString(v) - } - t.Output.WriteByte('\n') -} - -func (t *Base) FormattedOutput() string { - // Reformat generated code. - fset := token.NewFileSet() - raw := t.Output.Bytes() - ast, err := parser.ParseFile(fset, "", raw, parser.ParseComments) - if err != nil { - // Print out the bad code with line numbers. - // This should never happen in practice, but it can while changing generated code, - // so consider this a debugging aid. - var src bytes.Buffer - s := bufio.NewScanner(bytes.NewReader(raw)) - for line := 1; s.Scan(); line++ { - fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes()) - } - gen.Fail("bad Go source code was generated:", err.Error(), "\n"+src.String()) - } - - out := bytes.NewBuffer(nil) - err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(out, fset, ast) - if err != nil { - gen.Fail("generated Go source code could not be reformatted:", err.Error()) - } - - return out.String() -} - -func (t *Base) PrintComments(comments typemap.DefinitionComments) bool { - text := strings.TrimSuffix(comments.Leading, "\n") - if len(strings.TrimSpace(text)) == 0 { - return false - } - split := strings.Split(text, "\n") - for _, line := range split { - t.P("// ", strings.TrimPrefix(line, " ")) - } - return len(split) > 0 -} - -// IsOwnPackage ... -// protoName is fully qualified name of a type -func (t *Base) IsOwnPackage(protoName string) bool { - def := t.Reg.MessageDefinition(protoName) - if def == nil { - gen.Fail("could not find message for", protoName) - } - return def.File.GetPackage() == t.PackageName -} - -// Given a protobuf name for a Message, return the Go name we will use for that -// type, including its package prefix. -func (t *Base) GoTypeName(protoName string) string { - def := t.Reg.MessageDefinition(protoName) - if def == nil { - gen.Fail("could not find message for", protoName) - } - - var prefix string - if def.File.GetPackage() != t.PackageName { - prefix = t.GoPackageNameForProtoFile(def.File) + "." 
- } - - var name string - for _, parent := range def.Lineage() { - name += parent.Descriptor.GetName() + "_" - } - name += def.Descriptor.GetName() - return prefix + name -} - -func streamingMethod(method *descriptor.MethodDescriptorProto) bool { - return (method.ServerStreaming != nil && *method.ServerStreaming) || (method.ClientStreaming != nil && *method.ClientStreaming) -} - -func (t *Base) ShouldGenForMethod(file *descriptor.FileDescriptorProto, - service *descriptor.ServiceDescriptorProto, - method *descriptor.MethodDescriptorProto) bool { - if streamingMethod(method) { - return false - } - if !t.Params.ExplicitHTTP { - return true - } - httpInfo := t.GetHttpInfoCached(file, service, method) - return httpInfo.HasExplicitHTTPPath -} -func (t *Base) SubstituteImportPath(importPath string, importFile string) string { - if substitution, ok := t.importMap[importFile]; ok { - importPath = substitution - } - return importPath -} diff --git a/tool/protobuf/pkg/generator/helper.go b/tool/protobuf/pkg/generator/helper.go deleted file mode 100644 index ae7e1fd62..000000000 --- a/tool/protobuf/pkg/generator/helper.go +++ /dev/null @@ -1,136 +0,0 @@ -package generator - -import ( - "reflect" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/protoc-gen-go/descriptor" - - "github.com/go-kratos/kratos/tool/protobuf/pkg/extensions/gogoproto" - "github.com/go-kratos/kratos/tool/protobuf/pkg/tag" - "github.com/go-kratos/kratos/tool/protobuf/pkg/typemap" -) - -// GetJSONFieldName get name from gogoproto.jsontag -// else the original name -func GetJSONFieldName(field *descriptor.FieldDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, gogoproto.E_Jsontag) - if err == nil && v.(*string) != nil { - ret := *(v.(*string)) - i := strings.Index(ret, ",") - if i != -1 { - ret = ret[:i] - } - return ret - } - } - return field.GetName() -} - -// GetFormOrJSONName get name from form tag, then json tag -// then original name -func GetFormOrJSONName(field *descriptor.FieldDescriptorProto) string { - if field == nil { - return "" - } - tags := tag.GetMoreTags(field) - if tags != nil { - tag := reflect.StructTag(*tags) - fName := tag.Get("form") - if fName != "" { - i := strings.Index(fName, ",") - if i != -1 { - fName = fName[:i] - } - return fName - } - } - return GetJSONFieldName(field) -} - -// IsScalar Is this field a scalar numeric type? 
-func IsScalar(field *descriptor.FieldDescriptorProto) bool { - if field.Type == nil { - return false - } - switch *field.Type { - case descriptor.FieldDescriptorProto_TYPE_DOUBLE, - descriptor.FieldDescriptorProto_TYPE_FLOAT, - descriptor.FieldDescriptorProto_TYPE_INT64, - descriptor.FieldDescriptorProto_TYPE_UINT64, - descriptor.FieldDescriptorProto_TYPE_INT32, - descriptor.FieldDescriptorProto_TYPE_FIXED64, - descriptor.FieldDescriptorProto_TYPE_FIXED32, - descriptor.FieldDescriptorProto_TYPE_BOOL, - descriptor.FieldDescriptorProto_TYPE_UINT32, - descriptor.FieldDescriptorProto_TYPE_ENUM, - descriptor.FieldDescriptorProto_TYPE_SFIXED32, - descriptor.FieldDescriptorProto_TYPE_SFIXED64, - descriptor.FieldDescriptorProto_TYPE_SINT32, - descriptor.FieldDescriptorProto_TYPE_SINT64, - descriptor.FieldDescriptorProto_TYPE_BYTES, - descriptor.FieldDescriptorProto_TYPE_STRING: - return true - default: - return false - } -} - -// IsMap is protocol buffer map -func IsMap(field *descriptor.FieldDescriptorProto, reg *typemap.Registry) bool { - if field.GetType() != descriptor.FieldDescriptorProto_TYPE_MESSAGE { - return false - } - md := reg.MessageDefinition(field.GetTypeName()) - if md == nil || !md.Descriptor.GetOptions().GetMapEntry() { - return false - } - return true -} - -// IsRepeated Is this field repeated? -func IsRepeated(field *descriptor.FieldDescriptorProto) bool { - return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED -} - -// GetFieldRequired is field required? -// eg. validate="required" -func GetFieldRequired( - f *descriptor.FieldDescriptorProto, - reg *typemap.Registry, - md *typemap.MessageDefinition, -) bool { - fComment, _ := reg.FieldComments(md, f) - var tags []reflect.StructTag - { - //get required info from gogoproto.moretags - moretags := tag.GetMoreTags(f) - if moretags != nil { - tags = []reflect.StructTag{reflect.StructTag(*moretags)} - } - } - if len(tags) == 0 { - tags = tag.GetTagsInComment(fComment.Leading) - } - validateTag := tag.GetTagValue("validate", tags) - var validateRules []string - if validateTag != "" { - validateRules = strings.Split(validateTag, ",") - } - required := false - for _, rule := range validateRules { - if rule == "required" { - required = true - } - } - return required -} - -func MakeIndentStr(i int) string { - return strings.Repeat(" ", i) -} diff --git a/tool/protobuf/pkg/generator/http.go b/tool/protobuf/pkg/generator/http.go deleted file mode 100644 index 0291752a8..000000000 --- a/tool/protobuf/pkg/generator/http.go +++ /dev/null @@ -1,146 +0,0 @@ -package generator - -import ( - "fmt" - "net/http" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/protoc-gen-go/descriptor" - "google.golang.org/genproto/googleapis/api/annotations" - - "github.com/go-kratos/kratos/tool/protobuf/pkg/tag" - "github.com/go-kratos/kratos/tool/protobuf/pkg/typemap" -) - -// HTTPInfo http info for method -type HTTPInfo struct { - HttpMethod string - Path string - LegacyPath string - NewPath string - IsLegacyPath bool - Title string - Description string - // is http path added in the google.api.http option ? 
- HasExplicitHTTPPath bool -} - -type googleMethodOptionInfo struct { - Method string - PathPattern string - HTTPRule *annotations.HttpRule -} - -// GetHTTPInfo http info of method -func GetHTTPInfo( - file *descriptor.FileDescriptorProto, - service *descriptor.ServiceDescriptorProto, - method *descriptor.MethodDescriptorProto, - reg *typemap.Registry) *HTTPInfo { - var ( - title string - desc string - httpMethod string - newPath string - explicitHTTPPath bool - ) - comment, _ := reg.MethodComments(file, service, method) - tags := tag.GetTagsInComment(comment.Leading) - cleanComments := tag.GetCommentWithoutTag(comment.Leading) - if len(cleanComments) > 0 { - title = strings.Trim(cleanComments[0], "\n\r ") - if len(cleanComments) > 1 { - descLines := cleanComments[1:] - desc = strings.Trim(strings.Join(descLines, "\n"), "\r\n ") - } else { - desc = "" - } - } else { - title = "" - } - googleOptionInfo, err := ParseBMMethod(method) - if err == nil { - httpMethod = strings.ToUpper(googleOptionInfo.Method) - p := googleOptionInfo.PathPattern - if p != "" { - explicitHTTPPath = true - newPath = p - goto END - } - } - - if httpMethod == "" { - // resolve http method - httpMethod = tag.GetTagValue("method", tags) - if httpMethod == "" { - httpMethod = "GET" - } else { - httpMethod = strings.ToUpper(httpMethod) - } - } - - newPath = "/" + file.GetPackage() + "." + service.GetName() + "/" + method.GetName() -END: - var p = newPath - param := &HTTPInfo{HttpMethod: httpMethod, - Path: p, - NewPath: newPath, - IsLegacyPath: false, - Title: title, - Description: desc, - HasExplicitHTTPPath: explicitHTTPPath, - } - if title == "" { - param.Title = param.Path - } - return param -} - -func (t *Base) GetHttpInfoCached(file *descriptor.FileDescriptorProto, - service *descriptor.ServiceDescriptorProto, - method *descriptor.MethodDescriptorProto) *HTTPInfo { - key := file.GetPackage() + service.GetName() + method.GetName() - httpInfo, ok := t.httpInfoCache[key] - if !ok { - httpInfo = GetHTTPInfo(file, service, method, t.Reg) - t.httpInfoCache[key] = httpInfo - } - return httpInfo -} - -// ParseBMMethod parse BMMethodDescriptor form method descriptor proto -func ParseBMMethod(method *descriptor.MethodDescriptorProto) (*googleMethodOptionInfo, error) { - ext, err := proto.GetExtension(method.GetOptions(), annotations.E_Http) - if err != nil { - return nil, fmt.Errorf("get extension error: %s", err) - } - rule := ext.(*annotations.HttpRule) - var httpMethod string - var pathPattern string - switch pattern := rule.Pattern.(type) { - case *annotations.HttpRule_Get: - pathPattern = pattern.Get - httpMethod = http.MethodGet - case *annotations.HttpRule_Put: - pathPattern = pattern.Put - httpMethod = http.MethodPut - case *annotations.HttpRule_Post: - pathPattern = pattern.Post - httpMethod = http.MethodPost - case *annotations.HttpRule_Patch: - pathPattern = pattern.Patch - httpMethod = http.MethodPatch - case *annotations.HttpRule_Delete: - pathPattern = pattern.Delete - httpMethod = http.MethodDelete - default: - return nil, fmt.Errorf("unsupport http pattern %s", rule.Pattern) - } - bmMethod := &googleMethodOptionInfo{ - Method: httpMethod, - PathPattern: pathPattern, - HTTPRule: rule, - } - return bmMethod, nil -} diff --git a/tool/protobuf/pkg/naming/go_naming.go b/tool/protobuf/pkg/naming/go_naming.go deleted file mode 100644 index 3b428a6cf..000000000 --- a/tool/protobuf/pkg/naming/go_naming.go +++ /dev/null @@ -1,27 +0,0 @@ -package naming - -import ( - "path" - - 
"github.com/golang/protobuf/protoc-gen-go/descriptor" -) - -// GoFileName returns the output name for the generated Go file. -func GoFileName(f *descriptor.FileDescriptorProto, suffix string) string { - name := *f.Name - if ext := path.Ext(name); ext == ".pb" || ext == ".proto" || ext == ".protodevel" { - name = name[:len(name)-len(ext)] - } - name += suffix - - // Does the file have a "go_package" option? If it does, it may override the - // filename. - if impPath, _, ok := goPackageOption(f); ok && impPath != "" { - // Replace the existing dirname with the declared import path. - _, name = path.Split(name) - name = path.Join(impPath, name) - return name - } - - return name -} diff --git a/tool/protobuf/pkg/naming/naming.go b/tool/protobuf/pkg/naming/naming.go deleted file mode 100644 index bf18c5900..000000000 --- a/tool/protobuf/pkg/naming/naming.go +++ /dev/null @@ -1,113 +0,0 @@ -package naming - -import ( - "os" - "path" - "path/filepath" - "strings" - - "github.com/golang/protobuf/protoc-gen-go/descriptor" - "github.com/pkg/errors" - "github.com/siddontang/go/ioutil2" - - "github.com/go-kratos/kratos/tool/protobuf/pkg/utils" -) - -// GetVersionPrefix 根据go包名获取api版本前缀 -// @param pkg 从proto获取到的对应的go报名 -// @return 如果是v*开始的 返回v* -// 否则返回空 -func GetVersionPrefix(pkg string) string { - if pkg == "" { - return "" - } - if pkg[:1] == "v" { - return pkg - } - return "" -} - -// GenFileName returns the output name for the generated Go file. -func GenFileName(f *descriptor.FileDescriptorProto, suffix string) string { - name := *f.Name - if ext := path.Ext(name); ext == ".pb" || ext == ".proto" || ext == ".protodevel" { - name = name[:len(name)-len(ext)] - } - name += suffix - return name -} - -func ServiceName(service *descriptor.ServiceDescriptorProto) string { - return utils.CamelCase(service.GetName()) -} - -// MethodName ... -func MethodName(method *descriptor.MethodDescriptorProto) string { - return utils.CamelCase(method.GetName()) -} - -// GetGoImportPathForPb 得到 proto 文件对应的 go import路径 -// protoFilename is the proto file name -// 可能根本无法得到proto文件的具体路径, 只能假设 proto 的filename 是相对当前目录的 -// 假设 protoAbsolutePath = wd/protoFilename -func GetGoImportPathForPb(protoFilename string, moduleImportPath string, moduleDirName string) (importPath string, err error) { - wd, err := os.Getwd() - if err != nil { - panic("cannot get working directory") - } - absPath := wd + "/" + protoFilename - if !ioutil2.FileExists(absPath) { - err = errors.New("Cannot find proto file path of " + protoFilename) - return "", err - } - index := strings.Index(absPath, moduleDirName) - if index == -1 { - return "", errors.Errorf("proto file %s is not inside project %s", protoFilename, moduleDirName) - } - relativePath := absPath[index:] - importPath = filepath.Dir(relativePath) - return importPath, nil -} - -// GoPackageNameForProtoFile returns the Go package name to use in the generated Go file. -// The result explicitly reports whether the name came from an option go_package -// statement. If explicit is false, the name was derived from the protocol -// buffer's package statement or the input file name. -func GoPackageName(f *descriptor.FileDescriptorProto) (name string, explicit bool) { - // Does the file have a "go_package" option? - if _, pkg, ok := goPackageOption(f); ok { - return pkg, true - } - - // Does the file have a package clause? - if pkg := f.GetPackage(); pkg != "" { - return pkg, false - } - // Use the file base name. 
- return utils.BaseName(f.GetName()), false -} - -// goPackageOption interprets the file's go_package option. -// If there is no go_package, it returns ("", "", false). -// If there's a simple name, it returns ("", pkg, true). -// If the option implies an import path, it returns (impPath, pkg, true). -func goPackageOption(f *descriptor.FileDescriptorProto) (impPath, pkg string, ok bool) { - pkg = f.GetOptions().GetGoPackage() - if pkg == "" { - return - } - ok = true - // The presence of a slash implies there's an import path. - slash := strings.LastIndex(pkg, "/") - if slash < 0 { - return - } - impPath, pkg = pkg, pkg[slash+1:] - // A semicolon-delimited suffix overrides the package name. - sc := strings.IndexByte(impPath, ';') - if sc < 0 { - return - } - impPath, pkg = impPath[:sc], impPath[sc+1:] - return -} diff --git a/tool/protobuf/pkg/project/project.go b/tool/protobuf/pkg/project/project.go deleted file mode 100644 index 537909ff5..000000000 --- a/tool/protobuf/pkg/project/project.go +++ /dev/null @@ -1,121 +0,0 @@ -package project - -import ( - "os" - "path/filepath" - "strings" - - "github.com/pkg/errors" - "github.com/siddontang/go/ioutil2" - - "github.com/go-kratos/kratos/tool/protobuf/pkg/utils" -) - -// if proto file is inside a project (that has a /api directory) -// this present a project info -// 必须假设proto文件的路径就是相对work-dir的路径,否则无法找到proto文件以及对应的project -type ProjectInfo struct { - // AbsolutePath of the project - AbsolutePath string - // ImportPath of project - ImportPath string - // dir name of project - Name string - // parent dir of project, maybe empty - Department string - // grandma dir of project , maybe empty - Typ string - HasInternalPkg bool - // 从工作目录(working directory)到project目录的相对路径 比如a/b .. ../a - // 作用是什么? - // 假设目录结构是 - // -project - // - api/api.proto - // - internal/service - // 我想在 internal/service下生成一个文件 service.go - // work-dir 为 project/api - // proto 生成命令为 protoc --xx_out=. 
api.proto - // 那么在 protoc plugin 中的文件输出路径就得是 ../internal/service/ => {pathRefToProj}internal/service - // - PathRefToProj string -} - -func NewProjInfo(file string, modDirName string, modImportPath string) (projInfo *ProjectInfo, err error) { - projInfo = &ProjectInfo{} - wd, err := os.Getwd() - if err != nil { - panic("cannot get working directory") - } - protoAbs := wd + "/" + file - protoAbs, _ = filepath.Abs(protoAbs) - - if !ioutil2.FileExists(protoAbs) { - return nil, errors.Errorf("Cannot find proto file in current dir %s, file: %s ", wd, file) - } - //appIndex := strings.Index(wd, modDirName) - //if appIndex == -1 { - // err = errors.New("not in " + modDirName) - // return nil, err - //} - - projPath := LookupProjPath(protoAbs) - - if projPath == "" { - err = errors.New("not in project") - return nil, err - } - rel, _ := filepath.Rel(wd, projPath) - projInfo.PathRefToProj = rel - projInfo.AbsolutePath = projPath - if ioutil2.FileExists(projPath + "/internal") { - projInfo.HasInternalPkg = true - } - - i := strings.Index(projInfo.AbsolutePath, modDirName) - if i == -1 { - err = errors.Errorf("project is not inside module, project=%s, module=%s", projPath, modDirName) - return nil, err - } - relativePath := projInfo.AbsolutePath[i+len(modDirName):] - projInfo.ImportPath = modImportPath + relativePath - projInfo.Name = filepath.Base(projPath) - if p := filepath.Dir(projPath); p != "/" { - projInfo.Department = filepath.Base(p) - if p = filepath.Dir(p); p != "/" { - projInfo.Typ = filepath.Base(p) - } - } - return projInfo, nil -} - -// LookupProjPath get project path by proto absolute path -// assume that proto is in the project's api directory -func LookupProjPath(protoAbs string) (result string) { - f := func(protoAbs string, dirs []string) string { - lastIndex := len(protoAbs) - curPath := protoAbs - - for lastIndex > 0 { - found := true - for _, d := range dirs { - if !utils.IsDir(curPath + "/" + d) { - found = false - break - } - } - if found { - return curPath - } - lastIndex = strings.LastIndex(curPath, string(os.PathSeparator)) - curPath = protoAbs[:lastIndex] - } - result = "" - return result - } - - firstStep := f(protoAbs, []string{"cmd", "api"}) - if firstStep != "" { - return firstStep - } - return f(protoAbs, []string{"api"}) -} diff --git a/tool/protobuf/pkg/tag/ext_tags.go b/tool/protobuf/pkg/tag/ext_tags.go deleted file mode 100644 index d2bc6726a..000000000 --- a/tool/protobuf/pkg/tag/ext_tags.go +++ /dev/null @@ -1,21 +0,0 @@ -package tag - -import ( - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/protoc-gen-go/descriptor" - - "github.com/go-kratos/kratos/tool/protobuf/pkg/extensions/gogoproto" -) - -func GetMoreTags(field *descriptor.FieldDescriptorProto) *string { - if field == nil { - return nil - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, gogoproto.E_Moretags) - if err == nil && v.(*string) != nil { - return v.(*string) - } - } - return nil -} diff --git a/tool/protobuf/pkg/tag/tags.go b/tool/protobuf/pkg/tag/tags.go deleted file mode 100644 index e9f0e9c85..000000000 --- a/tool/protobuf/pkg/tag/tags.go +++ /dev/null @@ -1,55 +0,0 @@ -package tag - -import ( - "reflect" - "strings" -) - -// GetCommentWithoutTag strip tags in comment -func GetCommentWithoutTag(comment string) []string { - var lines []string - if comment == "" { - return lines - } - split := strings.Split(strings.TrimRight(comment, "\n\r"), "\n") - for _, line := range split { - tag, _, _ := GetLineTag(line) - if tag == "" { - lines = 
append(lines, line) - } - } - return lines -} - -func GetTagsInComment(comment string) []reflect.StructTag { - split := strings.Split(comment, "\n") - var tagsInComment []reflect.StructTag - for _, line := range split { - tag, _, _ := GetLineTag(line) - if tag != "" { - tagsInComment = append(tagsInComment, tag) - } - } - return tagsInComment -} - -func GetTagValue(key string, tags []reflect.StructTag) string { - for _, t := range tags { - val := t.Get(key) - if val != "" { - return val - } - } - return "" -} - -// find tag between backtick, start & end is the position of backtick -func GetLineTag(line string) (tag reflect.StructTag, start int, end int) { - start = strings.Index(line, "`") - end = strings.LastIndex(line, "`") - if end <= start { - return - } - tag = reflect.StructTag(line[start+1 : end]) - return -} diff --git a/tool/protobuf/pkg/typemap/typemap.go b/tool/protobuf/pkg/typemap/typemap.go deleted file mode 100644 index 27344750c..000000000 --- a/tool/protobuf/pkg/typemap/typemap.go +++ /dev/null @@ -1,277 +0,0 @@ -package typemap - -// Copyright 2018 Twitch Interactive, Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may not -// use this file except in compliance with the License. A copy of the License is -// located at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// or in the "license" file accompanying this file. This file is distributed on -// an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -import ( - "strings" - - "github.com/golang/protobuf/protoc-gen-go/descriptor" - "github.com/pkg/errors" -) - -// Registry is the place of descriptors resolving -type Registry struct { - allFiles []*descriptor.FileDescriptorProto - filesByName map[string]*descriptor.FileDescriptorProto - - // Mapping of fully-qualified names to their definitions - messagesByProtoName map[string]*MessageDefinition -} - -// New Registry -func New(files []*descriptor.FileDescriptorProto) *Registry { - r := &Registry{ - allFiles: files, - filesByName: make(map[string]*descriptor.FileDescriptorProto), - messagesByProtoName: make(map[string]*MessageDefinition), - } - - // First, index the file descriptors by name. We need this so - // messageDefsForFile can correctly scan imports. - for _, f := range files { - r.filesByName[f.GetName()] = f - } - - // Next, index all the message definitions by their fully-qualified proto - // names. - for _, f := range files { - defs := messageDefsForFile(f, r.filesByName) - for name, def := range defs { - r.messagesByProtoName[name] = def - } - } - return r -} - -// FileComments comment of file -func (r *Registry) FileComments(file *descriptor.FileDescriptorProto) (DefinitionComments, error) { - return commentsAtPath([]int32{packagePath}, file), nil -} - -// ServiceComments comments of service -func (r *Registry) ServiceComments(file *descriptor.FileDescriptorProto, svc *descriptor.ServiceDescriptorProto) (DefinitionComments, error) { - for i, s := range file.Service { - if s == svc { - path := []int32{servicePath, int32(i)} - return commentsAtPath(path, file), nil - } - } - return DefinitionComments{}, errors.Errorf("service not found in file") -} - -// FieldComments ... 
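These helpers implement the backtick-tag convention used throughout the toolchain: extra directives such as `method`, `midware` or `validate` are written between backticks inside a proto comment and parsed like Go struct tags. A short sketch of how they resolve, using a made-up leading comment:

```go
package main

import (
	"fmt"

	"github.com/go-kratos/kratos/tool/protobuf/pkg/tag"
)

func main() {
	leading := "GetDemo returns a demo item.\n" +
		"`midware:\"auth\" method:\"get\"`\n"

	tags := tag.GetTagsInComment(leading)
	fmt.Println(tag.GetTagValue("midware", tags))  // auth
	fmt.Println(tag.GetTagValue("method", tags))   // get
	fmt.Println(tag.GetCommentWithoutTag(leading)) // [GetDemo returns a demo item.]
}
```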
-func (r *Registry) FieldComments(message *MessageDefinition, field *descriptor.FieldDescriptorProto) (DefinitionComments, error) { - file := message.File - mpath := message.path - for i, f := range message.Descriptor.Field { - if f == field { - path := append(mpath, messageFieldPath, int32(i)) - return commentsAtPath(path, file), nil - } - } - return DefinitionComments{}, errors.Errorf("field not found in msg") -} - -// MethodComments comment of method -func (r *Registry) MethodComments(file *descriptor.FileDescriptorProto, svc *descriptor.ServiceDescriptorProto, method *descriptor.MethodDescriptorProto) (DefinitionComments, error) { - for i, s := range file.Service { - if s == svc { - path := []int32{servicePath, int32(i)} - for j, m := range s.Method { - if m == method { - path = append(path, serviceMethodPath, int32(j)) - return commentsAtPath(path, file), nil - } - } - } - } - return DefinitionComments{}, errors.Errorf("service not found in file") -} - -// MethodInputDefinition returns MethodInputDefinition -func (r *Registry) MethodInputDefinition(method *descriptor.MethodDescriptorProto) *MessageDefinition { - return r.messagesByProtoName[method.GetInputType()] -} - -// MethodOutputDefinition returns MethodOutputDefinition -func (r *Registry) MethodOutputDefinition(method *descriptor.MethodDescriptorProto) *MessageDefinition { - return r.messagesByProtoName[method.GetOutputType()] -} - -// MessageDefinition by name -func (r *Registry) MessageDefinition(name string) *MessageDefinition { - return r.messagesByProtoName[name] -} - -// MessageDefinition msg info -type MessageDefinition struct { - // Descriptor is is the DescriptorProto defining the message. - Descriptor *descriptor.DescriptorProto - // File is the File that the message was defined in. Or, if it has been - // publicly imported, what File was that import performed in? - File *descriptor.FileDescriptorProto - // Parent is the parent message, if this was defined as a nested message. If - // this was defiend at the top level, parent is nil. - Parent *MessageDefinition - // Comments describes the comments surrounding a message's definition. If it - // was publicly imported, then these comments are from the actual source file, - // not the file that the import was performed in. - Comments DefinitionComments - - // path is the 'SourceCodeInfo' path. See the documentation for - // github.com/golang/protobuf/protoc-gen-go/descriptor.SourceCodeInfo for an - // explanation of its format. - path []int32 -} - -// ProtoName returns the dot-delimited, fully-qualified protobuf name of the -// message. -func (m *MessageDefinition) ProtoName() string { - prefix := "." - if pkg := m.File.GetPackage(); pkg != "" { - prefix += pkg + "." - } - - if lineage := m.Lineage(); len(lineage) > 0 { - for _, parent := range lineage { - prefix += parent.Descriptor.GetName() + "." - } - } - - return prefix + m.Descriptor.GetName() -} - -// Lineage returns m's parental chain all the way back up to a top-level message -// definition. The first element of the returned slice is the highest-level -// parent. -func (m *MessageDefinition) Lineage() []*MessageDefinition { - var parents []*MessageDefinition - for p := m.Parent; p != nil; p = p.Parent { - parents = append([]*MessageDefinition{p}, parents...) - } - return parents -} - -// descendants returns all the submessages defined within m, and all the -// descendants of those, recursively. 
-func (m *MessageDefinition) descendants() []*MessageDefinition { - descendants := make([]*MessageDefinition, 0) - for i, child := range m.Descriptor.NestedType { - path := append(m.path, []int32{messageMessagePath, int32(i)}...) - childDef := &MessageDefinition{ - Descriptor: child, - File: m.File, - Parent: m, - Comments: commentsAtPath(path, m.File), - path: path, - } - descendants = append(descendants, childDef) - descendants = append(descendants, childDef.descendants()...) - } - return descendants -} - -// messageDefsForFile gathers a mapping of fully-qualified protobuf names to -// their definitions. It scans a singles file at a time. It requires a mapping -// of .proto file names to their definitions in order to correctly handle -// 'import public' declarations; this mapping should include all files -// transitively imported by f. -func messageDefsForFile(f *descriptor.FileDescriptorProto, filesByName map[string]*descriptor.FileDescriptorProto) map[string]*MessageDefinition { - byProtoName := make(map[string]*MessageDefinition) - // First, gather all the messages defined at the top level. - for i, d := range f.MessageType { - path := []int32{messagePath, int32(i)} - def := &MessageDefinition{ - Descriptor: d, - File: f, - Parent: nil, - Comments: commentsAtPath(path, f), - path: path, - } - - byProtoName[def.ProtoName()] = def - // Next, all nested message definitions. - for _, child := range def.descendants() { - byProtoName[child.ProtoName()] = child - } - } - - // Finally, all messages imported publicly. - for _, depIdx := range f.PublicDependency { - depFileName := f.Dependency[depIdx] - depFile := filesByName[depFileName] - depDefs := messageDefsForFile(depFile, filesByName) - for _, def := range depDefs { - imported := &MessageDefinition{ - Descriptor: def.Descriptor, - File: f, - Parent: def.Parent, - Comments: commentsAtPath(def.path, depFile), - path: def.path, - } - byProtoName[imported.ProtoName()] = imported - } - } - - return byProtoName -} - -// // ignored detached comments. -type DefinitionComments struct { - Leading string - Trailing string - LeadingDetached []string -} - -func commentsAtPath(path []int32, sourceFile *descriptor.FileDescriptorProto) DefinitionComments { - if sourceFile.SourceCodeInfo == nil { - // The compiler didn't provide us with comments. - return DefinitionComments{} - } - - for _, loc := range sourceFile.SourceCodeInfo.Location { - if pathEqual(path, loc.Path) { - return DefinitionComments{ - Leading: strings.TrimSuffix(loc.GetLeadingComments(), "\n"), - LeadingDetached: loc.GetLeadingDetachedComments(), - Trailing: loc.GetTrailingComments(), - } - } - } - return DefinitionComments{} -} - -func pathEqual(path1, path2 []int32) bool { - if len(path1) != len(path2) { - return false - } - for i, v := range path1 { - if path2[i] != v { - return false - } - } - return true -} - -const ( - // tag numbers in FileDescriptorProto - packagePath = 2 // package - messagePath = 4 // message_type - servicePath = 6 // service - // tag numbers in DescriptorProto - messageFieldPath = 2 // field - messageMessagePath = 3 // nested_type - // tag numbers in ServiceDescriptorProto - serviceMethodPath = 2 // method -) diff --git a/tool/protobuf/pkg/utils/stringutils.go b/tool/protobuf/pkg/utils/stringutils.go deleted file mode 100644 index 8da21c8fe..000000000 --- a/tool/protobuf/pkg/utils/stringutils.go +++ /dev/null @@ -1,97 +0,0 @@ -package utils - -import ( - "strings" - "unicode" -) - -// Is c an ASCII lower-case letter? 
-func isASCIILower(c byte) bool { - return 'a' <= c && c <= 'z' -} - -// Is c an ASCII digit? -func isASCIIDigit(c byte) bool { - return '0' <= c && c <= '9' -} - -// CamelCase converts a string from snake_case to CamelCased. -// -// If there is an interior underscore followed by a lower case letter, drop the -// underscore and convert the letter to upper case. There is a remote -// possibility of this rewrite causing a name collision, but it's so remote -// we're prepared to pretend it's nonexistent - since the C++ generator -// lowercases names, it's extremely unlikely to have two fields with different -// capitalizations. In short, _my_field_name_2 becomes XMyFieldName_2. -func CamelCase(s string) string { - if s == "" { - return "" - } - t := make([]byte, 0, 32) - i := 0 - if s[0] == '_' { - // Need a capital letter; drop the '_'. - t = append(t, 'X') - i++ - } - // Invariant: if the next letter is lower case, it must be converted - // to upper case. - // - // That is, we process a word at a time, where words are marked by _ or upper - // case letter. Digits are treated as words. - for ; i < len(s); i++ { - c := s[i] - if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) { - continue // Skip the underscore in s. - } - if isASCIIDigit(c) { - t = append(t, c) - continue - } - // Assume we have a letter now - if not, it's a bogus identifier. The next - // word is a sequence of characters that must start upper case. - if isASCIILower(c) { - c ^= ' ' // Make it a capital letter. - } - t = append(t, c) // Guaranteed not lower case. - // Accept lower case sequence that follows. - for i+1 < len(s) && isASCIILower(s[i+1]) { - i++ - t = append(t, s[i]) - } - } - return string(t) -} - -// CamelCaseSlice is like CamelCase, but the argument is a slice of strings to -// be joined with "_" and then camelcased. -func CamelCaseSlice(elem []string) string { return CamelCase(strings.Join(elem, "_")) } - -// BaseName the last path element of a slash-delimited name, with the last -// dotted suffix removed. -func BaseName(name string) string { - // First, find the last element - if i := strings.LastIndex(name, "/"); i >= 0 { - name = name[i+1:] - } - // Now drop the suffix - if i := strings.LastIndex(name, "."); i >= 0 { - name = name[0:i] - } - return name -} - -// AlphaDigitize replaces non-letter, non-digit, non-underscore characters with -// underscore. -func AlphaDigitize(r rune) rune { - if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' { - return r - } - return '_' -} - -// CleanIdentifier makes sure s is a valid 'identifier' string: it contains only -// letters, numbers, and underscore. 
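The conversion rules above are easiest to see on concrete inputs. A small sketch of the expected results; the `_my_field_name_2` case restates the example from the `CamelCase` doc comment, and the other values follow from the documented rules:

```go
package main

import (
	"fmt"

	"github.com/go-kratos/kratos/tool/protobuf/pkg/utils"
)

func main() {
	fmt.Println(utils.CamelCase("_my_field_name_2"))      // XMyFieldName_2
	fmt.Println(utils.CamelCase("user_id"))               // UserId
	fmt.Println(utils.BaseName("google/api/http.proto"))  // http
	fmt.Println(utils.CleanIdentifier("demo.service.v1")) // demo_service_v1
}
```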
-func CleanIdentifier(s string) string { - return strings.Map(AlphaDigitize, s) -} diff --git a/tool/protobuf/pkg/utils/utils.go b/tool/protobuf/pkg/utils/utils.go deleted file mode 100644 index dcf0c3f14..000000000 --- a/tool/protobuf/pkg/utils/utils.go +++ /dev/null @@ -1,29 +0,0 @@ -package utils - -import ( - "os" - "unicode" -) - -// LcFirst lower the first letter -func LcFirst(str string) string { - for i, v := range str { - return string(unicode.ToLower(v)) + str[i+1:] - } - return "" -} - -func IsDir(name string) bool { - file, err := os.Open(name) - - if err != nil { - return false - } - defer file.Close() - - fi, err := file.Stat() - if err != nil { - return false - } - return fi.IsDir() -} diff --git a/tool/protobuf/protoc-gen-bm/generator/generator.go b/tool/protobuf/protoc-gen-bm/generator/generator.go deleted file mode 100644 index 31becf28e..000000000 --- a/tool/protobuf/protoc-gen-bm/generator/generator.go +++ /dev/null @@ -1,337 +0,0 @@ -package generator - -import ( - "fmt" - "reflect" - "sort" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/protoc-gen-go/descriptor" - plugin "github.com/golang/protobuf/protoc-gen-go/plugin" - - "github.com/go-kratos/kratos/tool/protobuf/pkg/generator" - "github.com/go-kratos/kratos/tool/protobuf/pkg/naming" - "github.com/go-kratos/kratos/tool/protobuf/pkg/tag" - "github.com/go-kratos/kratos/tool/protobuf/pkg/typemap" - "github.com/go-kratos/kratos/tool/protobuf/pkg/utils" -) - -type bm struct { - generator.Base - filesHandled int -} - -// BmGenerator BM generator. -func BmGenerator() *bm { - t := &bm{} - return t -} - -// Generate ... -func (t *bm) Generate(in *plugin.CodeGeneratorRequest) *plugin.CodeGeneratorResponse { - t.Setup(in) - - // Showtime! Generate the response. 
- resp := new(plugin.CodeGeneratorResponse) - for _, f := range t.GenFiles { - respFile := t.generateForFile(f) - if respFile != nil { - resp.File = append(resp.File, respFile) - } - } - return resp -} - -func (t *bm) generateForFile(file *descriptor.FileDescriptorProto) *plugin.CodeGeneratorResponse_File { - resp := new(plugin.CodeGeneratorResponse_File) - - t.generateFileHeader(file, t.GenPkgName) - t.generateImports(file) - t.generatePathConstants(file) - count := 0 - for i, service := range file.Service { - count += t.generateBMInterface(file, service) - t.generateBMRoute(file, service, i) - } - - resp.Name = proto.String(naming.GenFileName(file, ".bm.go")) - resp.Content = proto.String(t.FormattedOutput()) - t.Output.Reset() - - t.filesHandled++ - return resp -} - -func (t *bm) generatePathConstants(file *descriptor.FileDescriptorProto) { - t.P() - for _, service := range file.Service { - name := naming.ServiceName(service) - for _, method := range service.Method { - if !t.ShouldGenForMethod(file, service, method) { - continue - } - apiInfo := t.GetHttpInfoCached(file, service, method) - t.P(`var Path`, name, naming.MethodName(method), ` = "`, apiInfo.Path, `"`) - } - t.P() - } -} - -func (t *bm) generateFileHeader(file *descriptor.FileDescriptorProto, pkgName string) { - t.P("// Code generated by protoc-gen-bm ", generator.Version, ", DO NOT EDIT.") - t.P("// source: ", file.GetName()) - t.P() - if t.filesHandled == 0 { - comment, err := t.Reg.FileComments(file) - if err == nil && comment.Leading != "" { - // doc for the first file - t.P("/*") - t.P("Package ", t.GenPkgName, " is a generated blademaster stub package.") - t.P("This code was generated with kratos/tool/protobuf/protoc-gen-bm ", generator.Version, ".") - t.P() - for _, line := range strings.Split(comment.Leading, "\n") { - line = strings.TrimPrefix(line, " ") - // ensure we don't escape from the block comment - line = strings.Replace(line, "*/", "* /", -1) - t.P(line) - } - t.P() - t.P("It is generated from these files:") - for _, f := range t.GenFiles { - t.P("\t", f.GetName()) - } - t.P("*/") - } - } - t.P(`package `, pkgName) - t.P() -} - -func (t *bm) generateImports(file *descriptor.FileDescriptorProto) { - //if len(file.Service) == 0 { - // return - //} - t.P(`import (`) - //t.P(` `,t.pkgs["context"], ` "context"`) - t.P(` "context"`) - t.P() - t.P(` bm "github.com/go-kratos/kratos/pkg/net/http/blademaster"`) - t.P(` "github.com/go-kratos/kratos/pkg/net/http/blademaster/binding"`) - - t.P(`)`) - // It's legal to import a message and use it as an input or output for a - // method. Make sure to import the package of any such message. First, dedupe - // them. - deps := t.DeduceDeps(file) - for pkg, importPath := range deps { - t.P(`import `, pkg, ` `, importPath) - } - t.P() - t.P(`// to suppressed 'imported but not used warning'`) - t.P(`var _ *bm.Context`) - t.P(`var _ context.Context`) - t.P(`var _ binding.StructValidator`) -} - -// Big header comments to makes it easier to visually parse a generated file. 
-func (t *bm) sectionComment(sectionTitle string) { - t.P() - t.P(`// `, strings.Repeat("=", len(sectionTitle))) - t.P(`// `, sectionTitle) - t.P(`// `, strings.Repeat("=", len(sectionTitle))) - t.P() -} - -func (t *bm) generateBMRoute( - file *descriptor.FileDescriptorProto, - service *descriptor.ServiceDescriptorProto, - index int) { - // old mode is generate xx.route.go in the http pkg - // new mode is generate route code in the same .bm.go - // route rule /x{department}/{project-name}/{path_prefix}/method_name - // generate each route method - servName := naming.ServiceName(service) - versionPrefix := naming.GetVersionPrefix(t.GenPkgName) - svcName := utils.LcFirst(utils.CamelCase(versionPrefix)) + servName + "Svc" - t.P(`var `, svcName, ` `, servName, `BMServer`) - - type methodInfo struct { - midwares []string - routeFuncName string - apiInfo *generator.HTTPInfo - methodName string - } - var methList []methodInfo - var allMidwareMap = make(map[string]bool) - var isLegacyPkg = false - for _, method := range service.Method { - if !t.ShouldGenForMethod(file, service, method) { - continue - } - var midwares []string - comments, _ := t.Reg.MethodComments(file, service, method) - tags := tag.GetTagsInComment(comments.Leading) - if tag.GetTagValue("dynamic", tags) == "true" { - continue - } - apiInfo := t.GetHttpInfoCached(file, service, method) - isLegacyPkg = apiInfo.IsLegacyPath - //httpMethod, legacyPath, path := getHttpInfo(file, service, method, t.reg) - //if legacyPath != "" { - // isLegacyPkg = true - //} - - midStr := tag.GetTagValue("midware", tags) - if midStr != "" { - midwares = strings.Split(midStr, ",") - for _, m := range midwares { - allMidwareMap[m] = true - } - } - - methName := naming.MethodName(method) - inputType := t.GoTypeName(method.GetInputType()) - - routeName := utils.LcFirst(utils.CamelCase(servName) + - utils.CamelCase(methName)) - - methList = append(methList, methodInfo{ - apiInfo: apiInfo, - midwares: midwares, - routeFuncName: routeName, - methodName: method.GetName(), - }) - - t.P(fmt.Sprintf("func %s (c *bm.Context) {", routeName)) - t.P(` p := new(`, inputType, `)`) - requestBinding := "" - if t.hasHeaderTag(t.Reg.MessageDefinition(method.GetInputType())) { - requestBinding = ", binding.Request" - } - t.P(` if err := c.BindWith(p, binding.Default(c.Request.Method, c.Request.Header.Get("Content-Type"))` + - requestBinding + `); err != nil {`) - t.P(` return`) - t.P(` }`) - t.P(` resp, err := `, svcName, `.`, methName, `(c, p)`) - t.P(` c.JSON(resp, err)`) - t.P(`}`) - t.P(``) - } - - // generate route group - var midList []string - for m := range allMidwareMap { - midList = append(midList, m+" bm.HandlerFunc") - } - - sort.Strings(midList) - - // 注册老的路由的方法 - if isLegacyPkg { - funcName := `Register` + utils.CamelCase(versionPrefix) + servName + `Service` - t.P(`// `, funcName, ` Register the blademaster route with middleware map`) - t.P(`// midMap is the middleware map, the key is defined in proto`) - t.P(`func `, funcName, `(e *bm.Engine, svc `, servName, "BMServer, midMap map[string]bm.HandlerFunc)", ` {`) - var keys []string - for m := range allMidwareMap { - keys = append(keys, m) - } - // to keep generated code consistent - sort.Strings(keys) - for _, m := range keys { - t.P(m, ` := midMap["`, m, `"]`) - } - - t.P(svcName, ` = svc`) - for _, methInfo := range methList { - var midArgStr string - if len(methInfo.midwares) == 0 { - midArgStr = "" - } else { - midArgStr = strings.Join(methInfo.midwares, ", ") + ", " - } - t.P(`e.`, 
methInfo.apiInfo.HttpMethod, `("`, methInfo.apiInfo.LegacyPath, `", `, midArgStr, methInfo.routeFuncName, `)`) - } - t.P(` }`) - } else { - // 新的注册路由的方法 - var bmFuncName = fmt.Sprintf("Register%sBMServer", servName) - t.P(`// `, bmFuncName, ` Register the blademaster route`) - t.P(`func `, bmFuncName, `(e *bm.Engine, server `, servName, `BMServer) {`) - t.P(svcName, ` = server`) - for _, methInfo := range methList { - t.P(`e.`, methInfo.apiInfo.HttpMethod, `("`, methInfo.apiInfo.NewPath, `",`, methInfo.routeFuncName, ` )`) - } - t.P(` }`) - } -} - -func (t *bm) hasHeaderTag(md *typemap.MessageDefinition) bool { - if md.Descriptor.Field == nil { - return false - } - for _, f := range md.Descriptor.Field { - t := tag.GetMoreTags(f) - if t != nil { - st := reflect.StructTag(*t) - if st.Get("request") != "" { - return true - } - if st.Get("header") != "" { - return true - } - } - } - return false -} - -func (t *bm) generateBMInterface(file *descriptor.FileDescriptorProto, service *descriptor.ServiceDescriptorProto) int { - count := 0 - servName := naming.ServiceName(service) - t.P("// " + servName + "BMServer is the server API for " + servName + " service.") - - comments, err := t.Reg.ServiceComments(file, service) - if err == nil { - t.PrintComments(comments) - } - t.P(`type `, servName, `BMServer interface {`) - for _, method := range service.Method { - if !t.ShouldGenForMethod(file, service, method) { - continue - } - count++ - t.generateInterfaceMethod(file, service, method, comments) - t.P() - } - t.P(`}`) - return count -} - -func (t *bm) generateInterfaceMethod(file *descriptor.FileDescriptorProto, - service *descriptor.ServiceDescriptorProto, - method *descriptor.MethodDescriptorProto, - comments typemap.DefinitionComments) { - comments, err := t.Reg.MethodComments(file, service, method) - - methName := naming.MethodName(method) - outputType := t.GoTypeName(method.GetOutputType()) - inputType := t.GoTypeName(method.GetInputType()) - tags := tag.GetTagsInComment(comments.Leading) - if tag.GetTagValue("dynamic", tags) == "true" { - return - } - - if err == nil { - t.PrintComments(comments) - } - - respDynamic := tag.GetTagValue("dynamic_resp", tags) == "true" - if respDynamic { - t.P(fmt.Sprintf(` %s(ctx context.Context, req *%s) (resp interface{}, err error)`, - methName, inputType)) - } else { - t.P(fmt.Sprintf(` %s(ctx context.Context, req *%s) (resp *%s, err error)`, - methName, inputType, outputType)) - } -} diff --git a/tool/protobuf/protoc-gen-bm/generator/generator_test.go b/tool/protobuf/protoc-gen-bm/generator/generator_test.go deleted file mode 100644 index 7a5d0bc68..000000000 --- a/tool/protobuf/protoc-gen-bm/generator/generator_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package generator - -import ( - "os" - "os/exec" - "testing" - - "github.com/golang/protobuf/proto" - plugin "github.com/golang/protobuf/protoc-gen-go/plugin" -) - -func TestGenerateParseCommandLineParamsError(t *testing.T) { - if os.Getenv("BE_CRASHER") == "1" { - g := &bm{} - g.Generate(&plugin.CodeGeneratorRequest{ - Parameter: proto.String("invalid"), - }) - return - } - cmd := exec.Command(os.Args[0], "-test.run=TestGenerateParseCommandLineParamsError") - cmd.Env = append(os.Environ(), "BE_CRASHER=1") - err := cmd.Run() - if e, ok := err.(*exec.ExitError); ok && !e.Success() { - return - } - t.Fatalf("process ran with err %v, want exit status 1", err) -} diff --git a/tool/protobuf/protoc-gen-bm/main.go b/tool/protobuf/protoc-gen-bm/main.go deleted file mode 100644 index 95e8c3927..000000000 --- 
a/tool/protobuf/protoc-gen-bm/main.go +++ /dev/null @@ -1,23 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "os" - - "github.com/go-kratos/kratos/tool/protobuf/pkg/gen" - "github.com/go-kratos/kratos/tool/protobuf/pkg/generator" - bmgen "github.com/go-kratos/kratos/tool/protobuf/protoc-gen-bm/generator" -) - -func main() { - versionFlag := flag.Bool("version", false, "print version and exit") - flag.Parse() - if *versionFlag { - fmt.Println(generator.Version) - os.Exit(0) - } - - g := bmgen.BmGenerator() - gen.Main(g) -} diff --git a/tool/protobuf/protoc-gen-bswagger/generator.go b/tool/protobuf/protoc-gen-bswagger/generator.go deleted file mode 100644 index 342522bcd..000000000 --- a/tool/protobuf/protoc-gen-bswagger/generator.go +++ /dev/null @@ -1,342 +0,0 @@ -package main - -import ( - "encoding/json" - "net/http" - "reflect" - "regexp" - "strings" - - "github.com/golang/protobuf/protoc-gen-go/descriptor" - plugin "github.com/golang/protobuf/protoc-gen-go/plugin" - - "github.com/go-kratos/kratos/tool/protobuf/pkg/gen" - "github.com/go-kratos/kratos/tool/protobuf/pkg/generator" - "github.com/go-kratos/kratos/tool/protobuf/pkg/naming" - "github.com/go-kratos/kratos/tool/protobuf/pkg/tag" - "github.com/go-kratos/kratos/tool/protobuf/pkg/typemap" -) - -type swaggerGen struct { - generator.Base - // defsMap will fill into swagger's definitions - // key is full qualified proto name - defsMap map[string]*typemap.MessageDefinition -} - -// NewSwaggerGenerator a swagger generator -func NewSwaggerGenerator() *swaggerGen { - return &swaggerGen{} -} - -func (t *swaggerGen) Generate(in *plugin.CodeGeneratorRequest) *plugin.CodeGeneratorResponse { - t.Setup(in) - resp := &plugin.CodeGeneratorResponse{} - for _, f := range t.GenFiles { - if len(f.Service) == 0 { - continue - } - respFile := t.generateSwagger(f) - if respFile != nil { - resp.File = append(resp.File, respFile) - } - } - return resp -} - -func (t *swaggerGen) generateSwagger(file *descriptor.FileDescriptorProto) *plugin.CodeGeneratorResponse_File { - var pkg = file.GetPackage() - r := regexp.MustCompile("v(\\d+)$") - strs := r.FindStringSubmatch(pkg) - var vStr string - if len(strs) >= 2 { - vStr = strs[1] - } else { - vStr = "" - } - var swaggerObj = &swaggerObject{ - Paths: swaggerPathsObject{}, - Swagger: "2.0", - Info: swaggerInfoObject{ - Title: file.GetName(), - Version: vStr, - }, - Schemes: []string{"http", "https"}, - Consumes: []string{"application/json", "multipart/form-data"}, - Produces: []string{"application/json"}, - } - t.defsMap = map[string]*typemap.MessageDefinition{} - - out := &plugin.CodeGeneratorResponse_File{} - name := naming.GenFileName(file, ".swagger.json") - for _, svc := range file.Service { - for _, meth := range svc.Method { - if !t.ShouldGenForMethod(file, svc, meth) { - continue - } - apiInfo := t.GetHttpInfoCached(file, svc, meth) - pathItem := swaggerPathItemObject{} - if originPathItem, ok := swaggerObj.Paths[apiInfo.Path]; ok { - pathItem = originPathItem - } - op := t.getOperationByHTTPMethod(apiInfo.HttpMethod, &pathItem) - op.Summary = apiInfo.Title - op.Description = apiInfo.Description - swaggerObj.Paths[apiInfo.Path] = pathItem - op.Tags = []string{pkg + "." 
+ svc.GetName()} - - // request - request := t.Reg.MessageDefinition(meth.GetInputType()) - // request cannot represent by simple form - isComplexRequest := false - for _, field := range request.Descriptor.Field { - if !generator.IsScalar(field) { - isComplexRequest = true - break - } - } - if !isComplexRequest && apiInfo.HttpMethod == "GET" { - for _, field := range request.Descriptor.Field { - if !generator.IsScalar(field) { - continue - } - p := t.getQueryParameter(file, request, field) - op.Parameters = append(op.Parameters, p) - } - } else { - p := swaggerParameterObject{} - p.In = "body" - p.Required = true - p.Name = "body" - p.Schema = &swaggerSchemaObject{} - p.Schema.Ref = "#/definitions/" + meth.GetInputType() - op.Parameters = []swaggerParameterObject{p} - } - - // response - resp := swaggerResponseObject{} - resp.Description = "A successful response." - - // proto 里面的response只定义data里面的 - // 所以需要把code msg data 这一级加上 - resp.Schema.Type = "object" - resp.Schema.Properties = &swaggerSchemaObjectProperties{} - p := keyVal{Key: "code", Value: &schemaCore{Type: "integer"}} - *resp.Schema.Properties = append(*resp.Schema.Properties, p) - p = keyVal{Key: "message", Value: &schemaCore{Type: "string"}} - *resp.Schema.Properties = append(*resp.Schema.Properties, p) - p = keyVal{Key: "data", Value: schemaCore{Ref: "#/definitions/" + meth.GetOutputType()}} - *resp.Schema.Properties = append(*resp.Schema.Properties, p) - op.Responses = swaggerResponsesObject{"200": resp} - } - } - - // walk though definitions - t.walkThroughFileDefinition(file) - defs := swaggerDefinitionsObject{} - swaggerObj.Definitions = defs - for typ, msg := range t.defsMap { - def := swaggerSchemaObject{} - def.Properties = new(swaggerSchemaObjectProperties) - def.Description = strings.Trim(msg.Comments.Leading, "\n\r ") - for _, field := range msg.Descriptor.Field { - p := keyVal{Key: generator.GetFormOrJSONName(field)} - schema := t.schemaForField(file, msg, field) - if generator.GetFieldRequired(field, t.Reg, msg) { - def.Required = append(def.Required, p.Key) - } - p.Value = schema - *def.Properties = append(*def.Properties, p) - } - def.Type = "object" - defs[typ] = def - } - b, _ := json.MarshalIndent(swaggerObj, "", " ") - str := string(b) - out.Name = &name - out.Content = &str - return out -} - -func (t *swaggerGen) getOperationByHTTPMethod(httpMethod string, pathItem *swaggerPathItemObject) *swaggerOperationObject { - var op = &swaggerOperationObject{} - switch httpMethod { - case http.MethodGet: - pathItem.Get = op - case http.MethodPost: - pathItem.Post = op - case http.MethodPut: - pathItem.Put = op - case http.MethodDelete: - pathItem.Delete = op - case http.MethodPatch: - pathItem.Patch = op - default: - pathItem.Get = op - } - return op -} - -func (t *swaggerGen) getQueryParameter(file *descriptor.FileDescriptorProto, - input *typemap.MessageDefinition, - field *descriptor.FieldDescriptorProto) swaggerParameterObject { - p := swaggerParameterObject{} - p.Name = generator.GetFormOrJSONName(field) - fComment, _ := t.Reg.FieldComments(input, field) - cleanComment := tag.GetCommentWithoutTag(fComment.Leading) - - p.Description = strings.Trim(strings.Join(cleanComment, "\n"), "\n\r ") - validateComment := getValidateComment(field) - if p.Description != "" && validateComment != "" { - p.Description = p.Description + "," + validateComment - } else if validateComment != "" { - p.Description = validateComment - } - p.In = "query" - p.Required = generator.GetFieldRequired(field, t.Reg, input) - typ, isArray, 
format := getFieldSwaggerType(field) - if isArray { - p.Items = &swaggerItemsObject{} - p.Type = "array" - p.Items.Type = typ - p.Items.Format = format - } else { - p.Type = typ - p.Format = format - } - return p -} - -func (t *swaggerGen) schemaForField(file *descriptor.FileDescriptorProto, - msg *typemap.MessageDefinition, - field *descriptor.FieldDescriptorProto) swaggerSchemaObject { - schema := swaggerSchemaObject{} - fComment, err := t.Reg.FieldComments(msg, field) - if err != nil { - gen.Error(err, "comment not found err %+v") - } - schema.Description = strings.Trim(fComment.Leading, "\n\r ") - validateComment := getValidateComment(field) - if schema.Description != "" && validateComment != "" { - schema.Description = schema.Description + "," + validateComment - } else if validateComment != "" { - schema.Description = validateComment - } - typ, isArray, format := getFieldSwaggerType(field) - if !generator.IsScalar(field) { - if generator.IsMap(field, t.Reg) { - schema.Type = "object" - mapMsg := t.Reg.MessageDefinition(field.GetTypeName()) - mapValueField := mapMsg.Descriptor.Field[1] - valSchema := t.schemaForField(file, mapMsg, mapValueField) - schema.AdditionalProperties = &valSchema - } else { - if isArray { - schema.Items = &swaggerItemsObject{} - schema.Type = "array" - schema.Items.Ref = "#/definitions/" + field.GetTypeName() - } else { - schema.Ref = "#/definitions/" + field.GetTypeName() - } - } - } else { - if isArray { - schema.Items = &swaggerItemsObject{} - schema.Type = "array" - schema.Items.Type = typ - schema.Items.Format = format - } else { - schema.Type = typ - schema.Format = format - } - } - return schema -} - -func (t *swaggerGen) walkThroughFileDefinition(file *descriptor.FileDescriptorProto) { - for _, svc := range file.Service { - for _, meth := range svc.Method { - shouldGen := t.ShouldGenForMethod(file, svc, meth) - if !shouldGen { - continue - } - t.walkThroughMessages(t.Reg.MessageDefinition(meth.GetOutputType())) - t.walkThroughMessages(t.Reg.MessageDefinition(meth.GetInputType())) - } - } -} - -func (t *swaggerGen) walkThroughMessages(msg *typemap.MessageDefinition) { - _, ok := t.defsMap[msg.ProtoName()] - if ok { - return - } - if !msg.Descriptor.GetOptions().GetMapEntry() { - t.defsMap[msg.ProtoName()] = msg - } - for _, field := range msg.Descriptor.Field { - if field.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE { - t.walkThroughMessages(t.Reg.MessageDefinition(field.GetTypeName())) - } - } -} - -func getFieldSwaggerType(field *descriptor.FieldDescriptorProto) (typeName string, isArray bool, formatName string) { - typeName = "unknown" - switch field.GetType() { - case descriptor.FieldDescriptorProto_TYPE_BOOL: - typeName = "boolean" - case descriptor.FieldDescriptorProto_TYPE_DOUBLE: - typeName = "number" - formatName = "double" - case descriptor.FieldDescriptorProto_TYPE_FLOAT: - typeName = "number" - formatName = "float" - case - descriptor.FieldDescriptorProto_TYPE_INT64, - descriptor.FieldDescriptorProto_TYPE_UINT64, - descriptor.FieldDescriptorProto_TYPE_INT32, - descriptor.FieldDescriptorProto_TYPE_FIXED64, - descriptor.FieldDescriptorProto_TYPE_FIXED32, - descriptor.FieldDescriptorProto_TYPE_ENUM, - descriptor.FieldDescriptorProto_TYPE_UINT32, - descriptor.FieldDescriptorProto_TYPE_SFIXED32, - descriptor.FieldDescriptorProto_TYPE_SFIXED64, - descriptor.FieldDescriptorProto_TYPE_SINT32, - descriptor.FieldDescriptorProto_TYPE_SINT64: - typeName = "integer" - case - descriptor.FieldDescriptorProto_TYPE_STRING, - 
descriptor.FieldDescriptorProto_TYPE_BYTES: - typeName = "string" - case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - typeName = "object" - } - if field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED { - isArray = true - } - return -} - -func getValidateComment(field *descriptor.FieldDescriptorProto) string { - var ( - tags []reflect.StructTag - ) - //get required info from gogoproto.moretags - moretags := tag.GetMoreTags(field) - if moretags != nil { - tags = []reflect.StructTag{reflect.StructTag(*moretags)} - } - validateTag := tag.GetTagValue("validate", tags) - - // trim - regStr := []string{ - "required *,*", - "omitempty *,*", - } - for _, v := range regStr { - re, _ := regexp.Compile(v) - validateTag = re.ReplaceAllString(validateTag, "") - } - return validateTag -} diff --git a/tool/protobuf/protoc-gen-bswagger/main.go b/tool/protobuf/protoc-gen-bswagger/main.go deleted file mode 100644 index 45b68dec5..000000000 --- a/tool/protobuf/protoc-gen-bswagger/main.go +++ /dev/null @@ -1,22 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "os" - - "github.com/go-kratos/kratos/tool/protobuf/pkg/gen" - "github.com/go-kratos/kratos/tool/protobuf/pkg/generator" -) - -func main() { - versionFlag := flag.Bool("version", false, "print version and exit") - flag.Parse() - if *versionFlag { - fmt.Println(generator.Version) - os.Exit(0) - } - - g := NewSwaggerGenerator() - gen.Main(g) -} diff --git a/tool/protobuf/protoc-gen-bswagger/types.go b/tool/protobuf/protoc-gen-bswagger/types.go deleted file mode 100644 index e6a7a866c..000000000 --- a/tool/protobuf/protoc-gen-bswagger/types.go +++ /dev/null @@ -1,218 +0,0 @@ -package main - -import ( - "bytes" - "encoding/json" -) - -// http://swagger.io/specification/#infoObject -type swaggerInfoObject struct { - Title string `json:"title"` - Description string `json:"description,omitempty"` - TermsOfService string `json:"termsOfService,omitempty"` - Version string `json:"version"` - - Contact *swaggerContactObject `json:"contact,omitempty"` - License *swaggerLicenseObject `json:"license,omitempty"` -} - -// http://swagger.io/specification/#contactObject -type swaggerContactObject struct { - Name string `json:"name,omitempty"` - URL string `json:"url,omitempty"` - Email string `json:"email,omitempty"` -} - -// http://swagger.io/specification/#licenseObject -type swaggerLicenseObject struct { - Name string `json:"name,omitempty"` - URL string `json:"url,omitempty"` -} - -// http://swagger.io/specification/#externalDocumentationObject -type swaggerExternalDocumentationObject struct { - Description string `json:"description,omitempty"` - URL string `json:"url,omitempty"` -} - -// http://swagger.io/specification/#swaggerObject -type swaggerObject struct { - Swagger string `json:"swagger"` - Info swaggerInfoObject `json:"info"` - Host string `json:"host,omitempty"` - BasePath string `json:"basePath,omitempty"` - Schemes []string `json:"schemes"` - Consumes []string `json:"consumes"` - Produces []string `json:"produces"` - Paths swaggerPathsObject `json:"paths"` - Definitions swaggerDefinitionsObject `json:"definitions"` - StreamDefinitions swaggerDefinitionsObject `json:"x-stream-definitions,omitempty"` - SecurityDefinitions swaggerSecurityDefinitionsObject `json:"securityDefinitions,omitempty"` - Security []swaggerSecurityRequirementObject `json:"security,omitempty"` - ExternalDocs *swaggerExternalDocumentationObject `json:"externalDocs,omitempty"` -} - -// http://swagger.io/specification/#securityDefinitionsObject -type 
swaggerSecurityDefinitionsObject map[string]swaggerSecuritySchemeObject - -// http://swagger.io/specification/#securitySchemeObject -type swaggerSecuritySchemeObject struct { - Type string `json:"type"` - Description string `json:"description,omitempty"` - Name string `json:"name,omitempty"` - In string `json:"in,omitempty"` - Flow string `json:"flow,omitempty"` - AuthorizationURL string `json:"authorizationUrl,omitempty"` - TokenURL string `json:"tokenUrl,omitempty"` - Scopes swaggerScopesObject `json:"scopes,omitempty"` -} - -// http://swagger.io/specification/#scopesObject -type swaggerScopesObject map[string]string - -// http://swagger.io/specification/#securityRequirementObject -type swaggerSecurityRequirementObject map[string][]string - -// http://swagger.io/specification/#pathsObject -type swaggerPathsObject map[string]swaggerPathItemObject - -// http://swagger.io/specification/#pathItemObject -type swaggerPathItemObject struct { - Get *swaggerOperationObject `json:"get,omitempty"` - Delete *swaggerOperationObject `json:"delete,omitempty"` - Post *swaggerOperationObject `json:"post,omitempty"` - Put *swaggerOperationObject `json:"put,omitempty"` - Patch *swaggerOperationObject `json:"patch,omitempty"` -} - -// http://swagger.io/specification/#operationObject -type swaggerOperationObject struct { - Summary string `json:"summary,omitempty"` - Description string `json:"description,omitempty"` - OperationID string `json:"operationId,omitempty"` - Responses swaggerResponsesObject `json:"responses"` - Parameters swaggerParametersObject `json:"parameters,omitempty"` - Tags []string `json:"tags,omitempty"` - Deprecated bool `json:"deprecated,omitempty"` - - Security *[]swaggerSecurityRequirementObject `json:"security,omitempty"` - ExternalDocs *swaggerExternalDocumentationObject `json:"externalDocs,omitempty"` -} - -type swaggerParametersObject []swaggerParameterObject - -// http://swagger.io/specification/#parameterObject -type swaggerParameterObject struct { - Name string `json:"name"` - Description string `json:"description,omitempty"` - In string `json:"in,omitempty"` - Required bool `json:"required"` - Type string `json:"type,omitempty"` - Format string `json:"format,omitempty"` - Items *swaggerItemsObject `json:"items,omitempty"` - Enum []string `json:"enum,omitempty"` - CollectionFormat string `json:"collectionFormat,omitempty"` - Default string `json:"default,omitempty"` - MinItems *int `json:"minItems,omitempty"` - - // Or you can explicitly refer to another type. If this is defined all - // other fields should be empty - Schema *swaggerSchemaObject `json:"schema,omitempty"` -} - -// core part of schema, which is common to itemsObject and schemaObject. -// http://swagger.io/specification/#itemsObject -type schemaCore struct { - Type string `json:"type,omitempty"` - Format string `json:"format,omitempty"` - Ref string `json:"$ref,omitempty"` - Example json.RawMessage `json:"example,omitempty"` - - Items *swaggerItemsObject `json:"items,omitempty"` - - // If the item is an enumeration include a list of all the *NAMES* of the - // enum values. I'm not sure how well this will work but assuming all enums - // start from 0 index it will be great. I don't think that is a good assumption. 
- Enum []string `json:"enum,omitempty"` - Default string `json:"default,omitempty"` -} - -type swaggerItemsObject schemaCore - -func (o *swaggerItemsObject) getType() string { - if o == nil { - return "" - } - return o.Type -} - -// http://swagger.io/specification/#responsesObject -type swaggerResponsesObject map[string]swaggerResponseObject - -// http://swagger.io/specification/#responseObject -type swaggerResponseObject struct { - Description string `json:"description"` - Schema swaggerSchemaObject `json:"schema"` -} - -type keyVal struct { - Key string - Value interface{} -} - -type swaggerSchemaObjectProperties []keyVal - -func (op swaggerSchemaObjectProperties) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - buf.WriteString("{") - for i, kv := range op { - if i != 0 { - buf.WriteString(",") - } - key, err := json.Marshal(kv.Key) - if err != nil { - return nil, err - } - buf.Write(key) - buf.WriteString(":") - val, err := json.Marshal(kv.Value) - if err != nil { - return nil, err - } - buf.Write(val) - } - - buf.WriteString("}") - return buf.Bytes(), nil -} - -// http://swagger.io/specification/#schemaObject -type swaggerSchemaObject struct { - schemaCore - // Properties can be recursively defined - Properties *swaggerSchemaObjectProperties `json:"properties,omitempty"` - AdditionalProperties *swaggerSchemaObject `json:"additionalProperties,omitempty"` - - Description string `json:"description,omitempty"` - Title string `json:"title,omitempty"` - - ExternalDocs *swaggerExternalDocumentationObject `json:"externalDocs,omitempty"` - - MultipleOf float64 `json:"multipleOf,omitempty"` - Maximum float64 `json:"maximum,omitempty"` - ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` - Minimum float64 `json:"minimum,omitempty"` - ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` - MaxLength uint64 `json:"maxLength,omitempty"` - MinLength uint64 `json:"minLength,omitempty"` - Pattern string `json:"pattern,omitempty"` - MaxItems uint64 `json:"maxItems,omitempty"` - MinItems uint64 `json:"minItems,omitempty"` - UniqueItems bool `json:"uniqueItems,omitempty"` - MaxProperties uint64 `json:"maxProperties,omitempty"` - MinProperties uint64 `json:"minProperties,omitempty"` - Required []string `json:"required,omitempty"` -} - -// http://swagger.io/specification/#definitionsObject -type swaggerDefinitionsObject map[string]swaggerSchemaObject diff --git a/tool/protobuf/protoc-gen-ecode/generator/generator.go b/tool/protobuf/protoc-gen-ecode/generator/generator.go deleted file mode 100644 index e65487bff..000000000 --- a/tool/protobuf/protoc-gen-ecode/generator/generator.go +++ /dev/null @@ -1,118 +0,0 @@ -package generator - -import ( - "strconv" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/protoc-gen-go/descriptor" - plugin "github.com/golang/protobuf/protoc-gen-go/plugin" - - "github.com/go-kratos/kratos/tool/protobuf/pkg/generator" - "github.com/go-kratos/kratos/tool/protobuf/pkg/naming" -) - -type ecode struct { - generator.Base - filesHandled int -} - -// EcodeGenerator ecode generator. -func EcodeGenerator() *ecode { - t := &ecode{} - return t -} - -// Generate ... -func (t *ecode) Generate(in *plugin.CodeGeneratorRequest) *plugin.CodeGeneratorResponse { - t.Setup(in) - - // Showtime! Generate the response. 
- resp := new(plugin.CodeGeneratorResponse) - for _, f := range t.GenFiles { - respFile := t.generateForFile(f) - if respFile != nil { - resp.File = append(resp.File, respFile) - } - } - return resp -} - -func (t *ecode) generateForFile(file *descriptor.FileDescriptorProto) *plugin.CodeGeneratorResponse_File { - var enums []*descriptor.EnumDescriptorProto - for _, enum := range file.EnumType { - if strings.HasSuffix(*enum.Name, "ErrCode") { - enums = append(enums, enum) - } - } - if len(enums) == 0 { - return nil - } - resp := new(plugin.CodeGeneratorResponse_File) - t.generateFileHeader(file, t.GenPkgName) - t.generateImports(file) - for _, enum := range enums { - t.generateEcode(file, enum) - } - - resp.Name = proto.String(naming.GenFileName(file, ".ecode.go")) - resp.Content = proto.String(t.FormattedOutput()) - t.Output.Reset() - - t.filesHandled++ - return resp -} - -func (t *ecode) generateFileHeader(file *descriptor.FileDescriptorProto, pkgName string) { - t.P("// Code generated by protoc-gen-ecode ", generator.Version, ", DO NOT EDIT.") - t.P("// source: ", file.GetName()) - t.P() - if t.filesHandled == 0 { - comment, err := t.Reg.FileComments(file) - if err == nil && comment.Leading != "" { - // doc for the first file - t.P("/*") - t.P("Package ", t.GenPkgName, " is a generated ecode package.") - t.P("This code was generated with kratos/tool/protobuf/protoc-gen-ecode ", generator.Version, ".") - t.P() - for _, line := range strings.Split(comment.Leading, "\n") { - line = strings.TrimPrefix(line, " ") - // ensure we don't escape from the block comment - line = strings.Replace(line, "*/", "* /", -1) - t.P(line) - } - t.P() - t.P("It is generated from these files:") - for _, f := range t.GenFiles { - t.P("\t", f.GetName()) - } - t.P("*/") - } - } - t.P(`package `, pkgName) - t.P() -} - -func (t *ecode) generateImports(file *descriptor.FileDescriptorProto) { - t.P(`import (`) - t.P(` "github.com/go-kratos/kratos/pkg/ecode"`) - t.P(`)`) - t.P() - t.P(`// to suppressed 'imported but not used warning'`) - t.P(`var _ ecode.Codes`) -} - -func (t *ecode) generateEcode(file *descriptor.FileDescriptorProto, enum *descriptor.EnumDescriptorProto) { - t.P("// ", *enum.Name, " ecode") - t.P("var (") - - for _, item := range enum.Value { - if *item.Number == 0 { - continue - } - // NOTE: eg: t.P("UserNotExist = New(-404) ") - t.P(*item.Name, " = ", "ecode.New(", strconv.Itoa(int(*item.Number)), ")") - } - - t.P(")") -} diff --git a/tool/protobuf/protoc-gen-ecode/generator/generator_test.go b/tool/protobuf/protoc-gen-ecode/generator/generator_test.go deleted file mode 100644 index 35e1a9240..000000000 --- a/tool/protobuf/protoc-gen-ecode/generator/generator_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package generator - -import ( - "os" - "os/exec" - "testing" - - "github.com/golang/protobuf/proto" - plugin "github.com/golang/protobuf/protoc-gen-go/plugin" -) - -func TestGenerateParseCommandLineParamsError(t *testing.T) { - if os.Getenv("BE_CRASHER") == "1" { - g := &ecode{} - g.Generate(&plugin.CodeGeneratorRequest{ - Parameter: proto.String("invalid"), - }) - return - } - cmd := exec.Command(os.Args[0], "-test.run=TestGenerateParseCommandLineParamsError") - cmd.Env = append(os.Environ(), "BE_CRASHER=1") - err := cmd.Run() - if e, ok := err.(*exec.ExitError); ok && !e.Success() { - return - } - t.Fatalf("process ran with err %v, want exit status 1", err) -} diff --git a/tool/protobuf/protoc-gen-ecode/main.go b/tool/protobuf/protoc-gen-ecode/main.go deleted file mode 100644 index 7c408bed9..000000000 
--- a/tool/protobuf/protoc-gen-ecode/main.go +++ /dev/null @@ -1,23 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "os" - - "github.com/go-kratos/kratos/tool/protobuf/pkg/gen" - "github.com/go-kratos/kratos/tool/protobuf/pkg/generator" - ecodegen "github.com/go-kratos/kratos/tool/protobuf/protoc-gen-ecode/generator" -) - -func main() { - versionFlag := flag.Bool("version", false, "print version and exit") - flag.Parse() - if *versionFlag { - fmt.Println(generator.Version) - os.Exit(0) - } - - g := ecodegen.EcodeGenerator() - gen.Main(g) -} diff --git a/tool/testcli/README.md b/tool/testcli/README.md deleted file mode 100644 index 3b56fcad6..000000000 --- a/tool/testcli/README.md +++ /dev/null @@ -1,154 +0,0 @@ -## testcli UT运行环境构建工具 -基于 docker-compose 实现跨平台跨语言环境的容器依赖管理方案,以解决运行ut场景下的 (mysql, redis, mc)容器依赖问题。 - -*这个是testing/lich的二进制工具版本(Go请直接使用库版本:github.com/go-kratos/kratos/pkg/testing/lich)* - -### 功能和特性 -- 自动读取 test 目录下的 yaml 并启动依赖 -- 自动导入 test 目录下的 DB 初始化 SQL -- 提供特定容器内的 healthcheck (mysql, mc, redis) -- 提供一站式解决 UT 服务依赖的工具版本 (testcli) - -### 编译安装 -*使用本工具/库需要前置安装好 docker & docker-compose@v1.24.1^* - -#### Method 1. With go get -```shell -go get -u github.com/go-kratos/kratos/tool/testcli -$GOPATH/bin/testcli -h -``` -#### Method 2. Build with Go -```shell -cd github.com/go-kratos/kratos/tool/testcli -go build -o $GOPATH/bin/testcli -$GOPATH/bin/testcli -h -``` -#### Method 3. Import with Kratos pkg -```Go -import "github.com/go-kratos/kratos/pkg/testing/lich" -``` - -### 构建数据 -#### Step 1. create docker-compose.yml -创建依赖服务的 docker-compose.yml,并把它放在项目路径下的 test 文件夹下面。例如: -```shell -mkdir -p $YOUR_PROJECT/test -``` -```yaml -version: "3.7" - -services: - db: - image: mysql:5.6 - ports: - - 3306:3306 - environment: - - MYSQL_ROOT_PASSWORD=root - volumes: - - .:/docker-entrypoint-initdb.d - command: [ - '--character-set-server=utf8', - '--collation-server=utf8_unicode_ci' - ] - - redis: - image: redis - ports: - - 6379:6379 -``` -一般来讲,我们推荐在项目根目录创建 test 目录,里面存放描述服务的yml,以及需要初始化的数据(database.sql等)。 - -同时也需要注意,正确的对容器内服务进行健康检测,testcli会在容器的health状态执行UT,其实我们也内置了针对几个较为通用镜像(mysql mariadb mc redis)的健康检测,也就是不写也没事(^^;; - -#### Step 2. export database.sql -构造初始化的数据(database.sql等),当然也把它也在 test 文件夹里。 -```sql -CREATE DATABASE IF NOT EXISTS `YOUR_DATABASE_NAME`; - -SET NAMES 'utf8'; -USE `YOUR_DATABASE_NAME`; - -CREATE TABLE IF NOT EXISTS `YOUR_TABLE_NAME` ( - `id` int(11) unsigned NOT NULL AUTO_INCREMENT COMMENT '主键', - PRIMARY KEY (`id`), -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='YOUR_TABLE_NAME'; -``` -这里需要注意,在创建库/表的时候尽量加上 IF NOT EXISTS,以给予一定程度的容错,以及 SET NAMES 'utf8'; 用于解决客户端连接乱码问题。 - -#### Step 3. change your project mysql config -```toml -[mysql] - addr = "127.0.0.1:3306" - dsn = "root:root@tcp(127.0.0.1:3306)/YOUR_DATABASE?timeout=1s&readTimeout=1s&writeTimeout=1s&parseTime=true&loc=Local&charset=utf8mb4,utf8" - active = 20 - idle = 10 - idleTimeout ="1s" - queryTimeout = "1s" - execTimeout = "1s" - tranTimeout = "1s" -``` -在 *Step 1* 我们已经指定了服务对外暴露的端口为3306(这当然也可以是你指定的任何值),那理所应当的我们也要修改项目连接数据库的配置~ - -Great! 至此你已经完成了运行所需要用到的数据配置,接下来就来运行它。 - -### 运行 -开头也说过本工具支持两种运行方式:testcli 二进制工具版本和 go package 源码包,业务方可以根据需求场景进行选择。 -#### Method 1. With testcli tool -*已支持的 flag: -f,--nodown,down,run* -- -f,指定 docker-compose.yaml 文件路径,默认为当前目录下。 -- --nodown,指定是否在UT执行完成后保留容器,以供下次复用。 -- down,teardown 销毁当前项目下这个 compose 文件产生的容器。 -- run,运行你当前语言的单测执行命令(如:golang为 go test -v ./) - -example: -```shell -testcli -f ../../test/docker-compose.yaml run go test -v ./ -``` -#### Method 2. Import with Kratos pkg -- Step1. 
在 Dao|Service 层中的 TestMain 单测主入口中,import "github.com/go-kratos/kratos/pkg/testing/lich" 引入testcli工具的go库版本。 -- Step2. 使用 flag.Set("f", "../../test/docker-compose.yaml") 指定 docker-compose.yaml 文件的路径。 -- Step3. 在 flag.Parse() 后即可使用 lich.Setup() 安装依赖&初始化数据(注意测试用例执行结束后 lich.Teardown() 回收下~) -- Step4. 运行 `go test -v ./ `看看效果吧~ - -example: -```Go -package dao - - -import ( - "flag" - "os" - "strings" - "testing" - - "github.com/go-kratos/kratos/pkg/conf/paladin" - "github.com/go-kratos/kratos/pkg/testing/lich" - ) - -var ( - d *Dao -) - -func TestMain(m *testing.M) { - flag.Set("conf", "../../configs") - flag.Set("f", "../../test/docker-compose.yaml") - flag.Parse() - if err := paladin.Init(); err != nil { - panic(err) - } - if err := lich.Setup(); err != nil { - panic(err) - } - defer lich.Teardown() - d = New() - if code := m.Run(); code != 0 { - panic(code) - } -} - ``` -## 注意 -因为启动mysql容器较为缓慢,健康检测的机制会重试3次,每次暂留5秒钟,基本在10s内mysql就能从creating到服务正常启动! - -当然你也可以在使用 testcli 时加上 --nodown,使其不用每次跑都新建容器,只在第一次跑的时候会初始化容器,后面都进行复用,这样速度会快很多。 - -成功启动后就欢乐奔放的玩耍吧~ Good Luck! diff --git a/tool/testcli/docker-compose.yaml b/tool/testcli/docker-compose.yaml deleted file mode 100644 index 57f78dac8..000000000 --- a/tool/testcli/docker-compose.yaml +++ /dev/null @@ -1,26 +0,0 @@ -version: "3.7" - -services: - db: - image: mysql:5.6 - ports: - - 3306:3306 - environment: - - MYSQL_ROOT_PASSWORD=root - - TZ=Asia/Shanghai - volumes: - - .:/docker-entrypoint-initdb.d - command: [ - '--character-set-server=utf8', - '--collation-server=utf8_unicode_ci' - ] - - redis: - image: redis - ports: - - 6379:6379 - - memcached: - image: memcached - ports: - - 11211:11211 \ No newline at end of file diff --git a/tool/testcli/main.go b/tool/testcli/main.go deleted file mode 100644 index 7efde6ca5..000000000 --- a/tool/testcli/main.go +++ /dev/null @@ -1,50 +0,0 @@ -package main - -import ( - "flag" - "os" - "os/exec" - "strings" - - "github.com/go-kratos/kratos/pkg/testing/lich" -) - -func parseArgs() (flags map[string]string) { - flags = make(map[string]string) - for idx, arg := range os.Args { - if idx == 0 { - continue - } - if arg == "down" { - flags["down"] = "" - return - } - if cmds := os.Args[idx+1:]; arg == "run" { - flags["run"] = strings.Join(cmds, " ") - return - } - } - return -} - -func main() { - flag.Parse() - flags := parseArgs() - if _, ok := flags["down"]; ok { - lich.Teardown() - return - } - if cmd, ok := flags["run"]; !ok || cmd == "" { - panic("Your need 'run' flag assign to be run commands.") - } - if err := lich.Setup(); err != nil { - panic(err) - } - defer lich.Teardown() - cmds := strings.Split(flags["run"], " ") - cmd := exec.Command(cmds[0], cmds[1:]...) - cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr - if err := cmd.Run(); err != nil { - panic(err) - } -} diff --git a/tool/testgen/README.md b/tool/testgen/README.md deleted file mode 100644 index 890992ae8..000000000 --- a/tool/testgen/README.md +++ /dev/null @@ -1,52 +0,0 @@ -## testgen UT代码自动生成器 -解放你的双手,让你的UT一步到位! - -### 功能和特性 -- 支持生成 Dao|Service 层UT代码功能(每个方法包含一个正向用例) -- 支持生成 Dao|Service 层测试入口文件dao_test.go, service_test.go(用于控制初始化,控制测试流程等) -- 支持生成Mock代码(使用GoMock框架) -- 支持选择不同模式生成不同代码(使用"–m mode"指定) -- 生成单元测试代码时,同时支持传入目录或文件 -- 支持指定方法追加生成测试用例(使用"–func funcName"指定) - -### 编译安装 -#### Method 1. With go get -```shell -go get -u github.com/go-kratos/kratos/tool/testgen -$GOPATH/bin/testgen -h -``` -#### Method 2. 
Build with Go -```shell -cd github.com/go-kratos/kratos/tool/testgen -go build -o $GOPATH/bin/testgen -$GOPATH/bin/testgen -h -``` -### 运行 -#### 生成Dao/Service层单元UT -```shell -$GOPATH/bin/testgen YOUR_PROJECT/dao # default mode -$GOPATH/bin/testgen --m test path/to/your/pkg -$GOPATH/bin/testgen --func functionName path/to/your/pkg -``` - -#### 生成接口类型 -```shell -$GOPATH/bin/testgen --m interface YOUR_PROJECT/dao #当前仅支持传目录,如目录包含子目录也会做处理 -``` - -#### 生成Mock代码 - ```shell -$GOPATH/bin/testgen --m mock YOUR_PROJECT/dao #仅传入包路径即可 -``` - -#### 生成Monkey代码 -```shell -$GOPATH/bin/testgen --m monkey yourCodeDirPath #仅传入包路径即可 -``` -### 赋诗一首 -``` -莫生气 莫生气 -代码辣鸡非我意 -自己动手分田地 -谈笑风生活长命 -``` \ No newline at end of file diff --git a/tool/testgen/gen.go b/tool/testgen/gen.go deleted file mode 100644 index b515fda29..000000000 --- a/tool/testgen/gen.go +++ /dev/null @@ -1,419 +0,0 @@ -package main - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/otokaze/mock/mockgen" - "github.com/otokaze/mock/mockgen/model" -) - -func genTest(parses []*parse) (err error) { - for _, p := range parses { - switch { - case strings.HasSuffix(p.Path, "_mock.go") || - strings.HasSuffix(p.Path, ".intf.go"): - continue - case strings.HasSuffix(p.Path, "dao.go") || - strings.HasSuffix(p.Path, "service.go"): - err = p.genTestMain() - default: - err = p.genUTTest() - } - if err != nil { - break - } - } - return -} - -func (p *parse) genUTTest() (err error) { - var ( - buffer bytes.Buffer - impts = strings.Join([]string{ - `"context"`, - `"testing"`, - `. "github.com/smartystreets/goconvey/convey"`, - }, "\n\t") - content []byte - ) - filename := strings.Replace(p.Path, ".go", "_test.go", -1) - if _, err = os.Stat(filename); (_func == "" && err == nil) || - (err != nil && os.IsExist(err)) { - err = nil - return - } - for _, impt := range p.Imports { - impts += "\n\t\"" + impt.V + "\"" - } - if _func == "" { - buffer.WriteString(fmt.Sprintf(tpPackage, p.Package)) - buffer.WriteString(fmt.Sprintf(tpImport, impts)) - } - for _, parseFunc := range p.Funcs { - if _func != "" && _func != parseFunc.Name { - continue - } - var ( - methodK string - tpVars string - vars []string - val []string - notice = "Then " - reset string - ) - if method := ConvertMethod(p.Path); method != "" { - methodK = method + "." - } - tpTestFuncs := fmt.Sprintf(tpTestFunc, strings.Title(p.Package), parseFunc.Name, "", parseFunc.Name, "%s", "%s", "%s") - tpTestFuncBeCall := methodK + parseFunc.Name + "(%s)\n\t\t\tConvey(\"%s\", func() {" - if parseFunc.Result == nil { - tpTestFuncBeCall = fmt.Sprintf(tpTestFuncBeCall, "%s", "No return values") - tpTestFuncs = fmt.Sprintf(tpTestFuncs, "%s", tpTestFuncBeCall, "%s") - } - for k, res := range parseFunc.Result { - if res.K == "" { - res.K = fmt.Sprintf("p%d", k+1) - } - var so string - if res.V == "error" { - res.K = "err" - so = fmt.Sprintf("\tSo(%s, ShouldBeNil)", res.K) - notice += "err should be nil." - } else { - so = fmt.Sprintf("\tSo(%s, ShouldNotBeNil)", res.K) - val = append(val, res.K) - } - if len(parseFunc.Result) <= k+1 { - if len(val) != 0 { - notice += strings.Join(val, ",") + " should not be nil." 
- } - tpTestFuncBeCall = fmt.Sprintf(tpTestFuncBeCall, "%s", notice) - res.K += " := " + tpTestFuncBeCall - } else { - res.K += ", %s" - } - tpTestFuncs = fmt.Sprintf(tpTestFuncs, "%s", res.K+"\n\t\t\t%s", "%s") - tpTestFuncs = fmt.Sprintf(tpTestFuncs, "%s", "%s", so, "%s") - } - if parseFunc.Params == nil { - tpTestFuncs = fmt.Sprintf(tpTestFuncs, "%s", "", "%s") - } - for k, pType := range parseFunc.Params { - if pType.K == "" { - pType.K = fmt.Sprintf("a%d", k+1) - } - var ( - init string - params = pType.K - ) - switch { - case strings.HasPrefix(pType.V, "context"): - init = params + " = context.Background()" - case strings.HasPrefix(pType.V, "[]byte"): - init = params + " = " + pType.V + "(\"\")" - case strings.HasPrefix(pType.V, "[]"): - init = params + " = " + pType.V + "{}" - case strings.HasPrefix(pType.V, "int") || - strings.HasPrefix(pType.V, "uint") || - strings.HasPrefix(pType.V, "float") || - strings.HasPrefix(pType.V, "double"): - init = params + " = " + pType.V + "(0)" - case strings.HasPrefix(pType.V, "string"): - init = params + " = \"\"" - case strings.Contains(pType.V, "*xsql.Tx"): - init = params + ",_ = " + methodK + "BeginTran(c)" - reset += "\n\t" + params + ".Commit()" - case strings.HasPrefix(pType.V, "*"): - init = params + " = " + strings.Replace(pType.V, "*", "&", -1) + "{}" - case strings.Contains(pType.V, "chan"): - init = params + " = " + pType.V - case pType.V == "time.Time": - init = params + " = time.Now()" - case strings.Contains(pType.V, "chan"): - init = params + " = " + pType.V - default: - init = params + " " + pType.V - } - vars = append(vars, "\t\t"+init) - if len(parseFunc.Params) > k+1 { - params += ", %s" - } - tpTestFuncs = fmt.Sprintf(tpTestFuncs, "%s", params, "%s") - } - if len(vars) > 0 { - tpVars = fmt.Sprintf(tpVar, strings.Join(vars, "\n\t")) - } - tpTestFuncs = fmt.Sprintf(tpTestFuncs, tpVars, "%s") - if reset != "" { - tpTestResets := fmt.Sprintf(tpTestReset, reset) - tpTestFuncs = fmt.Sprintf(tpTestFuncs, tpTestResets) - } else { - tpTestFuncs = fmt.Sprintf(tpTestFuncs, "") - } - buffer.WriteString(tpTestFuncs) - } - var ( - file *os.File - flag = os.O_RDWR | os.O_CREATE | os.O_APPEND - ) - if file, err = os.OpenFile(filename, flag, 0644); err != nil { - return - } - if _func == "" { - content, _ = GoImport(filename, buffer.Bytes()) - } else { - content = buffer.Bytes() - } - if _, err = file.Write(content); err != nil { - return - } - if err = file.Close(); err != nil { - return - } - return -} - -func (p *parse) genTestMain() (err error) { - var ( - new bool - buffer bytes.Buffer - impts string - vars, mainFunc string - content []byte - instance, confFunc string - tomlPath = "**PUT PATH TO YOUR CONFIG FILES HERE**" - filename = strings.Replace(p.Path, ".go", "_test.go", -1) - ) - if p.Imports["paladin"] != nil { - new = true - } - // if _intfMode { - // imptsList = append(imptsList, `"github.com/golang/mock/gomock"`) - // for _, field := range p.Structs { - // var hit bool - // pkgName := strings.Split(field.V, ".")[0] - // interfaceName := strings.Split(field.V, ".")[1] - // if p.Imports[pkgName] != nil { - // if hit, err = checkInterfaceMock(strings.Split(field.V, ".")[1], p.Imports[pkgName].V); err != nil { - // return - // } - // } - // if hit { - // imptsList = append(imptsList, "mock"+p.Imports[pkgName].K+" \""+p.Imports[pkgName].V+"/mock\"") - // pkgName = "mock" + strings.Title(pkgName) - // interfaceName = "Mock" + interfaceName - // varsList = append(varsList, "mock"+strings.Title(field.K)+" *"+pkgName+"."+interfaceName) - 
// mockStmt += "\tmock" + strings.Title(field.K) + " = " + pkgName + ".New" + interfaceName + "(mockCtrl)\n" - // newStmt += "\t\t" + field.K + ":\tmock" + strings.Title(field.K) + ",\n" - // } else { - // pkgName = subString(field.V, "*", ".") - // if p.Imports[pkgName] != nil && pkgName != "conf" { - // imptsList = append(imptsList, p.Imports[pkgName].K+" \""+p.Imports[pkgName].V+"\"") - // } - // switch { - // case strings.HasPrefix(field.V, "*conf."): - // newStmt += "\t\t" + field.K + ":\tconf.Conf,\n" - // case strings.HasPrefix(field.V, "*"): - // newStmt += "\t\t" + field.K + ":\t" + strings.Replace(field.V, "*", "&", -1) + "{},\n" - // default: - // newStmt += "\t\t" + field.K + ":\t" + field.V + ",\n" - // } - // } - // } - // mockStmt = fmt.Sprintf(_tpTestServiceMainMockStmt, mockStmt) - // newStmt = fmt.Sprintf(_tpTestServiceMainNewStmt, newStmt) - // } - if instance = ConvertMethod(p.Path); instance == "s" { - vars = strings.Join([]string{"s *Service"}, "\n\t") - mainFunc = tpTestServiceMain - } else { - vars = strings.Join([]string{"d *Dao"}, "\n\t") - mainFunc = tpTestDaoMain - } - if new { - impts = strings.Join([]string{`"os"`, `"flag"`, `"testing"`, p.Imports["paladin"].V}, "\n\t") - confFunc = fmt.Sprintf(tpTestMainNew, instance+" = New()") - } else { - impts = strings.Join(append([]string{`"os"`, `"flag"`, `"testing"`}), "\n\t") - confFunc = fmt.Sprintf(tpTestMainOld, instance+" = New(conf.Conf)") - } - if _, err := os.Stat(filename); os.IsNotExist(err) { - buffer.WriteString(fmt.Sprintf(tpPackage, p.Package)) - buffer.WriteString(fmt.Sprintf(tpImport, impts)) - buffer.WriteString(fmt.Sprintf(tpVar, vars)) - buffer.WriteString(fmt.Sprintf(mainFunc, tomlPath, confFunc)) - content, _ = GoImport(filename, buffer.Bytes()) - ioutil.WriteFile(filename, content, 0644) - } - return -} - -func genInterface(parses []*parse) (err error) { - var ( - parse *parse - pkg = make(map[string]string) - ) - for _, parse = range parses { - if strings.Contains(parse.Path, ".intf.go") { - continue - } - dirPath := filepath.Dir(parse.Path) - for _, parseFunc := range parse.Funcs { - if (parseFunc.Method == nil) || - !(parseFunc.Name[0] >= 'A' && parseFunc.Name[0] <= 'Z') { - continue - } - var ( - params string - results string - ) - for k, param := range parseFunc.Params { - params += param.K + " " + param.P + param.V - if len(parseFunc.Params) > k+1 { - params += ", " - } - } - for k, res := range parseFunc.Result { - results += res.K + " " + res.P + res.V - if len(parseFunc.Result) > k+1 { - results += ", " - } - } - if len(results) != 0 { - results = "(" + results + ")" - } - pkg[dirPath] += "\t" + fmt.Sprintf(tpIntfcFunc, parseFunc.Name, params, results) - } - } - for k, v := range pkg { - var buffer bytes.Buffer - pathSplit := strings.Split(k, "/") - filename := k + "/" + pathSplit[len(pathSplit)-1] + ".intf.go" - if _, exist := os.Stat(filename); os.IsExist(exist) { - continue - } - buffer.WriteString(fmt.Sprintf(tpPackage, pathSplit[len(pathSplit)-1])) - buffer.WriteString(fmt.Sprintf(tpInterface, strings.Title(pathSplit[len(pathSplit)-1]), v)) - content, _ := GoImport(filename, buffer.Bytes()) - err = ioutil.WriteFile(filename, content, 0644) - } - return -} - -func genMock(files ...string) (err error) { - for _, file := range files { - var pkg *model.Package - if pkg, err = mockgen.ParseFile(file); err != nil { - return - } - if len(pkg.Interfaces) == 0 { - continue - } - var mockDir = pkg.SrcDir + "/mock" - if _, err = os.Stat(mockDir); os.IsNotExist(err) { - err = nil - 
os.Mkdir(mockDir, 0744) - } - var mockPath = mockDir + "/" + pkg.Name + "_mock.go" - if _, exist := os.Stat(mockPath); os.IsExist(exist) { - continue - } - var g = &mockgen.Generator{Filename: file} - if err = g.Generate(pkg, "mock", mockPath); err != nil { - return - } - if err = ioutil.WriteFile(mockPath, g.Output(), 0644); err != nil { - return - } - } - return -} - -func genMonkey(parses []*parse) (err error) { - var ( - pkg = make(map[string]string) - ) - for _, parse := range parses { - if strings.Contains(parse.Path, "monkey.go") || - strings.Contains(parse.Path, "/mock/") { - continue - } - var ( - path = strings.Split(filepath.Dir(parse.Path), "/") - pack = ConvertHump(path[len(path)-1]) - refer = path[len(path)-1] - mockVar, mockType, srcDir string - ) - for i := len(path) - 1; i > len(path)-4; i-- { - if path[i] == "dao" || path[i] == "service" { - srcDir = strings.Join(path[:i+1], "/") - break - } - pack = ConvertHump(path[i-1]) + pack - } - if mockVar = ConvertMethod(parse.Path); mockType == "d" { - mockType = "*" + refer + ".Dao" - } else { - mockType = "*" + refer + ".Service" - } - for _, parseFunc := range parse.Funcs { - if (parseFunc.Method == nil) || (parseFunc.Result == nil) || - !(parseFunc.Name[0] >= 'A' && parseFunc.Name[0] <= 'Z') { - continue - } - var ( - funcParams, funcResults, mockKey, mockValue, funcName string - ) - funcName = pack + parseFunc.Name - for k, param := range parseFunc.Params { - funcParams += "_ " + param.V - if len(parseFunc.Params) > k+1 { - funcParams += ", " - } - } - for k, res := range parseFunc.Result { - if res.K == "" { - if res.V == "error" { - res.K = "err" - } else { - res.K = fmt.Sprintf("p%d", k+1) - } - } - mockKey += res.K - mockValue += res.V - funcResults += res.K + " " + res.P + res.V - if len(parseFunc.Result) > k+1 { - mockKey += ", " - mockValue += ", " - funcResults += ", " - } - } - pkg[srcDir+"."+refer] += fmt.Sprintf(tpMonkeyFunc, funcName, funcName, mockVar, mockType, funcResults, mockVar, parseFunc.Name, mockType, funcParams, mockValue, mockKey) - } - } - for path, content := range pkg { - var ( - buffer bytes.Buffer - dir = strings.Split(path, ".") - mockDir = dir[0] + "/mock" - filename = mockDir + "/monkey_" + dir[1] + ".go" - ) - if _, err = os.Stat(mockDir); os.IsNotExist(err) { - err = nil - os.Mkdir(mockDir, 0744) - } - if _, err := os.Stat(filename); os.IsExist(err) { - continue - } - buffer.WriteString(fmt.Sprintf(tpPackage, "mock")) - buffer.WriteString(content) - content, _ := GoImport(filename, buffer.Bytes()) - ioutil.WriteFile(filename, content, 0644) - } - return -} diff --git a/tool/testgen/main.go b/tool/testgen/main.go deleted file mode 100644 index 4d94e6aec..000000000 --- a/tool/testgen/main.go +++ /dev/null @@ -1,57 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "os" -) - -var ( - err error - _mode, _func string - files []string - parses []*parse -) - -func main() { - flag.StringVar(&_mode, "m", "test", "Generating code by Working mode. 
[test|interface|mock...]") - flag.StringVar(&_func, "func", "", "Generating code by function.") - flag.Parse() - if len(os.Args) == 1 { - println("Creater is a tool for generating code.\n\nUsage: creater [-m]") - flag.PrintDefaults() - return - } - if err = parseArgs(os.Args[1:], &files, 0); err != nil { - panic(err) - } - switch _mode { - case "monkey": - if parses, err = parseFile(files...); err != nil { - panic(err) - } - if err = genMonkey(parses); err != nil { - panic(err) - } - case "test": - if parses, err = parseFile(files...); err != nil { - panic(err) - } - if err = genTest(parses); err != nil { - panic(err) - } - case "interface": - if parses, err = parseFile(files...); err != nil { - panic(err) - } - if err = genInterface(parses); err != nil { - panic(err) - } - case "mock": - if err = genMock(files...); err != nil { - panic(err) - } - default: - } - fmt.Println(print) -} diff --git a/tool/testgen/parser.go b/tool/testgen/parser.go deleted file mode 100644 index ec363fd04..000000000 --- a/tool/testgen/parser.go +++ /dev/null @@ -1,193 +0,0 @@ -package main - -import ( - "fmt" - "go/ast" - "go/parser" - "go/token" - "io/ioutil" - "os" - "path/filepath" - "strings" -) - -type param struct{ K, V, P string } - -type parse struct { - Path string - Package string - // Imports []string - Imports map[string]*param - // Structs []*param - // Interfaces []string - Funcs []*struct { - Name string - Method, Params, Result []*param - } -} - -func parseArgs(args []string, res *[]string, index int) (err error) { - if len(args) <= index { - return - } - if strings.HasPrefix(args[index], "-") { - index += 2 - parseArgs(args, res, index) - return - } - var f os.FileInfo - if f, err = os.Stat(args[index]); err != nil { - return - } - if f.IsDir() { - if !strings.HasSuffix(args[index], "/") { - args[index] += "/" - } - var fs []os.FileInfo - if fs, err = ioutil.ReadDir(args[index]); err != nil { - return - } - for _, f = range fs { - path, _ := filepath.Abs(args[index] + f.Name()) - args = append(args, path) - } - } else { - if strings.HasSuffix(args[index], ".go") && - !strings.HasSuffix(args[index], "_test.go") { - *res = append(*res, args[index]) - } - } - index++ - return parseArgs(args, res, index) -} - -func parseFile(files ...string) (parses []*parse, err error) { - for _, file := range files { - var ( - astFile *ast.File - fSet = token.NewFileSet() - parse = &parse{ - Imports: make(map[string]*param), - } - ) - if astFile, err = parser.ParseFile(fSet, file, nil, 0); err != nil { - return - } - if astFile.Name != nil { - parse.Path = file - parse.Package = astFile.Name.Name - } - for _, decl := range astFile.Decls { - switch decl.(type) { - case *ast.GenDecl: - if specs := decl.(*ast.GenDecl).Specs; len(specs) > 0 { - parse.Imports = parseImports(specs) - } - case *ast.FuncDecl: - var ( - dec = decl.(*ast.FuncDecl) - parseFunc = &struct { - Name string - Method, Params, Result []*param - }{Name: dec.Name.Name} - ) - if dec.Recv != nil { - parseFunc.Method = parserParams(dec.Recv.List) - } - if dec.Type.Params != nil { - parseFunc.Params = parserParams(dec.Type.Params.List) - } - if dec.Type.Results != nil { - parseFunc.Result = parserParams(dec.Type.Results.List) - } - parse.Funcs = append(parse.Funcs, parseFunc) - } - } - parses = append(parses, parse) - } - return -} - -func parserParams(fields []*ast.Field) (params []*param) { - for _, field := range fields { - p := ¶m{} - p.V = parseType(field.Type) - if field.Names == nil { - params = append(params, p) - } - for _, name := range 
field.Names { - sp := ¶m{} - sp.K = name.Name - sp.V = p.V - sp.P = p.P - params = append(params, sp) - } - } - return -} - -func parseType(expr ast.Expr) string { - switch expr.(type) { - case *ast.Ident: - return expr.(*ast.Ident).Name - case *ast.StarExpr: - return "*" + parseType(expr.(*ast.StarExpr).X) - case *ast.ArrayType: - return "[" + parseType(expr.(*ast.ArrayType).Len) + "]" + parseType(expr.(*ast.ArrayType).Elt) - case *ast.SelectorExpr: - return parseType(expr.(*ast.SelectorExpr).X) + "." + expr.(*ast.SelectorExpr).Sel.Name - case *ast.MapType: - return "map[" + parseType(expr.(*ast.MapType).Key) + "]" + parseType(expr.(*ast.MapType).Value) - case *ast.StructType: - return "struct{}" - case *ast.InterfaceType: - return "interface{}" - case *ast.FuncType: - var ( - pTemp string - rTemp string - ) - pTemp = parseFuncType(pTemp, expr.(*ast.FuncType).Params) - if expr.(*ast.FuncType).Results != nil { - rTemp = parseFuncType(rTemp, expr.(*ast.FuncType).Results) - return fmt.Sprintf("func(%s) (%s)", pTemp, rTemp) - } - return fmt.Sprintf("func(%s)", pTemp) - case *ast.ChanType: - return fmt.Sprintf("make(chan %s)", parseType(expr.(*ast.ChanType).Value)) - case *ast.Ellipsis: - return parseType(expr.(*ast.Ellipsis).Elt) - } - return "" -} - -func parseFuncType(temp string, data *ast.FieldList) string { - var params = parserParams(data.List) - for i, param := range params { - if i == 0 { - temp = param.K + " " + param.V - continue - } - t := param.K + " " + param.V - temp = fmt.Sprintf("%s, %s", temp, t) - } - return temp -} - -func parseImports(specs []ast.Spec) (params map[string]*param) { - params = make(map[string]*param) - for _, spec := range specs { - switch spec.(type) { - case *ast.ImportSpec: - p := ¶m{V: strings.Replace(spec.(*ast.ImportSpec).Path.Value, "\"", "", -1)} - if spec.(*ast.ImportSpec).Name != nil { - p.K = spec.(*ast.ImportSpec).Name.Name - params[p.K] = p - } else { - vs := strings.Split(p.V, "/") - params[vs[len(vs)-1]] = p - } - } - } - return -} diff --git a/tool/testgen/templete.go b/tool/testgen/templete.go deleted file mode 100644 index bb42d5831..000000000 --- a/tool/testgen/templete.go +++ /dev/null @@ -1,41 +0,0 @@ -package main - -var ( - tpPackage = "package %s\n\n" - tpImport = "import (\n\t%s\n)\n\n" - tpVar = "var (\n\t%s\n)\n" - tpInterface = "type %sInterface interface {\n%s}\n" - tpIntfcFunc = "%s(%s) %s\n" - tpMonkeyFunc = "// Mock%s .\nfunc Mock%s(%s %s,%s) (guard *monkey.PatchGuard) {\n\treturn monkey.PatchInstanceMethod(reflect.TypeOf(%s), \"%s\", func(_ %s, %s) (%s) {\n\t\treturn %s\n\t})\n}\n\n" - tpTestReset = "\n\t\tReset(func() {%s\n\t\t})" - tpTestFunc = "func Test%s%s(t *testing.T){%s\n\tConvey(\"%s\", t, func(){\n\t\t%s\tConvey(\"When everything goes positive\", func(){\n\t\t\t%s\n\t\t\t})\n\t\t})%s\n\t})\n}\n\n" - tpTestDaoMain = `func TestMain(m *testing.M) { - flag.Set("conf", "%s") - flag.Parse() - %s - os.Exit(m.Run()) -} -` - tpTestServiceMain = `func TestMain(m *testing.M){ - flag.Set("conf", "%s") - flag.Parse() - %s - os.Exit(m.Run()) -} -` - tpTestMainNew = `if err := paladin.Init(); err != nil { - panic(err) - } - %s` - tpTestMainOld = `if err := conf.Init(); err != nil { - panic(err) - } - %s` - print = `Generation success! - 莫生气 - 代码辣鸡非我意, - 自己动手分田地; - 你若气死谁如意? - 谈笑风生活长命. -// Release 1.2.3. 
Powered by Kratos` -) diff --git a/tool/testgen/utils.go b/tool/testgen/utils.go deleted file mode 100644 index 025fb91a7..000000000 --- a/tool/testgen/utils.go +++ /dev/null @@ -1,42 +0,0 @@ -package main - -import ( - "fmt" - "strings" - - "golang.org/x/tools/imports" -) - -// GoImport Use golang.org/x/tools/imports auto import pkg -func GoImport(file string, bytes []byte) (res []byte, err error) { - options := &imports.Options{ - TabWidth: 8, - TabIndent: true, - Comments: true, - Fragment: true, - } - if res, err = imports.Process(file, bytes, options); err != nil { - fmt.Printf("GoImport(%s) error(%v)", file, err) - res = bytes - return - } - return -} - -// ConvertMethod checkout the file belongs to dao or not -func ConvertMethod(path string) (method string) { - switch { - case strings.Contains(path, "/dao"): - method = "d" - case strings.Contains(path, "/service"): - method = "s" - default: - method = "" - } - return -} - -//ConvertHump convert words to hump style -func ConvertHump(words string) string { - return strings.ToUpper(words[0:1]) + words[1:] -} diff --git a/transport/grpc/client.go b/transport/grpc/client.go new file mode 100644 index 000000000..c89ec488d --- /dev/null +++ b/transport/grpc/client.go @@ -0,0 +1,115 @@ +package grpc + +import ( + "context" + "time" + + "github.com/go-kratos/kratos/v2/middleware" + "github.com/go-kratos/kratos/v2/middleware/recovery" + "github.com/go-kratos/kratos/v2/middleware/status" + "github.com/go-kratos/kratos/v2/registry" + "github.com/go-kratos/kratos/v2/transport" + "github.com/go-kratos/kratos/v2/transport/grpc/resolver/discovery" + + "google.golang.org/grpc" +) + +// ClientOption is gRPC client option. +type ClientOption func(o *clientOptions) + +// WithEndpoint with client endpoint. +func WithEndpoint(endpoint string) ClientOption { + return func(o *clientOptions) { + o.endpoint = endpoint + } +} + +// WithTimeout with client timeout. +func WithTimeout(timeout time.Duration) ClientOption { + return func(o *clientOptions) { + o.timeout = timeout + } +} + +// WithMiddleware with client middleware. +func WithMiddleware(m middleware.Middleware) ClientOption { + return func(o *clientOptions) { + o.middleware = m + } +} + +// WithRegistry with client registry. +func WithRegistry(r registry.Registry) ClientOption { + return func(o *clientOptions) { + o.registry = r + } +} + +// WithOptions with gRPC options. +func WithOptions(opts ...grpc.DialOption) ClientOption { + return func(o *clientOptions) { + o.grpcOpts = opts + } +} + +// clientOptions is gRPC Client +type clientOptions struct { + endpoint string + timeout time.Duration + middleware middleware.Middleware + registry registry.Registry + grpcOpts []grpc.DialOption +} + +// Dial returns a GRPC connection. +func Dial(ctx context.Context, opts ...ClientOption) (*grpc.ClientConn, error) { + return dial(ctx, false, opts...) +} + +// DialInsecure returns an insecure GRPC connection. +func DialInsecure(ctx context.Context, opts ...ClientOption) (*grpc.ClientConn, error) { + return dial(ctx, true, opts...) 
+} + +func dial(ctx context.Context, insecure bool, opts ...ClientOption) (*grpc.ClientConn, error) { + options := clientOptions{ + timeout: 500 * time.Millisecond, + middleware: middleware.Chain( + recovery.Recovery(), + status.Client(), + ), + } + for _, o := range opts { + o(&options) + } + var grpcOpts = []grpc.DialOption{ + grpc.WithTimeout(options.timeout), + grpc.WithUnaryInterceptor(UnaryClientInterceptor(options.middleware)), + } + if options.registry != nil { + grpc.WithResolvers(discovery.NewBuilder(options.registry)) + } + if insecure { + grpcOpts = append(grpcOpts, grpc.WithInsecure()) + } + if len(options.grpcOpts) > 0 { + grpcOpts = append(grpcOpts, options.grpcOpts...) + } + return grpc.DialContext(ctx, options.endpoint, grpcOpts...) +} + +// UnaryClientInterceptor retruns a unary client interceptor. +func UnaryClientInterceptor(m middleware.Middleware) grpc.UnaryClientInterceptor { + return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + ctx = transport.NewContext(ctx, transport.Transport{Kind: "gRPC"}) + ctx = NewClientContext(ctx, ClientInfo{FullMethod: method}) + h := func(ctx context.Context, req interface{}) (interface{}, error) { + return reply, invoker(ctx, method, req, reply, cc, opts...) + } + if m != nil { + h = m(h) + } + _, err := h(ctx, req) + return err + } +} diff --git a/transport/grpc/context.go b/transport/grpc/context.go new file mode 100644 index 000000000..b9dc83790 --- /dev/null +++ b/transport/grpc/context.go @@ -0,0 +1,43 @@ +package grpc + +import "context" + +// ServerInfo is gRPC server infomation. +type ServerInfo struct { + // Server is the service implementation the user provides. This is read-only. + Server interface{} + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string +} + +type serverKey struct{} + +// NewServerContext returns a new Context that carries value. +func NewServerContext(ctx context.Context, info ServerInfo) context.Context { + return context.WithValue(ctx, serverKey{}, info) +} + +// FromServerContext returns the Transport value stored in ctx, if any. +func FromServerContext(ctx context.Context) (info ServerInfo, ok bool) { + info, ok = ctx.Value(serverKey{}).(ServerInfo) + return +} + +// ClientInfo is gRPC server infomation. +type ClientInfo struct { + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string +} + +type clientKey struct{} + +// NewClientContext returns a new Context that carries value. +func NewClientContext(ctx context.Context, info ClientInfo) context.Context { + return context.WithValue(ctx, serverKey{}, info) +} + +// FromClientContext returns the Transport value stored in ctx, if any. +func FromClientContext(ctx context.Context) (info ClientInfo, ok bool) { + info, ok = ctx.Value(serverKey{}).(ClientInfo) + return +} diff --git a/transport/grpc/resolver/direct/builder.go b/transport/grpc/resolver/direct/builder.go new file mode 100644 index 000000000..fefbe7474 --- /dev/null +++ b/transport/grpc/resolver/direct/builder.go @@ -0,0 +1,35 @@ +package direct + +import ( + "strings" + + "google.golang.org/grpc/resolver" +) + +func init() { + resolver.Register(NewBuilder()) +} + +type directBuilder struct{} + +// NewBuilder creates a directBuilder which is used to factory direct resolvers. 
+// example: +// direct:///127.0.0.1:9000,127.0.0.2:9000 +func NewBuilder() resolver.Builder { + return &directBuilder{} +} + +func (d *directBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + var addrs []resolver.Address + for _, addr := range strings.Split(target.Endpoint, ",") { + addrs = append(addrs, resolver.Address{Addr: addr}) + } + cc.UpdateState(resolver.State{ + Addresses: addrs, + }) + return newDirectResolver(), nil +} + +func (d *directBuilder) Scheme() string { + return "direct" +} diff --git a/transport/grpc/resolver/direct/resolver.go b/transport/grpc/resolver/direct/resolver.go new file mode 100644 index 000000000..7d18228c8 --- /dev/null +++ b/transport/grpc/resolver/direct/resolver.go @@ -0,0 +1,15 @@ +package direct + +import "google.golang.org/grpc/resolver" + +type directResolver struct{} + +func newDirectResolver() resolver.Resolver { + return &directResolver{} +} + +func (r *directResolver) Close() { +} + +func (r *directResolver) ResolveNow(options resolver.ResolveNowOptions) { +} diff --git a/transport/grpc/resolver/discovery/builder.go b/transport/grpc/resolver/discovery/builder.go new file mode 100644 index 000000000..f5fa279ab --- /dev/null +++ b/transport/grpc/resolver/discovery/builder.go @@ -0,0 +1,54 @@ +package discovery + +import ( + "github.com/go-kratos/kratos/v2/log" + "github.com/go-kratos/kratos/v2/registry" + "google.golang.org/grpc/resolver" +) + +const name = "discovery" + +// Option is builder option. +type Option func(o *builder) + +// WithLogger with builder logger. +func WithLogger(logger log.Logger) Option { + return func(o *builder) { + o.logger = logger + } +} + +type builder struct { + registry registry.Registry + logger log.Logger +} + +// NewBuilder creates a builder which is used to factory registry resolvers. 
+func NewBuilder(r registry.Registry, opts ...Option) resolver.Builder { + b := &builder{ + registry: r, + logger: log.DefaultLogger, + } + for _, o := range opts { + o(b) + } + return b +} + +func (d *builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + w, err := d.registry.Watch(target.Endpoint) + if err != nil { + return nil, err + } + r := &discoveryResolver{ + w: w, + cc: cc, + log: log.NewHelper("grpc/resolver/discovery", d.logger), + } + go r.watch() + return r, nil +} + +func (d *builder) Scheme() string { + return name +} diff --git a/transport/grpc/resolver/discovery/resolver.go b/transport/grpc/resolver/discovery/resolver.go new file mode 100644 index 000000000..a25f127aa --- /dev/null +++ b/transport/grpc/resolver/discovery/resolver.go @@ -0,0 +1,74 @@ +package discovery + +import ( + "net/url" + "time" + + "github.com/go-kratos/kratos/v2/log" + "github.com/go-kratos/kratos/v2/registry" + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/resolver" +) + +type discoveryResolver struct { + w registry.Watcher + cc resolver.ClientConn + log *log.Helper +} + +func (r *discoveryResolver) watch() { + for { + ins, err := r.w.Next() + if err != nil { + r.log.Errorf("Failed to watch discovery endpoint: %v", err) + time.Sleep(time.Second) + continue + } + r.update(ins) + } +} + +func (r *discoveryResolver) update(ins []*registry.ServiceInstance) { + var addrs []resolver.Address + for _, in := range ins { + endpoint, err := parseEndpoint(in.Endpoints) + if err != nil { + r.log.Errorf("Failed to parse discovery endpoint: %v", err) + continue + } + addr := resolver.Address{ + ServerName: in.Name, + Attributes: parseAttributes(in.Metadata), + Addr: endpoint, + } + addrs = append(addrs, addr) + } + r.cc.UpdateState(resolver.State{Addresses: addrs}) +} + +func (r *discoveryResolver) Close() { + r.w.Close() +} + +func (r *discoveryResolver) ResolveNow(options resolver.ResolveNowOptions) {} + +func parseEndpoint(endpoints []string) (string, error) { + for _, e := range endpoints { + u, err := url.Parse(e) + if err != nil { + return "", err + } + if u.Scheme == "grpc" { + return u.Host, nil + } + } + return "", nil +} + +func parseAttributes(md map[string]string) *attributes.Attributes { + var pairs []interface{} + for k, v := range md { + pairs = append(pairs, k, v) + } + return attributes.New(pairs) +} diff --git a/transport/grpc/server.go b/transport/grpc/server.go new file mode 100644 index 000000000..ce118660a --- /dev/null +++ b/transport/grpc/server.go @@ -0,0 +1,159 @@ +package grpc + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/go-kratos/kratos/v2/internal/host" + "github.com/go-kratos/kratos/v2/log" + "github.com/go-kratos/kratos/v2/middleware" + "github.com/go-kratos/kratos/v2/middleware/recovery" + "github.com/go-kratos/kratos/v2/middleware/status" + "github.com/go-kratos/kratos/v2/transport" + + "google.golang.org/grpc" +) + +const loggerName = "transport/grpc" + +var _ transport.Server = (*Server)(nil) + +// ServerOption is gRPC server option. +type ServerOption func(o *Server) + +// Network with server network. +func Network(network string) ServerOption { + return func(s *Server) { + s.network = network + } +} + +// Address with server address. +func Address(addr string) ServerOption { + return func(s *Server) { + s.address = addr + } +} + +// Timeout with server timeout. 
+func Timeout(timeout time.Duration) ServerOption { + return func(s *Server) { + s.timeout = timeout + } +} + +// Logger with server logger. +func Logger(logger log.Logger) ServerOption { + return func(s *Server) { + s.log = log.NewHelper(loggerName, logger) + } +} + +// Middleware with server middleware. +func Middleware(m middleware.Middleware) ServerOption { + return func(s *Server) { + s.middleware = m + } +} + +// Options with grpc options. +func Options(opts ...grpc.ServerOption) ServerOption { + return func(s *Server) { + s.grpcOpts = opts + } +} + +// Server is a gRPC server wrapper. +type Server struct { + *grpc.Server + lis net.Listener + network string + address string + timeout time.Duration + log *log.Helper + middleware middleware.Middleware + grpcOpts []grpc.ServerOption +} + +// NewServer creates a gRPC server by options. +func NewServer(opts ...ServerOption) *Server { + srv := &Server{ + network: "tcp", + address: ":0", + timeout: time.Second, + log: log.NewHelper(loggerName, log.DefaultLogger), + middleware: middleware.Chain( + recovery.Recovery(), + status.Server(), + ), + } + for _, o := range opts { + o(srv) + } + var grpcOpts = []grpc.ServerOption{ + grpc.ChainUnaryInterceptor( + UnaryServerInterceptor(srv.middleware), + UnaryTimeoutInterceptor(srv.timeout), + ), + } + if len(srv.grpcOpts) > 0 { + grpcOpts = append(grpcOpts, srv.grpcOpts...) + } + srv.Server = grpc.NewServer(grpcOpts...) + return srv +} + +// Endpoint return a real address to registry endpoint. +// examples: +// grpc://127.0.0.1:9000?isSecure=false +func (s *Server) Endpoint() (string, error) { + addr, err := host.Extract(s.address, s.lis) + if err != nil { + return "", err + } + return fmt.Sprintf("grpc://%s", addr), nil +} + +// Start start the gRPC server. +func (s *Server) Start() error { + lis, err := net.Listen(s.network, s.address) + if err != nil { + return err + } + s.lis = lis + s.log.Infof("[gRPC] server listening on: %s", lis.Addr().String()) + return s.Serve(lis) +} + +// Stop stop the gRPC server. +func (s *Server) Stop() error { + s.GracefulStop() + s.log.Info("[gRPC] server stopping") + return nil +} + +// UnaryTimeoutInterceptor returns a unary timeout interceptor. +func UnaryTimeoutInterceptor(timeout time.Duration) grpc.UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + return handler(ctx, req) + } +} + +// UnaryServerInterceptor returns a unary server interceptor. 
+func UnaryServerInterceptor(m middleware.Middleware) grpc.UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + ctx = transport.NewContext(ctx, transport.Transport{Kind: "gRPC"}) + ctx = NewServerContext(ctx, ServerInfo{Server: info.Server, FullMethod: info.FullMethod}) + h := func(ctx context.Context, req interface{}) (interface{}, error) { + return handler(ctx, req) + } + if m != nil { + h = m(h) + } + return h(ctx, req) + } +} diff --git a/transport/grpc/server_test.go b/transport/grpc/server_test.go new file mode 100644 index 000000000..23b914f82 --- /dev/null +++ b/transport/grpc/server_test.go @@ -0,0 +1,40 @@ +package grpc + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/go-kratos/kratos/v2/internal/host" +) + +func TestServer(t *testing.T) { + srv := NewServer() + if endpoint, err := srv.Endpoint(); err != nil || endpoint == "" { + t.Fatal(endpoint, err) + } + + time.AfterFunc(time.Second, func() { + defer srv.Stop() + testClient(t, srv) + }) + // start server + if err := srv.Start(); err != nil { + t.Fatal(err) + } +} + +func testClient(t *testing.T, srv *Server) { + port, ok := host.Port(srv.lis) + if !ok { + t.Fatalf("extract port error: %v", srv.lis) + } + endpoint := fmt.Sprintf("127.0.0.1:%d", port) + // new a gRPC client + conn, err := DialInsecure(context.Background(), WithEndpoint(endpoint)) + if err != nil { + t.Fatal(err) + } + conn.Close() +} diff --git a/transport/http/bind.go b/transport/http/bind.go new file mode 100644 index 000000000..d0a1d7fed --- /dev/null +++ b/transport/http/bind.go @@ -0,0 +1,287 @@ +package http + +import ( + "encoding/base64" + "errors" + "fmt" + "log" + "net/http" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/wrappers" + "google.golang.org/genproto/protobuf/field_mask" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// BindVars parses url parameters. +func BindVars(req *http.Request, msg proto.Message) error { + for key, value := range Vars(req) { + if err := populateFieldValues(msg.ProtoReflect(), strings.Split(key, "."), []string{value}); err != nil { + return err + } + } + return nil +} + +// BindForm parses form parameters. 
+func BindForm(req *http.Request, msg proto.Message) error { + if err := req.ParseForm(); err != nil { + return err + } + for key, values := range req.Form { + if err := populateFieldValues(msg.ProtoReflect(), strings.Split(key, "."), values); err != nil { + return err + } + } + return nil +} + +func populateFieldValues(v protoreflect.Message, fieldPath []string, values []string) error { + if len(fieldPath) < 1 { + return errors.New("no field path") + } + if len(values) < 1 { + return errors.New("no value provided") + } + var fd protoreflect.FieldDescriptor + for i, fieldName := range fieldPath { + fields := v.Descriptor().Fields() + + if fd = fields.ByName(protoreflect.Name(fieldName)); fd == nil { + fd = fields.ByJSONName(fieldName) + if fd == nil { + log.Printf("field not found in %q: %q\n", v.Descriptor().FullName(), strings.Join(fieldPath, ".")) + return nil + } + } + + if i == len(fieldPath)-1 { + break + } + + if fd.Message() == nil || fd.Cardinality() == protoreflect.Repeated { + return fmt.Errorf("invalid path: %q is not a message", fieldName) + } + + v = v.Mutable(fd).Message() + } + if of := fd.ContainingOneof(); of != nil { + if f := v.WhichOneof(of); f != nil { + return fmt.Errorf("field already set for oneof %q", of.FullName().Name()) + } + } + switch { + case fd.IsList(): + return populateRepeatedField(fd, v.Mutable(fd).List(), values) + case fd.IsMap(): + return populateMapField(fd, v.Mutable(fd).Map(), values) + } + if len(values) > 1 { + return fmt.Errorf("too many values for field %q: %s", fd.FullName().Name(), strings.Join(values, ", ")) + } + return populateField(fd, v, values[0]) +} + +func populateField(fd protoreflect.FieldDescriptor, v protoreflect.Message, value string) error { + val, err := parseField(fd, value) + if err != nil { + return fmt.Errorf("parsing field %q: %w", fd.FullName().Name(), err) + } + v.Set(fd, val) + return nil +} + +func populateRepeatedField(fd protoreflect.FieldDescriptor, list protoreflect.List, values []string) error { + for _, value := range values { + v, err := parseField(fd, value) + if err != nil { + return fmt.Errorf("parsing list %q: %w", fd.FullName().Name(), err) + } + list.Append(v) + } + return nil +} + +func populateMapField(fd protoreflect.FieldDescriptor, mp protoreflect.Map, values []string) error { + if len(values) != 2 { + return fmt.Errorf("more than one value provided for key %q in map %q", values[0], fd.FullName()) + } + key, err := parseField(fd.MapKey(), values[0]) + if err != nil { + return fmt.Errorf("parsing map key %q: %w", fd.FullName().Name(), err) + } + value, err := parseField(fd.MapValue(), values[1]) + if err != nil { + return fmt.Errorf("parsing map value %q: %w", fd.FullName().Name(), err) + } + mp.Set(key.MapKey(), value) + return nil +} + +func parseField(fd protoreflect.FieldDescriptor, value string) (protoreflect.Value, error) { + switch fd.Kind() { + case protoreflect.BoolKind: + v, err := strconv.ParseBool(value) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfBool(v), nil + case protoreflect.EnumKind: + enum, err := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()) + switch { + case errors.Is(err, protoregistry.NotFound): + return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fd.Enum().FullName()) + case err != nil: + return protoreflect.Value{}, fmt.Errorf("failed to look up enum: %w", err) + } + v := enum.Descriptor().Values().ByName(protoreflect.Name(value)) + if v == nil { + i, err := strconv.Atoi(value) + if err != nil { + 
return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value) + } + v = enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i)) + if v == nil { + return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value) + } + } + return protoreflect.ValueOfEnum(v.Number()), nil + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfInt32(int32(v)), nil + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfInt64(v), nil + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + v, err := strconv.ParseUint(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfUint32(uint32(v)), nil + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfUint64(v), nil + case protoreflect.FloatKind: + v, err := strconv.ParseFloat(value, 32) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfFloat32(float32(v)), nil + case protoreflect.DoubleKind: + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfFloat64(v), nil + case protoreflect.StringKind: + return protoreflect.ValueOfString(value), nil + case protoreflect.BytesKind: + v, err := base64.StdEncoding.DecodeString(value) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfBytes(v), nil + case protoreflect.MessageKind, protoreflect.GroupKind: + return parseMessage(fd.Message(), value) + default: + panic(fmt.Sprintf("unknown field kind: %v", fd.Kind())) + } +} + +func parseMessage(md protoreflect.MessageDescriptor, value string) (protoreflect.Value, error) { + var msg proto.Message + switch md.FullName() { + case "google.protobuf.Timestamp": + if value == "null" { + break + } + t, err := time.Parse(time.RFC3339Nano, value) + if err != nil { + return protoreflect.Value{}, err + } + msg, err = ptypes.TimestampProto(t) + if err != nil { + return protoreflect.Value{}, err + } + case "google.protobuf.Duration": + if value == "null" { + break + } + d, err := time.ParseDuration(value) + if err != nil { + return protoreflect.Value{}, err + } + msg = ptypes.DurationProto(d) + case "google.protobuf.DoubleValue": + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return protoreflect.Value{}, err + } + msg = &wrappers.DoubleValue{Value: v} + case "google.protobuf.FloatValue": + v, err := strconv.ParseFloat(value, 32) + if err != nil { + return protoreflect.Value{}, err + } + msg = &wrappers.FloatValue{Value: float32(v)} + case "google.protobuf.Int64Value": + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + msg = &wrappers.Int64Value{Value: v} + case "google.protobuf.Int32Value": + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + msg = &wrappers.Int32Value{Value: int32(v)} + case "google.protobuf.UInt64Value": + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + msg = &wrappers.UInt64Value{Value: v} + case "google.protobuf.UInt32Value": + v, err := 
strconv.ParseUint(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + msg = &wrappers.UInt32Value{Value: uint32(v)} + case "google.protobuf.BoolValue": + v, err := strconv.ParseBool(value) + if err != nil { + return protoreflect.Value{}, err + } + msg = &wrappers.BoolValue{Value: v} + case "google.protobuf.StringValue": + msg = &wrappers.StringValue{Value: value} + case "google.protobuf.BytesValue": + v, err := base64.StdEncoding.DecodeString(value) + if err != nil { + return protoreflect.Value{}, err + } + msg = &wrappers.BytesValue{Value: v} + case "google.protobuf.FieldMask": + fm := &field_mask.FieldMask{} + fm.Paths = append(fm.Paths, strings.Split(value, ",")...) + msg = fm + default: + return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(md.FullName())) + } + return protoreflect.ValueOfMessage(msg.ProtoReflect()), nil +} diff --git a/transport/http/client.go b/transport/http/client.go new file mode 100644 index 000000000..358bd3b0b --- /dev/null +++ b/transport/http/client.go @@ -0,0 +1,136 @@ +package http + +import ( + "context" + "io/ioutil" + "net/http" + "time" + + "github.com/go-kratos/kratos/v2/encoding" + "github.com/go-kratos/kratos/v2/errors" + "github.com/go-kratos/kratos/v2/middleware" + "github.com/go-kratos/kratos/v2/transport" +) + +// ClientOption is HTTP client option. +type ClientOption func(*clientOptions) + +// WithTimeout with client request timeout. +func WithTimeout(d time.Duration) ClientOption { + return func(o *clientOptions) { + o.timeout = d + } +} + +// WithUserAgent with client user agent. +func WithUserAgent(ua string) ClientOption { + return func(o *clientOptions) { + o.userAgent = ua + } +} + +// WithTransport with client transport. +func WithTransport(trans http.RoundTripper) ClientOption { + return func(o *clientOptions) { + o.transport = trans + } +} + +// WithMiddleware with client middleware. +func WithMiddleware(m middleware.Middleware) ClientOption { + return func(o *clientOptions) { + o.middleware = m + } +} + +// Client is a HTTP transport client. +type clientOptions struct { + ctx context.Context + timeout time.Duration + userAgent string + transport http.RoundTripper + middleware middleware.Middleware +} + +// NewClient returns an HTTP client. +func NewClient(ctx context.Context, opts ...ClientOption) (*http.Client, error) { + trans, err := NewTransport(ctx, opts...) + if err != nil { + return nil, err + } + return &http.Client{Transport: trans}, nil +} + +// NewTransport creates an http.RoundTripper. 
+func NewTransport(ctx context.Context, opts ...ClientOption) (http.RoundTripper, error) { + options := &clientOptions{ + ctx: ctx, + timeout: 500 * time.Millisecond, + transport: http.DefaultTransport, + } + for _, o := range opts { + o(options) + } + return &baseTransport{ + middleware: options.middleware, + userAgent: options.userAgent, + timeout: options.timeout, + base: options.transport, + }, nil +} + +type baseTransport struct { + userAgent string + timeout time.Duration + base http.RoundTripper + middleware middleware.Middleware +} + +func (t *baseTransport) RoundTrip(req *http.Request) (*http.Response, error) { + if t.userAgent != "" && req.Header.Get("User-Agent") == "" { + req.Header.Set("User-Agent", t.userAgent) + } + ctx := transport.NewContext(req.Context(), transport.Transport{Kind: "HTTP"}) + ctx = NewClientContext(ctx, ClientInfo{Request: req}) + ctx, cancel := context.WithTimeout(ctx, t.timeout) + defer cancel() + + h := func(ctx context.Context, in interface{}) (interface{}, error) { + return t.base.RoundTrip(req) + } + if t.middleware != nil { + h = t.middleware(h) + } + res, err := h(ctx, req) + if err != nil { + return nil, err + } + return res.(*http.Response), nil +} + +// Do send an HTTP request and decodes the body of response into target. +// returns an error (of type *Error) if the response status code is not 2xx. +func Do(client *http.Client, req *http.Request, target interface{}) error { + res, err := client.Do(req) + if err != nil { + return err + } + data, err := ioutil.ReadAll(res.Body) + if err != nil { + return err + } + defer res.Body.Close() + subtype := contentSubtype(res.Header.Get("content-type")) + codec := encoding.GetCodec(subtype) + if codec == nil { + codec = encoding.GetCodec("json") + } + if res.StatusCode < 200 || res.StatusCode > 299 { + se := &errors.StatusError{} + if err := codec.Unmarshal(data, se); err != nil { + return err + } + return se + } + return codec.Unmarshal(data, target) +} diff --git a/transport/http/context.go b/transport/http/context.go new file mode 100644 index 000000000..d12856e34 --- /dev/null +++ b/transport/http/context.go @@ -0,0 +1,50 @@ +package http + +import ( + "context" + "net/http" + + "github.com/gorilla/mux" +) + +// ServerInfo is HTTP server infomation. +type ServerInfo struct { + Request *http.Request + Response http.ResponseWriter +} + +type serverKey struct{} + +// NewServerContext returns a new Context that carries value. +func NewServerContext(ctx context.Context, info ServerInfo) context.Context { + return context.WithValue(ctx, serverKey{}, info) +} + +// FromServerContext returns the Transport value stored in ctx, if any. +func FromServerContext(ctx context.Context) (info ServerInfo, ok bool) { + info, ok = ctx.Value(serverKey{}).(ServerInfo) + return +} + +// ClientInfo is HTTP client infomation. +type ClientInfo struct { + Request *http.Request +} + +type clientKey struct{} + +// NewClientContext returns a new Context that carries value. +func NewClientContext(ctx context.Context, info ClientInfo) context.Context { + return context.WithValue(ctx, clientKey{}, info) +} + +// FromClientContext returns the Transport value stored in ctx, if any. +func FromClientContext(ctx context.Context) (info ClientInfo, ok bool) { + info, ok = ctx.Value(clientKey{}).(ClientInfo) + return +} + +// Vars returns the route variables for the current request, if any. 
+func Vars(req *http.Request) map[string]string { + return mux.Vars(req) +} diff --git a/transport/http/default.go b/transport/http/default.go new file mode 100644 index 000000000..61f7e1576 --- /dev/null +++ b/transport/http/default.go @@ -0,0 +1,81 @@ +package http + +import ( + "fmt" + "io/ioutil" + "net/http" + "strings" + + "github.com/go-kratos/kratos/v2/encoding" +) + +const baseContentType = "application" + +func contentType(subtype string) string { + return strings.Join([]string{baseContentType, subtype}, "/") +} + +func contentSubtype(contentType string) string { + if contentType == baseContentType { + return "" + } + if !strings.HasPrefix(contentType, baseContentType) { + return "" + } + // guaranteed since != baseContentType and has baseContentType prefix + switch contentType[len(baseContentType)] { + case '/', ';': + // this will return true for "application/grpc+" or "application/grpc;" + // which the previous validContentType function tested to be valid, so we + // just say that no content-subtype is specified in this case + return contentType[len(baseContentType)+1:] + default: + return "" + } +} + +func defaultRequestDecoder(req *http.Request, v interface{}) error { + data, err := ioutil.ReadAll(req.Body) + if err != nil { + return err + } + defer req.Body.Close() + subtype := contentSubtype(req.Header.Get("content-type")) + codec := encoding.GetCodec(subtype) + if codec == nil { + return fmt.Errorf("decoding request failed unknown content-type: %s", subtype) + } + return codec.Unmarshal(data, v) +} + +func defaultResponseEncoder(res http.ResponseWriter, req *http.Request, v interface{}) error { + subtype := contentSubtype(req.Header.Get("accept")) + codec := encoding.GetCodec(subtype) + if codec == nil { + codec = encoding.GetCodec("json") + } + data, err := codec.Marshal(v) + if err != nil { + return err + } + res.Header().Set("content-type", contentType(codec.Name())) + res.Write(data) + return nil +} + +func defaultErrorEncoder(res http.ResponseWriter, req *http.Request, err error) { + se, code := StatusError(err) + subtype := contentSubtype(req.Header.Get("accept")) + codec := encoding.GetCodec(subtype) + if codec == nil { + codec = encoding.GetCodec("json") + } + data, err := codec.Marshal(se) + if err != nil { + res.WriteHeader(http.StatusInternalServerError) + return + } + res.Header().Set("content-type", contentType(codec.Name())) + res.WriteHeader(code) + res.Write(data) +} diff --git a/transport/http/errors.go b/transport/http/errors.go new file mode 100644 index 000000000..a0c016530 --- /dev/null +++ b/transport/http/errors.go @@ -0,0 +1,59 @@ +package http + +import ( + "net/http" + + "github.com/go-kratos/kratos/v2/errors" +) + +var ( + // References: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto + codesMapping = map[int32]int{ + 0: http.StatusOK, + 1: http.StatusInternalServerError, + 2: http.StatusInternalServerError, + 3: http.StatusBadRequest, + 4: http.StatusRequestTimeout, + 5: http.StatusNotFound, + 6: http.StatusConflict, + 7: http.StatusForbidden, + 8: http.StatusTooManyRequests, + 9: http.StatusPreconditionFailed, + 10: http.StatusConflict, + 11: http.StatusBadRequest, + 12: http.StatusNotImplemented, + 13: http.StatusInternalServerError, + 14: http.StatusServiceUnavailable, + 15: http.StatusInternalServerError, + 16: http.StatusUnauthorized, + } + statusMapping = map[int]int32{ + http.StatusOK: 0, + http.StatusBadRequest: 3, + http.StatusRequestTimeout: 4, + http.StatusNotFound: 5, + http.StatusConflict: 6, + 
http.StatusForbidden: 7, + http.StatusUnauthorized: 16, + http.StatusPreconditionFailed: 9, + http.StatusNotImplemented: 12, + http.StatusInternalServerError: 13, + http.StatusServiceUnavailable: 14, + } +) + +// StatusError converts error to status error. +func StatusError(err error) (*errors.StatusError, int) { + se, ok := errors.FromError(err) + if !ok { + se = &errors.StatusError{ + Code: 2, + Reason: "Unknown", + Message: "Unknown: " + err.Error(), + } + } + if status, ok := codesMapping[se.Code]; ok { + return se, status + } + return se, http.StatusInternalServerError +} diff --git a/transport/http/route.go b/transport/http/route.go new file mode 100644 index 000000000..86bbcfc3f --- /dev/null +++ b/transport/http/route.go @@ -0,0 +1,55 @@ +package http + +import ( + "net/http" + + "github.com/gorilla/mux" +) + +// RouteGroup adds a matcher for the URL path and method. This matches if the given +// template is a prefix of the full URL path. See route.Path() for details on +// the tpl argument. +type RouteGroup struct { + prefix string + router *mux.Router +} + +// ANY maps an HTTP Any request to the path and the specified handler. +func (r *RouteGroup) ANY(path string, handler http.HandlerFunc) { + r.router.PathPrefix(r.prefix).Path(path).HandlerFunc(handler) +} + +// GET maps an HTTP Get request to the path and the specified handler. +func (r *RouteGroup) GET(path string, handler http.HandlerFunc) { + r.router.PathPrefix(r.prefix).Path(path).HandlerFunc(handler).Methods("GET") +} + +// HEAD maps an HTTP Head request to the path and the specified handler. +func (r *RouteGroup) HEAD(path string, handler http.HandlerFunc) { + r.router.PathPrefix(r.prefix).Path(path).HandlerFunc(handler).Methods("HEAD") +} + +// POST maps an HTTP Post request to the path and the specified handler. +func (r *RouteGroup) POST(path string, handler http.HandlerFunc) { + r.router.PathPrefix(r.prefix).Path(path).HandlerFunc(handler).Methods("POST") +} + +// PUT maps an HTTP Put request to the path and the specified handler. +func (r *RouteGroup) PUT(path string, handler http.HandlerFunc) { + r.router.PathPrefix(r.prefix).Path(path).HandlerFunc(handler).Methods("PUT") +} + +// DELETE maps an HTTP Delete request to the path and the specified handler. +func (r *RouteGroup) DELETE(path string, handler http.HandlerFunc) { + r.router.PathPrefix(r.prefix).Path(path).HandlerFunc(handler).Methods("DELETE") +} + +// PATCH maps an HTTP Patch request to the path and the specified handler. +func (r *RouteGroup) PATCH(path string, handler http.HandlerFunc) { + r.router.PathPrefix(r.prefix).Path(path).HandlerFunc(handler).Methods("PATCH") +} + +// OPTIONS maps an HTTP Options request to the path and the specified handler. +func (r *RouteGroup) OPTIONS(path string, handler http.HandlerFunc) { + r.router.PathPrefix(r.prefix).Path(path).HandlerFunc(handler).Methods("OPTIONS") +} diff --git a/transport/http/server.go b/transport/http/server.go new file mode 100644 index 000000000..8744ef420 --- /dev/null +++ b/transport/http/server.go @@ -0,0 +1,162 @@ +package http + +import ( + "context" + "fmt" + "net" + "net/http" + "time" + + "github.com/go-kratos/kratos/v2/internal/host" + "github.com/go-kratos/kratos/v2/log" + "github.com/go-kratos/kratos/v2/middleware" + "github.com/go-kratos/kratos/v2/middleware/recovery" + "github.com/go-kratos/kratos/v2/transport" + + "github.com/gorilla/mux" +) + +const loggerName = "transport/http" + +var _ transport.Server = (*Server)(nil) + +// DecodeRequestFunc deocder request func. 
+type DecodeRequestFunc func(req *http.Request, v interface{}) error + +// EncodeResponseFunc is encode response func. +type EncodeResponseFunc func(res http.ResponseWriter, req *http.Request, v interface{}) error + +// EncodeErrorFunc is encode error func. +type EncodeErrorFunc func(res http.ResponseWriter, req *http.Request, err error) + +// ServerOption is HTTP server option. +type ServerOption func(*Server) + +// Network with server network. +func Network(network string) ServerOption { + return func(s *Server) { + s.network = network + } +} + +// Address with server address. +func Address(addr string) ServerOption { + return func(s *Server) { + s.address = addr + } +} + +// Timeout with server timeout. +func Timeout(timeout time.Duration) ServerOption { + return func(s *Server) { + s.timeout = timeout + } +} + +// Logger with server logger. +func Logger(logger log.Logger) ServerOption { + return func(s *Server) { + s.log = log.NewHelper(loggerName, logger) + } +} + +// Middleware with server middleware option. +func Middleware(m middleware.Middleware) ServerOption { + return func(s *Server) { + s.middleware = m + } +} + +// ErrorEncoder with error handler option. +func ErrorEncoder(fn EncodeErrorFunc) ServerOption { + return func(s *Server) { + s.errorEncoder = fn + } +} + +// Server is a HTTP server wrapper. +type Server struct { + *http.Server + lis net.Listener + network string + address string + timeout time.Duration + middleware middleware.Middleware + requestDecoder DecodeRequestFunc + responseEncoder EncodeResponseFunc + errorEncoder EncodeErrorFunc + router *mux.Router + log *log.Helper +} + +// NewServer creates a HTTP server by options. +func NewServer(opts ...ServerOption) *Server { + srv := &Server{ + network: "tcp", + address: ":0", + timeout: time.Second, + requestDecoder: defaultRequestDecoder, + responseEncoder: defaultResponseEncoder, + errorEncoder: defaultErrorEncoder, + middleware: recovery.Recovery(), + log: log.NewHelper(loggerName, log.DefaultLogger), + } + for _, o := range opts { + o(srv) + } + srv.router = mux.NewRouter() + srv.Server = &http.Server{Handler: srv} + return srv +} + +// RouteGroup . +func (s *Server) RouteGroup(prefix string) *RouteGroup { + return &RouteGroup{prefix: prefix, router: s.router} +} + +// Handle registers a new route with a matcher for the URL path. +func (s *Server) Handle(path string, h http.Handler) { + s.router.Handle(path, h) +} + +// HandleFunc registers a new route with a matcher for the URL path. +func (s *Server) HandleFunc(path string, h http.HandlerFunc) { + s.router.HandleFunc(path, h) +} + +// ServeHTTP should write reply headers and data to the ResponseWriter and then return. +func (s *Server) ServeHTTP(res http.ResponseWriter, req *http.Request) { + ctx, cancel := context.WithTimeout(req.Context(), s.timeout) + defer cancel() + ctx = transport.NewContext(ctx, transport.Transport{Kind: "HTTP"}) + ctx = NewServerContext(ctx, ServerInfo{Request: req, Response: res}) + s.router.ServeHTTP(res, req.WithContext(ctx)) +} + +// Endpoint return a real address to registry endpoint. +// examples: +// http://127.0.0.1:8000?isSecure=false +func (s *Server) Endpoint() (string, error) { + addr, err := host.Extract(s.address, s.lis) + if err != nil { + return "", err + } + return fmt.Sprintf("http://%s", addr), nil +} + +// Start start the HTTP server. 
+func (s *Server) Start() error { + lis, err := net.Listen(s.network, s.address) + if err != nil { + return err + } + s.lis = lis + s.log.Infof("[HTTP] server listening on: %s", lis.Addr().String()) + return s.Serve(lis) +} + +// Stop stop the HTTP server. +func (s *Server) Stop() error { + s.log.Info("[HTTP] server stopping") + return s.Shutdown(context.Background()) +} diff --git a/transport/http/server_test.go b/transport/http/server_test.go new file mode 100644 index 000000000..d8a2671d7 --- /dev/null +++ b/transport/http/server_test.go @@ -0,0 +1,79 @@ +package http + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "testing" + "time" + + "github.com/go-kratos/kratos/v2/internal/host" +) + +type testData struct { + Path string `json:"path"` +} + +func TestServer(t *testing.T) { + fn := func(w http.ResponseWriter, r *http.Request) { + data := &testData{Path: r.RequestURI} + json.NewEncoder(w).Encode(data) + } + srv := NewServer() + group := srv.RouteGroup("/test") + { + group.GET("/", fn) + group.HEAD("/index", fn) + group.OPTIONS("/home", fn) + group.PUT("/products/{id}", fn) + group.POST("/products/{id}", fn) + group.PATCH("/products/{id}", fn) + group.DELETE("/products/{id}", fn) + } + + time.AfterFunc(time.Second, func() { + defer srv.Stop() + testClient(t, srv) + }) + + if err := srv.Start(); !errors.Is(err, http.ErrServerClosed) { + t.Fatal(err) + } +} + +func testClient(t *testing.T, srv *Server) { + tests := []struct { + method string + path string + }{ + {"GET", "/test/"}, + {"PUT", "/test/products/1"}, + {"POST", "/test/products/2"}, + {"PATCH", "/test/products/3"}, + {"DELETE", "/test/products/4"}, + } + client, err := NewClient(context.Background()) + if err != nil { + t.Fatal(err) + } + port, ok := host.Port(srv.lis) + if !ok { + t.Fatalf("extract port error: %v", srv.lis) + } + for _, test := range tests { + var res testData + url := fmt.Sprintf("http://127.0.0.1:%d%s", port, test.path) + req, err := http.NewRequest(test.method, url, nil) + if err != nil { + t.Fatal(err) + } + if err := Do(client, req, &res); err != nil { + t.Fatal(err) + } + if res.Path != test.path { + t.Errorf("expected %s got %s", test.path, res.Path) + } + } +} diff --git a/transport/http/service.go b/transport/http/service.go new file mode 100644 index 000000000..9a09847ff --- /dev/null +++ b/transport/http/service.go @@ -0,0 +1,51 @@ +package http + +import ( + "context" + "net/http" + + "github.com/go-kratos/kratos/v2/middleware" +) + +// SupportPackageIsVersion1 These constants should not be referenced from any other code. +const SupportPackageIsVersion1 = true + +type methodHandler func(srv interface{}, ctx context.Context, req *http.Request, dec func(interface{}) error, m middleware.Middleware) (out interface{}, err error) + +// MethodDesc represents a Proto service's method specification. +type MethodDesc struct { + Path string + Method string + Handler methodHandler +} + +// ServiceDesc represents a Proto service's specification. +type ServiceDesc struct { + ServiceName string + Methods []MethodDesc + Metadata interface{} +} + +// ServiceRegistrar wraps a single method that supports service registration. +type ServiceRegistrar interface { + RegisterService(desc *ServiceDesc, impl interface{}) +} + +// RegisterService . 
+func (s *Server) RegisterService(desc *ServiceDesc, impl interface{}) { + for _, m := range desc.Methods { + h := m.Handler + s.router.HandleFunc(m.Path, func(res http.ResponseWriter, req *http.Request) { + out, err := h(impl, req.Context(), req, func(v interface{}) error { + return s.requestDecoder(req, v) + }, s.middleware) + if err != nil { + s.errorEncoder(res, req, err) + return + } + if err := s.responseEncoder(res, req, out); err != nil { + s.errorEncoder(res, req, err) + } + }).Methods(m.Method) + } +} diff --git a/transport/http/service_test.go b/transport/http/service_test.go new file mode 100644 index 000000000..d263249d1 --- /dev/null +++ b/transport/http/service_test.go @@ -0,0 +1,98 @@ +package http + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "testing" + "time" + + "github.com/go-kratos/kratos/v2/internal/host" + "github.com/go-kratos/kratos/v2/middleware" +) + +type testRequest struct { + Name string `json:"name"` +} +type testReply struct { + Result string `json:"result"` +} +type testService struct{} + +func (s *testService) SayHello(ctx context.Context, req *testRequest) (*testReply, error) { + return &testReply{Result: req.Name}, nil +} + +func TestService(t *testing.T) { + h := func(srv interface{}, ctx context.Context, req *http.Request, dec func(interface{}) error, m middleware.Middleware) (interface{}, error) { + var in testRequest + if err := dec(&in); err != nil { + return nil, err + } + h := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(*testService).SayHello(ctx, &in) + } + out, err := m(h)(ctx, &in) + if err != nil { + return nil, err + } + return out, nil + } + sd := &ServiceDesc{ + ServiceName: "helloworld.Greeter", + Methods: []MethodDesc{ + { + Path: "/helloworld", + Method: "POST", + Handler: h, + }, + }, + } + + svc := &testService{} + srv := NewServer() + srv.RegisterService(sd, svc) + + time.AfterFunc(time.Second, func() { + defer srv.Stop() + testServiceClient(t, srv) + }) + + if err := srv.Start(); !errors.Is(err, http.ErrServerClosed) { + t.Fatal(err) + } +} + +func testServiceClient(t *testing.T, srv *Server) { + client, err := NewClient(context.Background()) + if err != nil { + t.Fatal(err) + } + port, ok := host.Port(srv.lis) + if !ok { + t.Fatalf("extract port error: %v", srv.lis) + } + var ( + in = testRequest{Name: "hello"} + out = testReply{} + url = fmt.Sprintf("http://127.0.0.1:%d/helloworld", port) + ) + data, err := json.Marshal(in) + if err != nil { + t.Fatal(err) + } + req, err := http.NewRequest("POST", url, bytes.NewReader(data)) + if err != nil { + t.Fatal(err) + } + req.Header.Set("content-type", "application/json") + if err := Do(client, req, &out); err != nil { + t.Fatal(err) + } + if out.Result != in.Name { + t.Fatalf("expected %s got %s", in.Name, out.Result) + } +} diff --git a/transport/transport.go b/transport/transport.go new file mode 100644 index 000000000..64e74734a --- /dev/null +++ b/transport/transport.go @@ -0,0 +1,34 @@ +package transport + +import ( + "context" + + // init encoding + _ "github.com/go-kratos/kratos/v2/encoding/json" + _ "github.com/go-kratos/kratos/v2/encoding/proto" +) + +// Server is transport server. +type Server interface { + Endpoint() (string, error) + Start() error + Stop() error +} + +// Transport is transport context value. +type Transport struct { + Kind string +} + +type transportKey struct{} + +// NewContext returns a new Context that carries value. 
+func NewContext(ctx context.Context, tr Transport) context.Context { + return context.WithValue(ctx, transportKey{}, tr) +} + +// FromContext returns the Transport value stored in ctx, if any. +func FromContext(ctx context.Context) (tr Transport, ok bool) { + tr, ok = ctx.Value(transportKey{}).(Transport) + return +}
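For orientation, below is a minimal usage sketch of the new transport packages introduced by this diff, using only APIs defined above (transport/http NewServer, RouteGroup, Do; transport/grpc NewServer, DialInsecure). It is not part of the patch: the addresses, the /v1/ping route, and the omitted generated-pb service registration are illustrative assumptions, and error handling is trimmed.

package main

import (
	"context"
	"errors"
	"log"
	"net/http"
	"time"

	transportgrpc "github.com/go-kratos/kratos/v2/transport/grpc"
	transporthttp "github.com/go-kratos/kratos/v2/transport/http"
)

func main() {
	// HTTP server: register a route group and a handler, then listen.
	httpSrv := transporthttp.NewServer(transporthttp.Address(":8000"))
	group := httpSrv.RouteGroup("/v1")
	group.GET("/ping", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`{"message":"pong"}`))
	})
	go func() {
		// Start returns http.ErrServerClosed after a clean Stop.
		if err := httpSrv.Start(); !errors.Is(err, http.ErrServerClosed) {
			log.Fatal(err)
		}
	}()

	// gRPC server: pb.RegisterGreeterServer(grpcSrv, &service{}) would go here.
	grpcSrv := transportgrpc.NewServer(transportgrpc.Address(":9000"))
	go func() {
		if err := grpcSrv.Start(); err != nil {
			log.Fatal(err)
		}
	}()

	// Crude wait for the listeners in this sketch; real code would coordinate startup.
	time.Sleep(time.Second)

	// gRPC client: dial the server without transport security.
	conn, err := transportgrpc.DialInsecure(
		context.Background(),
		transportgrpc.WithEndpoint("127.0.0.1:9000"),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// ... use conn with a generated client, then shut everything down.
	_ = grpcSrv.Stop()
	_ = httpSrv.Stop()
}

The same options pattern applies to the HTTP client side: transporthttp.NewClient(ctx, transporthttp.WithTimeout(...)) builds an *http.Client whose RoundTripper applies the configured middleware, and transporthttp.Do decodes the response body (or a StatusError for non-2xx codes) into a target struct, as exercised in server_test.go above.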