Mirror of https://github.com/rclone/rclone.git, synced 2025-03-17 20:27:52 +02:00
Switch to using the dep tool and update all the dependencies
commit 98c2d2c41b (parent 5135ff73cb)
@@ -139,14 +139,42 @@ Documentation for rclone sub commands is with their code, eg

There are separate instructions for making a release in the RELEASE.md
file.

## Updating the vendor directory ##

## Adding a dependency ##

Do these commands to update the entire build directory to the latest
version of all the dependencies. This should be done early in the
release cycle. Individual dependencies can be added with `godep get`.

rclone uses the [dep](https://github.com/golang/dep) tool to manage
its dependencies. All code that rclone needs for building is stored
in the `vendor` directory for perfectly reproducible builds.

  * make build_dep
  * make update

The `vendor` directory is entirely managed by the `dep` tool.

To add a new dependency

    dep ensure github.com/pkg/errors

You can add constraints on that package (see the `dep` documentation),
but don't unless you really need to.

Please check in the changes generated by dep, including the `vendor`
directory and `Gopkg.toml` and `Gopkg.lock`, in a single commit
separate from any other code changes. Watch out for new files in
`vendor`.
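
For example, the whole sequence for adding a dependency might look
like this (a sketch only: the package is the one used above, and the
commit message is illustrative):

    dep ensure github.com/pkg/errors
    git status vendor
    git add Gopkg.toml Gopkg.lock vendor
    git commit -m "Add github.com/pkg/errors to the vendor directory"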

## Updating a dependency ##

If you need to update a dependency then run

    dep ensure -update github.com/pkg/errors

Check it in in a single commit as above.

## Updating all the dependencies ##

To update all the dependencies, run `make update`. This just runs
`dep ensure -update`. Check in the changes in a single commit as
above.

This should be done early in the release cycle to pick up new
versions of packages in time for them to get some testing.
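
For example, an early-release-cycle refresh might look like this (a
sketch: the `go build`/`go test` steps are an extra sanity check, not
something `make update` runs for you, and the commit message is
illustrative):

    make update
    go build ./...
    go test ./...
    git add Gopkg.toml Gopkg.lock vendor
    git commit -m "Update vendor directory"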

## Writing a new backend ##

Godeps/Godeps.json (generated, 513 lines deleted)
@@ -1,513 +0,0 @@
{
  "ImportPath": "github.com/ncw/rclone",
  "GoVersion": "go1.7",
  "GodepVersion": "v75",
  "Packages": [
    "./..."
  ],
  "Deps": [
    {
      "ImportPath": "bazil.org/fuse",
      "Rev": "371fbbdaa8987b715bdd21d6adc4c9b20155f748"
    },
    {
      "ImportPath": "bazil.org/fuse/fs",
      "Rev": "371fbbdaa8987b715bdd21d6adc4c9b20155f748"
    },
    {
      "ImportPath": "bazil.org/fuse/fuseutil",
      "Rev": "371fbbdaa8987b715bdd21d6adc4c9b20155f748"
    },
    {
      "ImportPath": "cloud.google.com/go/compute/metadata",
      "Comment": "v0.6.0-68-g0b87d14",
      "Rev": "0b87d14d90086b53a97dfbd66f3000f7f112b494"
    },
    {
      "ImportPath": "cloud.google.com/go/internal",
      "Comment": "v0.6.0-68-g0b87d14",
      "Rev": "0b87d14d90086b53a97dfbd66f3000f7f112b494"
    },
    {
      "ImportPath": "github.com/Unknwon/goconfig",
      "Rev": "87a46d97951ee1ea20ed3b24c25646a79e87ba5d"
    },
    {
      "ImportPath": "github.com/VividCortex/ewma",
      "Comment": "v1.0-20-gc595cd8",
      "Rev": "c595cd886c223c6c28fc9ae2727a61b5e4693d85"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/aws",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/aws/awserr",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/aws/client",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/stscreds",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/aws/defaults",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/aws/endpoints",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/aws/request",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/aws/session",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/aws/signer/v4",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/private/protocol",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/private/waiter",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/service/s3",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/service/s3/s3iface",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/service/s3/s3manager",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/aws/aws-sdk-go/service/sts",
      "Comment": "v1.6.24-1-g2d3b3bc",
      "Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
    },
    {
      "ImportPath": "github.com/cpuguy83/go-md2man/md2man",
      "Comment": "v1.0.6",
      "Rev": "a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa"
    },
    {
      "ImportPath": "github.com/davecgh/go-spew/spew",
      "Comment": "v1.1.0",
      "Rev": "346938d642f2ec3594ed81d874461961cd0faa76"
    },
    {
      "ImportPath": "github.com/go-ini/ini",
      "Comment": "v1.24.0-2-gee900ca",
      "Rev": "ee900ca565931451fe4e4409bcbd4316331cec1c"
    },
    {
      "ImportPath": "github.com/golang/protobuf/proto",
      "Rev": "8ee79997227bf9b34611aee7946ae64735e6fd93"
    },
    {
      "ImportPath": "github.com/google/go-querystring/query",
      "Rev": "53e6ce116135b80d037921a7fdd5138cf32d7a8a"
    },
    {
      "ImportPath": "github.com/googleapis/gax-go",
      "Rev": "da06d194a00e19ce00d9011a13931c3f6f6887c7"
    },
    {
      "ImportPath": "github.com/inconshreveable/mousetrap",
      "Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
    },
    {
      "ImportPath": "github.com/jmespath/go-jmespath",
      "Comment": "0.2.2-14-gbd40a43",
      "Rev": "bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d"
    },
    {
      "ImportPath": "github.com/kr/fs",
      "Rev": "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
    },
    {
      "ImportPath": "github.com/ncw/go-acd",
      "Rev": "7954f1fad2bda6a7836999003e4481d6e32edc1e"
    },
    {
      "ImportPath": "github.com/ncw/swift",
      "Rev": "6c1b1510538e1f00d49a558b7b9b87d71bc454d6"
    },
    {
      "ImportPath": "github.com/pkg/errors",
      "Comment": "v0.8.0-2-g248dadf",
      "Rev": "248dadf4e9068a0b3e79f02ed0a610d935de5302"
    },
    {
      "ImportPath": "github.com/pkg/sftp",
      "Rev": "ff7e52ffd762466ebd2c4e710d5436dccc539f54"
    },
    {
      "ImportPath": "github.com/pmezard/go-difflib/difflib",
      "Comment": "v1.0.0",
      "Rev": "792786c7400a136282c1664665ae0a8db921c6c2"
    },
    {
      "ImportPath": "github.com/rfjakob/eme",
      "Comment": "v1.0-15-gfd00240",
      "Rev": "fd00240838d2e0fe6b2c58bf5b27db843d828ad5"
    },
    {
      "ImportPath": "github.com/russross/blackfriday",
      "Comment": "v1.4-40-g5f33e7b",
      "Rev": "5f33e7b7878355cd2b7e6b8eefc48a5472c69f70"
    },
    {
      "ImportPath": "github.com/shurcooL/sanitized_anchor_name",
      "Rev": "1dba4b3954bc059efc3991ec364f9f9a35f597d2"
    },
    {
      "ImportPath": "github.com/skratchdot/open-golang/open",
      "Rev": "75fb7ed4208cf72d323d7d02fd1a5964a7a9073c"
    },
    {
      "ImportPath": "github.com/spf13/cobra",
      "Rev": "b5d8e8f46a2f829f755b6e33b454e25c61c935e1"
    },
    {
      "ImportPath": "github.com/spf13/cobra/doc",
      "Rev": "b5d8e8f46a2f829f755b6e33b454e25c61c935e1"
    },
    {
      "ImportPath": "github.com/spf13/pflag",
      "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7"
    },
    {
      "ImportPath": "github.com/stacktic/dropbox",
      "Rev": "58f839b21094d5e0af7caf613599830589233d20"
    },
    {
      "ImportPath": "github.com/stretchr/testify/assert",
      "Comment": "v1.1.4-27-g4d4bfba",
      "Rev": "4d4bfba8f1d1027c4fdbe371823030df51419987"
    },
    {
      "ImportPath": "github.com/stretchr/testify/require",
      "Comment": "v1.1.4-27-g4d4bfba",
      "Rev": "4d4bfba8f1d1027c4fdbe371823030df51419987"
    },
    {
      "ImportPath": "github.com/tsenart/tb",
      "Rev": "19f4c3d79d2bd67d0911b2e310b999eeea4454c1"
    },
    {
      "ImportPath": "golang.org/x/crypto/curve25519",
      "Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
    },
    {
      "ImportPath": "golang.org/x/crypto/ed25519",
      "Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
    },
    {
      "ImportPath": "golang.org/x/crypto/ed25519/internal/edwards25519",
      "Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
    },
    {
      "ImportPath": "golang.org/x/crypto/nacl/secretbox",
      "Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
    },
    {
      "ImportPath": "golang.org/x/crypto/pbkdf2",
      "Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
    },
    {
      "ImportPath": "golang.org/x/crypto/poly1305",
      "Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
    },
    {
      "ImportPath": "golang.org/x/crypto/salsa20/salsa",
      "Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
    },
    {
      "ImportPath": "golang.org/x/crypto/scrypt",
      "Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
    },
    {
      "ImportPath": "golang.org/x/crypto/ssh",
      "Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
    },
    {
      "ImportPath": "golang.org/x/crypto/ssh/agent",
      "Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
    },
    {
      "ImportPath": "golang.org/x/crypto/ssh/terminal",
      "Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
    },
    {
      "ImportPath": "golang.org/x/net/context",
      "Rev": "b4690f45fa1cafc47b1c280c2e75116efe40cc13"
    },
    {
      "ImportPath": "golang.org/x/net/context/ctxhttp",
      "Rev": "b4690f45fa1cafc47b1c280c2e75116efe40cc13"
    },
    {
      "ImportPath": "golang.org/x/net/http2",
      "Rev": "b4690f45fa1cafc47b1c280c2e75116efe40cc13"
    },
    {
      "ImportPath": "golang.org/x/net/http2/hpack",
      "Rev": "b4690f45fa1cafc47b1c280c2e75116efe40cc13"
    },
    {
      "ImportPath": "golang.org/x/net/idna",
      "Rev": "b4690f45fa1cafc47b1c280c2e75116efe40cc13"
    },
    {
      "ImportPath": "golang.org/x/net/internal/timeseries",
      "Rev": "b4690f45fa1cafc47b1c280c2e75116efe40cc13"
    },
    {
      "ImportPath": "golang.org/x/net/lex/httplex",
      "Rev": "b4690f45fa1cafc47b1c280c2e75116efe40cc13"
    },
    {
      "ImportPath": "golang.org/x/net/trace",
      "Rev": "b4690f45fa1cafc47b1c280c2e75116efe40cc13"
    },
    {
      "ImportPath": "golang.org/x/oauth2",
      "Rev": "b9780ec78894ab900c062d58ee3076cd9b2a4501"
    },
    {
      "ImportPath": "golang.org/x/oauth2/google",
      "Rev": "b9780ec78894ab900c062d58ee3076cd9b2a4501"
    },
    {
      "ImportPath": "golang.org/x/oauth2/internal",
      "Rev": "b9780ec78894ab900c062d58ee3076cd9b2a4501"
    },
    {
      "ImportPath": "golang.org/x/oauth2/jws",
      "Rev": "b9780ec78894ab900c062d58ee3076cd9b2a4501"
    },
    {
      "ImportPath": "golang.org/x/oauth2/jwt",
      "Rev": "b9780ec78894ab900c062d58ee3076cd9b2a4501"
    },
    {
      "ImportPath": "golang.org/x/sys/unix",
      "Rev": "075e574b89e4c2d22f2286a7e2b919519c6f3547"
    },
    {
      "ImportPath": "golang.org/x/text/transform",
      "Rev": "85c29909967d7f171f821e7a42e7b7af76fb9598"
    },
    {
      "ImportPath": "golang.org/x/text/unicode/norm",
      "Rev": "85c29909967d7f171f821e7a42e7b7af76fb9598"
    },
    {
      "ImportPath": "google.golang.org/api/drive/v2",
      "Rev": "bc20c61134e1d25265dd60049f5735381e79b631"
    },
    {
      "ImportPath": "google.golang.org/api/gensupport",
      "Rev": "bc20c61134e1d25265dd60049f5735381e79b631"
    },
    {
      "ImportPath": "google.golang.org/api/googleapi",
      "Rev": "bc20c61134e1d25265dd60049f5735381e79b631"
    },
    {
      "ImportPath": "google.golang.org/api/googleapi/internal/uritemplates",
      "Rev": "bc20c61134e1d25265dd60049f5735381e79b631"
    },
    {
      "ImportPath": "google.golang.org/api/storage/v1",
      "Rev": "bc20c61134e1d25265dd60049f5735381e79b631"
    },
    {
      "ImportPath": "google.golang.org/appengine",
      "Comment": "v1.0.0-28-g2e4a801",
      "Rev": "2e4a801b39fc199db615bfca7d0b9f8cd9580599"
    },
    {
      "ImportPath": "google.golang.org/appengine/internal",
      "Comment": "v1.0.0-28-g2e4a801",
      "Rev": "2e4a801b39fc199db615bfca7d0b9f8cd9580599"
    },
    {
      "ImportPath": "google.golang.org/appengine/internal/app_identity",
      "Comment": "v1.0.0-28-g2e4a801",
      "Rev": "2e4a801b39fc199db615bfca7d0b9f8cd9580599"
    },
    {
      "ImportPath": "google.golang.org/appengine/internal/base",
      "Comment": "v1.0.0-28-g2e4a801",
      "Rev": "2e4a801b39fc199db615bfca7d0b9f8cd9580599"
    },
    {
      "ImportPath": "google.golang.org/appengine/internal/datastore",
      "Comment": "v1.0.0-28-g2e4a801",
      "Rev": "2e4a801b39fc199db615bfca7d0b9f8cd9580599"
    },
    {
      "ImportPath": "google.golang.org/appengine/internal/log",
      "Comment": "v1.0.0-28-g2e4a801",
      "Rev": "2e4a801b39fc199db615bfca7d0b9f8cd9580599"
    },
    {
      "ImportPath": "google.golang.org/appengine/internal/modules",
      "Comment": "v1.0.0-28-g2e4a801",
      "Rev": "2e4a801b39fc199db615bfca7d0b9f8cd9580599"
    },
    {
      "ImportPath": "google.golang.org/appengine/internal/remote_api",
      "Comment": "v1.0.0-28-g2e4a801",
      "Rev": "2e4a801b39fc199db615bfca7d0b9f8cd9580599"
    },
    {
      "ImportPath": "google.golang.org/grpc",
      "Comment": "v1.0.5-52-gd0c32ee",
      "Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
    },
    {
      "ImportPath": "google.golang.org/grpc/codes",
      "Comment": "v1.0.5-52-gd0c32ee",
      "Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
    },
    {
      "ImportPath": "google.golang.org/grpc/credentials",
      "Comment": "v1.0.5-52-gd0c32ee",
      "Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
    },
    {
      "ImportPath": "google.golang.org/grpc/grpclog",
      "Comment": "v1.0.5-52-gd0c32ee",
      "Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
    },
    {
      "ImportPath": "google.golang.org/grpc/internal",
      "Comment": "v1.0.5-52-gd0c32ee",
      "Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
    },
    {
      "ImportPath": "google.golang.org/grpc/metadata",
      "Comment": "v1.0.5-52-gd0c32ee",
      "Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
    },
    {
      "ImportPath": "google.golang.org/grpc/naming",
      "Comment": "v1.0.5-52-gd0c32ee",
      "Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
    },
    {
      "ImportPath": "google.golang.org/grpc/peer",
      "Comment": "v1.0.5-52-gd0c32ee",
      "Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
    },
    {
      "ImportPath": "google.golang.org/grpc/stats",
      "Comment": "v1.0.5-52-gd0c32ee",
      "Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
    },
    {
      "ImportPath": "google.golang.org/grpc/tap",
      "Comment": "v1.0.5-52-gd0c32ee",
      "Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
    },
    {
      "ImportPath": "google.golang.org/grpc/transport",
      "Comment": "v1.0.5-52-gd0c32ee",
      "Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
    },
    {
      "ImportPath": "gopkg.in/yaml.v2",
      "Rev": "a3f3340b5840cee44f372bddb5880fcbc419b46a"
    }
  ]
}

Gopkg.lock (generated, new file, 229 lines)
@@ -0,0 +1,229 @@
memo = "0c86c911e62d7207fb8549d69519a5a795ce4b36aadce87b84504f109eefe60b"

[[projects]]
  branch = "master"
  name = "bazil.org/fuse"
  packages = [".","fs","fuseutil"]
  revision = "371fbbdaa8987b715bdd21d6adc4c9b20155f748"

[[projects]]
  name = "cloud.google.com/go"
  packages = ["compute/metadata","internal"]
  revision = "2e6a95edb1071d750f6d7db777bf66cd2997af6c"
  version = "v0.7.0"

[[projects]]
  branch = "master"
  name = "github.com/Unknwon/goconfig"
  packages = ["."]
  revision = "87a46d97951ee1ea20ed3b24c25646a79e87ba5d"

[[projects]]
  branch = "master"
  name = "github.com/VividCortex/ewma"
  packages = ["."]
  revision = "c595cd886c223c6c28fc9ae2727a61b5e4693d85"

[[projects]]
  branch = "master"
  name = "github.com/aws/aws-sdk-go"
  packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","private/protocol","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/s3/s3iface","service/s3/s3manager","service/sts"]
  revision = "952498f4a390118ac65ad59cbe0c8b57ed69b6b5"

[[projects]]
  name = "github.com/cpuguy83/go-md2man"
  packages = ["md2man"]
  revision = "a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa"
  version = "v1.0.6"

[[projects]]
  name = "github.com/davecgh/go-spew"
  packages = ["spew"]
  revision = "346938d642f2ec3594ed81d874461961cd0faa76"
  version = "v1.1.0"

[[projects]]
  name = "github.com/go-ini/ini"
  packages = ["."]
  revision = "e7fea39b01aea8d5671f6858f0532f56e8bff3a5"
  version = "v1.27.0"

[[projects]]
  branch = "master"
  name = "github.com/golang/protobuf"
  packages = ["proto","ptypes/any"]
  revision = "b50ceb1fa9818fa4d78b016c2d4ae025593a7ce3"

[[projects]]
  branch = "master"
  name = "github.com/google/go-querystring"
  packages = ["query"]
  revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a"

[[projects]]
  branch = "master"
  name = "github.com/googleapis/gax-go"
  packages = ["."]
  revision = "9af46dd5a1713e8b5cd71106287eba3cefdde50b"

[[projects]]
  branch = "master"
  name = "github.com/inconshreveable/mousetrap"
  packages = ["."]
  revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"

[[projects]]
  name = "github.com/jmespath/go-jmespath"
  packages = ["."]
  revision = "3433f3ea46d9f8019119e7dd41274e112a2359a9"
  version = "0.2.2"

[[projects]]
  branch = "master"
  name = "github.com/kr/fs"
  packages = ["."]
  revision = "2788f0dbd16903de03cb8186e5c7d97b69ad387b"

[[projects]]
  branch = "master"
  name = "github.com/ncw/go-acd"
  packages = ["."]
  revision = "96a49aad3fc3889629f2eceb004927386884bd92"

[[projects]]
  branch = "master"
  name = "github.com/ncw/swift"
  packages = ["."]
  revision = "8e9b10220613abdbc2896808ee6b43e411a4fa6c"

[[projects]]
  branch = "master"
  name = "github.com/pkg/errors"
  packages = ["."]
  revision = "c605e284fe17294bda444b34710735b29d1a9d90"

[[projects]]
  branch = "master"
  name = "github.com/pkg/sftp"
  packages = ["."]
  revision = "a5f8514e29e90a859e93871b1582e5c81f466f82"

[[projects]]
  name = "github.com/pmezard/go-difflib"
  packages = ["difflib"]
  revision = "792786c7400a136282c1664665ae0a8db921c6c2"
  version = "v1.0.0"

[[projects]]
  branch = "master"
  name = "github.com/rfjakob/eme"
  packages = ["."]
  revision = "da627cc50b6fb2eb623eaffe91fb29d7eddfd06a"

[[projects]]
  name = "github.com/russross/blackfriday"
  packages = ["."]
  revision = "0b647d0506a698cca42caca173e55559b12a69f2"
  version = "v1.4"

[[projects]]
  branch = "master"
  name = "github.com/shurcooL/sanitized_anchor_name"
  packages = ["."]
  revision = "79c90efaf01eddc01945af5bc1797859189b830b"

[[projects]]
  branch = "master"
  name = "github.com/skratchdot/open-golang"
  packages = ["open"]
  revision = "75fb7ed4208cf72d323d7d02fd1a5964a7a9073c"

[[projects]]
  branch = "master"
  name = "github.com/spf13/cobra"
  packages = [".","doc"]
  revision = "1362f95a8d6fe330d00a64380d6e0b65f4992c72"

[[projects]]
  branch = "master"
  name = "github.com/spf13/pflag"
  packages = ["."]
  revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66"

[[projects]]
  branch = "master"
  name = "github.com/stacktic/dropbox"
  packages = ["."]
  revision = "58f839b21094d5e0af7caf613599830589233d20"

[[projects]]
  branch = "master"
  name = "github.com/stretchr/testify"
  packages = ["assert","require"]
  revision = "4d4bfba8f1d1027c4fdbe371823030df51419987"

[[projects]]
  branch = "master"
  name = "github.com/tsenart/tb"
  packages = ["."]
  revision = "19f4c3d79d2bd67d0911b2e310b999eeea4454c1"

[[projects]]
  branch = "master"
  name = "golang.org/x/crypto"
  packages = ["curve25519","ed25519","ed25519/internal/edwards25519","nacl/secretbox","pbkdf2","poly1305","salsa20/salsa","scrypt","ssh","ssh/agent","ssh/terminal"]
  revision = "122d919ec1efcfb58483215da23f815853e24b81"

[[projects]]
  branch = "master"
  name = "golang.org/x/net"
  packages = ["context","context/ctxhttp","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"]
  revision = "c9b681d35165f1995d6f3034e61f8761d4b90c99"

[[projects]]
  branch = "master"
  name = "golang.org/x/oauth2"
  packages = [".","google","internal","jws","jwt"]
  revision = "ad516a297a9f2a74ecc244861b298c94bdd28b9d"

[[projects]]
  branch = "master"
  name = "golang.org/x/sys"
  packages = ["unix"]
  revision = "9ccfe848b9db8435a24c424abbc07a921adf1df5"

[[projects]]
  branch = "master"
  name = "golang.org/x/text"
  packages = ["internal/gen","internal/triegen","internal/ucd","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
  revision = "470f45bf29f4147d6fbd7dfd0a02a848e49f5bf4"

[[projects]]
  branch = "master"
  name = "google.golang.org/api"
  packages = ["drive/v2","gensupport","googleapi","googleapi/internal/uritemplates","storage/v1"]
  revision = "c858ef4400610cbfd097ffc5f5c6e4a1a51eac86"

[[projects]]
  name = "google.golang.org/appengine"
  packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
  revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
  version = "v1.0.0"

[[projects]]
  branch = "master"
  name = "google.golang.org/genproto"
  packages = ["googleapis/rpc/status"]
  revision = "411e09b969b1170a9f0c467558eb4c4c110d9c77"

[[projects]]
  name = "google.golang.org/grpc"
  packages = [".","codes","credentials","grpclb/grpc_lb_v1","grpclog","internal","keepalive","metadata","naming","peer","stats","status","tap","transport"]
  revision = "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
  version = "v1.3.0"

[[projects]]
  branch = "v2"
  name = "gopkg.in/yaml.v2"
  packages = ["."]
  revision = "cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b"

Gopkg.toml (new file, 136 lines)
@@ -0,0 +1,136 @@

## Gopkg.toml example (these lines may be deleted)

## "required" lists a set of packages (not projects) that must be included in
## Gopkg.lock. This list is merged with the set of packages imported by the current
## project. Use it when your project needs a package it doesn't explicitly import -
## including "main" packages.
# required = ["github.com/user/thing/cmd/thing"]

## "ignored" lists a set of packages (not projects) that are ignored when
## dep statically analyzes source code. Ignored packages can be in this project,
## or in a dependency.
# ignored = ["github.com/user/project/badpkg"]

## Dependencies define constraints on dependent projects. They are respected by
## dep whether coming from the Gopkg.toml of the current project or a dependency.
# [[dependencies]]
## Required: the root import path of the project being constrained.
# name = "github.com/user/project"
#
## Recommended: the version constraint to enforce for the project.
## Only one of "branch", "version" or "revision" can be specified.
# version = "1.0.0"
# branch = "master"
# revision = "abc123"
#
## Optional: an alternate location (URL or import path) for the project's source.
# source = "https://github.com/myfork/package.git"

## Overrides have the same structure as [[dependencies]], but supersede all
## [[dependencies]] declarations from all projects. Only the current project's
## [[overrides]] are applied.
##
## Overrides are a sledgehammer. Use them only as a last resort.
# [[overrides]]
## Required: the root import path of the project being constrained.
# name = "github.com/user/project"
#
## Optional: specifying a version constraint override will cause all other
## constraints on this project to be ignored; only the overridden constraint
## need be satisfied.
## Again, only one of "branch", "version" or "revision" can be specified.
# version = "1.0.0"
# branch = "master"
# revision = "abc123"
#
## Optional: specifying an alternate source location as an override will
## enforce that the alternate location is used for that project, regardless of
## what source location any dependent projects specify.
# source = "https://github.com/myfork/package.git"


[[dependencies]]
  branch = "master"
  name = "bazil.org/fuse"

[[dependencies]]
  branch = "master"
  name = "github.com/Unknwon/goconfig"

[[dependencies]]
  branch = "master"
  name = "github.com/VividCortex/ewma"

[[dependencies]]
  branch = "master"
  name = "github.com/aws/aws-sdk-go"

[[dependencies]]
  branch = "master"
  name = "github.com/ncw/go-acd"

[[dependencies]]
  branch = "master"
  name = "github.com/ncw/swift"

[[dependencies]]
  branch = "master"
  name = "github.com/pkg/errors"

[[dependencies]]
  branch = "master"
  name = "github.com/pkg/sftp"

[[dependencies]]
  branch = "master"
  name = "github.com/rfjakob/eme"

[[dependencies]]
  branch = "master"
  name = "github.com/skratchdot/open-golang"

[[dependencies]]
  branch = "master"
  name = "github.com/spf13/cobra"

[[dependencies]]
  branch = "master"
  name = "github.com/spf13/pflag"

[[dependencies]]
  branch = "master"
  name = "github.com/stacktic/dropbox"

[[dependencies]]
  branch = "master"
  name = "github.com/stretchr/testify"

[[dependencies]]
  branch = "master"
  name = "github.com/tsenart/tb"

[[dependencies]]
  branch = "master"
  name = "golang.org/x/crypto"

[[dependencies]]
  branch = "master"
  name = "golang.org/x/net"

[[dependencies]]
  branch = "master"
  name = "golang.org/x/oauth2"

[[dependencies]]
  branch = "master"
  name = "golang.org/x/sys"

[[dependencies]]
  branch = "master"
  name = "golang.org/x/text"

[[dependencies]]
  branch = "master"
  name = "google.golang.org/api"
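
As the commented template above explains, a stanza can pin a project to
a release instead of tracking master. None of the stanzas in this file
do so, but a hypothetical pinned constraint (the name and version here
are illustrative only) would look like:

    [[dependencies]]
      name = "github.com/pkg/errors"
      version = "0.8.0"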

Makefile (4 lines changed)
@@ -58,9 +58,7 @@ endif

# Update dependencies
update:
	rm -rf Godeps vendor
	go get -t -u -f -v ./...
	godep save ./...
	dep ensure -update -v

doc: rclone.1 MANUAL.html MANUAL.txt
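
After this change a full refresh of the vendor directory is a single
target (dep's `-v` flag only makes the solver's progress visible):

    make update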

vendor/bazil.org/fuse/doc/.gitignore (generated, vendored, new file, 4 lines)
@@ -0,0 +1,4 @@
/*.seq.svg

# not ignoring *.seq.png; we want those committed to the repo
# for embedding on Github

vendor/bazil.org/fuse/doc/README.md (generated, vendored, new file, 6 lines)
@@ -0,0 +1,6 @@
# bazil.org/fuse documentation

See also API docs at http://godoc.org/bazil.org/fuse

- [The mount sequence](mount-sequence.md)
- [Writing documentation](writing-docs.md)

vendor/bazil.org/fuse/doc/mount-linux-error-init.seq (generated, vendored, new file, 32 lines)
@@ -0,0 +1,32 @@
seqdiag {
	app;
	fuse [label="bazil.org/fuse"];
	fusermount;
	kernel;
	mounts;

	app;
	fuse [label="bazil.org/fuse"];
	fusermount;
	kernel;
	mounts;

	app -> fuse [label="Mount"];
	fuse -> fusermount [label="spawn, pass socketpair fd"];
	fusermount -> kernel [label="open /dev/fuse"];
	fusermount -> kernel [label="mount(2)"];
	kernel ->> mounts [label="mount is visible"];
	fusermount <-- kernel [label="mount(2) returns"];
	fuse <<-- fusermount [diagonal, label="exit, receive /dev/fuse fd", leftnote="on Linux, successful exit here\nmeans the mount has happened,\nthough InitRequest might not have yet"];
	app <-- fuse [label="Mount returns\nConn.Ready is already closed"];

	app -> fuse [label="fs.Serve"];
	fuse => kernel [label="read /dev/fuse fd", note="starts with InitRequest"];
	fuse -> app [label="Init"];
	fuse <-- app [color=red];
	fuse -> kernel [label="write /dev/fuse fd", color=red];
	kernel -> kernel [label="set connection\nstate to error", color=red];
	fuse <-- kernel;
	... conn.MountError == nil, so it is still mounted ...
	... call conn.Close to clean up ...
}

vendor/bazil.org/fuse/doc/mount-linux-error-init.seq.png (generated, vendored, new file, binary)
Binary file not shown (PNG, 28 KiB).

vendor/bazil.org/fuse/doc/mount-linux.seq (generated, vendored, new file, 41 lines)
@@ -0,0 +1,41 @@
seqdiag {
	// seqdiag -T svg -o doc/mount-osx.svg doc/mount-osx.seq
	app;
	fuse [label="bazil.org/fuse"];
	fusermount;
	kernel;
	mounts;

	app -> fuse [label="Mount"];
	fuse -> fusermount [label="spawn, pass socketpair fd"];
	fusermount -> kernel [label="open /dev/fuse"];
	fusermount -> kernel [label="mount(2)"];
	kernel ->> mounts [label="mount is visible"];
	fusermount <-- kernel [label="mount(2) returns"];
	fuse <<-- fusermount [diagonal, label="exit, receive /dev/fuse fd", leftnote="on Linux, successful exit here\nmeans the mount has happened,\nthough InitRequest might not have yet"];
	app <-- fuse [label="Mount returns\nConn.Ready is already closed", rightnote="InitRequest and StatfsRequest\nmay or may not be seen\nbefore Conn.Ready,\ndepending on platform"];

	app -> fuse [label="fs.Serve"];
	fuse => kernel [label="read /dev/fuse fd", note="starts with InitRequest"];
	fuse => app [label="FS/Node/Handle methods"];
	fuse => kernel [label="write /dev/fuse fd"];
	... repeat ...

	... shutting down ...
	app -> fuse [label="Unmount"];
	fuse -> fusermount [label="fusermount -u"];
	fusermount -> kernel;
	kernel <<-- mounts;
	fusermount <-- kernel;
	fuse <<-- fusermount [diagonal];
	app <-- fuse [label="Unmount returns"];

	// actually triggers before above
	fuse <<-- kernel [diagonal, label="/dev/fuse EOF"];
	app <-- fuse [label="fs.Serve returns"];

	app -> fuse [label="conn.Close"];
	fuse -> kernel [label="close /dev/fuse fd"];
	fuse <-- kernel;
	app <-- fuse;
}

vendor/bazil.org/fuse/doc/mount-linux.seq.png (generated, vendored, new file, binary)
Binary file not shown (PNG, 44 KiB).

vendor/bazil.org/fuse/doc/mount-osx-error-init.seq (generated, vendored, new file, 32 lines)
@@ -0,0 +1,32 @@
seqdiag {
	app;
	fuse [label="bazil.org/fuse"];
	wait [label="callMount\nhelper goroutine"];
	mount_osxfusefs;
	kernel;

	app -> fuse [label="Mount"];
	fuse -> kernel [label="open /dev/osxfuseN"];
	fuse -> mount_osxfusefs [label="spawn, pass fd"];
	fuse -> wait [label="goroutine", note="blocks on cmd.Wait"];
	app <-- fuse [label="Mount returns"];

	mount_osxfusefs -> kernel [label="mount(2)"];

	app -> fuse [label="fs.Serve"];
	fuse => kernel [label="read /dev/osxfuseN fd", note="starts with InitRequest,\nalso seen before mount exits:\ntwo StatfsRequest calls"];
	fuse -> app [label="Init"];
	fuse <-- app [color=red];
	fuse -> kernel [label="write /dev/osxfuseN fd", color=red];
	fuse <-- kernel;

	mount_osxfusefs <-- kernel [label="mount(2) returns", color=red];
	wait <<-- mount_osxfusefs [diagonal, label="exit", color=red];
	app <<-- wait [diagonal, label="mount has failed,\nclose Conn.Ready", color=red];

	// actually triggers before above
	fuse <<-- kernel [diagonal, label="/dev/osxfuseN EOF"];
	app <-- fuse [label="fs.Serve returns"];
	... conn.MountError != nil, so it was never mounted ...
	... call conn.Close to clean up ...
}

vendor/bazil.org/fuse/doc/mount-osx-error-init.seq.png (generated, vendored, new file, binary)
Binary file not shown (PNG, 32 KiB).

vendor/bazil.org/fuse/doc/mount-osx.seq (generated, vendored, new file, 45 lines)
@@ -0,0 +1,45 @@
seqdiag {
	// seqdiag -T svg -o doc/mount-osx.svg doc/mount-osx.seq
	app;
	fuse [label="bazil.org/fuse"];
	wait [label="callMount\nhelper goroutine"];
	mount_osxfusefs;
	kernel;
	mounts;

	app -> fuse [label="Mount"];
	fuse -> kernel [label="open /dev/osxfuseN"];
	fuse -> mount_osxfusefs [label="spawn, pass fd"];
	fuse -> wait [label="goroutine", note="blocks on cmd.Wait"];
	app <-- fuse [label="Mount returns"];

	mount_osxfusefs -> kernel [label="mount(2)"];

	app -> fuse [label="fs.Serve"];
	fuse => kernel [label="read /dev/osxfuseN fd", note="starts with InitRequest,\nalso seen before mount exits:\ntwo StatfsRequest calls"];
	fuse => app [label="FS/Node/Handle methods"];
	fuse => kernel [label="write /dev/osxfuseN fd"];
	... repeat ...

	kernel ->> mounts [label="mount is visible"];
	mount_osxfusefs <-- kernel [label="mount(2) returns"];
	wait <<-- mount_osxfusefs [diagonal, label="exit", leftnote="on OS X, successful exit\nhere means we finally know\nthe mount has happened\n(can't trust InitRequest,\nkernel might have timed out\nwaiting for InitResponse)"];

	app <<-- wait [diagonal, label="mount is ready,\nclose Conn.Ready", rightnote="InitRequest and StatfsRequest\nmay or may not be seen\nbefore Conn.Ready,\ndepending on platform"];

	... shutting down ...
	app -> fuse [label="Unmount"];
	fuse -> kernel [label="umount(2)"];
	kernel <<-- mounts;
	fuse <-- kernel;
	app <-- fuse [label="Unmount returns"];

	// actually triggers before above
	fuse <<-- kernel [diagonal, label="/dev/osxfuseN EOF"];
	app <-- fuse [label="fs.Serve returns"];

	app -> fuse [label="conn.Close"];
	fuse -> kernel [label="close /dev/osxfuseN"];
	fuse <-- kernel;
	app <-- fuse;
}

vendor/bazil.org/fuse/doc/mount-osx.seq.png (generated, vendored, new file, binary)
Binary file not shown (PNG, 50 KiB).

vendor/bazil.org/fuse/doc/mount-sequence.md (generated, vendored, new file, 30 lines)
@@ -0,0 +1,30 @@
# The mount sequence

FUSE mounting is a little bit tricky. There's a userspace helper tool
that performs the handshake with the kernel, and then steps out of the
way. This helper behaves differently on different platforms, forcing a
more complex API on us.

## Successful runs

On Linux, the mount is immediate and file system accesses wait until
the requests are served.

![seqdiag-mount-linux](mount-linux.seq.png)

On OS X, the mount becomes visible only after `InitRequest` (and maybe
more) have been served.

![seqdiag-mount-osx](mount-osx.seq.png)


## Errors

Let's see what happens if `InitRequest` gets an error response. On
Linux, the mountpoint is there but all operations will fail:

![seqdiag-mount-linux-error-init](mount-linux-error-init.seq.png)

On OS X, the mount never happened:

![seqdiag-mount-osx-error-init](mount-osx-error-init.seq.png)

vendor/bazil.org/fuse/doc/writing-docs.md (generated, vendored, new file, 16 lines)
@@ -0,0 +1,16 @@
# Writing documentation

## Sequence diagrams

The sequence diagrams are generated with `seqdiag`:
http://blockdiag.com/en/seqdiag/index.html

An easy way to work on them is to automatically update the generated
files with https://github.com/cespare/reflex :

    reflex -g 'doc/[^.]*.seq' -- seqdiag -T svg -o '{}.svg' '{}' &

    reflex -g 'doc/[^.]*.seq' -- seqdiag -T png -o '{}.png' '{}' &

The markdown files refer to PNG images because of Github limitations,
but the SVG is generally more pleasant to view.

vendor/bazil.org/fuse/examples/clockfs/clockfs.go (generated, vendored, new file, 184 lines)
@@ -0,0 +1,184 @@
// Clockfs implements a file system with the current time in a file.
// It was written to demonstrate kernel cache invalidation.
package main

import (
	"flag"
	"fmt"
	"log"
	"os"
	"sync/atomic"
	"syscall"
	"time"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	_ "bazil.org/fuse/fs/fstestutil"
	"bazil.org/fuse/fuseutil"
	"golang.org/x/net/context"
)

func usage() {
	fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
	fmt.Fprintf(os.Stderr, "  %s MOUNTPOINT\n", os.Args[0])
	flag.PrintDefaults()
}

func run(mountpoint string) error {
	c, err := fuse.Mount(
		mountpoint,
		fuse.FSName("clock"),
		fuse.Subtype("clockfsfs"),
		fuse.LocalVolume(),
		fuse.VolumeName("Clock filesystem"),
	)
	if err != nil {
		return err
	}
	defer c.Close()

	if p := c.Protocol(); !p.HasInvalidate() {
		return fmt.Errorf("kernel FUSE support is too old to have invalidations: version %v", p)
	}

	srv := fs.New(c, nil)
	filesys := &FS{
		// We pre-create the clock node so that it's always the same
		// object returned from all the Lookups. You could carefully
		// track its lifetime between Lookup&Forget, and have the
		// ticking & invalidation happen only when active, but let's
		// keep this example simple.
		clockFile: &File{
			fuse: srv,
		},
	}
	filesys.clockFile.tick()
	// This goroutine never exits. That's fine for this example.
	go filesys.clockFile.update()
	if err := srv.Serve(filesys); err != nil {
		return err
	}

	// Check if the mount process has an error to report.
	<-c.Ready
	if err := c.MountError; err != nil {
		return err
	}
	return nil
}

func main() {
	flag.Usage = usage
	flag.Parse()

	if flag.NArg() != 1 {
		usage()
		os.Exit(2)
	}
	mountpoint := flag.Arg(0)

	if err := run(mountpoint); err != nil {
		log.Fatal(err)
	}
}

type FS struct {
	clockFile *File
}

var _ fs.FS = (*FS)(nil)

func (f *FS) Root() (fs.Node, error) {
	return &Dir{fs: f}, nil
}

// Dir implements both Node and Handle for the root directory.
type Dir struct {
	fs *FS
}

var _ fs.Node = (*Dir)(nil)

func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) error {
	a.Inode = 1
	a.Mode = os.ModeDir | 0555
	return nil
}

var _ fs.NodeStringLookuper = (*Dir)(nil)

func (d *Dir) Lookup(ctx context.Context, name string) (fs.Node, error) {
	if name == "clock" {
		return d.fs.clockFile, nil
	}
	return nil, fuse.ENOENT
}

var dirDirs = []fuse.Dirent{
	{Inode: 2, Name: "clock", Type: fuse.DT_File},
}

var _ fs.HandleReadDirAller = (*Dir)(nil)

func (d *Dir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
	return dirDirs, nil
}

type File struct {
	fs.NodeRef
	fuse    *fs.Server
	content atomic.Value
	count   uint64
}

var _ fs.Node = (*File)(nil)

func (f *File) Attr(ctx context.Context, a *fuse.Attr) error {
	a.Inode = 2
	a.Mode = 0444
	t := f.content.Load().(string)
	a.Size = uint64(len(t))
	return nil
}

var _ fs.NodeOpener = (*File)(nil)

func (f *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
	if !req.Flags.IsReadOnly() {
		return nil, fuse.Errno(syscall.EACCES)
	}
	resp.Flags |= fuse.OpenKeepCache
	return f, nil
}

var _ fs.Handle = (*File)(nil)

var _ fs.HandleReader = (*File)(nil)

func (f *File) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
	t := f.content.Load().(string)
	fuseutil.HandleRead(req, resp, []byte(t))
	return nil
}

func (f *File) tick() {
	// Intentionally a variable-length format, to demonstrate size changes.
	f.count++
	s := fmt.Sprintf("%d\t%s\n", f.count, time.Now())
	f.content.Store(s)

	// For simplicity, this example tries to send invalidate
	// notifications even when the kernel does not hold a reference to
	// the node, so be extra sure to ignore ErrNotCached.
	if err := f.fuse.InvalidateNodeData(f); err != nil && err != fuse.ErrNotCached {
		log.Printf("invalidate error: %v", err)
	}
}

func (f *File) update() {
	tick := time.NewTicker(1 * time.Second)
	defer tick.Stop()
	for range tick.C {
		f.tick()
	}
}

vendor/bazil.org/fuse/examples/hellofs/hello.go (generated, vendored, new file, 101 lines)
@@ -0,0 +1,101 @@
// Hellofs implements a simple "hello world" file system.
package main

import (
	"flag"
	"fmt"
	"log"
	"os"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	_ "bazil.org/fuse/fs/fstestutil"
	"golang.org/x/net/context"
)

func usage() {
	fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
	fmt.Fprintf(os.Stderr, "  %s MOUNTPOINT\n", os.Args[0])
	flag.PrintDefaults()
}

func main() {
	flag.Usage = usage
	flag.Parse()

	if flag.NArg() != 1 {
		usage()
		os.Exit(2)
	}
	mountpoint := flag.Arg(0)

	c, err := fuse.Mount(
		mountpoint,
		fuse.FSName("helloworld"),
		fuse.Subtype("hellofs"),
		fuse.LocalVolume(),
		fuse.VolumeName("Hello world!"),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	err = fs.Serve(c, FS{})
	if err != nil {
		log.Fatal(err)
	}

	// check if the mount process has an error to report
	<-c.Ready
	if err := c.MountError; err != nil {
		log.Fatal(err)
	}
}

// FS implements the hello world file system.
type FS struct{}

func (FS) Root() (fs.Node, error) {
	return Dir{}, nil
}

// Dir implements both Node and Handle for the root directory.
type Dir struct{}

func (Dir) Attr(ctx context.Context, a *fuse.Attr) error {
	a.Inode = 1
	a.Mode = os.ModeDir | 0555
	return nil
}

func (Dir) Lookup(ctx context.Context, name string) (fs.Node, error) {
	if name == "hello" {
		return File{}, nil
	}
	return nil, fuse.ENOENT
}

var dirDirs = []fuse.Dirent{
	{Inode: 2, Name: "hello", Type: fuse.DT_File},
}

func (Dir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
	return dirDirs, nil
}

// File implements both Node and Handle for the hello file.
type File struct{}

const greeting = "hello, world\n"

func (File) Attr(ctx context.Context, a *fuse.Attr) error {
	a.Inode = 2
	a.Mode = 0444
	a.Size = uint64(len(greeting))
	return nil
}

func (File) ReadAll(ctx context.Context) ([]byte, error) {
	return []byte(greeting), nil
}
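
To try the example (a sketch, assuming a Linux machine with FUSE
available and a scratch mountpoint; on OS X use `umount` instead of
`fusermount -u`):

    mkdir -p /tmp/hellofs
    go run hello.go /tmp/hellofs &
    cat /tmp/hellofs/hello
    fusermount -u /tmp/hellofs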

vendor/bazil.org/fuse/fs/bench/bench_create_test.go (generated, vendored, new file, 54 lines)
@@ -0,0 +1,54 @@
package bench_test

import (
	"fmt"
	"os"
	"testing"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	"bazil.org/fuse/fs/fstestutil"
	"golang.org/x/net/context"
)

type dummyFile struct {
	fstestutil.File
}

type benchCreateDir struct {
	fstestutil.Dir
}

var _ fs.NodeCreater = (*benchCreateDir)(nil)

func (f *benchCreateDir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {
	child := &dummyFile{}
	return child, child, nil
}

func BenchmarkCreate(b *testing.B) {
	f := &benchCreateDir{}
	mnt, err := fstestutil.MountedT(b, fstestutil.SimpleFS{f}, nil)
	if err != nil {
		b.Fatal(err)
	}
	defer mnt.Close()

	// prepare file names to decrease test overhead
	names := make([]string, 0, b.N)
	for i := 0; i < b.N; i++ {
		// zero-padded so cost stays the same on every iteration
		names = append(names, mnt.Dir+"/"+fmt.Sprintf("%08x", i))
	}
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		f, err := os.Create(names[i])
		if err != nil {
			b.Fatalf("WriteFile: %v", err)
		}
		f.Close()
	}

	b.StopTimer()
}

vendor/bazil.org/fuse/fs/bench/bench_lookup_test.go (generated, vendored, new file, 42 lines)
@@ -0,0 +1,42 @@
package bench_test

import (
	"os"
	"testing"

	"golang.org/x/net/context"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	"bazil.org/fuse/fs/fstestutil"
)

type benchLookupDir struct {
	fstestutil.Dir
}

var _ fs.NodeRequestLookuper = (*benchLookupDir)(nil)

func (f *benchLookupDir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (fs.Node, error) {
	return nil, fuse.ENOENT
}

func BenchmarkLookup(b *testing.B) {
	f := &benchLookupDir{}
	mnt, err := fstestutil.MountedT(b, fstestutil.SimpleFS{f}, nil)
	if err != nil {
		b.Fatal(err)
	}
	defer mnt.Close()

	name := mnt.Dir + "/does-not-exist"
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		if _, err := os.Stat(name); !os.IsNotExist(err) {
			b.Fatalf("Stat: wrong error: %v", err)
		}
	}

	b.StopTimer()
}
268
vendor/bazil.org/fuse/fs/bench/bench_readwrite_test.go
generated
vendored
Normal file
@ -0,0 +1,268 @@
package bench_test

import (
	"io"
	"io/ioutil"
	"os"
	"path"
	"testing"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	"bazil.org/fuse/fs/fstestutil"
	"golang.org/x/net/context"
)

type benchConfig struct {
	directIO bool
}

type benchFS struct {
	conf *benchConfig
}

var _ = fs.FS(benchFS{})

func (f benchFS) Root() (fs.Node, error) {
	return benchDir{conf: f.conf}, nil
}

type benchDir struct {
	conf *benchConfig
}

var _ = fs.Node(benchDir{})
var _ = fs.NodeStringLookuper(benchDir{})
var _ = fs.Handle(benchDir{})
var _ = fs.HandleReadDirAller(benchDir{})

func (benchDir) Attr(ctx context.Context, a *fuse.Attr) error {
	a.Inode = 1
	a.Mode = os.ModeDir | 0555
	return nil
}

func (d benchDir) Lookup(ctx context.Context, name string) (fs.Node, error) {
	if name == "bench" {
		return benchFile{conf: d.conf}, nil
	}
	return nil, fuse.ENOENT
}

func (benchDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
	l := []fuse.Dirent{
		{Inode: 2, Name: "bench", Type: fuse.DT_File},
	}
	return l, nil
}

type benchFile struct {
	conf *benchConfig
}

var _ = fs.Node(benchFile{})
var _ = fs.NodeOpener(benchFile{})
var _ = fs.NodeFsyncer(benchFile{})
var _ = fs.Handle(benchFile{})
var _ = fs.HandleReader(benchFile{})
var _ = fs.HandleWriter(benchFile{})

func (benchFile) Attr(ctx context.Context, a *fuse.Attr) error {
	a.Inode = 2
	a.Mode = 0644
	a.Size = 9999999999999999
	return nil
}

func (f benchFile) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
	if f.conf.directIO {
		resp.Flags |= fuse.OpenDirectIO
	}
	// TODO configurable?
	resp.Flags |= fuse.OpenKeepCache
	return f, nil
}

func (benchFile) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
	resp.Data = resp.Data[:cap(resp.Data)]
	return nil
}

func (benchFile) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
	resp.Size = len(req.Data)
	return nil
}

func (benchFile) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
	return nil
}

func benchmark(b *testing.B, fn func(b *testing.B, mnt string), conf *benchConfig) {
	filesys := benchFS{
		conf: conf,
	}
	mnt, err := fstestutil.Mounted(filesys, nil,
		fuse.MaxReadahead(64*1024*1024),
		fuse.AsyncRead(),
		fuse.WritebackCache(),
	)
	if err != nil {
		b.Fatal(err)
	}
	defer mnt.Close()

	fn(b, mnt.Dir)
}

type zero struct{}

func (zero) Read(p []byte) (n int, err error) {
	return len(p), nil
}

var Zero io.Reader = zero{}

func doWrites(size int64) func(b *testing.B, mnt string) {
	return func(b *testing.B, mnt string) {
		p := path.Join(mnt, "bench")

		f, err := os.Create(p)
		if err != nil {
			b.Fatalf("create: %v", err)
		}
		defer f.Close()

		b.ResetTimer()
		b.SetBytes(size)

		for i := 0; i < b.N; i++ {
			_, err = io.CopyN(f, Zero, size)
			if err != nil {
				b.Fatalf("write: %v", err)
			}
		}
	}
}

func BenchmarkWrite100(b *testing.B) {
	benchmark(b, doWrites(100), &benchConfig{})
}

func BenchmarkWrite10MB(b *testing.B) {
	benchmark(b, doWrites(10*1024*1024), &benchConfig{})
}

func BenchmarkWrite100MB(b *testing.B) {
	benchmark(b, doWrites(100*1024*1024), &benchConfig{})
}

func BenchmarkDirectWrite100(b *testing.B) {
	benchmark(b, doWrites(100), &benchConfig{
		directIO: true,
	})
}

func BenchmarkDirectWrite10MB(b *testing.B) {
	benchmark(b, doWrites(10*1024*1024), &benchConfig{
		directIO: true,
	})
}

func BenchmarkDirectWrite100MB(b *testing.B) {
	benchmark(b, doWrites(100*1024*1024), &benchConfig{
		directIO: true,
	})
}

func doWritesSync(size int64) func(b *testing.B, mnt string) {
	return func(b *testing.B, mnt string) {
		p := path.Join(mnt, "bench")

		f, err := os.Create(p)
		if err != nil {
			b.Fatalf("create: %v", err)
		}
		defer f.Close()

		b.ResetTimer()
		b.SetBytes(size)

		for i := 0; i < b.N; i++ {
			_, err = io.CopyN(f, Zero, size)
			if err != nil {
				b.Fatalf("write: %v", err)
			}

			if err := f.Sync(); err != nil {
				b.Fatalf("sync: %v", err)
			}
		}
	}
}

func BenchmarkWriteSync100(b *testing.B) {
	benchmark(b, doWritesSync(100), &benchConfig{})
}

func BenchmarkWriteSync10MB(b *testing.B) {
	benchmark(b, doWritesSync(10*1024*1024), &benchConfig{})
}

func BenchmarkWriteSync100MB(b *testing.B) {
	benchmark(b, doWritesSync(100*1024*1024), &benchConfig{})
}

func doReads(size int64) func(b *testing.B, mnt string) {
	return func(b *testing.B, mnt string) {
		p := path.Join(mnt, "bench")

		f, err := os.Open(p)
		if err != nil {
			b.Fatalf("open: %v", err)
		}
		defer f.Close()

		b.ResetTimer()
		b.SetBytes(size)

		for i := 0; i < b.N; i++ {
			n, err := io.CopyN(ioutil.Discard, f, size)
			if err != nil {
				b.Fatalf("read: %v", err)
			}
			if n != size {
				b.Errorf("unexpected size: %d != %d", n, size)
			}
		}
	}
}

func BenchmarkRead100(b *testing.B) {
	benchmark(b, doReads(100), &benchConfig{})
}

func BenchmarkRead10MB(b *testing.B) {
	benchmark(b, doReads(10*1024*1024), &benchConfig{})
}

func BenchmarkRead100MB(b *testing.B) {
	benchmark(b, doReads(100*1024*1024), &benchConfig{})
}

func BenchmarkDirectRead100(b *testing.B) {
	benchmark(b, doReads(100), &benchConfig{
		directIO: true,
	})
}

func BenchmarkDirectRead10MB(b *testing.B) {
	benchmark(b, doReads(10*1024*1024), &benchConfig{
		directIO: true,
	})
}

func BenchmarkDirectRead100MB(b *testing.B) {
	benchmark(b, doReads(100*1024*1024), &benchConfig{
		directIO: true,
	})
}
5
vendor/bazil.org/fuse/fs/bench/doc.go
generated
vendored
Normal file
@ -0,0 +1,5 @@
// Package bench contains benchmarks.
//
// It is kept in a separate package to avoid conflicting with the
// debug-heavy defaults for the actual tests.
package bench
70
vendor/bazil.org/fuse/fs/fstestutil/checkdir.go
generated
vendored
Normal file
@ -0,0 +1,70 @@
package fstestutil

import (
	"fmt"
	"io/ioutil"
	"os"
)

// FileInfoCheck is a function that validates an os.FileInfo according
// to some criteria.
type FileInfoCheck func(fi os.FileInfo) error

type checkDirError struct {
	missing map[string]struct{}
	extra   map[string]os.FileMode
}

func (e *checkDirError) Error() string {
	return fmt.Sprintf("wrong directory contents: missing %v, extra %v", e.missing, e.extra)
}

// CheckDir checks the contents of the directory at path, making sure
// every directory entry listed in want is present. If the check is
// not nil, it must also pass.
//
// If want contains the impossible filename "", unexpected files are
// checked with that. If the key is not in want, unexpected files are
// an error.
//
// Missing entries, that are listed in want but not seen, are an
// error.
func CheckDir(path string, want map[string]FileInfoCheck) error {
	problems := &checkDirError{
		missing: make(map[string]struct{}, len(want)),
		extra:   make(map[string]os.FileMode),
	}
	for k := range want {
		if k == "" {
			continue
		}
		problems.missing[k] = struct{}{}
	}

	fis, err := ioutil.ReadDir(path)
	if err != nil {
		return fmt.Errorf("cannot read directory: %v", err)
	}

	for _, fi := range fis {
		check, ok := want[fi.Name()]
		if !ok {
			check, ok = want[""]
		}
		if !ok {
			problems.extra[fi.Name()] = fi.Mode()
			continue
		}
		delete(problems.missing, fi.Name())
		if check != nil {
			if err := check(fi); err != nil {
				return fmt.Errorf("check failed: %v: %v", fi.Name(), err)
			}
		}
	}

	if len(problems.missing) > 0 || len(problems.extra) > 0 {
		return problems
	}
	return nil
}
65
vendor/bazil.org/fuse/fs/fstestutil/debug.go
generated
vendored
Normal file
@ -0,0 +1,65 @@
package fstestutil

import (
	"flag"
	"log"
	"strconv"

	"bazil.org/fuse"
)

type flagDebug bool

var debug flagDebug

var _ = flag.Value(&debug)

func (f *flagDebug) IsBoolFlag() bool {
	return true
}

func nop(msg interface{}) {}

func (f *flagDebug) Set(s string) error {
	v, err := strconv.ParseBool(s)
	if err != nil {
		return err
	}
	*f = flagDebug(v)
	if v {
		fuse.Debug = logMsg
	} else {
		fuse.Debug = nop
	}
	return nil
}

func (f *flagDebug) String() string {
	return strconv.FormatBool(bool(*f))
}

func logMsg(msg interface{}) {
	log.Printf("FUSE: %s\n", msg)
}

func init() {
	flag.Var(&debug, "fuse.debug", "log FUSE processing details")
}

// DebugByDefault changes the default of the `-fuse.debug` flag to
// true.
//
// This package registers a command line flag `-fuse.debug` and when
// run with that flag (and activated inside the tests), logs FUSE
// debug messages.
//
// This is disabled by default, as most callers probably won't care
// about FUSE details. Use DebugByDefault for tests where you'd
// normally be passing `-fuse.debug` all the time anyway.
//
// Call from an init function.
func DebugByDefault() {
	f := flag.Lookup("fuse.debug")
	f.DefValue = "true"
	f.Value.Set(f.DefValue)
}
1
vendor/bazil.org/fuse/fs/fstestutil/doc.go
generated
vendored
Normal file
@ -0,0 +1 @@
package fstestutil // import "bazil.org/fuse/fs/fstestutil"
141
vendor/bazil.org/fuse/fs/fstestutil/mounted.go
generated
vendored
Normal file
@ -0,0 +1,141 @@
package fstestutil

import (
	"errors"
	"io/ioutil"
	"log"
	"os"
	"testing"
	"time"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
)

// Mount contains information about the mount for the test to use.
type Mount struct {
	// Dir is the temporary directory where the filesystem is mounted.
	Dir string

	Conn   *fuse.Conn
	Server *fs.Server

	// Error will receive the return value of Serve.
	Error <-chan error

	done   <-chan struct{}
	closed bool
}

// Close unmounts the filesystem and waits for fs.Serve to return. Any
// returned error will be stored in Err. It is safe to call Close
// multiple times.
func (mnt *Mount) Close() {
	if mnt.closed {
		return
	}
	mnt.closed = true
	for tries := 0; tries < 1000; tries++ {
		err := fuse.Unmount(mnt.Dir)
		if err != nil {
			// TODO do more than log?
			log.Printf("unmount error: %v", err)
			time.Sleep(10 * time.Millisecond)
			continue
		}
		break
	}
	<-mnt.done
	mnt.Conn.Close()
	os.Remove(mnt.Dir)
}

// MountedFunc mounts a filesystem at a temporary directory. The
// filesystem used is constructed by calling a function, to allow
// storing fuse.Conn and fs.Server in the FS.
//
// It also waits until the filesystem is known to be visible (OS X
// workaround).
//
// After successful return, caller must clean up by calling Close.
func MountedFunc(fn func(*Mount) fs.FS, conf *fs.Config, options ...fuse.MountOption) (*Mount, error) {
	dir, err := ioutil.TempDir("", "fusetest")
	if err != nil {
		return nil, err
	}
	c, err := fuse.Mount(dir, options...)
	if err != nil {
		return nil, err
	}
	server := fs.New(c, conf)
	done := make(chan struct{})
	serveErr := make(chan error, 1)
	mnt := &Mount{
		Dir:    dir,
		Conn:   c,
		Server: server,
		Error:  serveErr,
		done:   done,
	}
	filesys := fn(mnt)
	go func() {
		defer close(done)
		serveErr <- server.Serve(filesys)
	}()

	select {
	case <-mnt.Conn.Ready:
		if err := mnt.Conn.MountError; err != nil {
			return nil, err
		}
		return mnt, nil
	case err = <-mnt.Error:
		// Serve quit early
		if err != nil {
			return nil, err
		}
		return nil, errors.New("Serve exited early")
	}
}

// Mounted mounts the fuse.Server at a temporary directory.
//
// It also waits until the filesystem is known to be visible (OS X
// workaround).
//
// After successful return, caller must clean up by calling Close.
func Mounted(filesys fs.FS, conf *fs.Config, options ...fuse.MountOption) (*Mount, error) {
	fn := func(*Mount) fs.FS { return filesys }
	return MountedFunc(fn, conf, options...)
}

// MountedFuncT mounts a filesystem at a temporary directory,
// directing its debug log to the testing logger.
//
// See MountedFunc for usage.
//
// The debug log is not enabled by default. Use `-fuse.debug` or call
// DebugByDefault to enable.
func MountedFuncT(t testing.TB, fn func(*Mount) fs.FS, conf *fs.Config, options ...fuse.MountOption) (*Mount, error) {
	if conf == nil {
		conf = &fs.Config{}
	}
	if debug && conf.Debug == nil {
		conf.Debug = func(msg interface{}) {
			t.Logf("FUSE: %s", msg)
		}
	}
	return MountedFunc(fn, conf, options...)
}

// MountedT mounts the filesystem at a temporary directory,
// directing its debug log to the testing logger.
//
// See Mounted for usage.
//
// The debug log is not enabled by default. Use `-fuse.debug` or call
// DebugByDefault to enable.
func MountedT(t testing.TB, filesys fs.FS, conf *fs.Config, options ...fuse.MountOption) (*Mount, error) {
	fn := func(*Mount) fs.FS { return filesys }
	return MountedFuncT(t, fn, conf, options...)
}
26
vendor/bazil.org/fuse/fs/fstestutil/mountinfo.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
package fstestutil

// MountInfo describes a mounted file system.
type MountInfo struct {
	FSName string
	Type   string
}

// GetMountInfo finds information about the mount at mnt. It is
// intended for use by tests only, and only fetches information
// relevant to the current tests.
func GetMountInfo(mnt string) (*MountInfo, error) {
	return getMountInfo(mnt)
}

// cstr converts a nil-terminated C string into a Go string
func cstr(ca []int8) string {
	s := make([]byte, 0, len(ca))
	for _, c := range ca {
		if c == 0x00 {
			break
		}
		s = append(s, byte(c))
	}
	return string(s)
}
29
vendor/bazil.org/fuse/fs/fstestutil/mountinfo_darwin.go
generated
vendored
Normal file
@ -0,0 +1,29 @@
package fstestutil

import (
	"regexp"
	"syscall"
)

var re = regexp.MustCompile(`\\(.)`)

// unescape removes backslash-escaping. The escaped characters are not
// mapped in any way; that is, unescape(`\n`) == `n`.
func unescape(s string) string {
	return re.ReplaceAllString(s, `$1`)
}

func getMountInfo(mnt string) (*MountInfo, error) {
	var st syscall.Statfs_t
	err := syscall.Statfs(mnt, &st)
	if err != nil {
		return nil, err
	}
	i := &MountInfo{
		// osx getmntent(3) fails to un-escape the data, so we do it..
		// this might lead to double-unescaping in the future. fun.
		// TestMountOptionFSNameEvilBackslashDouble checks for that.
		FSName: unescape(cstr(st.Mntfromname[:])),
	}
	return i, nil
}
7
vendor/bazil.org/fuse/fs/fstestutil/mountinfo_freebsd.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
package fstestutil

import "errors"

func getMountInfo(mnt string) (*MountInfo, error) {
	return nil, errors.New("FreeBSD has no useful mount information")
}
51
vendor/bazil.org/fuse/fs/fstestutil/mountinfo_linux.go
generated
vendored
Normal file
@ -0,0 +1,51 @@
package fstestutil

import (
	"errors"
	"io/ioutil"
	"strings"
)

// Linux /proc/mounts shows current mounts.
// Same format as /etc/fstab. Quoting getmntent(3):
//
// Since fields in the mtab and fstab files are separated by whitespace,
// octal escapes are used to represent the four characters space (\040),
// tab (\011), newline (\012) and backslash (\134) in those files when
// they occur in one of the four strings in a mntent structure.
//
// http://linux.die.net/man/3/getmntent

var fstabUnescape = strings.NewReplacer(
	`\040`, "\040",
	`\011`, "\011",
	`\012`, "\012",
	`\134`, "\134",
)

var errNotFound = errors.New("mount not found")

func getMountInfo(mnt string) (*MountInfo, error) {
	data, err := ioutil.ReadFile("/proc/mounts")
	if err != nil {
		return nil, err
	}
	for _, line := range strings.Split(string(data), "\n") {
		fields := strings.Fields(line)
		if len(fields) < 3 {
			continue
		}
		// Fields are: fsname dir type opts freq passno
		fsname := fstabUnescape.Replace(fields[0])
		dir := fstabUnescape.Replace(fields[1])
		fstype := fstabUnescape.Replace(fields[2])
		if mnt == dir {
			info := &MountInfo{
				FSName: fsname,
				Type:   fstype,
			}
			return info, nil
		}
	}
	return nil, errNotFound
}
28
vendor/bazil.org/fuse/fs/fstestutil/record/buffer.go
generated
vendored
Normal file
@ -0,0 +1,28 @@
package record

import (
	"bytes"
	"io"
	"sync"
)

// Buffer is like bytes.Buffer but safe to access from multiple
// goroutines.
type Buffer struct {
	mu  sync.Mutex
	buf bytes.Buffer
}

var _ = io.Writer(&Buffer{})

func (b *Buffer) Write(p []byte) (n int, err error) {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.buf.Write(p)
}

func (b *Buffer) Bytes() []byte {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.buf.Bytes()
}
409
vendor/bazil.org/fuse/fs/fstestutil/record/record.go
generated
vendored
Normal file
@ -0,0 +1,409 @@
package record // import "bazil.org/fuse/fs/fstestutil/record"

import (
	"sync"
	"sync/atomic"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	"golang.org/x/net/context"
)

// Writes gathers data from FUSE Write calls.
type Writes struct {
	buf Buffer
}

var _ = fs.HandleWriter(&Writes{})

func (w *Writes) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
	n, err := w.buf.Write(req.Data)
	resp.Size = n
	if err != nil {
		return err
	}
	return nil
}

func (w *Writes) RecordedWriteData() []byte {
	return w.buf.Bytes()
}

// Counter records number of times a thing has occurred.
type Counter struct {
	count uint32
}

func (r *Counter) Inc() {
	atomic.AddUint32(&r.count, 1)
}

func (r *Counter) Count() uint32 {
	return atomic.LoadUint32(&r.count)
}

// MarkRecorder records whether a thing has occurred.
type MarkRecorder struct {
	count Counter
}

func (r *MarkRecorder) Mark() {
	r.count.Inc()
}

func (r *MarkRecorder) Recorded() bool {
	return r.count.Count() > 0
}

// Flushes notes whether a FUSE Flush call has been seen.
type Flushes struct {
	rec MarkRecorder
}

var _ = fs.HandleFlusher(&Flushes{})

func (r *Flushes) Flush(ctx context.Context, req *fuse.FlushRequest) error {
	r.rec.Mark()
	return nil
}

func (r *Flushes) RecordedFlush() bool {
	return r.rec.Recorded()
}

type Recorder struct {
	mu  sync.Mutex
	val interface{}
}

// Record that we've seen value. A nil value is indistinguishable from
// no value recorded.
func (r *Recorder) Record(value interface{}) {
	r.mu.Lock()
	r.val = value
	r.mu.Unlock()
}

func (r *Recorder) Recorded() interface{} {
	r.mu.Lock()
	val := r.val
	r.mu.Unlock()
	return val
}

type RequestRecorder struct {
	rec Recorder
}

// Record a fuse.Request, after zeroing header fields that are hard to
// reproduce.
//
// Make sure to record a copy, not the original request.
func (r *RequestRecorder) RecordRequest(req fuse.Request) {
	hdr := req.Hdr()
	*hdr = fuse.Header{}
	r.rec.Record(req)
}

func (r *RequestRecorder) Recorded() fuse.Request {
	val := r.rec.Recorded()
	if val == nil {
		return nil
	}
	return val.(fuse.Request)
}

// Setattrs records a Setattr request and its fields.
type Setattrs struct {
	rec RequestRecorder
}

var _ = fs.NodeSetattrer(&Setattrs{})

func (r *Setattrs) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
	tmp := *req
	r.rec.RecordRequest(&tmp)
	return nil
}

func (r *Setattrs) RecordedSetattr() fuse.SetattrRequest {
	val := r.rec.Recorded()
	if val == nil {
		return fuse.SetattrRequest{}
	}
	return *(val.(*fuse.SetattrRequest))
}

// Fsyncs records an Fsync request and its fields.
type Fsyncs struct {
	rec RequestRecorder
}

var _ = fs.NodeFsyncer(&Fsyncs{})

func (r *Fsyncs) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
	tmp := *req
	r.rec.RecordRequest(&tmp)
	return nil
}

func (r *Fsyncs) RecordedFsync() fuse.FsyncRequest {
	val := r.rec.Recorded()
	if val == nil {
		return fuse.FsyncRequest{}
	}
	return *(val.(*fuse.FsyncRequest))
}

// Mkdirs records a Mkdir request and its fields.
type Mkdirs struct {
	rec RequestRecorder
}

var _ = fs.NodeMkdirer(&Mkdirs{})

// Mkdir records the request and returns an error. Most callers should
// wrap this call in a function that returns a more useful result.
func (r *Mkdirs) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
	tmp := *req
	r.rec.RecordRequest(&tmp)
	return nil, fuse.EIO
}

// RecordedMkdir returns information about the Mkdir request.
// If no request was seen, returns a zero value.
func (r *Mkdirs) RecordedMkdir() fuse.MkdirRequest {
	val := r.rec.Recorded()
	if val == nil {
		return fuse.MkdirRequest{}
	}
	return *(val.(*fuse.MkdirRequest))
}

// Symlinks records a Symlink request and its fields.
type Symlinks struct {
	rec RequestRecorder
}

var _ = fs.NodeSymlinker(&Symlinks{})

// Symlink records the request and returns an error. Most callers should
// wrap this call in a function that returns a more useful result.
func (r *Symlinks) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, error) {
	tmp := *req
	r.rec.RecordRequest(&tmp)
	return nil, fuse.EIO
}

// RecordedSymlink returns information about the Symlink request.
// If no request was seen, returns a zero value.
func (r *Symlinks) RecordedSymlink() fuse.SymlinkRequest {
	val := r.rec.Recorded()
	if val == nil {
		return fuse.SymlinkRequest{}
	}
	return *(val.(*fuse.SymlinkRequest))
}

// Links records a Link request and its fields.
type Links struct {
	rec RequestRecorder
}

var _ = fs.NodeLinker(&Links{})

// Link records the request and returns an error. Most callers should
// wrap this call in a function that returns a more useful result.
func (r *Links) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (fs.Node, error) {
	tmp := *req
	r.rec.RecordRequest(&tmp)
	return nil, fuse.EIO
}

// RecordedLink returns information about the Link request.
// If no request was seen, returns a zero value.
func (r *Links) RecordedLink() fuse.LinkRequest {
	val := r.rec.Recorded()
	if val == nil {
		return fuse.LinkRequest{}
	}
	return *(val.(*fuse.LinkRequest))
}

// Mknods records a Mknod request and its fields.
type Mknods struct {
	rec RequestRecorder
}

var _ = fs.NodeMknoder(&Mknods{})

// Mknod records the request and returns an error. Most callers should
// wrap this call in a function that returns a more useful result.
func (r *Mknods) Mknod(ctx context.Context, req *fuse.MknodRequest) (fs.Node, error) {
	tmp := *req
	r.rec.RecordRequest(&tmp)
	return nil, fuse.EIO
}

// RecordedMknod returns information about the Mknod request.
// If no request was seen, returns a zero value.
func (r *Mknods) RecordedMknod() fuse.MknodRequest {
	val := r.rec.Recorded()
	if val == nil {
		return fuse.MknodRequest{}
	}
	return *(val.(*fuse.MknodRequest))
}

// Opens records an Open request and its fields.
type Opens struct {
	rec RequestRecorder
}

var _ = fs.NodeOpener(&Opens{})

// Open records the request and returns an error. Most callers should
// wrap this call in a function that returns a more useful result.
func (r *Opens) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
	tmp := *req
	r.rec.RecordRequest(&tmp)
	return nil, fuse.EIO
}

// RecordedOpen returns information about the Open request.
// If no request was seen, returns a zero value.
func (r *Opens) RecordedOpen() fuse.OpenRequest {
	val := r.rec.Recorded()
	if val == nil {
		return fuse.OpenRequest{}
	}
	return *(val.(*fuse.OpenRequest))
}

// Getxattrs records a Getxattr request and its fields.
type Getxattrs struct {
	rec RequestRecorder
}

var _ = fs.NodeGetxattrer(&Getxattrs{})

// Getxattr records the request and returns an error. Most callers should
// wrap this call in a function that returns a more useful result.
func (r *Getxattrs) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
	tmp := *req
	r.rec.RecordRequest(&tmp)
	return fuse.ErrNoXattr
}

// RecordedGetxattr returns information about the Getxattr request.
// If no request was seen, returns a zero value.
func (r *Getxattrs) RecordedGetxattr() fuse.GetxattrRequest {
	val := r.rec.Recorded()
	if val == nil {
		return fuse.GetxattrRequest{}
	}
	return *(val.(*fuse.GetxattrRequest))
}

// Listxattrs records a Listxattr request and its fields.
type Listxattrs struct {
	rec RequestRecorder
}

var _ = fs.NodeListxattrer(&Listxattrs{})

// Listxattr records the request and returns an error. Most callers should
// wrap this call in a function that returns a more useful result.
func (r *Listxattrs) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
	tmp := *req
	r.rec.RecordRequest(&tmp)
	return fuse.ErrNoXattr
}

// RecordedListxattr returns information about the Listxattr request.
// If no request was seen, returns a zero value.
func (r *Listxattrs) RecordedListxattr() fuse.ListxattrRequest {
	val := r.rec.Recorded()
	if val == nil {
		return fuse.ListxattrRequest{}
	}
	return *(val.(*fuse.ListxattrRequest))
}

// Setxattrs records a Setxattr request and its fields.
type Setxattrs struct {
	rec RequestRecorder
}

var _ = fs.NodeSetxattrer(&Setxattrs{})

// Setxattr records the request and returns an error. Most callers should
// wrap this call in a function that returns a more useful result.
func (r *Setxattrs) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
	tmp := *req
	// The byte slice points to memory that will be reused, so make a
	// deep copy.
	tmp.Xattr = append([]byte(nil), req.Xattr...)
	r.rec.RecordRequest(&tmp)
	return nil
}

// RecordedSetxattr returns information about the Setxattr request.
// If no request was seen, returns a zero value.
func (r *Setxattrs) RecordedSetxattr() fuse.SetxattrRequest {
	val := r.rec.Recorded()
	if val == nil {
		return fuse.SetxattrRequest{}
	}
	return *(val.(*fuse.SetxattrRequest))
}

// Removexattrs records a Removexattr request and its fields.
type Removexattrs struct {
	rec RequestRecorder
}

var _ = fs.NodeRemovexattrer(&Removexattrs{})

// Removexattr records the request and returns an error. Most callers should
// wrap this call in a function that returns a more useful result.
func (r *Removexattrs) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error {
	tmp := *req
	r.rec.RecordRequest(&tmp)
	return nil
}

// RecordedRemovexattr returns information about the Removexattr request.
// If no request was seen, returns a zero value.
func (r *Removexattrs) RecordedRemovexattr() fuse.RemovexattrRequest {
	val := r.rec.Recorded()
	if val == nil {
		return fuse.RemovexattrRequest{}
	}
	return *(val.(*fuse.RemovexattrRequest))
}

// Creates records a Create request and its fields.
type Creates struct {
	rec RequestRecorder
}

var _ = fs.NodeCreater(&Creates{})

// Create records the request and returns an error. Most callers should
// wrap this call in a function that returns a more useful result.
func (r *Creates) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {
	tmp := *req
	r.rec.RecordRequest(&tmp)
	return nil, nil, fuse.EIO
}

// RecordedCreate returns information about the Create request.
// If no request was seen, returns a zero value.
func (r *Creates) RecordedCreate() fuse.CreateRequest {
	val := r.rec.Recorded()
	if val == nil {
		return fuse.CreateRequest{}
	}
	return *(val.(*fuse.CreateRequest))
}
55
vendor/bazil.org/fuse/fs/fstestutil/record/wait.go
generated
vendored
Normal file
@ -0,0 +1,55 @@
package record

import (
	"sync"
	"time"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	"golang.org/x/net/context"
)

type nothing struct{}

// ReleaseWaiter notes whether a FUSE Release call has been seen.
//
// Releases are not guaranteed to happen synchronously with any client
// call, so they must be waited for.
type ReleaseWaiter struct {
	once sync.Once
	seen chan nothing
}

var _ = fs.HandleReleaser(&ReleaseWaiter{})

func (r *ReleaseWaiter) init() {
	r.once.Do(func() {
		r.seen = make(chan nothing, 1)
	})
}

func (r *ReleaseWaiter) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
	r.init()
	close(r.seen)
	return nil
}

// WaitForRelease waits for Release to be called.
//
// With zero duration, wait forever. Otherwise, timeout early
// in a more controlled way than `-test.timeout`.
//
// Returns whether a Release was seen. Always true if dur==0.
func (r *ReleaseWaiter) WaitForRelease(dur time.Duration) bool {
	r.init()
	var timeout <-chan time.Time
	if dur > 0 {
		timeout = time.After(dur)
	}
	select {
	case <-r.seen:
		return true
	case <-timeout:
		return false
	}
}
55
vendor/bazil.org/fuse/fs/fstestutil/testfs.go
generated
vendored
Normal file
@ -0,0 +1,55 @@
package fstestutil

import (
	"os"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	"golang.org/x/net/context"
)

// SimpleFS is a trivial FS that just implements the Root method.
type SimpleFS struct {
	Node fs.Node
}

var _ = fs.FS(SimpleFS{})

func (f SimpleFS) Root() (fs.Node, error) {
	return f.Node, nil
}

// File can be embedded in a struct to make it look like a file.
type File struct{}

func (f File) Attr(ctx context.Context, a *fuse.Attr) error {
	a.Mode = 0666
	return nil
}

// Dir can be embedded in a struct to make it look like a directory.
type Dir struct{}

func (f Dir) Attr(ctx context.Context, a *fuse.Attr) error {
	a.Mode = os.ModeDir | 0777
	return nil
}

// ChildMap is a directory with child nodes looked up from a map.
type ChildMap map[string]fs.Node

var _ = fs.Node(&ChildMap{})
var _ = fs.NodeStringLookuper(&ChildMap{})

func (f *ChildMap) Attr(ctx context.Context, a *fuse.Attr) error {
	a.Mode = os.ModeDir | 0777
	return nil
}

func (f *ChildMap) Lookup(ctx context.Context, name string) (fs.Node, error) {
	child, ok := (*f)[name]
	if !ok {
		return nil, fuse.ENOENT
	}
	return child, nil
}
67
vendor/bazil.org/fuse/fs/helpers_test.go
generated
vendored
Normal file
@ -0,0 +1,67 @@
package fs_test

import (
	"errors"
	"flag"
	"os"
	"os/exec"
	"path/filepath"
	"testing"
)

var childHelpers = map[string]func(){}

type childProcess struct {
	name string
	fn   func()
}

var _ flag.Value = (*childProcess)(nil)

func (c *childProcess) String() string {
	return c.name
}

func (c *childProcess) Set(s string) error {
	fn, ok := childHelpers[s]
	if !ok {
		return errors.New("helper not found")
	}
	c.name = s
	c.fn = fn
	return nil
}

var childMode childProcess

func init() {
	flag.Var(&childMode, "fuse.internal.child", "internal use only")
}

// childCmd prepares a test function to be run in a subprocess, with
// childMode set to true. Caller must still call Run or Start.
//
// Re-using the test executable as the subprocess is useful because
// now test executables can e.g. be cross-compiled, transferred
// between hosts, and run in settings where the whole Go development
// environment is not installed.
func childCmd(childName string) (*exec.Cmd, error) {
	// caller may set cwd, so we can't rely on relative paths
	executable, err := filepath.Abs(os.Args[0])
	if err != nil {
		return nil, err
	}
	cmd := exec.Command(executable, "-fuse.internal.child="+childName)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd, nil
}

func TestMain(m *testing.M) {
	flag.Parse()
	if childMode.fn != nil {
		childMode.fn()
		os.Exit(0)
	}
	os.Exit(m.Run())
}
2
vendor/bazil.org/fuse/fs/serve.go
generated
vendored
@ -1,6 +1,6 @@
// FUSE service loop, for servers that wish to use it.

package fs
package fs // import "bazil.org/fuse/fs"

import (
	"encoding/binary"
30
vendor/bazil.org/fuse/fs/serve_darwin_test.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
package fs_test

import (
	"testing"

	"bazil.org/fuse/fs/fstestutil"
	"golang.org/x/sys/unix"
)

type exchangeData struct {
	fstestutil.File
	// this struct cannot be zero size or multiple instances may look identical
	_ int
}

func TestExchangeDataNotSupported(t *testing.T) {
	t.Parallel()
	mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{&fstestutil.ChildMap{
		"one": &exchangeData{},
		"two": &exchangeData{},
	}}, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer mnt.Close()

	if err := unix.Exchangedata(mnt.Dir+"/one", mnt.Dir+"/two", 0); err != unix.ENOTSUP {
		t.Fatalf("expected ENOTSUP from exchangedata: %v", err)
	}
}
2843
vendor/bazil.org/fuse/fs/serve_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
2
vendor/bazil.org/fuse/fuse.go
generated
vendored
@ -98,7 +98,7 @@
// Behavior and metadata of the mounted file system can be changed by
// passing MountOption values to Mount.
//
package fuse
package fuse // import "bazil.org/fuse"

import (
	"bytes"
63
vendor/bazil.org/fuse/fuse_kernel_test.go
generated
vendored
Normal file
@ -0,0 +1,63 @@
package fuse_test

import (
	"os"
	"testing"

	"bazil.org/fuse"
)

func TestOpenFlagsAccmodeMaskReadWrite(t *testing.T) {
	var f = fuse.OpenFlags(os.O_RDWR | os.O_SYNC)
	if g, e := f&fuse.OpenAccessModeMask, fuse.OpenReadWrite; g != e {
		t.Fatalf("OpenAccessModeMask behaves wrong: %v: %o != %o", f, g, e)
	}
	if f.IsReadOnly() {
		t.Fatalf("IsReadOnly is wrong: %v", f)
	}
	if f.IsWriteOnly() {
		t.Fatalf("IsWriteOnly is wrong: %v", f)
	}
	if !f.IsReadWrite() {
		t.Fatalf("IsReadWrite is wrong: %v", f)
	}
}

func TestOpenFlagsAccmodeMaskReadOnly(t *testing.T) {
	var f = fuse.OpenFlags(os.O_RDONLY | os.O_SYNC)
	if g, e := f&fuse.OpenAccessModeMask, fuse.OpenReadOnly; g != e {
		t.Fatalf("OpenAccessModeMask behaves wrong: %v: %o != %o", f, g, e)
	}
	if !f.IsReadOnly() {
		t.Fatalf("IsReadOnly is wrong: %v", f)
	}
	if f.IsWriteOnly() {
		t.Fatalf("IsWriteOnly is wrong: %v", f)
	}
	if f.IsReadWrite() {
		t.Fatalf("IsReadWrite is wrong: %v", f)
	}
}

func TestOpenFlagsAccmodeMaskWriteOnly(t *testing.T) {
	var f = fuse.OpenFlags(os.O_WRONLY | os.O_SYNC)
	if g, e := f&fuse.OpenAccessModeMask, fuse.OpenWriteOnly; g != e {
		t.Fatalf("OpenAccessModeMask behaves wrong: %v: %o != %o", f, g, e)
	}
	if f.IsReadOnly() {
		t.Fatalf("IsReadOnly is wrong: %v", f)
	}
	if !f.IsWriteOnly() {
		t.Fatalf("IsWriteOnly is wrong: %v", f)
	}
	if f.IsReadWrite() {
		t.Fatalf("IsReadWrite is wrong: %v", f)
	}
}

func TestOpenFlagsString(t *testing.T) {
	var f = fuse.OpenFlags(os.O_RDWR | os.O_SYNC | os.O_APPEND)
	if g, e := f.String(), "OpenReadWrite+OpenAppend+OpenSync"; g != e {
		t.Fatalf("OpenFlags.String: %q != %q", g, e)
	}
}
2
vendor/bazil.org/fuse/fuseutil/fuseutil.go
generated
vendored
@ -1,4 +1,4 @@
package fuseutil
package fuseutil // import "bazil.org/fuse/fuseutil"

import (
	"bazil.org/fuse"
64
vendor/bazil.org/fuse/options_daemon_timeout_test.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
// Test for adjustable timeout between a FUSE request and the daemon's response.
//
// +build darwin freebsd

package fuse_test

import (
	"os"
	"runtime"
	"syscall"
	"testing"
	"time"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	"bazil.org/fuse/fs/fstestutil"
	"golang.org/x/net/context"
)

type slowCreaterDir struct {
	fstestutil.Dir
}

var _ fs.NodeCreater = slowCreaterDir{}

func (c slowCreaterDir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {
	time.Sleep(10 * time.Second)
	// pick a really distinct error, to identify it later
	return nil, nil, fuse.Errno(syscall.ENAMETOOLONG)
}

func TestMountOptionDaemonTimeout(t *testing.T) {
	if runtime.GOOS != "darwin" && runtime.GOOS != "freebsd" {
		return
	}
	if testing.Short() {
		t.Skip("skipping time-based test in short mode")
	}
	t.Parallel()

	mnt, err := fstestutil.MountedT(t,
		fstestutil.SimpleFS{slowCreaterDir{}},
		nil,
		fuse.DaemonTimeout("2"),
	)
	if err != nil {
		t.Fatal(err)
	}
	defer mnt.Close()

	// This should fail by the kernel timing out the request.
	f, err := os.Create(mnt.Dir + "/child")
	if err == nil {
		f.Close()
		t.Fatal("expected an error")
	}
	perr, ok := err.(*os.PathError)
	if !ok {
		t.Fatalf("expected PathError, got %T: %v", err, err)
	}
	if perr.Err == syscall.ENAMETOOLONG {
		t.Fatalf("expected other than ENAMETOOLONG, got %T: %v", err, err)
	}
}
10
vendor/bazil.org/fuse/options_helper_test.go
generated
vendored
Normal file
@ -0,0 +1,10 @@
package fuse

// for TestMountOptionCommaError
func ForTestSetMountOption(k, v string) MountOption {
	fn := func(conf *mountConfig) error {
		conf.options[k] = v
		return nil
	}
	return fn
}
31
vendor/bazil.org/fuse/options_nocomma_test.go
generated
vendored
Normal file
@ -0,0 +1,31 @@
// This file contains tests for platforms that have no escape
// mechanism for including commas in mount options.
//
// +build darwin

package fuse_test

import (
	"runtime"
	"testing"

	"bazil.org/fuse"
	"bazil.org/fuse/fs/fstestutil"
)

func TestMountOptionCommaError(t *testing.T) {
	t.Parallel()
	// this test is not tied to any specific option, it just needs
	// some string content
	var evil = "FuseTest,Marker"
	mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{fstestutil.Dir{}}, nil,
		fuse.ForTestSetMountOption("fusetest", evil),
	)
	if err == nil {
		mnt.Close()
		t.Fatal("expected an error about commas")
	}
	if g, e := err.Error(), `mount options cannot contain commas on `+runtime.GOOS+`: "fusetest"="FuseTest,Marker"`; g != e {
		t.Fatalf("wrong error: %q != %q", g, e)
	}
}
231
vendor/bazil.org/fuse/options_test.go
generated
vendored
Normal file
@ -0,0 +1,231 @@
package fuse_test

import (
	"os"
	"runtime"
	"syscall"
	"testing"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	"bazil.org/fuse/fs/fstestutil"
	"golang.org/x/net/context"
)

func init() {
	fstestutil.DebugByDefault()
}

func TestMountOptionFSName(t *testing.T) {
	if runtime.GOOS == "freebsd" {
		t.Skip("FreeBSD does not support FSName")
	}
	t.Parallel()
	const name = "FuseTestMarker"
	mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{fstestutil.Dir{}}, nil,
		fuse.FSName(name),
	)
	if err != nil {
		t.Fatal(err)
	}
	defer mnt.Close()

	info, err := fstestutil.GetMountInfo(mnt.Dir)
	if err != nil {
		t.Fatal(err)
	}
	if g, e := info.FSName, name; g != e {
		t.Errorf("wrong FSName: %q != %q", g, e)
	}
}

func testMountOptionFSNameEvil(t *testing.T, evil string) {
	if runtime.GOOS == "freebsd" {
		t.Skip("FreeBSD does not support FSName")
	}
	t.Parallel()
	var name = "FuseTest" + evil + "Marker"
	mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{fstestutil.Dir{}}, nil,
		fuse.FSName(name),
	)
	if err != nil {
		t.Fatal(err)
	}
	defer mnt.Close()

	info, err := fstestutil.GetMountInfo(mnt.Dir)
	if err != nil {
		t.Fatal(err)
	}
	if g, e := info.FSName, name; g != e {
		t.Errorf("wrong FSName: %q != %q", g, e)
	}
}

func TestMountOptionFSNameEvilComma(t *testing.T) {
	if runtime.GOOS == "darwin" {
		// see TestMountOptionCommaError for a test that enforces we
		// at least give a nice error, instead of corrupting the mount
		// options
		t.Skip("TODO: OS X gets this wrong, commas in mount options cannot be escaped at all")
	}
	testMountOptionFSNameEvil(t, ",")
}

func TestMountOptionFSNameEvilSpace(t *testing.T) {
	testMountOptionFSNameEvil(t, " ")
}

func TestMountOptionFSNameEvilTab(t *testing.T) {
	testMountOptionFSNameEvil(t, "\t")
}

func TestMountOptionFSNameEvilNewline(t *testing.T) {
	testMountOptionFSNameEvil(t, "\n")
}

func TestMountOptionFSNameEvilBackslash(t *testing.T) {
	testMountOptionFSNameEvil(t, `\`)
}

func TestMountOptionFSNameEvilBackslashDouble(t *testing.T) {
	// catch double-unescaping, if it were to happen
	testMountOptionFSNameEvil(t, `\\`)
}

func TestMountOptionSubtype(t *testing.T) {
	if runtime.GOOS == "darwin" {
		t.Skip("OS X does not support Subtype")
	}
	if runtime.GOOS == "freebsd" {
		t.Skip("FreeBSD does not support Subtype")
	}
	t.Parallel()
	const name = "FuseTestMarker"
	mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{fstestutil.Dir{}}, nil,
		fuse.Subtype(name),
	)
	if err != nil {
		t.Fatal(err)
	}
	defer mnt.Close()

	info, err := fstestutil.GetMountInfo(mnt.Dir)
	if err != nil {
		t.Fatal(err)
	}
	if g, e := info.Type, "fuse."+name; g != e {
		t.Errorf("wrong Subtype: %q != %q", g, e)
	}
}

// TODO test LocalVolume

// TODO test AllowOther; hard because needs system-level authorization

func TestMountOptionAllowOtherThenAllowRoot(t *testing.T) {
	t.Parallel()
	mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{fstestutil.Dir{}}, nil,
		fuse.AllowOther(),
		fuse.AllowRoot(),
	)
	if err == nil {
		mnt.Close()
	}
	if g, e := err, fuse.ErrCannotCombineAllowOtherAndAllowRoot; g != e {
		t.Fatalf("wrong error: %v != %v", g, e)
	}
}

// TODO test AllowRoot; hard because needs system-level authorization

func TestMountOptionAllowRootThenAllowOther(t *testing.T) {
	t.Parallel()
	mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{fstestutil.Dir{}}, nil,
		fuse.AllowRoot(),
		fuse.AllowOther(),
	)
	if err == nil {
		mnt.Close()
	}
	if g, e := err, fuse.ErrCannotCombineAllowOtherAndAllowRoot; g != e {
		t.Fatalf("wrong error: %v != %v", g, e)
	}
}

type unwritableFile struct{}

func (f unwritableFile) Attr(ctx context.Context, a *fuse.Attr) error {
	a.Mode = 0000
	return nil
}

func TestMountOptionDefaultPermissions(t *testing.T) {
	if runtime.GOOS == "freebsd" {
		t.Skip("FreeBSD does not support DefaultPermissions")
	}
	t.Parallel()

	mnt, err := fstestutil.MountedT(t,
		fstestutil.SimpleFS{
			&fstestutil.ChildMap{"child": unwritableFile{}},
		},
		nil,
		fuse.DefaultPermissions(),
	)
	if err != nil {
		t.Fatal(err)
	}
	defer mnt.Close()

	// This will be prevented by kernel-level access checking when
	// DefaultPermissions is used.
	f, err := os.OpenFile(mnt.Dir+"/child", os.O_WRONLY, 0000)
	if err == nil {
		f.Close()
		t.Fatal("expected an error")
	}
	if !os.IsPermission(err) {
		t.Fatalf("expected a permission error, got %T: %v", err, err)
	}
}

type createrDir struct {
	fstestutil.Dir
}

var _ fs.NodeCreater = createrDir{}

func (createrDir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {
	// pick a really distinct error, to identify it later
	return nil, nil, fuse.Errno(syscall.ENAMETOOLONG)
}

func TestMountOptionReadOnly(t *testing.T) {
	t.Parallel()

	mnt, err := fstestutil.MountedT(t,
		fstestutil.SimpleFS{createrDir{}},
		nil,
		fuse.ReadOnly(),
	)
	if err != nil {
		t.Fatal(err)
	}
	defer mnt.Close()

	// This will be prevented by kernel-level access checking when
	// ReadOnly is used.
	f, err := os.Create(mnt.Dir + "/child")
	if err == nil {
		f.Close()
		t.Fatal("expected an error")
	}
	perr, ok := err.(*os.PathError)
	if !ok {
		t.Fatalf("expected PathError, got %T: %v", err, err)
	}
	if perr.Err != syscall.EROFS {
		t.Fatalf("expected EROFS, got %T: %v", err, err)
	}
}
13
vendor/bazil.org/fuse/syscallx/doc.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
// Package syscallx provides wrappers that make syscalls on various
// platforms more interoperable.
//
// The API intentionally omits the OS X-specific position and option
// arguments for extended attribute calls.
//
// Not having position means it might not be useful for accessing the
// resource fork. If that's needed by code inside fuse, a function
// with a different name may be added on the side.
//
// Options can be implemented with separate wrappers, in the style of
// Linux getxattr/lgetxattr/fgetxattr.
package syscallx // import "bazil.org/fuse/syscallx"
34
vendor/bazil.org/fuse/syscallx/generate
generated
vendored
Executable file
@ -0,0 +1,34 @@
#!/bin/sh
set -e

mksys="$(go env GOROOT)/src/pkg/syscall/mksyscall.pl"

fix() {
	sed 's,^package syscall$,&x\nimport "syscall",' \
		| gofmt -r='BytePtrFromString -> syscall.BytePtrFromString' \
		| gofmt -r='Syscall6 -> syscall.Syscall6' \
		| gofmt -r='Syscall -> syscall.Syscall' \
		| gofmt -r='SYS_GETXATTR -> syscall.SYS_GETXATTR' \
		| gofmt -r='SYS_LISTXATTR -> syscall.SYS_LISTXATTR' \
		| gofmt -r='SYS_SETXATTR -> syscall.SYS_SETXATTR' \
		| gofmt -r='SYS_REMOVEXATTR -> syscall.SYS_REMOVEXATTR' \
		| gofmt -r='SYS_MSYNC -> syscall.SYS_MSYNC'
}

cd "$(dirname "$0")"

$mksys xattr_darwin.go \
	| fix \
	>xattr_darwin_amd64.go

$mksys -l32 xattr_darwin.go \
	| fix \
	>xattr_darwin_386.go

$mksys msync.go \
	| fix \
	>msync_amd64.go

$mksys -l32 msync.go \
	| fix \
	>msync_386.go
9
vendor/bazil.org/fuse/syscallx/msync.go
generated
vendored
Normal file
@ -0,0 +1,9 @@
package syscallx

/* This is the source file for msync_*.go, to regenerate run

./generate

*/

//sys Msync(b []byte, flags int) (err error)
24
vendor/bazil.org/fuse/syscallx/msync_386.go
generated
vendored
Normal file
@ -0,0 +1,24 @@
// mksyscall.pl -l32 msync.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT

package syscallx

import "syscall"

import "unsafe"

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func Msync(b []byte, flags int) (err error) {
	var _p0 unsafe.Pointer
	if len(b) > 0 {
		_p0 = unsafe.Pointer(&b[0])
	} else {
		_p0 = unsafe.Pointer(&_zero)
	}
	_, _, e1 := syscall.Syscall(syscall.SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags))
	if e1 != 0 {
		err = e1
	}
	return
}
24
vendor/bazil.org/fuse/syscallx/msync_amd64.go
generated
vendored
Normal file
@ -0,0 +1,24 @@
// mksyscall.pl msync.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT

package syscallx

import "syscall"

import "unsafe"

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func Msync(b []byte, flags int) (err error) {
	var _p0 unsafe.Pointer
	if len(b) > 0 {
		_p0 = unsafe.Pointer(&b[0])
	} else {
		_p0 = unsafe.Pointer(&_zero)
	}
	_, _, e1 := syscall.Syscall(syscall.SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags))
	if e1 != 0 {
		err = e1
	}
	return
}
4
vendor/bazil.org/fuse/syscallx/syscallx.go
generated
vendored
Normal file
@ -0,0 +1,4 @@
package syscallx

// make us look more like package syscall, so mksyscall.pl output works
var _zero uintptr
26
vendor/bazil.org/fuse/syscallx/syscallx_std.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
// +build !darwin

package syscallx

// This file just contains wrappers for platforms that already have
// the right stuff in golang.org/x/sys/unix.

import (
	"golang.org/x/sys/unix"
)

func Getxattr(path string, attr string, dest []byte) (sz int, err error) {
	return unix.Getxattr(path, attr, dest)
}

func Listxattr(path string, dest []byte) (sz int, err error) {
	return unix.Listxattr(path, dest)
}

func Setxattr(path string, attr string, data []byte, flags int) (err error) {
	return unix.Setxattr(path, attr, data, flags)
}

func Removexattr(path string, attr string) (err error) {
	return unix.Removexattr(path, attr)
}
vendor/bazil.org/fuse/syscallx/xattr_darwin.go (generated, vendored; new normal file, 38 lines)
@@ -0,0 +1,38 @@
package syscallx

/* This is the source file for syscallx_darwin_*.go, to regenerate run

./generate

*/

// cannot use dest []byte here because OS X getxattr really wants a
// NULL to trigger size probing, size==0 is not enough
//
//sys getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error)

func Getxattr(path string, attr string, dest []byte) (sz int, err error) {
	var destp *byte
	if len(dest) > 0 {
		destp = &dest[0]
	}
	return getxattr(path, attr, destp, len(dest), 0, 0)
}

//sys listxattr(path string, dest []byte, options int) (sz int, err error)

func Listxattr(path string, dest []byte) (sz int, err error) {
	return listxattr(path, dest, 0)
}

//sys setxattr(path string, attr string, data []byte, position uint32, flags int) (err error)

func Setxattr(path string, attr string, data []byte, flags int) (err error) {
	return setxattr(path, attr, data, 0, flags)
}

//sys removexattr(path string, attr string, options int) (err error)

func Removexattr(path string, attr string) (err error) {
	return removexattr(path, attr, 0)
}
vendor/bazil.org/fuse/syscallx/xattr_darwin_386.go (generated, vendored; new normal file, 97 lines)
@@ -0,0 +1,97 @@
// mksyscall.pl -l32 xattr_darwin.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT

package syscallx

import "syscall"

import "unsafe"

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) {
	var _p0 *byte
	_p0, err = syscall.BytePtrFromString(path)
	if err != nil {
		return
	}
	var _p1 *byte
	_p1, err = syscall.BytePtrFromString(attr)
	if err != nil {
		return
	}
	r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options))
	sz = int(r0)
	if e1 != 0 {
		err = e1
	}
	return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func listxattr(path string, dest []byte, options int) (sz int, err error) {
	var _p0 *byte
	_p0, err = syscall.BytePtrFromString(path)
	if err != nil {
		return
	}
	var _p1 unsafe.Pointer
	if len(dest) > 0 {
		_p1 = unsafe.Pointer(&dest[0])
	} else {
		_p1 = unsafe.Pointer(&_zero)
	}
	r0, _, e1 := syscall.Syscall6(syscall.SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(options), 0, 0)
	sz = int(r0)
	if e1 != 0 {
		err = e1
	}
	return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func setxattr(path string, attr string, data []byte, position uint32, flags int) (err error) {
	var _p0 *byte
	_p0, err = syscall.BytePtrFromString(path)
	if err != nil {
		return
	}
	var _p1 *byte
	_p1, err = syscall.BytePtrFromString(attr)
	if err != nil {
		return
	}
	var _p2 unsafe.Pointer
	if len(data) > 0 {
		_p2 = unsafe.Pointer(&data[0])
	} else {
		_p2 = unsafe.Pointer(&_zero)
	}
	_, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(position), uintptr(flags))
	if e1 != 0 {
		err = e1
	}
	return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func removexattr(path string, attr string, options int) (err error) {
	var _p0 *byte
	_p0, err = syscall.BytePtrFromString(path)
	if err != nil {
		return
	}
	var _p1 *byte
	_p1, err = syscall.BytePtrFromString(attr)
	if err != nil {
		return
	}
	_, _, e1 := syscall.Syscall(syscall.SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options))
	if e1 != 0 {
		err = e1
	}
	return
}

vendor/bazil.org/fuse/syscallx/xattr_darwin_amd64.go (generated, vendored; new normal file, 97 lines)
@@ -0,0 +1,97 @@
// mksyscall.pl xattr_darwin.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT

package syscallx

import "syscall"

import "unsafe"

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) {
	var _p0 *byte
	_p0, err = syscall.BytePtrFromString(path)
	if err != nil {
		return
	}
	var _p1 *byte
	_p1, err = syscall.BytePtrFromString(attr)
	if err != nil {
		return
	}
	r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options))
	sz = int(r0)
	if e1 != 0 {
		err = e1
	}
	return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func listxattr(path string, dest []byte, options int) (sz int, err error) {
	var _p0 *byte
	_p0, err = syscall.BytePtrFromString(path)
	if err != nil {
		return
	}
	var _p1 unsafe.Pointer
	if len(dest) > 0 {
		_p1 = unsafe.Pointer(&dest[0])
	} else {
		_p1 = unsafe.Pointer(&_zero)
	}
	r0, _, e1 := syscall.Syscall6(syscall.SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(options), 0, 0)
	sz = int(r0)
	if e1 != 0 {
		err = e1
	}
	return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func setxattr(path string, attr string, data []byte, position uint32, flags int) (err error) {
	var _p0 *byte
	_p0, err = syscall.BytePtrFromString(path)
	if err != nil {
		return
	}
	var _p1 *byte
	_p1, err = syscall.BytePtrFromString(attr)
	if err != nil {
		return
	}
	var _p2 unsafe.Pointer
	if len(data) > 0 {
		_p2 = unsafe.Pointer(&data[0])
	} else {
		_p2 = unsafe.Pointer(&_zero)
	}
	_, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(position), uintptr(flags))
	if e1 != 0 {
		err = e1
	}
	return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func removexattr(path string, attr string, options int) (err error) {
	var _p0 *byte
	_p0, err = syscall.BytePtrFromString(path)
	if err != nil {
		return
	}
	var _p1 *byte
	_p1, err = syscall.BytePtrFromString(attr)
	if err != nil {
		return
	}
	_, _, e1 := syscall.Syscall(syscall.SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options))
	if e1 != 0 {
		err = e1
	}
	return
}
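Aside, not part of the commit: the darwin source above notes that `getxattr` needs a NULL destination to trigger size probing. A minimal sketch of the resulting two-call pattern through the exported wrapper, with a hypothetical path and attribute name:

```go
package main

import (
	"fmt"
	"log"

	"bazil.org/fuse/syscallx"
)

func main() {
	const path = "/tmp/example.txt" // hypothetical file
	const attr = "user.comment"     // hypothetical attribute name

	// First call with a nil buffer: Getxattr passes a NULL destination,
	// which probes the attribute's size without copying it.
	size, err := syscallx.Getxattr(path, attr, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Second call with a buffer of the probed size.
	buf := make([]byte, size)
	n, err := syscallx.Getxattr(path, attr, buf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s = %q\n", attr, buf[:n])
}
```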
vendor/cloud.google.com/go/.travis.yml (generated, vendored; new normal file, 16 lines)
@@ -0,0 +1,16 @@
sudo: false
language: go
go:
- 1.6
- 1.7
- 1.8
install:
- go get -v cloud.google.com/go/...
script:
- openssl aes-256-cbc -K $encrypted_912ff8fa81ad_key -iv $encrypted_912ff8fa81ad_iv -in key.json.enc -out key.json -d
- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json"
  go test -race -v cloud.google.com/go/...
env:
  matrix:
    # The GCLOUD_TESTS_API_KEY environment variable.
    secure: VdldogUOoubQ60LhuHJ+g/aJoBiujkSkWEWl79Zb8cvQorcQbxISS+JsOOp4QkUOU4WwaHAm8/3pIH1QMWOR6O78DaLmDKi5Q4RpkVdCpUXy+OAfQaZIcBsispMrjxLXnqFjo9ELnrArfjoeCTzaX0QTCfwQwVmigC8rR30JBKI=
vendor/cloud.google.com/go/CONTRIBUTING.md (generated, vendored; new normal file, 132 lines)
@@ -0,0 +1,132 @@
# Contributing

1. Sign one of the contributor license agreements below.
1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool.
1. You will need to ensure that your `GOBIN` directory (by default
   `$GOPATH/bin`) is in your `PATH` so that git can find the command.
1. Get the cloud package by running `go get -d cloud.google.com/go`.
1. If you have already checked out the source, make sure that the remote git
   origin is https://code.googlesource.com/gocloud:

        git remote set-url origin https://code.googlesource.com/gocloud

1. Make sure your auth is configured correctly by visiting
   https://code.googlesource.com, clicking "Generate Password", and following
   the directions.
1. Make changes and create a change by running `git codereview change <name>`,
   provide a commit message, and use `git codereview mail` to create a Gerrit CL.
1. Keep amending to the change with `git codereview change` and mail as you receive
   feedback. Each new mailed amendment will create a new patch set for your change in Gerrit.

## Integration Tests

In addition to the unit tests, you may run the integration test suite.

To run the integration tests, you must create and configure a project in the
Google Developers Console.

After creating a project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount).
Ensure the project-level **Owner** [IAM role](console.cloud.google.com/iam-admin/iam/project)
(or **Editor** and **Logs Configuration Writer** roles) is added to the
service account.

Once you create a project, set the following environment variables to be able to
run against the actual APIs.

- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455)
- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file.
- **GCLOUD_TESTS_API_KEY**: Your API key.

Install the [gcloud command-line tool][gcloudcli] on your machine and use it
to create some resources used in integration tests.

From the project's root directory:

``` sh
# Set the default project in your env.
$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID

# Authenticate the gcloud tool with your account.
$ gcloud auth login

# Create the indexes used in the datastore integration tests.
$ gcloud preview datastore create-indexes datastore/testdata/index.yaml

# Create a Google Cloud storage bucket with the same name as your test project,
# and with the Stackdriver Logging service account as owner, for the sink
# integration tests in logging.
$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID

# Create a Spanner instance for the spanner integration tests.
$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 1 --description 'Instance for go client test'
# NOTE: Spanner instances are priced by the node-hour, so you may want to delete
# the instance after testing with 'gcloud beta spanner instances delete'.
```

Once you've set the environment variables, you can run the integration tests by
running:

``` sh
$ go test -v cloud.google.com/go/...
```

## Contributor License Agreements

Before we can accept your pull requests you'll need to sign a Contributor
License Agreement (CLA):

- **If you are an individual writing original source code** and **you own the
  intellectual property**, then you'll need to sign an [individual CLA][indvcla].
- **If you work for a company that wants to allow you to contribute your work**,
  then you'll need to sign a [corporate CLA][corpcla].

You can sign these electronically (just scroll to the bottom). After that,
we'll be able to accept your pull requests.

## Contributor Code of Conduct

As contributors and maintainers of this project,
and in the interest of fostering an open and welcoming community,
we pledge to respect all people who contribute through reporting issues,
posting feature requests, updating documentation,
submitting pull requests or patches, and other activities.

We are committed to making participation in this project
a harassment-free experience for everyone,
regardless of level of experience, gender, gender identity and expression,
sexual orientation, disability, personal appearance,
body size, race, ethnicity, age, religion, or nationality.

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing others' private information,
such as physical or electronic
addresses, without explicit permission
* Other unethical or unprofessional conduct.

Project maintainers have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct.
By adopting this Code of Conduct,
project maintainers commit themselves to fairly and consistently
applying these principles to every aspect of managing this project.
Project maintainers who do not follow or enforce the Code of Conduct
may be permanently removed from the project team.

This code of conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.

Instances of abusive, harassing, or otherwise unacceptable behavior
may be reported by opening an issue
or contacting one or more of the project maintainers.

This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)

[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/
[indvcla]: https://developers.google.com/open-source/cla/individual
[corpcla]: https://developers.google.com/open-source/cla/corporate
vendor/cloud.google.com/go/README.md (generated, vendored; new normal file, 528 lines)
@@ -0,0 +1,528 @@
# Google Cloud for Go

[Build Status](https://travis-ci.org/GoogleCloudPlatform/google-cloud-go)
[GoDoc](https://godoc.org/cloud.google.com/go)

``` go
import "cloud.google.com/go"
```

Go packages for Google Cloud Platform services.

To install the packages on your system,

```
$ go get -u cloud.google.com/go/...
```

**NOTE:** These packages are under development, and may occasionally make
backwards-incompatible changes.

**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).

* [News](#news)
* [Supported APIs](#supported-apis)
* [Go Versions Supported](#go-versions-supported)
* [Authorization](#authorization)
* [Cloud Datastore](#cloud-datastore-)
* [Cloud Storage](#cloud-storage-)
* [Cloud Pub/Sub](#cloud-pub-sub-)
* [Cloud BigQuery](#cloud-bigquery-)
* [Stackdriver Logging](#stackdriver-logging-)
* [Cloud Spanner](#cloud-spanner-)

## News

_February 14, 2017_

Release of a client library for Spanner. See
the
[blog post](https://cloudplatform.googleblog.com/2017/02/introducing-Cloud-Spanner-a-global-database-service-for-mission-critical-applications.html).

Note that although the Spanner service is beta, the Go client library is alpha.

_December 12, 2016_

Beta release of BigQuery, DataStore, Logging and Storage. See the
[blog post](https://cloudplatform.googleblog.com/2016/12/announcing-new-google-cloud-client.html).

Also, BigQuery now supports structs. Read a row directly into a struct with
`RowIterator.Next`, and upload a row directly from a struct with `Uploader.Put`.
You can also use field tags. See the [package documentation][cloud-bigquery-ref]
for details.

_December 5, 2016_

More changes to BigQuery:

* The `ValueList` type was removed. It is no longer necessary. Instead of
  ```go
  var v ValueList
  ... it.Next(&v) ..
  ```
  use

  ```go
  var v []Value
  ... it.Next(&v) ...
  ```

* Previously, repeatedly calling `RowIterator.Next` on the same `[]Value` or
  `ValueList` would append to the slice. Now each call resets the size to zero first.

* Schema inference will infer the SQL type BYTES for a struct field of
  type []byte. Previously it inferred STRING.

* The types `uint`, `uint64` and `uintptr` are no longer supported in schema
  inference. BigQuery's integer type is INT64, and those types may hold values
  that are not correctly represented in a 64-bit signed integer.

* The SQL types DATE, TIME and DATETIME are now supported. They correspond to
  the `Date`, `Time` and `DateTime` types in the new `cloud.google.com/go/civil`
  package.

_November 17, 2016_

Change to BigQuery: values from INTEGER columns will now be returned as int64,
not int. This will avoid errors arising from large values on 32-bit systems.

_November 8, 2016_

New datastore feature: datastore now encodes your nested Go structs as Entity values,
instead of a flattened list of the embedded struct's fields.
This means that you may now have twice-nested slices, eg.
```go
type State struct {
  Cities []struct{
    Populations []int
  }
}
```

See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/79jtrdeuJAg) for
more details.

_November 8, 2016_

Breaking changes to datastore: contexts no longer hold namespaces; instead you
must set a key's namespace explicitly. Also, key functions have been changed
and renamed.

* The WithNamespace function has been removed. To specify a namespace in a Query, use the Query.Namespace method:
  ```go
  q := datastore.NewQuery("Kind").Namespace("ns")
  ```

* All the fields of Key are exported. That means you can construct any Key with a struct literal:
  ```go
  k := &Key{Kind: "Kind", ID: 37, Namespace: "ns"}
  ```

* As a result of the above, the Key methods Kind, ID, Name, Parent, SetParent and Namespace have been removed.

* `NewIncompleteKey` has been removed, replaced by `IncompleteKey`. Replace
  ```go
  NewIncompleteKey(ctx, kind, parent)
  ```
  with
  ```go
  IncompleteKey(kind, parent)
  ```
  and if you do use namespaces, make sure you set the namespace on the returned key.

* `NewKey` has been removed, replaced by `NameKey` and `IDKey`. Replace
  ```go
  NewKey(ctx, kind, name, 0, parent)
  NewKey(ctx, kind, "", id, parent)
  ```
  with
  ```go
  NameKey(kind, name, parent)
  IDKey(kind, id, parent)
  ```
  and if you do use namespaces, make sure you set the namespace on the returned key.

* The `Done` variable has been removed. Replace `datastore.Done` with `iterator.Done`, from the package `google.golang.org/api/iterator`.

* The `Client.Close` method will have a return type of error. It will return the result of closing the underlying gRPC connection.

See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/hqXtM_4Ix-0) for
more details.

_October 27, 2016_

Breaking change to bigquery: `NewGCSReference` is now a function,
not a method on `Client`.

New bigquery feature: `Table.LoaderFrom` now accepts a `ReaderSource`, enabling
loading data into a table from a file or any `io.Reader`.

_October 21, 2016_

Breaking change to pubsub: removed `pubsub.Done`.

Use `iterator.Done` instead, where `iterator` is the package
`google.golang.org/api/iterator`.

[Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md)

## Supported APIs

Google API | Status | Package
-------------------------------|--------------|-----------------------------------------------------------
[Datastore][cloud-datastore] | beta | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
[Storage][cloud-storage] | beta | [`cloud.google.com/go/storage`][cloud-storage-ref]
[Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
[BigQuery][cloud-bigquery] | beta | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
[Logging][cloud-logging] | beta | [`cloud.google.com/go/logging`][cloud-logging-ref]
[Pub/Sub][cloud-pubsub] | alpha | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
[Vision][cloud-vision] | beta | [`cloud.google.com/go/vision`][cloud-vision-ref]
[Language][cloud-language] | alpha | [`cloud.google.com/go/language/apiv1`][cloud-language-ref]
[Speech][cloud-speech] | alpha | [`cloud.google.com/go/speech/apiv1beta1`][cloud-speech-ref]
[Spanner][cloud-spanner] | alpha | [`cloud.google.com/go/spanner`][cloud-spanner-ref]

> **Alpha status**: the API is still being actively developed. As a
> result, it might change in backward-incompatible ways and is not recommended
> for production use.
>
> **Beta status**: the API is largely complete, but still has outstanding
> features and bugs to be addressed. There may be minor backwards-incompatible
> changes where necessary.
>
> **Stable status**: the API is mature and ready for production use. We will
> continue addressing bugs and feature requests.

Documentation and examples are available at
https://godoc.org/cloud.google.com/go

Visit or join the
[google-api-go-announce group](https://groups.google.com/forum/#!forum/google-api-go-announce)
for updates on these packages.

## Go Versions Supported

We support the two most recent major versions of Go. If Google App Engine uses
an older version, we support that as well. You can see which versions are
currently supported by looking at the lines following `go:` in
[`.travis.yml`](.travis.yml).

## Authorization

By default, each API will use [Google Application Default Credentials][default-creds]
for authorization credentials used in calling the API endpoints. This will allow your
application to run in many environments without requiring explicit configuration.

```go
client, err := storage.NewClient(ctx)
```

To authorize using a
[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys),
pass
[`option.WithServiceAccountFile`](https://godoc.org/google.golang.org/api/option#WithServiceAccountFile)
to the `NewClient` function of the desired package. For example:

```go
client, err := storage.NewClient(ctx, option.WithServiceAccountFile("path/to/keyfile.json"))
```

You can exert more control over authorization by using the
[`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to
create an `oauth2.TokenSource`. Then pass
[`option.WithTokenSource`](https://godoc.org/google.golang.org/api/option#WithTokenSource)
to the `NewClient` function:
```go
tokenSource := ...
client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
```

## Cloud Datastore [GoDoc](https://godoc.org/cloud.google.com/go/datastore)

- [About Cloud Datastore][cloud-datastore]
- [Activating the API for your project][cloud-datastore-activation]
- [API documentation][cloud-datastore-docs]
- [Go client documentation](https://godoc.org/cloud.google.com/go/datastore)
- [Complete sample program](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/datastore/tasks)

### Example Usage

First create a `datastore.Client` to use throughout your application:

```go
client, err := datastore.NewClient(ctx, "my-project-id")
if err != nil {
	log.Fatal(err)
}
```

Then use that client to interact with the API:

```go
type Post struct {
	Title       string
	Body        string `datastore:",noindex"`
	PublishedAt time.Time
}
keys := []*datastore.Key{
	datastore.NewKey(ctx, "Post", "post1", 0, nil),
	datastore.NewKey(ctx, "Post", "post2", 0, nil),
}
posts := []*Post{
	{Title: "Post 1", Body: "...", PublishedAt: time.Now()},
	{Title: "Post 2", Body: "...", PublishedAt: time.Now()},
}
if _, err := client.PutMulti(ctx, keys, posts); err != nil {
	log.Fatal(err)
}
```

## Cloud Storage [GoDoc](https://godoc.org/cloud.google.com/go/storage)

- [About Cloud Storage][cloud-storage]
- [API documentation][cloud-storage-docs]
- [Go client documentation](https://godoc.org/cloud.google.com/go/storage)
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage)

### Example Usage

First create a `storage.Client` to use throughout your application:

```go
client, err := storage.NewClient(ctx)
if err != nil {
	log.Fatal(err)
}
```

```go
// Read the object1 from bucket.
rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx)
if err != nil {
	log.Fatal(err)
}
defer rc.Close()
body, err := ioutil.ReadAll(rc)
if err != nil {
	log.Fatal(err)
}
```

## Cloud Pub/Sub [GoDoc](https://godoc.org/cloud.google.com/go/pubsub)

- [About Cloud Pubsub][cloud-pubsub]
- [API documentation][cloud-pubsub-docs]
- [Go client documentation](https://godoc.org/cloud.google.com/go/pubsub)
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/pubsub)

### Example Usage

First create a `pubsub.Client` to use throughout your application:

```go
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
	log.Fatal(err)
}
```

Then use the client to publish and subscribe:

```go
// Publish "hello world" on topic1.
topic := client.Topic("topic1")
msgIDs, err := topic.Publish(ctx, &pubsub.Message{
	Data: []byte("hello world"),
})
if err != nil {
	log.Fatal(err)
}

// Create an iterator to pull messages via subscription1.
it, err := client.Subscription("subscription1").Pull(ctx)
if err != nil {
	log.Println(err)
}
defer it.Stop()

// Consume N messages from the iterator.
for i := 0; i < N; i++ {
	msg, err := it.Next()
	if err == iterator.Done {
		break
	}
	if err != nil {
		log.Fatalf("Failed to retrieve message: %v", err)
	}

	fmt.Printf("Message %d: %s\n", i, msg.Data)
	msg.Done(true) // Acknowledge that we've consumed the message.
}
```

## Cloud BigQuery [GoDoc](https://godoc.org/cloud.google.com/go/bigquery)

- [About Cloud BigQuery][cloud-bigquery]
- [API documentation][cloud-bigquery-docs]
- [Go client documentation][cloud-bigquery-ref]
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/bigquery)

### Example Usage

First create a `bigquery.Client` to use throughout your application:
```go
c, err := bigquery.NewClient(ctx, "my-project-ID")
if err != nil {
	// TODO: Handle error.
}
```
Then use that client to interact with the API:
```go
// Construct a query.
q := c.Query(`
	SELECT year, SUM(number)
	FROM [bigquery-public-data:usa_names.usa_1910_2013]
	WHERE name = "William"
	GROUP BY year
	ORDER BY year
`)
// Execute the query.
it, err := q.Read(ctx)
if err != nil {
	// TODO: Handle error.
}
// Iterate through the results.
for {
	var values []bigquery.Value
	err := it.Next(&values)
	if err == iterator.Done {
		break
	}
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(values)
}
```

## Stackdriver Logging [GoDoc](https://godoc.org/cloud.google.com/go/logging)

- [About Stackdriver Logging][cloud-logging]
- [API documentation][cloud-logging-docs]
- [Go client documentation][cloud-logging-ref]
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/logging)

### Example Usage

First create a `logging.Client` to use throughout your application:

```go
ctx := context.Background()
client, err := logging.NewClient(ctx, "my-project")
if err != nil {
	// TODO: Handle error.
}
```
Usually, you'll want to add log entries to a buffer to be periodically flushed
(automatically and asynchronously) to the Stackdriver Logging service.
```go
logger := client.Logger("my-log")
logger.Log(logging.Entry{Payload: "something happened!"})
```
Close your client before your program exits, to flush any buffered log entries.
```go
err = client.Close()
if err != nil {
	// TODO: Handle error.
}
```

## Cloud Spanner [GoDoc](https://godoc.org/cloud.google.com/go/spanner)

- [About Cloud Spanner][cloud-spanner]
- [API documentation][cloud-spanner-docs]
- [Go client documentation](https://godoc.org/cloud.google.com/go/spanner)

### Example Usage

First create a `spanner.Client` to use throughout your application:

```go
client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D")
if err != nil {
	log.Fatal(err)
}
```

```go
// Simple Reads And Writes
_, err := client.Apply(ctx, []*spanner.Mutation{
	spanner.Insert("Users",
		[]string{"name", "email"},
		[]interface{}{"alice", "a@example.com"})})
if err != nil {
	log.Fatal(err)
}
row, err := client.Single().ReadRow(ctx, "Users",
	spanner.Key{"alice"}, []string{"email"})
if err != nil {
	log.Fatal(err)
}
```

## Contributing

Contributions are welcome. Please, see the
[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md)
document for details. We're using Gerrit for our code reviews. Please don't open pull
requests against this repo, new pull requests will be automatically closed.

Please note that this project is released with a Contributor Code of Conduct.
By participating in this project you agree to abide by its terms.
See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md#contributor-code-of-conduct)
for more information.

[cloud-datastore]: https://cloud.google.com/datastore/
[cloud-datastore-ref]: https://godoc.org/cloud.google.com/go/datastore
[cloud-datastore-docs]: https://cloud.google.com/datastore/docs
[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate

[cloud-pubsub]: https://cloud.google.com/pubsub/
[cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub
[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs

[cloud-storage]: https://cloud.google.com/storage/
[cloud-storage-ref]: https://godoc.org/cloud.google.com/go/storage
[cloud-storage-docs]: https://cloud.google.com/storage/docs
[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets

[cloud-bigtable]: https://cloud.google.com/bigtable/
[cloud-bigtable-ref]: https://godoc.org/cloud.google.com/go/bigtable

[cloud-bigquery]: https://cloud.google.com/bigquery/
[cloud-bigquery-docs]: https://cloud.google.com/bigquery/docs
[cloud-bigquery-ref]: https://godoc.org/cloud.google.com/go/bigquery

[cloud-logging]: https://cloud.google.com/logging/
[cloud-logging-docs]: https://cloud.google.com/logging/docs
[cloud-logging-ref]: https://godoc.org/cloud.google.com/go/logging

[cloud-vision]: https://cloud.google.com/vision/
[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision

[cloud-language]: https://cloud.google.com/natural-language
[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1

[cloud-speech]: https://cloud.google.com/speech
[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1beta1

[cloud-spanner]: https://cloud.google.com/spanner/
[cloud-spanner-ref]: https://godoc.org/cloud.google.com/go/spanner
[cloud-spanner-docs]: https://cloud.google.com/spanner/docs

[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials
vendor/cloud.google.com/go/appveyor.yml (generated, vendored; new normal file, 32 lines)
@@ -0,0 +1,32 @@
# This file configures AppVeyor (http://www.appveyor.com),
# a Windows-based CI service similar to Travis.

# Identifier for this run
version: "{build}"

# Clone the repo into this path, which conforms to the standard
# Go workspace structure.
clone_folder: c:\gopath\src\cloud.google.com\go

environment:
  GOPATH: c:\gopath
  GCLOUD_TESTS_GOLANG_PROJECT_ID: dulcet-port-762
  GCLOUD_TESTS_GOLANG_KEY: c:\gopath\src\cloud.google.com\go\key.json
  KEYFILE_CONTENTS:
    secure: IvRbDAhM2PIQqzVkjzJ4FjizUvoQ+c3vG/qhJQG+HlZ/L5KEkqLu+x6WjLrExrNMyGku4znB2jmbTrUW3Ob4sGG+R5vvqeQ3YMHCVIkw5CxY+/bUDkW5RZWsVbuCnNa/vKsWmCP+/sZW6ICe29yKJ2ZOb6QaauI4s9R6j+cqBbU9pumMGYFRb0Rw3uUU7DKmVFCy+NjTENZIlDP9rmjANgAzigowJJEb2Tg9sLlQKmQeKiBSRN8lKc5Nq60a+fIzHGKvql4eIitDDDpOpyHv15/Xr1BzFw2yDoiR4X1lng0u7q0X9RgX4VIYa6gT16NXBEmQgbuX8gh7SfPMp9RhiZD9sVUaV+yogEabYpyPnmUURo0hXwkctKaBkQlEmKvjHwF5dvbg8+yqGhwtjAgFNimXG3INrwQsfQsZskkQWanutbJf9xy50GyWWFZZdi0uT4oXP/b5P7aklPXKXsvrJKBh7RjEaqBrhi86IJwOjBspvoR4l2WmcQyxb2xzQS1pjbBJFQfYJJ8+JgsstTL8PBO9d4ybJC0li1Om1qnWxkaewvPxxuoHJ9LpRKof19yRYWBmhTXb2tTASKG/zslvl4fgG4DmQBS93WC7dsiGOhAraGw2eCTgd0lYZOhk1FjWl9TS80aktXxzH/7nTvem5ohm+eDl6O0wnTL4KXjQVNSQ1PyLn4lGRJ5MNGzBTRFWIr2API2rca4Fysyfh/UdmazPGlNbY9JPGqb9+F04QzLfqm+Zz/cHy59E7lOSMBlUI4KD6d6ZNNKNRH+/g9i+fSiyiXKugTfda8KBnWGyPwprxuWGYaiQUGUYOwJY5R6x5c4mjImAB310V+Wo33UbWFJiwxEDsiCNqW1meVkBzt2er26vh4qbgCUIQ3iM3gFPfHgy+QxkmIhic7Q1HYacQElt8AAP41M7cCKWCuZidegP37MBB//mjjiNt047ZSQEvB4tqsX/OvfbByVef+cbtVw9T0yjHvmCdPW1XrhyrCCgclu6oYYdbmc5D7BBDRbjjMWGv6YvceAbfGf6ukdB5PuV+TGEN/FoQ1QTRA6Aqf+3fLMg4mS4oyTfw5xyYNbv3qoyLPrp+BnxI53WB9p0hfMg4n9FD6NntBxjDq+Q3Lk/bjC/Y4MaRWdzbMzF9a0lgGfcw9DURlK5p7uGJC9vg34feNoQprxVEZRQ01cHLeob6eGkYm4HxSRx8JY39Mh+9wzJo+k/aIvFleNC3e35NOrkXr6wb5e42n2DwBdPqdNolTLtLFRglAL1LTpp27UjvjieWJAKfoDTR5CKl01sZqt0wPdLLcvsMj6CiPFmccUIOYeZMe86kLBD61Qa5F1EwkgO3Om2qSjW96FzL4skRc+BmU5RrHlAFSldR1wpUgtkUMv9vH5Cy+UJdcvpZ8KbmhZ2PsjF7ddJ1ve9RAw3cP325AyIMwZ77Ef1mgTM0NJze6eSW1qKlEsgt1FADPyeUu1NQTA2H2dueMPGlArWTSUgyWR9AdfpqouT7eg0JWI5w+yUZZC+/rPglYbt84oLmYpwuli0z8FyEQRPIc3EtkfWIv/yYgDr2TZ0N2KvGfpi/MAUWgxI1gleC2uKgEOEtuJthd3XZjF2NoE7IBqjQOINybcJOjyeB5vRLDY1FLuxYzdg1y1etkV4XQig/vje

install:
  # Info for debugging.
  - echo %PATH%
  - go version
  - go env
  - go get -v -d -t ./...


# Provide a build script, or AppVeyor will call msbuild.
build_script:
  - go install -v ./...
  - echo %KEYFILE_CONTENTS% > %GCLOUD_TESTS_GOLANG_KEY%

test_script:
  - go test -v ./...
vendor/cloud.google.com/go/authexample_test.go (generated, vendored; new normal file, 49 lines)
@@ -0,0 +1,49 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cloud_test

import (
	"cloud.google.com/go/datastore"
	"golang.org/x/net/context"
	"google.golang.org/api/option"
)

func Example_applicationDefaultCredentials() {
	// Google Application Default Credentials is the recommended way to authorize
	// and authenticate clients.
	//
	// See the following link on how to create and obtain Application Default Credentials:
	// https://developers.google.com/identity/protocols/application-default-credentials.
	client, err := datastore.NewClient(context.Background(), "project-id")
	if err != nil {
		// TODO: handle error.
	}
	_ = client // Use the client.
}

func Example_serviceAccountFile() {
	// Use a JSON key file associated with a Google service account to
	// authenticate and authorize. Service Account keys can be created and
	// downloaded from https://console.developers.google.com/permissions/serviceaccounts.
	//
	// Note: This example uses the datastore client, but the same steps apply to
	// the other client libraries underneath this package.
	client, err := datastore.NewClient(context.Background(),
		"project-id", option.WithServiceAccountFile("/path/to/service-account-key.json"))
	if err != nil {
		// TODO: handle error.
	}
	_ = client // Use the client.
}
vendor/cloud.google.com/go/bigquery/bigquery.go (generated, vendored; new normal file, 76 lines)
@@ -0,0 +1,76 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

// TODO(mcgreevy): support dry-run mode when creating jobs.

import (
	"fmt"

	"google.golang.org/api/option"
	"google.golang.org/api/transport"

	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
)

const prodAddr = "https://www.googleapis.com/bigquery/v2/"

// ExternalData is a table which is stored outside of BigQuery. It is implemented by GCSReference.
type ExternalData interface {
	externalDataConfig() bq.ExternalDataConfiguration
}

const Scope = "https://www.googleapis.com/auth/bigquery"
const userAgent = "gcloud-golang-bigquery/20160429"

// Client may be used to perform BigQuery operations.
type Client struct {
	service   service
	projectID string
}

// NewClient constructs a new Client which can perform BigQuery operations.
// Operations performed via the client are billed to the specified GCP project.
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
	o := []option.ClientOption{
		option.WithEndpoint(prodAddr),
		option.WithScopes(Scope),
		option.WithUserAgent(userAgent),
	}
	o = append(o, opts...)
	httpClient, endpoint, err := transport.NewHTTPClient(ctx, o...)
	if err != nil {
		return nil, fmt.Errorf("dialing: %v", err)
	}

	s, err := newBigqueryService(httpClient, endpoint)
	if err != nil {
		return nil, fmt.Errorf("constructing bigquery client: %v", err)
	}

	c := &Client{
		service:   s,
		projectID: projectID,
	}
	return c, nil
}

// Close closes any resources held by the client.
// Close should be called when the client is no longer needed.
// It need not be called at program exit.
func (c *Client) Close() error {
	return nil
}
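Aside, not part of the commit: because `NewClient` appends caller options after its defaults, a caller-supplied option such as `option.WithEndpoint` overrides the built-in `prodAddr`. A minimal sketch of that, with a made-up project ID and endpoint URL:

```go
package main

import (
	"log"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	// Caller options come after the defaults, so this (made-up)
	// endpoint takes effect instead of the built-in prodAddr.
	client, err := bigquery.NewClient(ctx, "my-project-id",
		option.WithEndpoint("https://bigquery.example.com/bigquery/v2/"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close() // currently a no-op, per the Close implementation above
}
```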
vendor/cloud.google.com/go/bigquery/copy.go (generated, vendored; new normal file, 74 lines)
@@ -0,0 +1,74 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
)

// CopyConfig holds the configuration for a copy job.
type CopyConfig struct {
	// JobID is the ID to use for the copy job. If unset, a job ID will be automatically created.
	JobID string

	// Srcs are the tables from which data will be copied.
	Srcs []*Table

	// Dst is the table into which the data will be copied.
	Dst *Table

	// CreateDisposition specifies the circumstances under which the destination table will be created.
	// The default is CreateIfNeeded.
	CreateDisposition TableCreateDisposition

	// WriteDisposition specifies how existing data in the destination table is treated.
	// The default is WriteAppend.
	WriteDisposition TableWriteDisposition
}

// A Copier copies data into a BigQuery table from one or more BigQuery tables.
type Copier struct {
	CopyConfig
	c *Client
}

// CopierFrom returns a Copier which can be used to copy data into a
// BigQuery table from one or more BigQuery tables.
// The returned Copier may optionally be further configured before its Run method is called.
func (t *Table) CopierFrom(srcs ...*Table) *Copier {
	return &Copier{
		c: t.c,
		CopyConfig: CopyConfig{
			Srcs: srcs,
			Dst:  t,
		},
	}
}

// Run initiates a copy job.
func (c *Copier) Run(ctx context.Context) (*Job, error) {
	conf := &bq.JobConfigurationTableCopy{
		CreateDisposition: string(c.CreateDisposition),
		WriteDisposition:  string(c.WriteDisposition),
		DestinationTable:  c.Dst.tableRefProto(),
	}
	for _, t := range c.Srcs {
		conf.SourceTables = append(conf.SourceTables, t.tableRefProto())
	}
	job := &bq.Job{Configuration: &bq.JobConfiguration{Copy: conf}}
	setJobRef(job, c.JobID, c.c.projectID)
	return c.c.service.insertJob(ctx, c.c.projectID, &insertJobConf{job: job})
}
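Aside, not part of the commit: a minimal sketch of the configure-then-Run pattern that `CopierFrom` above is designed for. The project, dataset, and table names are illustrative assumptions.

```go
package main

import (
	"log"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "my-project-id") // hypothetical project
	if err != nil {
		log.Fatal(err)
	}

	src := client.Dataset("staging").Table("events_raw") // hypothetical tables
	dst := client.Dataset("prod").Table("events")

	// Configure the embedded CopyConfig before calling Run.
	copier := dst.CopierFrom(src)
	copier.WriteDisposition = bigquery.WriteTruncate
	copier.CreateDisposition = bigquery.CreateIfNeeded

	job, err := copier.Run(ctx)
	if err != nil {
		log.Fatal(err)
	}
	_ = job // in real code, poll the returned job for completion
}
```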
vendor/cloud.google.com/go/bigquery/copy_test.go (generated, vendored; new normal file, 136 lines)
@@ -0,0 +1,136 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"reflect"
	"testing"

	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
)

func defaultCopyJob() *bq.Job {
	return &bq.Job{
		Configuration: &bq.JobConfiguration{
			Copy: &bq.JobConfigurationTableCopy{
				DestinationTable: &bq.TableReference{
					ProjectId: "d-project-id",
					DatasetId: "d-dataset-id",
					TableId:   "d-table-id",
				},
				SourceTables: []*bq.TableReference{
					{
						ProjectId: "s-project-id",
						DatasetId: "s-dataset-id",
						TableId:   "s-table-id",
					},
				},
			},
		},
	}
}

func TestCopy(t *testing.T) {
	testCases := []struct {
		dst    *Table
		srcs   []*Table
		config CopyConfig
		want   *bq.Job
	}{
		{
			dst: &Table{
				ProjectID: "d-project-id",
				DatasetID: "d-dataset-id",
				TableID:   "d-table-id",
			},
			srcs: []*Table{
				{
					ProjectID: "s-project-id",
					DatasetID: "s-dataset-id",
					TableID:   "s-table-id",
				},
			},
			want: defaultCopyJob(),
		},
		{
			dst: &Table{
				ProjectID: "d-project-id",
				DatasetID: "d-dataset-id",
				TableID:   "d-table-id",
			},
			srcs: []*Table{
				{
					ProjectID: "s-project-id",
					DatasetID: "s-dataset-id",
					TableID:   "s-table-id",
				},
			},
			config: CopyConfig{
				CreateDisposition: CreateNever,
				WriteDisposition:  WriteTruncate,
			},
			want: func() *bq.Job {
				j := defaultCopyJob()
				j.Configuration.Copy.CreateDisposition = "CREATE_NEVER"
				j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE"
				return j
			}(),
		},
		{
			dst: &Table{
				ProjectID: "d-project-id",
				DatasetID: "d-dataset-id",
				TableID:   "d-table-id",
			},
			srcs: []*Table{
				{
					ProjectID: "s-project-id",
					DatasetID: "s-dataset-id",
					TableID:   "s-table-id",
				},
			},
			config: CopyConfig{JobID: "job-id"},
			want: func() *bq.Job {
				j := defaultCopyJob()
				j.JobReference = &bq.JobReference{
					JobId:     "job-id",
					ProjectId: "client-project-id",
				}
				return j
			}(),
		},
	}

	for _, tc := range testCases {
		s := &testService{}
		c := &Client{
			service:   s,
			projectID: "client-project-id",
		}
		tc.dst.c = c
		copier := tc.dst.CopierFrom(tc.srcs...)
		tc.config.Srcs = tc.srcs
		tc.config.Dst = tc.dst
		copier.CopyConfig = tc.config
		if _, err := copier.Run(context.Background()); err != nil {
			t.Errorf("err calling Run: %v", err)
			continue
		}
		if !reflect.DeepEqual(s.Job, tc.want) {
			t.Errorf("copying: got:\n%v\nwant:\n%v", s.Job, tc.want)
		}
	}
}
vendor/cloud.google.com/go/bigquery/create_table_test.go (generated, vendored; new normal file, 103 lines)
@@ -0,0 +1,103 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"reflect"
	"testing"
	"time"

	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
)

type createTableRecorder struct {
	conf *createTableConf
	service
}

func (rec *createTableRecorder) createTable(ctx context.Context, conf *createTableConf) error {
	rec.conf = conf
	return nil
}

func TestCreateTableOptions(t *testing.T) {
	s := &createTableRecorder{}
	c := &Client{
		projectID: "p",
		service:   s,
	}
	ds := c.Dataset("d")
	table := ds.Table("t")
	exp := time.Now()
	q := "query"
	if err := table.Create(context.Background(), TableExpiration(exp), ViewQuery(q), UseStandardSQL()); err != nil {
		t.Fatalf("err calling Table.Create: %v", err)
	}
	want := createTableConf{
		projectID:      "p",
		datasetID:      "d",
		tableID:        "t",
		expiration:     exp,
		viewQuery:      q,
		useStandardSQL: true,
	}
	if !reflect.DeepEqual(*s.conf, want) {
		t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want)
	}

	sc := Schema{fieldSchema("desc", "name", "STRING", false, true)}
	if err := table.Create(context.Background(), TableExpiration(exp), sc); err != nil {
		t.Fatalf("err calling Table.Create: %v", err)
	}
	want = createTableConf{
		projectID:  "p",
		datasetID:  "d",
		tableID:    "t",
		expiration: exp,
		// No need for an elaborate schema, that is tested in schema_test.go.
		schema: &bq.TableSchema{
			Fields: []*bq.TableFieldSchema{
				bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
			},
		},
	}
	if !reflect.DeepEqual(*s.conf, want) {
		t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want)
	}

	partitionCases := []struct {
		timePartitioning   TimePartitioning
		expectedExpiration time.Duration
	}{
		{TimePartitioning{}, time.Duration(0)},
		{TimePartitioning{time.Second}, time.Second},
	}

	for _, c := range partitionCases {
		if err := table.Create(context.Background(), c.timePartitioning); err != nil {
			t.Fatalf("err calling Table.Create: %v", err)
		}
		want = createTableConf{
			projectID:        "p",
			datasetID:        "d",
			tableID:          "t",
			timePartitioning: &TimePartitioning{c.expectedExpiration},
		}
		if !reflect.DeepEqual(*s.conf, want) {
			t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want)
		}
	}
}
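Aside, not part of the commit: the test above exercises `Table.Create` with the `TableExpiration`, `ViewQuery`, and `UseStandardSQL` options. A minimal usage sketch of that same option combination, with hypothetical project, dataset, and table IDs and a placeholder query:

```go
package main

import (
	"log"
	"time"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "my-project-id") // hypothetical project
	if err != nil {
		log.Fatal(err)
	}

	table := client.Dataset("analytics").Table("weekly_view") // hypothetical IDs

	// Create a view that expires in a week, evaluated with standard SQL.
	err = table.Create(ctx,
		bigquery.TableExpiration(time.Now().Add(7*24*time.Hour)),
		bigquery.ViewQuery("SELECT 1"), // placeholder query
		bigquery.UseStandardSQL())
	if err != nil {
		log.Fatal(err)
	}
}
```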
188
vendor/cloud.google.com/go/bigquery/dataset.go
generated
vendored
Normal file
188
vendor/cloud.google.com/go/bigquery/dataset.go
generated
vendored
Normal file
@ -0,0 +1,188 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"time"

	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
)

// Dataset is a reference to a BigQuery dataset.
type Dataset struct {
	ProjectID string
	DatasetID string
	c         *Client
}

type DatasetMetadata struct {
	CreationTime           time.Time
	LastModifiedTime       time.Time // When the dataset or any of its tables were modified.
	DefaultTableExpiration time.Duration
	Description            string // The user-friendly description of this table.
	Name                   string // The user-friendly name for this table.
	ID                     string
	Location               string            // The geo location of the dataset.
	Labels                 map[string]string // User-provided labels.
	// TODO(jba): access rules
}

// Dataset creates a handle to a BigQuery dataset in the client's project.
func (c *Client) Dataset(id string) *Dataset {
	return c.DatasetInProject(c.projectID, id)
}

// DatasetInProject creates a handle to a BigQuery dataset in the specified project.
func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset {
	return &Dataset{
		ProjectID: projectID,
		DatasetID: datasetID,
		c:         c,
	}
}

// Create creates a dataset in the BigQuery service. An error will be returned
// if the dataset already exists.
func (d *Dataset) Create(ctx context.Context) error {
	return d.c.service.insertDataset(ctx, d.DatasetID, d.ProjectID)
}

// Delete deletes the dataset.
func (d *Dataset) Delete(ctx context.Context) error {
	return d.c.service.deleteDataset(ctx, d.DatasetID, d.ProjectID)
}

// Metadata fetches the metadata for the dataset.
func (d *Dataset) Metadata(ctx context.Context) (*DatasetMetadata, error) {
	return d.c.service.getDatasetMetadata(ctx, d.ProjectID, d.DatasetID)
}

// Table creates a handle to a BigQuery table in the dataset.
// To determine if a table exists, call Table.Metadata.
// If the table does not already exist, use Table.Create to create it.
func (d *Dataset) Table(tableID string) *Table {
	return &Table{ProjectID: d.ProjectID, DatasetID: d.DatasetID, TableID: tableID, c: d.c}
}

// Tables returns an iterator over the tables in the Dataset.
func (d *Dataset) Tables(ctx context.Context) *TableIterator {
	it := &TableIterator{
		ctx:     ctx,
		dataset: d,
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
		it.fetch,
		func() int { return len(it.tables) },
		func() interface{} { b := it.tables; it.tables = nil; return b })
	return it
}

// A TableIterator is an iterator over Tables.
type TableIterator struct {
	ctx      context.Context
	dataset  *Dataset
	tables   []*Table
	pageInfo *iterator.PageInfo
	nextFunc func() error
}

// Next returns the next result. Its second return value is Done if there are
// no more results. Once Next returns Done, all subsequent calls will return
// Done.
func (it *TableIterator) Next() (*Table, error) {
	if err := it.nextFunc(); err != nil {
		return nil, err
	}
	t := it.tables[0]
	it.tables = it.tables[1:]
	return t, nil
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TableIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

func (it *TableIterator) fetch(pageSize int, pageToken string) (string, error) {
	tables, tok, err := it.dataset.c.service.listTables(it.ctx, it.dataset.ProjectID, it.dataset.DatasetID, pageSize, pageToken)
	if err != nil {
		return "", err
	}
	for _, t := range tables {
		t.c = it.dataset.c
		it.tables = append(it.tables, t)
	}
	return tok, nil
}

// Datasets returns an iterator over the datasets in the Client's project.
func (c *Client) Datasets(ctx context.Context) *DatasetIterator {
	return c.DatasetsInProject(ctx, c.projectID)
}

// DatasetsInProject returns an iterator over the datasets in the provided project.
func (c *Client) DatasetsInProject(ctx context.Context, projectID string) *DatasetIterator {
	it := &DatasetIterator{
		ctx:       ctx,
		c:         c,
		projectID: projectID,
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
		it.fetch,
		func() int { return len(it.items) },
		func() interface{} { b := it.items; it.items = nil; return b })
	return it
}

// DatasetIterator iterates over the datasets in a project.
type DatasetIterator struct {
	// ListHidden causes hidden datasets to be listed when set to true.
	ListHidden bool

	// Filter restricts the datasets returned by label. The filter syntax is described in
	// https://cloud.google.com/bigquery/docs/labeling-datasets#filtering_datasets_using_labels
	Filter string

	ctx       context.Context
	projectID string
	c         *Client
	pageInfo  *iterator.PageInfo
	nextFunc  func() error
	items     []*Dataset
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *DatasetIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

func (it *DatasetIterator) Next() (*Dataset, error) {
	if err := it.nextFunc(); err != nil {
		return nil, err
	}
	item := it.items[0]
	it.items = it.items[1:]
	return item, nil
}

func (it *DatasetIterator) fetch(pageSize int, pageToken string) (string, error) {
	datasets, nextPageToken, err := it.c.service.listDatasets(it.ctx, it.projectID,
		pageSize, pageToken, it.ListHidden, it.Filter)
	if err != nil {
		return "", err
	}
	for _, d := range datasets {
		d.c = it.c
		it.items = append(it.items, d)
	}
	return nextPageToken, nil
}
156 vendor/cloud.google.com/go/bigquery/dataset_test.go generated vendored Normal file
@@ -0,0 +1,156 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"errors"
	"strconv"
	"testing"

	"golang.org/x/net/context"
	itest "google.golang.org/api/iterator/testing"
)

// listTablesServiceStub services listTables requests by returning tables from an in-memory list.
type listTablesServiceStub struct {
	expectedProject, expectedDataset string
	tables                           []*Table
	service
}

func (s *listTablesServiceStub) listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error) {
	if projectID != s.expectedProject {
		return nil, "", errors.New("wrong project id")
	}
	if datasetID != s.expectedDataset {
		return nil, "", errors.New("wrong dataset id")
	}
	const maxPageSize = 2
	if pageSize <= 0 || pageSize > maxPageSize {
		pageSize = maxPageSize
	}
	start := 0
	if pageToken != "" {
		var err error
		start, err = strconv.Atoi(pageToken)
		if err != nil {
			return nil, "", err
		}
	}
	end := start + pageSize
	if end > len(s.tables) {
		end = len(s.tables)
	}
	nextPageToken := ""
	if end < len(s.tables) {
		nextPageToken = strconv.Itoa(end)
	}
	return s.tables[start:end], nextPageToken, nil
}

func TestTables(t *testing.T) {
	t1 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t1"}
	t2 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t2"}
	t3 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t3"}
	allTables := []*Table{t1, t2, t3}
	c := &Client{
		service: &listTablesServiceStub{
			expectedProject: "x",
			expectedDataset: "y",
			tables:          allTables,
		},
		projectID: "x",
	}
	msg, ok := itest.TestIterator(allTables,
		func() interface{} { return c.Dataset("y").Tables(context.Background()) },
		func(it interface{}) (interface{}, error) { return it.(*TableIterator).Next() })
	if !ok {
		t.Error(msg)
	}
}

type listDatasetsFake struct {
	service

	projectID string
	datasets  []*Dataset
	hidden    map[*Dataset]bool
}

func (df *listDatasetsFake) listDatasets(_ context.Context, projectID string, pageSize int, pageToken string, listHidden bool, filter string) ([]*Dataset, string, error) {
	const maxPageSize = 2
	if pageSize <= 0 || pageSize > maxPageSize {
		pageSize = maxPageSize
	}
	if filter != "" {
		return nil, "", errors.New("filter not supported")
	}
	if projectID != df.projectID {
		return nil, "", errors.New("bad project ID")
	}
	start := 0
	if pageToken != "" {
		var err error
		start, err = strconv.Atoi(pageToken)
		if err != nil {
			return nil, "", err
		}
	}
	var (
		i             int
		result        []*Dataset
		nextPageToken string
	)
	for i = start; len(result) < pageSize && i < len(df.datasets); i++ {
		if df.hidden[df.datasets[i]] && !listHidden {
			continue
		}
		result = append(result, df.datasets[i])
	}
	if i < len(df.datasets) {
		nextPageToken = strconv.Itoa(i)
	}
	return result, nextPageToken, nil
}

func TestDatasets(t *testing.T) {
	service := &listDatasetsFake{projectID: "p"}
	client := &Client{service: service}
	datasets := []*Dataset{
		{"p", "a", client},
		{"p", "b", client},
		{"p", "hidden", client},
		{"p", "c", client},
	}
	service.datasets = datasets
	service.hidden = map[*Dataset]bool{datasets[2]: true}
	c := &Client{
		projectID: "p",
		service:   service,
	}
	msg, ok := itest.TestIterator(datasets,
		func() interface{} { it := c.Datasets(context.Background()); it.ListHidden = true; return it },
		func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() })
	if !ok {
		t.Fatalf("ListHidden=true: %s", msg)
	}

	msg, ok = itest.TestIterator([]*Dataset{datasets[0], datasets[1], datasets[3]},
		func() interface{} { it := c.Datasets(context.Background()); it.ListHidden = false; return it },
		func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() })
	if !ok {
		t.Fatalf("ListHidden=false: %s", msg)
	}
}
295 vendor/cloud.google.com/go/bigquery/doc.go generated vendored Normal file
@@ -0,0 +1,295 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
Package bigquery provides a client for the BigQuery service.

Note: This package is in beta. Some backwards-incompatible changes may occur.

The following assumes a basic familiarity with BigQuery concepts.
See https://cloud.google.com/bigquery/docs.


Creating a Client

To start working with this package, create a client:

    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, projectID)
    if err != nil {
        // TODO: Handle error.
    }

Querying

To query existing tables, create a Query and call its Read method:

    q := client.Query(`
        SELECT year, SUM(number) as num
        FROM [bigquery-public-data:usa_names.usa_1910_2013]
        WHERE name = "William"
        GROUP BY year
        ORDER BY year
    `)
    it, err := q.Read(ctx)
    if err != nil {
        // TODO: Handle error.
    }

Then iterate through the resulting rows. You can store a row using
anything that implements the ValueLoader interface, or with a slice or map of bigquery.Value.
A slice is simplest:

    for {
        var values []bigquery.Value
        err := it.Next(&values)
        if err == iterator.Done {
            break
        }
        if err != nil {
            // TODO: Handle error.
        }
        fmt.Println(values)
    }

You can also use a struct whose exported fields match the query:

    type Count struct {
        Year int
        Num  int
    }
    for {
        var c Count
        err := it.Next(&c)
        if err == iterator.Done {
            break
        }
        if err != nil {
            // TODO: Handle error.
        }
        fmt.Println(c)
    }

You can also start the query running and get the results later.
Create the query as above, but call Run instead of Read. This returns a Job,
which represents an asynchronous operation.

    job, err := q.Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }

Get the job's ID, a printable string. You can save this string to retrieve
the results at a later time, even in another process.

    jobID := job.ID()
    fmt.Printf("The job ID is %s\n", jobID)

To retrieve the job's results from the ID, first look up the Job:

    job, err = client.JobFromID(ctx, jobID)
    if err != nil {
        // TODO: Handle error.
    }

Use the Job.Read method to obtain an iterator, and loop over the rows.
Query.Read is just a convenience method that combines Query.Run and Job.Read.

    it, err = job.Read(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    // Proceed with iteration as above.

Datasets and Tables

You can refer to datasets in the client's project with the Dataset method, and
in other projects with the DatasetInProject method:

    myDataset := client.Dataset("my_dataset")
    yourDataset := client.DatasetInProject("your-project-id", "your_dataset")

These methods create references to datasets, not the datasets themselves. You can have
a dataset reference even if the dataset doesn't exist yet. Use Dataset.Create to
create a dataset from a reference:

    if err := myDataset.Create(ctx); err != nil {
        // TODO: Handle error.
    }

You can refer to tables with Dataset.Table. Like bigquery.Dataset, bigquery.Table is a reference
to an object in BigQuery that may or may not exist.

    table := myDataset.Table("my_table")

You can create, delete and update the metadata of tables with methods on Table.
Table.Create supports a few options. For instance, you could create a temporary table with:

    err = myDataset.Table("temp").Create(ctx, bigquery.TableExpiration(time.Now().Add(1*time.Hour)))
    if err != nil {
        // TODO: Handle error.
    }

We'll see how to create a table with a schema in the next section.

Schemas

There are two ways to construct schemas with this package.
You can build a schema by hand, like so:

    schema1 := bigquery.Schema{
        &bigquery.FieldSchema{Name: "Name", Required: true, Type: bigquery.StringFieldType},
        &bigquery.FieldSchema{Name: "Grades", Repeated: true, Type: bigquery.IntegerFieldType},
    }

Or you can infer the schema from a struct:

    type student struct {
        Name   string
        Grades []int
    }
    schema2, err := bigquery.InferSchema(student{})
    if err != nil {
        // TODO: Handle error.
    }
    // schema1 and schema2 are identical.

Struct inference supports tags like those of the encoding/json package,
so you can change names or ignore fields:

    type student2 struct {
        Name   string `bigquery:"full_name"`
        Grades []int
        Secret string `bigquery:"-"`
    }
    schema3, err := bigquery.InferSchema(student2{})
    if err != nil {
        // TODO: Handle error.
    }
    // schema3 has fields "full_name" and "Grades".

Having constructed a schema, you can pass it to Table.Create as an option:

    if err := table.Create(ctx, schema1); err != nil {
        // TODO: Handle error.
    }

Copying

You can copy one or more tables to another table. Begin by constructing a Copier
describing the copy. Then set any desired copy options, and finally call Run to get a Job:

    copier := myDataset.Table("dest").CopierFrom(myDataset.Table("src"))
    copier.WriteDisposition = bigquery.WriteTruncate
    job, err = copier.Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }

You can chain the call to Run if you don't want to set options:

    job, err = myDataset.Table("dest").CopierFrom(myDataset.Table("src")).Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }

You can wait for your job to complete:

    status, err := job.Wait(ctx)
    if err != nil {
        // TODO: Handle error.
    }

Job.Wait polls with exponential backoff. You can also poll yourself, if you
wish:

    for {
        status, err := job.Status(ctx)
        if err != nil {
            // TODO: Handle error.
        }
        if status.Done() {
            if status.Err() != nil {
                log.Fatalf("Job failed with error %v", status.Err())
            }
            break
        }
        time.Sleep(pollInterval)
    }

Loading and Uploading

There are two ways to populate a table with this package: load the data from a Google Cloud Storage
object, or upload rows directly from your program.

For loading, first create a GCSReference, configuring it if desired. Then make a Loader, optionally configure
it as well, and call its Run method.

    gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
    gcsRef.AllowJaggedRows = true
    loader := myDataset.Table("dest").LoaderFrom(gcsRef)
    loader.CreateDisposition = bigquery.CreateNever
    job, err = loader.Run(ctx)
    // Poll the job for completion if desired, as above.

To upload, first define a type that implements the ValueSaver interface, which has a single method named Save.
Then create an Uploader, and call its Put method with a slice of values.

    u := table.Uploader()
    // Item implements the ValueSaver interface.
    items := []*Item{
        {Name: "n1", Size: 32.6, Count: 7},
        {Name: "n2", Size: 4, Count: 2},
        {Name: "n3", Size: 101.5, Count: 1},
    }
    if err := u.Put(ctx, items); err != nil {
        // TODO: Handle error.
    }

You can also upload a struct that doesn't implement ValueSaver. Use the StructSaver type
to specify the schema and insert ID by hand, or just supply the struct or struct pointer
directly and the schema will be inferred:

    type Item2 struct {
        Name  string
        Size  float64
        Count int
    }
    // Item2 need not implement ValueSaver; its schema is inferred.
    items2 := []*Item2{
        {Name: "n1", Size: 32.6, Count: 7},
        {Name: "n2", Size: 4, Count: 2},
        {Name: "n3", Size: 101.5, Count: 1},
    }
    if err := u.Put(ctx, items2); err != nil {
        // TODO: Handle error.
    }

Extracting

If you've been following so far, extracting data from a BigQuery table
into a Google Cloud Storage object will feel familiar. First create an
Extractor, then optionally configure it, and lastly call its Run method.

    extractor := table.ExtractorTo(gcsRef)
    extractor.DisableHeader = true
    job, err = extractor.Run(ctx)
    // Poll the job for completion if desired, as above.

Authentication

See examples of authorization and authentication at
https://godoc.org/cloud.google.com/go#pkg-examples.
*/
package bigquery // import "cloud.google.com/go/bigquery"
82 vendor/cloud.google.com/go/bigquery/error.go generated vendored Normal file
@@ -0,0 +1,82 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"fmt"

	bq "google.golang.org/api/bigquery/v2"
)

// An Error contains detailed information about a failed bigquery operation.
type Error struct {
	// Mirrors bq.ErrorProto, but drops DebugInfo
	Location, Message, Reason string
}

func (e Error) Error() string {
	return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason)
}

func errorFromErrorProto(ep *bq.ErrorProto) *Error {
	if ep == nil {
		return nil
	}
	return &Error{
		Location: ep.Location,
		Message:  ep.Message,
		Reason:   ep.Reason,
	}
}

// A MultiError contains multiple related errors.
type MultiError []error

func (m MultiError) Error() string {
	switch len(m) {
	case 0:
		return "(0 errors)"
	case 1:
		return m[0].Error()
	case 2:
		return m[0].Error() + " (and 1 other error)"
	}
	return fmt.Sprintf("%s (and %d other errors)", m[0].Error(), len(m)-1)
}

// RowInsertionError contains all errors that occurred when attempting to insert a row.
type RowInsertionError struct {
	InsertID string // The InsertID associated with the affected row.
	RowIndex int    // The 0-based index of the affected row in the batch of rows being inserted.
	Errors   MultiError
}

func (e *RowInsertionError) Error() string {
	errFmt := "insertion of row [insertID: %q; insertIndex: %v] failed with error: %s"
	return fmt.Sprintf(errFmt, e.InsertID, e.RowIndex, e.Errors.Error())
}

// PutMultiError contains an error for each row which was not successfully inserted
// into a BigQuery table.
type PutMultiError []RowInsertionError

func (pme PutMultiError) Error() string {
	plural := "s"
	if len(pme) == 1 {
		plural = ""
	}

	return fmt.Sprintf("%v row insertion%s failed", len(pme), plural)
}
109 vendor/cloud.google.com/go/bigquery/error_test.go generated vendored Normal file
@@ -0,0 +1,109 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"errors"
	"reflect"
	"strings"
	"testing"

	bq "google.golang.org/api/bigquery/v2"
)

func rowInsertionError(msg string) RowInsertionError {
	return RowInsertionError{Errors: []error{errors.New(msg)}}
}

func TestPutMultiErrorString(t *testing.T) {
	testCases := []struct {
		errs PutMultiError
		want string
	}{
		{
			errs: PutMultiError{},
			want: "0 row insertions failed",
		},
		{
			errs: PutMultiError{rowInsertionError("a")},
			want: "1 row insertion failed",
		},
		{
			errs: PutMultiError{rowInsertionError("a"), rowInsertionError("b")},
			want: "2 row insertions failed",
		},
	}

	for _, tc := range testCases {
		if tc.errs.Error() != tc.want {
			t.Errorf("PutMultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want)
		}
	}
}

func TestMultiErrorString(t *testing.T) {
	testCases := []struct {
		errs MultiError
		want string
	}{
		{
			errs: MultiError{},
			want: "(0 errors)",
		},
		{
			errs: MultiError{errors.New("a")},
			want: "a",
		},
		{
			errs: MultiError{errors.New("a"), errors.New("b")},
			want: "a (and 1 other error)",
		},
		{
			errs: MultiError{errors.New("a"), errors.New("b"), errors.New("c")},
			want: "a (and 2 other errors)",
		},
	}

	for _, tc := range testCases {
		if tc.errs.Error() != tc.want {
			t.Errorf("MultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want)
		}
	}
}

func TestErrorFromErrorProto(t *testing.T) {
	for _, test := range []struct {
		in   *bq.ErrorProto
		want *Error
	}{
		{nil, nil},
		{
			in:   &bq.ErrorProto{Location: "L", Message: "M", Reason: "R"},
			want: &Error{Location: "L", Message: "M", Reason: "R"},
		},
	} {
		if got := errorFromErrorProto(test.in); !reflect.DeepEqual(got, test.want) {
			t.Errorf("%v: got %v, want %v", test.in, got, test.want)
		}
	}
}

func TestErrorString(t *testing.T) {
	e := &Error{Location: "<L>", Message: "<M>", Reason: "<R>"}
	got := e.Error()
	if !strings.Contains(got, "<L>") || !strings.Contains(got, "<M>") || !strings.Contains(got, "<R>") {
		t.Errorf(`got %q, expected to see "<L>", "<M>" and "<R>"`, got)
	}
}
652 vendor/cloud.google.com/go/bigquery/examples_test.go generated vendored Normal file
@@ -0,0 +1,652 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery_test

import (
	"fmt"
	"os"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
)

func ExampleNewClient() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	_ = client // TODO: Use client.
}

func ExampleClient_Dataset() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ds := client.Dataset("my_dataset")
	fmt.Println(ds)
}

func ExampleClient_DatasetInProject() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ds := client.DatasetInProject("their-project-id", "their-dataset")
	fmt.Println(ds)
}

func ExampleClient_Datasets() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Datasets(ctx)
	_ = it // TODO: iterate using Next or iterator.Pager.
}

func ExampleClient_DatasetsInProject() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.DatasetsInProject(ctx, "their-project-id")
	_ = it // TODO: iterate using Next or iterator.Pager.
}

func getJobID() string { return "" }

func ExampleClient_JobFromID() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	jobID := getJobID() // Get a job ID using Job.ID, the console or elsewhere.
	job, err := client.JobFromID(ctx, jobID)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(job)
}

func ExampleNewGCSReference() {
	gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
	fmt.Println(gcsRef)
}

func ExampleClient_Query() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	q := client.Query("select name, num from t1")
	q.DefaultProjectID = "project-id"
	// TODO: set other options on the Query.
	// TODO: Call Query.Run or Query.Read.
}

func ExampleClient_Query_parameters() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	q := client.Query("select num from t1 where name = @user")
	q.Parameters = []bigquery.QueryParameter{
		{Name: "user", Value: "Elizabeth"},
	}
	// TODO: set other options on the Query.
	// TODO: Call Query.Run or Query.Read.
}

func ExampleQuery_Read() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	q := client.Query("select name, num from t1")
	it, err := q.Read(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	_ = it // TODO: iterate using Next or iterator.Pager.
}

func ExampleRowIterator_Next() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	q := client.Query("select name, num from t1")
	it, err := q.Read(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	for {
		var row []bigquery.Value
		err := it.Next(&row)
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(row)
	}
}

func ExampleRowIterator_Next_struct() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}

	type score struct {
		Name string
		Num  int
	}

	q := client.Query("select name, num from t1")
	it, err := q.Read(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	for {
		var s score
		err := it.Next(&s)
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(s)
	}
}

func ExampleJob_Read() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	q := client.Query("select name, num from t1")
	// Call Query.Run to get a Job, then call Read on the job.
	// Note: Query.Read is a shorthand for this.
	job, err := q.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	it, err := job.Read(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	_ = it // TODO: iterate using Next or iterator.Pager.
}

func ExampleJob_Wait() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ds := client.Dataset("my_dataset")
	job, err := ds.Table("t1").CopierFrom(ds.Table("t2")).Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	status, err := job.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	if status.Err() != nil {
		// TODO: Handle error.
	}
}

func ExampleDataset_Create() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	if err := client.Dataset("my_dataset").Create(ctx); err != nil {
		// TODO: Handle error.
	}
}

func ExampleDataset_Delete() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	if err := client.Dataset("my_dataset").Delete(ctx); err != nil {
		// TODO: Handle error.
	}
}

func ExampleDataset_Metadata() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	md, err := client.Dataset("my_dataset").Metadata(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(md)
}

func ExampleDataset_Table() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	// Table creates a reference to the table. It does not create the actual
	// table in BigQuery; to do so, use Table.Create.
	t := client.Dataset("my_dataset").Table("my_table")
	fmt.Println(t)
}

func ExampleDataset_Tables() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Dataset("my_dataset").Tables(ctx)
	_ = it // TODO: iterate using Next or iterator.Pager.
}

func ExampleDatasetIterator_Next() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Datasets(ctx)
	for {
		ds, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(ds)
	}
}

func ExampleInferSchema() {
	type Item struct {
		Name  string
		Size  float64
		Count int
	}
	schema, err := bigquery.InferSchema(Item{})
	if err != nil {
		fmt.Println(err)
		// TODO: Handle error.
	}
	for _, fs := range schema {
		fmt.Println(fs.Name, fs.Type)
	}
	// Output:
	// Name STRING
	// Size FLOAT
	// Count INTEGER
}

func ExampleInferSchema_tags() {
	type Item struct {
		Name   string
		Size   float64
		Count  int    `bigquery:"number"`
		Secret []byte `bigquery:"-"`
	}
	schema, err := bigquery.InferSchema(Item{})
	if err != nil {
		fmt.Println(err)
		// TODO: Handle error.
	}
	for _, fs := range schema {
		fmt.Println(fs.Name, fs.Type)
	}
	// Output:
	// Name STRING
	// Size FLOAT
	// number INTEGER
}

func ExampleTable_Create() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	t := client.Dataset("my_dataset").Table("new-table")
	if err := t.Create(ctx); err != nil {
		// TODO: Handle error.
	}
}

func ExampleTable_Create_schema() {
	ctx := context.Background()
	// Infer table schema from a Go type.
	schema, err := bigquery.InferSchema(Item{})
	if err != nil {
		// TODO: Handle error.
	}
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	t := client.Dataset("my_dataset").Table("new-table")
	if err := t.Create(ctx, schema); err != nil {
		// TODO: Handle error.
	}
}

func ExampleTable_Delete() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	if err := client.Dataset("my_dataset").Table("my_table").Delete(ctx); err != nil {
		// TODO: Handle error.
	}
}

func ExampleTable_Metadata() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	md, err := client.Dataset("my_dataset").Table("my_table").Metadata(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(md)
}

func ExampleTable_Uploader() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	u := client.Dataset("my_dataset").Table("my_table").Uploader()
	_ = u // TODO: Use u.
}

func ExampleTable_Uploader_options() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	u := client.Dataset("my_dataset").Table("my_table").Uploader()
	u.SkipInvalidRows = true
	u.IgnoreUnknownValues = true
	_ = u // TODO: Use u.
}

func ExampleTable_CopierFrom() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ds := client.Dataset("my_dataset")
	c := ds.Table("combined").CopierFrom(ds.Table("t1"), ds.Table("t2"))
	c.WriteDisposition = bigquery.WriteTruncate
	// TODO: set other options on the Copier.
	job, err := c.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	status, err := job.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	if status.Err() != nil {
		// TODO: Handle error.
	}
}

func ExampleTable_ExtractorTo() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
	gcsRef.FieldDelimiter = ":"
	// TODO: set other options on the GCSReference.
	ds := client.Dataset("my_dataset")
	extractor := ds.Table("my_table").ExtractorTo(gcsRef)
	extractor.DisableHeader = true
	// TODO: set other options on the Extractor.
	job, err := extractor.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	status, err := job.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	if status.Err() != nil {
		// TODO: Handle error.
	}
}

func ExampleTable_LoaderFrom() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
	gcsRef.AllowJaggedRows = true
	// TODO: set other options on the GCSReference.
	ds := client.Dataset("my_dataset")
	loader := ds.Table("my_table").LoaderFrom(gcsRef)
	loader.CreateDisposition = bigquery.CreateNever
	// TODO: set other options on the Loader.
	job, err := loader.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	status, err := job.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	if status.Err() != nil {
		// TODO: Handle error.
	}
}

func ExampleTable_LoaderFrom_reader() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	f, err := os.Open("data.csv")
	if err != nil {
		// TODO: Handle error.
	}
	rs := bigquery.NewReaderSource(f)
	rs.AllowJaggedRows = true
	// TODO: set other options on the ReaderSource.
	ds := client.Dataset("my_dataset")
	loader := ds.Table("my_table").LoaderFrom(rs)
	loader.CreateDisposition = bigquery.CreateNever
	// TODO: set other options on the Loader.
	job, err := loader.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	status, err := job.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	if status.Err() != nil {
		// TODO: Handle error.
	}
}

func ExampleTable_Read() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Dataset("my_dataset").Table("my_table").Read(ctx)
	_ = it // TODO: iterate using Next or iterator.Pager.
}

func ExampleTable_Update() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	t := client.Dataset("my_dataset").Table("my_table")
	tm, err := t.Update(ctx, bigquery.TableMetadataToUpdate{
		Description: "my favorite table",
	})
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(tm)
}

func ExampleTableIterator_Next() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Dataset("my_dataset").Tables(ctx)
	for {
		t, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(t)
	}
}

type Item struct {
	Name  string
	Size  float64
	Count int
}

// Save implements the ValueSaver interface.
func (i *Item) Save() (map[string]bigquery.Value, string, error) {
	return map[string]bigquery.Value{
		"Name":  i.Name,
		"Size":  i.Size,
		"Count": i.Count,
	}, "", nil
}

func ExampleUploader_Put() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	u := client.Dataset("my_dataset").Table("my_table").Uploader()
	// Item implements the ValueSaver interface.
	items := []*Item{
		{Name: "n1", Size: 32.6, Count: 7},
		{Name: "n2", Size: 4, Count: 2},
		{Name: "n3", Size: 101.5, Count: 1},
	}
	if err := u.Put(ctx, items); err != nil {
		// TODO: Handle error.
	}
}

var schema bigquery.Schema

func ExampleUploader_Put_structSaver() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	u := client.Dataset("my_dataset").Table("my_table").Uploader()

	type score struct {
		Name string
		Num  int
	}

	// Assume schema holds the table's schema.
	savers := []*bigquery.StructSaver{
		{Struct: score{Name: "n1", Num: 12}, Schema: schema, InsertID: "id1"},
		{Struct: score{Name: "n2", Num: 31}, Schema: schema, InsertID: "id2"},
		{Struct: score{Name: "n3", Num: 7}, Schema: schema, InsertID: "id3"},
	}
	if err := u.Put(ctx, savers); err != nil {
		// TODO: Handle error.
	}
}

func ExampleUploader_Put_struct() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	u := client.Dataset("my_dataset").Table("my_table").Uploader()

	type score struct {
		Name string
		Num  int
	}
	scores := []score{
		{Name: "n1", Num: 12},
		{Name: "n2", Num: 31},
		{Name: "n3", Num: 7},
	}
	// Schema is inferred from the score type.
	if err := u.Put(ctx, scores); err != nil {
		// TODO: Handle error.
	}
}
76 vendor/cloud.google.com/go/bigquery/extract.go generated vendored Normal file
@@ -0,0 +1,76 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
)

// ExtractConfig holds the configuration for an extract job.
type ExtractConfig struct {
	// JobID is the ID to use for the extract job. If empty, a job ID will be automatically created.
	JobID string

	// Src is the table from which data will be extracted.
	Src *Table

	// Dst is the destination into which the data will be extracted.
	Dst *GCSReference

	// DisableHeader disables the printing of a header row in exported data.
	DisableHeader bool
}

// An Extractor extracts data from a BigQuery table into Google Cloud Storage.
type Extractor struct {
	ExtractConfig
	c *Client
}

// ExtractorTo returns an Extractor which can be used to extract data from a
// BigQuery table into Google Cloud Storage.
// The returned Extractor may optionally be further configured before its Run method is called.
func (t *Table) ExtractorTo(dst *GCSReference) *Extractor {
	return &Extractor{
		c: t.c,
		ExtractConfig: ExtractConfig{
			Src: t,
			Dst: dst,
		},
	}
}

// Run initiates an extract job.
func (e *Extractor) Run(ctx context.Context) (*Job, error) {
	conf := &bq.JobConfigurationExtract{}
	job := &bq.Job{Configuration: &bq.JobConfiguration{Extract: conf}}

	setJobRef(job, e.JobID, e.c.projectID)

	conf.DestinationUris = append([]string{}, e.Dst.uris...)
	conf.Compression = string(e.Dst.Compression)
	conf.DestinationFormat = string(e.Dst.DestinationFormat)
	conf.FieldDelimiter = e.Dst.FieldDelimiter

	conf.SourceTable = e.Src.tableRefProto()

	if e.DisableHeader {
		f := false
		conf.PrintHeader = &f
	}

	return e.c.service.insertJob(ctx, e.c.projectID, &insertJobConf{job: job})
}
102 vendor/cloud.google.com/go/bigquery/extract_test.go generated vendored Normal file
@@ -0,0 +1,102 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"reflect"
	"testing"

	"golang.org/x/net/context"

	bq "google.golang.org/api/bigquery/v2"
)

func defaultExtractJob() *bq.Job {
	return &bq.Job{
		Configuration: &bq.JobConfiguration{
			Extract: &bq.JobConfigurationExtract{
				SourceTable: &bq.TableReference{
					ProjectId: "project-id",
					DatasetId: "dataset-id",
					TableId:   "table-id",
				},
				DestinationUris: []string{"uri"},
			},
		},
	}
}

func TestExtract(t *testing.T) {
	s := &testService{}
	c := &Client{
		service:   s,
		projectID: "project-id",
	}

	testCases := []struct {
		dst    *GCSReference
		src    *Table
		config ExtractConfig
		want   *bq.Job
	}{
		{
			dst:  defaultGCS(),
			src:  c.Dataset("dataset-id").Table("table-id"),
			want: defaultExtractJob(),
		},
		{
			dst:    defaultGCS(),
			src:    c.Dataset("dataset-id").Table("table-id"),
			config: ExtractConfig{DisableHeader: true},
			want: func() *bq.Job {
				j := defaultExtractJob()
				f := false
				j.Configuration.Extract.PrintHeader = &f
				return j
			}(),
		},
		{
			dst: func() *GCSReference {
				g := NewGCSReference("uri")
				g.Compression = Gzip
				g.DestinationFormat = JSON
				g.FieldDelimiter = "\t"
				return g
			}(),
			src: c.Dataset("dataset-id").Table("table-id"),
			want: func() *bq.Job {
				j := defaultExtractJob()
				j.Configuration.Extract.Compression = "GZIP"
				j.Configuration.Extract.DestinationFormat = "NEWLINE_DELIMITED_JSON"
				j.Configuration.Extract.FieldDelimiter = "\t"
				return j
			}(),
		},
	}

	for _, tc := range testCases {
		ext := tc.src.ExtractorTo(tc.dst)
		tc.config.Src = ext.Src
		tc.config.Dst = ext.Dst
		ext.ExtractConfig = tc.config
		if _, err := ext.Run(context.Background()); err != nil {
			t.Errorf("err calling extract: %v", err)
			continue
		}
		if !reflect.DeepEqual(s.Job, tc.want) {
			t.Errorf("extracting: got:\n%v\nwant:\n%v", s.Job, tc.want)
		}
	}
}
172 vendor/cloud.google.com/go/bigquery/file.go generated vendored Normal file
@@ -0,0 +1,172 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "io"

    bq "google.golang.org/api/bigquery/v2"
)

// A ReaderSource is a source for a load operation that gets
// data from an io.Reader.
type ReaderSource struct {
    r io.Reader
    FileConfig
}

// NewReaderSource creates a ReaderSource from an io.Reader. You may
// optionally configure properties on the ReaderSource that describe the
// data being read, before passing it to Table.LoaderFrom.
func NewReaderSource(r io.Reader) *ReaderSource {
    return &ReaderSource{r: r}
}

func (r *ReaderSource) populateInsertJobConfForLoad(conf *insertJobConf) {
    conf.media = r.r
    r.FileConfig.populateLoadConfig(conf.job.Configuration.Load)
}

// FileConfig contains configuration options that pertain to files, typically
// text files that require interpretation to be used as a BigQuery table. A
// file may live in Google Cloud Storage (see GCSReference), or it may be
// loaded into a table via the Table.LoaderFromReader.
type FileConfig struct {
    // SourceFormat is the format of the GCS data to be read.
    // Allowed values are: CSV, Avro, JSON, DatastoreBackup. The default is CSV.
    SourceFormat DataFormat

    // FieldDelimiter is the separator for fields in a CSV file, used when
    // reading or exporting data. The default is ",".
    FieldDelimiter string

    // The number of rows at the top of a CSV file that BigQuery will skip when
    // reading data.
    SkipLeadingRows int64

    // AllowJaggedRows causes missing trailing optional columns to be tolerated
    // when reading CSV data. Missing values are treated as nulls.
    AllowJaggedRows bool

    // AllowQuotedNewlines sets whether quoted data sections containing
    // newlines are allowed when reading CSV data.
    AllowQuotedNewlines bool

    // Indicates if we should automatically infer the options and
    // schema for CSV and JSON sources.
    AutoDetect bool

    // Encoding is the character encoding of data to be read.
    Encoding Encoding

    // MaxBadRecords is the maximum number of bad records that will be ignored
    // when reading data.
    MaxBadRecords int64

    // IgnoreUnknownValues causes values not matching the schema to be
    // tolerated. Unknown values are ignored. For CSV this ignores extra values
    // at the end of a line. For JSON this ignores named values that do not
    // match any column name. If this field is not set, records containing
    // unknown values are treated as bad records. The MaxBadRecords field can
    // be used to customize how bad records are handled.
    IgnoreUnknownValues bool

    // Schema describes the data. It is required when reading CSV or JSON data,
    // unless the data is being loaded into a table that already exists.
    Schema Schema

    // Quote is the value used to quote data sections in a CSV file. The
    // default quotation character is the double quote ("), which is used if
    // both Quote and ForceZeroQuote are unset.
    // To specify that no character should be interpreted as a quotation
    // character, set ForceZeroQuote to true.
    // Only used when reading data.
    Quote          string
    ForceZeroQuote bool
}

// quote returns the CSV quote character, or nil if unset.
func (fc *FileConfig) quote() *string {
    if fc.ForceZeroQuote {
        quote := ""
        return &quote
    }
    if fc.Quote == "" {
        return nil
    }
    return &fc.Quote
}

func (fc *FileConfig) populateLoadConfig(conf *bq.JobConfigurationLoad) {
    conf.SkipLeadingRows = fc.SkipLeadingRows
    conf.SourceFormat = string(fc.SourceFormat)
    conf.Autodetect = fc.AutoDetect
    conf.AllowJaggedRows = fc.AllowJaggedRows
    conf.AllowQuotedNewlines = fc.AllowQuotedNewlines
    conf.Encoding = string(fc.Encoding)
    conf.FieldDelimiter = fc.FieldDelimiter
    conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
    conf.MaxBadRecords = fc.MaxBadRecords
    if fc.Schema != nil {
        conf.Schema = fc.Schema.asTableSchema()
    }
    conf.Quote = fc.quote()
}

func (fc *FileConfig) populateExternalDataConfig(conf *bq.ExternalDataConfiguration) {
    format := fc.SourceFormat
    if format == "" {
        // Format must be explicitly set for external data sources.
        format = CSV
    }
    // TODO(jba): support AutoDetect.
    conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
    conf.MaxBadRecords = fc.MaxBadRecords
    conf.SourceFormat = string(format)
    if fc.Schema != nil {
        conf.Schema = fc.Schema.asTableSchema()
    }
    if format == CSV {
        conf.CsvOptions = &bq.CsvOptions{
            AllowJaggedRows:     fc.AllowJaggedRows,
            AllowQuotedNewlines: fc.AllowQuotedNewlines,
            Encoding:            string(fc.Encoding),
            FieldDelimiter:      fc.FieldDelimiter,
            SkipLeadingRows:     fc.SkipLeadingRows,
            Quote:               fc.quote(),
        }
    }
}

// DataFormat describes the format of BigQuery table data.
type DataFormat string

// Constants describing the format of BigQuery table data.
const (
    CSV             DataFormat = "CSV"
    Avro            DataFormat = "AVRO"
    JSON            DataFormat = "NEWLINE_DELIMITED_JSON"
    DatastoreBackup DataFormat = "DATASTORE_BACKUP"
)

// Encoding specifies the character encoding of data to be loaded into BigQuery.
// See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding
// for more details about how this is used.
type Encoding string

const (
    UTF_8      Encoding = "UTF-8"
    ISO_8859_1 Encoding = "ISO-8859-1"
)
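As a usage sketch (not part of the vendored file): the FileConfig options above are typically set on a ReaderSource before handing it to Table.LoaderFrom. The project ID, dataset, table and input file names below are placeholders.

package main

import (
    "os"

    "cloud.google.com/go/bigquery"
    "golang.org/x/net/context"
)

func main() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "my-project") // placeholder project ID
    if err != nil {
        panic(err)
    }
    f, err := os.Open("data.tsv") // placeholder input file
    if err != nil {
        panic(err)
    }
    rs := bigquery.NewReaderSource(f)
    rs.SourceFormat = bigquery.CSV
    rs.FieldDelimiter = "\t" // tab-separated input
    rs.SkipLeadingRows = 1   // skip one header row
    rs.ForceZeroQuote = true // no character acts as a quote
    loader := client.Dataset("mydataset").Table("mytable").LoaderFrom(rs)
    if _, err := loader.Run(ctx); err != nil {
        panic(err)
    }
}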
90
vendor/cloud.google.com/go/bigquery/file_test.go
generated
vendored
Normal file
@ -0,0 +1,90 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "reflect"
    "testing"

    "cloud.google.com/go/internal/pretty"
    bq "google.golang.org/api/bigquery/v2"
)

func TestQuote(t *testing.T) {
    ptr := func(s string) *string { return &s }

    for _, test := range []struct {
        quote string
        force bool
        want  *string
    }{
        {"", false, nil},
        {"", true, ptr("")},
        {"-", false, ptr("-")},
        {"-", true, ptr("")},
    } {
        fc := FileConfig{
            Quote:          test.quote,
            ForceZeroQuote: test.force,
        }
        got := fc.quote()
        if (got == nil) != (test.want == nil) {
            t.Errorf("%+v\ngot %v\nwant %v", test, pretty.Value(got), pretty.Value(test.want))
        }
        if got != nil && test.want != nil && *got != *test.want {
            t.Errorf("%+v: got %q, want %q", test, *got, *test.want)
        }
    }
}

func TestPopulateLoadConfig(t *testing.T) {
    hyphen := "-"
    fc := FileConfig{
        SourceFormat:        CSV,
        FieldDelimiter:      "\t",
        SkipLeadingRows:     8,
        AllowJaggedRows:     true,
        AllowQuotedNewlines: true,
        Encoding:            UTF_8,
        MaxBadRecords:       7,
        IgnoreUnknownValues: true,
        Schema: Schema{
            stringFieldSchema(),
            nestedFieldSchema(),
        },
        Quote: hyphen,
    }
    want := &bq.JobConfigurationLoad{
        SourceFormat:        "CSV",
        FieldDelimiter:      "\t",
        SkipLeadingRows:     8,
        AllowJaggedRows:     true,
        AllowQuotedNewlines: true,
        Encoding:            "UTF-8",
        MaxBadRecords:       7,
        IgnoreUnknownValues: true,
        Schema: &bq.TableSchema{
            Fields: []*bq.TableFieldSchema{
                bqStringFieldSchema(),
                bqNestedFieldSchema(),
            }},
        Quote: &hyphen,
    }
    got := &bq.JobConfigurationLoad{}
    fc.populateLoadConfig(got)
    if !reflect.DeepEqual(got, want) {
        t.Errorf("got:\n%v\nwant:\n%v", pretty.Value(got), pretty.Value(want))
    }
}
68
vendor/cloud.google.com/go/bigquery/gcs.go
generated
vendored
Normal file
@ -0,0 +1,68 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import bq "google.golang.org/api/bigquery/v2"

// GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute
// an input or output to a BigQuery operation.
type GCSReference struct {
    // TODO(jba): Export so that GCSReference can be used to hold data from a Job.get api call and expose it to the user.
    uris []string

    FileConfig

    // DestinationFormat is the format to use when writing exported files.
    // Allowed values are: CSV, Avro, JSON. The default is CSV.
    // CSV is not supported for tables with nested or repeated fields.
    DestinationFormat DataFormat

    // Compression specifies the type of compression to apply when writing data
    // to Google Cloud Storage, or using this GCSReference as an ExternalData
    // source with CSV or JSON SourceFormat. Default is None.
    Compression Compression
}

// NewGCSReference constructs a reference to one or more Google Cloud Storage objects, which together constitute a data source or destination.
// In the simple case, a single URI in the form gs://bucket/object may refer to a single GCS object.
// Data may also be split into multiple files, if multiple URIs or URIs containing wildcards are provided.
// Each URI may contain one '*' wildcard character, which (if present) must come after the bucket name.
// For more information about the treatment of wildcards and multiple URIs,
// see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
func NewGCSReference(uri ...string) *GCSReference {
    return &GCSReference{uris: uri}
}

// Compression is the type of compression to apply when writing data to Google Cloud Storage.
type Compression string

const (
    None Compression = "NONE"
    Gzip Compression = "GZIP"
)

func (gcs *GCSReference) populateInsertJobConfForLoad(conf *insertJobConf) {
    conf.job.Configuration.Load.SourceUris = gcs.uris
    gcs.FileConfig.populateLoadConfig(conf.job.Configuration.Load)
}

func (gcs *GCSReference) externalDataConfig() bq.ExternalDataConfiguration {
    conf := bq.ExternalDataConfiguration{
        Compression: string(gcs.Compression),
        SourceUris:  append([]string{}, gcs.uris...),
    }
    gcs.FileConfig.populateExternalDataConfig(&conf)
    return conf
}
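A similar hypothetical sketch for the Cloud Storage side, assuming a configured *bigquery.Client; the project ID, bucket, object pattern and table names are placeholders.

package main

import (
    "cloud.google.com/go/bigquery"
    "golang.org/x/net/context"
)

func main() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "my-project") // placeholder project ID
    if err != nil {
        panic(err)
    }
    // One '*' wildcard is allowed per URI, and it must come after the bucket name.
    gcsRef := bigquery.NewGCSReference("gs://my-bucket/data-*.csv") // placeholder URI
    gcsRef.SourceFormat = bigquery.CSV
    gcsRef.SkipLeadingRows = 1 // skip one header row in each object
    loader := client.Dataset("mydataset").Table("mytable").LoaderFrom(gcsRef)
    if _, err := loader.Run(ctx); err != nil {
        panic(err)
    }
}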
754
vendor/cloud.google.com/go/bigquery/integration_test.go
generated
vendored
Normal file
@ -0,0 +1,754 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "flag"
    "fmt"
    "log"
    "net/http"
    "os"
    "reflect"
    "sort"
    "strings"
    "testing"
    "time"

    "cloud.google.com/go/civil"
    "cloud.google.com/go/internal/pretty"
    "cloud.google.com/go/internal/testutil"
    "golang.org/x/net/context"
    "google.golang.org/api/googleapi"
    "google.golang.org/api/iterator"
    "google.golang.org/api/option"
)

var (
    client  *Client
    dataset *Dataset
    schema  = Schema{
        {Name: "name", Type: StringFieldType},
        {Name: "num", Type: IntegerFieldType},
    }
    fiveMinutesFromNow time.Time
)

func TestMain(m *testing.M) {
    initIntegrationTest()
    os.Exit(m.Run())
}

func getClient(t *testing.T) *Client {
    if client == nil {
        t.Skip("Integration tests skipped")
    }
    return client
}

// If integration tests will be run, create a unique bucket for them.
func initIntegrationTest() {
    flag.Parse() // needed for testing.Short()
    if testing.Short() {
        return
    }
    ctx := context.Background()
    ts := testutil.TokenSource(ctx, Scope)
    if ts == nil {
        log.Println("Integration tests skipped. See CONTRIBUTING.md for details")
        return
    }
    projID := testutil.ProjID()
    var err error
    client, err = NewClient(ctx, projID, option.WithTokenSource(ts))
    if err != nil {
        log.Fatalf("NewClient: %v", err)
    }
    dataset = client.Dataset("bigquery_integration_test")
    if err := dataset.Create(ctx); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409
        log.Fatalf("creating dataset: %v", err)
    }
}

func TestIntegration_Create(t *testing.T) {
    // Check that creating a record field with an empty schema is an error.
    if client == nil {
        t.Skip("Integration tests skipped")
    }
    table := dataset.Table("t_bad")
    schema := Schema{
        {Name: "rec", Type: RecordFieldType, Schema: Schema{}},
    }
    err := table.Create(context.Background(), schema, TableExpiration(time.Now().Add(5*time.Minute)))
    if err == nil {
        t.Fatal("want error, got nil")
    }
    if !hasStatusCode(err, http.StatusBadRequest) {
        t.Fatalf("want a 400 error, got %v", err)
    }
}

func TestIntegration_CreateView(t *testing.T) {
    if client == nil {
        t.Skip("Integration tests skipped")
    }
    ctx := context.Background()
    table := newTable(t, schema)
    defer table.Delete(ctx)

    // Test that standard SQL views work.
    view := dataset.Table("t_view_standardsql")
    query := ViewQuery(fmt.Sprintf("SELECT APPROX_COUNT_DISTINCT(name) FROM `%s.%s.%s`", dataset.ProjectID, dataset.DatasetID, table.TableID))
    err := view.Create(context.Background(), UseStandardSQL(), query)
    if err != nil {
        t.Fatalf("table.create: Did not expect an error, got: %v", err)
    }
    view.Delete(ctx)
}

func TestIntegration_TableMetadata(t *testing.T) {
    if client == nil {
        t.Skip("Integration tests skipped")
    }
    ctx := context.Background()
    table := newTable(t, schema)
    defer table.Delete(ctx)
    // Check table metadata.
    md, err := table.Metadata(ctx)
    if err != nil {
        t.Fatal(err)
    }
    // TODO(jba): check md more thoroughly.
    if got, want := md.ID, fmt.Sprintf("%s:%s.%s", dataset.ProjectID, dataset.DatasetID, table.TableID); got != want {
        t.Errorf("metadata.ID: got %q, want %q", got, want)
    }
    if got, want := md.Type, RegularTable; got != want {
        t.Errorf("metadata.Type: got %v, want %v", got, want)
    }
    if got, want := md.ExpirationTime, fiveMinutesFromNow; !got.Equal(want) {
        t.Errorf("metadata.ExpirationTime: got %v, want %v", got, want)
    }

    // Check that timePartitioning is nil by default
    if md.TimePartitioning != nil {
        t.Errorf("metadata.TimePartitioning: got %v, want %v", md.TimePartitioning, nil)
    }

    // Create tables that have time partitioning
    partitionCases := []struct {
        timePartitioning   TimePartitioning
        expectedExpiration time.Duration
    }{
        {TimePartitioning{}, time.Duration(0)},
        {TimePartitioning{time.Second}, time.Second},
    }
    for i, c := range partitionCases {
        table := dataset.Table(fmt.Sprintf("t_metadata_partition_%v", i))
        err = table.Create(context.Background(), schema, c.timePartitioning, TableExpiration(time.Now().Add(5*time.Minute)))
        if err != nil {
            t.Fatal(err)
        }
        defer table.Delete(ctx)
        md, err = table.Metadata(ctx)
        if err != nil {
            t.Fatal(err)
        }

        got := md.TimePartitioning
        want := &TimePartitioning{c.expectedExpiration}
        if !reflect.DeepEqual(got, want) {
            t.Errorf("metadata.TimePartitioning: got %v, want %v", got, want)
        }
    }
}

func TestIntegration_DatasetMetadata(t *testing.T) {
    if client == nil {
        t.Skip("Integration tests skipped")
    }
    ctx := context.Background()
    md, err := dataset.Metadata(ctx)
    if err != nil {
        t.Fatal(err)
    }
    if got, want := md.ID, fmt.Sprintf("%s:%s", dataset.ProjectID, dataset.DatasetID); got != want {
        t.Errorf("ID: got %q, want %q", got, want)
    }
    jan2016 := time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC)
    if md.CreationTime.Before(jan2016) {
        t.Errorf("CreationTime: got %s, want > 2016-1-1", md.CreationTime)
    }
    if md.LastModifiedTime.Before(jan2016) {
        t.Errorf("LastModifiedTime: got %s, want > 2016-1-1", md.LastModifiedTime)
    }

    // Verify that we get a NotFound for a nonexistent dataset.
    _, err = client.Dataset("does_not_exist").Metadata(ctx)
    if err == nil || !hasStatusCode(err, http.StatusNotFound) {
        t.Errorf("got %v, want NotFound error", err)
    }
}

func TestIntegration_DatasetDelete(t *testing.T) {
    if client == nil {
        t.Skip("Integration tests skipped")
    }
    ctx := context.Background()
    ds := client.Dataset("delete_test")
    if err := ds.Create(ctx); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409
        t.Fatalf("creating dataset %s: %v", ds, err)
    }
    if err := ds.Delete(ctx); err != nil {
        t.Fatalf("deleting dataset %s: %v", ds, err)
    }
}

func TestIntegration_Tables(t *testing.T) {
    if client == nil {
        t.Skip("Integration tests skipped")
    }
    ctx := context.Background()
    table := newTable(t, schema)
    defer table.Delete(ctx)

    // Iterate over tables in the dataset.
    it := dataset.Tables(ctx)
    var tables []*Table
    for {
        tbl, err := it.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            t.Fatal(err)
        }
        tables = append(tables, tbl)
    }
    // Other tests may be running with this dataset, so there might be more
    // than just our table in the list. So don't try for an exact match; just
    // make sure that our table is there somewhere.
    found := false
    for _, tbl := range tables {
        if reflect.DeepEqual(tbl, table) {
            found = true
            break
        }
    }
    if !found {
        t.Errorf("Tables: got %v\nshould see %v in the list", pretty.Value(tables), pretty.Value(table))
    }
}

func TestIntegration_UploadAndRead(t *testing.T) {
    if client == nil {
        t.Skip("Integration tests skipped")
    }
    ctx := context.Background()
    table := newTable(t, schema)
    defer table.Delete(ctx)

    // Populate the table.
    upl := table.Uploader()
    var (
        wantRows  [][]Value
        saverRows []*ValuesSaver
    )
    for i, name := range []string{"a", "b", "c"} {
        row := []Value{name, int64(i)}
        wantRows = append(wantRows, row)
        saverRows = append(saverRows, &ValuesSaver{
            Schema:   schema,
            InsertID: name,
            Row:      row,
        })
    }
    if err := upl.Put(ctx, saverRows); err != nil {
        t.Fatal(putError(err))
    }

    // Wait until the data has been uploaded. This can take a few seconds, according
    // to https://cloud.google.com/bigquery/streaming-data-into-bigquery.
    if err := waitForRow(ctx, table); err != nil {
        t.Fatal(err)
    }

    // Read the table.
    checkRead(t, "upload", table.Read(ctx), wantRows)

    // Query the table.
    q := client.Query(fmt.Sprintf("select name, num from %s", table.TableID))
    q.DefaultProjectID = dataset.ProjectID
    q.DefaultDatasetID = dataset.DatasetID

    rit, err := q.Read(ctx)
    if err != nil {
        t.Fatal(err)
    }
    checkRead(t, "query", rit, wantRows)

    // Query the long way.
    job1, err := q.Run(ctx)
    if err != nil {
        t.Fatal(err)
    }
    job2, err := client.JobFromID(ctx, job1.ID())
    if err != nil {
        t.Fatal(err)
    }
    rit, err = job2.Read(ctx)
    if err != nil {
        t.Fatal(err)
    }
    checkRead(t, "job.Read", rit, wantRows)

    // Test reading directly into a []Value.
    valueLists, err := readAll(table.Read(ctx))
    if err != nil {
        t.Fatal(err)
    }
    it := table.Read(ctx)
    for i, vl := range valueLists {
        var got []Value
        if err := it.Next(&got); err != nil {
            t.Fatal(err)
        }
        want := []Value(vl)
        if !reflect.DeepEqual(got, want) {
            t.Errorf("%d: got %v, want %v", i, got, want)
        }
    }

    // Test reading into a map.
    it = table.Read(ctx)
    for _, vl := range valueLists {
        var vm map[string]Value
        if err := it.Next(&vm); err != nil {
            t.Fatal(err)
        }
        if got, want := len(vm), len(vl); got != want {
            t.Fatalf("valueMap len: got %d, want %d", got, want)
        }
        for i, v := range vl {
            if got, want := vm[schema[i].Name], v; got != want {
                t.Errorf("%d, name=%s: got %v, want %v",
                    i, schema[i].Name, got, want)
            }
        }
    }
}

type TestStruct struct {
    Name string
    Nums []int
    Sub  Sub
    Subs []*Sub
}

type Sub struct {
    B       bool
    SubSub  SubSub
    SubSubs []*SubSub
}

type SubSub struct{ Count int }

func TestIntegration_UploadAndReadStructs(t *testing.T) {
    if client == nil {
        t.Skip("Integration tests skipped")
    }
    schema, err := InferSchema(TestStruct{})
    if err != nil {
        t.Fatal(err)
    }

    ctx := context.Background()
    table := newTable(t, schema)
    defer table.Delete(ctx)

    // Populate the table.
    upl := table.Uploader()
    want := []*TestStruct{
        {Name: "a", Nums: []int{1, 2}, Sub: Sub{B: true}, Subs: []*Sub{{B: false}, {B: true}}},
        {Name: "b", Nums: []int{1}, Subs: []*Sub{{B: false}, {B: false}, {B: true}}},
        {Name: "c", Sub: Sub{B: true}},
        {
            Name: "d",
            Sub:  Sub{SubSub: SubSub{12}, SubSubs: []*SubSub{{1}, {2}, {3}}},
            Subs: []*Sub{{B: false, SubSub: SubSub{4}}, {B: true, SubSubs: []*SubSub{{5}, {6}}}},
        },
    }
    var savers []*StructSaver
    for _, s := range want {
        savers = append(savers, &StructSaver{Schema: schema, Struct: s})
    }
    if err := upl.Put(ctx, savers); err != nil {
        t.Fatal(putError(err))
    }

    // Wait until the data has been uploaded. This can take a few seconds, according
    // to https://cloud.google.com/bigquery/streaming-data-into-bigquery.
    if err := waitForRow(ctx, table); err != nil {
        t.Fatal(err)
    }

    // Test iteration with structs.
    it := table.Read(ctx)
    var got []*TestStruct
    for {
        var g TestStruct
        err := it.Next(&g)
        if err == iterator.Done {
            break
        }
        if err != nil {
            t.Fatal(err)
        }
        got = append(got, &g)
    }
    sort.Sort(byName(got))

    // BigQuery does not elide nils. It reports an error for nil fields.
    for i, g := range got {
        if i >= len(want) {
            t.Errorf("%d: got %v, past end of want", i, pretty.Value(g))
        } else if w := want[i]; !reflect.DeepEqual(g, w) {
            t.Errorf("%d: got %v, want %v", i, pretty.Value(g), pretty.Value(w))
        }
    }
}

type byName []*TestStruct

func (b byName) Len() int           { return len(b) }
func (b byName) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b byName) Less(i, j int) bool { return b[i].Name < b[j].Name }

func TestIntegration_Update(t *testing.T) {
    if client == nil {
        t.Skip("Integration tests skipped")
    }
    ctx := context.Background()
    table := newTable(t, schema)
    defer table.Delete(ctx)

    // Test Update of non-schema fields.
    tm, err := table.Metadata(ctx)
    if err != nil {
        t.Fatal(err)
    }
    wantDescription := tm.Description + "more"
    wantName := tm.Name + "more"
    got, err := table.Update(ctx, TableMetadataToUpdate{
        Description: wantDescription,
        Name:        wantName,
    })
    if err != nil {
        t.Fatal(err)
    }
    if got.Description != wantDescription {
        t.Errorf("Description: got %q, want %q", got.Description, wantDescription)
    }
    if got.Name != wantName {
        t.Errorf("Name: got %q, want %q", got.Name, wantName)
    }
    if !reflect.DeepEqual(got.Schema, schema) {
        t.Errorf("Schema: got %v, want %v", pretty.Value(got.Schema), pretty.Value(schema))
    }

    // Test schema update.
    // Columns can be added. schema2 is the same as schema, except for the
    // added column in the middle.
    nested := Schema{
        {Name: "nested", Type: BooleanFieldType},
        {Name: "other", Type: StringFieldType},
    }
    schema2 := Schema{
        schema[0],
        {Name: "rec", Type: RecordFieldType, Schema: nested},
        schema[1],
    }

    got, err = table.Update(ctx, TableMetadataToUpdate{Schema: schema2})
    if err != nil {
        t.Fatal(err)
    }

    // Wherever you add the column, it appears at the end.
    schema3 := Schema{schema2[0], schema2[2], schema2[1]}
    if !reflect.DeepEqual(got.Schema, schema3) {
        t.Errorf("add field:\ngot %v\nwant %v",
            pretty.Value(got.Schema), pretty.Value(schema3))
    }

    // Updating with the empty schema succeeds, but is a no-op.
    got, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema{}})
    if err != nil {
        t.Fatal(err)
    }
    if !reflect.DeepEqual(got.Schema, schema3) {
        t.Errorf("empty schema:\ngot %v\nwant %v",
            pretty.Value(got.Schema), pretty.Value(schema3))
    }

    // Error cases.
    for _, test := range []struct {
        desc   string
        fields []*FieldSchema
    }{
        {"change from optional to required", []*FieldSchema{
            schema3[0],
            {Name: "num", Type: IntegerFieldType, Required: true},
            schema3[2],
        }},
        {"add a required field", []*FieldSchema{
            schema3[0], schema3[1], schema3[2],
            {Name: "req", Type: StringFieldType, Required: true},
        }},
        {"remove a field", []*FieldSchema{schema3[0], schema3[1]}},
        {"remove a nested field", []*FieldSchema{
            schema3[0], schema3[1],
            {Name: "rec", Type: RecordFieldType, Schema: Schema{nested[0]}}}},
        {"remove all nested fields", []*FieldSchema{
            schema3[0], schema3[1],
            {Name: "rec", Type: RecordFieldType, Schema: Schema{}}}},
    } {
        for {
            _, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema(test.fields)})
            if !hasStatusCode(err, 403) {
                break
            }
            // We've hit the rate limit for updates. Wait a bit and retry.
            t.Logf("%s: retrying after getting %v", test.desc, err)
            time.Sleep(4 * time.Second)
        }
        if err == nil {
            t.Errorf("%s: want error, got nil", test.desc)
        } else if !hasStatusCode(err, 400) {
            t.Errorf("%s: want 400, got %v", test.desc, err)
        }
    }
}

func TestIntegration_Load(t *testing.T) {
    if client == nil {
        t.Skip("Integration tests skipped")
    }
    ctx := context.Background()
    table := newTable(t, schema)
    defer table.Delete(ctx)

    // Load the table from a reader.
    r := strings.NewReader("a,0\nb,1\nc,2\n")
    wantRows := [][]Value{
        []Value{"a", int64(0)},
        []Value{"b", int64(1)},
        []Value{"c", int64(2)},
    }
    rs := NewReaderSource(r)
    loader := table.LoaderFrom(rs)
    loader.WriteDisposition = WriteTruncate
    job, err := loader.Run(ctx)
    if err != nil {
        t.Fatal(err)
    }
    if err := wait(ctx, job); err != nil {
        t.Fatal(err)
    }
    checkRead(t, "reader load", table.Read(ctx), wantRows)
}

func TestIntegration_DML(t *testing.T) {
    if client == nil {
        t.Skip("Integration tests skipped")
    }
    ctx := context.Background()
    table := newTable(t, schema)
    defer table.Delete(ctx)

    // Use DML to insert.
    wantRows := [][]Value{
        []Value{"a", int64(0)},
        []Value{"b", int64(1)},
        []Value{"c", int64(2)},
    }
    query := fmt.Sprintf("INSERT bigquery_integration_test.%s (name, num) "+
        "VALUES ('a', 0), ('b', 1), ('c', 2)",
        table.TableID)
    q := client.Query(query)
    q.UseStandardSQL = true // necessary for DML
    job, err := q.Run(ctx)
    if err != nil {
        t.Fatal(err)
    }
    if err := wait(ctx, job); err != nil {
        t.Fatal(err)
    }
    checkRead(t, "INSERT", table.Read(ctx), wantRows)
}

func TestIntegration_TimeTypes(t *testing.T) {
    if client == nil {
        t.Skip("Integration tests skipped")
    }
    ctx := context.Background()
    dtSchema := Schema{
        {Name: "d", Type: DateFieldType},
        {Name: "t", Type: TimeFieldType},
        {Name: "dt", Type: DateTimeFieldType},
        {Name: "ts", Type: TimestampFieldType},
    }
    table := newTable(t, dtSchema)
    defer table.Delete(ctx)

    d := civil.Date{2016, 3, 20}
    tm := civil.Time{12, 30, 0, 0}
    ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
    wantRows := [][]Value{
        []Value{d, tm, civil.DateTime{d, tm}, ts},
    }
    upl := table.Uploader()
    if err := upl.Put(ctx, []*ValuesSaver{
        {Schema: dtSchema, Row: wantRows[0]},
    }); err != nil {
        t.Fatal(putError(err))
    }
    if err := waitForRow(ctx, table); err != nil {
        t.Fatal(err)
    }

    // SQL wants DATETIMEs with a space between date and time, but the service
    // returns them in RFC3339 form, with a "T" between.
    query := fmt.Sprintf("INSERT bigquery_integration_test.%s (d, t, dt, ts) "+
        "VALUES ('%s', '%s', '%s %s', '%s')",
        table.TableID, d, tm, d, tm, ts.Format("2006-01-02 15:04:05"))
    q := client.Query(query)
    q.UseStandardSQL = true // necessary for DML
    job, err := q.Run(ctx)
    if err != nil {
        t.Fatal(err)
    }
    if err := wait(ctx, job); err != nil {
        t.Fatal(err)
    }
    wantRows = append(wantRows, wantRows[0])
    checkRead(t, "TimeTypes", table.Read(ctx), wantRows)
}

// Creates a new, temporary table with a unique name and the given schema.
func newTable(t *testing.T, s Schema) *Table {
    fiveMinutesFromNow = time.Now().Add(5 * time.Minute).Round(time.Second)
    name := fmt.Sprintf("t%d", time.Now().UnixNano())
    table := dataset.Table(name)
    err := table.Create(context.Background(), s, TableExpiration(fiveMinutesFromNow))
    if err != nil {
        t.Fatal(err)
    }
    return table
}

func checkRead(t *testing.T, msg string, it *RowIterator, want [][]Value) {
    got, err := readAll(it)
    if err != nil {
        t.Fatalf("%s: %v", msg, err)
    }
    if len(got) != len(want) {
        t.Errorf("%s: got %d rows, want %d", msg, len(got), len(want))
    }
    sort.Sort(byCol0(got))
    for i, r := range got {
        gotRow := []Value(r)
        wantRow := want[i]
        if !reflect.DeepEqual(gotRow, wantRow) {
            t.Errorf("%s #%d: got %v, want %v", msg, i, gotRow, wantRow)
        }
    }
}

func readAll(it *RowIterator) ([][]Value, error) {
    var rows [][]Value
    for {
        var vals []Value
        err := it.Next(&vals)
        if err == iterator.Done {
            return rows, nil
        }
        if err != nil {
            return nil, err
        }
        rows = append(rows, vals)
    }
}

type byCol0 [][]Value

func (b byCol0) Len() int      { return len(b) }
func (b byCol0) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byCol0) Less(i, j int) bool {
    switch a := b[i][0].(type) {
    case string:
        return a < b[j][0].(string)
    case civil.Date:
        return a.Before(b[j][0].(civil.Date))
    default:
        panic("unknown type")
    }
}

func hasStatusCode(err error, code int) bool {
    if e, ok := err.(*googleapi.Error); ok && e.Code == code {
        return true
    }
    return false
}

// wait polls the job until it is complete or an error is returned.
func wait(ctx context.Context, job *Job) error {
    status, err := job.Wait(ctx)
    if err != nil {
        return fmt.Errorf("getting job status: %v", err)
    }
    if status.Err() != nil {
        return fmt.Errorf("job status error: %#v", status.Err())
    }
    return nil
}

// waitForRow polls the table until it contains a row.
// TODO(jba): use internal.Retry.
func waitForRow(ctx context.Context, table *Table) error {
    for {
        it := table.Read(ctx)
        var v []Value
        err := it.Next(&v)
        if err == nil {
            return nil
        }
        if err != iterator.Done {
            return err
        }
        time.Sleep(1 * time.Second)
    }
}

func putError(err error) string {
    pme, ok := err.(PutMultiError)
    if !ok {
        return err.Error()
    }
    var msgs []string
    for _, err := range pme {
        msgs = append(msgs, err.Error())
    }
    return strings.Join(msgs, "\n")
}
158
vendor/cloud.google.com/go/bigquery/iterator.go
generated
vendored
Normal file
@ -0,0 +1,158 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "fmt"
    "reflect"

    "golang.org/x/net/context"
    "google.golang.org/api/iterator"
)

// A pageFetcher returns a page of rows, starting from the row specified by token.
type pageFetcher interface {
    fetch(ctx context.Context, s service, token string) (*readDataResult, error)
    setPaging(*pagingConf)
}

func newRowIterator(ctx context.Context, s service, pf pageFetcher) *RowIterator {
    it := &RowIterator{
        ctx:     ctx,
        service: s,
        pf:      pf,
    }
    it.pageInfo, it.nextFunc = iterator.NewPageInfo(
        it.fetch,
        func() int { return len(it.rows) },
        func() interface{} { r := it.rows; it.rows = nil; return r })
    return it
}

// A RowIterator provides access to the result of a BigQuery lookup.
type RowIterator struct {
    ctx      context.Context
    service  service
    pf       pageFetcher
    pageInfo *iterator.PageInfo
    nextFunc func() error

    // StartIndex can be set before the first call to Next. If PageInfo().Token
    // is also set, StartIndex is ignored.
    StartIndex uint64

    rows [][]Value

    schema       Schema       // populated on first call to fetch
    structLoader structLoader // used to populate a pointer to a struct
}

// Next loads the next row into dst. Its return value is iterator.Done if there
// are no more results. Once Next returns iterator.Done, all subsequent calls
// will return iterator.Done.
//
// dst may implement ValueLoader, or may be a *[]Value, *map[string]Value, or struct pointer.
//
// If dst is a *[]Value, it will be set to a new []Value whose i'th element
// will be populated with the i'th column of the row.
//
// If dst is a *map[string]Value, a new map will be created if dst is nil. Then
// for each schema column name, the map key of that name will be set to the column's
// value.
//
// If dst is pointer to a struct, each column in the schema will be matched
// with an exported field of the struct that has the same name, ignoring case.
// Unmatched schema columns and struct fields will be ignored.
//
// Each BigQuery column type corresponds to one or more Go types; a matching struct
// field must be of the correct type. The correspondences are:
//
//   STRING      string
//   BOOL        bool
//   INTEGER     int, int8, int16, int32, int64, uint8, uint16, uint32
//   FLOAT       float32, float64
//   BYTES       []byte
//   TIMESTAMP   time.Time
//   DATE        civil.Date
//   TIME        civil.Time
//   DATETIME    civil.DateTime
//
// A repeated field corresponds to a slice or array of the element type.
// A RECORD type (nested schema) corresponds to a nested struct or struct pointer.
// All calls to Next on the same iterator must use the same struct type.
func (it *RowIterator) Next(dst interface{}) error {
    var vl ValueLoader
    switch dst := dst.(type) {
    case ValueLoader:
        vl = dst
    case *[]Value:
        vl = (*valueList)(dst)
    case *map[string]Value:
        vl = (*valueMap)(dst)
    default:
        if !isStructPtr(dst) {
            return fmt.Errorf("bigquery: cannot convert %T to ValueLoader (need pointer to []Value, map[string]Value, or struct)", dst)
        }
    }
    if err := it.nextFunc(); err != nil {
        return err
    }
    row := it.rows[0]
    it.rows = it.rows[1:]

    if vl == nil {
        // This can only happen if dst is a pointer to a struct. We couldn't
        // set vl above because we need the schema.
        if err := it.structLoader.set(dst, it.schema); err != nil {
            return err
        }
        vl = &it.structLoader
    }
    return vl.Load(row, it.schema)
}

func isStructPtr(x interface{}) bool {
    t := reflect.TypeOf(x)
    return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *RowIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) {
    pc := &pagingConf{}
    if pageSize > 0 {
        pc.recordsPerRequest = int64(pageSize)
        pc.setRecordsPerRequest = true
    }
    if pageToken == "" {
        pc.startIndex = it.StartIndex
    }
    it.pf.setPaging(pc)
    var res *readDataResult
    var err error
    for {
        res, err = it.pf.fetch(it.ctx, it.service, pageToken)
        if err != errIncompleteJob {
            break
        }
    }
    if err != nil {
        return "", err
    }
    it.rows = append(it.rows, res.rows...)
    it.schema = res.schema
    return res.pageToken, nil
}
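To make the dst-type rules in the Next doc comment above concrete, a minimal read loop might look like the following sketch; the project, dataset and table names are placeholders, and dst could equally be a *map[string]Value or a struct pointer.

package main

import (
    "fmt"

    "cloud.google.com/go/bigquery"
    "golang.org/x/net/context"
    "google.golang.org/api/iterator"
)

func main() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "my-project") // placeholder project ID
    if err != nil {
        panic(err)
    }
    it := client.Dataset("mydataset").Table("mytable").Read(ctx)
    for {
        var row []bigquery.Value // one value per column, in schema order
        err := it.Next(&row)
        if err == iterator.Done {
            break // no more rows
        }
        if err != nil {
            panic(err)
        }
        fmt.Println(row)
    }
}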
413
vendor/cloud.google.com/go/bigquery/iterator_test.go
generated
vendored
Normal file
@ -0,0 +1,413 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "errors"
    "fmt"
    "reflect"
    "testing"

    "golang.org/x/net/context"
    "google.golang.org/api/iterator"
)

type fetchResponse struct {
    result *readDataResult // The result to return.
    err    error           // The error to return.
}

// pageFetcherStub services fetch requests by returning data from an in-memory list of values.
type pageFetcherStub struct {
    fetchResponses map[string]fetchResponse

    err error
}

func (pf *pageFetcherStub) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
    call, ok := pf.fetchResponses[token]
    if !ok {
        pf.err = fmt.Errorf("Unexpected page token: %q", token)
    }
    return call.result, call.err
}

func (pf *pageFetcherStub) setPaging(pc *pagingConf) {}

func TestIterator(t *testing.T) {
    var (
        iiSchema = Schema{
            {Type: IntegerFieldType},
            {Type: IntegerFieldType},
        }
        siSchema = Schema{
            {Type: StringFieldType},
            {Type: IntegerFieldType},
        }
    )
    fetchFailure := errors.New("fetch failure")

    testCases := []struct {
        desc           string
        pageToken      string
        fetchResponses map[string]fetchResponse
        want           [][]Value
        wantErr        error
        wantSchema     Schema
    }{
        {
            desc: "Iteration over single empty page",
            fetchResponses: map[string]fetchResponse{
                "": {
                    result: &readDataResult{
                        pageToken: "",
                        rows:      [][]Value{},
                        schema:    Schema{},
                    },
                },
            },
            want:       [][]Value{},
            wantSchema: Schema{},
        },
        {
            desc: "Iteration over single page",
            fetchResponses: map[string]fetchResponse{
                "": {
                    result: &readDataResult{
                        pageToken: "",
                        rows:      [][]Value{{1, 2}, {11, 12}},
                        schema:    iiSchema,
                    },
                },
            },
            want:       [][]Value{{1, 2}, {11, 12}},
            wantSchema: iiSchema,
        },
        {
            desc: "Iteration over single page with different schema",
            fetchResponses: map[string]fetchResponse{
                "": {
                    result: &readDataResult{
                        pageToken: "",
                        rows:      [][]Value{{"1", 2}, {"11", 12}},
                        schema:    siSchema,
                    },
                },
            },
            want:       [][]Value{{"1", 2}, {"11", 12}},
            wantSchema: siSchema,
        },
        {
            desc: "Iteration over two pages",
            fetchResponses: map[string]fetchResponse{
                "": {
                    result: &readDataResult{
                        pageToken: "a",
                        rows:      [][]Value{{1, 2}, {11, 12}},
                        schema:    iiSchema,
                    },
                },
                "a": {
                    result: &readDataResult{
                        pageToken: "",
                        rows:      [][]Value{{101, 102}, {111, 112}},
                        schema:    iiSchema,
                    },
                },
            },
            want:       [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
            wantSchema: iiSchema,
        },
        {
            desc: "Server response includes empty page",
            fetchResponses: map[string]fetchResponse{
                "": {
                    result: &readDataResult{
                        pageToken: "a",
                        rows:      [][]Value{{1, 2}, {11, 12}},
                        schema:    iiSchema,
                    },
                },
                "a": {
                    result: &readDataResult{
                        pageToken: "b",
                        rows:      [][]Value{},
                        schema:    iiSchema,
                    },
                },
                "b": {
                    result: &readDataResult{
                        pageToken: "",
                        rows:      [][]Value{{101, 102}, {111, 112}},
                        schema:    iiSchema,
                    },
                },
            },
            want:       [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
            wantSchema: iiSchema,
        },
        {
            desc: "Fetch error",
            fetchResponses: map[string]fetchResponse{
                "": {
                    result: &readDataResult{
                        pageToken: "a",
                        rows:      [][]Value{{1, 2}, {11, 12}},
                        schema:    iiSchema,
                    },
                },
                "a": {
                    // We return some data from this fetch, but also an error.
                    // So the end result should include only data from the previous fetch.
                    err: fetchFailure,
                    result: &readDataResult{
                        pageToken: "b",
                        rows:      [][]Value{{101, 102}, {111, 112}},
                        schema:    iiSchema,
                    },
                },
            },
            want:       [][]Value{{1, 2}, {11, 12}},
            wantErr:    fetchFailure,
            wantSchema: iiSchema,
        },

        {
            desc:      "Skip over an entire page",
            pageToken: "a",
            fetchResponses: map[string]fetchResponse{
                "": {
                    result: &readDataResult{
                        pageToken: "a",
                        rows:      [][]Value{{1, 2}, {11, 12}},
                        schema:    iiSchema,
                    },
                },
                "a": {
                    result: &readDataResult{
                        pageToken: "",
                        rows:      [][]Value{{101, 102}, {111, 112}},
                        schema:    iiSchema,
                    },
                },
            },
            want:       [][]Value{{101, 102}, {111, 112}},
            wantSchema: iiSchema,
        },

        {
            desc:      "Skip beyond all data",
            pageToken: "b",
            fetchResponses: map[string]fetchResponse{
                "": {
                    result: &readDataResult{
                        pageToken: "a",
                        rows:      [][]Value{{1, 2}, {11, 12}},
                        schema:    iiSchema,
                    },
                },
                "a": {
                    result: &readDataResult{
                        pageToken: "b",
                        rows:      [][]Value{{101, 102}, {111, 112}},
                        schema:    iiSchema,
                    },
                },
                "b": {
                    result: &readDataResult{},
                },
            },
            // In this test case, Next returns iterator.Done on its first call,
            // so no values are ever read.
            want:       [][]Value{},
            wantSchema: Schema{},
        },
    }

    for _, tc := range testCases {
        pf := &pageFetcherStub{
            fetchResponses: tc.fetchResponses,
        }
        it := newRowIterator(context.Background(), nil, pf)
        it.PageInfo().Token = tc.pageToken
        values, schema, err := consumeRowIterator(it)
        if err != tc.wantErr {
            t.Fatalf("%s: got %v, want %v", tc.desc, err, tc.wantErr)
        }
        if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) {
            t.Errorf("%s: values:\ngot: %v\nwant:%v", tc.desc, values, tc.want)
        }
        if (len(schema) != 0 || len(tc.wantSchema) != 0) && !reflect.DeepEqual(schema, tc.wantSchema) {
            t.Errorf("%s: iterator.Schema:\ngot: %v\nwant: %v", tc.desc, schema, tc.wantSchema)
        }
    }
}

type valueListWithSchema struct {
    vals   valueList
    schema Schema
}

func (v *valueListWithSchema) Load(vs []Value, s Schema) error {
    v.vals.Load(vs, s)
    v.schema = s
    return nil
}

// consumeRowIterator reads the schema and all values from a RowIterator and returns them.
func consumeRowIterator(it *RowIterator) ([][]Value, Schema, error) {
    var got [][]Value
    var schema Schema
    for {
        var vls valueListWithSchema
        err := it.Next(&vls)
        if err == iterator.Done {
            return got, schema, nil
        }
        if err != nil {
            return got, schema, err
        }
        got = append(got, vls.vals)
        schema = vls.schema
    }
}

type delayedPageFetcher struct {
    pageFetcherStub
    delayCount int
}

func (pf *delayedPageFetcher) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
    if pf.delayCount > 0 {
        pf.delayCount--
        return nil, errIncompleteJob
    }
    return pf.pageFetcherStub.fetch(ctx, s, token)
}

func TestIterateIncompleteJob(t *testing.T) {
    want := [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}}
    pf := pageFetcherStub{
        fetchResponses: map[string]fetchResponse{
            "": {
                result: &readDataResult{
                    pageToken: "a",
                    rows:      [][]Value{{1, 2}, {11, 12}},
                },
            },
            "a": {
                result: &readDataResult{
                    pageToken: "",
                    rows:      [][]Value{{101, 102}, {111, 112}},
                },
            },
        },
    }
    dpf := &delayedPageFetcher{
        pageFetcherStub: pf,
        delayCount:      1,
    }
    it := newRowIterator(context.Background(), nil, dpf)

    values, _, err := consumeRowIterator(it)
    if err != nil {
        t.Fatal(err)
    }

    if (len(values) != 0 || len(want) != 0) && !reflect.DeepEqual(values, want) {
        t.Errorf("values: got:\n%v\nwant:\n%v", values, want)
    }
    if dpf.delayCount != 0 {
        t.Errorf("delayCount: got: %v, want: 0", dpf.delayCount)
    }
}

func TestNextDuringErrorState(t *testing.T) {
    pf := &pageFetcherStub{
        fetchResponses: map[string]fetchResponse{
            "": {err: errors.New("bang")},
        },
    }
    it := newRowIterator(context.Background(), nil, pf)
    var vals []Value
    if err := it.Next(&vals); err == nil {
        t.Errorf("Expected error after calling Next")
    }
    if err := it.Next(&vals); err == nil {
        t.Errorf("Expected error calling Next again when iterator has a non-nil error.")
    }
}

func TestNextAfterFinished(t *testing.T) {
    testCases := []struct {
        fetchResponses map[string]fetchResponse
        want           [][]Value
    }{
        {
            fetchResponses: map[string]fetchResponse{
                "": {
                    result: &readDataResult{
                        pageToken: "",
                        rows:      [][]Value{{1, 2}, {11, 12}},
                    },
                },
            },
            want: [][]Value{{1, 2}, {11, 12}},
        },
        {
            fetchResponses: map[string]fetchResponse{
                "": {
                    result: &readDataResult{
                        pageToken: "",
                        rows:      [][]Value{},
                    },
                },
            },
            want: [][]Value{},
        },
    }

    for _, tc := range testCases {
        pf := &pageFetcherStub{
            fetchResponses: tc.fetchResponses,
        }
        it := newRowIterator(context.Background(), nil, pf)

        values, _, err := consumeRowIterator(it)
        if err != nil {
            t.Fatal(err)
        }
        if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) {
            t.Errorf("values: got:\n%v\nwant:\n%v", values, tc.want)
        }
        // Try calling Next again.
        var vals []Value
        if err := it.Next(&vals); err != iterator.Done {
            t.Errorf("Expected Done calling Next when there are no more values")
        }
    }
}

func TestIteratorNextTypes(t *testing.T) {
    it := newRowIterator(context.Background(), nil, nil)
    for _, v := range []interface{}{3, "s", []int{}, &[]int{},
        map[string]Value{}, &map[string]interface{}{},
        struct{}{},
    } {
        if err := it.Next(v); err == nil {
            t.Errorf("%v: want error, got nil", v)
        }
    }
}
133
vendor/cloud.google.com/go/bigquery/job.go
generated
vendored
Normal file
@ -0,0 +1,133 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "cloud.google.com/go/internal"
    gax "github.com/googleapis/gax-go"
    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
)

// A Job represents an operation which has been submitted to BigQuery for processing.
type Job struct {
    service   service
    projectID string
    jobID     string

    isQuery bool
}

// JobFromID creates a Job which refers to an existing BigQuery job. The job
// need not have been created by this package. For example, the job may have
// been created in the BigQuery console.
func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {
    jobType, err := c.service.getJobType(ctx, c.projectID, id)
    if err != nil {
        return nil, err
    }

    return &Job{
        service:   c.service,
        projectID: c.projectID,
        jobID:     id,
        isQuery:   jobType == queryJobType,
    }, nil
}

func (j *Job) ID() string {
    return j.jobID
}

// State is one of a sequence of states that a Job progresses through as it is processed.
type State int

const (
    Pending State = iota
    Running
    Done
)

// JobStatus contains the current State of a job, and errors encountered while processing that job.
type JobStatus struct {
    State State

    err error

    // All errors encountered during the running of the job.
    // Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful.
    Errors []*Error
}

// setJobRef initializes job's JobReference if given a non-empty jobID.
// projectID must be non-empty.
func setJobRef(job *bq.Job, jobID, projectID string) {
    if jobID == "" {
        return
    }
    // We don't check whether projectID is empty; the server will return an
    // error when it encounters the resulting JobReference.

    job.JobReference = &bq.JobReference{
        JobId:     jobID,
        ProjectId: projectID,
    }
}

// Done reports whether the job has completed.
// After Done returns true, the Err method will return an error if the job completed unsuccessfully.
func (s *JobStatus) Done() bool {
    return s.State == Done
}

// Err returns the error that caused the job to complete unsuccessfully (if any).
func (s *JobStatus) Err() error {
    return s.err
}

// Status returns the current status of the job. It fails if the Status could not be determined.
func (j *Job) Status(ctx context.Context) (*JobStatus, error) {
    return j.service.jobStatus(ctx, j.projectID, j.jobID)
}

// Cancel requests that a job be cancelled. This method returns without waiting for
// cancellation to take effect. To check whether the job has terminated, use Job.Status.
// Cancelled jobs may still incur costs.
func (j *Job) Cancel(ctx context.Context) error {
    return j.service.jobCancel(ctx, j.projectID, j.jobID)
}

// Wait blocks until the job or the context is done. It returns the final status
// of the job.
// If an error occurs while retrieving the status, Wait returns that error. But
// Wait returns nil if the status was retrieved successfully, even if
// status.Err() != nil. So callers must check both errors. See the example.
func (j *Job) Wait(ctx context.Context) (*JobStatus, error) {
    var js *JobStatus
    err := internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
        js, err = j.Status(ctx)
        if err != nil {
            return true, err
        }
        if js.Done() {
            return true, nil
        }
        return false, nil
    })
    if err != nil {
        return nil, err
    }
    return js, nil
}
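The Wait doc comment above insists that callers check both the returned error and status.Err(). A small helper in the spirit of the wait function from integration_test.go above makes the pattern explicit; it is a sketch, not part of the vendored file.

package example

import (
    "fmt"

    "cloud.google.com/go/bigquery"
    "golang.org/x/net/context"
)

// waitForJob blocks until the job finishes, checking both the retrieval
// error and the job's own status error.
func waitForJob(ctx context.Context, job *bigquery.Job) error {
    status, err := job.Wait(ctx)
    if err != nil {
        // The status could not be retrieved at all.
        return fmt.Errorf("getting job status: %v", err)
    }
    if status.Err() != nil {
        // The status was retrieved, but the job itself failed.
        return fmt.Errorf("job failed: %v", status.Err())
    }
    return nil
}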
86
vendor/cloud.google.com/go/bigquery/load.go
generated
vendored
Normal file
@ -0,0 +1,86 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"golang.org/x/net/context"

	bq "google.golang.org/api/bigquery/v2"
)

// LoadConfig holds the configuration for a load job.
type LoadConfig struct {
	// JobID is the ID to use for the load job. If unset, a job ID will be automatically created.
	JobID string

	// Src is the source from which data will be loaded.
	Src LoadSource

	// Dst is the table into which the data will be loaded.
	Dst *Table

	// CreateDisposition specifies the circumstances under which the destination table will be created.
	// The default is CreateIfNeeded.
	CreateDisposition TableCreateDisposition

	// WriteDisposition specifies how existing data in the destination table is treated.
	// The default is WriteAppend.
	WriteDisposition TableWriteDisposition
}

// A Loader loads data from Google Cloud Storage into a BigQuery table.
type Loader struct {
	LoadConfig
	c *Client
}

// A LoadSource represents a source of data that can be loaded into
// a BigQuery table.
//
// This package defines two LoadSources: GCSReference, for Google Cloud Storage
// objects, and ReaderSource, for data read from an io.Reader.
type LoadSource interface {
	populateInsertJobConfForLoad(conf *insertJobConf)
}

// LoaderFrom returns a Loader which can be used to load data into a BigQuery table.
// The returned Loader may optionally be further configured before its Run method is called.
func (t *Table) LoaderFrom(src LoadSource) *Loader {
	return &Loader{
		c: t.c,
		LoadConfig: LoadConfig{
			Src: src,
			Dst: t,
		},
	}
}

// Run initiates a load job.
func (l *Loader) Run(ctx context.Context) (*Job, error) {
	job := &bq.Job{
		Configuration: &bq.JobConfiguration{
			Load: &bq.JobConfigurationLoad{
				CreateDisposition: string(l.CreateDisposition),
				WriteDisposition:  string(l.WriteDisposition),
			},
		},
	}
	conf := &insertJobConf{job: job}
	l.Src.populateInsertJobConfForLoad(conf)
	setJobRef(job, l.JobID, l.c.projectID)

	job.Configuration.Load.DestinationTable = l.Dst.tableRefProto()

	return l.c.service.insertJob(ctx, l.c.projectID, conf)
}
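
As a usage sketch under assumed names (myTable is a *Table already in scope; the bucket URI is a placeholder, not from this diff), a load from Cloud Storage would be configured and run like this:

    func exampleLoad(ctx context.Context, myTable *Table) error {
        gcsRef := NewGCSReference("gs://my-bucket/data.csv") // hypothetical URI
        loader := myTable.LoaderFrom(gcsRef)
        loader.WriteDisposition = WriteTruncate
        job, err := loader.Run(ctx)
        if err != nil {
            return err
        }
        status, err := job.Wait(ctx)
        if err != nil {
            return err
        }
        return status.Err()
    }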
229
vendor/cloud.google.com/go/bigquery/load_test.go
generated
vendored
Normal file
@ -0,0 +1,229 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"reflect"
	"strings"
	"testing"

	"golang.org/x/net/context"

	"cloud.google.com/go/internal/pretty"
	bq "google.golang.org/api/bigquery/v2"
)

func defaultLoadJob() *bq.Job {
	return &bq.Job{
		Configuration: &bq.JobConfiguration{
			Load: &bq.JobConfigurationLoad{
				DestinationTable: &bq.TableReference{
					ProjectId: "project-id",
					DatasetId: "dataset-id",
					TableId:   "table-id",
				},
				SourceUris: []string{"uri"},
			},
		},
	}
}

func stringFieldSchema() *FieldSchema {
	return &FieldSchema{Name: "fieldname", Type: StringFieldType}
}

func nestedFieldSchema() *FieldSchema {
	return &FieldSchema{
		Name:   "nested",
		Type:   RecordFieldType,
		Schema: Schema{stringFieldSchema()},
	}
}

func bqStringFieldSchema() *bq.TableFieldSchema {
	return &bq.TableFieldSchema{
		Name: "fieldname",
		Type: "STRING",
	}
}

func bqNestedFieldSchema() *bq.TableFieldSchema {
	return &bq.TableFieldSchema{
		Name:   "nested",
		Type:   "RECORD",
		Fields: []*bq.TableFieldSchema{bqStringFieldSchema()},
	}
}

func TestLoad(t *testing.T) {
	c := &Client{projectID: "project-id"}

	testCases := []struct {
		dst    *Table
		src    LoadSource
		config LoadConfig
		want   *bq.Job
	}{
		{
			dst:  c.Dataset("dataset-id").Table("table-id"),
			src:  NewGCSReference("uri"),
			want: defaultLoadJob(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			config: LoadConfig{
				CreateDisposition: CreateNever,
				WriteDisposition:  WriteTruncate,
				JobID:             "ajob",
			},
			src: NewGCSReference("uri"),
			want: func() *bq.Job {
				j := defaultLoadJob()
				j.Configuration.Load.CreateDisposition = "CREATE_NEVER"
				j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE"
				j.JobReference = &bq.JobReference{
					JobId:     "ajob",
					ProjectId: "project-id",
				}
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: func() *GCSReference {
				g := NewGCSReference("uri")
				g.MaxBadRecords = 1
				g.AllowJaggedRows = true
				g.AllowQuotedNewlines = true
				g.IgnoreUnknownValues = true
				return g
			}(),
			want: func() *bq.Job {
				j := defaultLoadJob()
				j.Configuration.Load.MaxBadRecords = 1
				j.Configuration.Load.AllowJaggedRows = true
				j.Configuration.Load.AllowQuotedNewlines = true
				j.Configuration.Load.IgnoreUnknownValues = true
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: func() *GCSReference {
				g := NewGCSReference("uri")
				g.Schema = Schema{
					stringFieldSchema(),
					nestedFieldSchema(),
				}
				return g
			}(),
			want: func() *bq.Job {
				j := defaultLoadJob()
				j.Configuration.Load.Schema = &bq.TableSchema{
					Fields: []*bq.TableFieldSchema{
						bqStringFieldSchema(),
						bqNestedFieldSchema(),
					}}
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: func() *GCSReference {
				g := NewGCSReference("uri")
				g.SkipLeadingRows = 1
				g.SourceFormat = JSON
				g.Encoding = UTF_8
				g.FieldDelimiter = "\t"
				g.Quote = "-"
				return g
			}(),
			want: func() *bq.Job {
				j := defaultLoadJob()
				j.Configuration.Load.SkipLeadingRows = 1
				j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON"
				j.Configuration.Load.Encoding = "UTF-8"
				j.Configuration.Load.FieldDelimiter = "\t"
				hyphen := "-"
				j.Configuration.Load.Quote = &hyphen
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: NewGCSReference("uri"),
			want: func() *bq.Job {
				j := defaultLoadJob()
				// Quote is left unset in GCSReference, so should be nil here.
				j.Configuration.Load.Quote = nil
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: func() *GCSReference {
				g := NewGCSReference("uri")
				g.ForceZeroQuote = true
				return g
			}(),
			want: func() *bq.Job {
				j := defaultLoadJob()
				empty := ""
				j.Configuration.Load.Quote = &empty
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: func() *ReaderSource {
				r := NewReaderSource(strings.NewReader("foo"))
				r.SkipLeadingRows = 1
				r.SourceFormat = JSON
				r.Encoding = UTF_8
				r.FieldDelimiter = "\t"
				r.Quote = "-"
				return r
			}(),
			want: func() *bq.Job {
				j := defaultLoadJob()
				j.Configuration.Load.SourceUris = nil
				j.Configuration.Load.SkipLeadingRows = 1
				j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON"
				j.Configuration.Load.Encoding = "UTF-8"
				j.Configuration.Load.FieldDelimiter = "\t"
				hyphen := "-"
				j.Configuration.Load.Quote = &hyphen
				return j
			}(),
		},
	}

	for i, tc := range testCases {
		s := &testService{}
		c.service = s
		loader := tc.dst.LoaderFrom(tc.src)
		tc.config.Src = tc.src
		tc.config.Dst = tc.dst
		loader.LoadConfig = tc.config
		if _, err := loader.Run(context.Background()); err != nil {
			t.Errorf("%d: err calling Loader.Run: %v", i, err)
			continue
		}
		if !reflect.DeepEqual(s.Job, tc.want) {
			t.Errorf("loading %d: got:\n%v\nwant:\n%v",
				i, pretty.Value(s.Job), pretty.Value(tc.want))
		}
	}
}
265
vendor/cloud.google.com/go/bigquery/params.go
generated
vendored
Normal file
@ -0,0 +1,265 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"encoding/base64"
	"errors"
	"fmt"
	"reflect"
	"regexp"
	"time"

	"cloud.google.com/go/civil"
	"cloud.google.com/go/internal/fields"

	bq "google.golang.org/api/bigquery/v2"
)

var (
	// See https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp-type.
	timestampFormat = "2006-01-02 15:04:05.999999-07:00"

	// See https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#schema.fields.name
	validFieldName = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]{0,127}$")
)

func bqTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) {
	if s := t.Get("bigquery"); s != "" {
		if s == "-" {
			return "", false, nil, nil
		}
		if !validFieldName.MatchString(s) {
			return "", false, nil, errInvalidFieldName
		}
		return s, true, nil, nil
	}
	return "", true, nil, nil
}

var fieldCache = fields.NewCache(bqTagParser, nil, nil)

var (
	int64ParamType     = &bq.QueryParameterType{Type: "INT64"}
	float64ParamType   = &bq.QueryParameterType{Type: "FLOAT64"}
	boolParamType      = &bq.QueryParameterType{Type: "BOOL"}
	stringParamType    = &bq.QueryParameterType{Type: "STRING"}
	bytesParamType     = &bq.QueryParameterType{Type: "BYTES"}
	dateParamType      = &bq.QueryParameterType{Type: "DATE"}
	timeParamType      = &bq.QueryParameterType{Type: "TIME"}
	dateTimeParamType  = &bq.QueryParameterType{Type: "DATETIME"}
	timestampParamType = &bq.QueryParameterType{Type: "TIMESTAMP"}
)

var (
	typeOfDate     = reflect.TypeOf(civil.Date{})
	typeOfTime     = reflect.TypeOf(civil.Time{})
	typeOfDateTime = reflect.TypeOf(civil.DateTime{})
	typeOfGoTime   = reflect.TypeOf(time.Time{})
)

// A QueryParameter is a parameter to a query.
type QueryParameter struct {
	// Name is used for named parameter mode.
	// It must match the name in the query case-insensitively.
	Name string

	// Value is the value of the parameter.
	// The following Go types are supported, with their corresponding
	// Bigquery types:
	// int, int8, int16, int32, int64, uint8, uint16, uint32: INT64
	// Note that uint, uint64 and uintptr are not supported, because
	// they may contain values that cannot fit into a 64-bit signed integer.
	// float32, float64: FLOAT64
	// bool: BOOL
	// string: STRING
	// []byte: BYTES
	// time.Time: TIMESTAMP
	// Arrays and slices of the above.
	// Structs of the above. Only the exported fields are used.
	Value interface{}
}

func (p QueryParameter) toRaw() (*bq.QueryParameter, error) {
	pv, err := paramValue(reflect.ValueOf(p.Value))
	if err != nil {
		return nil, err
	}
	pt, err := paramType(reflect.TypeOf(p.Value))
	if err != nil {
		return nil, err
	}
	return &bq.QueryParameter{
		Name:           p.Name,
		ParameterValue: &pv,
		ParameterType:  pt,
	}, nil
}

func paramType(t reflect.Type) (*bq.QueryParameterType, error) {
	if t == nil {
		return nil, errors.New("bigquery: nil parameter")
	}
	switch t {
	case typeOfDate:
		return dateParamType, nil
	case typeOfTime:
		return timeParamType, nil
	case typeOfDateTime:
		return dateTimeParamType, nil
	case typeOfGoTime:
		return timestampParamType, nil
	}
	switch t.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32:
		return int64ParamType, nil

	case reflect.Float32, reflect.Float64:
		return float64ParamType, nil

	case reflect.Bool:
		return boolParamType, nil

	case reflect.String:
		return stringParamType, nil

	case reflect.Slice:
		if t.Elem().Kind() == reflect.Uint8 {
			return bytesParamType, nil
		}
		fallthrough

	case reflect.Array:
		et, err := paramType(t.Elem())
		if err != nil {
			return nil, err
		}
		return &bq.QueryParameterType{Type: "ARRAY", ArrayType: et}, nil

	case reflect.Ptr:
		if t.Elem().Kind() != reflect.Struct {
			break
		}
		t = t.Elem()
		fallthrough

	case reflect.Struct:
		var fts []*bq.QueryParameterTypeStructTypes
		fields, err := fieldCache.Fields(t)
		if err != nil {
			return nil, err
		}
		for _, f := range fields {
			pt, err := paramType(f.Type)
			if err != nil {
				return nil, err
			}
			fts = append(fts, &bq.QueryParameterTypeStructTypes{
				Name: f.Name,
				Type: pt,
			})
		}
		return &bq.QueryParameterType{Type: "STRUCT", StructTypes: fts}, nil
	}
	return nil, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter type", t)
}

func paramValue(v reflect.Value) (bq.QueryParameterValue, error) {
	var res bq.QueryParameterValue
	if !v.IsValid() {
		return res, errors.New("bigquery: nil parameter")
	}
	t := v.Type()
	switch t {
	case typeOfDate:
		res.Value = v.Interface().(civil.Date).String()
		return res, nil

	case typeOfTime:
		// civil.Time has nanosecond resolution, but BigQuery TIME only microsecond.
		res.Value = civilTimeParamString(v.Interface().(civil.Time))
		return res, nil

	case typeOfDateTime:
		dt := v.Interface().(civil.DateTime)
		res.Value = dt.Date.String() + " " + civilTimeParamString(dt.Time)
		return res, nil

	case typeOfGoTime:
		res.Value = v.Interface().(time.Time).Format(timestampFormat)
		return res, nil
	}
	switch t.Kind() {
	case reflect.Slice:
		if t.Elem().Kind() == reflect.Uint8 {
			res.Value = base64.StdEncoding.EncodeToString(v.Interface().([]byte))
			return res, nil
		}
		fallthrough

	case reflect.Array:
		var vals []*bq.QueryParameterValue
		for i := 0; i < v.Len(); i++ {
			val, err := paramValue(v.Index(i))
			if err != nil {
				return bq.QueryParameterValue{}, err
			}
			vals = append(vals, &val)
		}
		return bq.QueryParameterValue{ArrayValues: vals}, nil

	case reflect.Ptr:
		if t.Elem().Kind() != reflect.Struct {
			return res, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter value", t)
		}
		t = t.Elem()
		v = v.Elem()
		if !v.IsValid() {
			// nil pointer becomes empty value
			return res, nil
		}
		fallthrough

	case reflect.Struct:
		fields, err := fieldCache.Fields(t)
		if err != nil {
			return bq.QueryParameterValue{}, err
		}
		res.StructValues = map[string]bq.QueryParameterValue{}
		for _, f := range fields {
			fv := v.FieldByIndex(f.Index)
			fp, err := paramValue(fv)
			if err != nil {
				return bq.QueryParameterValue{}, err
			}
			res.StructValues[f.Name] = fp
		}
		return res, nil
	}
	// None of the above: assume a scalar type. (If it's not a valid type,
	// paramType will catch the error.)
	res.Value = fmt.Sprint(v.Interface())
	return res, nil
}

func civilTimeParamString(t civil.Time) string {
	if t.Nanosecond == 0 {
		return t.String()
	} else {
		micro := (t.Nanosecond + 500) / 1000 // round to nearest microsecond
		t.Nanosecond = 0
		return t.String() + fmt.Sprintf(".%06d", micro)
	}
}
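
A brief sketch of how QueryParameter is meant to be used (the query text and values are illustrative only, not from this diff); the positional form leaves Name empty, while the named form must set it on every parameter:

    q := client.Query("SELECT word FROM corpus WHERE word_count > ?") // positional syntax
    q.Parameters = []QueryParameter{{Value: 10}}

    q2 := client.Query("SELECT word FROM corpus WHERE word_count > @min") // named syntax
    q2.Parameters = []QueryParameter{{Name: "min", Value: 10}}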
262
vendor/cloud.google.com/go/bigquery/params_test.go
generated
vendored
Normal file
@ -0,0 +1,262 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"errors"
	"math"
	"reflect"
	"testing"
	"time"

	"cloud.google.com/go/civil"
	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
)

var scalarTests = []struct {
	val  interface{}
	want string
}{
	{int64(0), "0"},
	{3.14, "3.14"},
	{3.14159e-87, "3.14159e-87"},
	{true, "true"},
	{"string", "string"},
	{"\u65e5\u672c\u8a9e\n", "\u65e5\u672c\u8a9e\n"},
	{math.NaN(), "NaN"},
	{[]byte("foo"), "Zm9v"}, // base64 encoding of "foo"
	{time.Date(2016, 3, 20, 4, 22, 9, 5000, time.FixedZone("neg1-2", -3720)),
		"2016-03-20 04:22:09.000005-01:02"},
	{civil.Date{2016, 3, 20}, "2016-03-20"},
	{civil.Time{4, 5, 6, 789000000}, "04:05:06.789000"},
	{civil.DateTime{civil.Date{2016, 3, 20}, civil.Time{4, 5, 6, 789000000}}, "2016-03-20 04:05:06.789000"},
}

type S1 struct {
	A int
	B *S2
	C bool
}

type S2 struct {
	D string
	e int
}

var s1 = S1{
	A: 1,
	B: &S2{D: "s"},
	C: true,
}

func sval(s string) bq.QueryParameterValue {
	return bq.QueryParameterValue{Value: s}
}

func TestParamValueScalar(t *testing.T) {
	for _, test := range scalarTests {
		got, err := paramValue(reflect.ValueOf(test.val))
		if err != nil {
			t.Errorf("%v: got %v, want nil", test.val, err)
			continue
		}
		want := sval(test.want)
		if !reflect.DeepEqual(got, want) {
			t.Errorf("%v:\ngot %+v\nwant %+v", test.val, got, want)
		}
	}
}

func TestParamValueArray(t *testing.T) {
	qpv := bq.QueryParameterValue{ArrayValues: []*bq.QueryParameterValue{
		{Value: "1"},
		{Value: "2"},
	},
	}
	for _, test := range []struct {
		val  interface{}
		want bq.QueryParameterValue
	}{
		{[]int(nil), bq.QueryParameterValue{}},
		{[]int{}, bq.QueryParameterValue{}},
		{[]int{1, 2}, qpv},
		{[2]int{1, 2}, qpv},
	} {
		got, err := paramValue(reflect.ValueOf(test.val))
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(got, test.want) {
			t.Errorf("%#v:\ngot %+v\nwant %+v", test.val, got, test.want)
		}
	}
}

func TestParamValueStruct(t *testing.T) {
	got, err := paramValue(reflect.ValueOf(s1))
	if err != nil {
		t.Fatal(err)
	}
	want := bq.QueryParameterValue{
		StructValues: map[string]bq.QueryParameterValue{
			"A": sval("1"),
			"B": bq.QueryParameterValue{
				StructValues: map[string]bq.QueryParameterValue{
					"D": sval("s"),
				},
			},
			"C": sval("true"),
		},
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("got %+v\nwant %+v", got, want)
	}
}

func TestParamValueErrors(t *testing.T) {
	// paramValue lets a few invalid types through, but paramType catches them.
	// Since we never call one without the other that's fine.
	for _, val := range []interface{}{nil, new([]int)} {
		_, err := paramValue(reflect.ValueOf(val))
		if err == nil {
			t.Errorf("%v (%T): got nil, want error", val, val)
		}
	}
}

func TestParamType(t *testing.T) {
	for _, test := range []struct {
		val  interface{}
		want *bq.QueryParameterType
	}{
		{0, int64ParamType},
		{uint32(32767), int64ParamType},
		{3.14, float64ParamType},
		{float32(3.14), float64ParamType},
		{math.NaN(), float64ParamType},
		{true, boolParamType},
		{"", stringParamType},
		{"string", stringParamType},
		{time.Now(), timestampParamType},
		{[]byte("foo"), bytesParamType},
		{[]int{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}},
		{[3]bool{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: boolParamType}},
		{S1{}, &bq.QueryParameterType{
			Type: "STRUCT",
			StructTypes: []*bq.QueryParameterTypeStructTypes{
				{Name: "A", Type: int64ParamType},
				{Name: "B", Type: &bq.QueryParameterType{
					Type: "STRUCT",
					StructTypes: []*bq.QueryParameterTypeStructTypes{
						{Name: "D", Type: stringParamType},
					},
				}},
				{Name: "C", Type: boolParamType},
			},
		}},
	} {
		got, err := paramType(reflect.TypeOf(test.val))
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(got, test.want) {
			t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.want)
		}
	}
}

func TestParamTypeErrors(t *testing.T) {
	for _, val := range []interface{}{
		nil, uint(0), new([]int), make(chan int),
	} {
		_, err := paramType(reflect.TypeOf(val))
		if err == nil {
			t.Errorf("%v (%T): got nil, want error", val, val)
		}
	}
}

func TestIntegration_ScalarParam(t *testing.T) {
	c := getClient(t)
	for _, test := range scalarTests {
		got, err := paramRoundTrip(c, test.val)
		if err != nil {
			t.Fatal(err)
		}
		if !equal(got, test.val) {
			t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", got, got, test.val, test.val)
		}
	}
}

func TestIntegration_OtherParam(t *testing.T) {
	c := getClient(t)
	for _, test := range []struct {
		val  interface{}
		want interface{}
	}{
		{[]int(nil), []Value(nil)},
		{[]int{}, []Value(nil)},
		{[]int{1, 2}, []Value{int64(1), int64(2)}},
		{[3]int{1, 2, 3}, []Value{int64(1), int64(2), int64(3)}},
		{S1{}, []Value{int64(0), nil, false}},
		{s1, []Value{int64(1), []Value{"s"}, true}},
	} {
		got, err := paramRoundTrip(c, test.val)
		if err != nil {
			t.Fatal(err)
		}
		if !equal(got, test.want) {
			t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", got, got, test.want, test.want)
		}
	}
}

func paramRoundTrip(c *Client, x interface{}) (Value, error) {
	q := c.Query("select ?")
	q.Parameters = []QueryParameter{{Value: x}}
	it, err := q.Read(context.Background())
	if err != nil {
		return nil, err
	}
	var val []Value
	err = it.Next(&val)
	if err != nil {
		return nil, err
	}
	if len(val) != 1 {
		return nil, errors.New("wrong number of values")
	}
	return val[0], nil
}

func equal(x1, x2 interface{}) bool {
	if reflect.TypeOf(x1) != reflect.TypeOf(x2) {
		return false
	}
	switch x1 := x1.(type) {
	case float64:
		if math.IsNaN(x1) {
			return math.IsNaN(x2.(float64))
		}
		return x1 == x2
	case time.Time:
		// BigQuery is only accurate to the microsecond.
		return x1.Round(time.Microsecond).Equal(x2.(time.Time).Round(time.Microsecond))
	default:
		return reflect.DeepEqual(x1, x2)
	}
}
196
vendor/cloud.google.com/go/bigquery/query.go
generated
vendored
Normal file
@ -0,0 +1,196 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
)

// QueryConfig holds the configuration for a query job.
type QueryConfig struct {
	// JobID is the ID to use for the query job. If this field is empty, a job ID
	// will be automatically created.
	JobID string

	// Dst is the table into which the results of the query will be written.
	// If this field is nil, a temporary table will be created.
	Dst *Table

	// The query to execute. See https://cloud.google.com/bigquery/query-reference for details.
	Q string

	// DefaultProjectID and DefaultDatasetID specify the dataset to use for unqualified table names in the query.
	// If DefaultProjectID is set, DefaultDatasetID must also be set.
	DefaultProjectID string
	DefaultDatasetID string

	// TableDefinitions describes data sources outside of BigQuery.
	// The map keys may be used as table names in the query string.
	TableDefinitions map[string]ExternalData

	// CreateDisposition specifies the circumstances under which the destination table will be created.
	// The default is CreateIfNeeded.
	CreateDisposition TableCreateDisposition

	// WriteDisposition specifies how existing data in the destination table is treated.
	// The default is WriteAppend.
	WriteDisposition TableWriteDisposition

	// DisableQueryCache prevents results being fetched from the query cache.
	// If this field is false, results are fetched from the cache if they are available.
	// The query cache is a best-effort cache that is flushed whenever tables in the query are modified.
	// Cached results are only available when TableID is unspecified in the query's destination Table.
	// For more information, see https://cloud.google.com/bigquery/querying-data#querycaching
	DisableQueryCache bool

	// DisableFlattenedResults prevents results being flattened.
	// If this field is false, results from nested and repeated fields are flattened.
	// DisableFlattenedResults implies AllowLargeResults
	// For more information, see https://cloud.google.com/bigquery/docs/data#nested
	DisableFlattenedResults bool

	// AllowLargeResults allows the query to produce arbitrarily large result tables.
	// The destination must be a table.
	// When using this option, queries will take longer to execute, even if the result set is small.
	// For additional limitations, see https://cloud.google.com/bigquery/querying-data#largequeryresults
	AllowLargeResults bool

	// Priority specifies the priority with which to schedule the query.
	// The default priority is InteractivePriority.
	// For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries
	Priority QueryPriority

	// MaxBillingTier sets the maximum billing tier for a Query.
	// Queries that have resource usage beyond this tier will fail (without
	// incurring a charge). If this field is zero, the project default will be used.
	MaxBillingTier int

	// MaxBytesBilled limits the number of bytes billed for
	// this job. Queries that would exceed this limit will fail (without incurring
	// a charge).
	// If this field is less than 1, the project default will be
	// used.
	MaxBytesBilled int64

	// UseStandardSQL causes the query to use standard SQL.
	// The default is false (using legacy SQL).
	UseStandardSQL bool

	// Parameters is a list of query parameters. The presence of parameters
	// implies the use of standard SQL.
	// If the query uses positional syntax ("?"), then no parameter may have a name.
	// If the query uses named syntax ("@p"), then all parameters must have names.
	// It is illegal to mix positional and named syntax.
	Parameters []QueryParameter
}

// QueryPriority specifies a priority with which a query is to be executed.
type QueryPriority string

const (
	BatchPriority       QueryPriority = "BATCH"
	InteractivePriority QueryPriority = "INTERACTIVE"
)

// A Query queries data from a BigQuery table. Use Client.Query to create a Query.
type Query struct {
	client *Client
	QueryConfig
}

// Query creates a query with string q.
// The returned Query may optionally be further configured before its Run method is called.
func (c *Client) Query(q string) *Query {
	return &Query{
		client:      c,
		QueryConfig: QueryConfig{Q: q},
	}
}

// Run initiates a query job.
func (q *Query) Run(ctx context.Context) (*Job, error) {
	job := &bq.Job{
		Configuration: &bq.JobConfiguration{
			Query: &bq.JobConfigurationQuery{},
		},
	}
	setJobRef(job, q.JobID, q.client.projectID)

	if err := q.QueryConfig.populateJobQueryConfig(job.Configuration.Query); err != nil {
		return nil, err
	}
	j, err := q.client.service.insertJob(ctx, q.client.projectID, &insertJobConf{job: job})
	if err != nil {
		return nil, err
	}
	j.isQuery = true
	return j, nil
}

func (q *QueryConfig) populateJobQueryConfig(conf *bq.JobConfigurationQuery) error {
	conf.Query = q.Q

	if len(q.TableDefinitions) > 0 {
		conf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration)
	}
	for name, data := range q.TableDefinitions {
		conf.TableDefinitions[name] = data.externalDataConfig()
	}

	if q.DefaultProjectID != "" || q.DefaultDatasetID != "" {
		conf.DefaultDataset = &bq.DatasetReference{
			DatasetId: q.DefaultDatasetID,
			ProjectId: q.DefaultProjectID,
		}
	}

	if tier := int64(q.MaxBillingTier); tier > 0 {
		conf.MaximumBillingTier = &tier
	}
	conf.CreateDisposition = string(q.CreateDisposition)
	conf.WriteDisposition = string(q.WriteDisposition)
	conf.AllowLargeResults = q.AllowLargeResults
	conf.Priority = string(q.Priority)

	f := false
	if q.DisableQueryCache {
		conf.UseQueryCache = &f
	}
	if q.DisableFlattenedResults {
		conf.FlattenResults = &f
		// DisableFlattenResults implies AllowLargeResults.
		conf.AllowLargeResults = true
	}
	if q.MaxBytesBilled >= 1 {
		conf.MaximumBytesBilled = q.MaxBytesBilled
	}
	if q.UseStandardSQL || len(q.Parameters) > 0 {
		conf.UseLegacySql = false
		conf.ForceSendFields = append(conf.ForceSendFields, "UseLegacySql")
	}

	if q.Dst != nil && !q.Dst.implicitTable() {
		conf.DestinationTable = q.Dst.tableRefProto()
	}
	for _, p := range q.Parameters {
		qp, err := p.toRaw()
		if err != nil {
			return err
		}
		conf.QueryParameters = append(conf.QueryParameters, qp)
	}
	return nil
}
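
As an illustrative sketch (the project, dataset, and query strings are placeholders, not from this diff), configuring and starting a query job with this API looks like:

    q := client.Query("SELECT name FROM items") // hypothetical query
    q.DefaultProjectID = "my-project"
    q.DefaultDatasetID = "my_dataset"
    q.UseStandardSQL = true
    job, err := q.Run(ctx)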
305
vendor/cloud.google.com/go/bigquery/query_test.go
generated
vendored
Normal file
@ -0,0 +1,305 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"reflect"
	"testing"

	"golang.org/x/net/context"

	bq "google.golang.org/api/bigquery/v2"
)

func defaultQueryJob() *bq.Job {
	return &bq.Job{
		Configuration: &bq.JobConfiguration{
			Query: &bq.JobConfigurationQuery{
				DestinationTable: &bq.TableReference{
					ProjectId: "project-id",
					DatasetId: "dataset-id",
					TableId:   "table-id",
				},
				Query: "query string",
				DefaultDataset: &bq.DatasetReference{
					ProjectId: "def-project-id",
					DatasetId: "def-dataset-id",
				},
			},
		},
	}
}

func TestQuery(t *testing.T) {
	c := &Client{
		projectID: "project-id",
	}
	testCases := []struct {
		dst  *Table
		src  *QueryConfig
		want *bq.Job
	}{
		{
			dst:  c.Dataset("dataset-id").Table("table-id"),
			src:  defaultQuery,
			want: defaultQueryJob(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q: "query string",
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Query.DefaultDataset = nil
				return j
			}(),
		},
		{
			dst: &Table{},
			src: defaultQuery,
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Query.DestinationTable = nil
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q: "query string",
				TableDefinitions: map[string]ExternalData{
					"atable": func() *GCSReference {
						g := NewGCSReference("uri")
						g.AllowJaggedRows = true
						g.AllowQuotedNewlines = true
						g.Compression = Gzip
						g.Encoding = UTF_8
						g.FieldDelimiter = ";"
						g.IgnoreUnknownValues = true
						g.MaxBadRecords = 1
						g.Quote = "'"
						g.SkipLeadingRows = 2
						g.Schema = Schema([]*FieldSchema{
							{Name: "name", Type: StringFieldType},
						})
						return g
					}(),
				},
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Query.DefaultDataset = nil
				td := make(map[string]bq.ExternalDataConfiguration)
				quote := "'"
				td["atable"] = bq.ExternalDataConfiguration{
					Compression:         "GZIP",
					IgnoreUnknownValues: true,
					MaxBadRecords:       1,
					SourceFormat:        "CSV", // must be explicitly set.
					SourceUris:          []string{"uri"},
					CsvOptions: &bq.CsvOptions{
						AllowJaggedRows:     true,
						AllowQuotedNewlines: true,
						Encoding:            "UTF-8",
						FieldDelimiter:      ";",
						SkipLeadingRows:     2,
						Quote:               &quote,
					},
					Schema: &bq.TableSchema{
						Fields: []*bq.TableFieldSchema{
							{Name: "name", Type: "STRING"},
						},
					},
				}
				j.Configuration.Query.TableDefinitions = td
				return j
			}(),
		},
		{
			dst: &Table{
				ProjectID: "project-id",
				DatasetID: "dataset-id",
				TableID:   "table-id",
			},
			src: &QueryConfig{
				Q:                 "query string",
				DefaultProjectID:  "def-project-id",
				DefaultDatasetID:  "def-dataset-id",
				CreateDisposition: CreateNever,
				WriteDisposition:  WriteTruncate,
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Query.WriteDisposition = "WRITE_TRUNCATE"
				j.Configuration.Query.CreateDisposition = "CREATE_NEVER"
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q:                 "query string",
				DefaultProjectID:  "def-project-id",
				DefaultDatasetID:  "def-dataset-id",
				DisableQueryCache: true,
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				f := false
				j.Configuration.Query.UseQueryCache = &f
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q:                 "query string",
				DefaultProjectID:  "def-project-id",
				DefaultDatasetID:  "def-dataset-id",
				AllowLargeResults: true,
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Query.AllowLargeResults = true
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q:                       "query string",
				DefaultProjectID:        "def-project-id",
				DefaultDatasetID:        "def-dataset-id",
				DisableFlattenedResults: true,
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				f := false
				j.Configuration.Query.FlattenResults = &f
				j.Configuration.Query.AllowLargeResults = true
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q:                "query string",
				DefaultProjectID: "def-project-id",
				DefaultDatasetID: "def-dataset-id",
				Priority:         QueryPriority("low"),
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Query.Priority = "low"
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q:                "query string",
				DefaultProjectID: "def-project-id",
				DefaultDatasetID: "def-dataset-id",
				MaxBillingTier:   3,
				MaxBytesBilled:   5,
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				tier := int64(3)
				j.Configuration.Query.MaximumBillingTier = &tier
				j.Configuration.Query.MaximumBytesBilled = 5
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q:                "query string",
				DefaultProjectID: "def-project-id",
				DefaultDatasetID: "def-dataset-id",
				MaxBytesBilled:   -1,
			},
			want: defaultQueryJob(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q:                "query string",
				DefaultProjectID: "def-project-id",
				DefaultDatasetID: "def-dataset-id",
				UseStandardSQL:   true,
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Query.UseLegacySql = false
				j.Configuration.Query.ForceSendFields = []string{"UseLegacySql"}
				return j
			}(),
		},
	}
	for _, tc := range testCases {
		s := &testService{}
		c.service = s
		query := c.Query("")
		query.QueryConfig = *tc.src
		query.Dst = tc.dst
		if _, err := query.Run(context.Background()); err != nil {
			t.Errorf("err calling query: %v", err)
			continue
		}
		if !reflect.DeepEqual(s.Job, tc.want) {
			t.Errorf("querying: got:\n%v\nwant:\n%v", s.Job, tc.want)
		}
	}
}

func TestConfiguringQuery(t *testing.T) {
	s := &testService{}
	c := &Client{
		projectID: "project-id",
		service:   s,
	}

	query := c.Query("q")
	query.JobID = "ajob"
	query.DefaultProjectID = "def-project-id"
	query.DefaultDatasetID = "def-dataset-id"
	// Note: Other configuration fields are tested in other tests above.
	// A lot of that can be consolidated once Client.Copy is gone.

	want := &bq.Job{
		Configuration: &bq.JobConfiguration{
			Query: &bq.JobConfigurationQuery{
				Query: "q",
				DefaultDataset: &bq.DatasetReference{
					ProjectId: "def-project-id",
					DatasetId: "def-dataset-id",
				},
			},
		},
		JobReference: &bq.JobReference{
			JobId:     "ajob",
			ProjectId: "project-id",
		},
	}

	if _, err := query.Run(context.Background()); err != nil {
		t.Fatalf("err calling Query.Run: %v", err)
	}
	if !reflect.DeepEqual(s.Job, want) {
		t.Errorf("querying: got:\n%v\nwant:\n%v", s.Job, want)
	}
}
64
vendor/cloud.google.com/go/bigquery/read.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"errors"

	"golang.org/x/net/context"
)

func (conf *readTableConf) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
	return s.readTabledata(ctx, conf, token)
}

func (conf *readTableConf) setPaging(pc *pagingConf) { conf.paging = *pc }

// Read fetches the contents of the table.
func (t *Table) Read(ctx context.Context) *RowIterator {
	return newRowIterator(ctx, t.c.service, &readTableConf{
		projectID: t.ProjectID,
		datasetID: t.DatasetID,
		tableID:   t.TableID,
	})
}

func (conf *readQueryConf) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
	return s.readQuery(ctx, conf, token)
}

func (conf *readQueryConf) setPaging(pc *pagingConf) { conf.paging = *pc }

// Read fetches the results of a query job.
// If j is not a query job, Read returns an error.
func (j *Job) Read(ctx context.Context) (*RowIterator, error) {
	if !j.isQuery {
		return nil, errors.New("Cannot read from a non-query job")
	}
	return newRowIterator(ctx, j.service, &readQueryConf{
		projectID: j.projectID,
		jobID:     j.jobID,
	}), nil
}

// Read submits a query for execution and returns the results via a RowIterator.
// It is a shorthand for Query.Run followed by Job.Read.
func (q *Query) Read(ctx context.Context) (*RowIterator, error) {
	job, err := q.Run(ctx)
	if err != nil {
		return nil, err
	}
	return job.Read(ctx)
}
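
A minimal read-loop sketch (assuming a *Query q and a ctx in scope; iterator is google.golang.org/api/iterator, as imported by the tests below):

    it, err := q.Read(ctx)
    if err != nil {
        return err
    }
    for {
        var row []Value
        err := it.Next(&row)
        if err == iterator.Done {
            break
        }
        if err != nil {
            return err
        }
        // process row here
    }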
303
vendor/cloud.google.com/go/bigquery/read_test.go
generated
vendored
Normal file
@ -0,0 +1,303 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"errors"
	"reflect"
	"testing"

	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
)

type readTabledataArgs struct {
	conf *readTableConf
	tok  string
}

type readQueryArgs struct {
	conf *readQueryConf
	tok  string
}

// readServiceStub services read requests by returning data from an in-memory list of values.
type readServiceStub struct {
	// values and pageTokens are used as sources of data to return in response to calls to readTabledata or readQuery.
	values     [][][]Value       // contains pages / rows / columns.
	pageTokens map[string]string // maps incoming page token to returned page token.

	// arguments are recorded for later inspection.
	readTabledataCalls []readTabledataArgs
	readQueryCalls     []readQueryArgs

	service
}

func (s *readServiceStub) readValues(tok string) *readDataResult {
	result := &readDataResult{
		pageToken: s.pageTokens[tok],
		rows:      s.values[0],
	}
	s.values = s.values[1:]

	return result
}
func (s *readServiceStub) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) {
	s.readTabledataCalls = append(s.readTabledataCalls, readTabledataArgs{conf, token})
	return s.readValues(token), nil
}

func (s *readServiceStub) readQuery(ctx context.Context, conf *readQueryConf, token string) (*readDataResult, error) {
	s.readQueryCalls = append(s.readQueryCalls, readQueryArgs{conf, token})
	return s.readValues(token), nil
}

func TestRead(t *testing.T) {
	// The data for the service stub to return is populated for each test case in the testCases for loop.
	ctx := context.Background()
	service := &readServiceStub{}
	c := &Client{
		projectID: "project-id",
		service:   service,
	}

	queryJob := &Job{
		projectID: "project-id",
		jobID:     "job-id",
		service:   service,
		isQuery:   true,
	}

	for _, readFunc := range []func() *RowIterator{
		func() *RowIterator {
			return c.Dataset("dataset-id").Table("table-id").Read(ctx)
		},
		func() *RowIterator {
			it, err := queryJob.Read(ctx)
			if err != nil {
				t.Fatal(err)
			}
			return it
		},
	} {
		testCases := []struct {
			data       [][][]Value
			pageTokens map[string]string
			want       [][]Value
		}{
			{
				data:       [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}},
				pageTokens: map[string]string{"": "a", "a": ""},
				want:       [][]Value{{1, 2}, {11, 12}, {30, 40}, {31, 41}},
			},
			{
				data:       [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}},
				pageTokens: map[string]string{"": ""}, // no more pages after first one.
				want:       [][]Value{{1, 2}, {11, 12}},
			},
		}
		for _, tc := range testCases {
			service.values = tc.data
			service.pageTokens = tc.pageTokens
			if got, ok := collectValues(t, readFunc()); ok {
				if !reflect.DeepEqual(got, tc.want) {
					t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want)
				}
			}
		}
	}
}

func collectValues(t *testing.T, it *RowIterator) ([][]Value, bool) {
	var got [][]Value
	for {
		var vals []Value
		err := it.Next(&vals)
		if err == iterator.Done {
			break
		}
		if err != nil {
			t.Errorf("err calling Next: %v", err)
			return nil, false
		}
		got = append(got, vals)
	}
	return got, true
}

func TestNoMoreValues(t *testing.T) {
	c := &Client{
		projectID: "project-id",
		service: &readServiceStub{
			values: [][][]Value{{{1, 2}, {11, 12}}},
		},
	}
	it := c.Dataset("dataset-id").Table("table-id").Read(context.Background())
	var vals []Value
	// We expect to retrieve two values and then fail on the next attempt.
	if err := it.Next(&vals); err != nil {
		t.Fatalf("Next: got: %v: want: nil", err)
	}
	if err := it.Next(&vals); err != nil {
		t.Fatalf("Next: got: %v: want: nil", err)
	}
	if err := it.Next(&vals); err != iterator.Done {
		t.Fatalf("Next: got: %v: want: iterator.Done", err)
	}
}

// delayedReadStub simulates reading results from a query that has not yet
// completed. Its readQuery method initially reports that the query job is not
// yet complete. Subsequently, it proxies the request through to another
// service stub.
type delayedReadStub struct {
	numDelays int

	readServiceStub
}

func (s *delayedReadStub) readQuery(ctx context.Context, conf *readQueryConf, token string) (*readDataResult, error) {
	if s.numDelays > 0 {
		s.numDelays--
		return nil, errIncompleteJob
	}
	return s.readServiceStub.readQuery(ctx, conf, token)
}

// TestIncompleteJob tests that an Iterator which reads from a query job will block until the job is complete.
func TestIncompleteJob(t *testing.T) {
	service := &delayedReadStub{
		numDelays: 2,
		readServiceStub: readServiceStub{
			values: [][][]Value{{{1, 2}}},
		},
	}
	queryJob := &Job{
		projectID: "project-id",
		jobID:     "job-id",
		service:   service,
		isQuery:   true,
	}
	it, err := queryJob.Read(context.Background())
	if err != nil {
		t.Fatalf("err calling Read: %v", err)
	}
	var got []Value
	want := []Value{1, 2}
	if err := it.Next(&got); err != nil {
		t.Fatalf("Next: got: %v: want: nil", err)
	}
	if service.numDelays != 0 {
		t.Errorf("remaining numDelays : got: %v want:0", service.numDelays)
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("reading: got:\n%v\nwant:\n%v", got, want)
	}
}

type errorReadService struct {
	service
}

var errBang = errors.New("bang!")

func (s *errorReadService) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) {
	return nil, errBang
}

func TestReadError(t *testing.T) {
	// test that service read errors are propagated back to the caller.
	c := &Client{
		projectID: "project-id",
		service:   &errorReadService{},
	}
	it := c.Dataset("dataset-id").Table("table-id").Read(context.Background())
	var vals []Value
	if err := it.Next(&vals); err != errBang {
		t.Fatalf("Get: got: %v: want: %v", err, errBang)
	}
}

func TestReadTabledataOptions(t *testing.T) {
	// test that read options are propagated.
	s := &readServiceStub{
		values: [][][]Value{{{1, 2}}},
	}
	c := &Client{
		projectID: "project-id",
		service:   s,
	}
	it := c.Dataset("dataset-id").Table("table-id").Read(context.Background())
	it.PageInfo().MaxSize = 5
	var vals []Value
	if err := it.Next(&vals); err != nil {
		t.Fatal(err)
	}
	want := []readTabledataArgs{{
		conf: &readTableConf{
			projectID: "project-id",
			datasetID: "dataset-id",
			tableID:   "table-id",
			paging: pagingConf{
				recordsPerRequest:    5,
				setRecordsPerRequest: true,
			},
		},
		tok: "",
	}}

	if !reflect.DeepEqual(s.readTabledataCalls, want) {
		t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want)
	}
}

func TestReadQueryOptions(t *testing.T) {
	// test that read options are propagated.
	s := &readServiceStub{
		values: [][][]Value{{{1, 2}}},
	}
	queryJob := &Job{
		projectID: "project-id",
		jobID:     "job-id",
		service:   s,
		isQuery:   true,
	}
	it, err := queryJob.Read(context.Background())
	if err != nil {
		t.Fatalf("err calling Read: %v", err)
	}
	it.PageInfo().MaxSize = 5
	var vals []Value
	if err := it.Next(&vals); err != nil {
		t.Fatalf("Next: got: %v: want: nil", err)
	}

	want := []readQueryArgs{{
		conf: &readQueryConf{
			projectID: "project-id",
			jobID:     "job-id",
			paging: pagingConf{
				recordsPerRequest:    5,
				setRecordsPerRequest: true,
			},
		},
		tok: "",
	}}

	if !reflect.DeepEqual(s.readQueryCalls, want) {
		t.Errorf("reading: got:\n%v\nwant:\n%v", s.readQueryCalls, want)
	}
}
312
vendor/cloud.google.com/go/bigquery/schema.go
generated
vendored
Normal file
@ -0,0 +1,312 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"errors"
	"fmt"
	"reflect"

	"cloud.google.com/go/internal/atomiccache"

	bq "google.golang.org/api/bigquery/v2"
)

// Schema describes the fields in a table or query result.
type Schema []*FieldSchema

type FieldSchema struct {
	// The field name.
	// Must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_),
	// and must start with a letter or underscore.
	// The maximum length is 128 characters.
	Name string

	// A description of the field. The maximum length is 16,384 characters.
	Description string

	// Whether the field may contain multiple values.
	Repeated bool
	// Whether the field is required. Ignored if Repeated is true.
	Required bool

	// The field data type. If Type is Record, then this field contains a nested schema,
	// which is described by Schema.
	Type FieldType
	// Describes the nested schema if Type is set to Record.
	Schema Schema
}

func (fs *FieldSchema) asTableFieldSchema() *bq.TableFieldSchema {
	tfs := &bq.TableFieldSchema{
		Description: fs.Description,
		Name:        fs.Name,
		Type:        string(fs.Type),
	}

	if fs.Repeated {
		tfs.Mode = "REPEATED"
	} else if fs.Required {
		tfs.Mode = "REQUIRED"
	} // else leave as default, which is interpreted as NULLABLE.

	for _, f := range fs.Schema {
		tfs.Fields = append(tfs.Fields, f.asTableFieldSchema())
	}

	return tfs
}

func (s Schema) asTableSchema() *bq.TableSchema {
	var fields []*bq.TableFieldSchema
	for _, f := range s {
		fields = append(fields, f.asTableFieldSchema())
	}
	return &bq.TableSchema{Fields: fields}
}

// customizeCreateTable allows a Schema to be used directly as an option to CreateTable.
func (s Schema) customizeCreateTable(conf *createTableConf) {
	conf.schema = s.asTableSchema()
}

func convertTableFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
	fs := &FieldSchema{
		Description: tfs.Description,
		Name:        tfs.Name,
		Repeated:    tfs.Mode == "REPEATED",
		Required:    tfs.Mode == "REQUIRED",
		Type:        FieldType(tfs.Type),
	}

	for _, f := range tfs.Fields {
		fs.Schema = append(fs.Schema, convertTableFieldSchema(f))
	}
	return fs
}

func convertTableSchema(ts *bq.TableSchema) Schema {
	var s Schema
	for _, f := range ts.Fields {
		s = append(s, convertTableFieldSchema(f))
	}
	return s
}

type FieldType string

const (
	StringFieldType    FieldType = "STRING"
	BytesFieldType     FieldType = "BYTES"
	IntegerFieldType   FieldType = "INTEGER"
	FloatFieldType     FieldType = "FLOAT"
	BooleanFieldType   FieldType = "BOOLEAN"
	TimestampFieldType FieldType = "TIMESTAMP"
	RecordFieldType    FieldType = "RECORD"
	DateFieldType      FieldType = "DATE"
	TimeFieldType      FieldType = "TIME"
	DateTimeFieldType  FieldType = "DATETIME"
)

var (
	errNoStruct             = errors.New("bigquery: can only infer schema from struct or pointer to struct")
	errUnsupportedFieldType = errors.New("bigquery: unsupported type of field in struct")
	errInvalidFieldName     = errors.New("bigquery: invalid name of field in struct")
)
|
||||
|
||||
var typeOfByteSlice = reflect.TypeOf([]byte{})
|
||||
|
||||
// InferSchema tries to derive a BigQuery schema from the supplied struct value.
|
||||
// NOTE: All fields in the returned Schema are configured to be required,
|
||||
// unless the corresponding field in the supplied struct is a slice or array.
|
||||
//
|
||||
// It is considered an error if the struct (including nested structs) contains
|
||||
// any exported fields that are pointers or one of the following types:
|
||||
// uint, uint64, uintptr, map, interface, complex64, complex128, func, chan.
|
||||
// In these cases, an error will be returned.
|
||||
// Future versions may handle these cases without error.
|
||||
//
|
||||
// Recursively defined structs are also disallowed.
|
||||
func InferSchema(st interface{}) (Schema, error) {
|
||||
return inferSchemaReflectCached(reflect.TypeOf(st))
|
||||
}
|
||||
|
||||
var schemaCache atomiccache.Cache
|
||||
|
||||
type cacheVal struct {
|
||||
schema Schema
|
||||
err error
|
||||
}
|
||||
|
||||
func inferSchemaReflectCached(t reflect.Type) (Schema, error) {
|
||||
cv := schemaCache.Get(t, func() interface{} {
|
||||
s, err := inferSchemaReflect(t)
|
||||
return cacheVal{s, err}
|
||||
}).(cacheVal)
|
||||
return cv.schema, cv.err
|
||||
}
|
||||
|
||||
func inferSchemaReflect(t reflect.Type) (Schema, error) {
|
||||
rec, err := hasRecursiveType(t, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if rec {
|
||||
return nil, fmt.Errorf("bigquery: schema inference for recursive type %s", t)
|
||||
}
|
||||
return inferStruct(t)
|
||||
}
|
||||
|
||||
func inferStruct(t reflect.Type) (Schema, error) {
|
||||
switch t.Kind() {
|
||||
case reflect.Ptr:
|
||||
if t.Elem().Kind() != reflect.Struct {
|
||||
return nil, errNoStruct
|
||||
}
|
||||
t = t.Elem()
|
||||
fallthrough
|
||||
|
||||
case reflect.Struct:
|
||||
return inferFields(t)
|
||||
default:
|
||||
return nil, errNoStruct
|
||||
}
|
||||
}
|
||||
|
||||
// inferFieldSchema infers the FieldSchema for a Go type
|
||||
func inferFieldSchema(rt reflect.Type) (*FieldSchema, error) {
|
||||
switch rt {
|
||||
case typeOfByteSlice:
|
||||
return &FieldSchema{Required: true, Type: BytesFieldType}, nil
|
||||
case typeOfGoTime:
|
||||
return &FieldSchema{Required: true, Type: TimestampFieldType}, nil
|
||||
case typeOfDate:
|
||||
return &FieldSchema{Required: true, Type: DateFieldType}, nil
|
||||
case typeOfTime:
|
||||
return &FieldSchema{Required: true, Type: TimeFieldType}, nil
|
||||
case typeOfDateTime:
|
||||
return &FieldSchema{Required: true, Type: DateTimeFieldType}, nil
|
||||
}
|
||||
if isSupportedIntType(rt) {
|
||||
return &FieldSchema{Required: true, Type: IntegerFieldType}, nil
|
||||
}
|
||||
switch rt.Kind() {
|
||||
case reflect.Slice, reflect.Array:
|
||||
et := rt.Elem()
|
||||
if et != typeOfByteSlice && (et.Kind() == reflect.Slice || et.Kind() == reflect.Array) {
|
||||
// Multi dimensional slices/arrays are not supported by BigQuery
|
||||
return nil, errUnsupportedFieldType
|
||||
}
|
||||
|
||||
f, err := inferFieldSchema(et)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.Repeated = true
|
||||
f.Required = false
|
||||
return f, nil
|
||||
case reflect.Struct, reflect.Ptr:
|
||||
nested, err := inferStruct(rt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &FieldSchema{Required: true, Type: RecordFieldType, Schema: nested}, nil
|
||||
case reflect.String:
|
||||
return &FieldSchema{Required: true, Type: StringFieldType}, nil
|
||||
case reflect.Bool:
|
||||
return &FieldSchema{Required: true, Type: BooleanFieldType}, nil
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return &FieldSchema{Required: true, Type: FloatFieldType}, nil
|
||||
default:
|
||||
return nil, errUnsupportedFieldType
|
||||
}
|
||||
}
|
||||
|
||||
// inferFields extracts all exported field types from struct type.
|
||||
func inferFields(rt reflect.Type) (Schema, error) {
|
||||
var s Schema
|
||||
fields, err := fieldCache.Fields(rt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, field := range fields {
|
||||
f, err := inferFieldSchema(field.Type)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.Name = field.Name
|
||||
s = append(s, f)
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// isSupportedIntType reports whether t can be properly represented by the
|
||||
// BigQuery INTEGER/INT64 type.
|
||||
func isSupportedIntType(t reflect.Type) bool {
|
||||
switch t.Kind() {
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int,
|
||||
reflect.Uint8, reflect.Uint16, reflect.Uint32:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// typeList is a linked list of reflect.Types.
|
||||
type typeList struct {
|
||||
t reflect.Type
|
||||
next *typeList
|
||||
}
|
||||
|
||||
func (l *typeList) has(t reflect.Type) bool {
|
||||
for l != nil {
|
||||
if l.t == t {
|
||||
return true
|
||||
}
|
||||
l = l.next
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// hasRecursiveType reports whether t or any type inside t refers to itself, directly or indirectly,
|
||||
// via exported fields. (Schema inference ignores unexported fields.)
|
||||
func hasRecursiveType(t reflect.Type, seen *typeList) (bool, error) {
|
||||
if t.Kind() == reflect.Ptr {
|
||||
t = t.Elem()
|
||||
}
|
||||
if t.Kind() != reflect.Struct {
|
||||
return false, nil
|
||||
}
|
||||
if seen.has(t) {
|
||||
return true, nil
|
||||
}
|
||||
fields, err := fieldCache.Fields(t)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
seen = &typeList{t, seen}
|
||||
// Because seen is a linked list, additions to it from one field's
|
||||
// recursive call will not affect the value for subsequent fields' calls.
|
||||
for _, field := range fields {
|
||||
ok, err := hasRecursiveType(field.Type, seen)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if ok {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
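Taken together, InferSchema maps exported struct fields to required columns, and slices or arrays to repeated, non-required ones. A hedged illustration of that mapping (not part of the vendored file; `Person` is a hypothetical type):

    type Person struct {
        Name string
        Age  int
        Tags []string
    }

    schema, err := bigquery.InferSchema(Person{})
    // On success, schema is equivalent to:
    //   Schema{
    //       {Name: "Name", Required: true, Type: StringFieldType},
    //       {Name: "Age", Required: true, Type: IntegerFieldType},
    //       {Name: "Tags", Repeated: true, Type: StringFieldType},
    //   }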
792 vendor/cloud.google.com/go/bigquery/schema_test.go generated vendored Normal file
@ -0,0 +1,792 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "fmt"
    "reflect"
    "testing"
    "time"

    "cloud.google.com/go/civil"
    "cloud.google.com/go/internal/pretty"

    bq "google.golang.org/api/bigquery/v2"
)

func (fs *FieldSchema) GoString() string {
    if fs == nil {
        return "<nil>"
    }

    return fmt.Sprintf("{Name:%s Description:%s Repeated:%t Required:%t Type:%s Schema:%s}",
        fs.Name,
        fs.Description,
        fs.Repeated,
        fs.Required,
        fs.Type,
        fmt.Sprintf("%#v", fs.Schema),
    )
}

func bqTableFieldSchema(desc, name, typ, mode string) *bq.TableFieldSchema {
    return &bq.TableFieldSchema{
        Description: desc,
        Name:        name,
        Mode:        mode,
        Type:        typ,
    }
}

func fieldSchema(desc, name, typ string, repeated, required bool) *FieldSchema {
    return &FieldSchema{
        Description: desc,
        Name:        name,
        Repeated:    repeated,
        Required:    required,
        Type:        FieldType(typ),
    }
}

func TestSchemaConversion(t *testing.T) {
    testCases := []struct {
        schema   Schema
        bqSchema *bq.TableSchema
    }{
        {
            // required
            bqSchema: &bq.TableSchema{
                Fields: []*bq.TableFieldSchema{
                    bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
                },
            },
            schema: Schema{
                fieldSchema("desc", "name", "STRING", false, true),
            },
        },
        {
            // repeated
            bqSchema: &bq.TableSchema{
                Fields: []*bq.TableFieldSchema{
                    bqTableFieldSchema("desc", "name", "STRING", "REPEATED"),
                },
            },
            schema: Schema{
                fieldSchema("desc", "name", "STRING", true, false),
            },
        },
        {
            // nullable, string
            bqSchema: &bq.TableSchema{
                Fields: []*bq.TableFieldSchema{
                    bqTableFieldSchema("desc", "name", "STRING", ""),
                },
            },
            schema: Schema{
                fieldSchema("desc", "name", "STRING", false, false),
            },
        },
        {
            // integer
            bqSchema: &bq.TableSchema{
                Fields: []*bq.TableFieldSchema{
                    bqTableFieldSchema("desc", "name", "INTEGER", ""),
                },
            },
            schema: Schema{
                fieldSchema("desc", "name", "INTEGER", false, false),
            },
        },
        {
            // float
            bqSchema: &bq.TableSchema{
                Fields: []*bq.TableFieldSchema{
                    bqTableFieldSchema("desc", "name", "FLOAT", ""),
                },
            },
            schema: Schema{
                fieldSchema("desc", "name", "FLOAT", false, false),
            },
        },
        {
            // boolean
            bqSchema: &bq.TableSchema{
                Fields: []*bq.TableFieldSchema{
                    bqTableFieldSchema("desc", "name", "BOOLEAN", ""),
                },
            },
            schema: Schema{
                fieldSchema("desc", "name", "BOOLEAN", false, false),
            },
        },
        {
            // timestamp
            bqSchema: &bq.TableSchema{
                Fields: []*bq.TableFieldSchema{
                    bqTableFieldSchema("desc", "name", "TIMESTAMP", ""),
                },
            },
            schema: Schema{
                fieldSchema("desc", "name", "TIMESTAMP", false, false),
            },
        },
        {
            // civil times
            bqSchema: &bq.TableSchema{
                Fields: []*bq.TableFieldSchema{
                    bqTableFieldSchema("desc", "f1", "TIME", ""),
                    bqTableFieldSchema("desc", "f2", "DATE", ""),
                    bqTableFieldSchema("desc", "f3", "DATETIME", ""),
                },
            },
            schema: Schema{
                fieldSchema("desc", "f1", "TIME", false, false),
                fieldSchema("desc", "f2", "DATE", false, false),
                fieldSchema("desc", "f3", "DATETIME", false, false),
            },
        },
        {
            // nested
            bqSchema: &bq.TableSchema{
                Fields: []*bq.TableFieldSchema{
                    {
                        Description: "An outer schema wrapping a nested schema",
                        Name:        "outer",
                        Mode:        "REQUIRED",
                        Type:        "RECORD",
                        Fields: []*bq.TableFieldSchema{
                            bqTableFieldSchema("inner field", "inner", "STRING", ""),
                        },
                    },
                },
            },
            schema: Schema{
                &FieldSchema{
                    Description: "An outer schema wrapping a nested schema",
                    Name:        "outer",
                    Required:    true,
                    Type:        "RECORD",
                    Schema: []*FieldSchema{
                        {
                            Description: "inner field",
                            Name:        "inner",
                            Type:        "STRING",
                        },
                    },
                },
            },
        },
    }

    for _, tc := range testCases {
        bqSchema := tc.schema.asTableSchema()
        if !reflect.DeepEqual(bqSchema, tc.bqSchema) {
            t.Errorf("converting to TableSchema: got:\n%v\nwant:\n%v",
                pretty.Value(bqSchema), pretty.Value(tc.bqSchema))
        }
        schema := convertTableSchema(tc.bqSchema)
        if !reflect.DeepEqual(schema, tc.schema) {
            t.Errorf("converting to Schema: got:\n%v\nwant:\n%v", schema, tc.schema)
        }
    }
}

type allStrings struct {
    String    string
    ByteSlice []byte
}

type allSignedIntegers struct {
    Int64 int64
    Int32 int32
    Int16 int16
    Int8  int8
    Int   int
}

type allUnsignedIntegers struct {
    Uint32 uint32
    Uint16 uint16
    Uint8  uint8
}

type allFloat struct {
    Float64 float64
    Float32 float32
    // NOTE: Complex32 and Complex64 are unsupported by BigQuery
}

type allBoolean struct {
    Bool bool
}

type allTime struct {
    Timestamp time.Time
    Time      civil.Time
    Date      civil.Date
    DateTime  civil.DateTime
}

func reqField(name, typ string) *FieldSchema {
    return &FieldSchema{
        Name:     name,
        Type:     FieldType(typ),
        Required: true,
    }
}

func TestSimpleInference(t *testing.T) {
    testCases := []struct {
        in   interface{}
        want Schema
    }{
        {
            in: allSignedIntegers{},
            want: Schema{
                reqField("Int64", "INTEGER"),
                reqField("Int32", "INTEGER"),
                reqField("Int16", "INTEGER"),
                reqField("Int8", "INTEGER"),
                reqField("Int", "INTEGER"),
            },
        },
        {
            in: allUnsignedIntegers{},
            want: Schema{
                reqField("Uint32", "INTEGER"),
                reqField("Uint16", "INTEGER"),
                reqField("Uint8", "INTEGER"),
            },
        },
        {
            in: allFloat{},
            want: Schema{
                reqField("Float64", "FLOAT"),
                reqField("Float32", "FLOAT"),
            },
        },
        {
            in: allBoolean{},
            want: Schema{
                reqField("Bool", "BOOLEAN"),
            },
        },
        {
            in: &allBoolean{},
            want: Schema{
                reqField("Bool", "BOOLEAN"),
            },
        },
        {
            in: allTime{},
            want: Schema{
                reqField("Timestamp", "TIMESTAMP"),
                reqField("Time", "TIME"),
                reqField("Date", "DATE"),
                reqField("DateTime", "DATETIME"),
            },
        },
        {
            in: allStrings{},
            want: Schema{
                reqField("String", "STRING"),
                reqField("ByteSlice", "BYTES"),
            },
        },
    }
    for _, tc := range testCases {
        got, err := InferSchema(tc.in)
        if err != nil {
            t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err)
        }
        if !reflect.DeepEqual(got, tc.want) {
            t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in,
                pretty.Value(got), pretty.Value(tc.want))
        }
    }
}

type containsNested struct {
    hidden    string
    NotNested int
    Nested    struct {
        Inside int
    }
}

type containsDoubleNested struct {
    NotNested int
    Nested    struct {
        InsideNested struct {
            Inside int
        }
    }
}

type ptrNested struct {
    Ptr *struct{ Inside int }
}

type dup struct { // more than one field of the same struct type
    A, B allBoolean
}

func TestNestedInference(t *testing.T) {
    testCases := []struct {
        in   interface{}
        want Schema
    }{
        {
            in: containsNested{},
            want: Schema{
                reqField("NotNested", "INTEGER"),
                &FieldSchema{
                    Name:     "Nested",
                    Required: true,
                    Type:     "RECORD",
                    Schema:   Schema{reqField("Inside", "INTEGER")},
                },
            },
        },
        {
            in: containsDoubleNested{},
            want: Schema{
                reqField("NotNested", "INTEGER"),
                &FieldSchema{
                    Name:     "Nested",
                    Required: true,
                    Type:     "RECORD",
                    Schema: Schema{
                        {
                            Name:     "InsideNested",
                            Required: true,
                            Type:     "RECORD",
                            Schema:   Schema{reqField("Inside", "INTEGER")},
                        },
                    },
                },
            },
        },
        {
            in: ptrNested{},
            want: Schema{
                &FieldSchema{
                    Name:     "Ptr",
                    Required: true,
                    Type:     "RECORD",
                    Schema:   Schema{reqField("Inside", "INTEGER")},
                },
            },
        },
        {
            in: dup{},
            want: Schema{
                &FieldSchema{
                    Name:     "A",
                    Required: true,
                    Type:     "RECORD",
                    Schema:   Schema{reqField("Bool", "BOOLEAN")},
                },
                &FieldSchema{
                    Name:     "B",
                    Required: true,
                    Type:     "RECORD",
                    Schema:   Schema{reqField("Bool", "BOOLEAN")},
                },
            },
        },
    }

    for _, tc := range testCases {
        got, err := InferSchema(tc.in)
        if err != nil {
            t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err)
        }
        if !reflect.DeepEqual(got, tc.want) {
            t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in,
                pretty.Value(got), pretty.Value(tc.want))
        }
    }
}

type repeated struct {
    NotRepeated       []byte
    RepeatedByteSlice [][]byte
    Slice             []int
    Array             [5]bool
}

type nestedRepeated struct {
    NotRepeated int
    Repeated    []struct {
        Inside int
    }
    RepeatedPtr []*struct{ Inside int }
}

func repField(name, typ string) *FieldSchema {
    return &FieldSchema{
        Name:     name,
        Type:     FieldType(typ),
        Repeated: true,
    }
}

func TestRepeatedInference(t *testing.T) {
    testCases := []struct {
        in   interface{}
        want Schema
    }{
        {
            in: repeated{},
            want: Schema{
                reqField("NotRepeated", "BYTES"),
                repField("RepeatedByteSlice", "BYTES"),
                repField("Slice", "INTEGER"),
                repField("Array", "BOOLEAN"),
            },
        },
        {
            in: nestedRepeated{},
            want: Schema{
                reqField("NotRepeated", "INTEGER"),
                {
                    Name:     "Repeated",
                    Repeated: true,
                    Type:     "RECORD",
                    Schema:   Schema{reqField("Inside", "INTEGER")},
                },
                {
                    Name:     "RepeatedPtr",
                    Repeated: true,
                    Type:     "RECORD",
                    Schema:   Schema{reqField("Inside", "INTEGER")},
                },
            },
        },
    }

    for i, tc := range testCases {
        got, err := InferSchema(tc.in)
        if err != nil {
            t.Fatalf("%d: error inferring TableSchema: %v", i, err)
        }
        if !reflect.DeepEqual(got, tc.want) {
            t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i,
                pretty.Value(got), pretty.Value(tc.want))
        }
    }
}

type Embedded struct {
    Embedded int
}

type embedded struct {
    Embedded2 int
}

type nestedEmbedded struct {
    Embedded
    embedded
}

func TestEmbeddedInference(t *testing.T) {
    got, err := InferSchema(nestedEmbedded{})
    if err != nil {
        t.Fatal(err)
    }
    want := Schema{
        reqField("Embedded", "INTEGER"),
        reqField("Embedded2", "INTEGER"),
    }
    if !reflect.DeepEqual(got, want) {
        t.Errorf("got %v, want %v", pretty.Value(got), pretty.Value(want))
    }
}

func TestRecursiveInference(t *testing.T) {
    type List struct {
        Val  int
        Next *List
    }

    _, err := InferSchema(List{})
    if err == nil {
        t.Fatal("got nil, want error")
    }
}

type withTags struct {
    NoTag         int
    ExcludeTag    int `bigquery:"-"`
    SimpleTag     int `bigquery:"simple_tag"`
    UnderscoreTag int `bigquery:"_id"`
    MixedCase     int `bigquery:"MIXEDcase"`
}

type withTagsNested struct {
    Nested          withTags `bigquery:"nested"`
    NestedAnonymous struct {
        ExcludeTag int `bigquery:"-"`
        Inside     int `bigquery:"inside"`
    } `bigquery:"anon"`
}

type withTagsRepeated struct {
    Repeated          []withTags `bigquery:"repeated"`
    RepeatedAnonymous []struct {
        ExcludeTag int `bigquery:"-"`
        Inside     int `bigquery:"inside"`
    } `bigquery:"anon"`
}

type withTagsEmbedded struct {
    withTags
}

var withTagsSchema = Schema{
    reqField("NoTag", "INTEGER"),
    reqField("simple_tag", "INTEGER"),
    reqField("_id", "INTEGER"),
    reqField("MIXEDcase", "INTEGER"),
}

func TestTagInference(t *testing.T) {
    testCases := []struct {
        in   interface{}
        want Schema
    }{
        {
            in:   withTags{},
            want: withTagsSchema,
        },
        {
            in: withTagsNested{},
            want: Schema{
                &FieldSchema{
                    Name:     "nested",
                    Required: true,
                    Type:     "RECORD",
                    Schema:   withTagsSchema,
                },
                &FieldSchema{
                    Name:     "anon",
                    Required: true,
                    Type:     "RECORD",
                    Schema:   Schema{reqField("inside", "INTEGER")},
                },
            },
        },
        {
            in: withTagsRepeated{},
            want: Schema{
                &FieldSchema{
                    Name:     "repeated",
                    Repeated: true,
                    Type:     "RECORD",
                    Schema:   withTagsSchema,
                },
                &FieldSchema{
                    Name:     "anon",
                    Repeated: true,
                    Type:     "RECORD",
                    Schema:   Schema{reqField("inside", "INTEGER")},
                },
            },
        },
        {
            in:   withTagsEmbedded{},
            want: withTagsSchema,
        },
    }
    for i, tc := range testCases {
        got, err := InferSchema(tc.in)
        if err != nil {
            t.Fatalf("%d: error inferring TableSchema: %v", i, err)
        }
        if !reflect.DeepEqual(got, tc.want) {
            t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i,
                pretty.Value(got), pretty.Value(tc.want))
        }
    }
}

func TestTagInferenceErrors(t *testing.T) {
    testCases := []struct {
        in  interface{}
        err error
    }{
        {
            in: struct {
                LongTag int `bigquery:"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxy"`
            }{},
            err: errInvalidFieldName,
        },
        {
            in: struct {
                UnsupporedStartChar int `bigquery:"øab"`
            }{},
            err: errInvalidFieldName,
        },
        {
            in: struct {
                UnsupportedEndChar int `bigquery:"abø"`
            }{},
            err: errInvalidFieldName,
        },
        {
            in: struct {
                UnsupportedMiddleChar int `bigquery:"aøb"`
            }{},
            err: errInvalidFieldName,
        },
        {
            in: struct {
                StartInt int `bigquery:"1abc"`
            }{},
            err: errInvalidFieldName,
        },
        {
            in: struct {
                Hyphens int `bigquery:"a-b"`
            }{},
            err: errInvalidFieldName,
        },
        {
            in: struct {
                OmitEmpty int `bigquery:"abc,omitempty"`
            }{},
            err: errInvalidFieldName,
        },
    }
    for i, tc := range testCases {
        want := tc.err
        _, got := InferSchema(tc.in)
        if !reflect.DeepEqual(got, want) {
            t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, want)
        }
    }
}

func TestSchemaErrors(t *testing.T) {
    testCases := []struct {
        in  interface{}
        err error
    }{
        {
            in:  []byte{},
            err: errNoStruct,
        },
        {
            in:  new(int),
            err: errNoStruct,
        },
        {
            in:  struct{ Uint uint }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ Uint64 uint64 }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ Uintptr uintptr }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ Complex complex64 }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ Map map[string]int }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ Chan chan bool }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ Ptr *int }{},
            err: errNoStruct,
        },
        {
            in:  struct{ Interface interface{} }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ MultiDimensional [][]int }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ MultiDimensional [][][]byte }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ ChanSlice []chan bool }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ NestedChan struct{ Chan []chan bool } }{},
            err: errUnsupportedFieldType,
        },
    }
    for _, tc := range testCases {
        want := tc.err
        _, got := InferSchema(tc.in)
        if !reflect.DeepEqual(got, want) {
            t.Errorf("%#v: got:\n%#v\nwant:\n%#v", tc.in, got, want)
        }
    }
}

func TestHasRecursiveType(t *testing.T) {
    type (
        nonStruct int
        nonRec    struct{ A string }
        dup       struct{ A, B nonRec }
        rec       struct {
            A int
            B *rec
        }
        recUnexported struct {
            A int
            b *rec
        }
        hasRec struct {
            A int
            R *rec
        }
    )
    for _, test := range []struct {
        in   interface{}
        want bool
    }{
        {nonStruct(0), false},
        {nonRec{}, false},
        {dup{}, false},
        {rec{}, true},
        {recUnexported{}, false},
        {hasRec{}, true},
    } {
        got, err := hasRecursiveType(reflect.TypeOf(test.in), nil)
        if err != nil {
            t.Fatal(err)
        }
        if got != test.want {
            t.Errorf("%T: got %t, want %t", test.in, got, test.want)
        }
    }
}
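The tag tests above pin down the `bigquery` struct-tag behaviour: a tag renames the column, and `-` drops the field. A short illustration following the same rules (hypothetical struct, not part of the vendored tests):

    type Row struct {
        NoTag   int
        Renamed int `bigquery:"simple_tag"`
        Skipped int `bigquery:"-"`
    }
    // InferSchema(Row{}) produces required INTEGER fields named
    // "NoTag" and "simple_tag"; Skipped is omitted entirely.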
623 vendor/cloud.google.com/go/bigquery/service.go generated vendored Normal file
@ -0,0 +1,623 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "errors"
    "fmt"
    "io"
    "net/http"
    "sync"
    "time"

    "cloud.google.com/go/internal"
    gax "github.com/googleapis/gax-go"

    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
    "google.golang.org/api/googleapi"
)

// service provides an internal abstraction to isolate the generated
// BigQuery API; most of this package uses this interface instead.
// The single implementation, *bigqueryService, contains all the knowledge
// of the generated BigQuery API.
type service interface {
    // Jobs
    insertJob(ctx context.Context, projectId string, conf *insertJobConf) (*Job, error)
    getJobType(ctx context.Context, projectId, jobID string) (jobType, error)
    jobCancel(ctx context.Context, projectId, jobID string) error
    jobStatus(ctx context.Context, projectId, jobID string) (*JobStatus, error)

    // Tables
    createTable(ctx context.Context, conf *createTableConf) error
    getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error)
    deleteTable(ctx context.Context, projectID, datasetID, tableID string) error

    // listTables returns a page of Tables and a next page token. Note: the Tables do not have their c field populated.
    listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error)
    patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error)

    // Table data
    readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error)
    insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error

    // Datasets
    insertDataset(ctx context.Context, datasetID, projectID string) error
    deleteDataset(ctx context.Context, datasetID, projectID string) error
    getDatasetMetadata(ctx context.Context, projectID, datasetID string) (*DatasetMetadata, error)

    // Misc

    // readQuery reads data resulting from a query job. If the job is
    // incomplete, an errIncompleteJob is returned. readQuery may be called
    // repeatedly to poll for job completion.
    readQuery(ctx context.Context, conf *readQueryConf, pageToken string) (*readDataResult, error)

    // listDatasets returns a page of Datasets and a next page token. Note: the Datasets do not have their c field populated.
    listDatasets(ctx context.Context, projectID string, maxResults int, pageToken string, all bool, filter string) ([]*Dataset, string, error)
}

type bigqueryService struct {
    s *bq.Service
}

func newBigqueryService(client *http.Client, endpoint string) (*bigqueryService, error) {
    s, err := bq.New(client)
    if err != nil {
        return nil, fmt.Errorf("constructing bigquery client: %v", err)
    }
    s.BasePath = endpoint

    return &bigqueryService{s: s}, nil
}

// getPages calls the supplied getPage function repeatedly until there are no pages left to get.
// token is the token of the initial page to start from. Use an empty string to start from the beginning.
func getPages(token string, getPage func(token string) (nextToken string, err error)) error {
    for {
        var err error
        token, err = getPage(token)
        if err != nil {
            return err
        }
        if token == "" {
            return nil
        }
    }
}

type insertJobConf struct {
    job   *bq.Job
    media io.Reader
}

func (s *bigqueryService) insertJob(ctx context.Context, projectID string, conf *insertJobConf) (*Job, error) {
    call := s.s.Jobs.Insert(projectID, conf.job).Context(ctx)
    if conf.media != nil {
        call.Media(conf.media)
    }
    res, err := call.Do()
    if err != nil {
        return nil, err
    }
    return &Job{service: s, projectID: projectID, jobID: res.JobReference.JobId}, nil
}

type pagingConf struct {
    recordsPerRequest    int64
    setRecordsPerRequest bool

    startIndex uint64
}

type readTableConf struct {
    projectID, datasetID, tableID string
    paging                        pagingConf
    schema                        Schema // lazily initialized when the first page of data is fetched.
}

type readDataResult struct {
    pageToken string
    rows      [][]Value
    totalRows uint64
    schema    Schema
}

type readQueryConf struct {
    projectID, jobID string
    paging           pagingConf
}

func (s *bigqueryService) readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error) {
    // Prepare request to fetch one page of table data.
    req := s.s.Tabledata.List(conf.projectID, conf.datasetID, conf.tableID)

    if pageToken != "" {
        req.PageToken(pageToken)
    } else {
        req.StartIndex(conf.paging.startIndex)
    }

    if conf.paging.setRecordsPerRequest {
        req.MaxResults(conf.paging.recordsPerRequest)
    }

    // Fetch the table schema in the background, if necessary.
    var schemaErr error
    var schemaFetch sync.WaitGroup
    if conf.schema == nil {
        schemaFetch.Add(1)
        go func() {
            defer schemaFetch.Done()
            var t *bq.Table
            t, schemaErr = s.s.Tables.Get(conf.projectID, conf.datasetID, conf.tableID).
                Fields("schema").
                Context(ctx).
                Do()
            if schemaErr == nil && t.Schema != nil {
                conf.schema = convertTableSchema(t.Schema)
            }
        }()
    }

    res, err := req.Context(ctx).Do()
    if err != nil {
        return nil, err
    }

    schemaFetch.Wait()
    if schemaErr != nil {
        return nil, schemaErr
    }

    result := &readDataResult{
        pageToken: res.PageToken,
        totalRows: uint64(res.TotalRows),
        schema:    conf.schema,
    }
    result.rows, err = convertRows(res.Rows, conf.schema)
    if err != nil {
        return nil, err
    }
    return result, nil
}

var errIncompleteJob = errors.New("internal error: query results not available because job is not complete")

// getQueryResultsTimeout controls the maximum duration of a request to the
// BigQuery GetQueryResults endpoint. Setting a long timeout here does not
// cause increased overall latency, as results are returned as soon as they are
// available.
const getQueryResultsTimeout = time.Minute

func (s *bigqueryService) readQuery(ctx context.Context, conf *readQueryConf, pageToken string) (*readDataResult, error) {
    req := s.s.Jobs.GetQueryResults(conf.projectID, conf.jobID).
        TimeoutMs(getQueryResultsTimeout.Nanoseconds() / 1e6)

    if pageToken != "" {
        req.PageToken(pageToken)
    } else {
        req.StartIndex(conf.paging.startIndex)
    }

    if conf.paging.setRecordsPerRequest {
        req.MaxResults(conf.paging.recordsPerRequest)
    }

    res, err := req.Context(ctx).Do()
    if err != nil {
        return nil, err
    }

    if !res.JobComplete {
        return nil, errIncompleteJob
    }
    schema := convertTableSchema(res.Schema)
    result := &readDataResult{
        pageToken: res.PageToken,
        totalRows: res.TotalRows,
        schema:    schema,
    }
    result.rows, err = convertRows(res.Rows, schema)
    if err != nil {
        return nil, err
    }
    return result, nil
}

type insertRowsConf struct {
    templateSuffix      string
    ignoreUnknownValues bool
    skipInvalidRows     bool
}

func (s *bigqueryService) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
    req := &bq.TableDataInsertAllRequest{
        TemplateSuffix:      conf.templateSuffix,
        IgnoreUnknownValues: conf.ignoreUnknownValues,
        SkipInvalidRows:     conf.skipInvalidRows,
    }
    for _, row := range rows {
        m := make(map[string]bq.JsonValue)
        for k, v := range row.Row {
            m[k] = bq.JsonValue(v)
        }
        req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{
            InsertId: row.InsertID,
            Json:     m,
        })
    }
    var res *bq.TableDataInsertAllResponse
    err := runWithRetry(ctx, func() error {
        var err error
        res, err = s.s.Tabledata.InsertAll(projectID, datasetID, tableID, req).Context(ctx).Do()
        return err
    })
    if err != nil {
        return err
    }
    if len(res.InsertErrors) == 0 {
        return nil
    }

    var errs PutMultiError
    for _, e := range res.InsertErrors {
        if int(e.Index) > len(rows) {
            return fmt.Errorf("internal error: unexpected row index: %v", e.Index)
        }
        rie := RowInsertionError{
            InsertID: rows[e.Index].InsertID,
            RowIndex: int(e.Index),
        }
        for _, errp := range e.Errors {
            rie.Errors = append(rie.Errors, errorFromErrorProto(errp))
        }
        errs = append(errs, rie)
    }
    return errs
}

type jobType int

const (
    copyJobType jobType = iota
    extractJobType
    loadJobType
    queryJobType
)

func (s *bigqueryService) getJobType(ctx context.Context, projectID, jobID string) (jobType, error) {
    res, err := s.s.Jobs.Get(projectID, jobID).
        Fields("configuration").
        Context(ctx).
        Do()

    if err != nil {
        return 0, err
    }

    switch {
    case res.Configuration.Copy != nil:
        return copyJobType, nil
    case res.Configuration.Extract != nil:
        return extractJobType, nil
    case res.Configuration.Load != nil:
        return loadJobType, nil
    case res.Configuration.Query != nil:
        return queryJobType, nil
    default:
        return 0, errors.New("unknown job type")
    }
}

func (s *bigqueryService) jobCancel(ctx context.Context, projectID, jobID string) error {
    // Jobs.Cancel returns a job entity, but the only relevant piece of
    // data it may contain (the status of the job) is unreliable. From the
    // docs: "This call will return immediately, and the client will need
    // to poll for the job status to see if the cancel completed
    // successfully". So it would be misleading to return a status.
    _, err := s.s.Jobs.Cancel(projectID, jobID).
        Fields(). // We don't need any of the response data.
        Context(ctx).
        Do()
    return err
}

func (s *bigqueryService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
    res, err := s.s.Jobs.Get(projectID, jobID).
        Fields("status"). // Only fetch what we need.
        Context(ctx).
        Do()
    if err != nil {
        return nil, err
    }
    return jobStatusFromProto(res.Status)
}

var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}

func jobStatusFromProto(status *bq.JobStatus) (*JobStatus, error) {
    state, ok := stateMap[status.State]
    if !ok {
        return nil, fmt.Errorf("unexpected job state: %v", status.State)
    }

    newStatus := &JobStatus{
        State: state,
        err:   nil,
    }
    if err := errorFromErrorProto(status.ErrorResult); state == Done && err != nil {
        newStatus.err = err
    }

    for _, ep := range status.Errors {
        newStatus.Errors = append(newStatus.Errors, errorFromErrorProto(ep))
    }
    return newStatus, nil
}

// listTables returns a subset of tables that belong to a dataset, and a token for fetching the next subset.
func (s *bigqueryService) listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error) {
    var tables []*Table
    req := s.s.Tables.List(projectID, datasetID).
        PageToken(pageToken).
        Context(ctx)
    if pageSize > 0 {
        req.MaxResults(int64(pageSize))
    }
    res, err := req.Do()
    if err != nil {
        return nil, "", err
    }
    for _, t := range res.Tables {
        tables = append(tables, s.convertListedTable(t))
    }
    return tables, res.NextPageToken, nil
}

type createTableConf struct {
    projectID, datasetID, tableID string
    expiration                    time.Time
    viewQuery                     string
    schema                        *bq.TableSchema
    useStandardSQL                bool
    timePartitioning              *TimePartitioning
}

// createTable creates a table in the BigQuery service.
// expiration is an optional time after which the table will be deleted and its storage reclaimed.
// If viewQuery is non-empty, the created table will be of type VIEW.
// Note: expiration can only be set during table creation.
// Note: after table creation, a view can be modified only if its table was initially created with a view.
func (s *bigqueryService) createTable(ctx context.Context, conf *createTableConf) error {
    table := &bq.Table{
        TableReference: &bq.TableReference{
            ProjectId: conf.projectID,
            DatasetId: conf.datasetID,
            TableId:   conf.tableID,
        },
    }
    if !conf.expiration.IsZero() {
        table.ExpirationTime = conf.expiration.UnixNano() / 1e6
    }
    // TODO(jba): make it impossible to provide both a view query and a schema.
    if conf.viewQuery != "" {
        table.View = &bq.ViewDefinition{
            Query: conf.viewQuery,
        }
        if conf.useStandardSQL {
            table.View.UseLegacySql = false
            table.View.ForceSendFields = append(table.View.ForceSendFields, "UseLegacySql")
        }
    }
    if conf.schema != nil {
        table.Schema = conf.schema
    }
    if conf.timePartitioning != nil {
        table.TimePartitioning = &bq.TimePartitioning{
            Type:         "DAY",
            ExpirationMs: int64(conf.timePartitioning.Expiration.Seconds() * 1000),
        }
    }

    _, err := s.s.Tables.Insert(conf.projectID, conf.datasetID, table).Context(ctx).Do()
    return err
}

func (s *bigqueryService) getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error) {
    table, err := s.s.Tables.Get(projectID, datasetID, tableID).Context(ctx).Do()
    if err != nil {
        return nil, err
    }
    return bqTableToMetadata(table), nil
}

func (s *bigqueryService) deleteTable(ctx context.Context, projectID, datasetID, tableID string) error {
    return s.s.Tables.Delete(projectID, datasetID, tableID).Context(ctx).Do()
}

func bqTableToMetadata(t *bq.Table) *TableMetadata {
    md := &TableMetadata{
        Description:      t.Description,
        Name:             t.FriendlyName,
        Type:             TableType(t.Type),
        ID:               t.Id,
        NumBytes:         t.NumBytes,
        NumRows:          t.NumRows,
        ExpirationTime:   unixMillisToTime(t.ExpirationTime),
        CreationTime:     unixMillisToTime(t.CreationTime),
        LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)),
    }
    if t.Schema != nil {
        md.Schema = convertTableSchema(t.Schema)
    }
    if t.View != nil {
        md.View = t.View.Query
    }
    if t.TimePartitioning != nil {
        md.TimePartitioning = &TimePartitioning{time.Duration(t.TimePartitioning.ExpirationMs) * time.Millisecond}
    }

    return md
}

func bqDatasetToMetadata(d *bq.Dataset) *DatasetMetadata {
    /// TODO(jba): access
    return &DatasetMetadata{
        CreationTime:           unixMillisToTime(d.CreationTime),
        LastModifiedTime:       unixMillisToTime(d.LastModifiedTime),
        DefaultTableExpiration: time.Duration(d.DefaultTableExpirationMs) * time.Millisecond,
        Description:            d.Description,
        Name:                   d.FriendlyName,
        ID:                     d.Id,
        Location:               d.Location,
        Labels:                 d.Labels,
    }
}

// Convert a number of milliseconds since the Unix epoch to a time.Time.
// Treat an input of zero specially: convert it to the zero time,
// rather than the start of the epoch.
func unixMillisToTime(m int64) time.Time {
    if m == 0 {
        return time.Time{}
    }
    return time.Unix(0, m*1e6)
}

func (s *bigqueryService) convertListedTable(t *bq.TableListTables) *Table {
    return &Table{
        ProjectID: t.TableReference.ProjectId,
        DatasetID: t.TableReference.DatasetId,
        TableID:   t.TableReference.TableId,
    }
}

// patchTableConf contains fields to be patched.
type patchTableConf struct {
    // These fields are omitted from the patch operation if nil.
    Description *string
    Name        *string
    Schema      Schema
}

func (s *bigqueryService) patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error) {
    t := &bq.Table{}
    forceSend := func(field string) {
        t.ForceSendFields = append(t.ForceSendFields, field)
    }

    if conf.Description != nil {
        t.Description = *conf.Description
        forceSend("Description")
    }
    if conf.Name != nil {
        t.FriendlyName = *conf.Name
        forceSend("FriendlyName")
    }
    if conf.Schema != nil {
        t.Schema = conf.Schema.asTableSchema()
        forceSend("Schema")
    }
    table, err := s.s.Tables.Patch(projectID, datasetID, tableID, t).
        Context(ctx).
        Do()
    if err != nil {
        return nil, err
    }
    return bqTableToMetadata(table), nil
}

func (s *bigqueryService) insertDataset(ctx context.Context, datasetID, projectID string) error {
    ds := &bq.Dataset{
        DatasetReference: &bq.DatasetReference{DatasetId: datasetID},
    }
    _, err := s.s.Datasets.Insert(projectID, ds).Context(ctx).Do()
    return err
}

func (s *bigqueryService) deleteDataset(ctx context.Context, datasetID, projectID string) error {
    return s.s.Datasets.Delete(projectID, datasetID).Context(ctx).Do()
}

func (s *bigqueryService) getDatasetMetadata(ctx context.Context, projectID, datasetID string) (*DatasetMetadata, error) {
    table, err := s.s.Datasets.Get(projectID, datasetID).Context(ctx).Do()
    if err != nil {
        return nil, err
    }
    return bqDatasetToMetadata(table), nil
}

func (s *bigqueryService) listDatasets(ctx context.Context, projectID string, maxResults int, pageToken string, all bool, filter string) ([]*Dataset, string, error) {
    req := s.s.Datasets.List(projectID).
        Context(ctx).
        PageToken(pageToken).
        All(all)
    if maxResults > 0 {
        req.MaxResults(int64(maxResults))
    }
    if filter != "" {
        req.Filter(filter)
    }
    res, err := req.Do()
    if err != nil {
        return nil, "", err
    }
    var datasets []*Dataset
    for _, d := range res.Datasets {
        datasets = append(datasets, s.convertListedDataset(d))
    }
    return datasets, res.NextPageToken, nil
}

func (s *bigqueryService) convertListedDataset(d *bq.DatasetListDatasets) *Dataset {
    return &Dataset{
        ProjectID: d.DatasetReference.ProjectId,
        DatasetID: d.DatasetReference.DatasetId,
    }
}

// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
// See the similar function in ../storage/invoke.go. The main difference is the
// reason for retrying.
func runWithRetry(ctx context.Context, call func() error) error {
    backoff := gax.Backoff{
        Initial:    2 * time.Second,
        Max:        32 * time.Second,
        Multiplier: 2,
    }
    return internal.Retry(ctx, backoff, func() (stop bool, err error) {
        err = call()
        if err == nil {
            return true, nil
        }
        e, ok := err.(*googleapi.Error)
        if !ok {
            return true, err
        }
        var reason string
        if len(e.Errors) > 0 {
            reason = e.Errors[0].Reason
        }
        // Retry using the criteria in
        // https://cloud.google.com/bigquery/troubleshooting-errors
        if reason == "backendError" && (e.Code == 500 || e.Code == 503) {
            return false, nil
        }
        return true, err
    })
}
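Two helpers in service.go carry the structural weight: `getPages` drives token-based pagination until an empty next-page token comes back, and `runWithRetry` wraps each RPC in gax backoff, retrying only `backendError` responses with code 500 or 503. A sketch of how a caller composes `getPages` with `listTables` (illustrative only; it assumes access to the unexported service value `s` and a `projectID`/`datasetID` in scope):

    err := getPages("", func(token string) (string, error) {
        // Fetch one page of up to 50 tables, starting at token.
        tables, next, err := s.listTables(ctx, projectID, datasetID, 50, token)
        if err != nil {
            return "", err
        }
        for _, t := range tables {
            fmt.Println(t.TableID)
        }
        return next, nil // empty string stops the loop
    })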
224 vendor/cloud.google.com/go/bigquery/table.go generated vendored Normal file
@ -0,0 +1,224 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"cloud.google.com/go/internal/optional"
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
// A Table is a reference to a BigQuery table.
|
||||
type Table struct {
|
||||
// ProjectID, DatasetID and TableID may be omitted if the Table is the destination for a query.
|
||||
// In this case the result will be stored in an ephemeral table.
|
||||
ProjectID string
|
||||
DatasetID string
|
||||
// TableID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).
|
||||
// The maximum length is 1,024 characters.
|
||||
TableID string
|
||||
|
||||
c *Client
|
||||
}
|
||||
|
||||
// TableMetadata contains information about a BigQuery table.
|
||||
type TableMetadata struct {
|
||||
Description string // The user-friendly description of this table.
|
||||
Name string // The user-friendly name for this table.
|
||||
Schema Schema
|
||||
View string
|
||||
|
||||
ID string // An opaque ID uniquely identifying the table.
|
||||
Type TableType
|
||||
|
||||
// The time when this table expires. If not set, the table will persist
|
||||
// indefinitely. Expired tables will be deleted and their storage reclaimed.
|
||||
ExpirationTime time.Time
|
||||
|
||||
CreationTime time.Time
|
||||
LastModifiedTime time.Time
|
||||
|
||||
// The size of the table in bytes.
|
||||
// This does not include data that is being buffered during a streaming insert.
|
||||
NumBytes int64
|
||||
|
||||
// The number of rows of data in this table.
|
||||
// This does not include data that is being buffered during a streaming insert.
|
||||
NumRows uint64
|
||||
|
||||
// The time-based partitioning settings for this table.
|
||||
TimePartitioning *TimePartitioning
|
||||
}
|
||||
|
||||
// TableCreateDisposition specifies the circumstances under which destination table will be created.
|
||||
// Default is CreateIfNeeded.
|
||||
type TableCreateDisposition string
|
||||
|
||||
const (
|
||||
// CreateIfNeeded will create the table if it does not already exist.
|
||||
// Tables are created atomically on successful completion of a job.
|
||||
CreateIfNeeded TableCreateDisposition = "CREATE_IF_NEEDED"
|
||||
|
||||
// CreateNever ensures the table must already exist and will not be
|
||||
// automatically created.
|
||||
CreateNever TableCreateDisposition = "CREATE_NEVER"
|
||||
)
|
||||
|
||||
// TableWriteDisposition specifies how existing data in a destination table is treated.
|
||||
// Default is WriteAppend.
|
||||
type TableWriteDisposition string
|
||||
|
||||
const (
|
||||
// WriteAppend will append to any existing data in the destination table.
|
||||
// Data is appended atomically on successful completion of a job.
|
||||
WriteAppend TableWriteDisposition = "WRITE_APPEND"
|
||||
|
||||
// WriteTruncate overrides the existing data in the destination table.
|
||||
// Data is overwritten atomically on successful completion of a job.
|
||||
WriteTruncate TableWriteDisposition = "WRITE_TRUNCATE"
|
||||
|
||||
// WriteEmpty fails writes if the destination table already contains data.
|
||||
WriteEmpty TableWriteDisposition = "WRITE_EMPTY"
|
||||
)
|
||||
|
||||
// TableType is the type of table.
type TableType string

const (
	RegularTable TableType = "TABLE"
	ViewTable    TableType = "VIEW"
)

func (t *Table) tableRefProto() *bq.TableReference {
	return &bq.TableReference{
		ProjectId: t.ProjectID,
		DatasetId: t.DatasetID,
		TableId:   t.TableID,
	}
}

// FullyQualifiedName returns the ID of the table in projectID:datasetID.tableID format.
func (t *Table) FullyQualifiedName() string {
	return fmt.Sprintf("%s:%s.%s", t.ProjectID, t.DatasetID, t.TableID)
}

// implicitTable reports whether Table is an empty placeholder, which signifies that a new table should be created with an auto-generated Table ID.
func (t *Table) implicitTable() bool {
	return t.ProjectID == "" && t.DatasetID == "" && t.TableID == ""
}

// Create creates a table in the BigQuery service.
func (t *Table) Create(ctx context.Context, options ...CreateTableOption) error {
	conf := &createTableConf{
		projectID: t.ProjectID,
		datasetID: t.DatasetID,
		tableID:   t.TableID,
	}
	for _, o := range options {
		o.customizeCreateTable(conf)
	}
	return t.c.service.createTable(ctx, conf)
}

// Metadata fetches the metadata for the table.
func (t *Table) Metadata(ctx context.Context) (*TableMetadata, error) {
	return t.c.service.getTableMetadata(ctx, t.ProjectID, t.DatasetID, t.TableID)
}

// Delete deletes the table.
func (t *Table) Delete(ctx context.Context) error {
	return t.c.service.deleteTable(ctx, t.ProjectID, t.DatasetID, t.TableID)
}

// A CreateTableOption is an optional argument to CreateTable.
type CreateTableOption interface {
	customizeCreateTable(*createTableConf)
}

type tableExpiration time.Time

// TableExpiration returns a CreateTableOption that will cause the created table to be deleted after the expiration time.
func TableExpiration(exp time.Time) CreateTableOption { return tableExpiration(exp) }

func (opt tableExpiration) customizeCreateTable(conf *createTableConf) {
	conf.expiration = time.Time(opt)
}

type viewQuery string

// ViewQuery returns a CreateTableOption that causes the created table to be a virtual table defined by the supplied query.
// For more information see: https://cloud.google.com/bigquery/querying-data#views
func ViewQuery(query string) CreateTableOption { return viewQuery(query) }

func (opt viewQuery) customizeCreateTable(conf *createTableConf) {
	conf.viewQuery = string(opt)
}

type useStandardSQL struct{}

// UseStandardSQL returns a CreateTableOption to set the table to use standard SQL.
// The default setting is false (using legacy SQL).
func UseStandardSQL() CreateTableOption { return useStandardSQL{} }

func (opt useStandardSQL) customizeCreateTable(conf *createTableConf) {
	conf.useStandardSQL = true
}

// TimePartitioning is a CreateTableOption that can be used to set time-based
// date partitioning on a table.
// For more information see: https://cloud.google.com/bigquery/docs/creating-partitioned-tables
type TimePartitioning struct {
	// (Optional) The amount of time to keep the storage for a partition.
	// If the duration is empty (0), the data in the partitions do not expire.
	Expiration time.Duration
}

func (opt TimePartitioning) customizeCreateTable(conf *createTableConf) {
	conf.timePartitioning = &opt
}

// Update modifies specific Table metadata fields.
func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate) (*TableMetadata, error) {
	var conf patchTableConf
	if tm.Description != nil {
		s := optional.ToString(tm.Description)
		conf.Description = &s
	}
	if tm.Name != nil {
		s := optional.ToString(tm.Name)
		conf.Name = &s
	}
	conf.Schema = tm.Schema
	return t.c.service.patchTable(ctx, t.ProjectID, t.DatasetID, t.TableID, &conf)
}

// TableMetadataToUpdate is used when updating a table's metadata.
// Only non-nil fields will be updated.
type TableMetadataToUpdate struct {
	// Description is the user-friendly description of this table.
	Description optional.String

	// Name is the user-friendly name for this table.
	Name optional.String

	// Schema is the table's schema.
	// When updating a schema, you can add columns but not remove them.
	Schema Schema
	// TODO(jba): support updating the view
}
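The CreateTableOption values above compose freely, since Create simply folds each option into one createTableConf. A minimal usage sketch, assuming the package's NewClient constructor (defined elsewhere in this vendored package, not shown in this diff) and placeholder project, dataset, and table names:

package main

import (
	"log"
	"time"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	// "my-project" and friends are placeholders, not real resources.
	client, err := bigquery.NewClient(ctx, "my-project")
	if err != nil {
		log.Fatal(err)
	}
	t := client.Dataset("my-dataset").Table("my-table")
	// Combine options: delete the table after a day, and partition it by
	// time, keeping each partition's storage for a week.
	err = t.Create(ctx,
		bigquery.TableExpiration(time.Now().Add(24*time.Hour)),
		bigquery.TimePartitioning{Expiration: 7 * 24 * time.Hour},
	)
	if err != nil {
		log.Fatal(err)
	}
}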
162 vendor/cloud.google.com/go/bigquery/uploader.go generated vendored Normal file
@@ -0,0 +1,162 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"fmt"
	"reflect"

	"golang.org/x/net/context"
)

// An Uploader does streaming inserts into a BigQuery table.
// It is safe for concurrent use.
type Uploader struct {
	t *Table

	// SkipInvalidRows causes rows containing invalid data to be silently
	// ignored. The default value is false, which causes the entire request to
	// fail if there is an attempt to insert an invalid row.
	SkipInvalidRows bool

	// IgnoreUnknownValues causes values not matching the schema to be ignored.
	// The default value is false, which causes records containing such values
	// to be treated as invalid records.
	IgnoreUnknownValues bool

	// A TableTemplateSuffix allows Uploaders to create tables automatically.
	//
	// Experimental: this option is experimental and may be modified or removed in future versions,
	// regardless of any other documented package stability guarantees.
	//
	// When you specify a suffix, the table you upload data to
	// will be used as a template for creating a new table, with the same schema,
	// called <table> + <suffix>.
	//
	// More information is available at
	// https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
	TableTemplateSuffix string
}

// Uploader returns an Uploader that can be used to append rows to t.
// The returned Uploader may optionally be further configured before its Put method is called.
func (t *Table) Uploader() *Uploader {
	return &Uploader{t: t}
}

// Put uploads one or more rows to the BigQuery service.
//
// If src is ValueSaver, then its Save method is called to produce a row for uploading.
//
// If src is a struct or pointer to a struct, then a schema is inferred from it
// and used to create a StructSaver. The InsertID of the StructSaver will be
// empty.
//
// If src is a slice of ValueSavers, structs, or struct pointers, then each
// element of the slice is treated as above, and multiple rows are uploaded.
//
// Put returns a PutMultiError if one or more rows failed to be uploaded.
// The PutMultiError contains a RowInsertionError for each failed row.
//
// Put will retry on temporary errors (see
// https://cloud.google.com/bigquery/troubleshooting-errors). This can result
// in duplicate rows if you do not use insert IDs. Also, if the error persists,
// the call will run indefinitely. Pass a context with a timeout to prevent
// hanging calls.
func (u *Uploader) Put(ctx context.Context, src interface{}) error {
	savers, err := valueSavers(src)
	if err != nil {
		return err
	}
	return u.putMulti(ctx, savers)
}

func valueSavers(src interface{}) ([]ValueSaver, error) {
	saver, ok, err := toValueSaver(src)
	if err != nil {
		return nil, err
	}
	if ok {
		return []ValueSaver{saver}, nil
	}
	srcVal := reflect.ValueOf(src)
	if srcVal.Kind() != reflect.Slice {
		return nil, fmt.Errorf("%T is not a ValueSaver, struct, struct pointer, or slice", src)
	}
	var savers []ValueSaver
	for i := 0; i < srcVal.Len(); i++ {
		s := srcVal.Index(i).Interface()
		saver, ok, err := toValueSaver(s)
		if err != nil {
			return nil, err
		}
		if !ok {
			return nil, fmt.Errorf("src[%d] has type %T, which is not a ValueSaver, struct or struct pointer", i, s)
		}
		savers = append(savers, saver)
	}
	return savers, nil
}

// Make a ValueSaver from x, which must implement ValueSaver already
// or be a struct or pointer to struct.
func toValueSaver(x interface{}) (ValueSaver, bool, error) {
	if saver, ok := x.(ValueSaver); ok {
		return saver, ok, nil
	}
	v := reflect.ValueOf(x)
	// Support Put with []interface{}
	if v.Kind() == reflect.Interface {
		v = v.Elem()
	}
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	if v.Kind() != reflect.Struct {
		return nil, false, nil
	}
	schema, err := inferSchemaReflect(v.Type())
	if err != nil {
		return nil, false, err
	}
	return &StructSaver{Struct: x, Schema: schema}, true, nil
}

func (u *Uploader) putMulti(ctx context.Context, src []ValueSaver) error {
	var rows []*insertionRow
	for _, saver := range src {
		row, insertID, err := saver.Save()
		if err != nil {
			return err
		}
		rows = append(rows, &insertionRow{InsertID: insertID, Row: row})
	}

	return u.t.c.service.insertRows(ctx, u.t.ProjectID, u.t.DatasetID, u.t.TableID, rows, &insertRowsConf{
		skipInvalidRows:     u.SkipInvalidRows,
		ignoreUnknownValues: u.IgnoreUnknownValues,
		templateSuffix:      u.TableTemplateSuffix,
	})
}

// An insertionRow represents a row of data to be inserted into a table.
type insertionRow struct {
	// If InsertID is non-empty, BigQuery will use it to de-duplicate insertions of
	// this row on a best-effort basis.
	InsertID string
	// The data to be inserted, represented as a map from field name to Value.
	Row map[string]Value
}
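Put accepts a ValueSaver, a struct, or a slice of either, as its doc comment above describes. A minimal sketch of the struct path, assuming the package's NewClient constructor (not part of this diff) and placeholder resource names:

package main

import (
	"log"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)

// Row is an illustrative type; Put infers a schema from its exported fields.
type Row struct {
	Name  string
	Count int
}

func main() {
	ctx := context.Background()
	// "my-project" etc. are placeholders, not real resources.
	client, err := bigquery.NewClient(ctx, "my-project")
	if err != nil {
		log.Fatal(err)
	}
	u := client.Dataset("my-dataset").Table("my-table").Uploader()
	u.SkipInvalidRows = true // drop bad rows rather than failing the whole request
	// Each struct becomes one streamed row with an empty InsertID.
	if err := u.Put(ctx, []Row{{"a", 1}, {"b", 2}}); err != nil {
		log.Fatal(err) // may be a PutMultiError with per-row details
	}
}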
285 vendor/cloud.google.com/go/bigquery/uploader_test.go generated vendored Normal file
@@ -0,0 +1,285 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"reflect"
	"testing"

	"cloud.google.com/go/internal/pretty"

	"golang.org/x/net/context"
)

type testSaver struct {
	ir  *insertionRow
	err error
}

func (ts testSaver) Save() (map[string]Value, string, error) {
	return ts.ir.Row, ts.ir.InsertID, ts.err
}

func TestRejectsNonValueSavers(t *testing.T) {
	client := &Client{projectID: "project-id"}
	u := Uploader{t: client.Dataset("dataset-id").Table("table-id")}

	testCases := []struct {
		src interface{}
	}{
		{
			src: 1,
		},
		{
			src: []int{1, 2},
		},
		{
			src: []interface{}{
				testSaver{ir: &insertionRow{"a", map[string]Value{"one": 1}}},
				1,
			},
		},
	}

	for _, tc := range testCases {
		if err := u.Put(context.Background(), tc.src); err == nil {
			t.Errorf("put value: %v; got nil, want error", tc.src)
		}
	}
}

type insertRowsRecorder struct {
	rowBatches [][]*insertionRow
	service
}

func (irr *insertRowsRecorder) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
	irr.rowBatches = append(irr.rowBatches, rows)
	return nil
}

func TestInsertsData(t *testing.T) {
	testCases := []struct {
		data [][]*insertionRow
	}{
		{
			data: [][]*insertionRow{
				{
					&insertionRow{"a", map[string]Value{"one": 1}},
				},
			},
		},
		{
			data: [][]*insertionRow{
				{
					&insertionRow{"a", map[string]Value{"one": 1}},
					&insertionRow{"b", map[string]Value{"two": 2}},
				},
			},
		},
		{
			data: [][]*insertionRow{
				{
					&insertionRow{"a", map[string]Value{"one": 1}},
				},
				{
					&insertionRow{"b", map[string]Value{"two": 2}},
				},
			},
		},
		{
			data: [][]*insertionRow{
				{
					&insertionRow{"a", map[string]Value{"one": 1}},
					&insertionRow{"b", map[string]Value{"two": 2}},
				},
				{
					&insertionRow{"c", map[string]Value{"three": 3}},
					&insertionRow{"d", map[string]Value{"four": 4}},
				},
			},
		},
	}
	for _, tc := range testCases {
		irr := &insertRowsRecorder{}
		client := &Client{
			projectID: "project-id",
			service:   irr,
		}
		u := client.Dataset("dataset-id").Table("table-id").Uploader()
		for _, batch := range tc.data {
			if len(batch) == 0 {
				continue
			}
			var toUpload interface{}
			if len(batch) == 1 {
				toUpload = testSaver{ir: batch[0]}
			} else {
				savers := []testSaver{}
				for _, row := range batch {
					savers = append(savers, testSaver{ir: row})
				}
				toUpload = savers
			}

			err := u.Put(context.Background(), toUpload)
			if err != nil {
				t.Errorf("expected successful Put of ValueSaver; got: %v", err)
			}
		}
		if got, want := irr.rowBatches, tc.data; !reflect.DeepEqual(got, want) {
			t.Errorf("got: %v, want: %v", got, want)
		}
	}
}

type uploadOptionRecorder struct {
	received *insertRowsConf
	service
}

func (u *uploadOptionRecorder) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
	u.received = conf
	return nil
}

func TestUploadOptionsPropagate(t *testing.T) {
	// we don't care for the data in this testcase.
	dummyData := testSaver{ir: &insertionRow{}}
	recorder := new(uploadOptionRecorder)
	c := &Client{service: recorder}
	table := &Table{
		ProjectID: "project-id",
		DatasetID: "dataset-id",
		TableID:   "table-id",
		c:         c,
	}

	tests := [...]struct {
		ul   *Uploader
		conf insertRowsConf
	}{
		{
			// test zero options lead to zero value for insertRowsConf
			ul: table.Uploader(),
		},
		{
			ul: func() *Uploader {
				u := table.Uploader()
				u.TableTemplateSuffix = "suffix"
				return u
			}(),
			conf: insertRowsConf{
				templateSuffix: "suffix",
			},
		},
		{
			ul: func() *Uploader {
				u := table.Uploader()
				u.IgnoreUnknownValues = true
				return u
			}(),
			conf: insertRowsConf{
				ignoreUnknownValues: true,
			},
		},
		{
			ul: func() *Uploader {
				u := table.Uploader()
				u.SkipInvalidRows = true
				return u
			}(),
			conf: insertRowsConf{
				skipInvalidRows: true,
			},
		},
		{ // multiple upload options combine
			ul: func() *Uploader {
				u := table.Uploader()
				u.TableTemplateSuffix = "suffix"
				u.IgnoreUnknownValues = true
				u.SkipInvalidRows = true
				return u
			}(),
			conf: insertRowsConf{
				templateSuffix:      "suffix",
				skipInvalidRows:     true,
				ignoreUnknownValues: true,
			},
		},
	}

	for i, tc := range tests {
		err := tc.ul.Put(context.Background(), dummyData)
		if err != nil {
			t.Fatalf("%d: expected successful Put of ValueSaver; got: %v", i, err)
		}

		if recorder.received == nil {
			t.Fatalf("%d: received no options at all!", i)
		}

		want := tc.conf
		got := *recorder.received
		if got != want {
			t.Errorf("%d: got %#v, want %#v, ul=%#v", i, got, want, tc.ul)
		}
	}
}

func TestValueSavers(t *testing.T) {
	ts := &testSaver{ir: &insertionRow{}}
	type T struct{ I int }
	schema, err := InferSchema(T{})
	if err != nil {
		t.Fatal(err)
	}
	for _, test := range []struct {
		in   interface{}
		want []ValueSaver
	}{
		{ts, []ValueSaver{ts}},
		{T{I: 1}, []ValueSaver{&StructSaver{Schema: schema, Struct: T{I: 1}}}},
		{[]ValueSaver{ts, ts}, []ValueSaver{ts, ts}},
		{[]interface{}{ts, ts}, []ValueSaver{ts, ts}},
		{[]T{{I: 1}, {I: 2}}, []ValueSaver{
			&StructSaver{Schema: schema, Struct: T{I: 1}},
			&StructSaver{Schema: schema, Struct: T{I: 2}},
		}},
		{[]interface{}{T{I: 1}, &T{I: 2}}, []ValueSaver{
			&StructSaver{Schema: schema, Struct: T{I: 1}},
			&StructSaver{Schema: schema, Struct: &T{I: 2}},
		}},
	} {
		got, err := valueSavers(test.in)
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(got, test.want) {
			t.Errorf("%+v: got %v, want %v", test.in, pretty.Value(got), pretty.Value(test.want))
		}
		// Make sure Save is successful.
		for i, vs := range got {
			_, _, err := vs.Save()
			if err != nil {
				t.Fatalf("%+v, #%d: got error %v, want nil", test.in, i, err)
			}
		}
	}
}
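The recorder types above (insertRowsRecorder, uploadOptionRecorder) rely on a standard Go stubbing trick: embedding an interface in a struct so the struct satisfies the interface while overriding only the one method the test exercises. A self-contained sketch of the pattern with invented names, not code from this package:

package main

import "fmt"

// greeter stands in for an interface like this package's private service.
type greeter interface {
	Hello() string
	Bye() string
}

// loudGreeter embeds the interface, so it satisfies greeter while
// implementing only Hello. Calling Bye would panic on the nil embedded
// field, which is acceptable for a focused test stub.
type loudGreeter struct {
	greeter
}

func (loudGreeter) Hello() string { return "HELLO" }

func main() {
	var g greeter = loudGreeter{}
	fmt.Println(g.Hello()) // prints HELLO
}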
47 vendor/cloud.google.com/go/bigquery/utils_test.go generated vendored Normal file
@@ -0,0 +1,47 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
)

func defaultGCS() *GCSReference {
	return &GCSReference{
		uris: []string{"uri"},
	}
}

var defaultQuery = &QueryConfig{
	Q:                "query string",
	DefaultProjectID: "def-project-id",
	DefaultDatasetID: "def-dataset-id",
}

type testService struct {
	*bq.Job

	service
}

func (s *testService) insertJob(ctx context.Context, projectID string, conf *insertJobConf) (*Job, error) {
	s.Job = conf.job
	return &Job{}, nil
}

func (s *testService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
	return &JobStatus{State: Done}, nil
}
637 vendor/cloud.google.com/go/bigquery/value.go generated vendored Normal file
@@ -0,0 +1,637 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"encoding/base64"
	"errors"
	"fmt"
	"reflect"
	"strconv"
	"time"

	"cloud.google.com/go/civil"

	bq "google.golang.org/api/bigquery/v2"
)

// Value stores the contents of a single cell from a BigQuery result.
type Value interface{}

// ValueLoader stores a slice of Values representing a result row from a Read operation.
// See RowIterator.Next for more information.
type ValueLoader interface {
	Load(v []Value, s Schema) error
}

// valueList converts a []Value to implement ValueLoader.
type valueList []Value

// Load stores a sequence of values in a valueList.
// It resets the slice length to zero, then appends each value to it.
func (vs *valueList) Load(v []Value, _ Schema) error {
	*vs = append((*vs)[:0], v...)
	return nil
}

// valueMap converts a map[string]Value to implement ValueLoader.
type valueMap map[string]Value

// Load stores a sequence of values in a valueMap.
func (vm *valueMap) Load(v []Value, s Schema) error {
	if *vm == nil {
		*vm = map[string]Value{}
	}
	loadMap(*vm, v, s)
	return nil
}

func loadMap(m map[string]Value, vals []Value, s Schema) {
	for i, f := range s {
		val := vals[i]
		var v interface{}
		switch {
		case f.Schema == nil:
			v = val
		case !f.Repeated:
			m2 := map[string]Value{}
			loadMap(m2, val.([]Value), f.Schema)
			v = m2
		default: // repeated and nested
			sval := val.([]Value)
			vs := make([]Value, len(sval))
			for j, e := range sval {
				m2 := map[string]Value{}
				loadMap(m2, e.([]Value), f.Schema)
				vs[j] = m2
			}
			v = vs
		}
		m[f.Name] = v
	}
}

type structLoader struct {
	typ reflect.Type // type of struct
	err error
	ops []structLoaderOp

	vstructp reflect.Value // pointer to current struct value; changed by set
}

// A setFunc is a function that sets a struct field or slice/array
// element to a value.
type setFunc func(v reflect.Value, val interface{}) error

// A structLoaderOp instructs the loader to set a struct field to a row value.
type structLoaderOp struct {
	fieldIndex []int
	valueIndex int
	setFunc    setFunc
	repeated   bool
}

func setAny(v reflect.Value, x interface{}) error {
	v.Set(reflect.ValueOf(x))
	return nil
}

func setInt(v reflect.Value, x interface{}) error {
	xx := x.(int64)
	if v.OverflowInt(xx) {
		return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
	}
	v.SetInt(xx)
	return nil
}

func setFloat(v reflect.Value, x interface{}) error {
	xx := x.(float64)
	if v.OverflowFloat(xx) {
		return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
	}
	v.SetFloat(xx)
	return nil
}

func setBool(v reflect.Value, x interface{}) error {
	v.SetBool(x.(bool))
	return nil
}

func setString(v reflect.Value, x interface{}) error {
	v.SetString(x.(string))
	return nil
}

func setBytes(v reflect.Value, x interface{}) error {
	v.SetBytes(x.([]byte))
	return nil
}

// set remembers a value for the next call to Load. The value must be
// a pointer to a struct. (This is checked in RowIterator.Next.)
func (sl *structLoader) set(structp interface{}, schema Schema) error {
	if sl.err != nil {
		return sl.err
	}
	sl.vstructp = reflect.ValueOf(structp)
	typ := sl.vstructp.Type().Elem()
	if sl.typ == nil {
		// First call: remember the type and compile the schema.
		sl.typ = typ
		ops, err := compileToOps(typ, schema)
		if err != nil {
			sl.err = err
			return err
		}
		sl.ops = ops
	} else if sl.typ != typ {
		return fmt.Errorf("bigquery: struct type changed from %s to %s", sl.typ, typ)
	}
	return nil
}

// compileToOps produces a sequence of operations that will set the fields of a
// value of structType to the contents of a row with schema.
func compileToOps(structType reflect.Type, schema Schema) ([]structLoaderOp, error) {
	var ops []structLoaderOp
	fields, err := fieldCache.Fields(structType)
	if err != nil {
		return nil, err
	}
	for i, schemaField := range schema {
		// Look for an exported struct field with the same name as the schema
		// field, ignoring case (BigQuery column names are case-insensitive,
		// and we want to act like encoding/json anyway).
		structField := fields.Match(schemaField.Name)
		if structField == nil {
			// Ignore schema fields with no corresponding struct field.
			continue
		}
		op := structLoaderOp{
			fieldIndex: structField.Index,
			valueIndex: i,
		}
		t := structField.Type
		if schemaField.Repeated {
			if t.Kind() != reflect.Slice && t.Kind() != reflect.Array {
				return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but struct field %s has type %s",
					schemaField.Name, structField.Name, t)
			}
			t = t.Elem()
			op.repeated = true
		}
		if schemaField.Type == RecordFieldType {
			// Field can be a struct or a pointer to a struct.
			if t.Kind() == reflect.Ptr {
				t = t.Elem()
			}
			if t.Kind() != reflect.Struct {
				return nil, fmt.Errorf("bigquery: field %s has type %s, expected struct or *struct",
					structField.Name, structField.Type)
			}
			nested, err := compileToOps(t, schemaField.Schema)
			if err != nil {
				return nil, err
			}
			op.setFunc = func(v reflect.Value, val interface{}) error {
				return setNested(nested, v, val.([]Value))
			}
		} else {
			op.setFunc = determineSetFunc(t, schemaField.Type)
			if op.setFunc == nil {
				return nil, fmt.Errorf("bigquery: schema field %s of type %s is not assignable to struct field %s of type %s",
					schemaField.Name, schemaField.Type, structField.Name, t)
			}
		}
		ops = append(ops, op)
	}
	return ops, nil
}

// determineSetFunc chooses the best function for setting a field of type ftype
// to a value whose schema field type is sftype. It returns nil if stype
// is not assignable to ftype.
// determineSetFunc considers only basic types. See compileToOps for
// handling of repetition and nesting.
func determineSetFunc(ftype reflect.Type, stype FieldType) setFunc {
	switch stype {
	case StringFieldType:
		if ftype.Kind() == reflect.String {
			return setString
		}

	case BytesFieldType:
		if ftype == typeOfByteSlice {
			return setBytes
		}

	case IntegerFieldType:
		if isSupportedIntType(ftype) {
			return setInt
		}

	case FloatFieldType:
		switch ftype.Kind() {
		case reflect.Float32, reflect.Float64:
			return setFloat
		}

	case BooleanFieldType:
		if ftype.Kind() == reflect.Bool {
			return setBool
		}

	case TimestampFieldType:
		if ftype == typeOfGoTime {
			return setAny
		}

	case DateFieldType:
		if ftype == typeOfDate {
			return setAny
		}

	case TimeFieldType:
		if ftype == typeOfTime {
			return setAny
		}

	case DateTimeFieldType:
		if ftype == typeOfDateTime {
			return setAny
		}
	}
	return nil
}

func (sl *structLoader) Load(values []Value, _ Schema) error {
	if sl.err != nil {
		return sl.err
	}
	return runOps(sl.ops, sl.vstructp.Elem(), values)
}

// runOps executes a sequence of ops, setting the fields of vstruct to the
// supplied values.
func runOps(ops []structLoaderOp, vstruct reflect.Value, values []Value) error {
	for _, op := range ops {
		field := vstruct.FieldByIndex(op.fieldIndex)
		var err error
		if op.repeated {
			err = setRepeated(field, values[op.valueIndex].([]Value), op.setFunc)
		} else {
			err = op.setFunc(field, values[op.valueIndex])
		}
		if err != nil {
			return err
		}
	}
	return nil
}

func setNested(ops []structLoaderOp, v reflect.Value, vals []Value) error {
	// v is either a struct or a pointer to a struct.
	if v.Kind() == reflect.Ptr {
		// If the pointer is nil, set it to a zero struct value.
		if v.IsNil() {
			v.Set(reflect.New(v.Type().Elem()))
		}
		v = v.Elem()
	}
	return runOps(ops, v, vals)
}

func setRepeated(field reflect.Value, vslice []Value, setElem setFunc) error {
	vlen := len(vslice)
	var flen int
	switch field.Type().Kind() {
	case reflect.Slice:
		// Make a slice of the right size, avoiding allocation if possible.
		switch {
		case field.Len() < vlen:
			field.Set(reflect.MakeSlice(field.Type(), vlen, vlen))
		case field.Len() > vlen:
			field.SetLen(vlen)
		}
		flen = vlen

	case reflect.Array:
		flen = field.Len()
		if flen > vlen {
			// Set extra elements to their zero value.
			z := reflect.Zero(field.Type().Elem())
			for i := vlen; i < flen; i++ {
				field.Index(i).Set(z)
			}
		}
	default:
		return fmt.Errorf("bigquery: impossible field type %s", field.Type())
	}
	for i, val := range vslice {
		if i < flen { // avoid writing past the end of a short array
			if err := setElem(field.Index(i), val); err != nil {
				return err
			}
		}
	}
	return nil
}
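This loader machinery is what lets RowIterator.Next fill a []Value, a map, or a struct whose fields are matched to schema fields by name. A hedged usage sketch of the struct path from outside the package; the project, dataset, and query text are invented, and Query/Read/iterator.Done are assumed from this vintage of the package's public API:

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
)

// row's fields are matched case-insensitively against the result schema.
type row struct {
	Name  string
	Count int64
}

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "my-project") // placeholder project
	if err != nil {
		log.Fatal(err)
	}
	q := client.Query("SELECT name, count FROM my_dataset.my_table") // invented query
	it, err := q.Read(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for {
		var r row
		err := it.Next(&r) // compileToOps above does the schema-to-field matching
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(r.Name, r.Count)
	}
}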
// A ValueSaver returns a row of data to be inserted into a table.
type ValueSaver interface {
	// Save returns a row to be inserted into a BigQuery table, represented
	// as a map from field name to Value.
	// If insertID is non-empty, BigQuery will use it to de-duplicate
	// insertions of this row on a best-effort basis.
	Save() (row map[string]Value, insertID string, err error)
}

// ValuesSaver implements ValueSaver for a slice of Values.
type ValuesSaver struct {
	Schema Schema

	// If non-empty, BigQuery will use InsertID to de-duplicate insertions
	// of this row on a best-effort basis.
	InsertID string

	Row []Value
}

// Save implements ValueSaver.
func (vls *ValuesSaver) Save() (map[string]Value, string, error) {
	m, err := valuesToMap(vls.Row, vls.Schema)
	return m, vls.InsertID, err
}

func valuesToMap(vs []Value, schema Schema) (map[string]Value, error) {
	if len(vs) != len(schema) {
		return nil, errors.New("Schema does not match length of row to be inserted")
	}

	m := make(map[string]Value)
	for i, fieldSchema := range schema {
		if fieldSchema.Type != RecordFieldType {
			m[fieldSchema.Name] = vs[i]
			continue
		}
		// Nested record, possibly repeated.
		vals, ok := vs[i].([]Value)
		if !ok {
			return nil, errors.New("nested record is not a []Value")
		}
		if !fieldSchema.Repeated {
			value, err := valuesToMap(vals, fieldSchema.Schema)
			if err != nil {
				return nil, err
			}
			m[fieldSchema.Name] = value
			continue
		}
		// A repeated nested field is converted into a slice of maps.
		var maps []Value
		for _, v := range vals {
			sv, ok := v.([]Value)
			if !ok {
				return nil, errors.New("nested record in slice is not a []Value")
			}
			value, err := valuesToMap(sv, fieldSchema.Schema)
			if err != nil {
				return nil, err
			}
			maps = append(maps, value)
		}
		m[fieldSchema.Name] = maps
	}
	return m, nil
}

// StructSaver implements ValueSaver for a struct.
// The struct is converted to a map of values by using the values of struct
// fields corresponding to schema fields. Additional and missing
// fields are ignored, as are nested struct pointers that are nil.
type StructSaver struct {
	// Schema determines what fields of the struct are uploaded. It should
	// match the table's schema.
	Schema Schema

	// If non-empty, BigQuery will use InsertID to de-duplicate insertions
	// of this row on a best-effort basis.
	InsertID string

	// Struct should be a struct or a pointer to a struct.
	Struct interface{}
}

// Save implements ValueSaver.
func (ss *StructSaver) Save() (row map[string]Value, insertID string, err error) {
	vstruct := reflect.ValueOf(ss.Struct)
	row, err = structToMap(vstruct, ss.Schema)
	if err != nil {
		return nil, "", err
	}
	return row, ss.InsertID, nil
}

func structToMap(vstruct reflect.Value, schema Schema) (map[string]Value, error) {
	if vstruct.Kind() == reflect.Ptr {
		vstruct = vstruct.Elem()
	}
	if !vstruct.IsValid() {
		return nil, nil
	}
	m := map[string]Value{}
	if vstruct.Kind() != reflect.Struct {
		return nil, fmt.Errorf("bigquery: type is %s, need struct or struct pointer", vstruct.Type())
	}
	fields, err := fieldCache.Fields(vstruct.Type())
	if err != nil {
		return nil, err
	}
	for _, schemaField := range schema {
		// Look for an exported struct field with the same name as the schema
		// field, ignoring case.
		structField := fields.Match(schemaField.Name)
		if structField == nil {
			continue
		}
		val, err := structFieldToUploadValue(vstruct.FieldByIndex(structField.Index), schemaField)
		if err != nil {
			return nil, err
		}
		// Add the value to the map, unless it is nil.
		if val != nil {
			m[schemaField.Name] = val
		}
	}
	return m, nil
}

// structFieldToUploadValue converts a struct field to a value suitable for ValueSaver.Save, using
// the schemaField as a guide.
// structFieldToUploadValue is careful to return a true nil interface{} when needed, so its
// caller can easily identify a nil value.
func structFieldToUploadValue(vfield reflect.Value, schemaField *FieldSchema) (interface{}, error) {
	if schemaField.Repeated && (vfield.Kind() != reflect.Slice && vfield.Kind() != reflect.Array) {
		return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but value has type %s",
			schemaField.Name, vfield.Type())
	}

	// A non-nested field can be represented by its Go value.
	if schemaField.Type != RecordFieldType {
		if !schemaField.Repeated || vfield.Len() > 0 {
			return vfield.Interface(), nil
		}
		// The service treats a null repeated field as an error. Return
		// nil to omit the field entirely.
		return nil, nil
	}
	// A non-repeated nested field is converted into a map[string]Value.
	if !schemaField.Repeated {
		m, err := structToMap(vfield, schemaField.Schema)
		if err != nil {
			return nil, err
		}
		if m == nil {
			return nil, nil
		}
		return m, nil
	}
	// A repeated nested field is converted into a slice of maps.
	if vfield.Len() == 0 {
		return nil, nil
	}
	var vals []Value
	for i := 0; i < vfield.Len(); i++ {
		m, err := structToMap(vfield.Index(i), schemaField.Schema)
		if err != nil {
			return nil, err
		}
		vals = append(vals, m)
	}
	return vals, nil
}

// convertRows converts a series of TableRows into a series of Value slices.
// schema is used to interpret the data from rows; its length must match the
// length of each row.
func convertRows(rows []*bq.TableRow, schema Schema) ([][]Value, error) {
	var rs [][]Value
	for _, r := range rows {
		row, err := convertRow(r, schema)
		if err != nil {
			return nil, err
		}
		rs = append(rs, row)
	}
	return rs, nil
}

func convertRow(r *bq.TableRow, schema Schema) ([]Value, error) {
	if len(schema) != len(r.F) {
		return nil, errors.New("schema length does not match row length")
	}
	var values []Value
	for i, cell := range r.F {
		fs := schema[i]
		v, err := convertValue(cell.V, fs.Type, fs.Schema)
		if err != nil {
			return nil, err
		}
		values = append(values, v)
	}
	return values, nil
}

func convertValue(val interface{}, typ FieldType, schema Schema) (Value, error) {
	switch val := val.(type) {
	case nil:
		return nil, nil
	case []interface{}:
		return convertRepeatedRecord(val, typ, schema)
	case map[string]interface{}:
		return convertNestedRecord(val, schema)
	case string:
		return convertBasicType(val, typ)
	default:
		return nil, fmt.Errorf("got value %v; expected a value of type %s", val, typ)
	}
}

func convertRepeatedRecord(vals []interface{}, typ FieldType, schema Schema) (Value, error) {
	var values []Value
	for _, cell := range vals {
		// each cell contains a single entry, keyed by "v"
		val := cell.(map[string]interface{})["v"]
		v, err := convertValue(val, typ, schema)
		if err != nil {
			return nil, err
		}
		values = append(values, v)
	}
	return values, nil
}

func convertNestedRecord(val map[string]interface{}, schema Schema) (Value, error) {
	// convertNestedRecord is similar to convertRow, as a record has the same structure as a row.

	// Nested records are wrapped in a map with a single key, "f".
	record := val["f"].([]interface{})
	if len(record) != len(schema) {
		return nil, errors.New("schema length does not match record length")
	}

	var values []Value
	for i, cell := range record {
		// each cell contains a single entry, keyed by "v"
		val := cell.(map[string]interface{})["v"]

		fs := schema[i]
		v, err := convertValue(val, fs.Type, fs.Schema)
		if err != nil {
			return nil, err
		}
		values = append(values, v)
	}
	return values, nil
}

// convertBasicType returns val as an interface with a concrete type specified by typ.
func convertBasicType(val string, typ FieldType) (Value, error) {
	switch typ {
	case StringFieldType:
		return val, nil
	case BytesFieldType:
		return base64.StdEncoding.DecodeString(val)
	case IntegerFieldType:
		return strconv.ParseInt(val, 10, 64)
	case FloatFieldType:
		return strconv.ParseFloat(val, 64)
	case BooleanFieldType:
		return strconv.ParseBool(val)
	case TimestampFieldType:
		f, err := strconv.ParseFloat(val, 64)
		return Value(time.Unix(0, int64(f*1e9)).UTC()), err
	case DateFieldType:
		return civil.ParseDate(val)
	case TimeFieldType:
		return civil.ParseTime(val)
	case DateTimeFieldType:
		return civil.ParseDateTime(val)
	default:
		return nil, fmt.Errorf("unrecognized type: %s", typ)
	}
}
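StructSaver and ValuesSaver cover the common cases, but the ValueSaver interface above can also be implemented directly when you want full control over the row map and insert ID. A small sketch with invented type and field names:

package main

import "cloud.google.com/go/bigquery"

// scoreRow implements bigquery.ValueSaver by hand; the names are illustrative.
type scoreRow struct {
	Name  string
	Score int64
}

// Save returns the row as a field-name-to-value map. Using the name as the
// insert ID lets BigQuery de-duplicate retried insertions of this row on a
// best-effort basis, as described in the ValueSaver docs above.
func (r scoreRow) Save() (map[string]bigquery.Value, string, error) {
	return map[string]bigquery.Value{
		"name":  r.Name,
		"score": r.Score,
	}, r.Name, nil
}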
885 vendor/cloud.google.com/go/bigquery/value_test.go generated vendored Normal file
@@ -0,0 +1,885 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"encoding/base64"
	"fmt"
	"math"
	"reflect"
	"testing"
	"time"

	"cloud.google.com/go/civil"
	"cloud.google.com/go/internal/pretty"

	bq "google.golang.org/api/bigquery/v2"
)

func TestConvertBasicValues(t *testing.T) {
	schema := []*FieldSchema{
		{Type: StringFieldType},
		{Type: IntegerFieldType},
		{Type: FloatFieldType},
		{Type: BooleanFieldType},
		{Type: BytesFieldType},
	}
	row := &bq.TableRow{
		F: []*bq.TableCell{
			{V: "a"},
			{V: "1"},
			{V: "1.2"},
			{V: "true"},
			{V: base64.StdEncoding.EncodeToString([]byte("foo"))},
		},
	}
	got, err := convertRow(row, schema)
	if err != nil {
		t.Fatalf("error converting: %v", err)
	}
	want := []Value{"a", int64(1), 1.2, true, []byte("foo")}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, want)
	}
}

func TestConvertTime(t *testing.T) {
	// TODO(jba): add tests for civil time types.
	schema := []*FieldSchema{
		{Type: TimestampFieldType},
	}
	thyme := time.Date(1970, 1, 1, 10, 0, 0, 10, time.UTC)
	row := &bq.TableRow{
		F: []*bq.TableCell{
			{V: fmt.Sprintf("%.10f", float64(thyme.UnixNano())/1e9)},
		},
	}
	got, err := convertRow(row, schema)
	if err != nil {
		t.Fatalf("error converting: %v", err)
	}
	if !got[0].(time.Time).Equal(thyme) {
		t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, thyme)
	}
	if got[0].(time.Time).Location() != time.UTC {
		t.Errorf("expected time zone UTC: got:\n%v", got)
	}
}

func TestConvertNullValues(t *testing.T) {
	schema := []*FieldSchema{
		{Type: StringFieldType},
	}
	row := &bq.TableRow{
		F: []*bq.TableCell{
			{V: nil},
		},
	}
	got, err := convertRow(row, schema)
	if err != nil {
		t.Fatalf("error converting: %v", err)
	}
	want := []Value{nil}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("converting null values: got:\n%v\nwant:\n%v", got, want)
	}
}

func TestBasicRepetition(t *testing.T) {
	schema := []*FieldSchema{
		{Type: IntegerFieldType, Repeated: true},
	}
	row := &bq.TableRow{
		F: []*bq.TableCell{
			{
				V: []interface{}{
					map[string]interface{}{
						"v": "1",
					},
					map[string]interface{}{
						"v": "2",
					},
					map[string]interface{}{
						"v": "3",
					},
				},
			},
		},
	}
	got, err := convertRow(row, schema)
	if err != nil {
		t.Fatalf("error converting: %v", err)
	}
	want := []Value{[]Value{int64(1), int64(2), int64(3)}}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want)
	}
}

func TestNestedRecordContainingRepetition(t *testing.T) {
	schema := []*FieldSchema{
		{
			Type: RecordFieldType,
			Schema: Schema{
				{Type: IntegerFieldType, Repeated: true},
			},
		},
	}
	row := &bq.TableRow{
		F: []*bq.TableCell{
			{
				V: map[string]interface{}{
					"f": []interface{}{
						map[string]interface{}{
							"v": []interface{}{
								map[string]interface{}{"v": "1"},
								map[string]interface{}{"v": "2"},
								map[string]interface{}{"v": "3"},
							},
						},
					},
				},
			},
		},
	}

	got, err := convertRow(row, schema)
	if err != nil {
		t.Fatalf("error converting: %v", err)
	}
	want := []Value{[]Value{[]Value{int64(1), int64(2), int64(3)}}}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want)
	}
}

func TestRepeatedRecordContainingRepetition(t *testing.T) {
	schema := []*FieldSchema{
		{
			Type:     RecordFieldType,
			Repeated: true,
			Schema: Schema{
				{Type: IntegerFieldType, Repeated: true},
			},
		},
	}
	row := &bq.TableRow{F: []*bq.TableCell{
		{
			V: []interface{}{ // repeated records.
				map[string]interface{}{ // first record.
					"v": map[string]interface{}{ // pointless single-key-map wrapper.
						"f": []interface{}{ // list of record fields.
							map[string]interface{}{ // only record (repeated ints)
								"v": []interface{}{ // pointless wrapper.
									map[string]interface{}{
										"v": "1",
									},
									map[string]interface{}{
										"v": "2",
									},
									map[string]interface{}{
										"v": "3",
									},
								},
							},
						},
					},
				},
				map[string]interface{}{ // second record.
					"v": map[string]interface{}{
						"f": []interface{}{
							map[string]interface{}{
								"v": []interface{}{
									map[string]interface{}{
										"v": "4",
									},
									map[string]interface{}{
										"v": "5",
									},
									map[string]interface{}{
										"v": "6",
									},
								},
							},
						},
					},
				},
			},
		},
	}}

	got, err := convertRow(row, schema)
	if err != nil {
		t.Fatalf("error converting: %v", err)
	}
	want := []Value{ // the row is a list of length 1, containing an entry for the repeated record.
		[]Value{ // the repeated record is a list of length 2, containing an entry for each repetition.
			[]Value{ // the record is a list of length 1, containing an entry for the repeated integer field.
				[]Value{int64(1), int64(2), int64(3)}, // the repeated integer field is a list of length 3.
			},
			[]Value{ // second record
				[]Value{int64(4), int64(5), int64(6)},
			},
		},
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("converting repeated records with repeated values: got:\n%v\nwant:\n%v", got, want)
	}
}

func TestRepeatedRecordContainingRecord(t *testing.T) {
	schema := []*FieldSchema{
		{
			Type:     RecordFieldType,
			Repeated: true,
			Schema: Schema{
				{
					Type: StringFieldType,
				},
				{
					Type: RecordFieldType,
					Schema: Schema{
						{Type: IntegerFieldType},
						{Type: StringFieldType},
					},
				},
			},
		},
	}
	row := &bq.TableRow{F: []*bq.TableCell{
		{
			V: []interface{}{ // repeated records.
				map[string]interface{}{ // first record.
					"v": map[string]interface{}{ // pointless single-key-map wrapper.
						"f": []interface{}{ // list of record fields.
							map[string]interface{}{ // first record field (name)
								"v": "first repeated record",
							},
							map[string]interface{}{ // second record field (nested record).
								"v": map[string]interface{}{ // pointless single-key-map wrapper.
									"f": []interface{}{ // nested record fields
										map[string]interface{}{
											"v": "1",
										},
										map[string]interface{}{
											"v": "two",
										},
									},
								},
							},
						},
					},
				},
				map[string]interface{}{ // second record.
					"v": map[string]interface{}{
						"f": []interface{}{
							map[string]interface{}{
								"v": "second repeated record",
							},
							map[string]interface{}{
								"v": map[string]interface{}{
									"f": []interface{}{
										map[string]interface{}{
											"v": "3",
										},
										map[string]interface{}{
											"v": "four",
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}}

	got, err := convertRow(row, schema)
	if err != nil {
		t.Fatalf("error converting: %v", err)
	}
	// TODO: test with flattenresults.
	want := []Value{ // the row is a list of length 1, containing an entry for the repeated record.
		[]Value{ // the repeated record is a list of length 2, containing an entry for each repetition.
			[]Value{ // record contains a string followed by a nested record.
				"first repeated record",
				[]Value{
					int64(1),
					"two",
				},
			},
			[]Value{ // second record.
				"second repeated record",
				[]Value{
					int64(3),
					"four",
				},
			},
		},
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("converting repeated records containing record : got:\n%v\nwant:\n%v", got, want)
	}
}

func TestValuesSaverConvertsToMap(t *testing.T) {
	testCases := []struct {
		vs   ValuesSaver
		want *insertionRow
	}{
		{
			vs: ValuesSaver{
				Schema: []*FieldSchema{
					{Name: "intField", Type: IntegerFieldType},
					{Name: "strField", Type: StringFieldType},
				},
				InsertID: "iid",
				Row:      []Value{1, "a"},
			},
			want: &insertionRow{
				InsertID: "iid",
				Row:      map[string]Value{"intField": 1, "strField": "a"},
			},
		},
		{
			vs: ValuesSaver{
				Schema: []*FieldSchema{
					{Name: "intField", Type: IntegerFieldType},
					{
						Name: "recordField",
						Type: RecordFieldType,
						Schema: []*FieldSchema{
							{Name: "nestedInt", Type: IntegerFieldType, Repeated: true},
						},
					},
				},
				InsertID: "iid",
				Row:      []Value{1, []Value{[]Value{2, 3}}},
			},
			want: &insertionRow{
				InsertID: "iid",
				Row: map[string]Value{
					"intField": 1,
					"recordField": map[string]Value{
						"nestedInt": []Value{2, 3},
					},
				},
			},
		},
		{ // repeated nested field
			vs: ValuesSaver{
				Schema: Schema{
					{
						Name: "records",
						Type: RecordFieldType,
						Schema: Schema{
							{Name: "x", Type: IntegerFieldType},
							{Name: "y", Type: IntegerFieldType},
						},
						Repeated: true,
					},
				},
				InsertID: "iid",
				Row: []Value{ // a row is a []Value
					[]Value{ // repeated field's value is a []Value
						[]Value{1, 2}, // first record of the repeated field
						[]Value{3, 4}, // second record
					},
				},
			},
			want: &insertionRow{
				InsertID: "iid",
				Row: map[string]Value{
					"records": []Value{
						map[string]Value{"x": 1, "y": 2},
						map[string]Value{"x": 3, "y": 4},
					},
				},
			},
		},
	}
	for _, tc := range testCases {
		data, insertID, err := tc.vs.Save()
		if err != nil {
			t.Errorf("Expected successful save; got: %v", err)
		}
		got := &insertionRow{insertID, data}
		if !reflect.DeepEqual(got, tc.want) {
			t.Errorf("saving ValuesSaver:\ngot:\n%+v\nwant:\n%+v", got, tc.want)
		}
	}
}

func TestStructSaver(t *testing.T) {
	schema := Schema{
		{Name: "s", Type: StringFieldType},
		{Name: "r", Type: IntegerFieldType, Repeated: true},
		{Name: "nested", Type: RecordFieldType, Schema: Schema{
			{Name: "b", Type: BooleanFieldType},
		}},
		{Name: "rnested", Type: RecordFieldType, Repeated: true, Schema: Schema{
			{Name: "b", Type: BooleanFieldType},
		}},
	}

	type (
		N struct{ B bool }
		T struct {
			S       string
			R       []int
			Nested  *N
			Rnested []*N
		}
	)

	check := func(msg string, in interface{}, want map[string]Value) {
		ss := StructSaver{
			Schema:   schema,
			InsertID: "iid",
			Struct:   in,
		}
		got, gotIID, err := ss.Save()
		if err != nil {
			t.Fatalf("%s: %v", msg, err)
		}
		if wantIID := "iid"; gotIID != wantIID {
			t.Errorf("%s: InsertID: got %q, want %q", msg, gotIID, wantIID)
		}
		if !reflect.DeepEqual(got, want) {
			t.Errorf("%s:\ngot\n%#v\nwant\n%#v", msg, got, want)
		}
	}

	in := T{
		S:       "x",
		R:       []int{1, 2},
		Nested:  &N{B: true},
		Rnested: []*N{{true}, {false}},
	}
	want := map[string]Value{
		"s":       "x",
		"r":       []int{1, 2},
		"nested":  map[string]Value{"b": true},
		"rnested": []Value{map[string]Value{"b": true}, map[string]Value{"b": false}},
	}
	check("all values", in, want)
	check("all values, ptr", &in, want)
	check("empty struct", T{}, map[string]Value{"s": ""})

	// Missing and extra fields ignored.
	type T2 struct {
		S string
		// missing R, Nested, RNested
		Extra int
	}
	check("missing and extra", T2{S: "x"}, map[string]Value{"s": "x"})

	check("nils in slice", T{Rnested: []*N{{true}, nil, {false}}},
		map[string]Value{
			"s":       "",
			"rnested": []Value{map[string]Value{"b": true}, map[string]Value(nil), map[string]Value{"b": false}},
		})
}

func TestConvertRows(t *testing.T) {
	schema := []*FieldSchema{
		{Type: StringFieldType},
		{Type: IntegerFieldType},
		{Type: FloatFieldType},
		{Type: BooleanFieldType},
	}
	rows := []*bq.TableRow{
		{F: []*bq.TableCell{
			{V: "a"},
			{V: "1"},
			{V: "1.2"},
			{V: "true"},
		}},
		{F: []*bq.TableCell{
			{V: "b"},
			{V: "2"},
			{V: "2.2"},
			{V: "false"},
		}},
	}
	want := [][]Value{
		{"a", int64(1), 1.2, true},
		{"b", int64(2), 2.2, false},
	}
	got, err := convertRows(rows, schema)
	if err != nil {
		t.Fatalf("got %v, want nil", err)
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("\ngot %v\nwant %v", got, want)
	}
}

func TestValueList(t *testing.T) {
	schema := Schema{
		{Name: "s", Type: StringFieldType},
		{Name: "i", Type: IntegerFieldType},
		{Name: "f", Type: FloatFieldType},
		{Name: "b", Type: BooleanFieldType},
	}
	want := []Value{"x", 7, 3.14, true}
	var got []Value
	vl := (*valueList)(&got)
	if err := vl.Load(want, schema); err != nil {
		t.Fatal(err)
	}

	if !reflect.DeepEqual(got, want) {
		t.Errorf("got %+v, want %+v", got, want)
	}

	// Load truncates, not appends.
	// https://github.com/GoogleCloudPlatform/google-cloud-go/issues/437
	if err := vl.Load(want, schema); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("got %+v, want %+v", got, want)
	}
}

func TestValueMap(t *testing.T) {
	ns := Schema{
		{Name: "x", Type: IntegerFieldType},
		{Name: "y", Type: IntegerFieldType},
	}
	schema := Schema{
		{Name: "s", Type: StringFieldType},
		{Name: "i", Type: IntegerFieldType},
		{Name: "f", Type: FloatFieldType},
		{Name: "b", Type: BooleanFieldType},
		{Name: "n", Type: RecordFieldType, Schema: ns},
		{Name: "rn", Type: RecordFieldType, Schema: ns, Repeated: true},
	}
	in := []Value{"x", 7, 3.14, true,
		[]Value{1, 2},
		[]Value{[]Value{3, 4}, []Value{5, 6}},
	}
	var vm valueMap
	if err := vm.Load(in, schema); err != nil {
		t.Fatal(err)
	}
	want := map[string]Value{
		"s": "x",
		"i": 7,
		"f": 3.14,
		"b": true,
		"n": map[string]Value{"x": 1, "y": 2},
		"rn": []Value{
			map[string]Value{"x": 3, "y": 4},
			map[string]Value{"x": 5, "y": 6},
		},
	}
	if !reflect.DeepEqual(vm, valueMap(want)) {
		t.Errorf("got\n%+v\nwant\n%+v", vm, want)
	}
}

var (
	// For testing StructLoader
	schema2 = Schema{
		{Name: "s", Type: StringFieldType},
		{Name: "s2", Type: StringFieldType},
		{Name: "by", Type: BytesFieldType},
		{Name: "I", Type: IntegerFieldType},
		{Name: "F", Type: FloatFieldType},
		{Name: "B", Type: BooleanFieldType},
		{Name: "TS", Type: TimestampFieldType},
		{Name: "D", Type: DateFieldType},
		{Name: "T", Type: TimeFieldType},
		{Name: "DT", Type: DateTimeFieldType},
		{Name: "nested", Type: RecordFieldType, Schema: Schema{
			{Name: "nestS", Type: StringFieldType},
			{Name: "nestI", Type: IntegerFieldType},
		}},
		{Name: "t", Type: StringFieldType},
	}

	testTimestamp = time.Date(2016, 11, 5, 7, 50, 22, 8, time.UTC)
	testDate      = civil.Date{2016, 11, 5}
	testTime      = civil.Time{7, 50, 22, 8}
	testDateTime  = civil.DateTime{testDate, testTime}

	testValues = []Value{"x", "y", []byte{1, 2, 3}, int64(7), 3.14, true,
		testTimestamp, testDate, testTime, testDateTime,
		[]Value{"nested", int64(17)}, "z"}
)

type testStruct1 struct {
	B bool
	I int
	times
	S      string
	S2     String
	By     []byte
	s      string
	F      float64
	Nested nested
	Tagged string `bigquery:"t"`
}

type String string

type nested struct {
	NestS string
	NestI int
}

type times struct {
	TS time.Time
	T  civil.Time
	D  civil.Date
	DT civil.DateTime
}

func TestStructLoader(t *testing.T) {
	var ts1 testStruct1
	if err := load(&ts1, schema2, testValues); err != nil {
		t.Fatal(err)
	}
	// Note: the schema field named "s" gets matched to the exported struct
	// field "S", not the unexported "s".
	want := &testStruct1{
		B:      true,
		I:      7,
		F:      3.14,
		times:  times{TS: testTimestamp, T: testTime, D: testDate, DT: testDateTime},
		S:      "x",
		S2:     "y",
		By:     []byte{1, 2, 3},
		Nested: nested{NestS: "nested", NestI: 17},
		Tagged: "z",
	}
	if !reflect.DeepEqual(&ts1, want) {
		t.Errorf("got %+v, want %+v", pretty.Value(ts1), pretty.Value(*want))
		d, _, err := pretty.Diff(*want, ts1)
		if err == nil {
			t.Logf("diff:\n%s", d)
		}
	}

	// Test pointers to nested structs.
	type nestedPtr struct{ Nested *nested }
	var np nestedPtr
	if err := load(&np, schema2, testValues); err != nil {
		t.Fatal(err)
	}
	want2 := &nestedPtr{Nested: &nested{NestS: "nested", NestI: 17}}
	if !reflect.DeepEqual(&np, want2) {
		t.Errorf("got %+v, want %+v", pretty.Value(np), pretty.Value(*want2))
	}
||||
|
||||
// Existing values should be reused.
|
||||
nst := &nested{NestS: "x", NestI: -10}
|
||||
np = nestedPtr{Nested: nst}
|
||||
if err := load(&np, schema2, testValues); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(&np, want2) {
|
||||
t.Errorf("got %+v, want %+v", pretty.Value(np), pretty.Value(*want2))
|
||||
}
|
||||
if np.Nested != nst {
|
||||
t.Error("nested struct pointers not equal")
|
||||
}
|
||||
}
|
||||
|
||||
type repStruct struct {
|
||||
Nums []int
|
||||
ShortNums [2]int // to test truncation
|
||||
LongNums [5]int // to test padding with zeroes
|
||||
Nested []*nested
|
||||
}
|
||||
|
||||
var (
|
||||
repSchema = Schema{
|
||||
{Name: "nums", Type: IntegerFieldType, Repeated: true},
|
||||
{Name: "shortNums", Type: IntegerFieldType, Repeated: true},
|
||||
{Name: "longNums", Type: IntegerFieldType, Repeated: true},
|
||||
{Name: "nested", Type: RecordFieldType, Repeated: true, Schema: Schema{
|
||||
{Name: "nestS", Type: StringFieldType},
|
||||
{Name: "nestI", Type: IntegerFieldType},
|
||||
}},
|
||||
}
|
||||
v123 = []Value{int64(1), int64(2), int64(3)}
|
||||
repValues = []Value{v123, v123, v123,
|
||||
[]Value{
|
||||
[]Value{"x", int64(1)},
|
||||
[]Value{"y", int64(2)},
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func TestStructLoaderRepeated(t *testing.T) {
|
||||
var r1 repStruct
|
||||
if err := load(&r1, repSchema, repValues); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
want := repStruct{
|
||||
Nums: []int{1, 2, 3},
|
||||
ShortNums: [...]int{1, 2}, // extra values discarded
|
||||
LongNums: [...]int{1, 2, 3, 0, 0},
|
||||
Nested: []*nested{{"x", 1}, {"y", 2}},
|
||||
}
|
||||
if !reflect.DeepEqual(r1, want) {
|
||||
t.Errorf("got %+v, want %+v", pretty.Value(r1), pretty.Value(want))
|
||||
}
|
||||
|
||||
r2 := repStruct{
|
||||
Nums: []int{-1, -2, -3, -4, -5}, // truncated to zero and appended to
|
||||
LongNums: [...]int{-1, -2, -3, -4, -5}, // unset elements are zeroed
|
||||
}
|
||||
if err := load(&r2, repSchema, repValues); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(r2, want) {
|
||||
t.Errorf("got %+v, want %+v", pretty.Value(r2), pretty.Value(want))
|
||||
}
|
||||
if got, want := cap(r2.Nums), 5; got != want {
|
||||
t.Errorf("cap(r2.Nums) = %d, want %d", got, want)
|
||||
}
|
||||
|
||||
// Short slice case.
|
||||
r3 := repStruct{Nums: []int{-1}}
|
||||
if err := load(&r3, repSchema, repValues); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(r3, want) {
|
||||
t.Errorf("got %+v, want %+v", pretty.Value(r3), pretty.Value(want))
|
||||
}
|
||||
if got, want := cap(r3.Nums), 3; got != want {
|
||||
t.Errorf("cap(r3.Nums) = %d, want %d", got, want)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestStructLoaderOverflow(t *testing.T) {
|
||||
type S struct {
|
||||
I int16
|
||||
F float32
|
||||
}
|
||||
schema := Schema{
|
||||
{Name: "I", Type: IntegerFieldType},
|
||||
{Name: "F", Type: FloatFieldType},
|
||||
}
|
||||
var s S
|
||||
if err := load(&s, schema, []Value{int64(math.MaxInt16 + 1), 0}); err == nil {
|
||||
t.Error("int: got nil, want error")
|
||||
}
|
||||
if err := load(&s, schema, []Value{int64(0), math.MaxFloat32 * 2}); err == nil {
|
||||
t.Error("float: got nil, want error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStructLoaderFieldOverlap(t *testing.T) {
|
||||
// It's OK if the struct has fields that the schema does not, and vice versa.
|
||||
type S1 struct {
|
||||
I int
|
||||
X [][]int // not in the schema; does not even correspond to a valid BigQuery type
|
||||
// many schema fields missing
|
||||
}
|
||||
var s1 S1
|
||||
if err := load(&s1, schema2, testValues); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
want1 := S1{I: 7}
|
||||
if !reflect.DeepEqual(s1, want1) {
|
||||
t.Errorf("got %+v, want %+v", pretty.Value(s1), pretty.Value(want1))
|
||||
}
|
||||
|
||||
// It's even valid to have no overlapping fields at all.
|
||||
type S2 struct{ Z int }
|
||||
|
||||
var s2 S2
|
||||
if err := load(&s2, schema2, testValues); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
want2 := S2{}
|
||||
if !reflect.DeepEqual(s2, want2) {
|
||||
t.Errorf("got %+v, want %+v", pretty.Value(s2), pretty.Value(want2))
|
||||
}
|
||||
}
|
||||
|
||||
func TestStructLoaderErrors(t *testing.T) {
|
||||
check := func(sp interface{}) {
|
||||
var sl structLoader
|
||||
err := sl.set(sp, schema2)
|
||||
if err == nil {
|
||||
t.Errorf("%T: got nil, want error", sp)
|
||||
}
|
||||
}
|
||||
|
||||
type bad1 struct{ F int32 } // wrong type for FLOAT column
|
||||
check(&bad1{})
|
||||
|
||||
type bad2 struct{ I uint } // unsupported integer type
|
||||
check(&bad2{})
|
||||
|
||||
// Using more than one struct type with the same structLoader.
|
||||
type different struct {
|
||||
B bool
|
||||
I int
|
||||
times
|
||||
S string
|
||||
s string
|
||||
Nums []int
|
||||
}
|
||||
|
||||
var sl structLoader
|
||||
if err := sl.set(&testStruct1{}, schema2); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err := sl.set(&different{}, schema2)
|
||||
if err == nil {
|
||||
t.Error("different struct types: got nil, want error")
|
||||
}
|
||||
}
|
||||
|
||||
func load(pval interface{}, schema Schema, vals []Value) error {
|
||||
var sl structLoader
|
||||
if err := sl.set(pval, schema); err != nil {
|
||||
return err
|
||||
}
|
||||
return sl.Load(vals, nil)
|
||||
}
|
||||
|
||||
func BenchmarkStructLoader_NoCompile(b *testing.B) {
|
||||
benchmarkStructLoader(b, false)
|
||||
}
|
||||
|
||||
func BenchmarkStructLoader_Compile(b *testing.B) {
|
||||
benchmarkStructLoader(b, true)
|
||||
}
|
||||
|
||||
func benchmarkStructLoader(b *testing.B, compile bool) {
|
||||
var ts1 testStruct1
|
||||
for i := 0; i < b.N; i++ {
|
||||
var sl structLoader
|
||||
for j := 0; j < 10; j++ {
|
||||
if err := load(&ts1, schema2, testValues); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
if !compile {
|
||||
sl.typ = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
vendor/cloud.google.com/go/bigtable/admin.go (335 lines, generated, vendored, new file)
@@ -0,0 +1,335 @@
|
||||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package bigtable
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
btopt "cloud.google.com/go/bigtable/internal/option"
|
||||
"cloud.google.com/go/longrunning"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/transport"
|
||||
btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
const adminAddr = "bigtableadmin.googleapis.com:443"
|
||||
|
||||
// AdminClient is a client type for performing admin operations within a specific instance.
|
||||
type AdminClient struct {
|
||||
conn *grpc.ClientConn
|
||||
tClient btapb.BigtableTableAdminClient
|
||||
|
||||
project, instance string
|
||||
|
||||
// Metadata to be sent with each request.
|
||||
md metadata.MD
|
||||
}
|
||||
|
||||
// NewAdminClient creates a new AdminClient for a given project and instance.
|
||||
func NewAdminClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*AdminClient, error) {
|
||||
o, err := btopt.DefaultClientOptions(adminAddr, AdminScope, clientUserAgent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
o = append(o, opts...)
|
||||
conn, err := transport.DialGRPC(ctx, o...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("dialing: %v", err)
|
||||
}
|
||||
return &AdminClient{
|
||||
conn: conn,
|
||||
tClient: btapb.NewBigtableTableAdminClient(conn),
|
||||
project: project,
|
||||
instance: instance,
|
||||
md: metadata.Pairs(resourcePrefixHeader, fmt.Sprintf("projects/%s/instances/%s", project, instance)),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close closes the AdminClient.
|
||||
func (ac *AdminClient) Close() error {
|
||||
return ac.conn.Close()
|
||||
}
|
||||
|
||||
func (ac *AdminClient) instancePrefix() string {
|
||||
return fmt.Sprintf("projects/%s/instances/%s", ac.project, ac.instance)
|
||||
}
|
||||
|
||||
// Tables returns a list of the tables in the instance.
|
||||
func (ac *AdminClient) Tables(ctx context.Context) ([]string, error) {
|
||||
ctx = mergeMetadata(ctx, ac.md)
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.ListTablesRequest{
|
||||
Parent: prefix,
|
||||
}
|
||||
res, err := ac.tClient.ListTables(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
names := make([]string, 0, len(res.Tables))
|
||||
for _, tbl := range res.Tables {
|
||||
names = append(names, strings.TrimPrefix(tbl.Name, prefix+"/tables/"))
|
||||
}
|
||||
return names, nil
|
||||
}
|
||||
|
||||
// CreateTable creates a new table in the instance.
|
||||
// This method may return before the table's creation is complete.
|
||||
func (ac *AdminClient) CreateTable(ctx context.Context, table string) error {
|
||||
ctx = mergeMetadata(ctx, ac.md)
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.CreateTableRequest{
|
||||
Parent: prefix,
|
||||
TableId: table,
|
||||
}
|
||||
_, err := ac.tClient.CreateTable(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
// CreatePresplitTable creates a new table in the instance.
|
||||
// The list of row keys will be used to initially split the table into multiple tablets.
|
||||
// Given two split keys, "s1" and "s2", three tablets will be created,
|
||||
// spanning the key ranges: [, s1), [s1, s2), [s2, ).
|
||||
// This method may return before the table's creation is complete.
|
||||
func (ac *AdminClient) CreatePresplitTable(ctx context.Context, table string, splitKeys []string) error {
	var reqSplits []*btapb.CreateTableRequest_Split
	for _, split := range splitKeys {
		reqSplits = append(reqSplits, &btapb.CreateTableRequest_Split{[]byte(split)})
	}
|
||||
ctx = mergeMetadata(ctx, ac.md)
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.CreateTableRequest{
|
||||
Parent: prefix,
|
||||
TableId: table,
|
||||
InitialSplits: reqSplits,
|
||||
}
|
||||
_, err := ac.tClient.CreateTable(ctx, req)
|
||||
return err
|
||||
}
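
// createPresplitExample is an editor's sketch, not part of the upstream
// file. It illustrates the doc comment above: with split keys "k1" and
// "k2", the new table starts with three tablets covering the key ranges
// [, "k1"), ["k1", "k2") and ["k2", ), so rows "a", "k1x" and "z" land in
// the first, second and third tablet respectively. The table and key
// names here are hypothetical.
func createPresplitExample(ctx context.Context, ac *AdminClient) error {
	return ac.CreatePresplitTable(ctx, "mytable", []string{"k1", "k2"})
}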
|
||||
|
||||
// CreateColumnFamily creates a new column family in a table.
|
||||
func (ac *AdminClient) CreateColumnFamily(ctx context.Context, table, family string) error {
|
||||
// TODO(dsymonds): Permit specifying gcexpr and any other family settings.
|
||||
ctx = mergeMetadata(ctx, ac.md)
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.ModifyColumnFamiliesRequest{
|
||||
Name: prefix + "/tables/" + table,
|
||||
Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
|
||||
Id: family,
|
||||
Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}},
|
||||
}},
|
||||
}
|
||||
_, err := ac.tClient.ModifyColumnFamilies(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteTable deletes a table and all of its data.
|
||||
func (ac *AdminClient) DeleteTable(ctx context.Context, table string) error {
|
||||
ctx = mergeMetadata(ctx, ac.md)
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.DeleteTableRequest{
|
||||
Name: prefix + "/tables/" + table,
|
||||
}
|
||||
_, err := ac.tClient.DeleteTable(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteColumnFamily deletes a column family in a table and all of its data.
|
||||
func (ac *AdminClient) DeleteColumnFamily(ctx context.Context, table, family string) error {
|
||||
ctx = mergeMetadata(ctx, ac.md)
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.ModifyColumnFamiliesRequest{
|
||||
Name: prefix + "/tables/" + table,
|
||||
Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
|
||||
Id: family,
|
||||
Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Drop{true},
|
||||
}},
|
||||
}
|
||||
_, err := ac.tClient.ModifyColumnFamilies(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
// TableInfo represents information about a table.
|
||||
type TableInfo struct {
|
||||
Families []string
|
||||
}
|
||||
|
||||
// TableInfo retrieves information about a table.
|
||||
func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo, error) {
|
||||
ctx = mergeMetadata(ctx, ac.md)
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.GetTableRequest{
|
||||
Name: prefix + "/tables/" + table,
|
||||
}
|
||||
res, err := ac.tClient.GetTable(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ti := &TableInfo{}
|
||||
for fam := range res.ColumnFamilies {
|
||||
ti.Families = append(ti.Families, fam)
|
||||
}
|
||||
return ti, nil
|
||||
}
|
||||
|
||||
// SetGCPolicy specifies which cells in a column family should be garbage collected.
|
||||
// GC executes opportunistically in the background; table reads may return data
|
||||
// matching the GC policy.
|
||||
func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, policy GCPolicy) error {
|
||||
ctx = mergeMetadata(ctx, ac.md)
|
||||
prefix := ac.instancePrefix()
|
||||
req := &btapb.ModifyColumnFamiliesRequest{
|
||||
Name: prefix + "/tables/" + table,
|
||||
Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
|
||||
Id: family,
|
||||
Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{&btapb.ColumnFamily{GcRule: policy.proto()}},
|
||||
}},
|
||||
}
|
||||
_, err := ac.tClient.ModifyColumnFamilies(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
const instanceAdminAddr = "bigtableadmin.googleapis.com:443"
|
||||
|
||||
// InstanceAdminClient is a client type for performing admin operations on instances.
|
||||
// These operations can be substantially more dangerous than those provided by AdminClient.
|
||||
type InstanceAdminClient struct {
|
||||
conn *grpc.ClientConn
|
||||
iClient btapb.BigtableInstanceAdminClient
|
||||
|
||||
project string
|
||||
|
||||
// Metadata to be sent with each request.
|
||||
md metadata.MD
|
||||
}
|
||||
|
||||
// NewInstanceAdminClient creates a new InstanceAdminClient for a given project.
|
||||
func NewInstanceAdminClient(ctx context.Context, project string, opts ...option.ClientOption) (*InstanceAdminClient, error) {
|
||||
o, err := btopt.DefaultClientOptions(instanceAdminAddr, InstanceAdminScope, clientUserAgent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
o = append(o, opts...)
|
||||
conn, err := transport.DialGRPC(ctx, o...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("dialing: %v", err)
|
||||
}
|
||||
return &InstanceAdminClient{
|
||||
conn: conn,
|
||||
iClient: btapb.NewBigtableInstanceAdminClient(conn),
|
||||
|
||||
project: project,
|
||||
md: metadata.Pairs(resourcePrefixHeader, "projects/"+project),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close closes the InstanceAdminClient.
|
||||
func (iac *InstanceAdminClient) Close() error {
|
||||
return iac.conn.Close()
|
||||
}
|
||||
|
||||
// StorageType is the type of storage used for all tables in an instance.
type StorageType int

const (
	// SSD is solid-state storage; it is the zero value and hence the default.
	SSD StorageType = iota
	// HDD is magnetic-disk storage.
	HDD
)
|
||||
|
||||
func (st StorageType) proto() btapb.StorageType {
|
||||
if st == HDD {
|
||||
return btapb.StorageType_HDD
|
||||
}
|
||||
return btapb.StorageType_SSD
|
||||
}
|
||||
|
||||
// InstanceInfo represents information about an instance.
|
||||
type InstanceInfo struct {
|
||||
Name string // name of the instance
|
||||
DisplayName string // display name for UIs
|
||||
}
|
||||
|
||||
// InstanceConf contains the information necessary to create an instance.
|
||||
type InstanceConf struct {
|
||||
InstanceId, DisplayName, ClusterId, Zone string
|
||||
NumNodes int32
|
||||
StorageType StorageType
|
||||
}
|
||||
|
||||
var instanceNameRegexp = regexp.MustCompile(`^projects/([^/]+)/instances/([a-z][-a-z0-9]*)$`)
|
||||
|
||||
// CreateInstance creates a new instance in the project.
|
||||
// This method will return when the instance has been created or when an error occurs.
|
||||
func (iac *InstanceAdminClient) CreateInstance(ctx context.Context, conf *InstanceConf) error {
|
||||
ctx = mergeMetadata(ctx, iac.md)
|
||||
req := &btapb.CreateInstanceRequest{
|
||||
Parent: "projects/" + iac.project,
|
||||
InstanceId: conf.InstanceId,
|
||||
Instance: &btapb.Instance{DisplayName: conf.DisplayName},
|
||||
Clusters: map[string]*btapb.Cluster{
|
||||
conf.ClusterId: {
|
||||
ServeNodes: conf.NumNodes,
|
||||
DefaultStorageType: conf.StorageType.proto(),
|
||||
Location: "projects/" + iac.project + "/locations/" + conf.Zone,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
lro, err := iac.iClient.CreateInstance(ctx, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp := btapb.Instance{}
|
||||
return longrunning.InternalNewOperation(iac.conn, lro).Wait(ctx, &resp)
|
||||
}
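
// createInstanceExample is an editor's sketch, not part of the upstream
// file; all identifiers and the zone are hypothetical. The zone must be a
// Cloud Bigtable location such as "us-central1-b". As documented above,
// CreateInstance blocks until the long-running operation completes.
func createInstanceExample(ctx context.Context, iac *InstanceAdminClient) error {
	return iac.CreateInstance(ctx, &InstanceConf{
		InstanceId:  "myinstance",
		DisplayName: "My Instance",
		ClusterId:   "myinstance-cluster",
		Zone:        "us-central1-b",
		NumNodes:    3,
		StorageType: SSD,
	})
}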
|
||||
|
||||
// DeleteInstance deletes an instance from the project.
|
||||
func (iac *InstanceAdminClient) DeleteInstance(ctx context.Context, instanceId string) error {
|
||||
ctx = mergeMetadata(ctx, iac.md)
|
||||
req := &btapb.DeleteInstanceRequest{Name: "projects/" + iac.project + "/instances/" + instanceId}
|
||||
_, err := iac.iClient.DeleteInstance(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
// Instances returns a list of instances in the project.
|
||||
func (iac *InstanceAdminClient) Instances(ctx context.Context) ([]*InstanceInfo, error) {
|
||||
ctx = mergeMetadata(ctx, iac.md)
|
||||
req := &btapb.ListInstancesRequest{
|
||||
Parent: "projects/" + iac.project,
|
||||
}
|
||||
res, err := iac.iClient.ListInstances(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var is []*InstanceInfo
|
||||
for _, i := range res.Instances {
|
||||
m := instanceNameRegexp.FindStringSubmatch(i.Name)
|
||||
if m == nil {
|
||||
return nil, fmt.Errorf("malformed instance name %q", i.Name)
|
||||
}
|
||||
is = append(is, &InstanceInfo{
|
||||
Name: m[2],
|
||||
DisplayName: i.DisplayName,
|
||||
})
|
||||
}
|
||||
return is, nil
|
||||
}
|
vendor/cloud.google.com/go/bigtable/admin_test.go (91 lines, generated, vendored, new file)
@@ -0,0 +1,91 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigtable
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
func TestAdminIntegration(t *testing.T) {
|
||||
testEnv, err := NewIntegrationEnv()
|
||||
if err != nil {
|
||||
t.Fatalf("IntegrationEnv: %v", err)
|
||||
}
|
||||
defer testEnv.Close()
|
||||
|
||||
timeout := 2 * time.Second
|
||||
if testEnv.Config().UseProd {
|
||||
timeout = 5 * time.Minute
|
||||
}
|
||||
ctx, _ := context.WithTimeout(context.Background(), timeout)
|
||||
|
||||
adminClient, err := testEnv.NewAdminClient()
|
||||
if err != nil {
|
||||
t.Fatalf("NewAdminClient: %v", err)
|
||||
}
|
||||
defer adminClient.Close()
|
||||
|
||||
list := func() []string {
|
||||
tbls, err := adminClient.Tables(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("Fetching list of tables: %v", err)
|
||||
}
|
||||
sort.Strings(tbls)
|
||||
return tbls
|
||||
}
|
||||
containsAll := func(got, want []string) bool {
|
||||
gotSet := make(map[string]bool)
|
||||
|
||||
for _, s := range got {
|
||||
gotSet[s] = true
|
||||
}
|
||||
for _, s := range want {
|
||||
if !gotSet[s] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
defer adminClient.DeleteTable(ctx, "mytable")
|
||||
|
||||
if err := adminClient.CreateTable(ctx, "mytable"); err != nil {
|
||||
t.Fatalf("Creating table: %v", err)
|
||||
}
|
||||
|
||||
defer adminClient.DeleteTable(ctx, "myothertable")
|
||||
|
||||
if err := adminClient.CreateTable(ctx, "myothertable"); err != nil {
|
||||
t.Fatalf("Creating table: %v", err)
|
||||
}
|
||||
|
||||
if got, want := list(), []string{"myothertable", "mytable"}; !containsAll(got, want) {
|
||||
t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
|
||||
}
|
||||
if err := adminClient.DeleteTable(ctx, "myothertable"); err != nil {
|
||||
t.Fatalf("Deleting table: %v", err)
|
||||
}
|
||||
tables := list()
|
||||
if got, want := tables, []string{"mytable"}; !containsAll(got, want) {
|
||||
t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
|
||||
}
|
||||
if got, unwanted := tables, []string{"myothertable"}; containsAll(got, unwanted) {
|
||||
t.Errorf("adminClient.Tables returned %#v, unwanted %#v", got, unwanted)
|
||||
}
|
||||
}
|
vendor/cloud.google.com/go/bigtable/bigtable.go (735 lines, generated, vendored, new file)
@@ -0,0 +1,735 @@
|
||||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package bigtable // import "cloud.google.com/go/bigtable"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/bigtable/internal/gax"
|
||||
btopt "cloud.google.com/go/bigtable/internal/option"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/transport"
|
||||
btpb "google.golang.org/genproto/googleapis/bigtable/v2"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
const prodAddr = "bigtable.googleapis.com:443"
|
||||
|
||||
// Client is a client for reading and writing data to tables in an instance.
|
||||
//
|
||||
// A Client is safe to use concurrently, except for its Close method.
|
||||
type Client struct {
|
||||
conn *grpc.ClientConn
|
||||
client btpb.BigtableClient
|
||||
project, instance string
|
||||
}
|
||||
|
||||
// NewClient creates a new Client for a given project and instance.
|
||||
func NewClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*Client, error) {
|
||||
o, err := btopt.DefaultClientOptions(prodAddr, Scope, clientUserAgent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Default to a small connection pool that can be overridden.
|
||||
o = append(o, option.WithGRPCConnectionPool(4))
|
||||
o = append(o, opts...)
|
||||
conn, err := transport.DialGRPC(ctx, o...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("dialing: %v", err)
|
||||
}
|
||||
return &Client{
|
||||
conn: conn,
|
||||
client: btpb.NewBigtableClient(conn),
|
||||
project: project,
|
||||
instance: instance,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close closes the Client.
|
||||
func (c *Client) Close() error {
|
||||
return c.conn.Close()
|
||||
}
|
||||
|
||||
var (
|
||||
idempotentRetryCodes = []codes.Code{codes.DeadlineExceeded, codes.Unavailable, codes.Aborted, codes.Internal}
|
||||
isIdempotentRetryCode = make(map[codes.Code]bool)
|
||||
retryOptions = []gax.CallOption{
|
||||
gax.WithDelayTimeoutSettings(100*time.Millisecond, 2000*time.Millisecond, 1.2),
|
||||
gax.WithRetryCodes(idempotentRetryCodes),
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
for _, code := range idempotentRetryCodes {
|
||||
isIdempotentRetryCode[code] = true
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) fullTableName(table string) string {
|
||||
return fmt.Sprintf("projects/%s/instances/%s/tables/%s", c.project, c.instance, table)
|
||||
}
|
||||
|
||||
// A Table refers to a table.
|
||||
//
|
||||
// A Table is safe to use concurrently.
|
||||
type Table struct {
|
||||
c *Client
|
||||
table string
|
||||
|
||||
// Metadata to be sent with each request.
|
||||
md metadata.MD
|
||||
}
|
||||
|
||||
// Open opens a table.
|
||||
func (c *Client) Open(table string) *Table {
|
||||
return &Table{
|
||||
c: c,
|
||||
table: table,
|
||||
md: metadata.Pairs(resourcePrefixHeader, c.fullTableName(table)),
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(dsymonds): Read method that returns a sequence of ReadItems.
|
||||
|
||||
// ReadRows reads rows from a table. f is called for each row.
|
||||
// If f returns false, the stream is shut down and ReadRows returns.
|
||||
// f owns its argument, and f is called serially in order by row key.
|
||||
//
|
||||
// By default, the yielded rows will contain all values in all cells.
|
||||
// Use RowFilter to limit the cells returned.
|
||||
func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts ...ReadOption) error {
|
||||
ctx = mergeMetadata(ctx, t.md)
|
||||
|
||||
var prevRowKey string
|
||||
err := gax.Invoke(ctx, func(ctx context.Context) error {
|
||||
req := &btpb.ReadRowsRequest{
|
||||
TableName: t.c.fullTableName(t.table),
|
||||
Rows: arg.proto(),
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt.set(req)
|
||||
}
|
||||
ctx, cancel := context.WithCancel(ctx) // for aborting the stream
|
||||
defer cancel()
|
||||
|
||||
stream, err := t.c.client.ReadRows(ctx, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cr := newChunkReader()
|
||||
for {
|
||||
res, err := stream.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
// Reset arg for next Invoke call.
|
||||
arg = arg.retainRowsAfter(prevRowKey)
|
||||
return err
|
||||
}
|
||||
|
||||
for _, cc := range res.Chunks {
|
||||
row, err := cr.Process(cc)
|
||||
if err != nil {
|
||||
// No need to prepare for a retry, this is an unretryable error.
|
||||
return err
|
||||
}
|
||||
if row == nil {
|
||||
continue
|
||||
}
|
||||
prevRowKey = row.Key()
|
||||
if !f(row) {
|
||||
// Cancel and drain stream.
|
||||
cancel()
|
||||
for {
|
||||
if _, err := stream.Recv(); err != nil {
|
||||
// The stream has ended. We don't return an error
|
||||
// because the caller has intentionally interrupted the scan.
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := cr.Close(); err != nil {
|
||||
// No need to prepare for a retry, this is an unretryable error.
|
||||
return err
|
||||
}
|
||||
}
|
||||
return err
|
||||
}, retryOptions...)
|
||||
|
||||
return err
|
||||
}
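
// readRowsExample is an editor's sketch, not part of the upstream file.
// It shows the callback contract documented above: f is called once per
// row, serially and in row key order, and returning false stops the scan
// early without ReadRows reporting an error.
func readRowsExample(ctx context.Context, tbl *Table) error {
	n := 0
	return tbl.ReadRows(ctx, InfiniteRange(""), func(r Row) bool {
		n++
		return n < 10 // stop cleanly after ten rows
	})
}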
|
||||
|
||||
// ReadRow is a convenience implementation of a single-row reader.
|
||||
// A missing row will return a zero-length map and a nil error.
|
||||
func (t *Table) ReadRow(ctx context.Context, row string, opts ...ReadOption) (Row, error) {
|
||||
var r Row
|
||||
err := t.ReadRows(ctx, SingleRow(row), func(rr Row) bool {
|
||||
r = rr
|
||||
return true
|
||||
}, opts...)
|
||||
return r, err
|
||||
}
|
||||
|
||||
// decodeFamilyProto adds the cell data from f to the given row.
|
||||
func decodeFamilyProto(r Row, row string, f *btpb.Family) {
|
||||
fam := f.Name // does not have colon
|
||||
for _, col := range f.Columns {
|
||||
for _, cell := range col.Cells {
|
||||
ri := ReadItem{
|
||||
Row: row,
|
||||
Column: fam + ":" + string(col.Qualifier),
|
||||
Timestamp: Timestamp(cell.TimestampMicros),
|
||||
Value: cell.Value,
|
||||
}
|
||||
r[fam] = append(r[fam], ri)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RowSet is a set of rows to be read. It is satisfied by RowList and RowRange.
|
||||
type RowSet interface {
|
||||
proto() *btpb.RowSet
|
||||
|
||||
// retainRowsAfter returns a new RowSet that does not include the
|
||||
// given row key or any row key lexicographically less than it.
|
||||
retainRowsAfter(lastRowKey string) RowSet
|
||||
}
|
||||
|
||||
// RowList is a sequence of row keys.
|
||||
type RowList []string
|
||||
|
||||
func (r RowList) proto() *btpb.RowSet {
|
||||
keys := make([][]byte, len(r))
|
||||
for i, row := range r {
|
||||
keys[i] = []byte(row)
|
||||
}
|
||||
return &btpb.RowSet{RowKeys: keys}
|
||||
}
|
||||
|
||||
func (r RowList) retainRowsAfter(lastRowKey string) RowSet {
|
||||
var retryKeys RowList
|
||||
for _, key := range r {
|
||||
if key > lastRowKey {
|
||||
retryKeys = append(retryKeys, key)
|
||||
}
|
||||
}
|
||||
return retryKeys
|
||||
}
|
||||
|
||||
// A RowRange is a half-open interval [Start, Limit) encompassing
|
||||
// all the rows with keys at least as large as Start, and less than Limit.
|
||||
// (Bigtable string comparison is the same as Go's.)
|
||||
// A RowRange can be unbounded, encompassing all keys at least as large as Start.
|
||||
type RowRange struct {
|
||||
start string
|
||||
limit string
|
||||
}
|
||||
|
||||
// NewRange returns the new RowRange [begin, end).
|
||||
func NewRange(begin, end string) RowRange {
|
||||
return RowRange{
|
||||
start: begin,
|
||||
limit: end,
|
||||
}
|
||||
}
|
||||
|
||||
// Unbounded tests whether a RowRange is unbounded.
|
||||
func (r RowRange) Unbounded() bool {
|
||||
return r.limit == ""
|
||||
}
|
||||
|
||||
// Contains says whether the RowRange contains the key.
|
||||
func (r RowRange) Contains(row string) bool {
|
||||
return r.start <= row && (r.limit == "" || r.limit > row)
|
||||
}
|
||||
|
||||
// String provides a printable description of a RowRange.
|
||||
func (r RowRange) String() string {
|
||||
a := strconv.Quote(r.start)
|
||||
if r.Unbounded() {
|
||||
return fmt.Sprintf("[%s,∞)", a)
|
||||
}
|
||||
return fmt.Sprintf("[%s,%q)", a, r.limit)
|
||||
}
|
||||
|
||||
func (r RowRange) proto() *btpb.RowSet {
|
||||
rr := &btpb.RowRange{
|
||||
StartKey: &btpb.RowRange_StartKeyClosed{[]byte(r.start)},
|
||||
}
|
||||
if !r.Unbounded() {
|
||||
rr.EndKey = &btpb.RowRange_EndKeyOpen{[]byte(r.limit)}
|
||||
}
|
||||
return &btpb.RowSet{RowRanges: []*btpb.RowRange{rr}}
|
||||
}
|
||||
|
||||
func (r RowRange) retainRowsAfter(lastRowKey string) RowSet {
|
||||
if lastRowKey == "" {
|
||||
return r
|
||||
}
|
||||
// Set the beginning of the range to the row after the last scanned.
|
||||
start := lastRowKey + "\x00"
|
||||
if r.Unbounded() {
|
||||
return InfiniteRange(start)
|
||||
}
|
||||
return NewRange(start, r.limit)
|
||||
}
|
||||
|
||||
// SingleRow returns a RowSet for reading a single row.
|
||||
func SingleRow(row string) RowSet {
|
||||
return RowList{row}
|
||||
}
|
||||
|
||||
// PrefixRange returns a RowRange consisting of all keys starting with the prefix.
|
||||
func PrefixRange(prefix string) RowRange {
|
||||
return RowRange{
|
||||
start: prefix,
|
||||
limit: prefixSuccessor(prefix),
|
||||
}
|
||||
}
|
||||
|
||||
// InfiniteRange returns the RowRange consisting of all keys at least as
|
||||
// large as start.
|
||||
func InfiniteRange(start string) RowRange {
|
||||
return RowRange{
|
||||
start: start,
|
||||
limit: "",
|
||||
}
|
||||
}
|
||||
|
||||
// prefixSuccessor returns the lexically smallest string greater than the
|
||||
// prefix, if it exists, or "" otherwise. In either case, it is the string
|
||||
// needed for the Limit of a RowRange.
|
||||
func prefixSuccessor(prefix string) string {
|
||||
if prefix == "" {
|
||||
return "" // infinite range
|
||||
}
|
||||
n := len(prefix)
|
||||
for n--; n >= 0 && prefix[n] == '\xff'; n-- {
|
||||
}
|
||||
if n == -1 {
|
||||
return ""
|
||||
}
|
||||
ans := []byte(prefix[:n])
|
||||
ans = append(ans, prefix[n]+1)
|
||||
return string(ans)
|
||||
}
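
// prefixSuccessorExample is an editor's sketch, not part of the upstream
// file. It spells out the three cases above, which together make
// PrefixRange(prefix) cover exactly the keys starting with prefix:
func prefixSuccessorExample() {
	fmt.Println(prefixSuccessor("abc"))    // "abd": last byte incremented
	fmt.Println(prefixSuccessor("ab\xff")) // "ac": trailing 0xff bytes dropped, then increment
	fmt.Println(prefixSuccessor("\xff"))   // "": no successor exists, so the range is unbounded
}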
|
||||
|
||||
// A ReadOption is an optional argument to ReadRows.
|
||||
type ReadOption interface {
|
||||
set(req *btpb.ReadRowsRequest)
|
||||
}
|
||||
|
||||
// RowFilter returns a ReadOption that applies f to the contents of read rows.
|
||||
func RowFilter(f Filter) ReadOption { return rowFilter{f} }
|
||||
|
||||
type rowFilter struct{ f Filter }
|
||||
|
||||
func (rf rowFilter) set(req *btpb.ReadRowsRequest) { req.Filter = rf.f.proto() }
|
||||
|
||||
// LimitRows returns a ReadOption that will limit the number of rows to be read.
|
||||
func LimitRows(limit int64) ReadOption { return limitRows{limit} }
|
||||
|
||||
type limitRows struct{ limit int64 }
|
||||
|
||||
func (lr limitRows) set(req *btpb.ReadRowsRequest) { req.RowsLimit = lr.limit }
|
||||
|
||||
// mutationsAreRetryable returns true if all mutations are idempotent
|
||||
// and therefore retryable. A mutation is idempotent iff every cell it sets
// has an explicit timestamp and does not rely on the server assigning one.
|
||||
func mutationsAreRetryable(muts []*btpb.Mutation) bool {
|
||||
serverTime := int64(ServerTime)
|
||||
for _, mut := range muts {
|
||||
setCell := mut.GetSetCell()
|
||||
if setCell != nil && setCell.TimestampMicros == serverTime {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
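
// retryabilityExample is an editor's sketch, not part of the upstream
// file. A Set with an explicit client timestamp is idempotent, while
// ServerTime lets the server pick the time, so retrying could write a
// second cell; one such mutation makes the whole set non-retryable.
func retryabilityExample() {
	m := NewMutation()
	m.Set("fam", "col", Now(), []byte("v"))      // retryable: explicit timestamp
	m.Set("fam", "col", ServerTime, []byte("v")) // not retryable: server-assigned timestamp
	fmt.Println(mutationsAreRetryable(m.ops))    // false, because of the ServerTime mutation
}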
|
||||
|
||||
// Apply applies a Mutation to a specific row.
|
||||
func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) error {
|
||||
ctx = mergeMetadata(ctx, t.md)
|
||||
after := func(res proto.Message) {
|
||||
for _, o := range opts {
|
||||
o.after(res)
|
||||
}
|
||||
}
|
||||
|
||||
var callOptions []gax.CallOption
|
||||
if m.cond == nil {
|
||||
req := &btpb.MutateRowRequest{
|
||||
TableName: t.c.fullTableName(t.table),
|
||||
RowKey: []byte(row),
|
||||
Mutations: m.ops,
|
||||
}
|
||||
if mutationsAreRetryable(m.ops) {
|
||||
callOptions = retryOptions
|
||||
}
|
||||
var res *btpb.MutateRowResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context) error {
|
||||
var err error
|
||||
res, err = t.c.client.MutateRow(ctx, req)
|
||||
return err
|
||||
}, callOptions...)
|
||||
if err == nil {
|
||||
after(res)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
req := &btpb.CheckAndMutateRowRequest{
|
||||
TableName: t.c.fullTableName(t.table),
|
||||
RowKey: []byte(row),
|
||||
PredicateFilter: m.cond.proto(),
|
||||
}
|
||||
if m.mtrue != nil {
|
||||
req.TrueMutations = m.mtrue.ops
|
||||
}
|
||||
if m.mfalse != nil {
|
||||
req.FalseMutations = m.mfalse.ops
|
||||
}
|
||||
if mutationsAreRetryable(req.TrueMutations) && mutationsAreRetryable(req.FalseMutations) {
|
||||
callOptions = retryOptions
|
||||
}
|
||||
var cmRes *btpb.CheckAndMutateRowResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context) error {
|
||||
var err error
|
||||
cmRes, err = t.c.client.CheckAndMutateRow(ctx, req)
|
||||
return err
|
||||
}, callOptions...)
|
||||
if err == nil {
|
||||
after(cmRes)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// An ApplyOption is an optional argument to Apply.
|
||||
type ApplyOption interface {
|
||||
after(res proto.Message)
|
||||
}
|
||||
|
||||
type applyAfterFunc func(res proto.Message)
|
||||
|
||||
func (a applyAfterFunc) after(res proto.Message) { a(res) }
|
||||
|
||||
// GetCondMutationResult returns an ApplyOption that reports whether the conditional
|
||||
// mutation's condition matched.
|
||||
func GetCondMutationResult(matched *bool) ApplyOption {
|
||||
return applyAfterFunc(func(res proto.Message) {
|
||||
if res, ok := res.(*btpb.CheckAndMutateRowResponse); ok {
|
||||
*matched = res.PredicateMatched
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Mutation represents a set of changes for a single row of a table.
|
||||
type Mutation struct {
|
||||
ops []*btpb.Mutation
|
||||
|
||||
// for conditional mutations
|
||||
cond Filter
|
||||
mtrue, mfalse *Mutation
|
||||
}
|
||||
|
||||
// NewMutation returns a new mutation.
|
||||
func NewMutation() *Mutation {
|
||||
return new(Mutation)
|
||||
}
|
||||
|
||||
// NewCondMutation returns a conditional mutation.
|
||||
// The given row filter determines which mutation is applied:
|
||||
// If the filter matches any cell in the row, mtrue is applied;
|
||||
// otherwise, mfalse is applied.
|
||||
// Either given mutation may be nil.
|
||||
func NewCondMutation(cond Filter, mtrue, mfalse *Mutation) *Mutation {
|
||||
return &Mutation{cond: cond, mtrue: mtrue, mfalse: mfalse}
|
||||
}
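
// condMutationExample is an editor's sketch, not part of the upstream
// file. It assumes FamilyFilter from this package's filter definitions,
// and the family name is hypothetical. mut is applied only when the row
// has a cell in family "fam"; matched reports whether the condition held.
func condMutationExample(ctx context.Context, tbl *Table, row string, mut *Mutation) (bool, error) {
	cond := NewCondMutation(FamilyFilter("fam"), mut, nil)
	var matched bool
	err := tbl.Apply(ctx, row, cond, GetCondMutationResult(&matched))
	return matched, err
}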
|
||||
|
||||
// Set sets a value in a specified column, with the given timestamp.
|
||||
// The timestamp will be truncated to millisecond granularity.
|
||||
// A timestamp of ServerTime means to use the server timestamp.
|
||||
func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) {
|
||||
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
|
||||
FamilyName: family,
|
||||
ColumnQualifier: []byte(column),
|
||||
TimestampMicros: int64(ts.TruncateToMilliseconds()),
|
||||
Value: value,
|
||||
}}})
|
||||
}
|
||||
|
||||
// DeleteCellsInColumn will delete all the cells whose columns are family:column.
|
||||
func (m *Mutation) DeleteCellsInColumn(family, column string) {
|
||||
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{
|
||||
FamilyName: family,
|
||||
ColumnQualifier: []byte(column),
|
||||
}}})
|
||||
}
|
||||
|
||||
// DeleteTimestampRange deletes all cells whose columns are family:column
|
||||
// and whose timestamps are in the half-open interval [start, end).
|
||||
// If end is zero, it will be interpreted as infinity.
|
||||
// The timestamps will be truncated to millisecond granularity.
|
||||
func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timestamp) {
|
||||
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{
|
||||
FamilyName: family,
|
||||
ColumnQualifier: []byte(column),
|
||||
TimeRange: &btpb.TimestampRange{
|
||||
StartTimestampMicros: int64(start.TruncateToMilliseconds()),
|
||||
EndTimestampMicros: int64(end.TruncateToMilliseconds()),
|
||||
},
|
||||
}}})
|
||||
}
|
||||
|
||||
// DeleteCellsInFamily will delete all the cells whose columns are family:*.
|
||||
func (m *Mutation) DeleteCellsInFamily(family string) {
|
||||
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromFamily_{&btpb.Mutation_DeleteFromFamily{
|
||||
FamilyName: family,
|
||||
}}})
|
||||
}
|
||||
|
||||
// DeleteRow deletes the entire row.
|
||||
func (m *Mutation) DeleteRow() {
|
||||
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromRow_{&btpb.Mutation_DeleteFromRow{}}})
|
||||
}
|
||||
|
||||
// entryErr is a container that combines an entry with the error that was returned for it.
|
||||
// Err may be nil if no error was returned for the Entry, or if the Entry has not yet been processed.
|
||||
type entryErr struct {
|
||||
Entry *btpb.MutateRowsRequest_Entry
|
||||
Err error
|
||||
}
|
||||
|
||||
// ApplyBulk applies multiple Mutations.
|
||||
// Each mutation is individually applied atomically,
|
||||
// but the set of mutations may be applied in any order.
|
||||
//
|
||||
// Two types of failures may occur. If the entire process
|
||||
// fails, (nil, err) will be returned. If specific mutations
|
||||
// fail to apply, ([]err, nil) will be returned, and the errors
|
||||
// will correspond to the relevant rowKeys/muts arguments.
|
||||
//
|
||||
// Conditional mutations cannot be applied in bulk and providing one will result in an error.
|
||||
func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutation, opts ...ApplyOption) ([]error, error) {
|
||||
ctx = mergeMetadata(ctx, t.md)
|
||||
if len(rowKeys) != len(muts) {
|
||||
return nil, fmt.Errorf("mismatched rowKeys and mutation array lengths: %d, %d", len(rowKeys), len(muts))
|
||||
}
|
||||
|
||||
origEntries := make([]*entryErr, len(rowKeys))
|
||||
for i, key := range rowKeys {
|
||||
mut := muts[i]
|
||||
if mut.cond != nil {
|
||||
return nil, errors.New("conditional mutations cannot be applied in bulk")
|
||||
}
|
||||
origEntries[i] = &entryErr{Entry: &btpb.MutateRowsRequest_Entry{RowKey: []byte(key), Mutations: mut.ops}}
|
||||
}
|
||||
|
||||
// entries will be reduced after each invocation to just what needs to be retried.
|
||||
entries := make([]*entryErr, len(rowKeys))
|
||||
copy(entries, origEntries)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context) error {
|
||||
err := t.doApplyBulk(ctx, entries, opts...)
|
||||
if err != nil {
|
||||
// We want to retry the entire request with the current entries
|
||||
return err
|
||||
}
|
||||
entries = t.getApplyBulkRetries(entries)
|
||||
if len(entries) > 0 && len(idempotentRetryCodes) > 0 {
|
||||
// We have at least one mutation that needs to be retried.
|
||||
// Return an arbitrary error that is retryable according to callOptions.
|
||||
return grpc.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk")
|
||||
}
|
||||
return nil
|
||||
}, retryOptions...)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Accumulate all of the errors into an array to return, interspersed with nils for successful
|
||||
// entries. The absence of any errors means we should return nil.
|
||||
var errs []error
|
||||
var foundErr bool
|
||||
for _, entry := range origEntries {
|
||||
if entry.Err != nil {
|
||||
foundErr = true
|
||||
}
|
||||
errs = append(errs, entry.Err)
|
||||
}
|
||||
if foundErr {
|
||||
return errs, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
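
// applyBulkExample is an editor's sketch, not part of the upstream file.
// It separates the two failure modes documented above: a non-nil err means
// the whole request failed, while a nil err with a non-nil errs slice
// means individual mutations failed and errs[i] corresponds to rowKeys[i].
func applyBulkExample(ctx context.Context, tbl *Table, rowKeys []string, muts []*Mutation) error {
	errs, err := tbl.ApplyBulk(ctx, rowKeys, muts)
	if err != nil {
		return err // entire request failed
	}
	for i, e := range errs { // errs is nil when every mutation succeeded
		if e != nil {
			fmt.Printf("row %q failed: %v\n", rowKeys[i], e)
		}
	}
	return nil
}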
|
||||
|
||||
// getApplyBulkRetries returns the entries that need to be retried
|
||||
func (t *Table) getApplyBulkRetries(entries []*entryErr) []*entryErr {
|
||||
var retryEntries []*entryErr
|
||||
for _, entry := range entries {
|
||||
err := entry.Err
|
||||
if err != nil && isIdempotentRetryCode[grpc.Code(err)] && mutationsAreRetryable(entry.Entry.Mutations) {
|
||||
// There was an error and the entry is retryable.
|
||||
retryEntries = append(retryEntries, entry)
|
||||
}
|
||||
}
|
||||
return retryEntries
|
||||
}
|
||||
|
||||
// doApplyBulk does the work of a single ApplyBulk invocation
|
||||
func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ...ApplyOption) error {
|
||||
after := func(res proto.Message) {
|
||||
for _, o := range opts {
|
||||
o.after(res)
|
||||
}
|
||||
}
|
||||
|
||||
entries := make([]*btpb.MutateRowsRequest_Entry, len(entryErrs))
|
||||
for i, entryErr := range entryErrs {
|
||||
entries[i] = entryErr.Entry
|
||||
}
|
||||
req := &btpb.MutateRowsRequest{
|
||||
TableName: t.c.fullTableName(t.table),
|
||||
Entries: entries,
|
||||
}
|
||||
stream, err := t.c.client.MutateRows(ctx, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for {
|
||||
res, err := stream.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i, entry := range res.Entries {
|
||||
status := entry.Status
|
||||
if status.Code == int32(codes.OK) {
|
||||
entryErrs[i].Err = nil
|
||||
} else {
|
||||
entryErrs[i].Err = grpc.Errorf(codes.Code(status.Code), status.Message)
|
||||
}
|
||||
}
|
||||
after(res)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Timestamp is in units of microseconds since 1 January 1970.
|
||||
type Timestamp int64
|
||||
|
||||
// ServerTime is a specific Timestamp that may be passed to (*Mutation).Set.
|
||||
// It indicates that the server's timestamp should be used.
|
||||
const ServerTime Timestamp = -1
|
||||
|
||||
// Time converts a time.Time into a Timestamp.
|
||||
func Time(t time.Time) Timestamp { return Timestamp(t.UnixNano() / 1e3) }
|
||||
|
||||
// Now returns the Timestamp representation of the current time on the client.
|
||||
func Now() Timestamp { return Time(time.Now()) }
|
||||
|
||||
// Time converts a Timestamp into a time.Time.
|
||||
func (ts Timestamp) Time() time.Time { return time.Unix(0, int64(ts)*1e3) }
|
||||
|
||||
// TruncateToMilliseconds truncates a Timestamp to millisecond granularity,
|
||||
// which is currently the only granularity supported.
|
||||
func (ts Timestamp) TruncateToMilliseconds() Timestamp {
|
||||
if ts == ServerTime {
|
||||
return ts
|
||||
}
|
||||
return ts - ts%1000
|
||||
}
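
// timestampExample is an editor's sketch, not part of the upstream file.
// Timestamps are microseconds since the epoch, so truncating to
// millisecond granularity zeroes the last three decimal digits.
func timestampExample() {
	ts := Timestamp(1234567)                 // 1.234567s after the epoch
	fmt.Println(ts.TruncateToMilliseconds()) // 1234000
	fmt.Println(Time(time.Unix(1, 0)))       // 1000000: one second, in microseconds
}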
|
||||
|
||||
// ApplyReadModifyWrite applies a ReadModifyWrite to a specific row.
|
||||
// It returns the newly written cells.
|
||||
func (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadModifyWrite) (Row, error) {
|
||||
ctx = mergeMetadata(ctx, t.md)
|
||||
req := &btpb.ReadModifyWriteRowRequest{
|
||||
TableName: t.c.fullTableName(t.table),
|
||||
RowKey: []byte(row),
|
||||
Rules: m.ops,
|
||||
}
|
||||
res, err := t.c.client.ReadModifyWriteRow(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if res.Row == nil {
|
||||
return nil, errors.New("unable to apply ReadModifyWrite: res.Row=nil")
|
||||
}
|
||||
r := make(Row)
|
||||
for _, fam := range res.Row.Families { // res is *btpb.Row, fam is *btpb.Family
|
||||
decodeFamilyProto(r, row, fam)
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// ReadModifyWrite represents a set of operations on a single row of a table.
|
||||
// It is like Mutation but for non-idempotent changes.
|
||||
// When applied, these operations operate on the latest values of the row's cells,
|
||||
// and result in a new value being written to the relevant cell with a timestamp
|
||||
// that is max(existing timestamp, current server time).
|
||||
//
|
||||
// The application of a ReadModifyWrite is atomic; concurrent ReadModifyWrites will
|
||||
// be executed serially by the server.
|
||||
type ReadModifyWrite struct {
|
||||
ops []*btpb.ReadModifyWriteRule
|
||||
}
|
||||
|
||||
// NewReadModifyWrite returns a new ReadModifyWrite.
|
||||
func NewReadModifyWrite() *ReadModifyWrite { return new(ReadModifyWrite) }
|
||||
|
||||
// AppendValue appends a value to a specific cell's value.
|
||||
// If the cell is unset, it will be treated as an empty value.
|
||||
func (m *ReadModifyWrite) AppendValue(family, column string, v []byte) {
|
||||
m.ops = append(m.ops, &btpb.ReadModifyWriteRule{
|
||||
FamilyName: family,
|
||||
ColumnQualifier: []byte(column),
|
||||
Rule: &btpb.ReadModifyWriteRule_AppendValue{v},
|
||||
})
|
||||
}
|
||||
|
||||
// Increment interprets the value in a specific cell as a 64-bit big-endian signed integer,
|
||||
// and adds a value to it. If the cell is unset, it will be treated as zero.
|
||||
// If the cell is set and is not an 8-byte value, the entire ApplyReadModifyWrite
|
||||
// operation will fail.
|
||||
func (m *ReadModifyWrite) Increment(family, column string, delta int64) {
|
||||
m.ops = append(m.ops, &btpb.ReadModifyWriteRule{
|
||||
FamilyName: family,
|
||||
ColumnQualifier: []byte(column),
|
||||
Rule: &btpb.ReadModifyWriteRule_IncrementAmount{delta},
|
||||
})
|
||||
}
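
// incrementExample is an editor's sketch, not part of the upstream file;
// the family and column names are hypothetical. It atomically adds 1 to
// an 8-byte big-endian counter cell and returns the newly written row.
func incrementExample(ctx context.Context, tbl *Table, row string) (Row, error) {
	rmw := NewReadModifyWrite()
	rmw.Increment("fam", "count", 1)
	return tbl.ApplyReadModifyWrite(ctx, row, rmw)
}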
|
||||
|
||||
// mergeMetadata returns a context populated by the existing metadata, if any,
|
||||
// joined with internal metadata.
|
||||
func mergeMetadata(ctx context.Context, md metadata.MD) context.Context {
|
||||
mdCopy, _ := metadata.FromContext(ctx)
|
||||
return metadata.NewContext(ctx, metadata.Join(mdCopy, md))
|
||||
}
|
Some files were not shown because too many files have changed in this diff.