Mirror of https://github.com/octoleo/restic.git (synced 2024-12-22 10:58:55 +00:00)
Update vendored dependencies
commit 8d37b723ca
parent 315b7f282f
Gopkg.lock (generated): 46 changed lines

@@ -10,20 +10,20 @@
[[projects]]
name = "cloud.google.com/go"
packages = ["compute/metadata"]
revision = "f6de2c509ed9d2af648c3c147207eaaf97149aed"
version = "v0.14.0"
revision = "eaddaf6dd7ee35fd3c2420c8d27478db176b0485"
version = "v0.15.0"

[[projects]]
name = "github.com/Azure/azure-sdk-for-go"
packages = ["storage"]
revision = "2592daf71ab6b95dcfc7f7437ecc1afb9ddb7360"
version = "v11.0.0-beta"
revision = "509eea43b93cec2f3f17acbe2578ef58703923f8"
version = "v11.1.1-beta"

[[projects]]
name = "github.com/Azure/go-autorest"
packages = ["autorest","autorest/adal","autorest/azure","autorest/date"]
revision = "f6be1abbb5abd0517522f850dd785990d373da7e"
version = "v8.4.0"
revision = "7aa5b8a6f18b5c15910c767ab005fc4585221177"
version = "v9.1.1"

[[projects]]
name = "github.com/cenkalti/backoff"
@@ -40,14 +40,14 @@
[[projects]]
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
revision = "d2709f9f1f31ebcda9651b03077758c1f3a0018c"
version = "v3.0.0"
revision = "dbeaa9332f19a944acb5736b4456cfcc02140e29"
version = "v3.1.0"

[[projects]]
branch = "master"
name = "github.com/dustin/go-humanize"
packages = ["."]
revision = "79e699ccd02f240a1f1fbbdcee7e64c1c12e41aa"
revision = "77ed807830b4df581417e7f89eb81d4872832b72"

[[projects]]
name = "github.com/elithrar/simple-scrypt"
@@ -58,14 +58,14 @@
[[projects]]
name = "github.com/go-ini/ini"
packages = ["."]
revision = "20b96f641a5ea98f2f8619ff4f3e061cff4833bd"
version = "v1.28.2"
revision = "5b3e00af70a9484542169a976dcab8d03e601a17"
version = "v1.30.0"

[[projects]]
branch = "master"
name = "github.com/golang/protobuf"
packages = ["proto"]
revision = "130e6b02ab059e7b717a096f397c5b60111cae74"
revision = "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9"

[[projects]]
name = "github.com/inconshreveable/mousetrap"
@@ -107,7 +107,7 @@
branch = "master"
name = "github.com/ncw/swift"
packages = ["."]
revision = "9d3f812e23d270d1c66a9a01e20af1005061cdc4"
revision = "c95c6e5c2d1a3d37fc44c8c6dc9e231c7500667d"

[[projects]]
name = "github.com/pkg/errors"
@@ -122,10 +122,10 @@
version = "v1.2.1"

[[projects]]
branch = "master"
name = "github.com/pkg/sftp"
packages = ["."]
revision = "7c1f7a370726a2457b33b29baefc2402b4965c65"
revision = "98203f5a8333288eb3163b7c667d4260fe1333e9"
version = "1.0.0"

[[projects]]
name = "github.com/pkg/xattr"
@@ -136,8 +136,8 @@
[[projects]]
name = "github.com/restic/chunker"
packages = ["."]
revision = "bb2ecf9a98e35a0b336ffc23fc515fb6e7961577"
version = "v0.1.0"
revision = "db83917be3b88cc307464b7d8a221c173e34a0db"
version = "v0.2.0"

[[projects]]
name = "github.com/russross/blackfriday"
@@ -161,7 +161,7 @@
branch = "master"
name = "github.com/spf13/cobra"
packages = [".","doc"]
revision = "e5f66de850af3302fbe378c8acded2b0fa55472c"
revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b"

[[projects]]
name = "github.com/spf13/pflag"
@@ -173,13 +173,13 @@
branch = "master"
name = "golang.org/x/crypto"
packages = ["curve25519","ed25519","ed25519/internal/edwards25519","pbkdf2","poly1305","scrypt","ssh","ssh/terminal"]
revision = "9419663f5a44be8b34ca85f08abc5fe1be11f8a3"
revision = "edd5e9b0879d13ee6970a50153d85b8fec9f7686"

[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = ["context","context/ctxhttp"]
revision = "0a9397675ba34b2845f758fe3cd68828369c6517"
revision = "cd69bc3fc700721b709c3a59e16e24c67b58f6ff"

[[projects]]
branch = "master"
@@ -191,13 +191,13 @@
branch = "master"
name = "golang.org/x/sys"
packages = ["unix","windows"]
revision = "314a259e304ff91bd6985da2a7149bbf91237993"
revision = "8dbc5d05d6edcc104950cc299a1ce6641235bc86"

[[projects]]
branch = "master"
name = "google.golang.org/api"
packages = ["gensupport","googleapi","googleapi/internal/uritemplates","storage/v1"]
revision = "906273f42cdebd65de3a53f30dd9e23de1b55ba9"
revision = "7afc123cf726cd2f253faa3e144d2ab65477b18f"

[[projects]]
name = "google.golang.org/appengine"
@@ -214,6 +214,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "abc33af201086afac21e33a2a7987a473daa6a229c3699ca13761f4d4fd7f52e"
inputs-digest = "f0a207197cb502238ac87ca8e07b2640c02ec380a50b036e09ef87e40e31ca2d"
solver-name = "gps-cdcl"
solver-version = 1
cmd/restic/excludes (new file): 31 lines

@@ -0,0 +1,31 @@
/boot
/dev
/etc
/home
/lost+found
/mnt
/proc
/root
/run
/sys
/tmp
/usr
/var
/opt/android-sdk
/opt/bullet
/opt/dex2jar
/opt/jameica
/opt/google
/opt/JDownloader
/opt/JDownloaderScripts
/opt/opencascade
/opt/vagrant
/opt/visual-studio-code
/opt/vtk6
/bin
/fonts*
/srv/ftp
/srv/http
/sbin
/lib
/lib64
vendor/cloud.google.com/go/CONTRIBUTING.md (generated, vendored): 6 changed lines

@@ -42,6 +42,12 @@ run the against the actual APIs.
- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file.
- **GCLOUD_TESTS_API_KEY**: Your API key.

Firestore requires a different project and key:

- **GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID**: Developers Console project's ID
supporting Firestore
- **GCLOUD_TESTS_GOLANG_FIRESTORE_KEY**: The path to the JSON key file.

Install the [gcloud command-line tool][gcloudcli] to your machine and use it
to create some resources used in integration tests.
vendor/cloud.google.com/go/README.md (generated, vendored): 53 changed lines

@@ -33,6 +33,18 @@ make backwards-incompatible changes.

## News

*v0.15.0*

_October 3, 2017_

- firestore: beta release. See the
[announcement](https://firebase.googleblog.com/2017/10/introducing-cloud-firestore.html).

- errorreporting: The existing package has been redesigned.

- errors: This package has been removed. Use errorreporting.

_September 28, 2017_

*v0.14.0*
@@ -104,41 +116,6 @@ _August 22, 2017_
- storage: support bucket lifecycle configurations.

_July 31, 2017_

*v0.11.0*

- Clients for spanner, pubsub and video are now in beta.

- New client for DLP.

- spanner: performance and testing improvements.

- storage: requester-pays buckets are supported.

- storage, profiler, bigtable, bigquery: bug fixes and other minor improvements.

- pubsub: bug fixes and other minor improvements

_June 17, 2017_

*v0.10.0*

- pubsub: Subscription.ModifyPushConfig replaced with Subscription.Update.

- pubsub: Subscription.Receive now runs concurrently for higher throughput.

- vision: cloud.google.com/go/vision is deprecated. Use
cloud.google.com/go/vision/apiv1 instead.

- translation: now stable.

- trace: several changes to the surface. See the link below.

[Code changes required from v0.9.0.](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/MIGRATION.md)

[Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md)

## Supported APIs
@@ -146,6 +123,7 @@ cloud.google.com/go/vision/apiv1 instead.
Google API | Status | Package
---------------------------------|--------------|-----------------------------------------------------------
[Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
[Firestore][cloud-firestore] | beta | [`cloud.google.com/go/firestore`][cloud-firestore-ref]
[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref]
[Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
[BigQuery][cloud-bigquery] | beta | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
@@ -480,6 +458,11 @@ for more information.
[cloud-datastore-docs]: https://cloud.google.com/datastore/docs
[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate

[cloud-firestore]: https://cloud.google.com/firestore/
[cloud-firestore-ref]: https://godoc.org/cloud.google.com/go/firestore
[cloud-firestore-docs]: https://cloud.google.com/firestore/docs
[cloud-firestore-activation]: https://cloud.google.com/firestore/docs/activate

[cloud-pubsub]: https://cloud.google.com/pubsub/
[cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub
[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs
vendor/cloud.google.com/go/bigquery/doc.go (generated, vendored): 11 changed lines

@@ -124,7 +124,7 @@ These methods create references to datasets, not the datasets themselves. You ca
a dataset reference even if the dataset doesn't exist yet. Use Dataset.Create to
create a dataset from a reference:

if err := myDataset.Create(ctx); err != nil {
if err := myDataset.Create(ctx, nil); err != nil {
// TODO: Handle error.
}

@@ -134,9 +134,10 @@ to an object in BigQuery that may or may not exist.
table := myDataset.Table("my_table")

You can create, delete and update the metadata of tables with methods on Table.
Table.Create supports a few options. For instance, you could create a temporary table with:
For instance, you could create a temporary table with:

err = myDataset.Table("temp").Create(ctx, bigquery.TableExpiration(time.Now().Add(1*time.Hour)))
err = myDataset.Table("temp").Create(ctx, &bigquery.TableMetadata{
ExpirationTime: time.Now().Add(1*time.Hour)})
if err != nil {
// TODO: Handle error.
}

@@ -179,9 +180,9 @@ so you can change names or ignore fields:
}
// schema3 has fields "full_name" and "Grade".

Having constructed a schema, you can pass it to Table.Create as an option:
Having constructed a schema, you can create a table with it like so:

if err := table.Create(ctx, schema1); err != nil {
if err := table.Create(ctx, &bigquery.TableMetadata{Schema: schema1}); err != nil {
// TODO: Handle error.
}
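The bigquery hunk above replaces the old option-based Table.Create with a call that takes a *bigquery.TableMetadata. A minimal sketch of the updated call, assembled from the snippets in that hunk; the project, dataset, and table names are placeholders and not part of this commit:

```go
package main

import (
	"log"
	"time"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()

	// "my-project" and the dataset/table IDs below are placeholders.
	client, err := bigquery.NewClient(ctx, "my-project")
	if err != nil {
		log.Fatal(err)
	}

	// Previously table options such as bigquery.TableExpiration were passed to
	// Create; with the vendored v0.15.0 API the properties go into TableMetadata.
	table := client.Dataset("my_dataset").Table("temp")
	if err := table.Create(ctx, &bigquery.TableMetadata{
		ExpirationTime: time.Now().Add(1 * time.Hour),
	}); err != nil {
		log.Fatal(err)
	}
}
```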
vendor/cloud.google.com/go/errorreporting/error_logging_test.go (generated, vendored): 215 changed lines (file deleted)

@@ -1,215 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package errorreporting

import (
"bytes"
"errors"
"log"
"strings"
"testing"

"cloud.google.com/go/logging"
"golang.org/x/net/context"
"google.golang.org/api/option"
)

type fakeLogger struct {
entry *logging.Entry
fail bool
}

func (c *fakeLogger) LogSync(ctx context.Context, e logging.Entry) error {
if c.fail {
return errors.New("request failed")
}
c.entry = &e
return nil
}

func (c *fakeLogger) Close() error {
return nil
}

func newTestClientUsingLogging(c *fakeLogger) *Client {
newLoggerInterface = func(ctx context.Context, project string, opts ...option.ClientOption) (loggerInterface, error) {
return c, nil
}
t, err := NewClient(context.Background(), testProjectID, "myservice", "v1.000", true)
if err != nil {
panic(err)
}
t.RepanicDefault = false
return t
}

func TestCatchNothingUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx)
}

func entryMessage(e *logging.Entry) string {
return e.Payload.(map[string]interface{})["message"].(string)
}

func commonLoggingChecks(t *testing.T, e *logging.Entry, panickingFunction string) {
if e.Payload.(map[string]interface{})["serviceContext"].(map[string]string)["service"] != "myservice" {
t.Errorf("error report didn't contain service name")
}
if e.Payload.(map[string]interface{})["serviceContext"].(map[string]string)["version"] != "v1.000" {
t.Errorf("error report didn't contain version name")
}
if !strings.Contains(entryMessage(e), "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(entryMessage(e), panickingFunction) {
t.Errorf("error report didn't contain stack trace")
}
}

func TestCatchPanicUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestCatchPanic")
if !strings.Contains(entryMessage(e), "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}

func TestCatchPanicNilClientUsingLogging(t *testing.T) {
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "TestCatchPanicNilClient") {
t.Errorf("error report didn't contain recovered value")
}
}()
var c *Client
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}

func TestLogFailedReportsUsingLogging(t *testing.T) {
fl := &fakeLogger{fail: true}
c := newTestClientUsingLogging(fl)
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "errorreporting.TestLogFailedReports") {
t.Errorf("error report didn't contain stack trace")
}
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}

func TestCatchNilPanicUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestCatchNilPanic")
if !strings.Contains(entryMessage(e), "nil") {
t.Errorf("error report didn't contain recovered value")
}
}()
b := true
defer c.Catch(ctx, WithMessage("hello, error"), PanicFlag(&b))
panic(nil)
}

func TestNotCatchNilPanicUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
panic(nil)
}

func TestReportUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
c.Report(ctx, nil, "hello, ", "error")
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestReport")
}

func TestReportfUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
c.Reportf(ctx, nil, "hello, error 2+%d=%d", 2, 2+2)
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestReportf")
if !strings.Contains(entryMessage(e), "2+2=4") {
t.Errorf("error report didn't contain formatted message")
}
}

func TestCloseUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
err := c.Close()
if err != nil {
t.Fatal(err)
}
}
vendor/cloud.google.com/go/errorreporting/errors.go (generated, vendored): 436 changed lines

@@ -20,50 +20,17 @@
//
// To initialize a client, use the NewClient function.
//
// import "cloud.google.com/go/errorreporting"
// import er "cloud.google.com/go/errorreporting"
// ...
// errorsClient, err = errorreporting.NewClient(ctx, projectID, "myservice", "v1.0", true)
// errorsClient, err = er.NewClient(ctx, projectID, er.Config{
// ServiceName: "myservice",
// ServiceVersion: "v1.0",
// })
//
// The client can recover panics in your program and report them as errors.
// To use this functionality, defer its Catch method, as you would any other
// function for recovering panics.
//
// func foo(ctx context.Context, ...) {
// defer errorsClient.Catch(ctx)
// ...
// }
//
// Catch writes an error report containing the recovered value and a stack trace
// to Stackdriver Error Reporting.
//
// There are various options you can add to the call to Catch that modify how
// panics are handled.
//
// WithMessage and WithMessagef add a custom message after the recovered value,
// using fmt.Sprint and fmt.Sprintf respectively.
//
// defer errorsClient.Catch(ctx, errorreporting.WithMessagef("x=%d", x))
//
// WithRequest fills in various fields in the error report with information
// about an http.Request that's being handled.
//
// defer errorsClient.Catch(ctx, errorreporting.WithRequest(httpReq))
//
// By default, after recovering a panic, Catch will panic again with the
// recovered value. You can turn off this behavior with the Repanic option.
//
// defer errorsClient.Catch(ctx, errorreporting.Repanic(false))
//
// You can also change the default behavior for the client by changing the
// RepanicDefault field.
//
// errorsClient.RepanicDefault = false
//
// It is also possible to write an error report directly without recovering a
// panic, using Report or Reportf.
// With a client, you can then report errors:
//
// if err != nil {
// errorsClient.Reportf(ctx, r, "unexpected error %v", err)
// errorsClient.Report(ctx, er.Entry{Error: err})
// }
//
// If you try to write an error report with a nil client, or if the client
@@ -77,16 +44,15 @@ import (
"log"
"net/http"
"runtime"
"strings"
"time"

api "cloud.google.com/go/errorreporting/apiv1beta1"
"cloud.google.com/go/internal/version"
"cloud.google.com/go/logging"
"github.com/golang/protobuf/ptypes/timestamp"
"github.com/golang/protobuf/ptypes"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/api/support/bundler"
erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
)

@@ -94,12 +60,39 @@ const (
userAgent = `gcloud-golang-errorreporting/20160701`
)

type apiInterface interface {
ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, opts ...gax.CallOption) (*erpb.ReportErrorEventResponse, error)
Close() error
// Config is additional configuration for Client.
type Config struct {
// ServiceName identifies the running program and is included in the error reports.
// Optional.
ServiceName string

// ServiceVersion identifies the version of the running program and is
// included in the error reports.
// Optional.
ServiceVersion string

// OnError is the function to call if any background
// tasks errored. By default, errors are logged.
OnError func(err error)
}

var newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (apiInterface, error) {
// Entry holds information about the reported error.
type Entry struct {
Error error
Req *http.Request // if error is associated with a request.
}

// Client represents a Google Cloud Error Reporting client.
type Client struct {
projectID string
apiClient client
serviceContext erpb.ServiceContext
bundler *bundler.Bundler

onErrorFn func(err error)
}

var newClient = func(ctx context.Context, opts ...option.ClientOption) (client, error) {
client, err := api.NewReportErrorsClient(ctx, opts...)
if err != nil {
return nil, err
@@ -108,289 +101,89 @@ var newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (ap
return client, nil
}

type loggerInterface interface {
LogSync(ctx context.Context, e logging.Entry) error
Close() error
}

type logger struct {
*logging.Logger
c *logging.Client
}

func (l logger) Close() error {
return l.c.Close()
}

var newLoggerInterface = func(ctx context.Context, projectID string, opts ...option.ClientOption) (loggerInterface, error) {
lc, err := logging.NewClient(ctx, projectID, opts...)
if err != nil {
return nil, fmt.Errorf("creating Logging client: %v", err)
}
l := lc.Logger("errorreports")
return logger{l, lc}, nil
}

type sender interface {
send(ctx context.Context, r *http.Request, message string)
close() error
}

// errorApiSender sends error reports using the Stackdriver Error Reporting API.
type errorApiSender struct {
apiClient apiInterface
projectID string
serviceContext erpb.ServiceContext
}

// loggingSender sends error reports using the Stackdriver Logging API.
type loggingSender struct {
logger loggerInterface
projectID string
serviceContext map[string]string
}

// Client represents a Google Cloud Error Reporting client.
type Client struct {
sender
// RepanicDefault determines whether Catch will re-panic after recovering a
// panic. This behavior can be overridden for an individual call to Catch using
// the Repanic option.
RepanicDefault bool
}

// NewClient returns a new error reporting client. Generally you will want
// to create a client on program initialization and use it through the lifetime
// of the process.
//
// The service name and version string identify the running program, and are
// included in error reports. The version string can be left empty.
//
// Set useLogging to report errors also using Stackdriver Logging,
// which will result in errors appearing in both the logs and the error
// dashboard. This is useful if you are already a user of Stackdriver Logging.
func NewClient(ctx context.Context, projectID, serviceName, serviceVersion string, useLogging bool, opts ...option.ClientOption) (*Client, error) {
if useLogging {
l, err := newLoggerInterface(ctx, projectID, opts...)
func NewClient(ctx context.Context, projectID string, cfg Config, opts ...option.ClientOption) (*Client, error) {
if cfg.ServiceName == "" {
cfg.ServiceName = "goapp"
}
c, err := newClient(ctx, opts...)
if err != nil {
return nil, fmt.Errorf("creating Logging client: %v", err)
return nil, fmt.Errorf("creating client: %v", err)
}
sender := &loggingSender{
logger: l,
projectID: projectID,
serviceContext: map[string]string{
"service": serviceName,
},
}
if serviceVersion != "" {
sender.serviceContext["version"] = serviceVersion
}
c := &Client{
sender: sender,
RepanicDefault: true,
}
return c, nil
} else {
a, err := newApiInterface(ctx, opts...)
if err != nil {
return nil, fmt.Errorf("creating Error Reporting client: %v", err)
}
c := &Client{
sender: &errorApiSender{
apiClient: a,

client := &Client{
apiClient: c,
projectID: "projects/" + projectID,
serviceContext: erpb.ServiceContext{
Service: serviceName,
Version: serviceVersion,
Service: cfg.ServiceName,
Version: cfg.ServiceVersion,
},
},
RepanicDefault: true,
}
return c, nil
bundler := bundler.NewBundler((*erpb.ReportErrorEventRequest)(nil), func(bundle interface{}) {
reqs := bundle.([]*erpb.ReportErrorEventRequest)
for _, req := range reqs {
_, err = client.apiClient.ReportErrorEvent(ctx, req)
if err != nil {
client.onError(fmt.Errorf("failed to upload: %v", err))
}
}
})
// TODO(jbd): Optimize bundler limits.
bundler.DelayThreshold = 2 * time.Second
bundler.BundleCountThreshold = 100
bundler.BundleByteThreshold = 1000
bundler.BundleByteLimit = 1000
bundler.BufferedByteLimit = 10000
client.bundler = bundler
return client, nil
}

func (c *Client) onError(err error) {
if c.onErrorFn != nil {
c.onErrorFn(err)
return
}
log.Println(err)
}

// Close closes any resources held by the client.
// Close should be called when the client is no longer needed.
// It need not be called at program exit.
func (c *Client) Close() error {
err := c.sender.close()
c.sender = nil
return c.apiClient.Close()
}

// Report writes an error report. It doesn't block. Errors in
// writing the error report can be handled via Client.OnError.
func (c *Client) Report(e Entry) {
req := c.makeReportErrorEventRequest(e.Req, e.Error.Error())
c.bundler.Add(req, 1)
}

// ReportSync writes an error report. It blocks until the entry is written.
func (c *Client) ReportSync(ctx context.Context, e Entry) error {
req := c.makeReportErrorEventRequest(e.Req, e.Error.Error())
_, err := c.apiClient.ReportErrorEvent(ctx, req)
return err
}

// An Option is an optional argument to Catch.
type Option interface {
isOption()
}

// PanicFlag returns an Option that can inform Catch that a panic has occurred.
// If *p is true when Catch is called, an error report is made even if recover
// returns nil. This allows Catch to report an error for panic(nil).
// If p is nil, the option is ignored.
// Flush blocks until all currently buffered error reports are sent.
//
// Here is an example of how to use PanicFlag:
//
// func foo(ctx context.Context, ...) {
// hasPanicked := true
// defer errorsClient.Catch(ctx, errorreporting.PanicFlag(&hasPanicked))
// ...
// ...
// // We have reached the end of the function, so we're not panicking.
// hasPanicked = false
// }
func PanicFlag(p *bool) Option { return panicFlag{p} }

type panicFlag struct {
*bool
// If any errors occurred since the last call to Flush, or the
// creation of the client if this is the first call, then Flush report the
// error via the (*Client).OnError handler.
func (c *Client) Flush() {
c.bundler.Flush()
}

func (h panicFlag) isOption() {}

// Repanic returns an Option that determines whether Catch will re-panic after
// it reports an error. This overrides the default in the client.
func Repanic(r bool) Option { return repanic(r) }

type repanic bool

func (r repanic) isOption() {}

// WithRequest returns an Option that informs Catch or Report of an http.Request
// that is being handled. Information from the Request is included in the error
// report, if one is made.
func WithRequest(r *http.Request) Option { return withRequest{r} }

type withRequest struct {
*http.Request
}

func (w withRequest) isOption() {}

// WithMessage returns an Option that sets a message to be included in the error
// report, if one is made. v is converted to a string with fmt.Sprint.
func WithMessage(v ...interface{}) Option { return message(v) }

type message []interface{}

func (m message) isOption() {}

// WithMessagef returns an Option that sets a message to be included in the error
// report, if one is made. format and v are converted to a string with fmt.Sprintf.
func WithMessagef(format string, v ...interface{}) Option { return messagef{format, v} }

type messagef struct {
format string
v []interface{}
}

func (m messagef) isOption() {}

// Catch tries to recover a panic; if it succeeds, it writes an error report.
// It should be called by deferring it, like any other function for recovering
// panics.
//
// Catch can be called concurrently with other calls to Catch, Report or Reportf.
func (c *Client) Catch(ctx context.Context, opt ...Option) {
panicked := false
for _, o := range opt {
switch o := o.(type) {
case panicFlag:
panicked = panicked || o.bool != nil && *o.bool
}
}
x := recover()
if x == nil && !panicked {
return
}
var (
r *http.Request
shouldRepanic = true
messages = []string{fmt.Sprint(x)}
)
if c != nil {
shouldRepanic = c.RepanicDefault
}
for _, o := range opt {
switch o := o.(type) {
case repanic:
shouldRepanic = bool(o)
case withRequest:
r = o.Request
case message:
messages = append(messages, fmt.Sprint(o...))
case messagef:
messages = append(messages, fmt.Sprintf(o.format, o.v...))
}
}
c.logInternal(ctx, r, true, strings.Join(messages, " "))
if shouldRepanic {
panic(x)
}
}

// Report writes an error report unconditionally, instead of only when a panic
// occurs.
// If r is non-nil, information from the Request is included in the error report.
//
// Report can be called concurrently with other calls to Catch, Report or Reportf.
func (c *Client) Report(ctx context.Context, r *http.Request, v ...interface{}) {
c.logInternal(ctx, r, false, fmt.Sprint(v...))
}

// Reportf writes an error report unconditionally, instead of only when a panic
// occurs.
// If r is non-nil, information from the Request is included in the error report.
//
// Reportf can be called concurrently with other calls to Catch, Report or Reportf.
func (c *Client) Reportf(ctx context.Context, r *http.Request, format string, v ...interface{}) {
c.logInternal(ctx, r, false, fmt.Sprintf(format, v...))
}

func (c *Client) logInternal(ctx context.Context, r *http.Request, isPanic bool, msg string) {
func (c *Client) makeReportErrorEventRequest(r *http.Request, msg string) *erpb.ReportErrorEventRequest {
// limit the stack trace to 16k.
var buf [16384]byte
var buf [16 * 1024]byte
stack := buf[0:runtime.Stack(buf[:], false)]
message := msg + "\n" + chopStack(stack, isPanic)
if c == nil {
log.Println("Error report used nil client:", message)
return
}
c.send(ctx, r, message)
}
message := msg + "\n" + chopStack(stack)

func (s *loggingSender) send(ctx context.Context, r *http.Request, message string) {
payload := map[string]interface{}{
"eventTime": time.Now().In(time.UTC).Format(time.RFC3339Nano),
"message": message,
"serviceContext": s.serviceContext,
}
if r != nil {
payload["context"] = map[string]interface{}{
"httpRequest": map[string]interface{}{
"method": r.Method,
"url": r.Host + r.RequestURI,
"userAgent": r.UserAgent(),
"referrer": r.Referer(),
"remoteIp": r.RemoteAddr,
},
}
}
e := logging.Entry{
Severity: logging.Error,
Payload: payload,
}
err := s.logger.LogSync(ctx, e)
if err != nil {
log.Println("Error writing error report:", err, "report:", payload)
}
}

func (s *loggingSender) close() error {
return s.logger.Close()
}

func (s *errorApiSender) send(ctx context.Context, r *http.Request, message string) {
time := time.Now()
var errorContext *erpb.ErrorContext
if r != nil {
errorContext = &erpb.ErrorContext{
@@ -403,37 +196,21 @@ func (s *errorApiSender) send(ctx context.Context, r *http.Request, message stri
},
}
}
req := erpb.ReportErrorEventRequest{
ProjectName: s.projectID,
return &erpb.ReportErrorEventRequest{
ProjectName: c.projectID,
Event: &erpb.ReportedErrorEvent{
EventTime: &timestamp.Timestamp{
Seconds: time.Unix(),
Nanos: int32(time.Nanosecond()),
},
ServiceContext: &s.serviceContext,
EventTime: ptypes.TimestampNow(),
ServiceContext: &c.serviceContext,
Message: message,
Context: errorContext,
},
}
_, err := s.apiClient.ReportErrorEvent(ctx, &req)
if err != nil {
log.Println("Error writing error report:", err, "report:", message)
}
}

func (s *errorApiSender) close() error {
return s.apiClient.Close()
}

// chopStack trims a stack trace so that the function which panics or calls
// Report is first.
func chopStack(s []byte, isPanic bool) string {
var f []byte
if isPanic {
f = []byte("panic(")
} else {
f = []byte("cloud.google.com/go/errorreporting.(*Client).Report")
}
func chopStack(s []byte) string {
f := []byte("cloud.google.com/go/errorreporting.(*Client).Report")

lfFirst := bytes.IndexByte(s, '\n')
if lfFirst == -1 {
@@ -454,3 +231,8 @@ func chopStack(s []byte, isPanic bool) string {
}
return string(s[:lfFirst+1]) + string(stack)
}

type client interface {
ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, opts ...gax.CallOption) (*erpb.ReportErrorEventResponse, error)
Close() error
}
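The errorreporting hunks above replace the option/Catch-based client with a Config-driven one: NewClient now takes a Config, Report takes an Entry and is buffered through a bundler, and Flush and Close drain it. A minimal usage sketch against the vendored API shown above; the project ID is a placeholder, and note that the implementation's Report takes just an Entry even though the package comment in the hunk still shows a ctx argument:

```go
package main

import (
	"errors"
	"log"

	"cloud.google.com/go/errorreporting"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()

	// "my-project" is a placeholder project ID for this sketch.
	ec, err := errorreporting.NewClient(ctx, "my-project", errorreporting.Config{
		ServiceName:    "myservice",
		ServiceVersion: "v1.0",
		// Called when a buffered report fails to upload (defaults to log.Println).
		OnError: func(err error) { log.Println("errorreporting:", err) },
	})
	if err != nil {
		log.Fatal(err)
	}
	defer ec.Close()

	// Report is non-blocking; entries are batched by the bundler set up in NewClient.
	ec.Report(errorreporting.Entry{Error: errors.New("something went wrong")})

	// Flush blocks until the buffered reports have been sent.
	ec.Flush()
}
```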
vendor/cloud.google.com/go/errorreporting/errors_test.go (generated, vendored): 167 changed lines

@@ -15,12 +15,12 @@
package errorreporting

import (
"bytes"
"errors"
"log"
"strings"
"testing"

"cloud.google.com/go/internal/testutil"

gax "github.com/googleapis/gax-go"

"golang.org/x/net/context"
@@ -28,14 +28,16 @@ import (
erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
)

const testProjectID = "testproject"

type fakeReportErrorsClient struct {
req *erpb.ReportErrorEventRequest
fail bool
doneCh chan struct{}
}

func (c *fakeReportErrorsClient) ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, _ ...gax.CallOption) (*erpb.ReportErrorEventResponse, error) {
defer func() {
close(c.doneCh)
}()
if c.fail {
return nil, errors.New("request failed")
}
@@ -47,166 +49,65 @@ func (c *fakeReportErrorsClient) Close() error {
return nil
}

func newFakeReportErrorsClient() *fakeReportErrorsClient {
c := &fakeReportErrorsClient{}
c.doneCh = make(chan struct{})
return c
}

func newTestClient(c *fakeReportErrorsClient) *Client {
newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (apiInterface, error) {
newClient = func(ctx context.Context, opts ...option.ClientOption) (client, error) {
return c, nil
}
t, err := NewClient(context.Background(), testProjectID, "myservice", "v1.000", false)
t, err := NewClient(context.Background(), testutil.ProjID(), Config{
ServiceName: "myservice",
ServiceVersion: "v1.0",
})
if err != nil {
panic(err)
}
t.RepanicDefault = false
return t
}

var ctx context.Context

func init() {
ctx = context.Background()
}

func TestCatchNothing(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
defer func() {
r := fc.req
if r != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx)
}

func commonChecks(t *testing.T, req *erpb.ReportErrorEventRequest, panickingFunction string) {
func commonChecks(t *testing.T, req *erpb.ReportErrorEventRequest, fn string) {
if req.Event.ServiceContext.Service != "myservice" {
t.Errorf("error report didn't contain service name")
}
if req.Event.ServiceContext.Version != "v1.000" {
if req.Event.ServiceContext.Version != "v1.0" {
t.Errorf("error report didn't contain version name")
}
if !strings.Contains(req.Event.Message, "hello, error") {
if !strings.Contains(req.Event.Message, "error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(req.Event.Message, panickingFunction) {
if !strings.Contains(req.Event.Message, fn) {
t.Errorf("error report didn't contain stack trace")
}
}

func TestCatchPanic(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
defer func() {
r := fc.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, r, "errorreporting.TestCatchPanic")
if !strings.Contains(r.Event.Message, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}

func TestCatchPanicNilClient(t *testing.T) {
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "TestCatchPanicNilClient") {
t.Errorf("error report didn't contain recovered value")
}
}()
var c *Client
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}

func TestLogFailedReports(t *testing.T) {
fc := &fakeReportErrorsClient{fail: true}
c := newTestClient(fc)
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "errorreporting.TestLogFailedReports") {
t.Errorf("error report didn't contain stack trace")
}
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}

func TestCatchNilPanic(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
defer func() {
r := fc.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, r, "errorreporting.TestCatchNilPanic")
if !strings.Contains(r.Event.Message, "nil") {
t.Errorf("error report didn't contain recovered value")
}
}()
b := true
defer c.Catch(ctx, WithMessage("hello, error"), PanicFlag(&b))
panic(nil)
}

func TestNotCatchNilPanic(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
defer func() {
r := fc.req
if r != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
panic(nil)
}

func TestReport(t *testing.T) {
fc := &fakeReportErrorsClient{}
fc := newFakeReportErrorsClient()
c := newTestClient(fc)
c.Report(ctx, nil, "hello, ", "error")
c.Report(Entry{Error: errors.New("error")})

<-fc.doneCh
r := fc.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, r, "errorreporting.TestReport")
}

func TestReportf(t *testing.T) {
fc := &fakeReportErrorsClient{}
func TestReportSync(t *testing.T) {
ctx := context.Background()
fc := newFakeReportErrorsClient()
c := newTestClient(fc)
c.Reportf(ctx, nil, "hello, error 2+%d=%d", 2, 2+2)
if err := c.ReportSync(ctx, Entry{Error: errors.New("error")}); err != nil {
t.Fatalf("cannot upload errors: %v", err)
}

<-fc.doneCh
r := fc.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, r, "errorreporting.TestReportf")
if !strings.Contains(r.Event.Message, "2+2=4") {
t.Errorf("error report didn't contain formatted message")
}
commonChecks(t, r, "errorreporting.TestReport")
}
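Because Report is now asynchronous, the rewritten tests above have the fake ReportErrorEvent client close a doneCh channel and block on it before inspecting the captured request. A self-contained sketch of that synchronization pattern with hypothetical names (not part of this commit), adding a timeout so a broken path fails instead of hanging:

```go
package asyncreport

import (
	"testing"
	"time"
)

// fakeSink plays the role of fakeReportErrorsClient above: it records the
// message and closes doneCh once the asynchronous upload actually happens.
type fakeSink struct {
	got    string
	doneCh chan struct{}
}

func (f *fakeSink) send(msg string) {
	f.got = msg
	close(f.doneCh) // signal the waiting test
}

func TestAsyncReport(t *testing.T) {
	f := &fakeSink{doneCh: make(chan struct{})}

	// Stand-in for Client.Report handing the request to a bundler.
	go f.send("boom")

	select {
	case <-f.doneCh:
		if f.got != "boom" {
			t.Errorf("got %q, want %q", f.got, "boom")
		}
	case <-time.After(time.Second):
		t.Fatal("timed out waiting for the report to be sent")
	}
}
```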
vendor/cloud.google.com/go/errorreporting/stack_test.go (generated, vendored): 66 changed lines

@@ -21,68 +21,7 @@ func TestChopStack(t *testing.T) {
name string
in []byte
expected string
isPanic bool
}{
{
name: "Catch",
in: []byte(`goroutine 20 [running]:
runtime/debug.Stack()
/gopath/src/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errorreporting.(*Client).logInternal()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:259 +0x18b
cloud.google.com/go/errorreporting.(*Client).Catch()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:219 +0x6ed
panic()
/gopath/src/runtime/panic.go:458 +0x243
cloud.google.com/go/errorreporting.TestCatchPanic()
/gopath/src/cloud.google.com/go/errorreporting/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`),
expected: `goroutine 20 [running]:
cloud.google.com/go/errorreporting.TestCatchPanic()
/gopath/src/cloud.google.com/go/errorreporting/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`,
isPanic: true,
},
{
name: "function not found",
in: []byte(`goroutine 20 [running]:
runtime/debug.Stack()
/gopath/src/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errorreporting.(*Client).logInternal()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:259 +0x18b
cloud.google.com/go/errorreporting.(*Client).Catch()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:219 +0x6ed
cloud.google.com/go/errorreporting.TestCatchPanic()
/gopath/src/cloud.google.com/go/errorreporting/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`),
expected: `goroutine 20 [running]:
runtime/debug.Stack()
/gopath/src/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errorreporting.(*Client).logInternal()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:259 +0x18b
cloud.google.com/go/errorreporting.(*Client).Catch()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:219 +0x6ed
cloud.google.com/go/errorreporting.TestCatchPanic()
/gopath/src/cloud.google.com/go/errorreporting/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`,
isPanic: true,
},
{
name: "Report",
in: []byte(` goroutine 39 [running]:
@@ -107,12 +46,11 @@ testing.tRunner()
created by testing.(*T).Run
/gopath/testing/testing.go:646 +0x2ec
`,
isPanic: false,
},
} {
out := chopStack(test.in, test.isPanic)
out := chopStack(test.in)
if out != test.expected {
t.Errorf("case %q: chopStack(%q, %t): got %q want %q", test.name, test.in, test.isPanic, out, test.expected)
t.Errorf("case %q: chopStack(%q): got %q want %q", test.name, test.in, out, test.expected)
}
}
}
vendor/cloud.google.com/go/errors/error_logging_test.go (generated, vendored): 215 changed lines (file deleted)

@@ -1,215 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package errors

import (
"bytes"
"errors"
"log"
"strings"
"testing"

"cloud.google.com/go/logging"
"golang.org/x/net/context"
"google.golang.org/api/option"
)

type fakeLogger struct {
entry *logging.Entry
fail bool
}

func (c *fakeLogger) LogSync(ctx context.Context, e logging.Entry) error {
if c.fail {
return errors.New("request failed")
}
c.entry = &e
return nil
}

func (c *fakeLogger) Close() error {
return nil
}

func newTestClientUsingLogging(c *fakeLogger) *Client {
newLoggerInterface = func(ctx context.Context, project string, opts ...option.ClientOption) (loggerInterface, error) {
return c, nil
}
t, err := NewClient(context.Background(), testProjectID, "myservice", "v1.000", true)
if err != nil {
panic(err)
}
t.RepanicDefault = false
return t
}

func TestCatchNothingUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx)
}

func entryMessage(e *logging.Entry) string {
return e.Payload.(map[string]interface{})["message"].(string)
}

func commonLoggingChecks(t *testing.T, e *logging.Entry, panickingFunction string) {
if e.Payload.(map[string]interface{})["serviceContext"].(map[string]string)["service"] != "myservice" {
t.Errorf("error report didn't contain service name")
}
if e.Payload.(map[string]interface{})["serviceContext"].(map[string]string)["version"] != "v1.000" {
t.Errorf("error report didn't contain version name")
}
if !strings.Contains(entryMessage(e), "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(entryMessage(e), panickingFunction) {
t.Errorf("error report didn't contain stack trace")
}
}

func TestCatchPanicUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestCatchPanic")
if !strings.Contains(entryMessage(e), "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}

func TestCatchPanicNilClientUsingLogging(t *testing.T) {
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "TestCatchPanicNilClient") {
t.Errorf("error report didn't contain recovered value")
}
}()
var c *Client
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}

func TestLogFailedReportsUsingLogging(t *testing.T) {
fl := &fakeLogger{fail: true}
c := newTestClientUsingLogging(fl)
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "errors.TestLogFailedReports") {
t.Errorf("error report didn't contain stack trace")
}
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}

func TestCatchNilPanicUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestCatchNilPanic")
if !strings.Contains(entryMessage(e), "nil") {
t.Errorf("error report didn't contain recovered value")
}
}()
b := true
defer c.Catch(ctx, WithMessage("hello, error"), PanicFlag(&b))
panic(nil)
}

func TestNotCatchNilPanicUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
panic(nil)
}

func TestReportUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
c.Report(ctx, nil, "hello, ", "error")
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestReport")
}

func TestReportfUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
c.Reportf(ctx, nil, "hello, error 2+%d=%d", 2, 2+2)
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestReportf")
if !strings.Contains(entryMessage(e), "2+2=4") {
t.Errorf("error report didn't contain formatted message")
}
}

func TestCloseUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
err := c.Close()
if err != nil {
t.Fatal(err)
}
}
458
vendor/cloud.google.com/go/errors/errors.go
generated
vendored
458
vendor/cloud.google.com/go/errors/errors.go
generated
vendored
@ -1,458 +0,0 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package errors is a Google Stackdriver Error Reporting library.
|
||||
//
|
||||
// This package is still experimental and subject to change.
|
||||
//
|
||||
// See https://cloud.google.com/error-reporting/ for more information.
|
||||
//
|
||||
// To initialize a client, use the NewClient function.
|
||||
//
|
||||
// import "cloud.google.com/go/errors"
|
||||
// ...
|
||||
// errorsClient, err = errors.NewClient(ctx, projectID, "myservice", "v1.0", true)
|
||||
//
|
||||
// The client can recover panics in your program and report them as errors.
|
||||
// To use this functionality, defer its Catch method, as you would any other
|
||||
// function for recovering panics.
|
||||
//
|
||||
// func foo(ctx context.Context, ...) {
|
||||
// defer errorsClient.Catch(ctx)
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// Catch writes an error report containing the recovered value and a stack trace
|
||||
// to Stackdriver Error Reporting.
|
||||
//
|
||||
// There are various options you can add to the call to Catch that modify how
|
||||
// panics are handled.
|
||||
//
|
||||
// WithMessage and WithMessagef add a custom message after the recovered value,
|
||||
// using fmt.Sprint and fmt.Sprintf respectively.
|
||||
//
|
||||
// defer errorsClient.Catch(ctx, errors.WithMessagef("x=%d", x))
|
||||
//
|
||||
// WithRequest fills in various fields in the error report with information
|
||||
// about an http.Request that's being handled.
|
||||
//
|
||||
// defer errorsClient.Catch(ctx, errors.WithRequest(httpReq))
|
||||
//
|
||||
// By default, after recovering a panic, Catch will panic again with the
|
||||
// recovered value. You can turn off this behavior with the Repanic option.
|
||||
//
|
||||
// defer errorsClient.Catch(ctx, errors.Repanic(false))
|
||||
//
|
||||
// You can also change the default behavior for the client by changing the
|
||||
// RepanicDefault field.
|
||||
//
|
||||
// errorsClient.RepanicDefault = false
|
||||
//
|
||||
// It is also possible to write an error report directly without recovering a
|
||||
// panic, using Report or Reportf.
|
||||
//
|
||||
// if err != nil {
|
||||
// errorsClient.Reportf(ctx, r, "unexpected error %v", err)
|
||||
// }
|
||||
//
|
||||
// If you try to write an error report with a nil client, or if the client
|
||||
// fails to write the report to the server, the error report is logged using
|
||||
// log.Println.
|
||||
//
|
||||
// Deprecated: Use cloud.google.com/go/errorreporting instead.
|
||||
package errors // import "cloud.google.com/go/errors"
|
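A minimal end-to-end sketch of the usage described in the doc comment above; the project ID, service name, handler, and doWork helper are illustrative placeholders, not part of this package:

package main

import (
	"log"
	"net/http"

	"cloud.google.com/go/errors"
	"golang.org/x/net/context"
)

var errorsClient *errors.Client

func main() {
	ctx := context.Background()
	var err error
	// "my-project", "frontend" and "v1.0" are placeholder values.
	errorsClient, err = errors.NewClient(ctx, "my-project", "frontend", "v1.0", false)
	if err != nil {
		log.Fatal(err)
	}
	http.HandleFunc("/", handle)
	log.Fatal(http.ListenAndServe(":8080", nil))
}

func handle(w http.ResponseWriter, r *http.Request) {
	ctx := context.Background()
	// Recover any panic in this handler, attach request details, and do not re-panic.
	defer errorsClient.Catch(ctx, errors.WithRequest(r), errors.Repanic(false))
	if err := doWork(); err != nil {
		errorsClient.Reportf(ctx, r, "doWork failed: %v", err)
	}
}

// doWork is a stand-in for application logic.
func doWork() error { return nil }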
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
api "cloud.google.com/go/errorreporting/apiv1beta1"
|
||||
"cloud.google.com/go/internal/version"
|
||||
"cloud.google.com/go/logging"
|
||||
"github.com/golang/protobuf/ptypes/timestamp"
|
||||
gax "github.com/googleapis/gax-go"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/option"
|
||||
erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
|
||||
)
|
||||
|
||||
const (
|
||||
userAgent = `gcloud-golang-errorreporting/20160701`
|
||||
)
|
||||
|
||||
type apiInterface interface {
|
||||
ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, opts ...gax.CallOption) (*erpb.ReportErrorEventResponse, error)
|
||||
Close() error
|
||||
}
|
||||
|
||||
var newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (apiInterface, error) {
|
||||
client, err := api.NewReportErrorsClient(ctx, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client.SetGoogleClientInfo("gccl", version.Repo)
|
||||
return client, nil
|
||||
}
|
||||
|
||||
type loggerInterface interface {
|
||||
LogSync(ctx context.Context, e logging.Entry) error
|
||||
Close() error
|
||||
}
|
||||
|
||||
type logger struct {
|
||||
*logging.Logger
|
||||
c *logging.Client
|
||||
}
|
||||
|
||||
func (l logger) Close() error {
|
||||
return l.c.Close()
|
||||
}
|
||||
|
||||
var newLoggerInterface = func(ctx context.Context, projectID string, opts ...option.ClientOption) (loggerInterface, error) {
|
||||
lc, err := logging.NewClient(ctx, projectID, opts...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating Logging client: %v", err)
|
||||
}
|
||||
l := lc.Logger("errorreports")
|
||||
return logger{l, lc}, nil
|
||||
}
|
||||
|
||||
type sender interface {
|
||||
send(ctx context.Context, r *http.Request, message string)
|
||||
close() error
|
||||
}
|
||||
|
||||
// errorApiSender sends error reports using the Stackdriver Error Reporting API.
|
||||
type errorApiSender struct {
|
||||
apiClient apiInterface
|
||||
projectID string
|
||||
serviceContext erpb.ServiceContext
|
||||
}
|
||||
|
||||
// loggingSender sends error reports using the Stackdriver Logging API.
|
||||
type loggingSender struct {
|
||||
logger loggerInterface
|
||||
projectID string
|
||||
serviceContext map[string]string
|
||||
}
|
||||
|
||||
// Client represents a Google Cloud Error Reporting client.
|
||||
type Client struct {
|
||||
sender
|
||||
// RepanicDefault determines whether Catch will re-panic after recovering a
|
||||
// panic. This behavior can be overridden for an individual call to Catch using
|
||||
// the Repanic option.
|
||||
RepanicDefault bool
|
||||
}
|
||||
|
||||
// NewClient returns a new error reporting client. Generally you will want
|
||||
// to create a client on program initialization and use it through the lifetime
|
||||
// of the process.
|
||||
//
|
||||
// The service name and version string identify the running program, and are
|
||||
// included in error reports. The version string can be left empty.
|
||||
//
|
||||
// Set useLogging to report errors also using Stackdriver Logging,
|
||||
// which will result in errors appearing in both the logs and the error
|
||||
// dashboard. This is useful if you are already a user of Stackdriver Logging.
|
||||
func NewClient(ctx context.Context, projectID, serviceName, serviceVersion string, useLogging bool, opts ...option.ClientOption) (*Client, error) {
|
||||
if useLogging {
|
||||
l, err := newLoggerInterface(ctx, projectID, opts...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating Logging client: %v", err)
|
||||
}
|
||||
sender := &loggingSender{
|
||||
logger: l,
|
||||
projectID: projectID,
|
||||
serviceContext: map[string]string{
|
||||
"service": serviceName,
|
||||
},
|
||||
}
|
||||
if serviceVersion != "" {
|
||||
sender.serviceContext["version"] = serviceVersion
|
||||
}
|
||||
c := &Client{
|
||||
sender: sender,
|
||||
RepanicDefault: true,
|
||||
}
|
||||
return c, nil
|
||||
} else {
|
||||
a, err := newApiInterface(ctx, opts...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating Error Reporting client: %v", err)
|
||||
}
|
||||
c := &Client{
|
||||
sender: &errorApiSender{
|
||||
apiClient: a,
|
||||
projectID: "projects/" + projectID,
|
||||
serviceContext: erpb.ServiceContext{
|
||||
Service: serviceName,
|
||||
Version: serviceVersion,
|
||||
},
|
||||
},
|
||||
RepanicDefault: true,
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes any resources held by the client.
|
||||
// Close should be called when the client is no longer needed.
|
||||
// It need not be called at program exit.
|
||||
func (c *Client) Close() error {
|
||||
err := c.sender.close()
|
||||
c.sender = nil
|
||||
return err
|
||||
}
|
||||
|
||||
// An Option is an optional argument to Catch.
|
||||
type Option interface {
|
||||
isOption()
|
||||
}
|
||||
|
||||
// PanicFlag returns an Option that can inform Catch that a panic has occurred.
|
||||
// If *p is true when Catch is called, an error report is made even if recover
|
||||
// returns nil. This allows Catch to report an error for panic(nil).
|
||||
// If p is nil, the option is ignored.
|
||||
//
|
||||
// Here is an example of how to use PanicFlag:
|
||||
//
|
||||
// func foo(ctx context.Context, ...) {
|
||||
// hasPanicked := true
|
||||
// defer errorsClient.Catch(ctx, errors.PanicFlag(&hasPanicked))
|
||||
// ...
|
||||
// ...
|
||||
// // We have reached the end of the function, so we're not panicking.
|
||||
// hasPanicked = false
|
||||
// }
|
||||
func PanicFlag(p *bool) Option { return panicFlag{p} }
|
||||
|
||||
type panicFlag struct {
|
||||
*bool
|
||||
}
|
||||
|
||||
func (h panicFlag) isOption() {}
|
||||
|
||||
// Repanic returns an Option that determines whether Catch will re-panic after
|
||||
// it reports an error. This overrides the default in the client.
|
||||
func Repanic(r bool) Option { return repanic(r) }
|
||||
|
||||
type repanic bool
|
||||
|
||||
func (r repanic) isOption() {}
|
||||
|
||||
// WithRequest returns an Option that informs Catch or Report of an http.Request
|
||||
// that is being handled. Information from the Request is included in the error
|
||||
// report, if one is made.
|
||||
func WithRequest(r *http.Request) Option { return withRequest{r} }
|
||||
|
||||
type withRequest struct {
|
||||
*http.Request
|
||||
}
|
||||
|
||||
func (w withRequest) isOption() {}
|
||||
|
||||
// WithMessage returns an Option that sets a message to be included in the error
|
||||
// report, if one is made. v is converted to a string with fmt.Sprint.
|
||||
func WithMessage(v ...interface{}) Option { return message(v) }
|
||||
|
||||
type message []interface{}
|
||||
|
||||
func (m message) isOption() {}
|
||||
|
||||
// WithMessagef returns an Option that sets a message to be included in the error
|
||||
// report, if one is made. format and v are converted to a string with fmt.Sprintf.
|
||||
func WithMessagef(format string, v ...interface{}) Option { return messagef{format, v} }
|
||||
|
||||
type messagef struct {
|
||||
format string
|
||||
v []interface{}
|
||||
}
|
||||
|
||||
func (m messagef) isOption() {}
|
||||
|
||||
// Catch tries to recover a panic; if it succeeds, it writes an error report.
|
||||
// It should be called by deferring it, like any other function for recovering
|
||||
// panics.
|
||||
//
|
||||
// Catch can be called concurrently with other calls to Catch, Report or Reportf.
|
||||
func (c *Client) Catch(ctx context.Context, opt ...Option) {
|
||||
panicked := false
|
||||
for _, o := range opt {
|
||||
switch o := o.(type) {
|
||||
case panicFlag:
|
||||
panicked = panicked || o.bool != nil && *o.bool
|
||||
}
|
||||
}
|
||||
x := recover()
|
||||
if x == nil && !panicked {
|
||||
return
|
||||
}
|
||||
var (
|
||||
r *http.Request
|
||||
shouldRepanic = true
|
||||
messages = []string{fmt.Sprint(x)}
|
||||
)
|
||||
if c != nil {
|
||||
shouldRepanic = c.RepanicDefault
|
||||
}
|
||||
for _, o := range opt {
|
||||
switch o := o.(type) {
|
||||
case repanic:
|
||||
shouldRepanic = bool(o)
|
||||
case withRequest:
|
||||
r = o.Request
|
||||
case message:
|
||||
messages = append(messages, fmt.Sprint(o...))
|
||||
case messagef:
|
||||
messages = append(messages, fmt.Sprintf(o.format, o.v...))
|
||||
}
|
||||
}
|
||||
c.logInternal(ctx, r, true, strings.Join(messages, " "))
|
||||
if shouldRepanic {
|
||||
panic(x)
|
||||
}
|
||||
}
|
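As a hedged illustration of the PanicFlag handling implemented above (errorsClient and riskyWork are placeholders): the flag is only cleared if the function reaches its last statement, so Catch reports even a panic(nil) that recover alone cannot distinguish from a normal return.

func mightPanicNil(ctx context.Context) {
	panicked := true
	defer errorsClient.Catch(ctx,
		errors.PanicFlag(&panicked),
		errors.WithMessage("mightPanicNil failed"))
	riskyWork() // placeholder; may call panic(nil)
	panicked = false
}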
||||
|
||||
// Report writes an error report unconditionally, instead of only when a panic
|
||||
// occurs.
|
||||
// If r is non-nil, information from the Request is included in the error report.
|
||||
//
|
||||
// Report can be called concurrently with other calls to Catch, Report or Reportf.
|
||||
func (c *Client) Report(ctx context.Context, r *http.Request, v ...interface{}) {
|
||||
c.logInternal(ctx, r, false, fmt.Sprint(v...))
|
||||
}
|
||||
|
||||
// Reportf writes an error report unconditionally, instead of only when a panic
|
||||
// occurs.
|
||||
// If r is non-nil, information from the Request is included in the error report.
|
||||
//
|
||||
// Reportf can be called concurrently with other calls to Catch, Report or Reportf.
|
||||
func (c *Client) Reportf(ctx context.Context, r *http.Request, format string, v ...interface{}) {
|
||||
c.logInternal(ctx, r, false, fmt.Sprintf(format, v...))
|
||||
}
|
||||
|
||||
func (c *Client) logInternal(ctx context.Context, r *http.Request, isPanic bool, msg string) {
|
||||
// limit the stack trace to 16k.
|
||||
var buf [16384]byte
|
||||
stack := buf[0:runtime.Stack(buf[:], false)]
|
||||
message := msg + "\n" + chopStack(stack, isPanic)
|
||||
if c == nil {
|
||||
log.Println("Error report used nil client:", message)
|
||||
return
|
||||
}
|
||||
c.send(ctx, r, message)
|
||||
}
|
||||
|
||||
func (s *loggingSender) send(ctx context.Context, r *http.Request, message string) {
|
||||
payload := map[string]interface{}{
|
||||
"eventTime": time.Now().In(time.UTC).Format(time.RFC3339Nano),
|
||||
"message": message,
|
||||
"serviceContext": s.serviceContext,
|
||||
}
|
||||
if r != nil {
|
||||
payload["context"] = map[string]interface{}{
|
||||
"httpRequest": map[string]interface{}{
|
||||
"method": r.Method,
|
||||
"url": r.Host + r.RequestURI,
|
||||
"userAgent": r.UserAgent(),
|
||||
"referrer": r.Referer(),
|
||||
"remoteIp": r.RemoteAddr,
|
||||
},
|
||||
}
|
||||
}
|
||||
e := logging.Entry{
|
||||
Severity: logging.Error,
|
||||
Payload: payload,
|
||||
}
|
||||
err := s.logger.LogSync(ctx, e)
|
||||
if err != nil {
|
||||
log.Println("Error writing error report:", err, "report:", payload)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *loggingSender) close() error {
|
||||
return s.logger.Close()
|
||||
}
|
||||
|
||||
func (s *errorApiSender) send(ctx context.Context, r *http.Request, message string) {
|
||||
time := time.Now()
|
||||
var errorContext *erpb.ErrorContext
|
||||
if r != nil {
|
||||
errorContext = &erpb.ErrorContext{
|
||||
HttpRequest: &erpb.HttpRequestContext{
|
||||
Method: r.Method,
|
||||
Url: r.Host + r.RequestURI,
|
||||
UserAgent: r.UserAgent(),
|
||||
Referrer: r.Referer(),
|
||||
RemoteIp: r.RemoteAddr,
|
||||
},
|
||||
}
|
||||
}
|
||||
req := erpb.ReportErrorEventRequest{
|
||||
ProjectName: s.projectID,
|
||||
Event: &erpb.ReportedErrorEvent{
|
||||
EventTime: &timestamp.Timestamp{
|
||||
Seconds: time.Unix(),
|
||||
Nanos: int32(time.Nanosecond()),
|
||||
},
|
||||
ServiceContext: &s.serviceContext,
|
||||
Message: message,
|
||||
Context: errorContext,
|
||||
},
|
||||
}
|
||||
_, err := s.apiClient.ReportErrorEvent(ctx, &req)
|
||||
if err != nil {
|
||||
log.Println("Error writing error report:", err, "report:", message)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *errorApiSender) close() error {
|
||||
return s.apiClient.Close()
|
||||
}
|
||||
|
||||
// chopStack trims a stack trace so that the function which panics or calls
|
||||
// Report is first.
|
||||
func chopStack(s []byte, isPanic bool) string {
|
||||
var f []byte
|
||||
if isPanic {
|
||||
f = []byte("panic(")
|
||||
} else {
|
||||
f = []byte("cloud.google.com/go/errors.(*Client).Report")
|
||||
}
|
||||
|
||||
lfFirst := bytes.IndexByte(s, '\n')
|
||||
if lfFirst == -1 {
|
||||
return string(s)
|
||||
}
|
||||
stack := s[lfFirst:]
|
||||
panicLine := bytes.Index(stack, f)
|
||||
if panicLine == -1 {
|
||||
return string(s)
|
||||
}
|
||||
stack = stack[panicLine+1:]
|
||||
for i := 0; i < 2; i++ {
|
||||
nextLine := bytes.IndexByte(stack, '\n')
|
||||
if nextLine == -1 {
|
||||
return string(s)
|
||||
}
|
||||
stack = stack[nextLine+1:]
|
||||
}
|
||||
return string(s[:lfFirst+1]) + string(stack)
|
||||
}
|
212 vendor/cloud.google.com/go/errors/errors_test.go (generated, vendored)
@ -1,212 +0,0 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package errors
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"log"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
gax "github.com/googleapis/gax-go"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/option"
|
||||
erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
|
||||
)
|
||||
|
||||
const testProjectID = "testproject"
|
||||
|
||||
type fakeReportErrorsClient struct {
|
||||
req *erpb.ReportErrorEventRequest
|
||||
fail bool
|
||||
}
|
||||
|
||||
func (c *fakeReportErrorsClient) ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, _ ...gax.CallOption) (*erpb.ReportErrorEventResponse, error) {
|
||||
if c.fail {
|
||||
return nil, errors.New("request failed")
|
||||
}
|
||||
c.req = req
|
||||
return &erpb.ReportErrorEventResponse{}, nil
|
||||
}
|
||||
|
||||
func (c *fakeReportErrorsClient) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func newTestClient(c *fakeReportErrorsClient) *Client {
|
||||
newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (apiInterface, error) {
|
||||
return c, nil
|
||||
}
|
||||
t, err := NewClient(context.Background(), testProjectID, "myservice", "v1.000", false)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
t.RepanicDefault = false
|
||||
return t
|
||||
}
|
||||
|
||||
var ctx context.Context
|
||||
|
||||
func init() {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
func TestCatchNothing(t *testing.T) {
|
||||
fc := &fakeReportErrorsClient{}
|
||||
c := newTestClient(fc)
|
||||
defer func() {
|
||||
r := fc.req
|
||||
if r != nil {
|
||||
t.Errorf("got error report, expected none")
|
||||
}
|
||||
}()
|
||||
defer c.Catch(ctx)
|
||||
}
|
||||
|
||||
func commonChecks(t *testing.T, req *erpb.ReportErrorEventRequest, panickingFunction string) {
|
||||
if req.Event.ServiceContext.Service != "myservice" {
|
||||
t.Errorf("error report didn't contain service name")
|
||||
}
|
||||
if req.Event.ServiceContext.Version != "v1.000" {
|
||||
t.Errorf("error report didn't contain version name")
|
||||
}
|
||||
if !strings.Contains(req.Event.Message, "hello, error") {
|
||||
t.Errorf("error report didn't contain message")
|
||||
}
|
||||
if !strings.Contains(req.Event.Message, panickingFunction) {
|
||||
t.Errorf("error report didn't contain stack trace")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCatchPanic(t *testing.T) {
|
||||
fc := &fakeReportErrorsClient{}
|
||||
c := newTestClient(fc)
|
||||
defer func() {
|
||||
r := fc.req
|
||||
if r == nil {
|
||||
t.Fatalf("got no error report, expected one")
|
||||
}
|
||||
commonChecks(t, r, "errors.TestCatchPanic")
|
||||
if !strings.Contains(r.Event.Message, "divide by zero") {
|
||||
t.Errorf("error report didn't contain recovered value")
|
||||
}
|
||||
}()
|
||||
defer c.Catch(ctx, WithMessage("hello, error"))
|
||||
var x int
|
||||
x = x / x
|
||||
}
|
||||
|
||||
func TestCatchPanicNilClient(t *testing.T) {
|
||||
buf := new(bytes.Buffer)
|
||||
log.SetOutput(buf)
|
||||
defer func() {
|
||||
recover()
|
||||
body := buf.String()
|
||||
if !strings.Contains(body, "divide by zero") {
|
||||
t.Errorf("error report didn't contain recovered value")
|
||||
}
|
||||
if !strings.Contains(body, "hello, error") {
|
||||
t.Errorf("error report didn't contain message")
|
||||
}
|
||||
if !strings.Contains(body, "TestCatchPanicNilClient") {
|
||||
t.Errorf("error report didn't contain recovered value")
|
||||
}
|
||||
}()
|
||||
var c *Client
|
||||
defer c.Catch(ctx, WithMessage("hello, error"))
|
||||
var x int
|
||||
x = x / x
|
||||
}
|
||||
|
||||
func TestLogFailedReports(t *testing.T) {
|
||||
fc := &fakeReportErrorsClient{fail: true}
|
||||
c := newTestClient(fc)
|
||||
buf := new(bytes.Buffer)
|
||||
log.SetOutput(buf)
|
||||
defer func() {
|
||||
recover()
|
||||
body := buf.String()
|
||||
if !strings.Contains(body, "hello, error") {
|
||||
t.Errorf("error report didn't contain message")
|
||||
}
|
||||
if !strings.Contains(body, "errors.TestLogFailedReports") {
|
||||
t.Errorf("error report didn't contain stack trace")
|
||||
}
|
||||
if !strings.Contains(body, "divide by zero") {
|
||||
t.Errorf("error report didn't contain recovered value")
|
||||
}
|
||||
}()
|
||||
defer c.Catch(ctx, WithMessage("hello, error"))
|
||||
var x int
|
||||
x = x / x
|
||||
}
|
||||
|
||||
func TestCatchNilPanic(t *testing.T) {
|
||||
fc := &fakeReportErrorsClient{}
|
||||
c := newTestClient(fc)
|
||||
defer func() {
|
||||
r := fc.req
|
||||
if r == nil {
|
||||
t.Fatalf("got no error report, expected one")
|
||||
}
|
||||
commonChecks(t, r, "errors.TestCatchNilPanic")
|
||||
if !strings.Contains(r.Event.Message, "nil") {
|
||||
t.Errorf("error report didn't contain recovered value")
|
||||
}
|
||||
}()
|
||||
b := true
|
||||
defer c.Catch(ctx, WithMessage("hello, error"), PanicFlag(&b))
|
||||
panic(nil)
|
||||
}
|
||||
|
||||
func TestNotCatchNilPanic(t *testing.T) {
|
||||
fc := &fakeReportErrorsClient{}
|
||||
c := newTestClient(fc)
|
||||
defer func() {
|
||||
r := fc.req
|
||||
if r != nil {
|
||||
t.Errorf("got error report, expected none")
|
||||
}
|
||||
}()
|
||||
defer c.Catch(ctx, WithMessage("hello, error"))
|
||||
panic(nil)
|
||||
}
|
||||
|
||||
func TestReport(t *testing.T) {
|
||||
fc := &fakeReportErrorsClient{}
|
||||
c := newTestClient(fc)
|
||||
c.Report(ctx, nil, "hello, ", "error")
|
||||
r := fc.req
|
||||
if r == nil {
|
||||
t.Fatalf("got no error report, expected one")
|
||||
}
|
||||
commonChecks(t, r, "errors.TestReport")
|
||||
}
|
||||
|
||||
func TestReportf(t *testing.T) {
|
||||
fc := &fakeReportErrorsClient{}
|
||||
c := newTestClient(fc)
|
||||
c.Reportf(ctx, nil, "hello, error 2+%d=%d", 2, 2+2)
|
||||
r := fc.req
|
||||
if r == nil {
|
||||
t.Fatalf("got no error report, expected one")
|
||||
}
|
||||
commonChecks(t, r, "errors.TestReportf")
|
||||
if !strings.Contains(r.Event.Message, "2+2=4") {
|
||||
t.Errorf("error report didn't contain formatted message")
|
||||
}
|
||||
}
|
118 vendor/cloud.google.com/go/errors/stack_test.go (generated, vendored)
@ -1,118 +0,0 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package errors
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestChopStack(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
name string
|
||||
in []byte
|
||||
expected string
|
||||
isPanic bool
|
||||
}{
|
||||
{
|
||||
name: "Catch",
|
||||
in: []byte(`goroutine 20 [running]:
|
||||
runtime/debug.Stack()
|
||||
/gopath/src/runtime/debug/stack.go:24 +0x79
|
||||
cloud.google.com/go/errors.(*Client).logInternal()
|
||||
/gopath/src/cloud.google.com/go/errors/errors.go:259 +0x18b
|
||||
cloud.google.com/go/errors.(*Client).Catch()
|
||||
/gopath/src/cloud.google.com/go/errors/errors.go:219 +0x6ed
|
||||
panic()
|
||||
/gopath/src/runtime/panic.go:458 +0x243
|
||||
cloud.google.com/go/errors_test.TestCatchPanic()
|
||||
/gopath/src/cloud.google.com/go/errors/errors_test.go:93 +0x171
|
||||
testing.tRunner()
|
||||
/gopath/src/testing/testing.go:610 +0x81
|
||||
created by testing.(*T).Run
|
||||
/gopath/src/testing/testing.go:646 +0x2ec
|
||||
`),
|
||||
expected: `goroutine 20 [running]:
|
||||
cloud.google.com/go/errors_test.TestCatchPanic()
|
||||
/gopath/src/cloud.google.com/go/errors/errors_test.go:93 +0x171
|
||||
testing.tRunner()
|
||||
/gopath/src/testing/testing.go:610 +0x81
|
||||
created by testing.(*T).Run
|
||||
/gopath/src/testing/testing.go:646 +0x2ec
|
||||
`,
|
||||
isPanic: true,
|
||||
},
|
||||
{
|
||||
name: "function not found",
|
||||
in: []byte(`goroutine 20 [running]:
|
||||
runtime/debug.Stack()
|
||||
/gopath/src/runtime/debug/stack.go:24 +0x79
|
||||
cloud.google.com/go/errors.(*Client).logInternal()
|
||||
/gopath/src/cloud.google.com/go/errors/errors.go:259 +0x18b
|
||||
cloud.google.com/go/errors.(*Client).Catch()
|
||||
/gopath/src/cloud.google.com/go/errors/errors.go:219 +0x6ed
|
||||
cloud.google.com/go/errors_test.TestCatchPanic()
|
||||
/gopath/src/cloud.google.com/go/errors/errors_test.go:93 +0x171
|
||||
testing.tRunner()
|
||||
/gopath/src/testing/testing.go:610 +0x81
|
||||
created by testing.(*T).Run
|
||||
/gopath/src/testing/testing.go:646 +0x2ec
|
||||
`),
|
||||
expected: `goroutine 20 [running]:
|
||||
runtime/debug.Stack()
|
||||
/gopath/src/runtime/debug/stack.go:24 +0x79
|
||||
cloud.google.com/go/errors.(*Client).logInternal()
|
||||
/gopath/src/cloud.google.com/go/errors/errors.go:259 +0x18b
|
||||
cloud.google.com/go/errors.(*Client).Catch()
|
||||
/gopath/src/cloud.google.com/go/errors/errors.go:219 +0x6ed
|
||||
cloud.google.com/go/errors_test.TestCatchPanic()
|
||||
/gopath/src/cloud.google.com/go/errors/errors_test.go:93 +0x171
|
||||
testing.tRunner()
|
||||
/gopath/src/testing/testing.go:610 +0x81
|
||||
created by testing.(*T).Run
|
||||
/gopath/src/testing/testing.go:646 +0x2ec
|
||||
`,
|
||||
isPanic: true,
|
||||
},
|
||||
{
|
||||
name: "Report",
|
||||
in: []byte(` goroutine 39 [running]:
|
||||
runtime/debug.Stack()
|
||||
/gopath/runtime/debug/stack.go:24 +0x79
|
||||
cloud.google.com/go/errors.(*Client).logInternal()
|
||||
/gopath/cloud.google.com/go/errors/errors.go:259 +0x18b
|
||||
cloud.google.com/go/errors.(*Client).Report()
|
||||
/gopath/cloud.google.com/go/errors/errors.go:248 +0x4ed
|
||||
cloud.google.com/go/errors_test.TestReport()
|
||||
/gopath/cloud.google.com/go/errors/errors_test.go:137 +0x2a1
|
||||
testing.tRunner()
|
||||
/gopath/testing/testing.go:610 +0x81
|
||||
created by testing.(*T).Run
|
||||
/gopath/testing/testing.go:646 +0x2ec
|
||||
`),
|
||||
expected: ` goroutine 39 [running]:
|
||||
cloud.google.com/go/errors_test.TestReport()
|
||||
/gopath/cloud.google.com/go/errors/errors_test.go:137 +0x2a1
|
||||
testing.tRunner()
|
||||
/gopath/testing/testing.go:610 +0x81
|
||||
created by testing.(*T).Run
|
||||
/gopath/testing/testing.go:646 +0x2ec
|
||||
`,
|
||||
isPanic: false,
|
||||
},
|
||||
} {
|
||||
out := chopStack(test.in, test.isPanic)
|
||||
if out != test.expected {
|
||||
t.Errorf("case %q: chopStack(%q, %t): got %q want %q", test.name, test.in, test.isPanic, out, test.expected)
|
||||
}
|
||||
}
|
||||
}
|
44 vendor/cloud.google.com/go/firestore/apiv1beta1/doc.go (generated, vendored, new file)
@ -0,0 +1,44 @@
|
||||
// Copyright 2017, Google Inc. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
// Package firestore is an auto-generated package for the
|
||||
// Google Cloud Firestore API.
|
||||
//
|
||||
// NOTE: This package is in beta. It is not stable, and may be subject to changes.
|
||||
//
|
||||
//
|
||||
// Use the client at cloud.google.com/go/firestore in preference to this.
|
||||
package firestore
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
func insertXGoog(ctx context.Context, val []string) context.Context {
|
||||
md, _ := metadata.FromOutgoingContext(ctx)
|
||||
md = md.Copy()
|
||||
md["x-goog-api-client"] = val
|
||||
return metadata.NewOutgoingContext(ctx, md)
|
||||
}
|
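A small in-package sketch of what insertXGoog does (the header value is illustrative, and an extra "fmt" import is assumed): the strings end up in the outgoing gRPC metadata of the returned context.

func exampleInsertXGoog() {
	ctx := insertXGoog(context.Background(), []string{"gl-go/x gapic/y"})
	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md["x-goog-api-client"]) // prints: [gl-go/x gapic/y]
}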
||||
|
||||
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
|
||||
func DefaultAuthScopes() []string {
|
||||
return []string{
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/datastore",
|
||||
}
|
||||
}
|
544 vendor/cloud.google.com/go/firestore/apiv1beta1/firestore_client.go (generated, vendored, new file)
@ -0,0 +1,544 @@
|
||||
// Copyright 2017, Google Inc. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
package firestore
|
||||
|
||||
import (
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal/version"
|
||||
firestorepb "google.golang.org/genproto/googleapis/firestore/v1beta1"
|
||||
|
||||
gax "github.com/googleapis/gax-go"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/transport"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
// CallOptions contains the retry settings for each method of Client.
|
||||
type CallOptions struct {
|
||||
GetDocument []gax.CallOption
|
||||
ListDocuments []gax.CallOption
|
||||
CreateDocument []gax.CallOption
|
||||
UpdateDocument []gax.CallOption
|
||||
DeleteDocument []gax.CallOption
|
||||
BatchGetDocuments []gax.CallOption
|
||||
BeginTransaction []gax.CallOption
|
||||
Commit []gax.CallOption
|
||||
Rollback []gax.CallOption
|
||||
RunQuery []gax.CallOption
|
||||
Write []gax.CallOption
|
||||
Listen []gax.CallOption
|
||||
ListCollectionIds []gax.CallOption
|
||||
}
|
||||
|
||||
func defaultClientOptions() []option.ClientOption {
|
||||
return []option.ClientOption{
|
||||
option.WithEndpoint("firestore.googleapis.com:443"),
|
||||
option.WithScopes(DefaultAuthScopes()...),
|
||||
}
|
||||
}
|
||||
|
||||
func defaultCallOptions() *CallOptions {
|
||||
retry := map[[2]string][]gax.CallOption{
|
||||
{"default", "idempotent"}: {
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.3,
|
||||
})
|
||||
}),
|
||||
},
|
||||
{"streaming", "idempotent"}: {
|
||||
gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60000 * time.Millisecond,
|
||||
Multiplier: 1.3,
|
||||
})
|
||||
}),
|
||||
},
|
||||
}
|
||||
return &CallOptions{
|
||||
GetDocument: retry[[2]string{"default", "idempotent"}],
|
||||
ListDocuments: retry[[2]string{"default", "idempotent"}],
|
||||
CreateDocument: retry[[2]string{"default", "non_idempotent"}],
|
||||
UpdateDocument: retry[[2]string{"default", "non_idempotent"}],
|
||||
DeleteDocument: retry[[2]string{"default", "idempotent"}],
|
||||
BatchGetDocuments: retry[[2]string{"streaming", "idempotent"}],
|
||||
BeginTransaction: retry[[2]string{"default", "idempotent"}],
|
||||
Commit: retry[[2]string{"default", "non_idempotent"}],
|
||||
Rollback: retry[[2]string{"default", "idempotent"}],
|
||||
RunQuery: retry[[2]string{"default", "idempotent"}],
|
||||
Write: retry[[2]string{"streaming", "non_idempotent"}],
|
||||
Listen: retry[[2]string{"streaming", "idempotent"}],
|
||||
ListCollectionIds: retry[[2]string{"default", "idempotent"}],
|
||||
}
|
||||
}
|
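These defaults can be layered with additional gax.CallOptions on an individual call. A hedged sketch, reusing only identifiers that appear in this file and in the generated examples further below (request fields left as TODOs):

package main

import (
	"time"

	firestore "cloud.google.com/go/firestore/apiv1beta1"
	gax "github.com/googleapis/gax-go"
	"golang.org/x/net/context"
	firestorepb "google.golang.org/genproto/googleapis/firestore/v1beta1"
	"google.golang.org/grpc/codes"
)

func main() {
	ctx := context.Background()
	c, err := firestore.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &firestorepb.GetDocumentRequest{
		// TODO: Fill request struct fields.
	}
	// Per-call retry option, appended after the defaults built in defaultCallOptions.
	resp, err := c.GetDocument(ctx, req, gax.WithRetry(func() gax.Retryer {
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
			Initial:    200 * time.Millisecond,
			Max:        30 * time.Second,
			Multiplier: 2.0,
		})
	}))
	if err != nil {
		// TODO: Handle error.
	}
	_ = resp
}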
||||
|
||||
// Client is a client for interacting with Google Cloud Firestore API.
|
||||
type Client struct {
|
||||
// The connection to the service.
|
||||
conn *grpc.ClientConn
|
||||
|
||||
// The gRPC API client.
|
||||
client firestorepb.FirestoreClient
|
||||
|
||||
// The call options for this service.
|
||||
CallOptions *CallOptions
|
||||
|
||||
// The metadata to be sent with each request.
|
||||
xGoogHeader []string
|
||||
}
|
||||
|
||||
// NewClient creates a new firestore client.
|
||||
//
|
||||
// The Cloud Firestore service.
|
||||
//
|
||||
// This service exposes several types of comparable timestamps:
|
||||
//
|
||||
// create_time - The time at which a document was created. Changes only
|
||||
// when a document is deleted, then re-created. Increases in a strict
|
||||
// monotonic fashion.
|
||||
//
|
||||
// update_time - The time at which a document was last updated. Changes
|
||||
// every time a document is modified. Does not change when a write results
|
||||
// in no modifications. Increases in a strict monotonic fashion.
|
||||
//
|
||||
// read_time - The time at which a particular state was observed. Used
|
||||
// to denote a consistent snapshot of the database or the time at which a
|
||||
// Document was observed to not exist.
|
||||
//
|
||||
// commit_time - The time at which the writes in a transaction were
|
||||
// committed. Any read with an equal or greater read_time is guaranteed
|
||||
// to see the effects of the transaction.
|
||||
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
|
||||
conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := &Client{
|
||||
conn: conn,
|
||||
CallOptions: defaultCallOptions(),
|
||||
|
||||
client: firestorepb.NewFirestoreClient(conn),
|
||||
}
|
||||
c.SetGoogleClientInfo()
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Connection returns the client's connection to the API service.
|
||||
func (c *Client) Connection() *grpc.ClientConn {
|
||||
return c.conn
|
||||
}
|
||||
|
||||
// Close closes the connection to the API service. The user should invoke this when
|
||||
// the client is no longer required.
|
||||
func (c *Client) Close() error {
|
||||
return c.conn.Close()
|
||||
}
|
||||
|
||||
// SetGoogleClientInfo sets the name and version of the application in
|
||||
// the `x-goog-api-client` header passed on each request. Intended for
|
||||
// use by Google-written clients.
|
||||
func (c *Client) SetGoogleClientInfo(keyval ...string) {
|
||||
kv := append([]string{"gl-go", version.Go()}, keyval...)
|
||||
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
|
||||
c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
|
||||
}
|
||||
|
||||
// DatabaseRootPath returns the path for the database root resource.
|
||||
func DatabaseRootPath(project, database string) string {
|
||||
return "" +
|
||||
"projects/" +
|
||||
project +
|
||||
"/databases/" +
|
||||
database +
|
||||
""
|
||||
}
|
||||
|
||||
// DocumentRootPath returns the path for the document root resource.
|
||||
func DocumentRootPath(project, database string) string {
|
||||
return "" +
|
||||
"projects/" +
|
||||
project +
|
||||
"/databases/" +
|
||||
database +
|
||||
"/documents" +
|
||||
""
|
||||
}
|
||||
|
||||
// DocumentPathPath returns the path for the document path resource.
|
||||
func DocumentPathPath(project, database, documentPath string) string {
|
||||
return "" +
|
||||
"projects/" +
|
||||
project +
|
||||
"/databases/" +
|
||||
database +
|
||||
"/documents/" +
|
||||
documentPath +
|
||||
""
|
||||
}
|
||||
|
||||
// AnyPathPath returns the path for the any path resource.
|
||||
func AnyPathPath(project, database, document, anyPath string) string {
|
||||
return "" +
|
||||
"projects/" +
|
||||
project +
|
||||
"/databases/" +
|
||||
database +
|
||||
"/documents/" +
|
||||
document +
|
||||
"/" +
|
||||
anyPath +
|
||||
""
|
||||
}
|
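The path helpers above are plain string concatenation; with hypothetical project and database names the results look like this:

// Assumes: import ("fmt"; firestore "cloud.google.com/go/firestore/apiv1beta1")
fmt.Println(firestore.DatabaseRootPath("my-project", "(default)"))
// projects/my-project/databases/(default)
fmt.Println(firestore.DocumentPathPath("my-project", "(default)", "users/alice"))
// projects/my-project/databases/(default)/documents/users/alice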
||||
|
||||
// GetDocument gets a single document.
|
||||
func (c *Client) GetDocument(ctx context.Context, req *firestorepb.GetDocumentRequest, opts ...gax.CallOption) (*firestorepb.Document, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.GetDocument[0:len(c.CallOptions.GetDocument):len(c.CallOptions.GetDocument)], opts...)
|
||||
var resp *firestorepb.Document
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.GetDocument(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// ListDocuments lists documents.
|
||||
func (c *Client) ListDocuments(ctx context.Context, req *firestorepb.ListDocumentsRequest, opts ...gax.CallOption) *DocumentIterator {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.ListDocuments[0:len(c.CallOptions.ListDocuments):len(c.CallOptions.ListDocuments)], opts...)
|
||||
it := &DocumentIterator{}
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]*firestorepb.Document, string, error) {
|
||||
var resp *firestorepb.ListDocumentsResponse
|
||||
req.PageToken = pageToken
|
||||
if pageSize > math.MaxInt32 {
|
||||
req.PageSize = math.MaxInt32
|
||||
} else {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.ListDocuments(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
return resp.Documents, resp.NextPageToken, nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.items = append(it.items, items...)
|
||||
return nextPageToken, nil
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
return it
|
||||
}
|
||||
|
||||
// CreateDocument creates a new document.
|
||||
func (c *Client) CreateDocument(ctx context.Context, req *firestorepb.CreateDocumentRequest, opts ...gax.CallOption) (*firestorepb.Document, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.CreateDocument[0:len(c.CallOptions.CreateDocument):len(c.CallOptions.CreateDocument)], opts...)
|
||||
var resp *firestorepb.Document
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.CreateDocument(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// UpdateDocument updates or inserts a document.
|
||||
func (c *Client) UpdateDocument(ctx context.Context, req *firestorepb.UpdateDocumentRequest, opts ...gax.CallOption) (*firestorepb.Document, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.UpdateDocument[0:len(c.CallOptions.UpdateDocument):len(c.CallOptions.UpdateDocument)], opts...)
|
||||
var resp *firestorepb.Document
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.UpdateDocument(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// DeleteDocument deletes a document.
|
||||
func (c *Client) DeleteDocument(ctx context.Context, req *firestorepb.DeleteDocumentRequest, opts ...gax.CallOption) error {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.DeleteDocument[0:len(c.CallOptions.DeleteDocument):len(c.CallOptions.DeleteDocument)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
_, err = c.client.DeleteDocument(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
return err
|
||||
}
|
||||
|
||||
// BatchGetDocuments gets multiple documents.
|
||||
//
|
||||
// Documents returned by this method are not guaranteed to be returned in the
|
||||
// same order that they were requested.
|
||||
func (c *Client) BatchGetDocuments(ctx context.Context, req *firestorepb.BatchGetDocumentsRequest, opts ...gax.CallOption) (firestorepb.Firestore_BatchGetDocumentsClient, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.BatchGetDocuments[0:len(c.CallOptions.BatchGetDocuments):len(c.CallOptions.BatchGetDocuments)], opts...)
|
||||
var resp firestorepb.Firestore_BatchGetDocumentsClient
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.BatchGetDocuments(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// BeginTransaction starts a new transaction.
|
||||
func (c *Client) BeginTransaction(ctx context.Context, req *firestorepb.BeginTransactionRequest, opts ...gax.CallOption) (*firestorepb.BeginTransactionResponse, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.BeginTransaction[0:len(c.CallOptions.BeginTransaction):len(c.CallOptions.BeginTransaction)], opts...)
|
||||
var resp *firestorepb.BeginTransactionResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.BeginTransaction(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// Commit commits a transaction, while optionally updating documents.
|
||||
func (c *Client) Commit(ctx context.Context, req *firestorepb.CommitRequest, opts ...gax.CallOption) (*firestorepb.CommitResponse, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.Commit[0:len(c.CallOptions.Commit):len(c.CallOptions.Commit)], opts...)
|
||||
var resp *firestorepb.CommitResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.Commit(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// Rollback rolls back a transaction.
|
||||
func (c *Client) Rollback(ctx context.Context, req *firestorepb.RollbackRequest, opts ...gax.CallOption) error {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.Rollback[0:len(c.CallOptions.Rollback):len(c.CallOptions.Rollback)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
_, err = c.client.Rollback(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
return err
|
||||
}
|
||||
|
||||
// RunQuery runs a query.
|
||||
func (c *Client) RunQuery(ctx context.Context, req *firestorepb.RunQueryRequest, opts ...gax.CallOption) (firestorepb.Firestore_RunQueryClient, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.RunQuery[0:len(c.CallOptions.RunQuery):len(c.CallOptions.RunQuery)], opts...)
|
||||
var resp firestorepb.Firestore_RunQueryClient
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.RunQuery(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// Write streams batches of document updates and deletes, in order.
|
||||
func (c *Client) Write(ctx context.Context, opts ...gax.CallOption) (firestorepb.Firestore_WriteClient, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.Write[0:len(c.CallOptions.Write):len(c.CallOptions.Write)], opts...)
|
||||
var resp firestorepb.Firestore_WriteClient
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.Write(ctx, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// Listen listens to changes.
|
||||
func (c *Client) Listen(ctx context.Context, opts ...gax.CallOption) (firestorepb.Firestore_ListenClient, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.Listen[0:len(c.CallOptions.Listen):len(c.CallOptions.Listen)], opts...)
|
||||
var resp firestorepb.Firestore_ListenClient
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.Listen(ctx, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// ListCollectionIds lists all the collection IDs underneath a document.
|
||||
func (c *Client) ListCollectionIds(ctx context.Context, req *firestorepb.ListCollectionIdsRequest, opts ...gax.CallOption) *StringIterator {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
opts = append(c.CallOptions.ListCollectionIds[0:len(c.CallOptions.ListCollectionIds):len(c.CallOptions.ListCollectionIds)], opts...)
|
||||
it := &StringIterator{}
|
||||
it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) {
|
||||
var resp *firestorepb.ListCollectionIdsResponse
|
||||
req.PageToken = pageToken
|
||||
if pageSize > math.MaxInt32 {
|
||||
req.PageSize = math.MaxInt32
|
||||
} else {
|
||||
req.PageSize = int32(pageSize)
|
||||
}
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.ListCollectionIds(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
return resp.CollectionIds, resp.NextPageToken, nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.items = append(it.items, items...)
|
||||
return nextPageToken, nil
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
return it
|
||||
}
|
||||
|
||||
// DocumentIterator manages a stream of *firestorepb.Document.
|
||||
type DocumentIterator struct {
|
||||
items []*firestorepb.Document
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*firestorepb.Document, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
|
||||
func (it *DocumentIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *DocumentIterator) Next() (*firestorepb.Document, error) {
|
||||
var item *firestorepb.Document
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *DocumentIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *DocumentIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
||||
|
||||
// StringIterator manages a stream of string.
|
||||
type StringIterator struct {
|
||||
items []string
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []string, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
|
||||
func (it *StringIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *StringIterator) Next() (string, error) {
|
||||
var item string
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *StringIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *StringIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
329 vendor/cloud.google.com/go/firestore/apiv1beta1/firestore_client_example_test.go (generated, vendored, new file)
@ -0,0 +1,329 @@
|
||||
// Copyright 2017, Google Inc. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
package firestore_test
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
firestore "cloud.google.com/go/firestore/apiv1beta1"
|
||||
firestorepb "google.golang.org/genproto/googleapis/firestore/v1beta1"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
)
|
||||
|
||||
func ExampleNewClient() {
|
||||
ctx := context.Background()
|
||||
c, err := firestore.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use client.
|
||||
_ = c
|
||||
}
|
||||
|
||||
func ExampleClient_GetDocument() {
|
||||
ctx := context.Background()
|
||||
c, err := firestore.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &firestorepb.GetDocumentRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
resp, err := c.GetDocument(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
|
||||
func ExampleClient_ListDocuments() {
|
||||
ctx := context.Background()
|
||||
c, err := firestore.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &firestorepb.ListDocumentsRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
it := c.ListDocuments(ctx, req)
|
||||
for {
|
||||
resp, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleClient_CreateDocument() {
|
||||
ctx := context.Background()
|
||||
c, err := firestore.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &firestorepb.CreateDocumentRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
resp, err := c.CreateDocument(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
|
||||
func ExampleClient_UpdateDocument() {
|
||||
ctx := context.Background()
|
||||
c, err := firestore.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &firestorepb.UpdateDocumentRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
resp, err := c.UpdateDocument(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
|
||||
func ExampleClient_DeleteDocument() {
|
||||
ctx := context.Background()
|
||||
c, err := firestore.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &firestorepb.DeleteDocumentRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
err = c.DeleteDocument(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleClient_BatchGetDocuments() {
|
||||
ctx := context.Background()
|
||||
c, err := firestore.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &firestorepb.BatchGetDocumentsRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
stream, err := c.BatchGetDocuments(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
for {
|
||||
resp, err := stream.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
// TODO: handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleClient_BeginTransaction() {
|
||||
ctx := context.Background()
|
||||
c, err := firestore.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &firestorepb.BeginTransactionRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
resp, err := c.BeginTransaction(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
|
||||
func ExampleClient_Commit() {
|
||||
ctx := context.Background()
|
||||
c, err := firestore.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &firestorepb.CommitRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
resp, err := c.Commit(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
|
||||
func ExampleClient_Rollback() {
|
||||
ctx := context.Background()
|
||||
c, err := firestore.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &firestorepb.RollbackRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
err = c.Rollback(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleClient_RunQuery() {
|
||||
ctx := context.Background()
|
||||
c, err := firestore.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &firestorepb.RunQueryRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
stream, err := c.RunQuery(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
for {
|
||||
resp, err := stream.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleClient_Write() {
|
||||
ctx := context.Background()
|
||||
c, err := firestore.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
stream, err := c.Write(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
go func() {
|
||||
reqs := []*firestorepb.WriteRequest{
|
||||
// TODO: Create requests.
|
||||
}
|
||||
for _, req := range reqs {
|
||||
if err := stream.Send(req); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
}
|
||||
stream.CloseSend()
|
||||
}()
|
||||
for {
|
||||
resp, err := stream.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleClient_Listen() {
|
||||
ctx := context.Background()
|
||||
c, err := firestore.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
stream, err := c.Listen(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
go func() {
|
||||
reqs := []*firestorepb.ListenRequest{
|
||||
// TODO: Create requests.
|
||||
}
|
||||
for _, req := range reqs {
|
||||
if err := stream.Send(req); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
}
|
||||
stream.CloseSend()
|
||||
}()
|
||||
for {
|
||||
resp, err := stream.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleClient_ListCollectionIds() {
|
||||
ctx := context.Background()
|
||||
c, err := firestore.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &firestorepb.ListCollectionIdsRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
it := c.ListCollectionIds(ctx, req)
|
||||
for {
|
||||
resp, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
}
|
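For orientation, here is a minimal end-to-end sketch assembled from the generated examples above. The project path, the collection ID, and the exact request fields (Parent, CollectionId) are assumptions based on the v1beta1 proto, not something this diff pins down.

package main

import (
    "fmt"
    "log"

    firestore "cloud.google.com/go/firestore/apiv1beta1"
    "golang.org/x/net/context"
    "google.golang.org/api/iterator"
    firestorepb "google.golang.org/genproto/googleapis/firestore/v1beta1"
)

func main() {
    ctx := context.Background()
    c, err := firestore.NewClient(ctx)
    if err != nil {
        log.Fatal(err)
    }
    // The parent path and collection ID below are placeholders.
    req := &firestorepb.ListDocumentsRequest{
        Parent:       "projects/my-project/databases/(default)/documents",
        CollectionId: "Cities",
    }
    it := c.ListDocuments(ctx, req)
    for {
        doc, err := it.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(doc.Name) // each result is a *firestorepb.Document
    }
}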
1154
vendor/cloud.google.com/go/firestore/apiv1beta1/mock_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
245
vendor/cloud.google.com/go/firestore/client.go
generated
vendored
Normal file
@ -0,0 +1,245 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package firestore
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"google.golang.org/api/iterator"
|
||||
|
||||
vkit "cloud.google.com/go/firestore/apiv1beta1"
|
||||
|
||||
"cloud.google.com/go/internal/version"
|
||||
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
|
||||
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
// resourcePrefixHeader is the name of the metadata header used to indicate
|
||||
// the resource being operated on.
|
||||
const resourcePrefixHeader = "google-cloud-resource-prefix"
|
||||
|
||||
// A Client provides access to the Firestore service.
|
||||
type Client struct {
|
||||
c *vkit.Client
|
||||
projectID string
|
||||
databaseID string // A client is tied to a single database.
|
||||
}
|
||||
|
||||
// NewClient creates a new Firestore client that uses the given project.
|
||||
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
|
||||
vc, err := vkit.NewClient(ctx, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vc.SetGoogleClientInfo("gccl", version.Repo)
|
||||
c := &Client{
|
||||
c: vc,
|
||||
projectID: projectID,
|
||||
databaseID: "(default)", // always "(default)", for now
|
||||
}
|
||||
return c, nil
|
||||
|
||||
}
|
||||
|
||||
// Close closes any resources held by the client.
|
||||
//
|
||||
// Close need not be called at program exit.
|
||||
func (c *Client) Close() error {
|
||||
return c.c.Close()
|
||||
}
|
||||
|
||||
func (c *Client) path() string {
|
||||
return fmt.Sprintf("projects/%s/databases/%s", c.projectID, c.databaseID)
|
||||
}
|
||||
|
||||
func withResourceHeader(ctx context.Context, resource string) context.Context {
|
||||
md, _ := metadata.FromOutgoingContext(ctx)
|
||||
md = md.Copy()
|
||||
md[resourcePrefixHeader] = []string{resource}
|
||||
return metadata.NewOutgoingContext(ctx, md)
|
||||
}
|
||||
|
||||
// Collection creates a reference to a collection with the given path.
|
||||
// A path is a sequence of IDs separated by slashes.
|
||||
//
|
||||
// Collection returns nil if path contains an even number of IDs or any ID is empty.
|
||||
func (c *Client) Collection(path string) *CollectionRef {
|
||||
coll, _ := c.idsToRef(strings.Split(path, "/"), c.path())
|
||||
return coll
|
||||
}
|
||||
|
||||
// Doc creates a reference to a document with the given path.
|
||||
// A path is a sequence of IDs separated by slashes.
|
||||
//
|
||||
// Doc returns nil if path contains an odd number of IDs or any ID is empty.
|
||||
func (c *Client) Doc(path string) *DocumentRef {
|
||||
_, doc := c.idsToRef(strings.Split(path, "/"), c.path())
|
||||
return doc
|
||||
}
|
||||
|
||||
func (c *Client) idsToRef(IDs []string, dbPath string) (*CollectionRef, *DocumentRef) {
|
||||
if len(IDs) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
for _, id := range IDs {
|
||||
if id == "" {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
coll := newTopLevelCollRef(c, dbPath, IDs[0])
|
||||
i := 1
|
||||
for i < len(IDs) {
|
||||
doc := newDocRef(coll, IDs[i])
|
||||
i++
|
||||
if i == len(IDs) {
|
||||
return nil, doc
|
||||
}
|
||||
coll = newCollRefWithParent(c, doc, IDs[i])
|
||||
i++
|
||||
}
|
||||
return coll, nil
|
||||
}
|
||||
|
||||
// GetAll retrieves multiple documents with a single call. The DocumentSnapshots are
|
||||
// returned in the order of the given DocumentRefs.
|
||||
//
|
||||
// If a document is not present, the corresponding DocumentSnapshot will be nil.
|
||||
func (c *Client) GetAll(ctx context.Context, docRefs []*DocumentRef) ([]*DocumentSnapshot, error) {
|
||||
if err := checkTransaction(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var docNames []string
|
||||
for _, dr := range docRefs {
|
||||
if dr == nil {
|
||||
return nil, errNilDocRef
|
||||
}
|
||||
docNames = append(docNames, dr.Path)
|
||||
}
|
||||
req := &pb.BatchGetDocumentsRequest{
|
||||
Database: c.path(),
|
||||
Documents: docNames,
|
||||
}
|
||||
streamClient, err := c.c.BatchGetDocuments(withResourceHeader(ctx, req.Database), req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Read results from the stream and add them to a map.
|
||||
docMap := map[string]*pb.Document{}
|
||||
for {
|
||||
res, err := streamClient.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch x := res.Result.(type) {
|
||||
case *pb.BatchGetDocumentsResponse_Found:
|
||||
docMap[x.Found.Name] = x.Found
|
||||
|
||||
case *pb.BatchGetDocumentsResponse_Missing:
|
||||
if docMap[x.Missing] != nil {
|
||||
return nil, fmt.Errorf("firestore: %q both missing and present", x.Missing)
|
||||
}
|
||||
docMap[x.Missing] = nil
|
||||
default:
|
||||
return nil, errors.New("firestore: unknown BatchGetDocumentsResponse result type")
|
||||
}
|
||||
}
|
||||
|
||||
// Put the documents we've gathered in the same order as the requesting slice of
|
||||
// DocumentRefs.
|
||||
docs := make([]*DocumentSnapshot, len(docNames))
|
||||
for i, name := range docNames {
|
||||
pbDoc, ok := docMap[name]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("firestore: passed %q to BatchGetDocuments but never saw response", name)
|
||||
}
|
||||
if pbDoc != nil {
|
||||
doc, err := newDocumentSnapshot(docRefs[i], pbDoc, c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
docs[i] = doc
|
||||
}
|
||||
}
|
||||
return docs, nil
|
||||
}
|
||||
|
||||
// Collections returns an iterator over the top-level collections.
|
||||
func (c *Client) Collections(ctx context.Context) *CollectionIterator {
|
||||
it := &CollectionIterator{
|
||||
err: checkTransaction(ctx),
|
||||
client: c,
|
||||
it: c.c.ListCollectionIds(
|
||||
withResourceHeader(ctx, c.path()),
|
||||
&pb.ListCollectionIdsRequest{Parent: c.path()}),
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
|
||||
it.fetch,
|
||||
func() int { return len(it.items) },
|
||||
func() interface{} { b := it.items; it.items = nil; return b })
|
||||
return it
|
||||
}
|
||||
|
||||
// Batch returns a WriteBatch.
|
||||
func (c *Client) Batch() *WriteBatch {
|
||||
return &WriteBatch{c: c}
|
||||
}
|
||||
|
||||
// commit calls the Commit RPC outside of a transaction.
|
||||
func (c *Client) commit(ctx context.Context, ws []*pb.Write) (*WriteResult, error) {
|
||||
if err := checkTransaction(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req := &pb.CommitRequest{
|
||||
Database: c.path(),
|
||||
Writes: ws,
|
||||
}
|
||||
res, err := c.c.Commit(withResourceHeader(ctx, req.Database), req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(res.WriteResults) == 0 {
|
||||
return nil, errors.New("firestore: missing WriteResult")
|
||||
}
|
||||
return writeResultFromProto(res.WriteResults[0])
|
||||
}
|
||||
|
||||
// A WriteResult is returned by methods that write documents.
|
||||
type WriteResult struct {
|
||||
// The time at which the document was updated, or created if it did not
|
||||
// previously exist. Writes that do not actually change the document do
|
||||
// not change the update time.
|
||||
UpdateTime time.Time
|
||||
}
|
||||
|
||||
func writeResultFromProto(wr *pb.WriteResult) (*WriteResult, error) {
|
||||
t, err := ptypes.Timestamp(wr.UpdateTime)
|
||||
if err != nil {
|
||||
t = time.Time{}
|
||||
// TODO(jba): Follow up if Delete is supposed to return a nil timestamp.
|
||||
}
|
||||
return &WriteResult{UpdateTime: t}, nil
|
||||
}
|
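A short usage sketch for the Client above, showing in particular GetAll's ordering and nil-for-missing behavior. The project ID and document paths are placeholders.

package main

import (
    "fmt"
    "log"

    "cloud.google.com/go/firestore"
    "golang.org/x/net/context"
)

func main() {
    ctx := context.Background()
    client, err := firestore.NewClient(ctx, "my-project") // placeholder project ID
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()

    refs := []*firestore.DocumentRef{
        client.Doc("States/NewYork"),
        client.Doc("States/Ohio"),
    }
    // GetAll returns snapshots in the same order as refs; a missing document
    // yields a nil snapshot rather than an error.
    snaps, err := client.GetAll(ctx, refs)
    if err != nil {
        log.Fatal(err)
    }
    for i, snap := range snaps {
        if snap == nil {
            fmt.Println(refs[i].ID, "does not exist")
            continue
        }
        fmt.Println(refs[i].ID, snap.Data())
    }
}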
221
vendor/cloud.google.com/go/firestore/client_test.go
generated
vendored
Normal file
@ -0,0 +1,221 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package firestore
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
|
||||
"cloud.google.com/go/internal/pretty"
|
||||
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
var testClient = &Client{
|
||||
projectID: "projectID",
|
||||
databaseID: "(default)",
|
||||
}
|
||||
|
||||
func TestClientCollectionAndDoc(t *testing.T) {
|
||||
coll1 := testClient.Collection("X")
|
||||
db := "projects/projectID/databases/(default)"
|
||||
wantc1 := &CollectionRef{
|
||||
c: testClient,
|
||||
parentPath: db,
|
||||
Parent: nil,
|
||||
ID: "X",
|
||||
Path: "projects/projectID/databases/(default)/documents/X",
|
||||
Query: Query{c: testClient, collectionID: "X", parentPath: db},
|
||||
}
|
||||
if !testEqual(coll1, wantc1) {
|
||||
t.Fatalf("got\n%+v\nwant\n%+v", coll1, wantc1)
|
||||
}
|
||||
doc1 := testClient.Doc("X/a")
|
||||
wantd1 := &DocumentRef{
|
||||
Parent: coll1,
|
||||
ID: "a",
|
||||
Path: "projects/projectID/databases/(default)/documents/X/a",
|
||||
}
|
||||
|
||||
if !testEqual(doc1, wantd1) {
|
||||
t.Fatalf("got %+v, want %+v", doc1, wantd1)
|
||||
}
|
||||
coll2 := testClient.Collection("X/a/Y")
|
||||
parentPath := "projects/projectID/databases/(default)/documents/X/a"
|
||||
wantc2 := &CollectionRef{
|
||||
c: testClient,
|
||||
parentPath: parentPath,
|
||||
Parent: doc1,
|
||||
ID: "Y",
|
||||
Path: "projects/projectID/databases/(default)/documents/X/a/Y",
|
||||
Query: Query{c: testClient, collectionID: "Y", parentPath: parentPath},
|
||||
}
|
||||
if !testEqual(coll2, wantc2) {
|
||||
t.Fatalf("\ngot %+v\nwant %+v", coll2, wantc2)
|
||||
}
|
||||
doc2 := testClient.Doc("X/a/Y/b")
|
||||
wantd2 := &DocumentRef{
|
||||
Parent: coll2,
|
||||
ID: "b",
|
||||
Path: "projects/projectID/databases/(default)/documents/X/a/Y/b",
|
||||
}
|
||||
if !testEqual(doc2, wantd2) {
|
||||
t.Fatalf("got %+v, want %+v", doc2, wantd2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientCollDocErrors(t *testing.T) {
|
||||
for _, badColl := range []string{"", "/", "/a/", "/a/b", "a/b/", "a//b"} {
|
||||
coll := testClient.Collection(badColl)
|
||||
if coll != nil {
|
||||
t.Errorf("coll path %q: got %+v, want nil", badColl, coll)
|
||||
}
|
||||
}
|
||||
for _, badDoc := range []string{"", "a", "/", "/a", "a/", "a/b/c", "a//b/c"} {
|
||||
doc := testClient.Doc(badDoc)
|
||||
if doc != nil {
|
||||
t.Errorf("doc path %q: got %+v, want nil", badDoc, doc)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetAll(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
const dbPath = "projects/projectID/databases/(default)"
|
||||
c, srv := newMock(t)
|
||||
defer c.Close()
|
||||
wantPBDocs := []*pb.Document{
|
||||
{
|
||||
Name: dbPath + "/documents/C/a",
|
||||
CreateTime: aTimestamp,
|
||||
UpdateTime: aTimestamp,
|
||||
Fields: map[string]*pb.Value{"f": intval(2)},
|
||||
},
|
||||
nil,
|
||||
{
|
||||
Name: dbPath + "/documents/C/c",
|
||||
CreateTime: aTimestamp,
|
||||
UpdateTime: aTimestamp,
|
||||
Fields: map[string]*pb.Value{"f": intval(1)},
|
||||
},
|
||||
}
|
||||
srv.addRPC(
|
||||
&pb.BatchGetDocumentsRequest{
|
||||
Database: dbPath,
|
||||
Documents: []string{
|
||||
dbPath + "/documents/C/a",
|
||||
dbPath + "/documents/C/b",
|
||||
dbPath + "/documents/C/c",
|
||||
},
|
||||
},
|
||||
[]interface{}{
|
||||
// deliberately put these out of order
|
||||
&pb.BatchGetDocumentsResponse{
|
||||
Result: &pb.BatchGetDocumentsResponse_Found{wantPBDocs[2]},
|
||||
},
|
||||
&pb.BatchGetDocumentsResponse{
|
||||
Result: &pb.BatchGetDocumentsResponse_Found{wantPBDocs[0]},
|
||||
},
|
||||
&pb.BatchGetDocumentsResponse{
|
||||
Result: &pb.BatchGetDocumentsResponse_Missing{dbPath + "/documents/C/b"},
|
||||
},
|
||||
},
|
||||
)
|
||||
coll := c.Collection("C")
|
||||
var docRefs []*DocumentRef
|
||||
for _, name := range []string{"a", "b", "c"} {
|
||||
docRefs = append(docRefs, coll.Doc(name))
|
||||
}
|
||||
docs, err := c.GetAll(ctx, docRefs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got, want := len(docs), len(wantPBDocs); got != want {
|
||||
t.Errorf("got %d docs, wanted %d", got, want)
|
||||
}
|
||||
for i, got := range docs {
|
||||
var want *DocumentSnapshot
|
||||
if wantPBDocs[i] != nil {
|
||||
want, err = newDocumentSnapshot(docRefs[i], wantPBDocs[i], c)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
if !testEqual(got, want) {
|
||||
got.c = nil
|
||||
want.c = nil
|
||||
t.Errorf("#%d: got %+v, want %+v", i, pretty.Value(got), pretty.Value(want))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetAllErrors(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
const (
|
||||
dbPath = "projects/projectID/databases/(default)"
|
||||
docPath = dbPath + "/documents/C/a"
|
||||
)
|
||||
c, srv := newMock(t)
|
||||
if _, err := c.GetAll(ctx, []*DocumentRef{nil}); err != errNilDocRef {
|
||||
t.Errorf("got %v, want errNilDocRef", err)
|
||||
}
|
||||
|
||||
// Internal server error.
|
||||
srv.addRPC(
|
||||
&pb.BatchGetDocumentsRequest{
|
||||
Database: dbPath,
|
||||
Documents: []string{docPath},
|
||||
},
|
||||
[]interface{}{grpc.Errorf(codes.Internal, "")},
|
||||
)
|
||||
_, err := c.GetAll(ctx, []*DocumentRef{c.Doc("C/a")})
|
||||
codeEq(t, "GetAll #1", codes.Internal, err)
|
||||
|
||||
// Doc appears as both found and missing (server bug).
|
||||
srv.reset()
|
||||
srv.addRPC(
|
||||
&pb.BatchGetDocumentsRequest{
|
||||
Database: dbPath,
|
||||
Documents: []string{docPath},
|
||||
},
|
||||
[]interface{}{
|
||||
&pb.BatchGetDocumentsResponse{
|
||||
Result: &pb.BatchGetDocumentsResponse_Found{&pb.Document{Name: docPath}},
|
||||
},
|
||||
&pb.BatchGetDocumentsResponse{
|
||||
Result: &pb.BatchGetDocumentsResponse_Missing{docPath},
|
||||
},
|
||||
},
|
||||
)
|
||||
if _, err := c.GetAll(ctx, []*DocumentRef{c.Doc("C/a")}); err == nil {
|
||||
t.Error("got nil, want error")
|
||||
}
|
||||
|
||||
// Doc never appears (server bug).
|
||||
srv.reset()
|
||||
srv.addRPC(
|
||||
&pb.BatchGetDocumentsRequest{
|
||||
Database: dbPath,
|
||||
Documents: []string{docPath},
|
||||
},
|
||||
[]interface{}{},
|
||||
)
|
||||
if _, err := c.GetAll(ctx, []*DocumentRef{c.Doc("C/a")}); err == nil {
|
||||
t.Error("got nil, want error")
|
||||
}
|
||||
}
|
124
vendor/cloud.google.com/go/firestore/collref.go
generated
vendored
Normal file
@ -0,0 +1,124 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package firestore
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// A CollectionRef is a reference to a Firestore collection.
|
||||
type CollectionRef struct {
|
||||
c *Client
|
||||
|
||||
// Typically Parent.Path, or c.path if Parent is nil.
|
||||
// May be different if this CollectionRef was created from a stored reference
|
||||
// to a different project/DB.
|
||||
parentPath string
|
||||
|
||||
// Parent is the document of which this collection is a part. It is
|
||||
// nil for top-level collections.
|
||||
Parent *DocumentRef
|
||||
|
||||
// The full resource path of the collection: "projects/P/databases/D/documents..."
|
||||
Path string
|
||||
|
||||
// ID is the collection identifier.
|
||||
ID string
|
||||
|
||||
// Use the methods of Query on a CollectionRef to create and run queries.
|
||||
Query
|
||||
}
|
||||
|
||||
func (c1 *CollectionRef) equal(c2 *CollectionRef) bool {
|
||||
return c1.c == c2.c &&
|
||||
c1.parentPath == c2.parentPath &&
|
||||
c1.Parent.equal(c2.Parent) &&
|
||||
c1.Path == c2.Path &&
|
||||
c1.ID == c2.ID &&
|
||||
reflect.DeepEqual(c1.Query, c2.Query)
|
||||
}
|
||||
|
||||
func newTopLevelCollRef(c *Client, dbPath, id string) *CollectionRef {
|
||||
return &CollectionRef{
|
||||
c: c,
|
||||
ID: id,
|
||||
parentPath: dbPath,
|
||||
Path: dbPath + "/documents/" + id,
|
||||
Query: Query{c: c, collectionID: id, parentPath: dbPath},
|
||||
}
|
||||
}
|
||||
|
||||
func newCollRefWithParent(c *Client, parent *DocumentRef, id string) *CollectionRef {
|
||||
return &CollectionRef{
|
||||
c: c,
|
||||
Parent: parent,
|
||||
ID: id,
|
||||
parentPath: parent.Path,
|
||||
Path: parent.Path + "/" + id,
|
||||
Query: Query{c: c, collectionID: id, parentPath: parent.Path},
|
||||
}
|
||||
}
|
||||
|
||||
// Doc returns a DocumentRef that refers to the document in the collection with the
|
||||
// given identifier.
|
||||
func (c *CollectionRef) Doc(id string) *DocumentRef {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
return newDocRef(c, id)
|
||||
}
|
||||
|
||||
// NewDoc returns a DocumentRef with a uniquely generated ID.
|
||||
func (c *CollectionRef) NewDoc() *DocumentRef {
|
||||
return c.Doc(uniqueID())
|
||||
}
|
||||
|
||||
// Add generates a DocumentRef with a unique ID. It then creates the document
|
||||
// with the given data, which can be a map[string]interface{}, a struct or a
|
||||
// pointer to a struct.
|
||||
//
|
||||
// Add returns an error in the unlikely event that a document with the same ID
|
||||
// already exists.
|
||||
func (c *CollectionRef) Add(ctx context.Context, data interface{}) (*DocumentRef, *WriteResult, error) {
|
||||
d := c.NewDoc()
|
||||
wr, err := d.Create(ctx, data)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return d, wr, nil
|
||||
}
|
||||
|
||||
const alphanum = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
|
||||
|
||||
var (
|
||||
rngMu sync.Mutex
|
||||
rng = rand.New(rand.NewSource(time.Now().UnixNano() ^ int64(os.Getpid())))
|
||||
)
|
||||
|
||||
func uniqueID() string {
|
||||
var b [20]byte
|
||||
rngMu.Lock()
|
||||
for i := 0; i < len(b); i++ {
|
||||
b[i] = alphanum[rng.Intn(len(alphanum))]
|
||||
}
|
||||
rngMu.Unlock()
|
||||
return string(b[:])
|
||||
}
|
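A sketch of the CollectionRef helpers defined above: NewDoc only mints a random 20-character ID locally, while Add also writes the document. The collection name and fields are invented for illustration; ctx and client are assumed to come from firestore.NewClient, and the imports are fmt plus the packages from the earlier client sketch.

func addCity(ctx context.Context, client *firestore.Client) error {
    cities := client.Collection("Cities")

    // NewDoc creates only a reference; no RPC is made until the document is written.
    draft := cities.NewDoc()
    fmt.Println("generated ID:", draft.ID)

    // Add generates another unique ID and creates the document in one call.
    ref, wr, err := cities.Add(ctx, map[string]interface{}{
        "name": "Tokyo",
        "pop":  9.27,
    })
    if err != nil {
        return err
    }
    fmt.Println(ref.Path, "written at", wr.UpdateTime)
    return nil
}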
97
vendor/cloud.google.com/go/firestore/collref_test.go
generated
vendored
Normal file
@ -0,0 +1,97 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package firestore
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
func TestDoc(t *testing.T) {
|
||||
coll := testClient.Collection("C")
|
||||
got := coll.Doc("d")
|
||||
want := &DocumentRef{
|
||||
Parent: coll,
|
||||
ID: "d",
|
||||
Path: "projects/projectID/databases/(default)/documents/C/d",
|
||||
}
|
||||
if !testEqual(got, want) {
|
||||
t.Errorf("got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewDoc(t *testing.T) {
|
||||
c := &Client{}
|
||||
coll := c.Collection("C")
|
||||
got := coll.NewDoc()
|
||||
if got.Parent != coll {
|
||||
t.Errorf("got %v, want %v", got.Parent, coll)
|
||||
}
|
||||
if len(got.ID) != 20 {
|
||||
t.Errorf("got %d-char ID, wanted 20", len(got.ID))
|
||||
}
|
||||
|
||||
got2 := coll.NewDoc()
|
||||
if got.ID == got2.ID {
|
||||
t.Error("got same ID")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAdd(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c, srv := newMock(t)
|
||||
wantReq := commitRequestForSet()
|
||||
w := wantReq.Writes[0]
|
||||
w.CurrentDocument = &pb.Precondition{
|
||||
ConditionType: &pb.Precondition_Exists{false},
|
||||
}
|
||||
srv.addRPCAdjust(wantReq, commitResponseForSet, func(gotReq proto.Message) {
|
||||
// We can't know the doc ID before Add is called, so we take it from
|
||||
// the request.
|
||||
w.Operation.(*pb.Write_Update).Update.Name = gotReq.(*pb.CommitRequest).Writes[0].Operation.(*pb.Write_Update).Update.Name
|
||||
})
|
||||
_, wr, err := c.Collection("C").Add(ctx, testData)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !testEqual(wr, writeResultForSet) {
|
||||
t.Errorf("got %v, want %v", wr, writeResultForSet)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNilErrors(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c, _ := newMock(t)
|
||||
// Test that a nil CollectionRef results in a nil DocumentRef and errors
|
||||
// where possible.
|
||||
coll := c.Collection("a/b") // nil because "a/b" denotes a doc.
|
||||
if coll != nil {
|
||||
t.Fatal("collection not nil")
|
||||
}
|
||||
if got := coll.Doc("d"); got != nil {
|
||||
t.Fatalf("got %v, want nil", got)
|
||||
}
|
||||
if got := coll.NewDoc(); got != nil {
|
||||
t.Fatalf("got %v, want nil", got)
|
||||
}
|
||||
if _, _, err := coll.Add(ctx, testData); err != errNilDocRef {
|
||||
t.Fatalf("got <%v>, want <%v>", err, errNilDocRef)
|
||||
}
|
||||
}
|
220
vendor/cloud.google.com/go/firestore/doc.go
generated
vendored
Normal file
@ -0,0 +1,220 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// DO NOT EDIT doc.go. Modify internal/doc.template, then run make -C internal.
|
||||
|
||||
/*
|
||||
Package firestore provides a client for reading and writing to a Cloud Firestore
|
||||
database.
|
||||
|
||||
See https://cloud.google.com/firestore/docs for an introduction
|
||||
to Cloud Firestore and additional help on using the Firestore API.
|
||||
|
||||
Creating a Client
|
||||
|
||||
To start working with this package, create a client with a project ID:
|
||||
|
||||
ctx := context.Background()
|
||||
client, err := firestore.NewClient(ctx, "projectID")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
CollectionRefs and DocumentRefs
|
||||
|
||||
In Firestore, documents are sets of key-value pairs, and collections are groups of
|
||||
documents. A Firestore database consists of a hierarchy of alternating collections
|
||||
and documents, referred to by slash-separated paths like
|
||||
"States/California/Cities/SanFrancisco".
|
||||
|
||||
This client is built around references to collections and documents. CollectionRefs
|
||||
and DocumentRefs are lightweight values that refer to the corresponding database
|
||||
entities. Creating a ref does not involve any network traffic.
|
||||
|
||||
states := client.Collection("States")
|
||||
ny := states.Doc("NewYork")
|
||||
// Or, in a single call:
|
||||
ny = client.Doc("States/NewYork")
|
||||
|
||||
Reading
|
||||
|
||||
Use DocumentRef.Get to read a document. The result is a DocumentSnapshot.
|
||||
Call its Data method to obtain the entire document contents as a map.
|
||||
|
||||
docsnap, err := ny.Get(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
dataMap := docsnap.Data()
|
||||
fmt.Println(dataMap)
|
||||
|
||||
You can also obtain a single field with DataAt, or extract the data into a struct
|
||||
with DataTo. With the type definition
|
||||
|
||||
type State struct {
|
||||
Capital string `firestore:"capital"`
|
||||
Population float64 `firestore:"pop"` // in millions
|
||||
}
|
||||
|
||||
we can extract the document's data into a value of type State:
|
||||
|
||||
var nyData State
|
||||
if err := docsnap.DataTo(&nyData); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
Note that this client supports struct tags beginning with "firestore:" that work like
|
||||
the tags of the encoding/json package, letting you rename fields, ignore them, or
|
||||
omit their values when empty.
|
||||
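For example, a struct like the following (the field names are only illustrative) renames one field, drops another when it is empty, and ignores a third entirely:

	type City struct {
		Name       string  `firestore:"name"`
		Population float64 `firestore:"pop,omitempty"`
		Scratch    string  `firestore:"-"`
	}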
|
||||
To retrieve multiple documents from their references in a single call, use
|
||||
Client.GetAll.
|
||||
|
||||
docsnaps, err := client.GetAll(ctx, []*firestore.DocumentRef{
|
||||
states.Doc("Wisconsin"), states.Doc("Ohio"),
|
||||
})
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
for _, ds := range docsnaps {
|
||||
_ = ds // TODO: Use ds.
|
||||
}
|
||||
|
||||
|
||||
Writing
|
||||
|
||||
For writing individual documents, use the methods on DocumentReference.
|
||||
Create creates a new document.
|
||||
|
||||
wr, err := ny.Create(ctx, State{
|
||||
Capital: "Albany",
|
||||
Population: 19.8,
|
||||
})
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(wr)
|
||||
|
||||
The first return value is a WriteResult, which contains the time
|
||||
at which the document was updated.
|
||||
|
||||
Create fails if the document exists. Another method, Set, either replaces an existing
|
||||
document or creates a new one.
|
||||
|
||||
ca := states.Doc("California")
|
||||
_, err = ca.Set(ctx, State{
|
||||
Capital: "Sacramento",
|
||||
Population: 39.14,
|
||||
})
|
||||
|
||||
To update some fields of an existing document, use UpdateMap, UpdateStruct or
|
||||
UpdatePaths. For UpdateMap, the keys of the map specify which fields to change. The
|
||||
others are untouched.
|
||||
|
||||
_, err = ca.UpdateMap(ctx, map[string]interface{}{"pop": 39.2})
|
||||
|
||||
For UpdateStruct, you must explicitly provide the fields to update. The field names
|
||||
must match exactly.
|
||||
|
||||
_, err = ca.UpdateStruct(ctx, []string{"pop"}, State{Population: 39.2})
|
||||
|
||||
Use DocumentRef.Delete to delete a document.
|
||||
|
||||
_, err = ny.Delete(ctx)
|
||||
|
||||
Preconditions
|
||||
|
||||
You can condition Deletes or Updates on when a document was last changed. Specify
|
||||
these preconditions as an option to a Delete or Update method. The check and the
|
||||
write happen atomically with a single RPC.
|
||||
|
||||
docsnap, err = ca.Get(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
_, err = ca.UpdateStruct(ctx, []string{"capital"}, State{Capital: "Sacramento"},
|
||||
firestore.LastUpdateTime(docsnap.UpdateTime))
|
||||
|
||||
Here we update a doc only if it hasn't changed since we read it.
|
||||
You could also do this with a transaction.
|
||||
|
||||
To perform multiple writes at once, use a WriteBatch. Its methods chain
|
||||
for convenience.
|
||||
|
||||
WriteBatch.Commit sends the collected writes to the server, where they happen
|
||||
atomically.
|
||||
|
||||
writeResults, err := client.Batch().
|
||||
Create(ny, State{Capital: "Albany"}).
|
||||
UpdateStruct(ca, []string{"capital"}, State{Capital: "Sacramento"}).
|
||||
Delete(client.Doc("States/WestDakota")).
|
||||
Commit(ctx)
|
||||
|
||||
Queries
|
||||
|
||||
You can use queries to select documents from a collection. Begin with the collection, and
|
||||
build up a query using Select, Where and other methods of Query.
|
||||
|
||||
q := states.Where("pop", ">", 10).OrderBy("pop", firestore.Desc)
|
||||
|
||||
Call the Query's Documents method to get an iterator, and use it like
|
||||
the other Google Cloud Client iterators.
|
||||
|
||||
iter := q.Documents(ctx)
|
||||
for {
|
||||
doc, err := iter.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(doc.Data())
|
||||
}
|
||||
|
||||
To get all the documents in a collection, you can use the collection itself
|
||||
as a query.
|
||||
|
||||
iter = client.Collection("States").Documents(ctx)
|
||||
|
||||
Transactions
|
||||
|
||||
Use a transaction to execute reads and writes atomically. All reads must happen
|
||||
before any writes. Transaction creation, commit, rollback and retry are handled for
|
||||
you by the Client.RunTransaction method; just provide a function and use the
|
||||
read and write methods of the Transaction passed to it.
|
||||
|
||||
ny := client.Doc("States/NewYork")
|
||||
err := client.RunTransaction(ctx, func(ctx context.Context, tx *firestore.Transaction) error {
|
||||
doc, err := tx.Get(ny) // tx.Get, NOT ny.Get!
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pop, err := doc.DataAt("pop")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return tx.UpdateStruct(ny, []string{"pop"},
|
||||
State{Population: pop.(float64) + 0.2})
|
||||
})
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
Authentication
|
||||
|
||||
See examples of authorization and authentication at
|
||||
https://godoc.org/cloud.google.com/go#pkg-examples.
|
||||
*/
|
||||
package firestore
|
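The package documentation mentions UpdatePaths but does not show it; a minimal sketch follows, using the FieldPathUpdate type defined in docref.go below. The document path and field names are placeholders, FieldPath is assumed here to be its exported slice-of-strings form, and the imports are the same as in the earlier client sketch.

func updateCapital(ctx context.Context, client *firestore.Client) error {
    ca := client.Doc("States/California")
    _, err := ca.UpdatePaths(ctx, []firestore.FieldPathUpdate{
        // A FieldPath is a sequence of field names, so nested fields need no escaping.
        {Path: firestore.FieldPath{"capital"}, Value: "Sacramento"},
        // firestore.Delete removes the field from the stored document.
        {Path: firestore.FieldPath{"nickname"}, Value: firestore.Delete},
    })
    return err
}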
599
vendor/cloud.google.com/go/firestore/docref.go
generated
vendored
Normal file
@ -0,0 +1,599 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package firestore
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"sort"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
|
||||
vkit "cloud.google.com/go/firestore/apiv1beta1"
|
||||
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
|
||||
)
|
||||
|
||||
var errNilDocRef = errors.New("firestore: nil DocumentRef")
|
||||
|
||||
// A DocumentRef is a reference to a Firestore document.
|
||||
type DocumentRef struct {
|
||||
// The CollectionRef that this document is a part of. Never nil.
|
||||
Parent *CollectionRef
|
||||
|
||||
// The full resource path of the document: "projects/P/databases/D/documents..."
|
||||
Path string
|
||||
|
||||
// The ID of the document: the last component of the resource path.
|
||||
ID string
|
||||
}
|
||||
|
||||
func newDocRef(parent *CollectionRef, id string) *DocumentRef {
|
||||
return &DocumentRef{
|
||||
Parent: parent,
|
||||
ID: id,
|
||||
Path: parent.Path + "/" + id,
|
||||
}
|
||||
}
|
||||
|
||||
func (d1 *DocumentRef) equal(d2 *DocumentRef) bool {
|
||||
if d1 == nil || d2 == nil {
|
||||
return d1 == d2
|
||||
}
|
||||
return d1.Parent.equal(d2.Parent) && d1.Path == d2.Path && d1.ID == d2.ID
|
||||
}
|
||||
|
||||
// Collection returns a reference to a sub-collection of this document.
|
||||
func (d *DocumentRef) Collection(id string) *CollectionRef {
|
||||
return newCollRefWithParent(d.Parent.c, d, id)
|
||||
}
|
||||
|
||||
// Get retrieves the document. It returns an error if the document does not exist.
|
||||
func (d *DocumentRef) Get(ctx context.Context) (*DocumentSnapshot, error) {
|
||||
if err := checkTransaction(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if d == nil {
|
||||
return nil, errNilDocRef
|
||||
}
|
||||
doc, err := d.Parent.c.c.GetDocument(withResourceHeader(ctx, d.Parent.c.path()),
|
||||
&pb.GetDocumentRequest{Name: d.Path})
|
||||
// TODO(jba): verify that GetDocument returns NOT_FOUND.
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newDocumentSnapshot(d, doc, d.Parent.c)
|
||||
}
|
||||
|
||||
// Create creates the document with the given data.
|
||||
// It returns an error if a document with the same ID already exists.
|
||||
//
|
||||
// The data argument can be a map with string keys, a struct, or a pointer to a
|
||||
// struct. The map keys or exported struct fields become the fields of the firestore
|
||||
// document.
|
||||
// The values of data are converted to Firestore values as follows:
|
||||
//
|
||||
// - bool converts to Bool.
|
||||
// - string converts to String.
|
||||
// - int, int8, int16, int32 and int64 convert to Integer.
|
||||
// - uint8, uint16 and uint32 convert to Integer. uint64 is disallowed,
|
||||
// because it can represent values that cannot be represented in an int64, which
|
||||
// is the underlying type of an Integer.
|
||||
// - float32 and float64 convert to Double.
|
||||
// - []byte converts to Bytes.
|
||||
// - time.Time converts to Timestamp.
|
||||
// - latlng.LatLng converts to GeoPoint. latlng is the package
|
||||
// "google.golang.org/genproto/googleapis/type/latlng".
|
||||
// - Slices convert to Array.
|
||||
// - Maps and structs convert to Map.
|
||||
// - nils of any type convert to Null.
|
||||
//
|
||||
// Pointers and interface{} are also permitted, and their elements processed
|
||||
// recursively.
|
||||
//
|
||||
// Struct fields can have tags like those used by the encoding/json package. Tags
|
||||
// begin with "firestore:" and are followed by "-", meaning "ignore this field," or
|
||||
// an alternative name for the field. Following the name, these comma-separated
|
||||
// options may be provided:
|
||||
//
|
||||
// - omitempty: Do not encode this field if it is empty. A value is empty
|
||||
// if it is a zero value, or an array, slice or map of length zero.
|
||||
// - serverTimestamp: The field must be of type time.Time. When writing, if
|
||||
// the field has the zero value, the server will populate the stored document with
|
||||
// the time that the request is processed.
|
||||
func (d *DocumentRef) Create(ctx context.Context, data interface{}) (*WriteResult, error) {
|
||||
ws, err := d.newReplaceWrites(data, nil, Exists(false))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d.Parent.c.commit(ctx, ws)
|
||||
}
|
||||
|
||||
// Set creates or overwrites the document with the given data. See DocumentRef.Create
|
||||
// for the acceptable values of data. Without options, Set overwrites the document
|
||||
// completely. Specify one of the Merge options to preserve an existing document's
|
||||
// fields.
|
||||
func (d *DocumentRef) Set(ctx context.Context, data interface{}, opts ...SetOption) (*WriteResult, error) {
|
||||
ws, err := d.newReplaceWrites(data, opts, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d.Parent.c.commit(ctx, ws)
|
||||
}
|
||||
|
||||
// Delete deletes the document. If the document doesn't exist, it does nothing
|
||||
// and returns no error.
|
||||
func (d *DocumentRef) Delete(ctx context.Context, preconds ...Precondition) (*WriteResult, error) {
|
||||
ws, err := d.newDeleteWrites(preconds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d.Parent.c.commit(ctx, ws)
|
||||
}
|
||||
|
||||
func (d *DocumentRef) newReplaceWrites(data interface{}, opts []SetOption, p Precondition) ([]*pb.Write, error) {
|
||||
if d == nil {
|
||||
return nil, errNilDocRef
|
||||
}
|
||||
origFieldPaths, allPaths, err := processSetOptions(opts)
|
||||
isMerge := len(origFieldPaths) > 0 || allPaths // was some Merge option specified?
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
doc, serverTimestampPaths, err := toProtoDocument(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(origFieldPaths) > 0 {
|
||||
// Keep only data fields corresponding to the given field paths.
|
||||
doc.Fields = applyFieldPaths(doc.Fields, origFieldPaths, nil)
|
||||
}
|
||||
doc.Name = d.Path
|
||||
|
||||
var fieldPaths []FieldPath
|
||||
if allPaths {
|
||||
// MergeAll was passed. Check that the data is a map, and extract its field paths.
|
||||
v := reflect.ValueOf(data)
|
||||
if v.Kind() != reflect.Map {
|
||||
return nil, errors.New("firestore: MergeAll can only be specified with map data")
|
||||
}
|
||||
fieldPaths = fieldPathsFromMap(v, nil)
|
||||
} else if len(origFieldPaths) > 0 {
|
||||
// Remove server timestamp paths that are not in the list of paths to merge.
|
||||
// Note: this is technically O(n^2), but it is unlikely that there is more
|
||||
// than one server timestamp path.
|
||||
serverTimestampPaths = removePathsIf(serverTimestampPaths, func(fp FieldPath) bool {
|
||||
return !fp.in(origFieldPaths)
|
||||
})
|
||||
// Remove server timestamp fields from fieldPaths. Those fields were removed
|
||||
// from the document by toProtoDocument, so they should not be in the update
|
||||
// mask.
|
||||
// Note: this is technically O(n^2), but it is unlikely that there is
|
||||
// more than one server timestamp path.
|
||||
fieldPaths = removePathsIf(origFieldPaths, func(fp FieldPath) bool {
|
||||
return fp.in(serverTimestampPaths)
|
||||
})
|
||||
// Check that all the remaining field paths in the merge option are in the document.
|
||||
for _, fp := range fieldPaths {
|
||||
if _, err := valueAtPath(fp, doc.Fields); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
var pc *pb.Precondition
|
||||
if p != nil {
|
||||
pc, err = p.preconditionProto()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
var w *pb.Write
|
||||
switch {
|
||||
case len(fieldPaths) > 0:
|
||||
// There are field paths, so we need an update mask.
|
||||
sfps := toServiceFieldPaths(fieldPaths)
|
||||
sort.Strings(sfps) // TODO(jba): make tests pass without this
|
||||
w = &pb.Write{
|
||||
Operation: &pb.Write_Update{doc},
|
||||
UpdateMask: &pb.DocumentMask{FieldPaths: sfps},
|
||||
CurrentDocument: pc,
|
||||
}
|
||||
case isMerge && pc != nil:
|
||||
// There were field paths, but they all got removed.
|
||||
// The write does nothing but enforce the precondition.
|
||||
w = &pb.Write{CurrentDocument: pc}
|
||||
case !isMerge:
|
||||
// Set without merge, so no update mask.
|
||||
w = &pb.Write{
|
||||
Operation: &pb.Write_Update{doc},
|
||||
CurrentDocument: pc,
|
||||
}
|
||||
}
|
||||
return d.writeWithTransform(w, serverTimestampPaths), nil
|
||||
}
|
||||
|
||||
// Create a new map that contains only the field paths in fps.
|
||||
func applyFieldPaths(fields map[string]*pb.Value, fps []FieldPath, root FieldPath) map[string]*pb.Value {
|
||||
r := map[string]*pb.Value{}
|
||||
for k, v := range fields {
|
||||
kpath := root.with(k)
|
||||
if kpath.in(fps) {
|
||||
r[k] = v
|
||||
} else if mv := v.GetMapValue(); mv != nil {
|
||||
if m2 := applyFieldPaths(mv.Fields, fps, kpath); m2 != nil {
|
||||
r[k] = &pb.Value{&pb.Value_MapValue{&pb.MapValue{m2}}}
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(r) == 0 {
|
||||
return nil
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func fieldPathsFromMap(vmap reflect.Value, prefix FieldPath) []FieldPath {
|
||||
// vmap is a map and its keys are strings.
|
||||
// Each map key denotes a field; no splitting or escaping.
|
||||
var fps []FieldPath
|
||||
for _, k := range vmap.MapKeys() {
|
||||
v := vmap.MapIndex(k)
|
||||
fp := prefix.with(k.String())
|
||||
if vm := extractMap(v); vm.IsValid() {
|
||||
fps = append(fps, fieldPathsFromMap(vm, fp)...)
|
||||
} else if v.Interface() != ServerTimestamp {
|
||||
// ServerTimestamp fields do not go into the update mask.
|
||||
fps = append(fps, fp)
|
||||
}
|
||||
}
|
||||
return fps
|
||||
}
|
||||
|
||||
func extractMap(v reflect.Value) reflect.Value {
|
||||
switch v.Kind() {
|
||||
case reflect.Map:
|
||||
return v
|
||||
case reflect.Interface:
|
||||
return extractMap(v.Elem())
|
||||
default:
|
||||
return reflect.Value{}
|
||||
}
|
||||
}
|
||||
|
||||
// removePathsIf creates a new slice of FieldPaths that contains
|
||||
// exactly those elements of fps for which pred returns false.
|
||||
func removePathsIf(fps []FieldPath, pred func(FieldPath) bool) []FieldPath {
|
||||
var result []FieldPath
|
||||
for _, fp := range fps {
|
||||
if !pred(fp) {
|
||||
result = append(result, fp)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (d *DocumentRef) newDeleteWrites(preconds []Precondition) ([]*pb.Write, error) {
|
||||
if d == nil {
|
||||
return nil, errNilDocRef
|
||||
}
|
||||
pc, err := processPreconditionsForDelete(preconds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return []*pb.Write{{
|
||||
Operation: &pb.Write_Delete{d.Path},
|
||||
CurrentDocument: pc,
|
||||
}}, nil
|
||||
}
|
||||
|
||||
func (d *DocumentRef) newUpdateMapWrites(data map[string]interface{}, preconds []Precondition) ([]*pb.Write, error) {
|
||||
// Collect all the (top-level) keys of the map; they will comprise the update mask.
|
||||
// Also, translate the map into a sequence of FieldPathUpdates.
|
||||
var fps []FieldPath
|
||||
var fpus []FieldPathUpdate
|
||||
for k, v := range data {
|
||||
fp, err := parseDotSeparatedString(k)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fps = append(fps, fp)
|
||||
fpus = append(fpus, FieldPathUpdate{Path: fp, Value: v})
|
||||
}
|
||||
// Check that there are no duplicate field paths, and that no field
|
||||
// path is a prefix of another.
|
||||
if err := checkNoDupOrPrefix(fps); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Re-create the map from the field paths and their corresponding values. A field path
|
||||
// with a Delete value will not appear in the map but it will appear in the
|
||||
// update mask, which will cause it to be deleted.
|
||||
m := createMapFromFieldPathUpdates(fpus)
|
||||
return d.newUpdateWrites(m, fps, preconds)
|
||||
}
|
||||
|
||||
func (d *DocumentRef) newUpdateStructWrites(fieldPaths []string, data interface{}, preconds []Precondition) ([]*pb.Write, error) {
|
||||
if !isStructOrStructPtr(data) {
|
||||
return nil, errors.New("firestore: data is not struct or struct pointer")
|
||||
}
|
||||
fps, err := parseDotSeparatedStrings(fieldPaths)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := checkNoDupOrPrefix(fps); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d.newUpdateWrites(data, fps, preconds)
|
||||
}
|
||||
|
||||
func (d *DocumentRef) newUpdatePathWrites(data []FieldPathUpdate, preconds []Precondition) ([]*pb.Write, error) {
|
||||
var fps []FieldPath
|
||||
for _, fpu := range data {
|
||||
if err := fpu.Path.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fps = append(fps, fpu.Path)
|
||||
}
|
||||
if err := checkNoDupOrPrefix(fps); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m := createMapFromFieldPathUpdates(data)
|
||||
return d.newUpdateWrites(m, fps, preconds)
|
||||
}
|
||||
|
||||
// newUpdateWrites creates Write operations for an update.
|
||||
func (d *DocumentRef) newUpdateWrites(data interface{}, fieldPaths []FieldPath, preconds []Precondition) ([]*pb.Write, error) {
|
||||
if len(fieldPaths) == 0 {
|
||||
return nil, errors.New("firestore: no paths to update")
|
||||
}
|
||||
if d == nil {
|
||||
return nil, errNilDocRef
|
||||
}
|
||||
pc, err := processPreconditionsForUpdate(preconds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
doc, serverTimestampPaths, err := toProtoDocument(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sfps := toServiceFieldPaths(fieldPaths)
|
||||
doc.Name = d.Path
|
||||
return d.writeWithTransform(&pb.Write{
|
||||
Operation: &pb.Write_Update{doc},
|
||||
UpdateMask: &pb.DocumentMask{FieldPaths: sfps},
|
||||
CurrentDocument: pc,
|
||||
}, serverTimestampPaths), nil
|
||||
}
|
||||
|
||||
var requestTimeTransform = &pb.DocumentTransform_FieldTransform_SetToServerValue{
|
||||
pb.DocumentTransform_FieldTransform_REQUEST_TIME,
|
||||
}
|
||||
|
||||
func (d *DocumentRef) writeWithTransform(w *pb.Write, serverTimestampFieldPaths []FieldPath) []*pb.Write {
|
||||
var ws []*pb.Write
|
||||
if w != nil {
|
||||
ws = append(ws, w)
|
||||
}
|
||||
if len(serverTimestampFieldPaths) > 0 {
|
||||
ws = append(ws, d.newTransform(serverTimestampFieldPaths))
|
||||
}
|
||||
return ws
|
||||
}
|
||||
|
||||
func (d *DocumentRef) newTransform(serverTimestampFieldPaths []FieldPath) *pb.Write {
|
||||
sort.Sort(byPath(serverTimestampFieldPaths)) // TODO(jba): make tests pass without this
|
||||
var fts []*pb.DocumentTransform_FieldTransform
|
||||
for _, p := range serverTimestampFieldPaths {
|
||||
fts = append(fts, &pb.DocumentTransform_FieldTransform{
|
||||
FieldPath: p.toServiceFieldPath(),
|
||||
TransformType: requestTimeTransform,
|
||||
})
|
||||
}
|
||||
return &pb.Write{
|
||||
Operation: &pb.Write_Transform{
|
||||
&pb.DocumentTransform{
|
||||
Document: d.Path,
|
||||
FieldTransforms: fts,
|
||||
// TODO(jba): should the transform have the same preconditions as the write?
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
// Delete is used as a value in a call to UpdateMap to indicate that the
|
||||
// corresponding key should be deleted.
|
||||
Delete = new(int)
|
||||
// Not new(struct{}), because addresses of zero-sized values
|
||||
// may not be unique.
|
||||
|
||||
// ServerTimestamp is used as a value in a call to UpdateMap to indicate that the
|
||||
// key's value should be set to the time at which the server processed
|
||||
// the request.
|
||||
ServerTimestamp = new(int)
|
||||
)
|
||||
|
||||
// UpdateMap updates the document using the given data. Map keys replace the stored
|
||||
// values, but other fields of the stored document are untouched.
|
||||
// See DocumentRef.Create for acceptable map values.
|
||||
//
|
||||
// If a map key is a multi-element field path, like "a.b", then only key "b" of
|
||||
// the map value at "a" is changed; the rest of the map is preserved.
|
||||
// For example, if the stored data is
|
||||
// {"a": {"b": 1, "c": 2}}
|
||||
// then
|
||||
// UpdateMap({"a": {"b": 3}}) => {"a": {"b": 3}}
|
||||
// while
|
||||
// UpdateMap({"a.b": 3}) => {"a": {"b": 3, "c": 2}}
|
||||
//
|
||||
// To delete a key, specify it in the input with a value of firestore.Delete.
|
||||
//
|
||||
// Field paths expressed as map keys must not contain any of the runes "~*/[]".
|
||||
// Use UpdatePaths instead for such paths.
|
||||
//
|
||||
// UpdateMap returns an error if the document does not exist.
|
||||
func (d *DocumentRef) UpdateMap(ctx context.Context, data map[string]interface{}, preconds ...Precondition) (*WriteResult, error) {
|
||||
ws, err := d.newUpdateMapWrites(data, preconds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d.Parent.c.commit(ctx, ws)
|
||||
}
|
||||
|
||||
func isStructOrStructPtr(x interface{}) bool {
|
||||
v := reflect.ValueOf(x)
|
||||
if v.Kind() == reflect.Struct {
|
||||
return true
|
||||
}
|
||||
if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// UpdateStruct updates the given field paths of the stored document from the fields
|
||||
// of data, which must be a struct or a pointer to a struct. Other fields of the
|
||||
// stored document are untouched.
|
||||
// See DocumentRef.Create for the acceptable values of the struct's fields.
|
||||
//
|
||||
// Each element of fieldPaths is a single field or a dot-separated sequence of
|
||||
// fields, none of which contain the runes "~*/[]".
|
||||
//
|
||||
// If an element of fieldPaths does not have a corresponding field in the struct,
|
||||
// that key is deleted from the stored document.
|
||||
//
|
||||
// UpdateStruct returns an error if the document does not exist.
|
||||
func (d *DocumentRef) UpdateStruct(ctx context.Context, fieldPaths []string, data interface{}, preconds ...Precondition) (*WriteResult, error) {
|
||||
ws, err := d.newUpdateStructWrites(fieldPaths, data, preconds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d.Parent.c.commit(ctx, ws)
|
||||
}
|
||||
|
||||
// A FieldPathUpdate describes an update to a value referred to by a FieldPath.
|
||||
// See DocumentRef.Create for acceptable values.
|
||||
// To delete a field, specify firestore.Delete as the value.
|
||||
type FieldPathUpdate struct {
|
||||
Path FieldPath
|
||||
Value interface{}
|
||||
}
|
||||
|
||||
// UpdatePaths updates the document using the given data. The values at the given
|
||||
// field paths are replaced, but other fields of the stored document are untouched.
|
||||
func (d *DocumentRef) UpdatePaths(ctx context.Context, data []FieldPathUpdate, preconds ...Precondition) (*WriteResult, error) {
|
||||
ws, err := d.newUpdatePathWrites(data, preconds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d.Parent.c.commit(ctx, ws)
|
||||
}
|
||||
|
||||
// Collections returns an iterator over the immediate sub-collections of the document.
|
||||
func (d *DocumentRef) Collections(ctx context.Context) *CollectionIterator {
|
||||
client := d.Parent.c
|
||||
it := &CollectionIterator{
|
||||
err: checkTransaction(ctx),
|
||||
client: client,
|
||||
parent: d,
|
||||
it: client.c.ListCollectionIds(
|
||||
withResourceHeader(ctx, client.path()),
|
||||
&pb.ListCollectionIdsRequest{Parent: d.Path}),
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
|
||||
it.fetch,
|
||||
func() int { return len(it.items) },
|
||||
func() interface{} { b := it.items; it.items = nil; return b })
|
||||
return it
|
||||
}
|
||||
|
||||
// CollectionIterator is an iterator over sub-collections of a document.
|
||||
type CollectionIterator struct {
|
||||
client *Client
|
||||
parent *DocumentRef
|
||||
it *vkit.StringIterator
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
items []*CollectionRef
|
||||
err error
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
|
||||
func (it *CollectionIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there
|
||||
// are no more results. Once Next returns Done, all subsequent calls will return
|
||||
// Done.
|
||||
func (it *CollectionIterator) Next() (*CollectionRef, error) {
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
item := it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *CollectionIterator) fetch(pageSize int, pageToken string) (string, error) {
|
||||
if it.err != nil {
|
||||
return "", it.err
|
||||
}
|
||||
return iterFetch(pageSize, pageToken, it.it.PageInfo(), func() error {
|
||||
id, err := it.it.Next()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var cr *CollectionRef
|
||||
if it.parent == nil {
|
||||
cr = newTopLevelCollRef(it.client, it.client.path(), id)
|
||||
} else {
|
||||
cr = newCollRefWithParent(it.client, it.parent, id)
|
||||
}
|
||||
it.items = append(it.items, cr)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// GetAll returns all the collections remaining from the iterator.
|
||||
func (it *CollectionIterator) GetAll() ([]*CollectionRef, error) {
|
||||
var crs []*CollectionRef
|
||||
for {
|
||||
cr, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
crs = append(crs, cr)
|
||||
}
|
||||
return crs, nil
|
||||
}
|
||||
|
||||
// Common fetch code for iterators that are backed by vkit iterators.
|
||||
// TODO(jba): dedup with same function in logging/logadmin.
|
||||
func iterFetch(pageSize int, pageToken string, pi *iterator.PageInfo, next func() error) (string, error) {
|
||||
pi.MaxSize = pageSize
|
||||
pi.Token = pageToken
|
||||
// Get one item, which will fill the buffer.
|
||||
if err := next(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
// Collect the rest of the buffer.
|
||||
for pi.Remaining() > 0 {
|
||||
if err := next(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
return pi.Token, nil
|
||||
}
|
733
vendor/cloud.google.com/go/firestore/docref_test.go
generated
vendored
Normal file
@ -0,0 +1,733 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package firestore
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/genproto/googleapis/type/latlng"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
var (
|
||||
writeResultForSet = &WriteResult{UpdateTime: aTime}
|
||||
commitResponseForSet = &pb.CommitResponse{
|
||||
WriteResults: []*pb.WriteResult{{UpdateTime: aTimestamp}},
|
||||
}
|
||||
)
|
||||
|
||||
func TestDocGet(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c, srv := newMock(t)
|
||||
path := "projects/projectID/databases/(default)/documents/C/a"
|
||||
pdoc := &pb.Document{
|
||||
Name: path,
|
||||
CreateTime: aTimestamp,
|
||||
UpdateTime: aTimestamp,
|
||||
Fields: map[string]*pb.Value{"f": intval(1)},
|
||||
}
|
||||
srv.addRPC(&pb.GetDocumentRequest{Name: path}, pdoc)
|
||||
ref := c.Collection("C").Doc("a")
|
||||
gotDoc, err := ref.Get(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wantDoc := &DocumentSnapshot{
|
||||
Ref: ref,
|
||||
CreateTime: aTime,
|
||||
UpdateTime: aTime,
|
||||
proto: pdoc,
|
||||
c: c,
|
||||
}
|
||||
if !testEqual(gotDoc, wantDoc) {
|
||||
t.Fatalf("\ngot %+v\nwant %+v", gotDoc, wantDoc)
|
||||
}
|
||||
|
||||
srv.addRPC(
|
||||
&pb.GetDocumentRequest{
|
||||
Name: "projects/projectID/databases/(default)/documents/C/b",
|
||||
},
|
||||
grpc.Errorf(codes.NotFound, "not found"),
|
||||
)
|
||||
_, err = c.Collection("C").Doc("b").Get(ctx)
|
||||
if grpc.Code(err) != codes.NotFound {
|
||||
t.Errorf("got %v, want NotFound", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDocSet(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c, srv := newMock(t)
|
||||
for _, test := range []struct {
|
||||
desc string
|
||||
data interface{}
|
||||
opt SetOption
|
||||
write map[string]*pb.Value
|
||||
mask []string
|
||||
transform []string
|
||||
isErr bool
|
||||
}{
|
||||
{
|
||||
desc: "Set with no options",
|
||||
data: map[string]interface{}{"a": 1},
|
||||
write: map[string]*pb.Value{"a": intval(1)},
|
||||
},
|
||||
{
|
||||
desc: "Merge with a field",
|
||||
data: map[string]interface{}{"a": 1, "b": 2},
|
||||
opt: Merge("a"),
|
||||
write: map[string]*pb.Value{"a": intval(1)},
|
||||
mask: []string{"a"},
|
||||
},
|
||||
{
|
||||
desc: "Merge field is not a leaf",
|
||||
data: map[string]interface{}{
|
||||
"a": map[string]interface{}{"b": 1, "c": 2},
|
||||
"d": 3,
|
||||
},
|
||||
opt: Merge("a"),
|
||||
write: map[string]*pb.Value{"a": mapval(map[string]*pb.Value{
|
||||
"b": intval(1),
|
||||
"c": intval(2),
|
||||
})},
|
||||
mask: []string{"a"},
|
||||
},
|
||||
{
|
||||
desc: "MergeAll",
|
||||
data: map[string]interface{}{"a": 1, "b": 2},
|
||||
opt: MergeAll,
|
||||
write: map[string]*pb.Value{"a": intval(1), "b": intval(2)},
|
||||
mask: []string{"a", "b"},
|
||||
},
|
||||
{
|
||||
desc: "MergeAll with nested fields",
|
||||
data: map[string]interface{}{
|
||||
"a": 1,
|
||||
"b": map[string]interface{}{"c": 2},
|
||||
},
|
||||
opt: MergeAll,
|
||||
write: map[string]*pb.Value{
|
||||
"a": intval(1),
|
||||
"b": mapval(map[string]*pb.Value{"c": intval(2)}),
|
||||
},
|
||||
mask: []string{"a", "b.c"},
|
||||
},
|
||||
{
|
||||
desc: "Merge with FieldPaths",
|
||||
data: map[string]interface{}{"*": map[string]interface{}{"~": true}},
|
||||
opt: MergePaths([]string{"*", "~"}),
|
||||
write: map[string]*pb.Value{
|
||||
"*": mapval(map[string]*pb.Value{
|
||||
"~": boolval(true),
|
||||
}),
|
||||
},
|
||||
mask: []string{"`*`.`~`"},
|
||||
},
|
||||
{
|
||||
desc: "Merge with a struct and FieldPaths",
|
||||
data: struct {
|
||||
A map[string]bool `firestore:"*"`
|
||||
}{A: map[string]bool{"~": true}},
|
||||
opt: MergePaths([]string{"*", "~"}),
|
||||
write: map[string]*pb.Value{
|
||||
"*": mapval(map[string]*pb.Value{
|
||||
"~": boolval(true),
|
||||
}),
|
||||
},
|
||||
mask: []string{"`*`.`~`"},
|
||||
},
|
||||
{
|
||||
desc: "a ServerTimestamp field becomes a transform",
|
||||
data: map[string]interface{}{"a": 1, "b": ServerTimestamp},
|
||||
write: map[string]*pb.Value{"a": intval(1)},
|
||||
transform: []string{"b"},
|
||||
},
|
||||
{
|
||||
desc: "nested ServerTimestamp field",
|
||||
data: map[string]interface{}{
|
||||
"a": 1,
|
||||
"b": map[string]interface{}{"c": ServerTimestamp},
|
||||
},
|
||||
// TODO(jba): make this be map[string]*pb.Value{"a": intval(1)},
|
||||
write: map[string]*pb.Value{"a": intval(1), "b": mapval(map[string]*pb.Value{})},
|
||||
transform: []string{"b.c"},
|
||||
},
|
||||
{
|
||||
desc: "multiple ServerTimestamp fields",
|
||||
data: map[string]interface{}{
|
||||
"a": 1,
|
||||
"b": ServerTimestamp,
|
||||
"c": map[string]interface{}{"d": ServerTimestamp},
|
||||
},
|
||||
// TODO(jba): make this be map[string]*pb.Value{"a": intval(1)},
|
||||
write: map[string]*pb.Value{"a": intval(1),
|
||||
"c": mapval(map[string]*pb.Value{})},
|
||||
transform: []string{"b", "c.d"},
|
||||
},
|
||||
{
|
||||
desc: "ServerTimestamp with MergeAll",
|
||||
data: map[string]interface{}{"a": 1, "b": ServerTimestamp},
|
||||
opt: MergeAll,
|
||||
write: map[string]*pb.Value{"a": intval(1)},
|
||||
mask: []string{"a"},
|
||||
transform: []string{"b"},
|
||||
},
|
||||
{
|
||||
desc: "ServerTimestamp with Merge of both fields",
|
||||
data: map[string]interface{}{"a": 1, "b": ServerTimestamp},
|
||||
opt: Merge("a", "b"),
|
||||
write: map[string]*pb.Value{"a": intval(1)},
|
||||
mask: []string{"a"},
|
||||
transform: []string{"b"},
|
||||
},
|
||||
{
|
||||
desc:      "If ServerTimestamp is not in Merge, no transform",
|
||||
data: map[string]interface{}{"a": 1, "b": ServerTimestamp},
|
||||
opt: Merge("a"),
|
||||
write: map[string]*pb.Value{"a": intval(1)},
|
||||
mask: []string{"a"},
|
||||
},
|
||||
{
|
||||
desc: "If no ordinary values in Merge, no write",
|
||||
data: map[string]interface{}{"a": 1, "b": ServerTimestamp},
|
||||
opt: Merge("b"),
|
||||
transform: []string{"b"},
|
||||
},
|
||||
{
|
||||
desc: "Merge fields must all be present in data.",
|
||||
data: map[string]interface{}{"a": 1},
|
||||
opt: Merge("b", "a"),
|
||||
isErr: true,
|
||||
},
|
||||
{
|
||||
desc: "MergeAll cannot be used with structs",
|
||||
data: struct{ A int }{A: 1},
|
||||
opt: MergeAll,
|
||||
isErr: true,
|
||||
},
|
||||
{
|
||||
desc: "Delete cannot appear in data",
|
||||
data: map[string]interface{}{"a": 1, "b": Delete},
|
||||
isErr: true,
|
||||
},
|
||||
{
|
||||
desc: "Delete cannot even appear in an unmerged field (allow?)",
|
||||
data: map[string]interface{}{"a": 1, "b": Delete},
|
||||
opt: Merge("a"),
|
||||
isErr: true,
|
||||
},
|
||||
} {
|
||||
srv.reset()
|
||||
if !test.isErr {
|
||||
var writes []*pb.Write
|
||||
if test.write != nil || test.mask != nil {
|
||||
w := &pb.Write{}
|
||||
if test.write != nil {
|
||||
w.Operation = &pb.Write_Update{
|
||||
Update: &pb.Document{
|
||||
Name: "projects/projectID/databases/(default)/documents/C/d",
|
||||
Fields: test.write,
|
||||
},
|
||||
}
|
||||
}
|
||||
if test.mask != nil {
|
||||
w.UpdateMask = &pb.DocumentMask{FieldPaths: test.mask}
|
||||
}
|
||||
writes = append(writes, w)
|
||||
}
|
||||
if test.transform != nil {
|
||||
var fts []*pb.DocumentTransform_FieldTransform
|
||||
for _, p := range test.transform {
|
||||
fts = append(fts, &pb.DocumentTransform_FieldTransform{
|
||||
FieldPath: p,
|
||||
TransformType: requestTimeTransform,
|
||||
})
|
||||
}
|
||||
writes = append(writes, &pb.Write{
|
||||
Operation: &pb.Write_Transform{
|
||||
&pb.DocumentTransform{
|
||||
Document: "projects/projectID/databases/(default)/documents/C/d",
|
||||
FieldTransforms: fts,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
srv.addRPC(&pb.CommitRequest{
|
||||
Database: "projects/projectID/databases/(default)",
|
||||
Writes: writes,
|
||||
}, commitResponseForSet)
|
||||
}
|
||||
var opts []SetOption
|
||||
if test.opt != nil {
|
||||
opts = []SetOption{test.opt}
|
||||
}
|
||||
wr, err := c.Collection("C").Doc("d").Set(ctx, test.data, opts...)
|
||||
if test.isErr && err == nil {
|
||||
t.Errorf("%s: got nil, want error")
|
||||
continue
|
||||
}
|
||||
if !test.isErr && err != nil {
|
||||
t.Errorf("%s: %v", test.desc, err)
|
||||
continue
|
||||
}
|
||||
if err == nil && !testEqual(wr, writeResultForSet) {
|
||||
t.Errorf("%s: got %v, want %v", test.desc, wr, writeResultForSet)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDocCreate(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c, srv := newMock(t)
|
||||
wantReq := commitRequestForSet()
|
||||
wantReq.Writes[0].CurrentDocument = &pb.Precondition{
|
||||
ConditionType: &pb.Precondition_Exists{false},
|
||||
}
|
||||
srv.addRPC(wantReq, commitResponseForSet)
|
||||
wr, err := c.Collection("C").Doc("d").Create(ctx, testData)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !testEqual(wr, writeResultForSet) {
|
||||
t.Errorf("got %v, want %v", wr, writeResultForSet)
|
||||
}
|
||||
|
||||
// Verify creation with structs. In particular, make sure zero values
|
||||
// are handled well.
|
||||
type create struct {
|
||||
Time time.Time
|
||||
Bytes []byte
|
||||
Geo *latlng.LatLng
|
||||
}
|
||||
srv.addRPC(
|
||||
&pb.CommitRequest{
|
||||
Database: "projects/projectID/databases/(default)",
|
||||
Writes: []*pb.Write{
|
||||
{
|
||||
Operation: &pb.Write_Update{
|
||||
Update: &pb.Document{
|
||||
Name: "projects/projectID/databases/(default)/documents/C/d",
|
||||
Fields: map[string]*pb.Value{
|
||||
"Time": tsval(time.Time{}),
|
||||
"Bytes": bytesval(nil),
|
||||
"Geo": nullValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
CurrentDocument: &pb.Precondition{
|
||||
ConditionType: &pb.Precondition_Exists{false},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
commitResponseForSet,
|
||||
)
|
||||
_, err = c.Collection("C").Doc("d").Create(ctx, &create{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDocDelete(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c, srv := newMock(t)
|
||||
srv.addRPC(
|
||||
&pb.CommitRequest{
|
||||
Database: "projects/projectID/databases/(default)",
|
||||
Writes: []*pb.Write{
|
||||
{Operation: &pb.Write_Delete{"projects/projectID/databases/(default)/documents/C/d"}},
|
||||
},
|
||||
},
|
||||
&pb.CommitResponse{
|
||||
WriteResults: []*pb.WriteResult{{}},
|
||||
})
|
||||
wr, err := c.Collection("C").Doc("d").Delete(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !testEqual(wr, &WriteResult{}) {
|
||||
t.Errorf("got %+v, want %+v", wr, writeResultForSet)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDocDeleteLastUpdateTime(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c, srv := newMock(t)
|
||||
wantReq := &pb.CommitRequest{
|
||||
Database: "projects/projectID/databases/(default)",
|
||||
Writes: []*pb.Write{
|
||||
{
|
||||
Operation: &pb.Write_Delete{"projects/projectID/databases/(default)/documents/C/d"},
|
||||
CurrentDocument: &pb.Precondition{
|
||||
ConditionType: &pb.Precondition_UpdateTime{aTimestamp2},
|
||||
},
|
||||
}},
|
||||
}
|
||||
srv.addRPC(wantReq, commitResponseForSet)
|
||||
wr, err := c.Collection("C").Doc("d").Delete(ctx, LastUpdateTime(aTime2))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !testEqual(wr, writeResultForSet) {
|
||||
t.Errorf("got %+v, want %+v", wr, writeResultForSet)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
testData = map[string]interface{}{"a": 1}
|
||||
testFields = map[string]*pb.Value{"a": intval(1)}
|
||||
)
|
||||
|
||||
func TestUpdateMap(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c, srv := newMock(t)
|
||||
for _, test := range []struct {
|
||||
data map[string]interface{}
|
||||
wantFields map[string]*pb.Value
|
||||
wantPaths []string
|
||||
}{
|
||||
{
|
||||
data: map[string]interface{}{"a.b": 1},
|
||||
wantFields: map[string]*pb.Value{
|
||||
"a": mapval(map[string]*pb.Value{"b": intval(1)}),
|
||||
},
|
||||
wantPaths: []string{"a.b"},
|
||||
},
|
||||
{
|
||||
data: map[string]interface{}{
|
||||
"a": 1,
|
||||
"b": Delete,
|
||||
},
|
||||
wantFields: map[string]*pb.Value{"a": intval(1)},
|
||||
wantPaths: []string{"a", "b"},
|
||||
},
|
||||
} {
|
||||
srv.reset()
|
||||
wantReq := &pb.CommitRequest{
|
||||
Database: "projects/projectID/databases/(default)",
|
||||
Writes: []*pb.Write{{
|
||||
Operation: &pb.Write_Update{
|
||||
Update: &pb.Document{
|
||||
Name: "projects/projectID/databases/(default)/documents/C/d",
|
||||
Fields: test.wantFields,
|
||||
}},
|
||||
UpdateMask: &pb.DocumentMask{FieldPaths: test.wantPaths},
|
||||
CurrentDocument: &pb.Precondition{
|
||||
ConditionType: &pb.Precondition_Exists{true},
|
||||
},
|
||||
}},
|
||||
}
|
||||
// Sort update masks, because map iteration order is random.
|
||||
sort.Strings(wantReq.Writes[0].UpdateMask.FieldPaths)
|
||||
srv.addRPCAdjust(wantReq, commitResponseForSet, func(gotReq proto.Message) {
|
||||
sort.Strings(gotReq.(*pb.CommitRequest).Writes[0].UpdateMask.FieldPaths)
|
||||
})
|
||||
wr, err := c.Collection("C").Doc("d").UpdateMap(ctx, test.data)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !testEqual(wr, writeResultForSet) {
|
||||
t.Errorf("%v:\ngot %+v, want %+v", test.data, wr, writeResultForSet)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateMapLastUpdateTime(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c, srv := newMock(t)
|
||||
|
||||
wantReq := &pb.CommitRequest{
|
||||
Database: "projects/projectID/databases/(default)",
|
||||
Writes: []*pb.Write{{
|
||||
Operation: &pb.Write_Update{
|
||||
Update: &pb.Document{
|
||||
Name: "projects/projectID/databases/(default)/documents/C/d",
|
||||
Fields: map[string]*pb.Value{"a": intval(1)},
|
||||
}},
|
||||
UpdateMask: &pb.DocumentMask{FieldPaths: []string{"a"}},
|
||||
CurrentDocument: &pb.Precondition{
|
||||
ConditionType: &pb.Precondition_UpdateTime{aTimestamp2},
|
||||
},
|
||||
}},
|
||||
}
|
||||
srv.addRPC(wantReq, commitResponseForSet)
|
||||
wr, err := c.Collection("C").Doc("d").UpdateMap(ctx, map[string]interface{}{"a": 1}, LastUpdateTime(aTime2))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !testEqual(wr, writeResultForSet) {
|
||||
t.Errorf("got %v, want %v", wr, writeResultForSet)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateMapErrors(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c, _ := newMock(t)
|
||||
for _, in := range []map[string]interface{}{
|
||||
nil, // no paths
|
||||
map[string]interface{}{"a~b": 1}, // invalid character
|
||||
map[string]interface{}{"a..b": 1}, // empty path component
|
||||
map[string]interface{}{"a.b": 1, "a": 2}, // prefix
|
||||
} {
|
||||
_, err := c.Collection("C").Doc("d").UpdateMap(ctx, in)
|
||||
if err == nil {
|
||||
t.Errorf("%v: got nil, want error", in)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateStruct(t *testing.T) {
|
||||
type update struct{ A int }
|
||||
c, srv := newMock(t)
|
||||
wantReq := &pb.CommitRequest{
|
||||
Database: "projects/projectID/databases/(default)",
|
||||
Writes: []*pb.Write{{
|
||||
Operation: &pb.Write_Update{
|
||||
Update: &pb.Document{
|
||||
Name: "projects/projectID/databases/(default)/documents/C/d",
|
||||
Fields: map[string]*pb.Value{"A": intval(2)},
|
||||
},
|
||||
},
|
||||
UpdateMask: &pb.DocumentMask{FieldPaths: []string{"A", "b.c"}},
|
||||
CurrentDocument: &pb.Precondition{
|
||||
ConditionType: &pb.Precondition_Exists{true},
|
||||
},
|
||||
}},
|
||||
}
|
||||
srv.addRPC(wantReq, commitResponseForSet)
|
||||
wr, err := c.Collection("C").Doc("d").
|
||||
UpdateStruct(context.Background(), []string{"A", "b.c"}, &update{A: 2})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !testEqual(wr, writeResultForSet) {
|
||||
t.Errorf("got %+v, want %+v", wr, writeResultForSet)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateStructErrors(t *testing.T) {
|
||||
type update struct{ A int }
|
||||
|
||||
ctx := context.Background()
|
||||
c, _ := newMock(t)
|
||||
doc := c.Collection("C").Doc("d")
|
||||
for _, test := range []struct {
|
||||
desc string
|
||||
fields []string
|
||||
data interface{}
|
||||
}{
|
||||
{
|
||||
desc: "data is not a struct or *struct",
|
||||
data: map[string]interface{}{"a": 1},
|
||||
},
|
||||
{
|
||||
desc: "no paths",
|
||||
fields: nil,
|
||||
data: update{},
|
||||
},
|
||||
{
|
||||
desc: "empty",
|
||||
fields: []string{""},
|
||||
data: update{},
|
||||
},
|
||||
{
|
||||
desc: "empty component",
|
||||
fields: []string{"a.b..c"},
|
||||
data: update{},
|
||||
},
|
||||
{
|
||||
desc: "duplicate field",
|
||||
fields: []string{"a", "b", "c", "a"},
|
||||
data: update{},
|
||||
},
|
||||
{
|
||||
desc: "invalid character",
|
||||
fields: []string{"a", "b]"},
|
||||
data: update{},
|
||||
},
|
||||
{
|
||||
desc: "prefix",
|
||||
fields: []string{"a", "b", "c", "b.c"},
|
||||
data: update{},
|
||||
},
|
||||
} {
|
||||
_, err := doc.UpdateStruct(ctx, test.fields, test.data)
|
||||
if err == nil {
|
||||
t.Errorf("%s: got nil, want error", test.desc)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdatePaths(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c, srv := newMock(t)
|
||||
for _, test := range []struct {
|
||||
data []FieldPathUpdate
|
||||
wantFields map[string]*pb.Value
|
||||
wantPaths []string
|
||||
}{
|
||||
{
|
||||
data: []FieldPathUpdate{
|
||||
{Path: []string{"*", "~"}, Value: 1},
|
||||
{Path: []string{"*", "/"}, Value: 2},
|
||||
},
|
||||
wantFields: map[string]*pb.Value{
|
||||
"*": mapval(map[string]*pb.Value{
|
||||
"~": intval(1),
|
||||
"/": intval(2),
|
||||
}),
|
||||
},
|
||||
wantPaths: []string{"`*`.`~`", "`*`.`/`"},
|
||||
},
|
||||
{
|
||||
data: []FieldPathUpdate{
|
||||
{Path: []string{"*"}, Value: 1},
|
||||
{Path: []string{"]"}, Value: Delete},
|
||||
},
|
||||
wantFields: map[string]*pb.Value{"*": intval(1)},
|
||||
wantPaths: []string{"`*`", "`]`"},
|
||||
},
|
||||
} {
|
||||
srv.reset()
|
||||
wantReq := &pb.CommitRequest{
|
||||
Database: "projects/projectID/databases/(default)",
|
||||
Writes: []*pb.Write{{
|
||||
Operation: &pb.Write_Update{
|
||||
Update: &pb.Document{
|
||||
Name: "projects/projectID/databases/(default)/documents/C/d",
|
||||
Fields: test.wantFields,
|
||||
}},
|
||||
UpdateMask: &pb.DocumentMask{FieldPaths: test.wantPaths},
|
||||
CurrentDocument: &pb.Precondition{
|
||||
ConditionType: &pb.Precondition_Exists{true},
|
||||
},
|
||||
}},
|
||||
}
|
||||
// Sort update masks, because map iteration order is random.
|
||||
sort.Strings(wantReq.Writes[0].UpdateMask.FieldPaths)
|
||||
srv.addRPCAdjust(wantReq, commitResponseForSet, func(gotReq proto.Message) {
|
||||
sort.Strings(gotReq.(*pb.CommitRequest).Writes[0].UpdateMask.FieldPaths)
|
||||
})
|
||||
wr, err := c.Collection("C").Doc("d").UpdatePaths(ctx, test.data)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !testEqual(wr, writeResultForSet) {
|
||||
t.Errorf("%v:\ngot %+v, want %+v", test.data, wr, writeResultForSet)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdatePathsErrors(t *testing.T) {
|
||||
fpu := func(s ...string) FieldPathUpdate { return FieldPathUpdate{Path: s} }
|
||||
|
||||
ctx := context.Background()
|
||||
c, _ := newMock(t)
|
||||
doc := c.Collection("C").Doc("d")
|
||||
for _, test := range []struct {
|
||||
desc string
|
||||
data []FieldPathUpdate
|
||||
}{
|
||||
{"no updates", nil},
|
||||
{"empty", []FieldPathUpdate{fpu("")}},
|
||||
{"empty component", []FieldPathUpdate{fpu("*", "")}},
|
||||
{"duplicate field", []FieldPathUpdate{fpu("~"), fpu("*"), fpu("~")}},
|
||||
{"prefix", []FieldPathUpdate{fpu("*", "a"), fpu("b"), fpu("*", "a", "b")}},
|
||||
} {
|
||||
_, err := doc.UpdatePaths(ctx, test.data)
|
||||
if err == nil {
|
||||
t.Errorf("%s: got nil, want error", test.desc)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyFieldPaths(t *testing.T) {
|
||||
submap := mapval(map[string]*pb.Value{
|
||||
"b": intval(1),
|
||||
"c": intval(2),
|
||||
})
|
||||
fields := map[string]*pb.Value{
|
||||
"a": submap,
|
||||
"d": intval(3),
|
||||
}
|
||||
for _, test := range []struct {
|
||||
fps []FieldPath
|
||||
want map[string]*pb.Value
|
||||
}{
|
||||
{nil, nil},
|
||||
{[]FieldPath{[]string{"z"}}, nil},
|
||||
{[]FieldPath{[]string{"a"}}, map[string]*pb.Value{"a": submap}},
|
||||
{[]FieldPath{[]string{"a", "b", "c"}}, nil},
|
||||
{[]FieldPath{[]string{"d"}}, map[string]*pb.Value{"d": intval(3)}},
|
||||
{
|
||||
[]FieldPath{[]string{"d"}, []string{"a", "c"}},
|
||||
map[string]*pb.Value{
|
||||
"a": mapval(map[string]*pb.Value{"c": intval(2)}),
|
||||
"d": intval(3),
|
||||
},
|
||||
},
|
||||
} {
|
||||
got := applyFieldPaths(fields, test.fps, nil)
|
||||
if !testEqual(got, test.want) {
|
||||
t.Errorf("%v:\ngot %v\nwant \n%v", test.fps, got, test.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFieldPathsFromMap(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in map[string]interface{}
|
||||
want []string
|
||||
}{
|
||||
{nil, nil},
|
||||
{map[string]interface{}{"a": 1}, []string{"a"}},
|
||||
{map[string]interface{}{
|
||||
"a": 1,
|
||||
"b": map[string]interface{}{"c": 2},
|
||||
}, []string{"a", "b.c"}},
|
||||
} {
|
||||
fps := fieldPathsFromMap(reflect.ValueOf(test.in), nil)
|
||||
got := toServiceFieldPaths(fps)
|
||||
sort.Strings(got)
|
||||
if !testEqual(got, test.want) {
|
||||
t.Errorf("%+v: got %v, want %v", test.in, got, test.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func commitRequestForSet() *pb.CommitRequest {
|
||||
return &pb.CommitRequest{
|
||||
Database: "projects/projectID/databases/(default)",
|
||||
Writes: []*pb.Write{
|
||||
{
|
||||
Operation: &pb.Write_Update{
|
||||
Update: &pb.Document{
|
||||
Name: "projects/projectID/databases/(default)/documents/C/d",
|
||||
Fields: testFields,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
266
vendor/cloud.google.com/go/firestore/document.go
generated
vendored
Normal file
@ -0,0 +1,266 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package firestore
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
)
|
||||
|
||||
// A DocumentSnapshot contains document data and metadata.
|
||||
type DocumentSnapshot struct {
|
||||
// The DocumentRef for this document.
|
||||
Ref *DocumentRef
|
||||
|
||||
// Read-only. The time at which the document was created.
|
||||
// Increases monotonically when a document is deleted then
|
||||
// recreated. It can also be compared to values from other documents and
|
||||
// the read time of a query.
|
||||
CreateTime time.Time
|
||||
|
||||
// Read-only. The time at which the document was last changed. This value
|
||||
// is initially set to CreateTime then increases monotonically with each
|
||||
// change to the document. It can also be compared to values from other
|
||||
// documents and the read time of a query.
|
||||
UpdateTime time.Time
|
||||
|
||||
c *Client
|
||||
proto *pb.Document
|
||||
}
|
||||
|
||||
func (d1 *DocumentSnapshot) equal(d2 *DocumentSnapshot) bool {
|
||||
if d1 == nil || d2 == nil {
|
||||
return d1 == d2
|
||||
}
|
||||
return d1.Ref.equal(d2.Ref) &&
|
||||
d1.CreateTime.Equal(d2.CreateTime) &&
|
||||
d1.UpdateTime.Equal(d2.UpdateTime) &&
|
||||
d1.c == d2.c &&
|
||||
proto.Equal(d1.proto, d2.proto)
|
||||
}
|
||||
|
||||
// Data returns the DocumentSnapshot's fields as a map.
|
||||
// It is equivalent to
|
||||
// var m map[string]interface{}
|
||||
// d.DataTo(&m)
|
||||
func (d *DocumentSnapshot) Data() map[string]interface{} {
|
||||
m, err := createMapFromValueMap(d.proto.Fields, d.c)
|
||||
// Any error here is a bug in the client.
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("firestore: %v", err))
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// DataTo uses the document's fields to populate p, which can be a pointer to a
|
||||
// map[string]interface{} or a pointer to a struct.
|
||||
//
|
||||
// Firestore field values are converted to Go values as follows:
|
||||
// - Null converts to nil.
|
||||
// - Bool converts to bool.
|
||||
// - String converts to string.
|
||||
// - Integer converts to int64. When setting a struct field, any signed or unsigned
|
||||
// integer type is permitted except uint64. Overflow is detected and results in
|
||||
// an error.
|
||||
// - Double converts to float64. When setting a struct field, float32 is permitted.
|
||||
// Overflow is detected and results in an error.
|
||||
// - Bytes is converted to []byte.
|
||||
// - Timestamp converts to time.Time.
|
||||
// - GeoPoint converts to latlng.LatLng, where latlng is the package
|
||||
// "google.golang.org/genproto/googleapis/type/latlng".
|
||||
// - Arrays convert to []interface{}. When setting a struct field, the field
|
||||
// may be a slice or array of any type and is populated recursively.
|
||||
// Slices are resized to the incoming value's size, while arrays that are too
|
||||
// long have excess elements filled with zero values. If the array is too short,
|
||||
// excess incoming values will be dropped.
|
||||
// - Maps convert to map[string]interface{}. When setting a struct field,
|
||||
// maps of key type string and any value type are permitted, and are populated
|
||||
// recursively.
|
||||
// - References are converted to DocumentRefs.
|
||||
//
|
||||
// Field names given by struct field tags are observed, as described in
|
||||
// DocumentRef.Create.
|
||||
func (d *DocumentSnapshot) DataTo(p interface{}) error {
|
||||
return setFromProtoValue(p, &pb.Value{&pb.Value_MapValue{&pb.MapValue{d.proto.Fields}}}, d.c)
|
||||
}
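// A minimal sketch of DataTo with a struct, assuming a DocumentSnapshot named
// docsnap obtained from a previous Get; the field names and tags are
// illustrative only.
//
//	type State struct {
//		Capital    string    `firestore:"capital"`
//		Population float64   `firestore:"pop"`
//		Founded    time.Time `firestore:"founded"`
//	}
//	var s State
//	if err := docsnap.DataTo(&s); err != nil {
//		// TODO: Handle error.
//	}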
|
||||
|
||||
// DataAt returns the data value denoted by fieldPath.
|
||||
//
|
||||
// The fieldPath argument can be a single field or a dot-separated sequence of
|
||||
// fields, and must not contain any of the runes "~*/[]". Use DataAtPath instead for
|
||||
// such a path.
|
||||
//
|
||||
// See DocumentSnapshot.DataTo for how Firestore values are converted to Go values.
|
||||
func (d *DocumentSnapshot) DataAt(fieldPath string) (interface{}, error) {
|
||||
fp, err := parseDotSeparatedString(fieldPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d.DataAtPath(fp)
|
||||
}
|
||||
|
||||
// DataAtPath returns the data value denoted by the FieldPath fp.
|
||||
func (d *DocumentSnapshot) DataAtPath(fp FieldPath) (interface{}, error) {
|
||||
v, err := valueAtPath(fp, d.proto.Fields)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return createFromProtoValue(v, d.c)
|
||||
}
|
||||
|
||||
// valueAtPath returns the value of m referred to by fp.
|
||||
func valueAtPath(fp FieldPath, m map[string]*pb.Value) (*pb.Value, error) {
|
||||
for _, k := range fp[:len(fp)-1] {
|
||||
v := m[k]
|
||||
if v == nil {
|
||||
return nil, fmt.Errorf("firestore: no field %q", k)
|
||||
}
|
||||
mv := v.GetMapValue()
|
||||
if mv == nil {
|
||||
return nil, fmt.Errorf("firestore: value for field %q is not a map", k)
|
||||
}
|
||||
m = mv.Fields
|
||||
}
|
||||
k := fp[len(fp)-1]
|
||||
v := m[k]
|
||||
if v == nil {
|
||||
return nil, fmt.Errorf("firestore: no field %q", k)
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// toProtoDocument converts a Go value to a Document proto.
|
||||
// Valid values are: map[string]T, struct, or pointer to a valid value.
|
||||
// It also returns a list of field paths for DocumentTransform (server timestamp).
|
||||
func toProtoDocument(x interface{}) (*pb.Document, []FieldPath, error) {
|
||||
if x == nil {
|
||||
return nil, nil, errors.New("firestore: nil document contents")
|
||||
}
|
||||
v := reflect.ValueOf(x)
|
||||
pv, err := toProtoValue(v)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
fieldPaths, err := extractTransformPaths(v, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
m := pv.GetMapValue()
|
||||
if m == nil {
|
||||
return nil, nil, fmt.Errorf("firestore: cannot covert value of type %T into a map", x)
|
||||
}
|
||||
return &pb.Document{Fields: m.Fields}, fieldPaths, nil
|
||||
}
|
||||
|
||||
func extractTransformPaths(v reflect.Value, prefix FieldPath) ([]FieldPath, error) {
|
||||
switch v.Kind() {
|
||||
case reflect.Map:
|
||||
return extractTransformPathsFromMap(v, prefix)
|
||||
case reflect.Struct:
|
||||
return extractTransformPathsFromStruct(v, prefix)
|
||||
case reflect.Ptr:
|
||||
if v.IsNil() {
|
||||
return nil, nil
|
||||
}
|
||||
return extractTransformPaths(v.Elem(), prefix)
|
||||
case reflect.Interface:
|
||||
if v.NumMethod() == 0 { // empty interface: recurse on its contents
|
||||
return extractTransformPaths(v.Elem(), prefix)
|
||||
}
|
||||
return nil, nil
|
||||
default:
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
func extractTransformPathsFromMap(v reflect.Value, prefix FieldPath) ([]FieldPath, error) {
|
||||
var paths []FieldPath
|
||||
for _, k := range v.MapKeys() {
|
||||
sk := k.Interface().(string) // assume keys are strings; checked in toProtoValue
|
||||
path := prefix.with(sk)
|
||||
mi := v.MapIndex(k)
|
||||
if mi.Interface() == ServerTimestamp {
|
||||
paths = append(paths, path)
|
||||
} else {
|
||||
ps, err := extractTransformPaths(mi, path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
paths = append(paths, ps...)
|
||||
}
|
||||
}
|
||||
return paths, nil
|
||||
}
|
||||
|
||||
func extractTransformPathsFromStruct(v reflect.Value, prefix FieldPath) ([]FieldPath, error) {
|
||||
var paths []FieldPath
|
||||
fields, err := fieldCache.Fields(v.Type())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, f := range fields {
|
||||
fv := v.FieldByIndex(f.Index)
|
||||
path := prefix.with(f.Name)
|
||||
opts := f.ParsedTag.(tagOptions)
|
||||
if opts.serverTimestamp {
|
||||
var isZero bool
|
||||
switch f.Type {
|
||||
case typeOfGoTime:
|
||||
isZero = fv.Interface().(time.Time).IsZero()
|
||||
case reflect.PtrTo(typeOfGoTime):
|
||||
isZero = fv.IsNil() || fv.Elem().Interface().(time.Time).IsZero()
|
||||
default:
|
||||
return nil, fmt.Errorf("firestore: field %s of struct %s with serverTimestamp tag must be of type time.Time or *time.Time",
|
||||
f.Name, v.Type())
|
||||
}
|
||||
if isZero {
|
||||
paths = append(paths, path)
|
||||
}
|
||||
} else {
|
||||
ps, err := extractTransformPaths(fv, path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
paths = append(paths, ps...)
|
||||
}
|
||||
}
|
||||
return paths, nil
|
||||
}
|
||||
|
||||
func newDocumentSnapshot(ref *DocumentRef, proto *pb.Document, c *Client) (*DocumentSnapshot, error) {
|
||||
d := &DocumentSnapshot{
|
||||
Ref: ref,
|
||||
c: c,
|
||||
proto: proto,
|
||||
}
|
||||
ts, err := ptypes.Timestamp(proto.CreateTime)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
d.CreateTime = ts
|
||||
ts, err = ptypes.Timestamp(proto.UpdateTime)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
d.UpdateTime = ts
|
||||
return d, nil
|
||||
}
|
238
vendor/cloud.google.com/go/firestore/document_test.go
generated
vendored
Normal file
@ -0,0 +1,238 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package firestore
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
|
||||
|
||||
tspb "github.com/golang/protobuf/ptypes/timestamp"
|
||||
)
|
||||
|
||||
func TestToProtoDocument(t *testing.T) {
|
||||
type s struct{ I int }
|
||||
|
||||
for _, test := range []struct {
|
||||
in interface{}
|
||||
want *pb.Document
|
||||
wantErr bool
|
||||
}{
|
||||
{nil, nil, true},
|
||||
{[]int{1}, nil, true},
|
||||
{map[string]int{"a": 1},
|
||||
&pb.Document{Fields: map[string]*pb.Value{"a": intval(1)}},
|
||||
false},
|
||||
{s{2}, &pb.Document{Fields: map[string]*pb.Value{"I": intval(2)}}, false},
|
||||
{&s{3}, &pb.Document{Fields: map[string]*pb.Value{"I": intval(3)}}, false},
|
||||
} {
|
||||
got, _, gotErr := toProtoDocument(test.in)
|
||||
if (gotErr != nil) != test.wantErr {
|
||||
t.Errorf("%v: got error %v, want %t", test.in, gotErr, test.wantErr)
|
||||
}
|
||||
if gotErr != nil {
|
||||
continue
|
||||
}
|
||||
if !testEqual(got, test.want) {
|
||||
t.Errorf("%v: got %v, want %v", test.in, got, test.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewDocumentSnapshot(t *testing.T) {
|
||||
c := &Client{
|
||||
projectID: "projID",
|
||||
databaseID: "(database)",
|
||||
}
|
||||
docRef := c.Doc("C/a")
|
||||
in := &pb.Document{
|
||||
CreateTime: &tspb.Timestamp{Seconds: 10},
|
||||
UpdateTime: &tspb.Timestamp{Seconds: 20},
|
||||
Fields: map[string]*pb.Value{"a": intval(1)},
|
||||
}
|
||||
want := &DocumentSnapshot{
|
||||
Ref: docRef,
|
||||
CreateTime: time.Unix(10, 0).UTC(),
|
||||
UpdateTime: time.Unix(20, 0).UTC(),
|
||||
proto: in,
|
||||
c: c,
|
||||
}
|
||||
got, err := newDocumentSnapshot(docRef, in, c)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !testEqual(got, want) {
|
||||
t.Errorf("got %+v\nwant %+v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestData(t *testing.T) {
|
||||
doc := &DocumentSnapshot{
|
||||
proto: &pb.Document{
|
||||
Fields: map[string]*pb.Value{"a": intval(1), "b": strval("x")},
|
||||
},
|
||||
}
|
||||
got := doc.Data()
|
||||
want := map[string]interface{}{"a": int64(1), "b": "x"}
|
||||
if !testEqual(got, want) {
|
||||
t.Errorf("got %#v\nwant %#v", got, want)
|
||||
}
|
||||
var got2 map[string]interface{}
|
||||
if err := doc.DataTo(&got2); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !testEqual(got2, want) {
|
||||
t.Errorf("got %#v\nwant %#v", got2, want)
|
||||
}
|
||||
|
||||
type s struct {
|
||||
A int
|
||||
B string
|
||||
}
|
||||
var got3 s
|
||||
if err := doc.DataTo(&got3); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
want2 := s{A: 1, B: "x"}
|
||||
if !testEqual(got3, want2) {
|
||||
t.Errorf("got %#v\nwant %#v", got3, want2)
|
||||
}
|
||||
}
|
||||
|
||||
var testDoc = &DocumentSnapshot{
|
||||
proto: &pb.Document{
|
||||
Fields: map[string]*pb.Value{
|
||||
"a": intval(1),
|
||||
"b": mapval(map[string]*pb.Value{
|
||||
"`": intval(2),
|
||||
"~": mapval(map[string]*pb.Value{
|
||||
"x": intval(3),
|
||||
}),
|
||||
}),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func TestDataAt(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
fieldPath string
|
||||
want interface{}
|
||||
}{
|
||||
{"a", int64(1)},
|
||||
{"b.`", int64(2)},
|
||||
} {
|
||||
got, err := testDoc.DataAt(test.fieldPath)
|
||||
if err != nil {
|
||||
t.Errorf("%q: %v", test.fieldPath, err)
|
||||
continue
|
||||
}
|
||||
if !testEqual(got, test.want) {
|
||||
t.Errorf("%q: got %v, want %v", test.fieldPath, got, test.want)
|
||||
}
|
||||
}
|
||||
|
||||
for _, bad := range []string{
|
||||
"c.~.x", // bad field path
|
||||
"a.b", // "a" isn't a map
|
||||
"z.b", // bad non-final key
|
||||
"b.z", // bad final key
|
||||
} {
|
||||
_, err := testDoc.DataAt(bad)
|
||||
if err == nil {
|
||||
t.Errorf("%q: got nil, want error", bad)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDataAtPath(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
fieldPath FieldPath
|
||||
want interface{}
|
||||
}{
|
||||
{[]string{"a"}, int64(1)},
|
||||
{[]string{"b", "`"}, int64(2)},
|
||||
{[]string{"b", "~"}, map[string]interface{}{"x": int64(3)}},
|
||||
{[]string{"b", "~", "x"}, int64(3)},
|
||||
} {
|
||||
got, err := testDoc.DataAtPath(test.fieldPath)
|
||||
if err != nil {
|
||||
t.Errorf("%v: %v", test.fieldPath, err)
|
||||
continue
|
||||
}
|
||||
if !testEqual(got, test.want) {
|
||||
t.Errorf("%v: got %v, want %v", test.fieldPath, got, test.want)
|
||||
}
|
||||
}
|
||||
|
||||
for _, bad := range []FieldPath{
|
||||
[]string{"c", "", "x"}, // bad field path
|
||||
[]string{"a", "b"}, // "a" isn't a map
|
||||
[]string{"z", "~"}, // bad non-final key
|
||||
[]string{"b", "z"}, // bad final key
|
||||
} {
|
||||
_, err := testDoc.DataAtPath(bad)
|
||||
if err == nil {
|
||||
t.Errorf("%v: got nil, want error", bad)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractTransformPaths(t *testing.T) {
|
||||
type S struct {
|
||||
A time.Time `firestore:",serverTimestamp"`
|
||||
B time.Time `firestore:",serverTimestamp"`
|
||||
C *time.Time `firestore:",serverTimestamp"`
|
||||
D *time.Time `firestore:"d.d,serverTimestamp"`
|
||||
E *time.Time `firestore:",serverTimestamp"`
|
||||
F time.Time
|
||||
G int
|
||||
}
|
||||
|
||||
m := map[string]interface{}{
|
||||
"x": 1,
|
||||
"y": &S{
|
||||
// A is a zero time: included
|
||||
B: aTime, // not a zero time: excluded
|
||||
// C is nil: included
|
||||
D: &time.Time{}, // pointer to a zero time: included
|
||||
E: &aTime, // pointer to a non-zero time: excluded
|
||||
// F is a zero time, but does not have the right tag: excluded
|
||||
G: 15, // not a time.Time
|
||||
},
|
||||
"z": map[string]interface{}{"w": ServerTimestamp},
|
||||
}
|
||||
got, err := extractTransformPaths(reflect.ValueOf(m), nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
sort.Sort(byPath(got))
|
||||
want := []FieldPath{{"y", "A"}, {"y", "C"}, {"y", "d.d"}, {"z", "w"}}
|
||||
if !testEqual(got, want) {
|
||||
t.Errorf("got %#v, want %#v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractTransformPathsErrors(t *testing.T) {
|
||||
type S struct {
|
||||
A int `firestore:",serverTimestamp"`
|
||||
}
|
||||
_, err := extractTransformPaths(reflect.ValueOf(S{}), nil)
|
||||
if err == nil {
|
||||
t.Error("got nil, want error")
|
||||
}
|
||||
}
|
552
vendor/cloud.google.com/go/firestore/examples_test.go
generated
vendored
Normal file
@ -0,0 +1,552 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// TODO(jba): add Output comments to examples when feasible.
|
||||
|
||||
package firestore_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"cloud.google.com/go/firestore"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"google.golang.org/api/iterator"
|
||||
)
|
||||
|
||||
func ExampleNewClient() {
|
||||
ctx := context.Background()
|
||||
client, err := firestore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer client.Close() // Close client when done.
|
||||
_ = client // TODO: Use client.
|
||||
}
|
||||
|
||||
func ExampleClient_Collection() {
|
||||
ctx := context.Background()
|
||||
client, err := firestore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer client.Close()
|
||||
coll1 := client.Collection("States")
|
||||
coll2 := client.Collection("States/NewYork/Cities")
|
||||
fmt.Println(coll1, coll2)
|
||||
}
|
||||
|
||||
func ExampleClient_Doc() {
|
||||
ctx := context.Background()
|
||||
client, err := firestore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer client.Close()
|
||||
doc1 := client.Doc("States/NewYork")
|
||||
doc2 := client.Doc("States/NewYork/Cities/Albany")
|
||||
fmt.Println(doc1, doc2)
|
||||
}
|
||||
|
||||
func ExampleClient_GetAll() {
|
||||
ctx := context.Background()
|
||||
client, err := firestore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer client.Close()
|
||||
docs, err := client.GetAll(ctx, []*firestore.DocumentRef{
|
||||
client.Doc("States/NorthCarolina"),
|
||||
client.Doc("States/SouthCarolina"),
|
||||
client.Doc("States/WestCarolina"),
|
||||
client.Doc("States/EastCarolina"),
|
||||
})
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// docs is a slice with four DocumentSnapshots, but the last two are
|
||||
// nil because there is no West or East Carolina.
|
||||
fmt.Println(docs)
|
||||
}
|
||||
|
||||
func ExampleClient_Batch() {
|
||||
ctx := context.Background()
|
||||
client, err := firestore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer client.Close()
|
||||
b := client.Batch()
|
||||
_ = b // TODO: Use batch.
|
||||
}
|
||||
|
||||
func ExampleWriteBatch_Commit() {
|
||||
ctx := context.Background()
|
||||
client, err := firestore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
type State struct {
|
||||
Capital string `firestore:"capital"`
|
||||
Population float64 `firestore:"pop"` // in millions
|
||||
}
|
||||
|
||||
ny := client.Doc("States/NewYork")
|
||||
ca := client.Doc("States/California")
|
||||
|
||||
writeResults, err := client.Batch().
|
||||
Create(ny, State{Capital: "Albany", Population: 19.8}).
|
||||
Set(ca, State{Capital: "Sacramento", Population: 39.14}).
|
||||
Delete(client.Doc("States/WestDakota")).
|
||||
Commit(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(writeResults)
|
||||
}
|
||||
|
||||
func ExampleCollectionRef_Add() {
|
||||
ctx := context.Background()
|
||||
client, err := firestore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
doc, wr, err := client.Collection("Users").Add(ctx, map[string]interface{}{
|
||||
"name": "Alice",
|
||||
"email": "aj@example.com",
|
||||
})
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(doc, wr)
|
||||
}
|
||||
|
||||
func ExampleCollectionRef_Doc() {
|
||||
ctx := context.Background()
|
||||
client, err := firestore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
fl := client.Collection("States").Doc("Florida")
|
||||
ta := client.Collection("States").Doc("Florida/Cities/Tampa")
|
||||
|
||||
fmt.Println(fl, ta)
|
||||
}
|
||||
|
||||
func ExampleCollectionRef_NewDoc() {
|
||||
ctx := context.Background()
|
||||
client, err := firestore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
doc := client.Collection("Users").NewDoc()
|
||||
|
||||
fmt.Println(doc)
|
||||
}
|
||||
|
||||
func ExampleDocumentRef_Collection() {
|
||||
ctx := context.Background()
|
||||
client, err := firestore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
mi := client.Collection("States").Doc("Michigan")
|
||||
cities := mi.Collection("Cities")
|
||||
|
||||
fmt.Println(cities)
|
||||
}
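// A minimal additional sketch: listing the sub-collections of a document with
// the iterator returned by Collections.
func ExampleDocumentRef_Collections_sketch() {
	ctx := context.Background()
	client, err := firestore.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	defer client.Close()

	mi := client.Collection("States").Doc("Michigan")
	iter := mi.Collections(ctx)
	for {
		coll, err := iter.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(coll)
	}
}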
|
||||
|
||||
func ExampleDocumentRef_Create_map() {
|
||||
ctx := context.Background()
|
||||
client, err := firestore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
wr, err := client.Doc("States/Colorado").Create(ctx, map[string]interface{}{
|
||||
"capital": "Denver",
|
||||
"pop": 5.5,
|
||||
})
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(wr.UpdateTime)
|
||||
}
|
||||
|
||||
func ExampleDocumentRef_Create_struct() {
|
||||
ctx := context.Background()
|
||||
client, err := firestore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
type State struct {
|
||||
Capital string `firestore:"capital"`
|
||||
Population float64 `firestore:"pop"` // in millions
|
||||
}
|
||||
|
||||
wr, err := client.Doc("States/Colorado").Create(ctx, State{
|
||||
Capital: "Denver",
|
||||
Population: 5.5,
|
||||
})
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(wr.UpdateTime)
|
||||
}
|
||||
|
||||
func ExampleDocumentRef_Set() {
|
||||
ctx := context.Background()
|
||||
client, err := firestore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
// Overwrite the document with the given data. Any other fields currently
|
||||
// in the document will be removed.
|
||||
wr, err := client.Doc("States/Alabama").Set(ctx, map[string]interface{}{
|
||||
"capital": "Montgomery",
|
||||
"pop": 4.9,
|
||||
})
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(wr.UpdateTime)
|
||||
}
|
||||
|
||||
func ExampleDocumentRef_Set_merge() {
|
||||
ctx := context.Background()
|
||||
client, err := firestore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
// Overwrite only the fields in the map; preserve all others.
|
||||
_, err = client.Doc("States/Alabama").Set(ctx, map[string]interface{}{
|
||||
"pop": 5.2,
|
||||
}, firestore.MergeAll)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
type State struct {
|
||||
Capital string `firestore:"capital"`
|
||||
Population float64 `firestore:"pop"` // in millions
|
||||
}
|
||||
|
||||
// To do a merging Set with struct data, specify the exact fields to overwrite.
|
||||
// MergeAll is disallowed here, because it would probably be a mistake: the "capital"
|
||||
// field would be overwritten with the empty string.
|
||||
_, err = client.Doc("States/Alabama").Set(ctx, State{Population: 5.2}, firestore.Merge("pop"))
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleDocumentRef_UpdateMap() {
|
||||
ctx := context.Background()
|
||||
client, err := firestore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
tenn := client.Doc("States/Tennessee")
|
||||
wr, err := tenn.UpdateMap(ctx, map[string]interface{}{"pop": 6.6})
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(wr.UpdateTime)
|
||||
}
|
||||
|
||||
func ExampleDocumentRef_UpdateStruct() {
|
||||
ctx := context.Background()
|
||||
client, err := firestore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
type State struct {
|
||||
Capital string `firestore:"capital"`
|
||||
Population float64 `firestore:"pop"` // in millions
|
||||
}
|
||||
|
||||
tenn := client.Doc("States/Tennessee")
|
||||
wr, err := tenn.UpdateStruct(ctx, []string{"pop"}, State{
|
||||
Capital: "does not matter",
|
||||
Population: 6.6,
|
||||
})
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(wr.UpdateTime)
|
||||
}
|
||||
|
||||
func ExampleDocumentRef_UpdatePaths() {
|
||||
ctx := context.Background()
|
||||
client, err := firestore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
tenn := client.Doc("States/Tennessee")
|
||||
wr, err := tenn.UpdatePaths(ctx, []firestore.FieldPathUpdate{
|
||||
{Path: []string{"pop"}, Value: 6.6},
|
||||
// This odd field path cannot be expressed using the dot-separated form:
|
||||
{Path: []string{".", "*", "/"}, Value: "odd"},
|
||||
})
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(wr.UpdateTime)
|
||||
}
|
||||
|
||||
func ExampleDocumentRef_Delete() {
|
||||
ctx := context.Background()
|
||||
client, err := firestore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
// Oops, Ontario is a Canadian province...
|
||||
if _, err = client.Doc("States/Ontario").Delete(ctx); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleDocumentRef_Get() {
|
||||
ctx := context.Background()
|
||||
client, err := firestore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
docsnap, err := client.Doc("States/Ohio").Get(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
_ = docsnap // TODO: Use DocumentSnapshot.
|
||||
}
|
||||
|
||||
func ExampleDocumentSnapshot_Data() {
|
||||
ctx := context.Background()
|
||||
client, err := firestore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
docsnap, err := client.Doc("States/Ohio").Get(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
ohioMap := docsnap.Data()
|
||||
fmt.Println(ohioMap["capital"])
|
||||
}
|
||||
|
||||
func ExampleDocumentSnapshot_DataAt() {
|
||||
ctx := context.Background()
|
||||
client, err := firestore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
docsnap, err := client.Doc("States/Ohio").Get(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
cap, err := docsnap.DataAt("capital")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(cap)
|
||||
}
|
||||
|
||||
func ExampleDocumentSnapshot_DataAtPath() {
	ctx := context.Background()
	client, err := firestore.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	defer client.Close()

	docsnap, err := client.Doc("States/Ohio").Get(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	pop, err := docsnap.DataAtPath([]string{"capital", "population"})
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(pop)
}

func ExampleDocumentSnapshot_DataTo() {
	ctx := context.Background()
	client, err := firestore.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	defer client.Close()

	docsnap, err := client.Doc("States/Ohio").Get(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	type State struct {
		Capital    string  `firestore:"capital"`
		Population float64 `firestore:"pop"` // in millions
	}

	var s State
	if err := docsnap.DataTo(&s); err != nil {
		// TODO: Handle error.
	}
	fmt.Println(s)
}

func ExampleQuery_Documents() {
	ctx := context.Background()
	client, err := firestore.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	defer client.Close()

	q := client.Collection("States").Select("pop").
		Where("pop", ">", 10).
		OrderBy("pop", firestore.Desc).
		Limit(10)
	iter1 := q.Documents(ctx)
	_ = iter1 // TODO: Use iter1.

	// You can call Documents directly on a CollectionRef as well.
	iter2 := client.Collection("States").Documents(ctx)
	_ = iter2 // TODO: Use iter2.
}

// This example is just like the one above, but illustrates
// how to use the XXXPath methods of Query for field paths
// that can't be expressed as a dot-separated string.
func ExampleQuery_Documents_path_methods() {
	ctx := context.Background()
	client, err := firestore.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	defer client.Close()

	q := client.Collection("Unusual").SelectPaths([]string{"*"}, []string{"[~]"}).
		WherePath([]string{"/"}, ">", 10).
		OrderByPath([]string{"/"}, firestore.Desc).
		Limit(10)
	iter1 := q.Documents(ctx)
	_ = iter1 // TODO: Use iter1.

	// You can call Documents directly on a CollectionRef as well.
	iter2 := client.Collection("States").Documents(ctx)
	_ = iter2 // TODO: Use iter2.
}

func ExampleDocumentIterator_Next() {
	ctx := context.Background()
	client, err := firestore.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	defer client.Close()

	q := client.Collection("States").
		Where("pop", ">", 10).
		OrderBy("pop", firestore.Desc)
	iter := q.Documents(ctx)
	for {
		doc, err := iter.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(doc.Data())
	}
}

func ExampleDocumentIterator_GetAll() {
	ctx := context.Background()
	client, err := firestore.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	defer client.Close()

	q := client.Collection("States").
		Where("pop", ">", 10).
		OrderBy("pop", firestore.Desc).
		Limit(10) // a good idea with GetAll, to avoid filling memory
	docs, err := q.Documents(ctx).GetAll()
	if err != nil {
		// TODO: Handle error.
	}
	for _, doc := range docs {
		fmt.Println(doc.Data())
	}
}

func ExampleClient_RunTransaction() {
	ctx := context.Background()
	client, err := firestore.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	defer client.Close()

	nm := client.Doc("States/NewMexico")
	err = client.RunTransaction(ctx, func(ctx context.Context, tx *firestore.Transaction) error {
		doc, err := tx.Get(nm) // tx.Get, NOT nm.Get!
		if err != nil {
			return err
		}
		pop, err := doc.DataAt("pop")
		if err != nil {
			return err
		}
		return tx.UpdateMap(nm, map[string]interface{}{
			"pop": pop.(float64) + 0.2,
		})
	})
	if err != nil {
		// TODO: Handle error.
	}
}
222 vendor/cloud.google.com/go/firestore/fieldpath.go generated vendored Normal file
@@ -0,0 +1,222 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package firestore

import (
	"bytes"
	"errors"
	"fmt"
	"regexp"
	"sort"
	"strings"
)

// A FieldPath is a non-empty sequence of non-empty fields that reference a value.
//
// A FieldPath value should only be necessary if one of the field names contains
// one of the runes ".~*/[]". Most methods accept a simpler form of field path
// as a string in which the individual fields are separated by dots.
// For example,
//   []string{"a", "b"}
// is equivalent to the string form
//   "a.b"
// but
//   []string{"*"}
// has no equivalent string form.
type FieldPath []string

// parseDotSeparatedString constructs a FieldPath from a string that separates
// path components with dots. Other than splitting at dots and checking for invalid
// characters, it ignores everything else about the string,
// including attempts to quote field path components. So "a.`b.c`.d" is parsed into
// four parts, "a", "`b", "c`" and "d".
func parseDotSeparatedString(s string) (FieldPath, error) {
	const invalidRunes = "~*/[]"
	if strings.ContainsAny(s, invalidRunes) {
		return nil, fmt.Errorf("firestore: %q contains an invalid rune (one of %s)", s, invalidRunes)
	}
	fp := FieldPath(strings.Split(s, "."))
	if err := fp.validate(); err != nil {
		return nil, err
	}
	return fp, nil
}
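For illustration, a dotted string such as "a.b.c" simply splits into three components, while a string containing one of the invalid runes is rejected and has to be supplied as a FieldPath instead. A minimal, hypothetical sketch, assuming it sits inside the firestore package (parseDotSeparatedString is unexported):

func sketchParseDotSeparated() {
	fp, err := parseDotSeparatedString("a.b.c")
	fmt.Println(fp, err) // [a b c] <nil>

	// "*" contains an invalid rune, so the dotted form is rejected;
	// callers would pass FieldPath{"*"} (e.g. via SelectPaths) instead.
	_, err = parseDotSeparatedString("*")
	fmt.Println(err != nil) // true
}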

func parseDotSeparatedStrings(strs []string) ([]FieldPath, error) {
	var fps []FieldPath
	for _, s := range strs {
		fp, err := parseDotSeparatedString(s)
		if err != nil {
			return nil, err
		}
		fps = append(fps, fp)
	}
	return fps, nil
}

func (fp1 FieldPath) equal(fp2 FieldPath) bool {
	if len(fp1) != len(fp2) {
		return false
	}
	for i, c1 := range fp1 {
		if c1 != fp2[i] {
			return false
		}
	}
	return true
}

func (fp1 FieldPath) prefixOf(fp2 FieldPath) bool {
	return len(fp1) <= len(fp2) && fp1.equal(fp2[:len(fp1)])
}

// Lexicographic ordering.
func (fp1 FieldPath) less(fp2 FieldPath) bool {
	for i := range fp1 {
		switch {
		case i >= len(fp2):
			return false
		case fp1[i] < fp2[i]:
			return true
		case fp1[i] > fp2[i]:
			return false
		}
	}
	// fp1 and fp2 are equal up to len(fp1).
	return len(fp1) < len(fp2)
}

// validate checks the validity of fp and returns an error if it is invalid.
func (fp FieldPath) validate() error {
	if len(fp) == 0 {
		return errors.New("firestore: empty field path")
	}
	for _, c := range fp {
		if len(c) == 0 {
			return errors.New("firestore: empty component in field path")
		}
	}
	return nil
}

// with creates a new FieldPath consisting of fp followed by k.
func (fp FieldPath) with(k string) FieldPath {
	r := make(FieldPath, len(fp), len(fp)+1)
	copy(r, fp)
	return append(r, k)
}

// in reports whether fp is equal to one of the fps.
func (fp FieldPath) in(fps []FieldPath) bool {
	for _, e := range fps {
		if fp.equal(e) {
			return true
		}
	}
	return false
}

// checkNoDupOrPrefix checks whether any FieldPath is a prefix of (or equal to)
// another.
// It modifies the order of FieldPaths in its argument (via sorting).
func checkNoDupOrPrefix(fps []FieldPath) error {
	// Sort fps lexicographically.
	sort.Sort(byPath(fps))
	// Check adjacent pairs for prefix.
	for i := 1; i < len(fps); i++ {
		if fps[i-1].prefixOf(fps[i]) {
			return fmt.Errorf("field path %v cannot be used in the same update as %v", fps[i-1], fps[i])
		}
	}
	return nil
}

type byPath []FieldPath

func (b byPath) Len() int           { return len(b) }
func (b byPath) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b byPath) Less(i, j int) bool { return b[i].less(b[j]) }

// createMapFromFieldPathUpdates uses fpus to construct a valid Firestore data value
// in the form of a map. It assumes the FieldPaths in fpus have already been
// validated and checked for prefixes. If any field path is associated with the
// Delete value, it is not stored in the map.
func createMapFromFieldPathUpdates(fpus []FieldPathUpdate) map[string]interface{} {
	m := map[string]interface{}{}
	for _, fpu := range fpus {
		if fpu.Value != Delete {
			setAtPath(m, fpu.Path, fpu.Value)
		}
	}
	return m
}

// setAtPath sets val at the location in m specified by fp, creating sub-maps as
// needed. m must not be nil. fp is assumed to be valid.
func setAtPath(m map[string]interface{}, fp FieldPath, val interface{}) {
	if len(fp) == 1 {
		m[fp[0]] = val
	} else {
		v, ok := m[fp[0]]
		if !ok {
			v = map[string]interface{}{}
			m[fp[0]] = v
		}
		// The type assertion below cannot fail, because setAtPath is only called
		// with either an empty map or one filled by setAtPath itself, and the
		// set of FieldPaths it is called with has been checked to make sure that
		// no path is the prefix of any other.
		setAtPath(v.(map[string]interface{}), fp[1:], val)
	}
}
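As a worked example of the two helpers above (mirroring TestCreateMapFromFieldPathUpdates in the test file that follows), two prefix-free paths under "a" end up in a single nested sub-map. A hypothetical in-package sketch:

func sketchSetAtPath() {
	m := map[string]interface{}{}
	setAtPath(m, FieldPath{"a", "b"}, 1)
	setAtPath(m, FieldPath{"a", "c"}, 2)
	fmt.Println(m) // one nested map: a -> {b: 1, c: 2}
}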

// toServiceFieldPath converts fp to the form required by the Firestore service.
// It assumes fp has been validated.
func (fp FieldPath) toServiceFieldPath() string {
	cs := make([]string, len(fp))
	for i, c := range fp {
		cs[i] = toServiceFieldPathComponent(c)
	}
	return strings.Join(cs, ".")
}

func toServiceFieldPaths(fps []FieldPath) []string {
	var sfps []string
	for _, fp := range fps {
		sfps = append(sfps, fp.toServiceFieldPath())
	}
	return sfps
}

// Google SQL syntax for an unquoted field.
var unquotedFieldRegexp = regexp.MustCompile("^[A-Za-z_][A-Za-z_0-9]*$")

// toServiceFieldPathComponent returns a string that represents key and is a valid
// field path component.
func toServiceFieldPathComponent(key string) string {
	if unquotedFieldRegexp.MatchString(key) {
		return key
	}
	var buf bytes.Buffer
	buf.WriteRune('`')
	for _, r := range key {
		if r == '`' || r == '\\' {
			buf.WriteRune('\\')
		}
		buf.WriteRune(r)
	}
	buf.WriteRune('`')
	return buf.String()
}
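The quoting rule above leaves plain identifiers alone and wraps everything else in backquotes, escaping backquotes and backslashes. A hypothetical in-package sketch whose expected outputs are taken from the tests below:

func sketchServiceFieldPath() {
	fmt.Println(toServiceFieldPathComponent("clam_chowder23"))     // clam_chowder23
	fmt.Println(toServiceFieldPathComponent("23skidoo"))           // `23skidoo`
	fmt.Println(toServiceFieldPathComponent("dots.are.confusing")) // `dots.are.confusing`
}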
152 vendor/cloud.google.com/go/firestore/fieldpath_test.go generated vendored Normal file
@@ -0,0 +1,152 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package firestore

import (
	"strings"
	"testing"
)

func TestFieldPathValidate(t *testing.T) {
	for _, in := range [][]string{nil, []string{}, []string{"a", "", "b"}} {
		if err := FieldPath(in).validate(); err == nil {
			t.Errorf("%v: want error, got nil", in)
		}
	}
}

func TestFieldPathLess(t *testing.T) {
	for _, test := range []struct {
		in1, in2 string
		want     bool
	}{
		{"a b", "a b", false},
		{"a", "b", true},
		{"b", "a", false},
		{"a", "a b", true},
		{"a b", "a", false},
		{"a b c", "a b d", true},
		{"a b d", "a b c", false},
	} {
		fp1 := FieldPath(strings.Fields(test.in1))
		fp2 := FieldPath(strings.Fields(test.in2))
		got := fp1.less(fp2)
		if got != test.want {
			t.Errorf("%q.less(%q): got %t, want %t", test.in1, test.in2, got, test.want)
		}
	}
}

func TestCheckForPrefix(t *testing.T) {
	for _, test := range []struct {
		in      []string // field paths as space-separated strings
		wantErr bool
	}{
		{in: []string{"a", "b", "c"}, wantErr: false},
		{in: []string{"a b", "b", "c d"}, wantErr: false},
		{in: []string{"a b", "a c", "a d"}, wantErr: false},
		{in: []string{"a b", "b", "b d"}, wantErr: true},
		{in: []string{"a b", "b", "b d"}, wantErr: true},
		{in: []string{"b c d", "c d", "b c"}, wantErr: true},
	} {
		var fps []FieldPath
		for _, s := range test.in {
			fps = append(fps, strings.Fields(s))
		}
		err := checkNoDupOrPrefix(fps)
		if got, want := (err != nil), test.wantErr; got != want {
			t.Errorf("%#v: got '%v', want %t", test.in, err, want)
		}
	}
}

// Convenience function for creating a FieldPathUpdate.
func fpu(val int, fields ...string) FieldPathUpdate {
	return FieldPathUpdate{Path: fields, Value: val}
}

func TestCreateMapFromFieldPathUpdates(t *testing.T) {
	type M map[string]interface{}

	for _, test := range []struct {
		in   []FieldPathUpdate
		want M
	}{
		{
			in:   nil,
			want: M{},
		},
		{
			in:   []FieldPathUpdate{fpu(1, "a"), fpu(2, "b")},
			want: M{"a": 1, "b": 2},
		},
		{
			in:   []FieldPathUpdate{fpu(1, "a", "b"), fpu(2, "c")},
			want: M{"a": map[string]interface{}{"b": 1}, "c": 2},
		},
		{
			in: []FieldPathUpdate{fpu(1, "a", "b"), fpu(2, "c", "d")},
			want: M{
				"a": map[string]interface{}{"b": 1},
				"c": map[string]interface{}{"d": 2},
			},
		},
		{
			in:   []FieldPathUpdate{fpu(1, "a", "b"), fpu(2, "a", "c")},
			want: M{"a": map[string]interface{}{"b": 1, "c": 2}},
		},
	} {
		gotm := createMapFromFieldPathUpdates(test.in)
		got := M(gotm)
		if !testEqual(got, test.want) {
			t.Errorf("%v: got %#v, want %#v", test.in, got, test.want)
		}
	}
}

func TestToServiceFieldPath(t *testing.T) {
	for _, test := range []struct {
		in   FieldPath
		want string
	}{
		{[]string{"a"}, "a"},
		{[]string{"a", "b"}, "a.b"},
		{[]string{"a.", "[b*", "c2"}, "`a.`.`[b*`.c2"},
		{[]string{"`a", `b\`}, "`\\`a`.`b\\\\`"},
	} {
		got := test.in.toServiceFieldPath()
		if got != test.want {
			t.Errorf("%v: got %s, want %s", test.in, got, test.want)
		}
	}
}

func TestToServiceFieldPathComponent(t *testing.T) {
	for _, test := range []struct {
		in, want string
	}{
		{"", "``"},
		{"clam_chowder23", "clam_chowder23"},
		{"23skidoo", "`23skidoo`"},
		{"bak`tik", "`bak\\`tik`"},
		{"a\\b", "`a\\\\b`"},
		{"dots.are.confusing", "`dots.are.confusing`"},
	} {
		got := toServiceFieldPathComponent(test.in)
		if got != test.want {
			t.Errorf("%q: got %q, want %q", test.in, got, test.want)
		}
	}
}
400 vendor/cloud.google.com/go/firestore/from_value.go generated vendored Normal file
@@ -0,0 +1,400 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package firestore

import (
	"errors"
	"fmt"
	"reflect"
	"strings"

	pb "google.golang.org/genproto/googleapis/firestore/v1beta1"

	"github.com/golang/protobuf/ptypes"
)

func setFromProtoValue(x interface{}, vproto *pb.Value, c *Client) error {
	v := reflect.ValueOf(x)
	if v.Kind() != reflect.Ptr || v.IsNil() {
		return errors.New("firestore: nil or not a pointer")
	}
	return setReflectFromProtoValue(v.Elem(), vproto, c)
}

// setReflectFromProtoValue sets v from a Firestore Value.
// v must be a settable value.
func setReflectFromProtoValue(v reflect.Value, vproto *pb.Value, c *Client) error {
	typeErr := func() error {
		return fmt.Errorf("firestore: cannot set type %s to %s", v.Type(), typeString(vproto))
	}

	val := vproto.ValueType
	// A Null value sets anything nullable to nil, and has no effect
	// on anything else.
	if _, ok := val.(*pb.Value_NullValue); ok {
		switch v.Kind() {
		case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
			v.Set(reflect.Zero(v.Type()))
		}
		return nil
	}

	// Handle special types first.
	switch v.Type() {
	case typeOfByteSlice:
		x, ok := val.(*pb.Value_BytesValue)
		if !ok {
			return typeErr()
		}
		v.SetBytes(x.BytesValue)
		return nil

	case typeOfGoTime:
		x, ok := val.(*pb.Value_TimestampValue)
		if !ok {
			return typeErr()
		}
		t, err := ptypes.Timestamp(x.TimestampValue)
		if err != nil {
			return err
		}
		v.Set(reflect.ValueOf(t))
		return nil

	case typeOfLatLng:
		x, ok := val.(*pb.Value_GeoPointValue)
		if !ok {
			return typeErr()
		}
		v.Set(reflect.ValueOf(x.GeoPointValue))
		return nil

	case typeOfDocumentRef:
		x, ok := val.(*pb.Value_ReferenceValue)
		if !ok {
			return typeErr()
		}
		dr, err := pathToDoc(x.ReferenceValue, c)
		if err != nil {
			return err
		}
		v.Set(reflect.ValueOf(dr))
		return nil
	}

	switch v.Kind() {
	case reflect.Bool:
		x, ok := val.(*pb.Value_BooleanValue)
		if !ok {
			return typeErr()
		}
		v.SetBool(x.BooleanValue)

	case reflect.String:
		x, ok := val.(*pb.Value_StringValue)
		if !ok {
			return typeErr()
		}
		v.SetString(x.StringValue)

	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		x, ok := val.(*pb.Value_IntegerValue)
		if !ok {
			return typeErr()
		}
		i := x.IntegerValue
		if v.OverflowInt(i) {
			return overflowErr(v, i)
		}
		v.SetInt(i)

	case reflect.Uint8, reflect.Uint16, reflect.Uint32:
		x, ok := val.(*pb.Value_IntegerValue)
		if !ok {
			return typeErr()
		}
		u := uint64(x.IntegerValue)
		if v.OverflowUint(u) {
			return overflowErr(v, u)
		}
		v.SetUint(u)

	case reflect.Float32, reflect.Float64:
		x, ok := val.(*pb.Value_DoubleValue)
		if !ok {
			return typeErr()
		}
		f := x.DoubleValue
		if v.OverflowFloat(f) {
			return overflowErr(v, f)
		}
		v.SetFloat(f)

	case reflect.Slice:
		x, ok := val.(*pb.Value_ArrayValue)
		if !ok {
			return typeErr()
		}
		vals := x.ArrayValue.Values
		vlen := v.Len()
		xlen := len(vals)
		// Make a slice of the right size, avoiding allocation if possible.
		switch {
		case vlen < xlen:
			v.Set(reflect.MakeSlice(v.Type(), xlen, xlen))
		case vlen > xlen:
			v.SetLen(xlen)
		}
		return populateRepeated(v, vals, xlen, c)

	case reflect.Array:
		x, ok := val.(*pb.Value_ArrayValue)
		if !ok {
			return typeErr()
		}
		vals := x.ArrayValue.Values
		xlen := len(vals)
		vlen := v.Len()
		minlen := vlen
		// Set extra elements to their zero value.
		if vlen > xlen {
			z := reflect.Zero(v.Type().Elem())
			for i := xlen; i < vlen; i++ {
				v.Index(i).Set(z)
			}
			minlen = xlen
		}
		return populateRepeated(v, vals, minlen, c)

	case reflect.Map:
		x, ok := val.(*pb.Value_MapValue)
		if !ok {
			return typeErr()
		}
		return populateMap(v, x.MapValue.Fields, c)

	case reflect.Ptr:
		// If the pointer is nil, set it to a zero value.
		if v.IsNil() {
			v.Set(reflect.New(v.Type().Elem()))
		}
		return setReflectFromProtoValue(v.Elem(), vproto, c)

	case reflect.Struct:
		x, ok := val.(*pb.Value_MapValue)
		if !ok {
			return typeErr()
		}
		return populateStruct(v, x.MapValue.Fields, c)

	case reflect.Interface:
		if v.NumMethod() == 0 { // empty interface
			// If v holds a pointer, set the pointer.
			if !v.IsNil() && v.Elem().Kind() == reflect.Ptr {
				return setReflectFromProtoValue(v.Elem(), vproto, c)
			}
			// Otherwise, create a fresh value.
			x, err := createFromProtoValue(vproto, c)
			if err != nil {
				return err
			}
			v.Set(reflect.ValueOf(x))
			return nil
		}
		// Any other kind of interface is an error.
		fallthrough

	default:
		return fmt.Errorf("firestore: cannot set type %s", v.Type())
	}
	return nil
}

// populateRepeated sets the first n elements of vr, which must be a slice or
// array, to the corresponding elements of vals.
func populateRepeated(vr reflect.Value, vals []*pb.Value, n int, c *Client) error {
	for i := 0; i < n; i++ {
		if err := setReflectFromProtoValue(vr.Index(i), vals[i], c); err != nil {
			return err
		}
	}
	return nil
}

// populateMap sets the elements of vm, which must be a map, from the
// corresponding elements of pm.
//
// Since a map value is not settable, this function always creates a new
// element for each corresponding map key. Existing values of vm are
// overwritten. This happens even if the map value is something like a pointer
// to a struct, where we could in theory populate the existing struct value
// instead of discarding it. This behavior matches encoding/json.
func populateMap(vm reflect.Value, pm map[string]*pb.Value, c *Client) error {
	t := vm.Type()
	if t.Key().Kind() != reflect.String {
		return errors.New("firestore: map key type is not string")
	}
	if vm.IsNil() {
		vm.Set(reflect.MakeMap(t))
	}
	et := t.Elem()
	for k, vproto := range pm {
		el := reflect.New(et).Elem()
		if err := setReflectFromProtoValue(el, vproto, c); err != nil {
			return err
		}
		vm.SetMapIndex(reflect.ValueOf(k), el)
	}
	return nil
}

// createMapFromValueMap creates a fresh map and populates it with pm.
func createMapFromValueMap(pm map[string]*pb.Value, c *Client) (map[string]interface{}, error) {
	m := map[string]interface{}{}
	for k, pv := range pm {
		v, err := createFromProtoValue(pv, c)
		if err != nil {
			return nil, err
		}
		m[k] = v
	}
	return m, nil
}

// populateStruct sets the fields of vs, which must be a struct, from
// the matching elements of pm.
func populateStruct(vs reflect.Value, pm map[string]*pb.Value, c *Client) error {
	fields, err := fieldCache.Fields(vs.Type())
	if err != nil {
		return err
	}
	for k, vproto := range pm {
		f := fields.Match(k)
		if f == nil {
			continue
		}
		if err := setReflectFromProtoValue(vs.FieldByIndex(f.Index), vproto, c); err != nil {
			return fmt.Errorf("%s.%s: %v", vs.Type(), f.Name, err)
		}
	}
	return nil
}

func createFromProtoValue(vproto *pb.Value, c *Client) (interface{}, error) {
	switch v := vproto.ValueType.(type) {
	case *pb.Value_NullValue:
		return nil, nil
	case *pb.Value_BooleanValue:
		return v.BooleanValue, nil
	case *pb.Value_IntegerValue:
		return v.IntegerValue, nil
	case *pb.Value_DoubleValue:
		return v.DoubleValue, nil
	case *pb.Value_TimestampValue:
		return ptypes.Timestamp(v.TimestampValue)
	case *pb.Value_StringValue:
		return v.StringValue, nil
	case *pb.Value_BytesValue:
		return v.BytesValue, nil
	case *pb.Value_ReferenceValue:
		return pathToDoc(v.ReferenceValue, c)
	case *pb.Value_GeoPointValue:
		return v.GeoPointValue, nil

	case *pb.Value_ArrayValue:
		vals := v.ArrayValue.Values
		ret := make([]interface{}, len(vals))
		for i, v := range vals {
			r, err := createFromProtoValue(v, c)
			if err != nil {
				return nil, err
			}
			ret[i] = r
		}
		return ret, nil

	case *pb.Value_MapValue:
		fields := v.MapValue.Fields
		ret := make(map[string]interface{}, len(fields))
		for k, v := range fields {
			r, err := createFromProtoValue(v, c)
			if err != nil {
				return nil, err
			}
			ret[k] = r
		}
		return ret, nil

	default:
		return nil, fmt.Errorf("firestore: unknown value type %T", v)
	}
}

// Convert a document path to a DocumentRef.
func pathToDoc(docPath string, c *Client) (*DocumentRef, error) {
	projID, dbID, docIDs, err := parseDocumentPath(docPath)
	if err != nil {
		return nil, err
	}
	parentResourceName := fmt.Sprintf("projects/%s/databases/%s", projID, dbID)
	_, doc := c.idsToRef(docIDs, parentResourceName)
	return doc, nil
}

// A document path should be of the form "projects/P/databases/D/documents/coll1/doc1/coll2/doc2/...".
func parseDocumentPath(path string) (projectID, databaseID string, docPath []string, err error) {
	parts := strings.Split(path, "/")
	if len(parts) < 6 || parts[0] != "projects" || parts[2] != "databases" || parts[4] != "documents" {
		return "", "", nil, fmt.Errorf("firestore: malformed document path %q", path)
	}
	docp := parts[5:]
	if len(docp)%2 != 0 {
		return "", "", nil, fmt.Errorf("firestore: path %q refers to collection, not document", path)
	}
	return parts[1], parts[3], docp, nil
}
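For example, a full reference path splits into the project ID, the database ID, and an even-length list of alternating collection and document IDs; an odd count means the path names a collection and is rejected. A hypothetical in-package sketch (outputs mirror TestParseDocumentPath below):

func sketchParseDocumentPath() {
	pid, db, ids, err := parseDocumentPath("projects/P/databases/D/documents/c1/d1/c2/d2")
	fmt.Println(pid, db, ids, err) // P D [c1 d1 c2 d2] <nil>

	_, _, _, err = parseDocumentPath("projects/P/databases/D/documents/c1")
	fmt.Println(err != nil) // true: refers to a collection, not a document
}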

func typeString(vproto *pb.Value) string {
	switch vproto.ValueType.(type) {
	case *pb.Value_NullValue:
		return "null"
	case *pb.Value_BooleanValue:
		return "bool"
	case *pb.Value_IntegerValue:
		return "int"
	case *pb.Value_DoubleValue:
		return "float"
	case *pb.Value_TimestampValue:
		return "timestamp"
	case *pb.Value_StringValue:
		return "string"
	case *pb.Value_BytesValue:
		return "bytes"
	case *pb.Value_ReferenceValue:
		return "reference"
	case *pb.Value_GeoPointValue:
		return "GeoPoint"
	case *pb.Value_MapValue:
		return "map"
	case *pb.Value_ArrayValue:
		return "array"
	default:
		return "<unknown Value type>"
	}
}

func overflowErr(v reflect.Value, x interface{}) error {
	return fmt.Errorf("firestore: value %v overflows type %s", x, v.Type())
}
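One behavior of setReflectFromProtoValue worth calling out: a protobuf Null zeroes only nullable kinds (pointer, map, slice, interface) and silently leaves scalars untouched. A hypothetical sketch, assuming the package's test helper nullValue (used in the tests that follow) is in scope:

func sketchNullHandling() {
	f := new(float64)
	*f = 7
	pf := &f
	_ = setFromProtoValue(&pf, nullValue, nil) // pointer kind: set to nil
	fmt.Println(pf == nil)                     // true

	n := 3
	_ = setFromProtoValue(&n, nullValue, nil) // int kind: left unchanged
	fmt.Println(n)                            // 3
}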
541 vendor/cloud.google.com/go/firestore/from_value_test.go generated vendored Normal file
@@ -0,0 +1,541 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package firestore

import (
	"encoding/json"
	"fmt"
	"io"
	"math"
	"reflect"
	"strings"
	"testing"
	"time"

	pb "google.golang.org/genproto/googleapis/firestore/v1beta1"

	"google.golang.org/genproto/googleapis/type/latlng"
)

var (
	tm = time.Date(2016, 12, 25, 0, 0, 0, 123456789, time.UTC)
	ll = &latlng.LatLng{Latitude: 20, Longitude: 30}
)

func TestCreateFromProtoValue(t *testing.T) {
	for _, test := range []struct {
		in   *pb.Value
		want interface{}
	}{
		{in: nullValue, want: nil},
		{in: boolval(true), want: true},
		{in: intval(3), want: int64(3)},
		{in: floatval(1.5), want: 1.5},
		{in: strval("str"), want: "str"},
		{in: tsval(tm), want: tm},
		{
			in:   bytesval([]byte{1, 2}),
			want: []byte{1, 2},
		},
		{
			in:   &pb.Value{&pb.Value_GeoPointValue{ll}},
			want: ll,
		},
		{
			in:   arrayval(intval(1), intval(2)),
			want: []interface{}{int64(1), int64(2)},
		},
		{
			in:   arrayval(),
			want: []interface{}{},
		},
		{
			in:   mapval(map[string]*pb.Value{"a": intval(1), "b": intval(2)}),
			want: map[string]interface{}{"a": int64(1), "b": int64(2)},
		},
		{
			in:   mapval(map[string]*pb.Value{}),
			want: map[string]interface{}{},
		},
		{
			in: refval("projects/P/databases/D/documents/c/d"),
			want: &DocumentRef{
				ID: "d",
				Parent: &CollectionRef{
					ID:         "c",
					parentPath: "projects/P/databases/D",
					Path:       "projects/P/databases/D/documents/c",
					Query:      Query{collectionID: "c", parentPath: "projects/P/databases/D"},
				},
				Path: "projects/P/databases/D/documents/c/d",
			},
		},
	} {
		got, err := createFromProtoValue(test.in, nil)
		if err != nil {
			t.Errorf("%v: %v", test.in, err)
			continue
		}
		if !testEqual(got, test.want) {
			t.Errorf("%+v:\ngot\n%#v\nwant\n%#v", test.in, got, test.want)
		}
	}
}

func TestSetFromProtoValue(t *testing.T) {
	testSetFromProtoValue(t, "json", jsonTester{})
	testSetFromProtoValue(t, "firestore", protoTester{})
}

func testSetFromProtoValue(t *testing.T, prefix string, r tester) {
	pi := newfloat(7)
	s := []float64{7, 8}
	ar1 := [1]float64{7}
	ar2 := [2]float64{7, 8}
	ar3 := [3]float64{7, 8, 9}
	mf := map[string]float64{"a": 7}

	type T struct {
		I **float64
		J float64
	}

	one := newfloat(1)
	six := newfloat(6)
	st := []*T{&T{I: &six}, nil, &T{I: &six, J: 7}}
	vs := interface{}(T{J: 1})
	vm := interface{}(map[string]float64{"i": 1})
	var (
		i   int
		i8  int8
		i16 int16
		i32 int32
		i64 int64
		u8  uint8
		u16 uint16
		u32 uint32
		b   bool
		ll  *latlng.LatLng
		mi  map[string]interface{}
		ms  map[string]T
	)

	for i, test := range []struct {
		in   interface{}
		val  interface{}
		want interface{}
	}{
		{&pi, r.Null(), (*float64)(nil)},
		{pi, r.Float(1), 1.0},
		{&s, r.Null(), ([]float64)(nil)},
		{&s, r.Array(r.Float(1), r.Float(2)), []float64{1, 2}},
		{&ar1, r.Array(r.Float(1), r.Float(2)), [1]float64{1}},
		{&ar2, r.Array(r.Float(1), r.Float(2)), [2]float64{1, 2}},
		{&ar3, r.Array(r.Float(1), r.Float(2)), [3]float64{1, 2, 0}},
		{&mf, r.Null(), (map[string]float64)(nil)},
		{&mf, r.Map("a", r.Float(1), "b", r.Float(2)), map[string]float64{"a": 1, "b": 2}},
		{&st, r.Array(
			r.Null(),               // overwrites st[0] with nil
			r.Map("i", r.Float(1)), // sets st[1] to a new struct
			r.Map("i", r.Float(2)), // modifies st[2]
		),
			[]*T{nil, &T{I: &one}, &T{I: &six, J: 7}}},
		{&mi, r.Map("a", r.Float(1), "b", r.Float(2)), map[string]interface{}{"a": 1.0, "b": 2.0}},
		{&ms, r.Map("a", r.Map("j", r.Float(1))), map[string]T{"a": T{J: 1}}},
		{&vs, r.Map("i", r.Float(2)), map[string]interface{}{"i": 2.0}},
		{&vm, r.Map("i", r.Float(2)), map[string]interface{}{"i": 2.0}},
		{&ll, r.Null(), (*latlng.LatLng)(nil)},
		{&i, r.Int(1), int(1)},
		{&i8, r.Int(1), int8(1)},
		{&i16, r.Int(1), int16(1)},
		{&i32, r.Int(1), int32(1)},
		{&i64, r.Int(1), int64(1)},
		{&u8, r.Int(1), uint8(1)},
		{&u16, r.Int(1), uint16(1)},
		{&u32, r.Int(1), uint32(1)},
		{&b, r.Bool(true), true},
	} {
		if err := r.Set(test.in, test.val); err != nil {
			t.Errorf("%s: #%d: got error %v", prefix, i, err)
			continue
		}
		got := reflect.ValueOf(test.in).Elem().Interface()
		if !testEqual(got, test.want) {
			t.Errorf("%s: #%d, %v:\ngot\n%+v (%T)\nwant\n%+v (%T)",
				prefix, i, test.val, got, got, test.want, test.want)
		}
	}
}

func TestSetFromProtoValueNoJSON(t *testing.T) {
	// Test code paths that we cannot compare to JSON.
	var (
		bs  []byte
		tmi time.Time
		lli *latlng.LatLng
	)
	bytes := []byte{1, 2, 3}

	for i, test := range []struct {
		in   interface{}
		val  *pb.Value
		want interface{}
	}{
		{&bs, bytesval(bytes), bytes},
		{&tmi, tsval(tm), tm},
		{&lli, geoval(ll), ll},
	} {
		if err := setFromProtoValue(test.in, test.val, &Client{}); err != nil {
			t.Errorf("#%d: got error %v", i, err)
			continue
		}
		got := reflect.ValueOf(test.in).Elem().Interface()
		if !testEqual(got, test.want) {
			t.Errorf("#%d, %v:\ngot\n%+v (%T)\nwant\n%+v (%T)",
				i, test.val, got, got, test.want, test.want)
		}
	}
}

func TestSetFromProtoValueErrors(t *testing.T) {
	c := &Client{}
	ival := intval(3)
	for _, test := range []struct {
		in  interface{}
		val *pb.Value
	}{
		{3, ival},                // not a pointer
		{new(int8), intval(128)}, // int overflow
		{new(uint8), intval(256)},                     // uint overflow
		{new(float32), floatval(2 * math.MaxFloat32)}, // float overflow
		{new(uint), ival},      // cannot set type
		{new(uint64), ival},    // cannot set type
		{new(io.Reader), ival}, // cannot set type
		{new(map[int]int),
			mapval(map[string]*pb.Value{"x": ival})}, // map key type is not string
		// the rest are all type mismatches
		{new(bool), ival},
		{new(*latlng.LatLng), ival},
		{new(time.Time), ival},
		{new(string), ival},
		{new(float64), ival},
		{new([]byte), ival},
		{new([]int), ival},
		{new([1]int), ival},
		{new(map[string]int), ival},
		{new(*bool), ival},
		{new(struct{}), ival},
		{new(int), floatval(3)},
		{new(uint16), floatval(3)},
	} {
		err := setFromProtoValue(test.in, test.val, c)
		if err == nil {
			t.Errorf("%v, %v: got nil, want error", test.in, test.val)
		}
	}
}

func TestSetFromProtoValuePointers(t *testing.T) {
	// Verify that pointers are set, instead of being replaced.
	// Confirm that the behavior matches encoding/json.
	testSetPointer(t, "json", jsonTester{})
	testSetPointer(t, "firestore", protoTester{&Client{}})
}

func testSetPointer(t *testing.T, prefix string, r tester) {
	// If an interface{} holds a pointer, the pointer is set.

	set := func(x, val interface{}) {
		if err := r.Set(x, val); err != nil {
			t.Fatalf("%s: set(%v, %v): %v", prefix, x, val, err)
		}
	}
	p := new(float64)
	var st struct {
		I interface{}
	}

	// A pointer in a slice of interface{} is set.
	s := []interface{}{p}
	set(&s, r.Array(r.Float(1)))
	if s[0] != p {
		t.Errorf("%s: pointers not identical", prefix)
	}
	if *p != 1 {
		t.Errorf("%s: got %f, want 1", prefix, *p)
	}
	// Setting a null will set the pointer to nil.
	set(&s, r.Array(r.Null()))
	if got := s[0]; got != nil {
		t.Errorf("%s: got %v, want null", prefix, got)
	}

	// It doesn't matter how deep the pointers nest.
	p = new(float64)
	p2 := &p
	p3 := &p2
	s = []interface{}{p3}
	set(&s, r.Array(r.Float(1)))
	if s[0] != p3 {
		t.Errorf("%s: pointers not identical", prefix)
	}
	if *p != 1 {
		t.Errorf("%s: got %f, want 1", prefix, *p)
	}

	// A pointer in an interface{} field is set.
	p = new(float64)
	st.I = p
	set(&st, r.Map("i", r.Float(1)))
	if st.I != p {
		t.Errorf("%s: pointers not identical", prefix)
	}
	if *p != 1 {
		t.Errorf("%s: got %f, want 1", prefix, *p)
	}
	// Setting a null will set the pointer to nil.
	set(&st, r.Map("i", r.Null()))
	if got := st.I; got != nil {
		t.Errorf("%s: got %v, want null", prefix, got)
	}

	// A pointer to a slice (instead of to float64) is set.
	psi := &[]float64{7, 8, 9}
	st.I = psi
	set(&st, r.Map("i", r.Array(r.Float(1))))
	if st.I != psi {
		t.Errorf("%s: pointers not identical", prefix)
	}
	// The slice itself should be truncated and filled, not replaced.
	if got, want := cap(*psi), 3; got != want {
		t.Errorf("cap: got %d, want %d", got, want)
	}
	if want := &[]float64{1}; !testEqual(st.I, want) {
		t.Errorf("got %+v, want %+v", st.I, want)
	}

	// A pointer to a map is set.
	pmf := &map[string]float64{"a": 7, "b": 8}
	st.I = pmf
	set(&st, r.Map("i", r.Map("a", r.Float(1))))
	if st.I != pmf {
		t.Errorf("%s: pointers not identical", prefix)
	}
	if want := map[string]float64{"a": 1, "b": 8}; !testEqual(*pmf, want) {
		t.Errorf("%s: got %+v, want %+v", prefix, *pmf, want)
	}

	// Maps are different: since the map values aren't addressable, they
	// are always discarded, even if the map element type is not interface{}.

	// A map's values are discarded if the value type is a pointer type.
	p = new(float64)
	m := map[string]*float64{"i": p}
	set(&m, r.Map("i", r.Float(1)))
	if m["i"] == p {
		t.Errorf("%s: pointers are identical", prefix)
	}
	if got, want := *m["i"], 1.0; got != want {
		t.Errorf("%s: got %v, want %v", prefix, got, want)
	}
	// A map's values are discarded if the value type is interface{}.
	p = new(float64)
	m2 := map[string]interface{}{"i": p}
	set(&m2, r.Map("i", r.Float(1)))
	if m2["i"] == p {
		t.Errorf("%s: pointers are identical", prefix)
	}
	if got, want := m2["i"].(float64), 1.0; got != want {
		t.Errorf("%s: got %f, want %f", prefix, got, want)
	}
}

// An interface for setting and building values, to facilitate comparing firestore deserialization
// with encoding/json.
type tester interface {
	Set(x, val interface{}) error
	Null() interface{}
	Int(int) interface{}
	Float(float64) interface{}
	Bool(bool) interface{}
	Array(...interface{}) interface{}
	Map(keysvals ...interface{}) interface{}
}

type protoTester struct {
	c *Client
}

func (p protoTester) Set(x, val interface{}) error { return setFromProtoValue(x, val.(*pb.Value), p.c) }
func (protoTester) Null() interface{}              { return nullValue }
func (protoTester) Int(i int) interface{}          { return intval(i) }
func (protoTester) Float(f float64) interface{}    { return floatval(f) }
func (protoTester) Bool(b bool) interface{}        { return boolval(b) }

func (protoTester) Array(els ...interface{}) interface{} {
	var s []*pb.Value
	for _, el := range els {
		s = append(s, el.(*pb.Value))
	}
	return arrayval(s...)
}

func (protoTester) Map(keysvals ...interface{}) interface{} {
	m := map[string]*pb.Value{}
	for i := 0; i < len(keysvals); i += 2 {
		m[keysvals[i].(string)] = keysvals[i+1].(*pb.Value)
	}
	return mapval(m)
}

type jsonTester struct{}

func (jsonTester) Set(x, val interface{}) error { return json.Unmarshal([]byte(val.(string)), x) }
func (jsonTester) Null() interface{}            { return "null" }
func (jsonTester) Int(i int) interface{}        { return fmt.Sprint(i) }
func (jsonTester) Float(f float64) interface{}  { return fmt.Sprint(f) }

func (jsonTester) Bool(b bool) interface{} {
	if b {
		return "true"
	}
	return "false"
}

func (jsonTester) Array(els ...interface{}) interface{} {
	var s []string
	for _, el := range els {
		s = append(s, el.(string))
	}
	return "[" + strings.Join(s, ", ") + "]"
}

func (jsonTester) Map(keysvals ...interface{}) interface{} {
	var s []string
	for i := 0; i < len(keysvals); i += 2 {
		s = append(s, fmt.Sprintf("%q: %v", keysvals[i], keysvals[i+1]))
	}
	return "{" + strings.Join(s, ", ") + "}"
}

func newfloat(f float64) *float64 {
	p := new(float64)
	*p = f
	return p
}

func TestParseDocumentPath(t *testing.T) {
	for _, test := range []struct {
		in        string
		pid, dbid string
		dpath     []string
	}{
		{"projects/foo-bar/databases/db2/documents/c1/d1",
			"foo-bar", "db2", []string{"c1", "d1"}},
		{"projects/P/databases/D/documents/c1/d1/c2/d2",
			"P", "D", []string{"c1", "d1", "c2", "d2"}},
	} {
		gotPid, gotDbid, gotDpath, err := parseDocumentPath(test.in)
		if err != nil {
			t.Fatal(err)
		}
		if got, want := gotPid, test.pid; got != want {
			t.Errorf("project ID: got %q, want %q", got, want)
		}
		if got, want := gotDbid, test.dbid; got != want {
			t.Errorf("db ID: got %q, want %q", got, want)
		}
		if got, want := gotDpath, test.dpath; !testEqual(got, want) {
			t.Errorf("doc path: got %q, want %q", got, want)
		}
	}
}

func TestParseDocumentPathErrors(t *testing.T) {
	for _, badPath := range []string{
		"projects/P/databases/D/documents/c",    // collection path
		"/projects/P/databases/D/documents/c/d", // initial slash
		"projects/P/databases/D/c/d",            // missing "documents"
		"project/P/database/D/document/c/d",
	} {
		// Every prefix of a bad path is also bad.
		for i := 0; i <= len(badPath); i++ {
			in := badPath[:i]
			_, _, _, err := parseDocumentPath(in)
			if err == nil {
				t.Errorf("%q: got nil, want error", in)
			}
		}
	}
}

func TestPathToDoc(t *testing.T) {
	c := &Client{}
	path := "projects/P/databases/D/documents/c1/d1/c2/d2"
	got, err := pathToDoc(path, c)
	if err != nil {
		t.Fatal(err)
	}
	want := &DocumentRef{
		ID:   "d2",
		Path: "projects/P/databases/D/documents/c1/d1/c2/d2",
		Parent: &CollectionRef{
			ID:         "c2",
			parentPath: "projects/P/databases/D/documents/c1/d1",
			Path:       "projects/P/databases/D/documents/c1/d1/c2",
			c:          c,
			Query:      Query{c: c, collectionID: "c2", parentPath: "projects/P/databases/D/documents/c1/d1"},
			Parent: &DocumentRef{
				ID:   "d1",
				Path: "projects/P/databases/D/documents/c1/d1",
				Parent: &CollectionRef{
					ID:         "c1",
					c:          c,
					parentPath: "projects/P/databases/D",
					Path:       "projects/P/databases/D/documents/c1",
					Parent:     nil,
					Query:      Query{c: c, collectionID: "c1", parentPath: "projects/P/databases/D"},
				},
			},
		},
	}
	if !testEqual(got, want) {
		t.Errorf("\ngot  %+v\nwant %+v", got, want)
	}
}

func TestTypeString(t *testing.T) {
	for _, test := range []struct {
		in   *pb.Value
		want string
	}{
		{nullValue, "null"},
		{intval(1), "int"},
		{floatval(1), "float"},
		{boolval(true), "bool"},
		{strval(""), "string"},
		{tsval(tm), "timestamp"},
		{geoval(ll), "GeoPoint"},
		{bytesval(nil), "bytes"},
		{refval(""), "reference"},
		{arrayval(nil), "array"},
		{mapval(nil), "map"},
	} {
		got := typeString(test.in)
		if got != test.want {
			t.Errorf("%+v: got %q, want %q", test.in, got, test.want)
		}
	}
}
954 vendor/cloud.google.com/go/firestore/integration_test.go generated vendored Normal file
@@ -0,0 +1,954 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package firestore

import (
	"errors"
	"flag"
	"fmt"
	"log"
	"os"
	"sort"
	"testing"
	"time"

	"cloud.google.com/go/internal/pretty"
	"cloud.google.com/go/internal/testutil"

	"golang.org/x/net/context"
	"google.golang.org/api/option"
	"google.golang.org/genproto/googleapis/type/latlng"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
)

func TestMain(m *testing.M) {
	initIntegrationTest()
	status := m.Run()
	cleanupIntegrationTest()
	os.Exit(status)
}

const (
	envProjID     = "GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID"
	envPrivateKey = "GCLOUD_TESTS_GOLANG_FIRESTORE_KEY"
)

var (
	iClient       *Client
	iColl         *CollectionRef
	collectionIDs = testutil.NewUIDSpace("go-integration-test")
)

func initIntegrationTest() {
	flag.Parse() // needed for testing.Short()
	if testing.Short() {
		return
	}
	ctx := context.Background()
	testProjectID := os.Getenv(envProjID)
	if testProjectID == "" {
		log.Println("Integration tests skipped. See CONTRIBUTING.md for details")
		return
	}
	ts := testutil.TokenSourceEnv(ctx, envPrivateKey,
		"https://www.googleapis.com/auth/cloud-platform",
		"https://www.googleapis.com/auth/datastore")
	if ts == nil {
		log.Fatal("The project key must be set. See CONTRIBUTING.md for details")
	}
	ti := &testInterceptor{dbPath: "projects/" + testProjectID + "/databases/(default)"}
	c, err := NewClient(ctx, testProjectID,
		option.WithTokenSource(ts),
		option.WithGRPCDialOption(grpc.WithUnaryInterceptor(ti.interceptUnary)),
		option.WithGRPCDialOption(grpc.WithStreamInterceptor(ti.interceptStream)),
	)
	if err != nil {
		log.Fatalf("NewClient: %v", err)
	}
	iClient = c
	iColl = c.Collection(collectionIDs.New())
	refDoc := iColl.NewDoc()
	integrationTestMap["ref"] = refDoc
	wantIntegrationTestMap["ref"] = refDoc
	integrationTestStruct.Ref = refDoc
}

type testInterceptor struct {
	dbPath string
}

func (ti *testInterceptor) interceptUnary(ctx context.Context, method string, req, res interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	ti.checkMetadata(ctx, method)
	return invoker(ctx, method, req, res, cc, opts...)
}

func (ti *testInterceptor) interceptStream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
	ti.checkMetadata(ctx, method)
	return streamer(ctx, desc, cc, method, opts...)
}

func (ti *testInterceptor) checkMetadata(ctx context.Context, method string) {
	md, ok := metadata.FromOutgoingContext(ctx)
	if !ok {
		log.Fatalf("method %s: bad metadata", method)
	}
	for _, h := range []string{"google-cloud-resource-prefix", "x-goog-api-client"} {
		v, ok := md[h]
		if !ok {
			log.Fatalf("method %s, header %s missing", method, h)
		}
		if len(v) != 1 {
			log.Fatalf("method %s, header %s: bad value %v", method, h, v)
		}
	}
	v := md["google-cloud-resource-prefix"][0]
	if v != ti.dbPath {
		log.Fatalf("method %s: bad resource prefix header: %q", method, v)
	}
}

func cleanupIntegrationTest() {
	if iClient == nil {
		return
	}
	// TODO(jba): delete everything in integrationColl.
	iClient.Close()
}

// integrationClient should be called by integration tests to get a valid client. It will never
// return nil. If integrationClient returns, an integration test can proceed without
// further checks.
func integrationClient(t *testing.T) *Client {
	if testing.Short() {
		t.Skip("Integration tests skipped in short mode")
	}
	if iClient == nil {
		t.SkipNow() // log message printed in initIntegrationTest
	}
	return iClient
}

func integrationColl(t *testing.T) *CollectionRef {
	_ = integrationClient(t)
	return iColl
}

type integrationTestStructType struct {
	Int         int
	Str         string
	Bool        bool
	Float       float32
	Null        interface{}
	Bytes       []byte
	Time        time.Time
	Geo, NilGeo *latlng.LatLng
	Ref         *DocumentRef
}

var (
	integrationTime = time.Date(2017, 3, 20, 1, 2, 3, 456789, time.UTC)
	// Firestore times are accurate only to microseconds.
	wantIntegrationTime = time.Date(2017, 3, 20, 1, 2, 3, 456000, time.UTC)

	integrationGeo = &latlng.LatLng{Latitude: 30, Longitude: 70}

	// Use this when writing a doc.
	integrationTestMap = map[string]interface{}{
		"int":   1,
		"str":   "two",
		"bool":  true,
		"float": 3.14,
		"null":  nil,
		"bytes": []byte("bytes"),
		"*":     map[string]interface{}{"`": 4},
		"time":  integrationTime,
		"geo":   integrationGeo,
		"ref":   nil, // populated by initIntegrationTest
	}

	// The returned data is slightly different.
	wantIntegrationTestMap = map[string]interface{}{
		"int":   int64(1),
		"str":   "two",
		"bool":  true,
		"float": 3.14,
		"null":  nil,
		"bytes": []byte("bytes"),
		"*":     map[string]interface{}{"`": int64(4)},
		"time":  wantIntegrationTime,
		"geo":   integrationGeo,
		"ref":   nil, // populated by initIntegrationTest
	}

	integrationTestStruct = integrationTestStructType{
		Int:    1,
		Str:    "two",
		Bool:   true,
		Float:  3.14,
		Null:   nil,
		Bytes:  []byte("bytes"),
		Time:   integrationTime,
		Geo:    integrationGeo,
		NilGeo: nil,
		Ref:    nil, // populated by initIntegrationTest
	}
)

func TestIntegration_Create(t *testing.T) {
	ctx := context.Background()
	doc := integrationColl(t).NewDoc()
	start := time.Now()
	wr := mustCreate("Create #1", t, doc, integrationTestMap)
	end := time.Now()
	checkTimeBetween(t, wr.UpdateTime, start, end)
	_, err := doc.Create(ctx, integrationTestMap)
	codeEq(t, "Create on a present doc", codes.AlreadyExists, err)
}

func TestIntegration_Get(t *testing.T) {
	ctx := context.Background()
	doc := integrationColl(t).NewDoc()
	mustCreate("Get #1", t, doc, integrationTestMap)
	ds, err := doc.Get(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if ds.CreateTime != ds.UpdateTime {
		t.Errorf("create time %s != update time %s", ds.CreateTime, ds.UpdateTime)
	}
	got := ds.Data()
	if want := wantIntegrationTestMap; !testEqual(got, want) {
		t.Errorf("got\n%v\nwant\n%v", pretty.Value(got), pretty.Value(want))
	}

	_, err = integrationColl(t).NewDoc().Get(ctx)
	codeEq(t, "Get on a missing doc", codes.NotFound, err)
}

func TestIntegration_GetAll(t *testing.T) {
	type getAll struct{ N int }

	coll := integrationColl(t)
	ctx := context.Background()
	var docRefs []*DocumentRef
	for i := 0; i < 5; i++ {
		doc := coll.NewDoc()
		docRefs = append(docRefs, doc)
		mustCreate("GetAll #1", t, doc, getAll{N: i})
	}
	docSnapshots, err := iClient.GetAll(ctx, docRefs)
	if err != nil {
		t.Fatal(err)
	}
	if got, want := len(docSnapshots), len(docRefs); got != want {
		t.Fatalf("got %d snapshots, want %d", got, want)
	}
	for i, ds := range docSnapshots {
		var got getAll
		if err := ds.DataTo(&got); err != nil {
			t.Fatal(err)
		}
		want := getAll{N: i}
		if got != want {
			t.Errorf("%d: got %+v, want %+v", i, got, want)
		}
	}
}

func TestIntegration_Add(t *testing.T) {
	start := time.Now()
	_, wr, err := integrationColl(t).Add(context.Background(), integrationTestMap)
	if err != nil {
		t.Fatal(err)
	}
	end := time.Now()
	checkTimeBetween(t, wr.UpdateTime, start, end)
}

func TestIntegration_Set(t *testing.T) {
	coll := integrationColl(t)
	ctx := context.Background()

	// Set should be able to create a new doc.
	doc := coll.NewDoc()
	wr1, err := doc.Set(ctx, integrationTestMap)
	if err != nil {
		t.Fatal(err)
	}
	// Calling Set on the doc completely replaces the contents.
	// The update time should increase.
	newData := map[string]interface{}{
		"str": "change",
		"x":   "1",
	}
	wr2, err := doc.Set(ctx, newData)
	if err != nil {
		t.Fatal(err)
	}
	if !wr1.UpdateTime.Before(wr2.UpdateTime) {
		t.Errorf("update time did not increase: old=%s, new=%s", wr1.UpdateTime, wr2.UpdateTime)
	}
	ds, err := doc.Get(ctx)
	if err != nil {
		t.Fatal(err)
	}
	got := ds.Data()
	if !testEqual(got, newData) {
		t.Errorf("got %v, want %v", got, newData)
	}

	newData = map[string]interface{}{
		"str": "1",
		"x":   "2",
		"y":   "3",
	}
	// SetOptions:
	// Only fields mentioned in the Merge option will be changed.
	// In this case, "str" will not be changed to "1".
	wr3, err := doc.Set(ctx, newData, Merge("x", "y"))
	if err != nil {
		t.Fatal(err)
	}
	ds, err = doc.Get(ctx)
	if err != nil {
		t.Fatal(err)
	}
	got = ds.Data()
	want := map[string]interface{}{
		"str": "change",
		"x":   "2",
		"y":   "3",
	}
	if !testEqual(got, want) {
		t.Errorf("got %v, want %v", got, want)
	}
	if !wr2.UpdateTime.Before(wr3.UpdateTime) {
		t.Errorf("update time did not increase: old=%s, new=%s", wr2.UpdateTime, wr3.UpdateTime)
	}

	// Another way to change only x and y is to pass a map with only
	// those keys, and use MergeAll.
	wr4, err := doc.Set(ctx, map[string]interface{}{"x": "4", "y": "5"}, MergeAll)
	if err != nil {
		t.Fatal(err)
	}
	ds, err = doc.Get(ctx)
	if err != nil {
		t.Fatal(err)
	}
	got = ds.Data()
	want = map[string]interface{}{
		"str": "change",
		"x":   "4",
		"y":   "5",
	}
	if !testEqual(got, want) {
		t.Errorf("got %v, want %v", got, want)
	}
	if !wr3.UpdateTime.Before(wr4.UpdateTime) {
		t.Errorf("update time did not increase: old=%s, new=%s", wr3.UpdateTime, wr4.UpdateTime)
	}
}

func TestIntegration_Delete(t *testing.T) {
	ctx := context.Background()
	doc := integrationColl(t).NewDoc()
	mustCreate("Delete #1", t, doc, integrationTestMap)
	wr, err := doc.Delete(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// Confirm that doc doesn't exist.
	if _, err := doc.Get(ctx); grpc.Code(err) != codes.NotFound {
		t.Fatalf("got error <%v>, want NotFound", err)
	}

	er := func(_ *WriteResult, err error) error { return err }

	codeEq(t, "Delete on a missing doc", codes.OK,
		er(doc.Delete(ctx)))
	// TODO(jba): confirm that the server should return InvalidArgument instead of
	// FailedPrecondition.
	wr = mustCreate("Delete #2", t, doc, integrationTestMap)
	codeEq(t, "Delete with wrong LastUpdateTime", codes.FailedPrecondition,
		er(doc.Delete(ctx, LastUpdateTime(wr.UpdateTime.Add(-time.Millisecond)))))
	codeEq(t, "Delete with right LastUpdateTime", codes.OK,
		er(doc.Delete(ctx, LastUpdateTime(wr.UpdateTime))))
}

func TestIntegration_UpdateMap(t *testing.T) {
	ctx := context.Background()
	doc := integrationColl(t).NewDoc()
	mustCreate("UpdateMap", t, doc, integrationTestMap)
	um := map[string]interface{}{
		"bool":        false,
		"time":        17,
		"null":        Delete,
		"noSuchField": Delete, // deleting a non-existent field is a no-op
	}
	wr, err := doc.UpdateMap(ctx, um)
	if err != nil {
		t.Fatal(err)
	}
	ds, err := doc.Get(ctx)
	if err != nil {
		t.Fatal(err)
	}
	got := ds.Data()
	want := copyMap(wantIntegrationTestMap)
	want["bool"] = false
	want["time"] = int64(17)
	delete(want, "null")
	if !testEqual(got, want) {
		t.Errorf("got\n%#v\nwant\n%#v", got, want)
	}

	er := func(_ *WriteResult, err error) error { return err }
	codeEq(t, "UpdateMap on missing doc", codes.NotFound,
		er(integrationColl(t).NewDoc().UpdateMap(ctx, um)))
	codeEq(t, "UpdateMap with wrong LastUpdateTime", codes.FailedPrecondition,
		er(doc.UpdateMap(ctx, um, LastUpdateTime(wr.UpdateTime.Add(-time.Millisecond)))))
	codeEq(t, "UpdateMap with right LastUpdateTime", codes.OK,
		er(doc.UpdateMap(ctx, um, LastUpdateTime(wr.UpdateTime))))
}

func TestIntegration_UpdateStruct(t *testing.T) {
	ctx := context.Background()
	doc := integrationColl(t).NewDoc()
	mustCreate("UpdateStruct", t, doc, integrationTestStruct)
	fields := []string{"Bool", "Time", "Null", "noSuchField"}
	wr, err := doc.UpdateStruct(ctx, fields,
		integrationTestStructType{
			Bool: false,
			Time: aTime2,
		})
	if err != nil {
		t.Fatal(err)
	}
	ds, err := doc.Get(ctx)
	if err != nil {
		t.Fatal(err)
	}
	var got integrationTestStructType
	if err := ds.DataTo(&got); err != nil {
		t.Fatal(err)
	}
	want := integrationTestStruct
	want.Bool = false
	want.Time = aTime2
	if !testEqual(got, want) {
		t.Errorf("got\n%#v\nwant\n%#v", got, want)
	}

	er := func(_ *WriteResult, err error) error { return err }
	codeEq(t, "UpdateStruct on missing doc", codes.NotFound,
		er(integrationColl(t).NewDoc().UpdateStruct(ctx, fields, integrationTestStruct)))
	codeEq(t, "UpdateStruct with wrong LastUpdateTime", codes.FailedPrecondition,
		er(doc.UpdateStruct(ctx, fields, integrationTestStruct, LastUpdateTime(wr.UpdateTime.Add(-time.Millisecond)))))
	codeEq(t, "UpdateStruct with right LastUpdateTime", codes.OK,
		er(doc.UpdateStruct(ctx, fields, integrationTestStruct, LastUpdateTime(wr.UpdateTime))))
}

func TestIntegration_UpdatePaths(t *testing.T) {
	ctx := context.Background()
	doc := integrationColl(t).NewDoc()
	mustCreate("UpdatePaths", t, doc, integrationTestMap)
|
||||
fpus := []FieldPathUpdate{
|
||||
{Path: []string{"bool"}, Value: false},
|
||||
{Path: []string{"time"}, Value: 17},
|
||||
{Path: []string{"*", "`"}, Value: 18},
|
||||
{Path: []string{"null"}, Value: Delete},
|
||||
{Path: []string{"noSuchField"}, Value: Delete}, // deleting a non-existent field is a no-op
|
||||
}
|
||||
wr, err := doc.UpdatePaths(ctx, fpus)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ds, err := doc.Get(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
got := ds.Data()
|
||||
want := copyMap(wantIntegrationTestMap)
|
||||
want["bool"] = false
|
||||
want["time"] = int64(17)
|
||||
want["*"] = map[string]interface{}{"`": int64(18)}
|
||||
delete(want, "null")
|
||||
if !testEqual(got, want) {
|
||||
t.Errorf("got\n%#v\nwant\n%#v", got, want)
|
||||
}
|
||||
|
||||
er := func(_ *WriteResult, err error) error { return err }
|
||||
|
||||
codeEq(t, "UpdatePaths on missing doc", codes.NotFound,
|
||||
er(integrationColl(t).NewDoc().UpdatePaths(ctx, fpus)))
|
||||
codeEq(t, "UpdatePaths with wrong LastUpdateTime", codes.FailedPrecondition,
|
||||
er(doc.UpdatePaths(ctx, fpus, LastUpdateTime(wr.UpdateTime.Add(-time.Millisecond)))))
|
||||
codeEq(t, "UpdatePaths with right LastUpdateTime", codes.OK,
|
||||
er(doc.UpdatePaths(ctx, fpus, LastUpdateTime(wr.UpdateTime))))
|
||||
}
|
||||
|
||||
func TestIntegration_Collections(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := integrationClient(t)
|
||||
got, err := c.Collections(ctx).GetAll()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// There should be at least one collection.
|
||||
if len(got) == 0 {
|
||||
t.Error("got 0 top-level collections, want at least one")
|
||||
}
|
||||
|
||||
doc := integrationColl(t).NewDoc()
|
||||
got, err = doc.Collections(ctx).GetAll()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(got) != 0 {
|
||||
t.Errorf("got %d collections, want 0", len(got))
|
||||
}
|
||||
var want []*CollectionRef
|
||||
for i := 0; i < 3; i++ {
|
||||
id := collectionIDs.New()
|
||||
cr := doc.Collection(id)
|
||||
want = append(want, cr)
|
||||
mustCreate("Collections", t, cr.NewDoc(), integrationTestMap)
|
||||
}
|
||||
got, err = doc.Collections(ctx).GetAll()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !testEqual(got, want) {
|
||||
t.Errorf("got\n%#v\nwant\n%#v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegration_ServerTimestamp(t *testing.T) {
|
||||
type S struct {
|
||||
A int
|
||||
B time.Time
|
||||
C time.Time `firestore:"C.C,serverTimestamp"`
|
||||
D map[string]interface{}
|
||||
E time.Time `firestore:",omitempty,serverTimestamp"`
|
||||
}
|
||||
data := S{
|
||||
A: 1,
|
||||
B: aTime,
|
||||
// C is unset, so will get the server timestamp.
|
||||
D: map[string]interface{}{"x": ServerTimestamp},
|
||||
// E is unset, so will get the server timestamp.
|
||||
}
|
||||
ctx := context.Background()
|
||||
doc := integrationColl(t).NewDoc()
|
||||
// Bound times of the RPC, with some slack for clock skew.
|
||||
start := time.Now()
|
||||
mustCreate("ServerTimestamp", t, doc, data)
|
||||
end := time.Now()
|
||||
ds, err := doc.Get(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var got S
|
||||
if err := ds.DataTo(&got); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !testEqual(got.B, aTime) {
|
||||
t.Errorf("B: got %s, want %s", got.B, aTime)
|
||||
}
|
||||
checkTimeBetween(t, got.C, start, end)
|
||||
if g, w := got.D["x"], got.C; !testEqual(g, w) {
|
||||
t.Errorf(`D["x"] = %s, want equal to C (%s)`, g, w)
|
||||
}
|
||||
if g, w := got.E, got.C; !testEqual(g, w) {
|
||||
t.Errorf(`E = %s, want equal to C (%s)`, g, w)
|
||||
}
|
||||
}
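For reference, the struct tags that drive this test follow the encoding/json-style conventions described in the package documentation. The fragment below is illustrative only (a hypothetical type, not part of the vendored file):

type Tagged struct {
	Capital string    `firestore:"capital"`          // stored under the field name "capital"
	Note    string    `firestore:",omitempty"`       // omitted from writes when empty
	Stamp   time.Time `firestore:",serverTimestamp"` // zero value is replaced by the server's commit time
}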
|
||||
|
||||
func TestIntegration_MergeServerTimestamp(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
doc := integrationColl(t).NewDoc()
|
||||
|
||||
// Create a doc with an ordinary field "a" and a ServerTimestamp field "b".
|
||||
_, err := doc.Set(ctx, map[string]interface{}{
|
||||
"a": 1,
|
||||
"b": ServerTimestamp})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
docSnap, err := doc.Get(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
data1 := docSnap.Data()
|
||||
// Merge with a document with a different value of "a". However,
|
||||
// specify only "b" in the list of merge fields.
|
||||
_, err = doc.Set(ctx,
|
||||
map[string]interface{}{"a": 2, "b": ServerTimestamp},
|
||||
Merge("b"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// The result should leave "a" unchanged, while "b" is updated.
|
||||
docSnap, err = doc.Get(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
data2 := docSnap.Data()
|
||||
if got, want := data2["a"], data1["a"]; got != want {
|
||||
t.Errorf("got %v, want %v", got, want)
|
||||
}
|
||||
t1 := data1["b"].(time.Time)
|
||||
t2 := data2["b"].(time.Time)
|
||||
if !t1.Before(t2) {
|
||||
t.Errorf("got t1=%s, t2=%s; want t1 before t2", t1, t2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegration_MergeNestedServerTimestamp(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
doc := integrationColl(t).NewDoc()
|
||||
|
||||
// Create a doc with an ordinary field "a", a ServerTimestamp field "b",
|
||||
// and a second ServerTimestamp field "c.d".
|
||||
_, err := doc.Set(ctx, map[string]interface{}{
|
||||
"a": 1,
|
||||
"b": ServerTimestamp,
|
||||
"c": map[string]interface{}{"d": ServerTimestamp},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
docSnap, err := doc.Get(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
data1 := docSnap.Data()
|
||||
// Merge with a document with a different value of "a". However,
|
||||
// specify only "c.d" in the list of merge fields.
|
||||
_, err = doc.Set(ctx,
|
||||
map[string]interface{}{
|
||||
"a": 2,
|
||||
"b": ServerTimestamp,
|
||||
"c": map[string]interface{}{"d": ServerTimestamp},
|
||||
},
|
||||
Merge("c.d"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// The result should leave "a" and "b" unchanged, while "c.d" is updated.
|
||||
docSnap, err = doc.Get(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
data2 := docSnap.Data()
|
||||
if got, want := data2["a"], data1["a"]; got != want {
|
||||
t.Errorf("a: got %v, want %v", got, want)
|
||||
}
|
||||
want := data1["b"].(time.Time)
|
||||
got := data2["b"].(time.Time)
|
||||
if !got.Equal(want) {
|
||||
t.Errorf("b: got %s, want %s", got, want)
|
||||
}
|
||||
t1 := data1["c"].(map[string]interface{})["d"].(time.Time)
|
||||
t2 := data2["c"].(map[string]interface{})["d"].(time.Time)
|
||||
if !t1.Before(t2) {
|
||||
t.Errorf("got t1=%s, t2=%s; want t1 before t2", t1, t2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegration_WriteBatch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
b := integrationClient(t).Batch()
|
||||
doc1 := iColl.NewDoc()
|
||||
doc2 := iColl.NewDoc()
|
||||
b.Create(doc1, integrationTestMap)
|
||||
b.Set(doc2, integrationTestMap)
|
||||
b.UpdateMap(doc1, map[string]interface{}{"bool": false})
|
||||
b.UpdateMap(doc1, map[string]interface{}{"str": Delete})
|
||||
|
||||
wrs, err := b.Commit(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got, want := len(wrs), 4; got != want {
|
||||
t.Fatalf("got %d WriteResults, want %d", got, want)
|
||||
}
|
||||
ds, err := doc1.Get(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
got1 := ds.Data()
|
||||
want := copyMap(wantIntegrationTestMap)
|
||||
want["bool"] = false
|
||||
delete(want, "str")
|
||||
if !testEqual(got1, want) {
|
||||
t.Errorf("got\n%#v\nwant\n%#v", got1, want)
|
||||
}
|
||||
ds, err = doc2.Get(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
got2 := ds.Data()
|
||||
if !testEqual(got2, wantIntegrationTestMap) {
|
||||
t.Errorf("got\n%#v\nwant\n%#v", got2, wantIntegrationTestMap)
|
||||
}
|
||||
// TODO(jba): test two updates to the same document when it is supported.
|
||||
// TODO(jba): test verify when it is supported.
|
||||
}
|
||||
|
||||
func TestIntegration_Query(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
coll := integrationColl(t)
|
||||
var docs []*DocumentRef
|
||||
var wants []map[string]interface{}
|
||||
for i := 0; i < 3; i++ {
|
||||
doc := coll.NewDoc()
|
||||
docs = append(docs, doc)
|
||||
// To support running this test in parallel with the others, use a field name
|
||||
// that we don't use anywhere else.
|
||||
mustCreate(fmt.Sprintf("Query #%d", i), t, doc,
|
||||
map[string]interface{}{
|
||||
"q": i,
|
||||
"x": 1,
|
||||
})
|
||||
wants = append(wants, map[string]interface{}{"q": int64(i)})
|
||||
}
|
||||
q := coll.Select("q").OrderBy("q", Asc)
|
||||
for i, test := range []struct {
|
||||
q Query
|
||||
want []map[string]interface{}
|
||||
}{
|
||||
{q, wants},
|
||||
{q.Where("q", ">", 1), wants[2:]},
|
||||
{q.WherePath([]string{"q"}, ">", 1), wants[2:]},
|
||||
{q.Offset(1).Limit(1), wants[1:2]},
|
||||
{q.StartAt(1), wants[1:]},
|
||||
{q.StartAfter(1), wants[2:]},
|
||||
{q.EndAt(1), wants[:2]},
|
||||
{q.EndBefore(1), wants[:1]},
|
||||
} {
|
||||
gotDocs, err := test.q.Documents(ctx).GetAll()
|
||||
if err != nil {
|
||||
t.Errorf("#%d: %+v: %v", i, test.q, err)
|
||||
continue
|
||||
}
|
||||
if len(gotDocs) != len(test.want) {
|
||||
t.Errorf("#%d: %+v: got %d docs, want %d", i, test.q, len(gotDocs), len(test.want))
|
||||
continue
|
||||
}
|
||||
for j, g := range gotDocs {
|
||||
if got, want := g.Data(), test.want[j]; !testEqual(got, want) {
|
||||
t.Errorf("#%d: %+v, #%d: got\n%+v\nwant\n%+v", i, test.q, j, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
_, err := coll.Select("q").Where("x", "==", 1).OrderBy("q", Asc).Documents(ctx).GetAll()
|
||||
codeEq(t, "Where and OrderBy on different fields without an index", codes.FailedPrecondition, err)
|
||||
|
||||
// Using the collection itself as the query should return the full documents.
|
||||
allDocs, err := coll.Documents(ctx).GetAll()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
seen := map[int64]bool{} // "q" values we see
|
||||
for _, d := range allDocs {
|
||||
data := d.Data()
|
||||
q, ok := data["q"]
|
||||
if !ok {
|
||||
// A document from another test.
|
||||
continue
|
||||
}
|
||||
if seen[q.(int64)] {
|
||||
t.Errorf("%v: duplicate doc", data)
|
||||
}
|
||||
seen[q.(int64)] = true
|
||||
if data["x"] != int64(1) {
|
||||
t.Errorf("%v: wrong or missing 'x'", data)
|
||||
}
|
||||
if len(data) != 2 {
|
||||
t.Errorf("%v: want two keys", data)
|
||||
}
|
||||
}
|
||||
if got, want := len(seen), len(wants); got != want {
|
||||
t.Errorf("got %d docs with 'q', want %d", len(seen), len(wants))
|
||||
}
|
||||
}
|
||||
|
||||
// Test the special DocumentID field in queries.
|
||||
func TestIntegration_QueryName(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
checkIDs := func(q Query, wantIDs []string) {
|
||||
gots, err := q.Documents(ctx).GetAll()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(gots) != len(wantIDs) {
|
||||
t.Fatalf("got %d, want %d", len(gots), len(wantIDs))
|
||||
}
|
||||
for i, g := range gots {
|
||||
if got, want := g.Ref.ID, wantIDs[i]; got != want {
|
||||
t.Errorf("#%d: got %s, want %s", i, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
coll := integrationColl(t)
|
||||
var wantIDs []string
|
||||
for i := 0; i < 3; i++ {
|
||||
doc := coll.NewDoc()
|
||||
mustCreate(fmt.Sprintf("Query #%d", i), t, doc, map[string]interface{}{"nm": 1})
|
||||
wantIDs = append(wantIDs, doc.ID)
|
||||
}
|
||||
sort.Strings(wantIDs)
|
||||
q := coll.Where("nm", "==", 1).OrderBy(DocumentID, Asc)
|
||||
checkIDs(q, wantIDs)
|
||||
|
||||
// Empty Select.
|
||||
q = coll.Select().Where("nm", "==", 1).OrderBy(DocumentID, Asc)
|
||||
checkIDs(q, wantIDs)
|
||||
|
||||
// Test cursors with __name__.
|
||||
checkIDs(q.StartAt(wantIDs[1]), wantIDs[1:])
|
||||
checkIDs(q.EndAt(wantIDs[1]), wantIDs[:2])
|
||||
}
|
||||
|
||||
func TestIntegration_QueryNested(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
coll1 := integrationColl(t)
|
||||
doc1 := coll1.NewDoc()
|
||||
coll2 := doc1.Collection(collectionIDs.New())
|
||||
doc2 := coll2.NewDoc()
|
||||
wantData := map[string]interface{}{"x": int64(1)}
|
||||
mustCreate("QueryNested", t, doc2, wantData)
|
||||
q := coll2.Select("x")
|
||||
got, err := q.Documents(ctx).GetAll()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(got) != 1 {
|
||||
t.Fatalf("got %d docs, want 1", len(got))
|
||||
}
|
||||
if gotData := got[0].Data(); !testEqual(gotData, wantData) {
|
||||
t.Errorf("got\n%+v\nwant\n%+v", gotData, wantData)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegration_RunTransaction(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
type Player struct {
|
||||
Name string
|
||||
Score int
|
||||
Star bool `firestore:"*"`
|
||||
}
|
||||
pat := Player{Name: "Pat", Score: 3, Star: false}
|
||||
client := integrationClient(t)
|
||||
patDoc := iColl.Doc("pat")
|
||||
var anError error
|
||||
incPat := func(_ context.Context, tx *Transaction) error {
|
||||
doc, err := tx.Get(patDoc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
score, err := doc.DataAt("Score")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Since the Star field is called "*", we must use DataAtPath to get it.
|
||||
star, err := doc.DataAtPath([]string{"*"})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = tx.UpdateStruct(patDoc, []string{"Score"},
|
||||
Player{Score: int(score.(int64) + 7)})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Since the Star field is called "*", we must use UpdatePaths to change it.
|
||||
err = tx.UpdatePaths(patDoc,
|
||||
[]FieldPathUpdate{{Path: []string{"*"}, Value: !star.(bool)}})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return anError
|
||||
}
|
||||
mustCreate("RunTransaction", t, patDoc, pat)
|
||||
err := client.RunTransaction(ctx, incPat)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ds, err := patDoc.Get(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var got Player
|
||||
if err := ds.DataTo(&got); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
want := Player{Name: "Pat", Score: 10, Star: true}
|
||||
if got != want {
|
||||
t.Errorf("got %+v, want %+v", got, want)
|
||||
}
|
||||
|
||||
// Function returns error, so transaction is rolled back and no writes happen.
|
||||
anError = errors.New("bad")
|
||||
err = client.RunTransaction(ctx, incPat)
|
||||
if err != anError {
|
||||
t.Fatalf("got %v, want %v", err, anError)
|
||||
}
|
||||
if err := ds.DataTo(&got); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// want is same as before.
|
||||
if got != want {
|
||||
t.Errorf("got %+v, want %+v", got, want)
|
||||
}
|
||||
}
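The rollback check at the end of this test can be summarized in a hedged sketch (client, doc and ctx are assumed to exist; this is not part of the vendored file):

// Returning a non-nil error from the transaction function makes
// RunTransaction roll back: writes issued through tx are discarded and
// the same error is returned to the caller.
err := client.RunTransaction(ctx, func(ctx context.Context, tx *firestore.Transaction) error {
	if err := tx.UpdatePaths(doc, []firestore.FieldPathUpdate{{Path: []string{"n"}, Value: 1}}); err != nil {
		return err
	}
	return errors.New("abort") // the UpdatePaths above is never applied
})
// err is the "abort" error, and the document is unchanged.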
|
||||
|
||||
func codeEq(t *testing.T, msg string, code codes.Code, err error) {
|
||||
if grpc.Code(err) != code {
|
||||
t.Fatalf("%s:\ngot <%v>\nwant code %s", msg, err, code)
|
||||
}
|
||||
}
|
||||
|
||||
func mustCreate(msg string, t *testing.T, doc *DocumentRef, data interface{}) *WriteResult {
|
||||
wr, err := doc.Create(context.Background(), data)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: creating: %v", msg, err)
|
||||
}
|
||||
return wr
|
||||
}
|
||||
|
||||
func copyMap(m map[string]interface{}) map[string]interface{} {
|
||||
c := map[string]interface{}{}
|
||||
for k, v := range m {
|
||||
c[k] = v
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
func checkTimeBetween(t *testing.T, got, low, high time.Time) {
|
||||
// Allow slack for clock skew.
|
||||
const slack = 1 * time.Second
|
||||
low = low.Add(-slack)
|
||||
high = high.Add(slack)
|
||||
if got.Before(low) || got.After(high) {
|
||||
t.Fatalf("got %s, not in [%s, %s]", got, low, high)
|
||||
}
|
||||
}
|
10
vendor/cloud.google.com/go/firestore/internal/Makefile
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
# Build doc.go from template and snippets.
|
||||
|
||||
SHELL=/bin/bash
|
||||
|
||||
../doc.go: doc-snippets.go doc.template snipdoc.awk
|
||||
@tmp=$$(mktemp) && \
|
||||
awk -f snipdoc.awk doc-snippets.go doc.template > $$tmp && \
|
||||
chmod +w ../doc.go && \
|
||||
mv $$tmp ../doc.go && \
|
||||
chmod -w ../doc.go
|
164
vendor/cloud.google.com/go/firestore/internal/doc-snippets.go
generated
vendored
Normal file
@ -0,0 +1,164 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
firestore "cloud.google.com/go/firestore"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
)
|
||||
|
||||
const ELLIPSIS = 0
|
||||
|
||||
//[ structDef
|
||||
type State struct {
|
||||
Capital string `firestore:"capital"`
|
||||
Population float64 `firestore:"pop"` // in millions
|
||||
}
|
||||
|
||||
//]
|
||||
|
||||
func f1() {
|
||||
//[ NewClient
|
||||
ctx := context.Background()
|
||||
client, err := firestore.NewClient(ctx, "projectID")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
//]
|
||||
//[ refs
|
||||
states := client.Collection("States")
|
||||
ny := states.Doc("NewYork")
|
||||
// Or, in a single call:
|
||||
ny = client.Doc("States/NewYork")
|
||||
//]
|
||||
//[ docref.Get
|
||||
docsnap, err := ny.Get(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
dataMap := docsnap.Data()
|
||||
fmt.Println(dataMap)
|
||||
//]
|
||||
//[ DataTo
|
||||
var nyData State
|
||||
if err := docsnap.DataTo(&nyData); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
//]
|
||||
//[ GetAll
|
||||
docsnaps, err := client.GetAll(ctx, []*firestore.DocumentRef{
|
||||
states.Doc("Wisconsin"), states.Doc("Ohio"),
|
||||
})
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
for _, ds := range docsnaps {
|
||||
_ = ds // TODO: Use ds.
|
||||
}
|
||||
//[ docref.Create
|
||||
wr, err := ny.Create(ctx, State{
|
||||
Capital: "Albany",
|
||||
Population: 19.8,
|
||||
})
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(wr)
|
||||
//]
|
||||
//[ docref.Set
|
||||
ca := states.Doc("California")
|
||||
_, err = ca.Set(ctx, State{
|
||||
Capital: "Sacramento",
|
||||
Population: 39.14,
|
||||
})
|
||||
//]
|
||||
|
||||
//[ docref.Delete
|
||||
_, err = ny.Delete(ctx)
|
||||
//]
|
||||
|
||||
//[ LUT-precond
|
||||
docsnap, err = ca.Get(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
_, err = ca.UpdateStruct(ctx, []string{"capital"}, State{Capital: "Sacramento"},
|
||||
firestore.LastUpdateTime(docsnap.UpdateTime))
|
||||
//]
|
||||
|
||||
//[ docref.UpdateMap
|
||||
_, err = ca.UpdateMap(ctx, map[string]interface{}{"pop": 39.2})
|
||||
//]
|
||||
//[ docref.UpdateStruct
|
||||
_, err = ca.UpdateStruct(ctx, []string{"pop"}, State{Population: 39.2})
|
||||
//]
|
||||
|
||||
//[ WriteBatch
|
||||
writeResults, err := client.Batch().
|
||||
Create(ny, State{Capital: "Albany"}).
|
||||
UpdateStruct(ca, []string{"capital"}, State{Capital: "Sacramento"}).
|
||||
Delete(client.Doc("States/WestDakota")).
|
||||
Commit(ctx)
|
||||
//]
|
||||
_ = writeResults
|
||||
|
||||
//[ Query
|
||||
q := states.Where("pop", ">", 10).OrderBy("pop", firestore.Desc)
|
||||
//]
|
||||
//[ Documents
|
||||
iter := q.Documents(ctx)
|
||||
for {
|
||||
doc, err := iter.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(doc.Data())
|
||||
}
|
||||
//]
|
||||
|
||||
//[ CollQuery
|
||||
iter = client.Collection("States").Documents(ctx)
|
||||
//]
|
||||
}
|
||||
|
||||
func txn() {
|
||||
var ctx context.Context
|
||||
var client *firestore.Client
|
||||
//[ Transaction
|
||||
ny := client.Doc("States/NewYork")
|
||||
err := client.RunTransaction(ctx, func(ctx context.Context, tx *firestore.Transaction) error {
|
||||
doc, err := tx.Get(ny) // tx.Get, NOT ny.Get!
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pop, err := doc.DataAt("pop")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return tx.UpdateStruct(ny, []string{"pop"},
|
||||
State{Population: pop.(float64) + 0.2})
|
||||
})
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
//]
|
||||
}
|
148
vendor/cloud.google.com/go/firestore/internal/doc.template
generated
vendored
Normal file
@ -0,0 +1,148 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// DO NOT EDIT doc.go. Modify internal/doc.template, then run make -C internal.
|
||||
|
||||
/*
|
||||
Package firestore provides a client for reading and writing to a Cloud Firestore
|
||||
database.
|
||||
|
||||
See https://cloud.google.com/firestore/docs for an introduction
|
||||
to Cloud Firestore and additional help on using the Firestore API.
|
||||
|
||||
Creating a Client
|
||||
|
||||
To start working with this package, create a client with a project ID:
|
||||
|
||||
[NewClient]
|
||||
|
||||
CollectionRefs and DocumentRefs
|
||||
|
||||
In Firestore, documents are sets of key-value pairs, and collections are groups of
|
||||
documents. A Firestore database consists of a hierarchy of alternating collections
|
||||
and documents, referred to by slash-separated paths like
|
||||
"States/California/Cities/SanFrancisco".
|
||||
|
||||
This client is built around references to collections and documents. CollectionRefs
|
||||
and DocumentRefs are lightweight values that refer to the corresponding database
|
||||
entities. Creating a ref does not involve any network traffic.
|
||||
|
||||
[refs]
|
||||
|
||||
Reading
|
||||
|
||||
Use DocumentRef.Get to read a document. The result is a DocumentSnapshot.
|
||||
Call its Data method to obtain the entire document contents as a map.
|
||||
|
||||
[docref.Get]
|
||||
|
||||
You can also obtain a single field with DataAt, or extract the data into a struct
|
||||
with DataTo. With the type definition
|
||||
|
||||
[structDef]
|
||||
|
||||
we can extract the document's data into a value of type State:
|
||||
|
||||
[DataTo]
|
||||
|
||||
Note that this client supports struct tags beginning with "firestore:" that work like
|
||||
the tags of the encoding/json package, letting you rename fields, ignore them, or
|
||||
omit their values when empty.
|
||||
|
||||
To retrieve multiple documents from their references in a single call, use
|
||||
Client.GetAll.
|
||||
|
||||
[GetAll]
|
||||
|
||||
Writing
|
||||
|
||||
For writing individual documents, use the methods on DocumentRef.
|
||||
Create creates a new document.
|
||||
|
||||
[docref.Create]
|
||||
|
||||
The first return value is a WriteResult, which contains the time
|
||||
at which the document was updated.
|
||||
|
||||
Create fails if the document exists. Another method, Set, either replaces an existing
|
||||
document or creates a new one.
|
||||
|
||||
[docref.Set]
|
||||
|
||||
To update some fields of an existing document, use UpdateMap, UpdateStruct or
|
||||
UpdatePaths. For UpdateMap, the keys of the map specify which fields to change. The
|
||||
others are untouched.
|
||||
|
||||
[docref.UpdateMap]
|
||||
|
||||
For UpdateStruct, you must explicitly provide the fields to update. The field names
|
||||
must match exactly.
|
||||
|
||||
[docref.UpdateStruct]
|
||||
|
||||
Use DocumentRef.Delete to delete a document.
|
||||
|
||||
[docref.Delete]
|
||||
|
||||
Preconditions
|
||||
|
||||
You can condition Deletes or Updates on when a document was last changed. Specify
|
||||
these preconditions as an option to a Delete or Update method. The check and the
|
||||
write happen atomically with a single RPC.
|
||||
|
||||
[LUT-precond]
|
||||
|
||||
Here we update a doc only if it hasn't changed since we read it.
|
||||
You could also do this with a transaction.
|
||||
|
||||
To perform multiple writes at once, use a WriteBatch. Its methods chain
|
||||
for convenience.
|
||||
|
||||
WriteBatch.Commit sends the collected writes to the server, where they happen
|
||||
atomically.
|
||||
|
||||
[WriteBatch]
|
||||
|
||||
Queries
|
||||
|
||||
You can use queries to select documents from a collection. Begin with the collection, and
|
||||
build up a query using Select, Where and other methods of Query.
|
||||
|
||||
[Query]
|
||||
|
||||
Call the Query's Documents method to get an iterator, and use it like
|
||||
the other Google Cloud Client iterators.
|
||||
|
||||
[Documents]
|
||||
|
||||
To get all the documents in a collection, you can use the collection itself
|
||||
as a query.
|
||||
|
||||
[CollQuery]
|
||||
|
||||
Transactions
|
||||
|
||||
Use a transaction to execute reads and writes atomically. All reads must happen
|
||||
before any writes. Transaction creation, commit, rollback and retry are handled for
|
||||
you by the Client.RunTransaction method; just provide a function and use the
|
||||
read and write methods of the Transaction passed to it.
|
||||
|
||||
[Transaction]
|
||||
|
||||
Authentication
|
||||
|
||||
See examples of authorization and authentication at
|
||||
https://godoc.org/cloud.google.com/go#pkg-examples.
|
||||
*/
|
||||
package firestore
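The Preconditions section above mentions that the same "update only if unchanged" guarantee can be obtained with a transaction instead of LastUpdateTime, but no snippet is given for that variant. A hedged sketch, reusing the client, ca and State names from the snippets (illustrative, not part of the vendored template):

err := client.RunTransaction(ctx, func(ctx context.Context, tx *firestore.Transaction) error {
	docsnap, err := tx.Get(ca) // read inside the transaction, not ca.Get
	if err != nil {
		return err
	}
	var s State
	if err := docsnap.DataTo(&s); err != nil {
		return err
	}
	s.Capital = "Sacramento"
	// The write commits only if ca has not changed since tx.Get.
	return tx.UpdateStruct(ca, []string{"capital"}, s)
})
if err != nil {
	// TODO: Handle error.
}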
|
116
vendor/cloud.google.com/go/firestore/internal/snipdoc.awk
generated
vendored
Normal file
@ -0,0 +1,116 @@
|
||||
# Copyright 2017 Google Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# snipdoc merges code snippets from Go source files into a template to
|
||||
# produce another go file (typically doc.go).
|
||||
#
|
||||
# Call with one or more .go files and a template file.
|
||||
#
|
||||
# awk -f snipdoc.awk foo.go bar.go doc.template
|
||||
#
|
||||
# In the Go files, start a snippet with
|
||||
# //[ NAME
|
||||
# and end it with
|
||||
# //]
|
||||
#
|
||||
# In the template, write
|
||||
# [NAME]
|
||||
# on a line by itself to insert the snippet NAME on that line.
|
||||
#
|
||||
# The following transformations are made to the Go code:
|
||||
# - Trailing blank lines are removed.
|
||||
# - `ELLIPSIS` and `_ = ELLIPSIS` are replaced by `...`
|
||||
|
||||
|
||||
/^[ \t]*\/\/\[/ { # start snippet in Go file
|
||||
if (inGo()) {
|
||||
if ($2 == "") {
|
||||
die("missing snippet name")
|
||||
}
|
||||
curSnip = $2
|
||||
next
|
||||
}
|
||||
}
|
||||
|
||||
/^[ \t]*\/\/]/ { # end snippet in Go file
|
||||
if (inGo()) {
|
||||
if (curSnip != "") {
|
||||
# Remove all trailing newlines.
|
||||
gsub(/[\t\n]+$/, "", snips[curSnip])
|
||||
curSnip = ""
|
||||
next
|
||||
} else {
|
||||
die("//] without corresponding //[")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ENDFILE {
|
||||
if (curSnip != "") {
|
||||
die("unclosed snippet: " curSnip)
|
||||
}
|
||||
}
|
||||
|
||||
/^\[.*\]$/ { # Snippet marker in template file.
|
||||
if (inTemplate()) {
|
||||
name = substr($1, 2, length($1)-2)
|
||||
if (snips[name] == "") {
|
||||
die("no snippet named " name)
|
||||
}
|
||||
printf("%s\n", snips[name])
|
||||
afterSnip = 1
|
||||
next
|
||||
}
|
||||
}
|
||||
|
||||
# Matches every line.
|
||||
{
|
||||
if (curSnip != "") {
|
||||
# If the first line in the snip has no indent, add the indent.
|
||||
if (snips[curSnip] == "") {
|
||||
if (index($0, "\t") == 1) {
|
||||
extraIndent = ""
|
||||
} else {
|
||||
extraIndent = "\t"
|
||||
}
|
||||
}
|
||||
|
||||
line = $0
|
||||
# Replace ELLIPSIS.
|
||||
gsub(/_ = ELLIPSIS/, "...", line)
|
||||
gsub(/ELLIPSIS/, "...", line)
|
||||
|
||||
snips[curSnip] = snips[curSnip] extraIndent line "\n"
|
||||
} else if (inTemplate()) {
|
||||
afterSnip = 0
|
||||
# Copy to output.
|
||||
print
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
function inTemplate() {
|
||||
return match(FILENAME, /\.template$/)
|
||||
}
|
||||
|
||||
function inGo() {
|
||||
return match(FILENAME, /\.go$/)
|
||||
}
|
||||
|
||||
|
||||
function die(msg) {
|
||||
printf("%s:%d: %s\n", FILENAME, FNR, msg) > "/dev/stderr"
|
||||
exit 1
|
||||
}
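To make the input format described in the header comments concrete, here is a minimal hypothetical snippet file the script would accept (names are illustrative, not part of the vendored files):

package snippets

import "fmt"

func example() {
	//[ hello
	fmt.Println("hello")
	//]
}

// In doc.template, a line consisting only of
//	[hello]
// is replaced by the snippet body when the script runs, e.g.:
//	awk -f snipdoc.awk snippets.go doc.template > doc.go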
|
175
vendor/cloud.google.com/go/firestore/mock_test.go
generated
vendored
Normal file
@ -0,0 +1,175 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package firestore
|
||||
|
||||
// A simple mock server.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"cloud.google.com/go/internal/testutil"
|
||||
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes/empty"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
type mockServer struct {
|
||||
pb.FirestoreServer
|
||||
|
||||
Addr string
|
||||
|
||||
reqItems []reqItem
|
||||
resps []interface{}
|
||||
}
|
||||
|
||||
type reqItem struct {
|
||||
wantReq proto.Message
|
||||
adjust func(gotReq proto.Message)
|
||||
}
|
||||
|
||||
func newMockServer() (*mockServer, error) {
|
||||
srv, err := testutil.NewServer()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mock := &mockServer{Addr: srv.Addr}
|
||||
pb.RegisterFirestoreServer(srv.Gsrv, mock)
|
||||
srv.Start()
|
||||
return mock, nil
|
||||
}
|
||||
|
||||
// addRPC adds a (request, response) pair to the server's list of expected
|
||||
// interactions. The server will compare the incoming request with wantReq
|
||||
// using proto.Equal.
|
||||
//
|
||||
// Passing nil for wantReq disables the request check.
|
||||
func (s *mockServer) addRPC(wantReq proto.Message, resp interface{}) {
|
||||
s.addRPCAdjust(wantReq, resp, nil)
|
||||
}
|
||||
|
||||
// addRPCAdjust is like addRPC, but accepts a function that can be used
|
||||
// to tweak the requests before comparison, for example to adjust for
|
||||
// randomness.
|
||||
func (s *mockServer) addRPCAdjust(wantReq proto.Message, resp interface{}, adjust func(proto.Message)) {
|
||||
s.reqItems = append(s.reqItems, reqItem{wantReq, adjust})
|
||||
s.resps = append(s.resps, resp)
|
||||
}
|
||||
|
||||
// popRPC compares the request with the next expected (request, response) pair.
|
||||
// It returns the response, or an error if the request doesn't match what
|
||||
// was expected or there are no expected rpcs.
|
||||
func (s *mockServer) popRPC(gotReq proto.Message) (interface{}, error) {
|
||||
if len(s.reqItems) == 0 {
|
||||
panic("out of RPCs")
|
||||
}
|
||||
ri := s.reqItems[0]
|
||||
s.reqItems = s.reqItems[1:]
|
||||
if ri.wantReq != nil {
|
||||
if ri.adjust != nil {
|
||||
ri.adjust(gotReq)
|
||||
}
|
||||
if !proto.Equal(gotReq, ri.wantReq) {
|
||||
return nil, fmt.Errorf("mockServer: bad request\ngot: %T\n%+v\nwant: %T\n%+v",
|
||||
gotReq, gotReq, ri.wantReq, ri.wantReq)
|
||||
}
|
||||
}
|
||||
resp := s.resps[0]
|
||||
s.resps = s.resps[1:]
|
||||
if err, ok := resp.(error); ok {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (s *mockServer) reset() {
|
||||
s.reqItems = nil
|
||||
s.resps = nil
|
||||
}
|
||||
|
||||
func (s *mockServer) GetDocument(_ context.Context, req *pb.GetDocumentRequest) (*pb.Document, error) {
|
||||
res, err := s.popRPC(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.(*pb.Document), nil
|
||||
}
|
||||
|
||||
func (s *mockServer) Commit(_ context.Context, req *pb.CommitRequest) (*pb.CommitResponse, error) {
|
||||
res, err := s.popRPC(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.(*pb.CommitResponse), nil
|
||||
}
|
||||
|
||||
func (s *mockServer) BatchGetDocuments(req *pb.BatchGetDocumentsRequest, bs pb.Firestore_BatchGetDocumentsServer) error {
|
||||
res, err := s.popRPC(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
responses := res.([]interface{})
|
||||
for _, res := range responses {
|
||||
switch res := res.(type) {
|
||||
case *pb.BatchGetDocumentsResponse:
|
||||
if err := bs.Send(res); err != nil {
|
||||
return err
|
||||
}
|
||||
case error:
|
||||
return res
|
||||
default:
|
||||
panic(fmt.Sprintf("bad response type in BatchGetDocuments: %+v", res))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *mockServer) RunQuery(req *pb.RunQueryRequest, qs pb.Firestore_RunQueryServer) error {
|
||||
res, err := s.popRPC(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
responses := res.([]interface{})
|
||||
for _, res := range responses {
|
||||
switch res := res.(type) {
|
||||
case *pb.RunQueryResponse:
|
||||
if err := qs.Send(res); err != nil {
|
||||
return err
|
||||
}
|
||||
case error:
|
||||
return res
|
||||
default:
|
||||
panic(fmt.Sprintf("bad response type in RunQuery: %+v", res))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *mockServer) BeginTransaction(_ context.Context, req *pb.BeginTransactionRequest) (*pb.BeginTransactionResponse, error) {
|
||||
res, err := s.popRPC(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.(*pb.BeginTransactionResponse), nil
|
||||
}
|
||||
|
||||
func (s *mockServer) Rollback(_ context.Context, req *pb.RollbackRequest) (*empty.Empty, error) {
|
||||
res, err := s.popRPC(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.(*empty.Empty), nil
|
||||
}
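A test drives this mock by queueing expectations before issuing client calls. The sketch below shows only the setup shape; the gRPC client wiring against srv.Addr is elided, and the placeholder response is an assumption:

func mockUsageSketch(t *testing.T) { // illustrative, not part of the vendored file
	srv, err := newMockServer()
	if err != nil {
		t.Fatal(err)
	}
	// nil wantReq disables the proto.Equal check in popRPC, so this
	// expectation matches whatever BeginTransaction request arrives.
	srv.addRPC(nil, &pb.BeginTransactionResponse{})
	// ... dial srv.Addr, make the call under test, assert on the result ...
	srv.reset() // drop any remaining expectations between cases
}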
|
182
vendor/cloud.google.com/go/firestore/options.go
generated
vendored
Normal file
@ -0,0 +1,182 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package firestore
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
|
||||
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
)
|
||||
|
||||
// A Precondition modifies a Firestore update or delete operation.
|
||||
type Precondition interface {
|
||||
// Returns the corresponding Precondition proto.
|
||||
preconditionProto() (*pb.Precondition, error)
|
||||
}
|
||||
|
||||
// Exists returns a Precondition that checks for the existence or non-existence
|
||||
// of a resource before writing to it. If the check fails, the write does not occur.
|
||||
func Exists(b bool) Precondition { return exists(b) }
|
||||
|
||||
type exists bool
|
||||
|
||||
func (e exists) preconditionProto() (*pb.Precondition, error) {
|
||||
return &pb.Precondition{
|
||||
ConditionType: &pb.Precondition_Exists{bool(e)},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (e exists) String() string { return fmt.Sprintf("Exists(%t)", e) }
|
||||
|
||||
// LastUpdateTime returns a Precondition that checks that a resource must exist and
|
||||
// must have last been updated at the given time. If the check fails, the write
|
||||
// does not occur.
|
||||
func LastUpdateTime(t time.Time) Precondition { return lastUpdateTime(t) }
|
||||
|
||||
type lastUpdateTime time.Time
|
||||
|
||||
func (u lastUpdateTime) preconditionProto() (*pb.Precondition, error) {
|
||||
ts, err := ptypes.TimestampProto(time.Time(u))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &pb.Precondition{
|
||||
ConditionType: &pb.Precondition_UpdateTime{ts},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (u lastUpdateTime) String() string { return fmt.Sprintf("LastUpdateTime(%s)", time.Time(u)) }
|
||||
|
||||
func processPreconditionsForDelete(preconds []Precondition) (*pb.Precondition, error) {
|
||||
// At most one option permitted.
|
||||
switch len(preconds) {
|
||||
case 0:
|
||||
return nil, nil
|
||||
case 1:
|
||||
return preconds[0].preconditionProto()
|
||||
default:
|
||||
return nil, fmt.Errorf("firestore: conflicting preconditions: %+v", preconds)
|
||||
}
|
||||
}
|
||||
|
||||
func processPreconditionsForUpdate(preconds []Precondition) (*pb.Precondition, error) {
|
||||
// At most one option permitted, and it cannot be Exists.
|
||||
switch len(preconds) {
|
||||
case 0:
|
||||
// If the user doesn't provide any options, default to Exists(true).
|
||||
return exists(true).preconditionProto()
|
||||
case 1:
|
||||
if _, ok := preconds[0].(exists); ok {
|
||||
return nil, errors.New("Cannot use Exists with Update")
|
||||
}
|
||||
return preconds[0].preconditionProto()
|
||||
default:
|
||||
return nil, fmt.Errorf("firestore: conflicting preconditions: %+v", preconds)
|
||||
}
|
||||
}
|
||||
|
||||
func processPreconditionsForVerify(preconds []Precondition) (*pb.Precondition, error) {
|
||||
// At most one option permitted.
|
||||
switch len(preconds) {
|
||||
case 0:
|
||||
return nil, nil
|
||||
case 1:
|
||||
return preconds[0].preconditionProto()
|
||||
default:
|
||||
return nil, fmt.Errorf("firestore: conflicting preconditions: %+v", preconds)
|
||||
}
|
||||
}
|
||||
|
||||
// A SetOption modifies a Firestore set operation.
|
||||
type SetOption interface {
|
||||
fieldPaths() (fps []FieldPath, all bool, err error)
|
||||
}
|
||||
|
||||
// MergeAll is a SetOption that causes all the field paths given in the data argument
|
||||
// to Set to be overwritten. It is not supported for struct data.
|
||||
var MergeAll SetOption = merge{all: true}
|
||||
|
||||
// Merge returns a SetOption that causes only the given field paths to be
|
||||
// overwritten. Other fields on the existing document will be untouched. It is an
|
||||
// error if a provided field path does not refer to a value in the data passed to
|
||||
// Set.
|
||||
//
|
||||
// Each element of fieldPaths must be a single field or a dot-separated sequence of
|
||||
// fields, none of which contain the runes "˜*/[]". Use MergePaths instead for such
|
||||
// paths.
|
||||
func Merge(fieldPaths ...string) SetOption {
|
||||
fps, err := parseDotSeparatedStrings(fieldPaths)
|
||||
if err != nil {
|
||||
return merge{err: err}
|
||||
}
|
||||
return merge{paths: fps}
|
||||
}
|
||||
|
||||
// MergePaths returns a SetOption that causes only the given field paths to be
|
||||
// overwritten. Other fields on the existing document will be untouched. It is an
|
||||
// error if a provided field path does not refer to a value in the data passed to
|
||||
// Set.
|
||||
func MergePaths(fps ...FieldPath) SetOption {
|
||||
for _, fp := range fps {
|
||||
if err := fp.validate(); err != nil {
|
||||
return merge{err: err}
|
||||
}
|
||||
}
|
||||
return merge{paths: fps}
|
||||
}
|
||||
|
||||
type merge struct {
|
||||
all bool
|
||||
paths []FieldPath
|
||||
err error
|
||||
}
|
||||
|
||||
func (m merge) String() string {
|
||||
if m.err != nil {
|
||||
return fmt.Sprintf("<Merge error: %v>", m.err)
|
||||
}
|
||||
if m.all {
|
||||
return "MergeAll"
|
||||
}
|
||||
return fmt.Sprintf("Merge(%+v)", m.paths)
|
||||
}
|
||||
|
||||
func (m merge) fieldPaths() (fps []FieldPath, all bool, err error) {
|
||||
if m.err != nil {
|
||||
return nil, false, m.err
|
||||
}
|
||||
if err := checkNoDupOrPrefix(m.paths); err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
if m.all {
|
||||
return nil, true, nil
|
||||
}
|
||||
return m.paths, false, nil
|
||||
}
|
||||
|
||||
func processSetOptions(opts []SetOption) (fps []FieldPath, all bool, err error) {
|
||||
switch len(opts) {
|
||||
case 0:
|
||||
return nil, false, nil
|
||||
case 1:
|
||||
return opts[0].fieldPaths()
|
||||
default:
|
||||
return nil, false, fmt.Errorf("conflicting options: %+v", opts)
|
||||
}
|
||||
}
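As the Merge documentation above notes, field names containing the listed runes must go through MergePaths, and updates take at most one non-Exists precondition. A hedged usage sketch (doc, ctx and t are assumed to exist; FieldPath is used here as a slice of field names):

// A field literally named "*" cannot be expressed as a dot-separated
// string, so address it with a FieldPath instead of Merge.
_, err := doc.Set(ctx, map[string]interface{}{"*": 1},
	firestore.MergePaths(firestore.FieldPath{"*"}))
if err != nil {
	// TODO: Handle error.
}

// Updates accept at most one precondition and reject Exists; with no
// option given, processPreconditionsForUpdate defaults to Exists(true).
_, err = doc.UpdateMap(ctx, map[string]interface{}{"n": 1}, firestore.LastUpdateTime(t))
if err != nil {
	// TODO: Handle error.
}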
|
155
vendor/cloud.google.com/go/firestore/options_test.go
generated
vendored
Normal file
@ -0,0 +1,155 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package firestore
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
|
||||
)
|
||||
|
||||
func TestProcessPreconditionsForVerify(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in []Precondition
|
||||
want *pb.Precondition
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
in: nil,
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
in: []Precondition{},
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
in: []Precondition{Exists(true)},
|
||||
want: &pb.Precondition{&pb.Precondition_Exists{true}},
|
||||
},
|
||||
{
|
||||
in: []Precondition{LastUpdateTime(aTime)},
|
||||
want: &pb.Precondition{&pb.Precondition_UpdateTime{aTimestamp}},
|
||||
},
|
||||
{
|
||||
in: []Precondition{Exists(true), LastUpdateTime(aTime)},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
in: []Precondition{Exists(true), Exists(true)},
|
||||
wantErr: true,
|
||||
},
|
||||
} {
|
||||
got, err := processPreconditionsForVerify(test.in)
|
||||
switch {
|
||||
case test.wantErr && err == nil:
|
||||
t.Errorf("%v: got nil, want error", test.in)
|
||||
case !test.wantErr && err != nil:
|
||||
t.Errorf("%v: got <%v>, want no error", test.in, err)
|
||||
case !test.wantErr && err == nil && !testEqual(got, test.want):
|
||||
t.Errorf("%v: got %+v, want %v", test.in, got, test.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessPreconditionsForDelete(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in []Precondition
|
||||
want *pb.Precondition
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
in: nil,
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
in: []Precondition{},
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
in: []Precondition{Exists(true)},
|
||||
want: &pb.Precondition{&pb.Precondition_Exists{true}},
|
||||
},
|
||||
{
|
||||
in: []Precondition{LastUpdateTime(aTime)},
|
||||
want: &pb.Precondition{&pb.Precondition_UpdateTime{aTimestamp}},
|
||||
},
|
||||
{
|
||||
in: []Precondition{Exists(true), LastUpdateTime(aTime)},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
in: []Precondition{Exists(true), Exists(true)},
|
||||
wantErr: true,
|
||||
},
|
||||
} {
|
||||
got, err := processPreconditionsForDelete(test.in)
|
||||
switch {
|
||||
case test.wantErr && err == nil:
|
||||
t.Errorf("%v: got nil, want error", test.in)
|
||||
case !test.wantErr && err != nil:
|
||||
t.Errorf("%v: got <%v>, want no error", test.in, err)
|
||||
case !test.wantErr && err == nil && !testEqual(got, test.want):
|
||||
t.Errorf("%v: got %+v, want %v", test.in, got, test.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessPreconditionsForUpdate(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in []Precondition
|
||||
want *pb.Precondition
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
in: nil,
|
||||
want: &pb.Precondition{&pb.Precondition_Exists{true}},
|
||||
},
|
||||
{
|
||||
in: []Precondition{},
|
||||
want: &pb.Precondition{&pb.Precondition_Exists{true}},
|
||||
},
|
||||
|
||||
{
|
||||
in: []Precondition{Exists(true)},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
in: []Precondition{Exists(false)},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
in: []Precondition{LastUpdateTime(aTime)},
|
||||
want: &pb.Precondition{&pb.Precondition_UpdateTime{aTimestamp}},
|
||||
},
|
||||
{
|
||||
in: []Precondition{Exists(true), LastUpdateTime(aTime)},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
in: []Precondition{Exists(true), Exists(true)},
|
||||
wantErr: true,
|
||||
},
|
||||
} {
|
||||
got, err := processPreconditionsForUpdate(test.in)
|
||||
switch {
|
||||
case test.wantErr && err == nil:
|
||||
t.Errorf("%v: got nil, want error", test.in)
|
||||
case !test.wantErr && err != nil:
|
||||
t.Errorf("%v: got <%v>, want no error", test.in, err)
|
||||
case !test.wantErr && err == nil && !testEqual(got, test.want):
|
||||
t.Errorf("%v: got %+v, want %v", test.in, got, test.want)
|
||||
}
|
||||
}
|
||||
}
|
447
vendor/cloud.google.com/go/firestore/query.go
generated
vendored
Normal file
@ -0,0 +1,447 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package firestore
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"reflect"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
|
||||
|
||||
"github.com/golang/protobuf/ptypes/wrappers"
|
||||
"google.golang.org/api/iterator"
|
||||
)
|
||||
|
||||
// Query represents a Firestore query.
|
||||
//
|
||||
// Query values are immutable. Each Query method creates
|
||||
// a new Query; it does not modify the old.
|
||||
type Query struct {
|
||||
c *Client
|
||||
parentPath string // path of the collection's parent
|
||||
collectionID string
|
||||
selection []FieldPath
|
||||
filters []filter
|
||||
orders []order
|
||||
offset int32
|
||||
limit *wrappers.Int32Value
|
||||
startVals, endVals []interface{}
|
||||
startBefore, endBefore bool
|
||||
err error
|
||||
}
|
||||
|
||||
// DocumentID is the special field name representing the ID of a document
|
||||
// in queries.
|
||||
const DocumentID = "__name__"
|
||||
|
||||
// Select returns a new Query that specifies the field paths
|
||||
// to return from the result documents.
|
||||
func (q Query) Select(fieldPaths ...string) Query {
|
||||
var fps []FieldPath
|
||||
for _, s := range fieldPaths {
|
||||
fp, err := parseDotSeparatedString(s)
|
||||
if err != nil {
|
||||
q.err = err
|
||||
return q
|
||||
}
|
||||
fps = append(fps, fp)
|
||||
}
|
||||
if fps == nil {
|
||||
q.selection = []FieldPath{{DocumentID}}
|
||||
} else {
|
||||
q.selection = fps
|
||||
}
|
||||
return q
|
||||
}
|
||||
|
||||
// SelectPaths returns a new Query that specifies the field paths
|
||||
// to return from the result documents.
|
||||
func (q Query) SelectPaths(fieldPaths ...FieldPath) Query {
|
||||
q.selection = fieldPaths
|
||||
return q
|
||||
}
|
||||
|
||||
// Where returns a new Query that filters the set of results.
|
||||
// A Query can have multiple filters.
|
||||
func (q Query) Where(fieldPath, op string, value interface{}) Query {
|
||||
fp, err := parseDotSeparatedString(fieldPath)
|
||||
if err != nil {
|
||||
q.err = err
|
||||
return q
|
||||
}
|
||||
q.filters = append(append([]filter(nil), q.filters...), filter{fp, op, value})
|
||||
return q
|
||||
}
|
||||
|
||||
// WherePath returns a new Query that filters the set of results.
|
||||
// A Query can have multiple filters.
|
||||
func (q Query) WherePath(fp FieldPath, op string, value interface{}) Query {
|
||||
q.filters = append(append([]filter(nil), q.filters...), filter{fp, op, value})
|
||||
return q
|
||||
}
|
||||
|
||||
// Direction is the sort direction for result ordering.
|
||||
type Direction int32
|
||||
|
||||
const (
|
||||
// Asc sorts results from smallest to largest.
|
||||
Asc Direction = Direction(pb.StructuredQuery_ASCENDING)
|
||||
|
||||
// Desc sorts results from largest to smallest.
|
||||
Desc Direction = Direction(pb.StructuredQuery_DESCENDING)
|
||||
)
|
||||
|
||||
// OrderBy returns a new Query that specifies the order in which results are
|
||||
// returned. A Query can have multiple OrderBy/OrderByPath specifications. OrderBy
|
||||
// appends the specification to the list of existing ones.
|
||||
//
|
||||
// To order by document name, use the special field path DocumentID.
|
||||
func (q Query) OrderBy(fieldPath string, dir Direction) Query {
|
||||
fp, err := parseDotSeparatedString(fieldPath)
|
||||
if err != nil {
|
||||
q.err = err
|
||||
return q
|
||||
}
|
||||
q.orders = append(append([]order(nil), q.orders...), order{fp, dir})
|
||||
return q
|
||||
}
|
||||
|
||||
// OrderByPath returns a new Query that specifies the order in which results are
|
||||
// returned. A Query can have multiple OrderBy/OrderByPath specifications.
|
||||
// OrderByPath appends the specification to the list of existing ones.
|
||||
func (q Query) OrderByPath(fp FieldPath, dir Direction) Query {
|
||||
q.orders = append(append([]order(nil), q.orders...), order{fp, dir})
|
||||
return q
|
||||
}
|
||||
|
||||
// Offset returns a new Query that specifies the number of initial results to skip.
|
||||
// It must not be negative.
|
||||
func (q Query) Offset(n int) Query {
|
||||
q.offset = trunc32(n)
|
||||
return q
|
||||
}
|
||||
|
||||
// Limit returns a new Query that specifies the maximum number of results to return.
|
||||
// It must not be negative.
|
||||
func (q Query) Limit(n int) Query {
|
||||
q.limit = &wrappers.Int32Value{trunc32(n)}
|
||||
return q
|
||||
}
|
||||
|
||||
// StartAt returns a new Query that specifies that results should start at
|
||||
// the document with the given field values. The field path corresponding to
|
||||
// each value is taken from the corresponding OrderBy call. For example, in
|
||||
// q.OrderBy("X", Asc).OrderBy("Y", Desc).StartAt(1, 2)
|
||||
// results will begin at the first document where X = 1 and Y = 2.
|
||||
//
|
||||
// If an OrderBy call uses the special DocumentID field path, the corresponding value
|
||||
// should be the document ID relative to the query's collection. For example, to
|
||||
// start at the document "NewYork" in the "States" collection, write
|
||||
//
|
||||
// client.Collection("States").OrderBy(DocumentID, firestore.Asc).StartAt("NewYork")
|
||||
//
|
||||
// Calling StartAt overrides a previous call to StartAt or StartAfter.
|
||||
func (q Query) StartAt(fieldValues ...interface{}) Query {
|
||||
q.startVals, q.startBefore = fieldValues, true
|
||||
return q
|
||||
}
|
||||
|
||||
// StartAfter returns a new Query that specifies that results should start just after
|
||||
// the document with the given field values. See Query.StartAt for more information.
|
||||
//
|
||||
// Calling StartAfter overrides a previous call to StartAt or StartAfter.
|
||||
func (q Query) StartAfter(fieldValues ...interface{}) Query {
|
||||
q.startVals, q.startBefore = fieldValues, false
|
||||
return q
|
||||
}
|
||||
|
||||
// EndAt returns a new Query that specifies that results should end at the
|
||||
// document with the given field values. See Query.StartAt for more information.
|
||||
//
|
||||
// Calling EndAt overrides a previous call to EndAt or EndBefore.
|
||||
func (q Query) EndAt(fieldValues ...interface{}) Query {
|
||||
q.endVals, q.endBefore = fieldValues, false
|
||||
return q
|
||||
}
|
||||
|
||||
// EndBefore returns a new Query that specifies that results should end just before
|
||||
// the document with the given field values. See Query.StartAt for more information.
|
||||
//
|
||||
// Calling EndBefore overrides a previous call to EndAt or EndBefore.
|
||||
func (q Query) EndBefore(fieldValues ...interface{}) Query {
|
||||
q.endVals, q.endBefore = fieldValues, true
|
||||
return q
|
||||
}
|
||||
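// Illustrative sketch, not part of the upstream file: how the builder
// methods above compose. Each call returns a new Query, so a chain never
// mutates its receiver. The collection "States", the field "pop" and the
// document ID "NewYork" are assumptions for the example.
func exampleStatesQuery(c *Client) Query {
	return c.Collection("States").
		Where("pop", ">", 1000000).
		OrderBy(DocumentID, Asc).
		StartAt("NewYork").
		Limit(10)
}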
|
||||
func (q Query) query() *Query { return &q }
|
||||
|
||||
func (q Query) toProto() (*pb.StructuredQuery, error) {
|
||||
if q.err != nil {
|
||||
return nil, q.err
|
||||
}
|
||||
if q.collectionID == "" {
|
||||
return nil, errors.New("firestore: query created without CollectionRef")
|
||||
}
|
||||
p := &pb.StructuredQuery{
|
||||
From: []*pb.StructuredQuery_CollectionSelector{{CollectionId: q.collectionID}},
|
||||
Offset: q.offset,
|
||||
Limit: q.limit,
|
||||
}
|
||||
if len(q.selection) > 0 {
|
||||
p.Select = &pb.StructuredQuery_Projection{}
|
||||
for _, fp := range q.selection {
|
||||
if err := fp.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.Select.Fields = append(p.Select.Fields, fref(fp))
|
||||
}
|
||||
}
|
||||
// If there is only one filter, use it directly. Otherwise, construct
|
||||
// a CompositeFilter.
|
||||
if len(q.filters) == 1 {
|
||||
pf, err := q.filters[0].toProto()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.Where = pf
|
||||
} else if len(q.filters) > 1 {
|
||||
cf := &pb.StructuredQuery_CompositeFilter{
|
||||
Op: pb.StructuredQuery_CompositeFilter_AND,
|
||||
}
|
||||
p.Where = &pb.StructuredQuery_Filter{
|
||||
FilterType: &pb.StructuredQuery_Filter_CompositeFilter{cf},
|
||||
}
|
||||
for _, f := range q.filters {
|
||||
pf, err := f.toProto()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cf.Filters = append(cf.Filters, pf)
|
||||
}
|
||||
}
|
||||
for _, ord := range q.orders {
|
||||
po, err := ord.toProto()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.OrderBy = append(p.OrderBy, po)
|
||||
}
|
||||
// StartAt and EndAt must have values that correspond exactly to the explicit order-by fields.
|
||||
if len(q.startVals) != 0 {
|
||||
vals, err := q.toPositionValues(q.startVals)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.StartAt = &pb.Cursor{Values: vals, Before: q.startBefore}
|
||||
}
|
||||
if len(q.endVals) != 0 {
|
||||
vals, err := q.toPositionValues(q.endVals)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.EndAt = &pb.Cursor{Values: vals, Before: q.endBefore}
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// toPositionValues converts the field values to protos.
|
||||
func (q *Query) toPositionValues(fieldValues []interface{}) ([]*pb.Value, error) {
|
||||
if len(fieldValues) != len(q.orders) {
|
||||
return nil, errors.New("firestore: number of field values in StartAt/StartAfter/EndAt/EndBefore does not match number of OrderBy fields")
|
||||
}
|
||||
vals := make([]*pb.Value, len(fieldValues))
|
||||
var err error
|
||||
for i, ord := range q.orders {
|
||||
fval := fieldValues[i]
|
||||
if len(ord.fieldPath) == 1 && ord.fieldPath[0] == DocumentID {
|
||||
docID, ok := fval.(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("firestore: expected doc ID for DocumentID field, got %T", fval)
|
||||
}
|
||||
vals[i] = &pb.Value{&pb.Value_ReferenceValue{q.parentPath + "/documents/" + q.collectionID + "/" + docID}}
|
||||
} else {
|
||||
vals[i], err = toProtoValue(reflect.ValueOf(fval))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return vals, nil
|
||||
}
|
||||
|
||||
type filter struct {
|
||||
fieldPath FieldPath
|
||||
op string
|
||||
value interface{}
|
||||
}
|
||||
|
||||
func (f filter) toProto() (*pb.StructuredQuery_Filter, error) {
|
||||
if err := f.fieldPath.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var op pb.StructuredQuery_FieldFilter_Operator
|
||||
switch f.op {
|
||||
case "<":
|
||||
op = pb.StructuredQuery_FieldFilter_LESS_THAN
|
||||
case "<=":
|
||||
op = pb.StructuredQuery_FieldFilter_LESS_THAN_OR_EQUAL
|
||||
case ">":
|
||||
op = pb.StructuredQuery_FieldFilter_GREATER_THAN
|
||||
case ">=":
|
||||
op = pb.StructuredQuery_FieldFilter_GREATER_THAN_OR_EQUAL
|
||||
case "==":
|
||||
op = pb.StructuredQuery_FieldFilter_EQUAL
|
||||
default:
|
||||
return nil, fmt.Errorf("firestore: invalid operator %q", f.op)
|
||||
}
|
||||
val, err := toProtoValue(reflect.ValueOf(f.value))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &pb.StructuredQuery_Filter{
|
||||
FilterType: &pb.StructuredQuery_Filter_FieldFilter{
|
||||
&pb.StructuredQuery_FieldFilter{
|
||||
Field: fref(f.fieldPath),
|
||||
Op: op,
|
||||
Value: val,
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
type order struct {
|
||||
fieldPath FieldPath
|
||||
dir Direction
|
||||
}
|
||||
|
||||
func (r order) toProto() (*pb.StructuredQuery_Order, error) {
|
||||
if err := r.fieldPath.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &pb.StructuredQuery_Order{
|
||||
Field: fref(r.fieldPath),
|
||||
Direction: pb.StructuredQuery_Direction(r.dir),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func fref(fp FieldPath) *pb.StructuredQuery_FieldReference {
|
||||
return &pb.StructuredQuery_FieldReference{fp.toServiceFieldPath()}
|
||||
}
|
||||
|
||||
func trunc32(i int) int32 {
|
||||
if i > math.MaxInt32 {
|
||||
i = math.MaxInt32
|
||||
}
|
||||
return int32(i)
|
||||
}
|
||||
|
||||
// Documents returns an iterator over the query's resulting documents.
|
||||
func (q Query) Documents(ctx context.Context) *DocumentIterator {
|
||||
return &DocumentIterator{
|
||||
ctx: withResourceHeader(ctx, q.c.path()),
|
||||
q: &q,
|
||||
err: checkTransaction(ctx),
|
||||
}
|
||||
}
|
||||
|
||||
// DocumentIterator is an iterator over documents returned by a query.
|
||||
type DocumentIterator struct {
|
||||
ctx context.Context
|
||||
q *Query
|
||||
tid []byte // transaction ID, if any
|
||||
streamClient pb.Firestore_RunQueryClient
|
||||
err error
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there
|
||||
// are no more results. Once Next returns Done, all subsequent calls will return
|
||||
// Done.
|
||||
func (it *DocumentIterator) Next() (*DocumentSnapshot, error) {
|
||||
if it.err != nil {
|
||||
return nil, it.err
|
||||
}
|
||||
client := it.q.c
|
||||
if it.streamClient == nil {
|
||||
sq, err := it.q.toProto()
|
||||
if err != nil {
|
||||
it.err = err
|
||||
return nil, err
|
||||
}
|
||||
req := &pb.RunQueryRequest{
|
||||
Parent: it.q.parentPath,
|
||||
QueryType: &pb.RunQueryRequest_StructuredQuery{sq},
|
||||
}
|
||||
if it.tid != nil {
|
||||
req.ConsistencySelector = &pb.RunQueryRequest_Transaction{it.tid}
|
||||
}
|
||||
it.streamClient, it.err = client.c.RunQuery(it.ctx, req)
|
||||
if it.err != nil {
|
||||
return nil, it.err
|
||||
}
|
||||
}
|
||||
var res *pb.RunQueryResponse
|
||||
var err error
|
||||
for {
|
||||
res, err = it.streamClient.Recv()
|
||||
if err == io.EOF {
|
||||
err = iterator.Done
|
||||
}
|
||||
if err != nil {
|
||||
it.err = err
|
||||
return nil, it.err
|
||||
}
|
||||
if res.Document != nil {
|
||||
break
|
||||
}
|
||||
// No document => partial progress; keep receiving.
|
||||
}
|
||||
docRef, err := pathToDoc(res.Document.Name, client)
|
||||
if err != nil {
|
||||
it.err = err
|
||||
return nil, err
|
||||
}
|
||||
doc, err := newDocumentSnapshot(docRef, res.Document, client)
|
||||
if err != nil {
|
||||
it.err = err
|
||||
return nil, err
|
||||
}
|
||||
return doc, nil
|
||||
}
|
||||
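// Illustrative sketch, not part of the upstream file: the Next/iterator.Done
// protocol documented above, written as a loop that drains an iterator into
// a slice (essentially what GetAll below does).
func drainIterator(it *DocumentIterator) ([]*DocumentSnapshot, error) {
	var docs []*DocumentSnapshot
	for {
		doc, err := it.Next()
		if err == iterator.Done {
			return docs, nil
		}
		if err != nil {
			return nil, err
		}
		docs = append(docs, doc)
	}
}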
|
||||
// GetAll returns all the documents remaining from the iterator.
|
||||
func (it *DocumentIterator) GetAll() ([]*DocumentSnapshot, error) {
|
||||
var docs []*DocumentSnapshot
|
||||
for {
|
||||
doc, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
docs = append(docs, doc)
|
||||
}
|
||||
return docs, nil
|
||||
}
|
||||
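// Illustrative sketch, not part of the upstream file: typical use of
// Documents followed by GetAll to run a query and collect every result.
// The collection "States" and the field "pop" are assumptions.
func countLargeStates(ctx context.Context, c *Client) (int, error) {
	docs, err := c.Collection("States").Where("pop", ">", 1000000).Documents(ctx).GetAll()
	if err != nil {
		return 0, err
	}
	return len(docs), nil
}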
|
||||
// TODO(jba): Does the iterator need a Stop or Close method? I don't think so--
|
||||
// I don't think the client can terminate a streaming receive except perhaps
|
||||
// by cancelling the context, and the user can do that themselves if they wish.
|
||||
// Find out for sure.
|
381
vendor/cloud.google.com/go/firestore/query_test.go
generated
vendored
Normal file
@ -0,0 +1,381 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package firestore
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"cloud.google.com/go/internal/pretty"
|
||||
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
|
||||
|
||||
"github.com/golang/protobuf/ptypes/wrappers"
|
||||
)
|
||||
|
||||
func TestQueryToProto(t *testing.T) {
|
||||
c := &Client{}
|
||||
coll := c.Collection("C")
|
||||
q := coll.Query
|
||||
aFilter, err := filter{[]string{"a"}, ">", 5}.toProto()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
bFilter, err := filter{[]string{"b"}, "<", "foo"}.toProto()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
slashStarFilter, err := filter{[]string{"/", "*"}, ">", 5}.toProto()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
type S struct {
|
||||
A int `firestore:"a"`
|
||||
}
|
||||
for _, test := range []struct {
|
||||
in Query
|
||||
want *pb.StructuredQuery
|
||||
}{
|
||||
{
|
||||
in: q.Select(),
|
||||
want: &pb.StructuredQuery{
|
||||
Select: &pb.StructuredQuery_Projection{
|
||||
Fields: []*pb.StructuredQuery_FieldReference{fref1("__name__")},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: q.Select("a", "b"),
|
||||
want: &pb.StructuredQuery{
|
||||
Select: &pb.StructuredQuery_Projection{
|
||||
Fields: []*pb.StructuredQuery_FieldReference{fref1("a"), fref1("b")},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: q.Select("a", "b").Select("c"), // last wins
|
||||
want: &pb.StructuredQuery{
|
||||
Select: &pb.StructuredQuery_Projection{
|
||||
Fields: []*pb.StructuredQuery_FieldReference{fref1("c")},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: q.SelectPaths([]string{"*"}, []string{"/"}),
|
||||
want: &pb.StructuredQuery{
|
||||
Select: &pb.StructuredQuery_Projection{
|
||||
Fields: []*pb.StructuredQuery_FieldReference{fref1("*"), fref1("/")},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: q.Where("a", ">", 5),
|
||||
want: &pb.StructuredQuery{Where: aFilter},
|
||||
},
|
||||
{
|
||||
in: q.Where("a", ">", 5).Where("b", "<", "foo"),
|
||||
want: &pb.StructuredQuery{
|
||||
Where: &pb.StructuredQuery_Filter{
|
||||
&pb.StructuredQuery_Filter_CompositeFilter{
|
||||
&pb.StructuredQuery_CompositeFilter{
|
||||
Op: pb.StructuredQuery_CompositeFilter_AND,
|
||||
Filters: []*pb.StructuredQuery_Filter{
|
||||
aFilter, bFilter,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: q.WherePath([]string{"/", "*"}, ">", 5),
|
||||
want: &pb.StructuredQuery{Where: slashStarFilter},
|
||||
},
|
||||
{
|
||||
in: q.OrderBy("b", Asc).OrderBy("a", Desc).OrderByPath([]string{"~"}, Asc),
|
||||
want: &pb.StructuredQuery{
|
||||
OrderBy: []*pb.StructuredQuery_Order{
|
||||
{fref1("b"), pb.StructuredQuery_ASCENDING},
|
||||
{fref1("a"), pb.StructuredQuery_DESCENDING},
|
||||
{fref1("~"), pb.StructuredQuery_ASCENDING},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: q.Offset(2).Limit(3),
|
||||
want: &pb.StructuredQuery{
|
||||
Offset: 2,
|
||||
Limit: &wrappers.Int32Value{3},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: q.Offset(2).Limit(3).Limit(4).Offset(5), // last wins
|
||||
want: &pb.StructuredQuery{
|
||||
Offset: 5,
|
||||
Limit: &wrappers.Int32Value{4},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: q.OrderBy("a", Asc).StartAt(7).EndBefore(9),
|
||||
want: &pb.StructuredQuery{
|
||||
OrderBy: []*pb.StructuredQuery_Order{
|
||||
{fref1("a"), pb.StructuredQuery_ASCENDING},
|
||||
},
|
||||
StartAt: &pb.Cursor{
|
||||
Values: []*pb.Value{intval(7)},
|
||||
Before: true,
|
||||
},
|
||||
EndAt: &pb.Cursor{
|
||||
Values: []*pb.Value{intval(9)},
|
||||
Before: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: q.OrderBy("a", Asc).StartAt(7).EndBefore(9),
|
||||
want: &pb.StructuredQuery{
|
||||
OrderBy: []*pb.StructuredQuery_Order{
|
||||
{fref1("a"), pb.StructuredQuery_ASCENDING},
|
||||
},
|
||||
StartAt: &pb.Cursor{
|
||||
Values: []*pb.Value{intval(7)},
|
||||
Before: true,
|
||||
},
|
||||
EndAt: &pb.Cursor{
|
||||
Values: []*pb.Value{intval(9)},
|
||||
Before: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: q.OrderBy("a", Asc).StartAfter(7).EndAt(9),
|
||||
want: &pb.StructuredQuery{
|
||||
OrderBy: []*pb.StructuredQuery_Order{
|
||||
{fref1("a"), pb.StructuredQuery_ASCENDING},
|
||||
},
|
||||
StartAt: &pb.Cursor{
|
||||
Values: []*pb.Value{intval(7)},
|
||||
Before: false,
|
||||
},
|
||||
EndAt: &pb.Cursor{
|
||||
Values: []*pb.Value{intval(9)},
|
||||
Before: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: q.OrderBy("a", Asc).OrderBy("b", Desc).StartAfter(7, 8).EndAt(9, 10),
|
||||
want: &pb.StructuredQuery{
|
||||
OrderBy: []*pb.StructuredQuery_Order{
|
||||
{fref1("a"), pb.StructuredQuery_ASCENDING},
|
||||
{fref1("b"), pb.StructuredQuery_DESCENDING},
|
||||
},
|
||||
StartAt: &pb.Cursor{
|
||||
Values: []*pb.Value{intval(7), intval(8)},
|
||||
Before: false,
|
||||
},
|
||||
EndAt: &pb.Cursor{
|
||||
Values: []*pb.Value{intval(9), intval(10)},
|
||||
Before: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
// last of StartAt/After wins, same for End
|
||||
in: q.OrderBy("a", Asc).
|
||||
StartAfter(1).StartAt(2).
|
||||
EndAt(3).EndBefore(4),
|
||||
want: &pb.StructuredQuery{
|
||||
OrderBy: []*pb.StructuredQuery_Order{
|
||||
{fref1("a"), pb.StructuredQuery_ASCENDING},
|
||||
},
|
||||
StartAt: &pb.Cursor{
|
||||
Values: []*pb.Value{intval(2)},
|
||||
Before: true,
|
||||
},
|
||||
EndAt: &pb.Cursor{
|
||||
Values: []*pb.Value{intval(4)},
|
||||
Before: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
|
||||
got, err := test.in.toProto()
|
||||
if err != nil {
|
||||
t.Fatalf("%+v: %v", test.in, err)
|
||||
}
|
||||
test.want.From = []*pb.StructuredQuery_CollectionSelector{{CollectionId: "C"}}
|
||||
if !testEqual(got, test.want) {
|
||||
t.Errorf("%+v: got\n%v\nwant\n%v", test.in, pretty.Value(got), pretty.Value(test.want))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func fref1(s string) *pb.StructuredQuery_FieldReference {
|
||||
return fref([]string{s})
|
||||
}
|
||||
|
||||
func TestQueryToProtoErrors(t *testing.T) {
|
||||
q := (&Client{}).Collection("C").Query
|
||||
for _, query := range []Query{
|
||||
Query{}, // no collection ID
|
||||
q.Where("x", "!=", 1), // invalid operator
|
||||
q.Where("~", ">", 1), // invalid path
|
||||
q.WherePath([]string{"*", ""}, ">", 1), // invalid path
|
||||
q.StartAt(1), // no OrderBy
|
||||
q.StartAt(2).OrderBy("x", Asc).OrderBy("y", Desc), // wrong # OrderBy
|
||||
q.Select("*"), // invalid path
|
||||
q.SelectPaths([]string{"/", "", "~"}), // invalid path
|
||||
q.OrderBy("[", Asc), // invalid path
|
||||
q.OrderByPath([]string{""}, Desc), // invalid path
|
||||
} {
|
||||
_, err := query.toProto()
|
||||
if err == nil {
|
||||
t.Errorf("%+v: got nil, want error", query)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestQueryMethodsDoNotModifyReceiver(t *testing.T) {
|
||||
var empty Query
|
||||
|
||||
q := Query{}
|
||||
_ = q.Select("a", "b")
|
||||
if !testEqual(q, empty) {
|
||||
t.Errorf("got %+v, want empty", q)
|
||||
}
|
||||
|
||||
q = Query{}
|
||||
q1 := q.Where("a", ">", 3)
|
||||
if !testEqual(q, empty) {
|
||||
t.Errorf("got %+v, want empty", q)
|
||||
}
|
||||
// Extra check because Where appends to a slice.
|
||||
q1before := q.Where("a", ">", 3) // same as q1
|
||||
_ = q1.Where("b", "<", "foo")
|
||||
if !testEqual(q1, q1before) {
|
||||
t.Errorf("got %+v, want %+v", q1, q1before)
|
||||
}
|
||||
|
||||
q = Query{}
|
||||
q1 = q.OrderBy("a", Asc)
|
||||
if !testEqual(q, empty) {
|
||||
t.Errorf("got %+v, want empty", q)
|
||||
}
|
||||
// Extra check because Where appends to a slice.
|
||||
q1before = q.OrderBy("a", Asc) // same as q1
|
||||
_ = q1.OrderBy("b", Desc)
|
||||
if !testEqual(q1, q1before) {
|
||||
t.Errorf("got %+v, want %+v", q1, q1before)
|
||||
}
|
||||
|
||||
q = Query{}
|
||||
_ = q.Offset(5)
|
||||
if !testEqual(q, empty) {
|
||||
t.Errorf("got %+v, want empty", q)
|
||||
}
|
||||
|
||||
q = Query{}
|
||||
_ = q.Limit(5)
|
||||
if !testEqual(q, empty) {
|
||||
t.Errorf("got %+v, want empty", q)
|
||||
}
|
||||
|
||||
q = Query{}
|
||||
_ = q.StartAt(5)
|
||||
if !testEqual(q, empty) {
|
||||
t.Errorf("got %+v, want empty", q)
|
||||
}
|
||||
|
||||
q = Query{}
|
||||
_ = q.StartAfter(5)
|
||||
if !testEqual(q, empty) {
|
||||
t.Errorf("got %+v, want empty", q)
|
||||
}
|
||||
|
||||
q = Query{}
|
||||
_ = q.EndAt(5)
|
||||
if !testEqual(q, empty) {
|
||||
t.Errorf("got %+v, want empty", q)
|
||||
}
|
||||
|
||||
q = Query{}
|
||||
_ = q.EndBefore(5)
|
||||
if !testEqual(q, empty) {
|
||||
t.Errorf("got %+v, want empty", q)
|
||||
}
|
||||
}
|
||||
|
||||
func TestQueryFromCollectionRef(t *testing.T) {
|
||||
c := &Client{}
|
||||
coll := c.Collection("C")
|
||||
got := coll.Select("x").Offset(8)
|
||||
want := Query{
|
||||
c: c,
|
||||
parentPath: c.path(),
|
||||
collectionID: "C",
|
||||
selection: []FieldPath{{"x"}},
|
||||
offset: 8,
|
||||
}
|
||||
if !testEqual(got, want) {
|
||||
t.Fatalf("got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestQueryGetAll(t *testing.T) {
|
||||
// This implicitly tests DocumentIterator as well.
|
||||
const dbPath = "projects/projectID/databases/(default)"
|
||||
ctx := context.Background()
|
||||
c, srv := newMock(t)
|
||||
docNames := []string{"C/a", "C/b"}
|
||||
wantPBDocs := []*pb.Document{
|
||||
{
|
||||
Name: dbPath + "/documents/" + docNames[0],
|
||||
CreateTime: aTimestamp,
|
||||
UpdateTime: aTimestamp,
|
||||
Fields: map[string]*pb.Value{"f": intval(2)},
|
||||
},
|
||||
{
|
||||
Name: dbPath + "/documents/" + docNames[1],
|
||||
CreateTime: aTimestamp2,
|
||||
UpdateTime: aTimestamp3,
|
||||
Fields: map[string]*pb.Value{"f": intval(1)},
|
||||
},
|
||||
}
|
||||
|
||||
srv.addRPC(nil, []interface{}{
|
||||
&pb.RunQueryResponse{Document: wantPBDocs[0]},
|
||||
&pb.RunQueryResponse{Document: wantPBDocs[1]},
|
||||
})
|
||||
gotDocs, err := c.Collection("C").Documents(ctx).GetAll()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got, want := len(gotDocs), len(wantPBDocs); got != want {
|
||||
t.Errorf("got %d docs, wanted %d", got, want)
|
||||
}
|
||||
for i, got := range gotDocs {
|
||||
want, err := newDocumentSnapshot(c.Doc(docNames[i]), wantPBDocs[i], c)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !testEqual(got, want) {
|
||||
// avoid writing a cycle
|
||||
got.c = nil
|
||||
want.c = nil
|
||||
t.Errorf("#%d: got %+v, want %+v", i, pretty.Value(got), pretty.Value(want))
|
||||
}
|
||||
}
|
||||
}
|
23
vendor/cloud.google.com/go/firestore/testdata/fieldpaths.json
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
{
|
||||
"comment": "Field path test suite. Backslash must be escaped in JSON strings.",
|
||||
|
||||
"good": [
|
||||
["a", "a"],
|
||||
["__x__", "__x__"],
|
||||
["aBc0_d1", "aBc0_d1"],
|
||||
["a.b", "a", "b"],
|
||||
["`a`", "a"],
|
||||
["`a`.b", "a", "b"],
|
||||
["`a`.`b`.c", "a", "b", "c"],
|
||||
["`a.b`.c", "a.b", "c"],
|
||||
["`..`.`...`", "..", "..."],
|
||||
["` `", " "],
|
||||
["`\t\t`.` x\t`", "\t\t", " x\t"],
|
||||
["`\b\f\r`", "\b\f\r"],
|
||||
["`a\\`b`.`c\\\\d`", "a`b", "c\\d"],
|
||||
["`\\\\`.`\\`\\``", "\\", "``"]
|
||||
],
|
||||
"bad": ["", " ", "\t", "a.", ".a", "a.b.", "a..b",
|
||||
"`", "``", "`a", "a`", "a`b", "`a`b", "a`b`", "`a`.b`c`",
|
||||
"\\", "\\`"]
|
||||
}
|
256
vendor/cloud.google.com/go/firestore/to_value.go
generated
vendored
Normal file
@ -0,0 +1,256 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package firestore
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal/fields"
|
||||
|
||||
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
|
||||
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"google.golang.org/genproto/googleapis/type/latlng"
|
||||
)
|
||||
|
||||
var nullValue = &pb.Value{&pb.Value_NullValue{}}
|
||||
|
||||
var (
|
||||
typeOfByteSlice = reflect.TypeOf([]byte{})
|
||||
typeOfGoTime = reflect.TypeOf(time.Time{})
|
||||
typeOfLatLng = reflect.TypeOf((*latlng.LatLng)(nil))
|
||||
typeOfDocumentRef = reflect.TypeOf((*DocumentRef)(nil))
|
||||
)
|
||||
|
||||
// toProtoValue converts a Go value to a Firestore Value protobuf.
|
||||
// Some corner cases:
|
||||
// - All nils (nil interface, nil slice, nil map, nil pointer) are converted to
|
||||
// a NullValue (not a nil *pb.Value). toProtoValue never returns (nil, nil).
|
||||
// - An error is returned for uintptr, uint and uint64, because Firestore uses
|
||||
// an int64 to represent integral values, and those types can't be properly
|
||||
// represented in an int64.
|
||||
// - An error is returned for the special Delete value.
|
||||
func toProtoValue(v reflect.Value) (*pb.Value, error) {
|
||||
if !v.IsValid() {
|
||||
return nullValue, nil
|
||||
}
|
||||
vi := v.Interface()
|
||||
if vi == Delete {
|
||||
return nil, errors.New("firestore: cannot use Delete in value")
|
||||
}
|
||||
switch x := vi.(type) {
|
||||
case []byte:
|
||||
return &pb.Value{&pb.Value_BytesValue{x}}, nil
|
||||
case time.Time:
|
||||
ts, err := ptypes.TimestampProto(x)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &pb.Value{&pb.Value_TimestampValue{ts}}, nil
|
||||
case *latlng.LatLng:
|
||||
if x == nil {
|
||||
// gRPC doesn't like nil oneofs. Use NullValue.
|
||||
return nullValue, nil
|
||||
}
|
||||
return &pb.Value{&pb.Value_GeoPointValue{x}}, nil
|
||||
case *DocumentRef:
|
||||
if x == nil {
|
||||
// gRPC doesn't like nil oneofs. Use NullValue.
|
||||
return nullValue, nil
|
||||
}
|
||||
return &pb.Value{&pb.Value_ReferenceValue{x.Path}}, nil
|
||||
// Do not add bool, string, int, etc. to this switch; leave them in the
|
||||
// reflect-based switch below. Moving them here would drop support for
|
||||
// types whose underlying types are those primitives.
|
||||
// E.g. Given "type mybool bool", an ordinary type switch on bool will
|
||||
// not catch a mybool, but the reflect.Kind of a mybool is reflect.Bool.
|
||||
}
|
||||
switch v.Kind() {
|
||||
case reflect.Bool:
|
||||
return &pb.Value{&pb.Value_BooleanValue{v.Bool()}}, nil
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return &pb.Value{&pb.Value_IntegerValue{v.Int()}}, nil
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32:
|
||||
return &pb.Value{&pb.Value_IntegerValue{int64(v.Uint())}}, nil
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return &pb.Value{&pb.Value_DoubleValue{v.Float()}}, nil
|
||||
case reflect.String:
|
||||
return &pb.Value{&pb.Value_StringValue{v.String()}}, nil
|
||||
case reflect.Slice:
|
||||
return sliceToProtoValue(v)
|
||||
case reflect.Map:
|
||||
return mapToProtoValue(v)
|
||||
case reflect.Struct:
|
||||
return structToProtoValue(v)
|
||||
case reflect.Ptr:
|
||||
if v.IsNil() {
|
||||
return nullValue, nil
|
||||
}
|
||||
return toProtoValue(v.Elem())
|
||||
case reflect.Interface:
|
||||
if v.NumMethod() == 0 { // empty interface: recurse on its contents
|
||||
return toProtoValue(v.Elem())
|
||||
}
|
||||
fallthrough // any other interface value is an error
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("firestore: cannot convert type %s to value", v.Type())
|
||||
}
|
||||
}
|
||||
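// Illustrative sketch, not part of the upstream file: the integer corner
// case described in the comment on toProtoValue. Signed integers (and
// unsigned ones up to 32 bits) convert; uint, uint64 and uintptr are
// rejected because Firestore represents integers as int64.
func checkIntegerConversion() error {
	if _, err := toProtoValue(reflect.ValueOf(int64(7))); err != nil {
		return err // unexpected: int64 always fits
	}
	if _, err := toProtoValue(reflect.ValueOf(uint64(7))); err == nil {
		return errors.New("expected an error for uint64")
	}
	return nil
}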
|
||||
func sliceToProtoValue(v reflect.Value) (*pb.Value, error) {
|
||||
// A nil slice is converted to a null value.
|
||||
if v.IsNil() {
|
||||
return nullValue, nil
|
||||
}
|
||||
vals := make([]*pb.Value, v.Len())
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
val, err := toProtoValue(v.Index(i))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vals[i] = val
|
||||
}
|
||||
return &pb.Value{&pb.Value_ArrayValue{&pb.ArrayValue{vals}}}, nil
|
||||
}
|
||||
|
||||
func mapToProtoValue(v reflect.Value) (*pb.Value, error) {
|
||||
if v.Type().Key().Kind() != reflect.String {
|
||||
return nil, errors.New("firestore: map key type must be string")
|
||||
}
|
||||
// A nil map is converted to a null value.
|
||||
if v.IsNil() {
|
||||
return nullValue, nil
|
||||
}
|
||||
m := map[string]*pb.Value{}
|
||||
for _, k := range v.MapKeys() {
|
||||
mi := v.MapIndex(k)
|
||||
if mi.Interface() == ServerTimestamp {
|
||||
continue
|
||||
}
|
||||
val, err := toProtoValue(mi)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m[k.String()] = val
|
||||
}
|
||||
return &pb.Value{&pb.Value_MapValue{&pb.MapValue{m}}}, nil
|
||||
}
|
||||
|
||||
func structToProtoValue(v reflect.Value) (*pb.Value, error) {
|
||||
m := map[string]*pb.Value{}
|
||||
fields, err := fieldCache.Fields(v.Type())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, f := range fields {
|
||||
fv := v.FieldByIndex(f.Index)
|
||||
opts := f.ParsedTag.(tagOptions)
|
||||
if opts.serverTimestamp {
|
||||
continue
|
||||
}
|
||||
if opts.omitEmpty && isEmptyValue(fv) {
|
||||
continue
|
||||
}
|
||||
val, err := toProtoValue(fv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m[f.Name] = val
|
||||
}
|
||||
return &pb.Value{&pb.Value_MapValue{&pb.MapValue{m}}}, nil
|
||||
}
|
||||
|
||||
type tagOptions struct {
|
||||
omitEmpty bool // do not marshal value if empty
|
||||
serverTimestamp bool // set time.Time to server timestamp on write
|
||||
}
|
||||
|
||||
// parseTag interprets firestore struct field tags.
|
||||
func parseTag(t reflect.StructTag) (name string, keep bool, other interface{}, err error) {
|
||||
name, keep, opts, err := parseStandardTag("firestore", t)
|
||||
if err != nil {
|
||||
return "", false, nil, fmt.Errorf("firestore: %v", err)
|
||||
}
|
||||
tagOpts := tagOptions{}
|
||||
for _, opt := range opts {
|
||||
switch opt {
|
||||
case "omitempty":
|
||||
tagOpts.omitEmpty = true
|
||||
case "serverTimestamp":
|
||||
tagOpts.serverTimestamp = true
|
||||
default:
|
||||
return "", false, nil, fmt.Errorf("firestore: unknown tag option: %q", opt)
|
||||
}
|
||||
}
|
||||
return name, keep, tagOpts, nil
|
||||
}
|
||||
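// Illustrative sketch, not part of the upstream file: a struct using the
// firestore tag options parseTag understands. The type and field names are
// assumptions for the example.
type taggedDoc struct {
	Title   string    `firestore:"title"`            // stored under "title"
	Views   int       `firestore:"views,omitempty"`  // omitted when zero
	Secret  string    `firestore:"-"`                // never stored
	Updated time.Time `firestore:",serverTimestamp"` // set to the server time on write
}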
|
||||
// parseStandardTag extracts the sub-tag named by key, then parses it using the
|
||||
// de facto standard format introduced in encoding/json:
|
||||
// "-" means "ignore this tag". It must occur by itself. (parseStandardTag returns an error
|
||||
// in this case, whereas encoding/json accepts the "-" even if it is not alone.)
|
||||
// "<name>" provides an alternative name for the field
|
||||
// "<name>,opt1,opt2,..." specifies options after the name.
|
||||
// The options are returned as a []string.
|
||||
//
|
||||
// TODO(jba): move this into the fields package, and use it elsewhere, like bigquery.
|
||||
func parseStandardTag(key string, t reflect.StructTag) (name string, keep bool, options []string, err error) {
|
||||
s := t.Get(key)
|
||||
parts := strings.Split(s, ",")
|
||||
if parts[0] == "-" {
|
||||
if len(parts) > 1 {
|
||||
return "", false, nil, errors.New(`"-" field tag with options`)
|
||||
}
|
||||
return "", false, nil, nil
|
||||
}
|
||||
return parts[0], true, parts[1:], nil
|
||||
}
|
||||
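// Illustrative sketch, not part of the upstream file: what parseStandardTag
// reports for a few representative tags.
func tagParsingExamples() {
	// Renamed field with an option: name "n", keep == true, options ["omitempty"].
	name, keep, opts, _ := parseStandardTag("firestore", `firestore:"n,omitempty"`)
	_, _, _ = name, keep, opts

	// "-" alone: the field is dropped (keep == false) and there is no error.
	_, keep, _, _ = parseStandardTag("firestore", `firestore:"-"`)
	_ = keep

	// "-" followed by options is an error here, unlike in encoding/json.
	_, _, _, err := parseStandardTag("firestore", `firestore:"-,omitempty"`)
	_ = err // non-nil
}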
|
||||
// isLeafType determines whether or not a type is a 'leaf type'
|
||||
// and should not be recursed into, but considered one field.
|
||||
func isLeafType(t reflect.Type) bool {
|
||||
return t == typeOfGoTime || t == typeOfLatLng
|
||||
}
|
||||
|
||||
var fieldCache = fields.NewCache(parseTag, nil, isLeafType)
|
||||
|
||||
// isEmptyValue is taken from the encoding/json package in the
|
||||
// standard library.
|
||||
// TODO(jba): move to the fields package
|
||||
func isEmptyValue(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||
return v.Len() == 0
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.Interface, reflect.Ptr:
|
||||
return v.IsNil()
|
||||
}
|
||||
if v.Type() == typeOfGoTime {
|
||||
return v.Interface().(time.Time).IsZero()
|
||||
}
|
||||
return false
|
||||
}
|
219
vendor/cloud.google.com/go/firestore/to_value_test.go
generated
vendored
Normal file
@ -0,0 +1,219 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package firestore
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
|
||||
|
||||
"google.golang.org/genproto/googleapis/type/latlng"
|
||||
)
|
||||
|
||||
type testStruct1 struct {
|
||||
B bool
|
||||
I int
|
||||
U uint32
|
||||
F float64
|
||||
S string
|
||||
Y []byte
|
||||
T time.Time
|
||||
G *latlng.LatLng
|
||||
L []int
|
||||
M map[string]int
|
||||
P *int
|
||||
}
|
||||
|
||||
var (
|
||||
p = new(int)
|
||||
|
||||
testVal1 = testStruct1{
|
||||
B: true,
|
||||
I: 1,
|
||||
U: 2,
|
||||
F: 3.0,
|
||||
S: "four",
|
||||
Y: []byte{5},
|
||||
T: tm,
|
||||
G: ll,
|
||||
L: []int{6},
|
||||
M: map[string]int{"a": 7},
|
||||
P: p,
|
||||
}
|
||||
|
||||
mapVal1 = mapval(map[string]*pb.Value{
|
||||
"B": boolval(true),
|
||||
"I": intval(1),
|
||||
"U": intval(2),
|
||||
"F": floatval(3),
|
||||
"S": &pb.Value{&pb.Value_StringValue{"four"}},
|
||||
"Y": bytesval([]byte{5}),
|
||||
"T": tsval(tm),
|
||||
"G": geoval(ll),
|
||||
"L": arrayval(intval(6)),
|
||||
"M": mapval(map[string]*pb.Value{"a": intval(7)}),
|
||||
"P": intval(8),
|
||||
})
|
||||
)
|
||||
|
||||
func TestToProtoValue(t *testing.T) {
|
||||
*p = 8
|
||||
for _, test := range []struct {
|
||||
in interface{}
|
||||
want *pb.Value
|
||||
}{
|
||||
{nil, nullValue},
|
||||
{[]int(nil), nullValue},
|
||||
{map[string]int(nil), nullValue},
|
||||
{(*testStruct1)(nil), nullValue},
|
||||
{(*latlng.LatLng)(nil), nullValue},
|
||||
{(*DocumentRef)(nil), nullValue},
|
||||
{true, boolval(true)},
|
||||
{3, intval(3)},
|
||||
{uint32(3), intval(3)},
|
||||
{1.5, floatval(1.5)},
|
||||
{"str", strval("str")},
|
||||
{[]byte{1, 2}, bytesval([]byte{1, 2})},
|
||||
{tm, tsval(tm)},
|
||||
{ll, geoval(ll)},
|
||||
{[]int{1, 2}, arrayval(intval(1), intval(2))},
|
||||
{&[]int{1, 2}, arrayval(intval(1), intval(2))},
|
||||
{[]int{}, arrayval()},
|
||||
{map[string]int{"a": 1, "b": 2},
|
||||
mapval(map[string]*pb.Value{"a": intval(1), "b": intval(2)})},
|
||||
{map[string]int{}, mapval(map[string]*pb.Value{})},
|
||||
{p, intval(8)},
|
||||
{&p, intval(8)},
|
||||
{map[string]interface{}{"a": 1, "p": p, "s": "str"},
|
||||
mapval(map[string]*pb.Value{"a": intval(1), "p": intval(8), "s": strval("str")})},
|
||||
{map[string]fmt.Stringer{"a": tm},
|
||||
mapval(map[string]*pb.Value{"a": tsval(tm)})},
|
||||
{testVal1, mapVal1},
|
||||
{
|
||||
&DocumentRef{
|
||||
ID: "d",
|
||||
Path: "projects/P/databases/D/documents/c/d",
|
||||
Parent: &CollectionRef{
|
||||
ID: "c",
|
||||
parentPath: "projects/P/databases/D",
|
||||
Path: "projects/P/databases/D/documents/c",
|
||||
Query: Query{collectionID: "c", parentPath: "projects/P/databases/D"},
|
||||
},
|
||||
},
|
||||
refval("projects/P/databases/D/documents/c/d"),
|
||||
},
|
||||
} {
|
||||
got, err := toProtoValue(reflect.ValueOf(test.in))
|
||||
if err != nil {
|
||||
t.Errorf("%v (%T): %v", test.in, test.in, err)
|
||||
continue
|
||||
}
|
||||
if !testEqual(got, test.want) {
|
||||
t.Errorf("%+v (%T):\ngot\n%+v\nwant\n%+v", test.in, test.in, got, test.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type stringy struct{}
|
||||
|
||||
func (stringy) String() string { return "stringy" }
|
||||
|
||||
func TestToProtoValueErrors(t *testing.T) {
|
||||
for _, in := range []interface{}{
|
||||
uint64(0), // a bad fit for int64
|
||||
map[int]bool{}, // map key type is not string
|
||||
make(chan int), // can't handle type
|
||||
map[string]fmt.Stringer{"a": stringy{}}, // only empty interfaces
|
||||
} {
|
||||
_, err := toProtoValue(reflect.ValueOf(in))
|
||||
if err == nil {
|
||||
t.Errorf("%v: got nil, want error", in)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type testStruct2 struct {
|
||||
Ignore int `firestore:"-"`
|
||||
Rename int `firestore:"a"`
|
||||
OmitEmpty int `firestore:",omitempty"`
|
||||
OmitEmptyTime time.Time `firestore:",omitempty"`
|
||||
}
|
||||
|
||||
func TestToProtoValueTags(t *testing.T) {
|
||||
in := &testStruct2{
|
||||
Ignore: 1,
|
||||
Rename: 2,
|
||||
OmitEmpty: 3,
|
||||
OmitEmptyTime: aTime,
|
||||
}
|
||||
got, err := toProtoValue(reflect.ValueOf(in))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
want := mapval(map[string]*pb.Value{
|
||||
"a": intval(2),
|
||||
"OmitEmpty": intval(3),
|
||||
"OmitEmptyTime": tsval(aTime),
|
||||
})
|
||||
if !testEqual(got, want) {
|
||||
t.Errorf("got %+v, want %+v", got, want)
|
||||
}
|
||||
|
||||
got, err = toProtoValue(reflect.ValueOf(testStruct2{}))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
want = mapval(map[string]*pb.Value{"a": intval(0)})
|
||||
if !testEqual(got, want) {
|
||||
t.Errorf("got\n%+v\nwant\n%+v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestToProtoValueEmbedded(t *testing.T) {
|
||||
// Embedded time.Time or LatLng should behave like non-embedded.
|
||||
type embed struct {
|
||||
time.Time
|
||||
*latlng.LatLng
|
||||
}
|
||||
|
||||
got, err := toProtoValue(reflect.ValueOf(embed{tm, ll}))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
want := mapval(map[string]*pb.Value{
|
||||
"Time": tsval(tm),
|
||||
"LatLng": geoval(ll),
|
||||
})
|
||||
if !testEqual(got, want) {
|
||||
t.Errorf("got %+v, want %+v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsEmpty(t *testing.T) {
|
||||
for _, e := range []interface{}{int(0), float32(0), false, "", []int{}, []int(nil), (*int)(nil)} {
|
||||
if !isEmptyValue(reflect.ValueOf(e)) {
|
||||
t.Errorf("%v (%T): want true, got false", e, e)
|
||||
}
|
||||
}
|
||||
i := 3
|
||||
for _, n := range []interface{}{int(1), float32(1), true, "x", []int{1}, &i} {
|
||||
if isEmptyValue(reflect.ValueOf(n)) {
|
||||
t.Errorf("%v (%T): want false, got true", n, n)
|
||||
}
|
||||
}
|
||||
}
|
279
vendor/cloud.google.com/go/firestore/transaction.go
generated
vendored
Normal file
@ -0,0 +1,279 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package firestore
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
|
||||
|
||||
gax "github.com/googleapis/gax-go"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
// Transaction represents a Firestore transaction.
|
||||
type Transaction struct {
|
||||
c *Client
|
||||
ctx context.Context
|
||||
id []byte
|
||||
writes []*pb.Write
|
||||
maxAttempts int
|
||||
readOnly bool
|
||||
readAfterWrite bool
|
||||
}
|
||||
|
||||
// A TransactionOption is an option passed to Client.Transaction.
|
||||
type TransactionOption interface {
|
||||
config(t *Transaction)
|
||||
}
|
||||
|
||||
// MaxAttempts is a TransactionOption that configures the maximum number of times to
|
||||
// try a transaction. It defaults to DefaultTransactionMaxAttempts.
|
||||
func MaxAttempts(n int) maxAttempts { return maxAttempts(n) }
|
||||
|
||||
type maxAttempts int
|
||||
|
||||
func (m maxAttempts) config(t *Transaction) { t.maxAttempts = int(m) }
|
||||
|
||||
// DefaultTransactionMaxAttempts is the default number of times to attempt a transaction.
|
||||
const DefaultTransactionMaxAttempts = 5
|
||||
|
||||
// ReadOnly is a TransactionOption that makes the transaction read-only. Read-only
|
||||
// transactions cannot issue write operations, but are more efficient.
|
||||
var ReadOnly = ro{}
|
||||
|
||||
type ro struct{}
|
||||
|
||||
func (ro) config(t *Transaction) { t.readOnly = true }
|
||||
|
||||
var (
|
||||
// ErrConcurrentTransaction is returned when a transaction is rolled back due
|
||||
// to a conflict with a concurrent transaction.
|
||||
ErrConcurrentTransaction = errors.New("firestore: concurrent transaction")
|
||||
|
||||
// Defined here for testing.
|
||||
errReadAfterWrite = errors.New("firestore: read after write in transaction")
|
||||
errWriteReadOnly = errors.New("firestore: write in read-only transaction")
|
||||
errNonTransactionalOp = errors.New("firestore: non-transactional operation inside a transaction")
|
||||
errNestedTransaction = errors.New("firestore: nested transaction")
|
||||
)
|
||||
|
||||
type transactionInProgressKey struct{}
|
||||
|
||||
func checkTransaction(ctx context.Context) error {
|
||||
if ctx.Value(transactionInProgressKey{}) != nil {
|
||||
return errNonTransactionalOp
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RunTransaction runs f in a transaction. f should use the transaction it is given
|
||||
// for all Firestore operations. For any operation requiring a context, f should use
|
||||
// the context it is passed, not the first argument to RunTransaction.
|
||||
//
|
||||
// f must not call Commit or Rollback on the provided Transaction.
|
||||
//
|
||||
// If f returns nil, RunTransaction commits the transaction. If the commit fails due
|
||||
// to a conflicting transaction, RunTransaction retries f. It gives up and returns
|
||||
// ErrConcurrentTransaction after a number of attempts that can be configured with
|
||||
// the MaxAttempts option. If the commit succeeds, RunTransaction returns a nil error.
|
||||
//
|
||||
// If f returns non-nil, then the transaction will be rolled back and
|
||||
// this method will return the same error. The function f is not retried.
|
||||
//
|
||||
// Note that when f returns, the transaction is not committed. Calling code
|
||||
// must not assume that any of f's changes have been committed until
|
||||
// RunTransaction returns nil.
|
||||
//
|
||||
// Since f may be called more than once, f should usually be idempotent – that is, it
|
||||
// should have the same result when called multiple times.
|
||||
func (c *Client) RunTransaction(ctx context.Context, f func(context.Context, *Transaction) error, opts ...TransactionOption) error {
|
||||
if ctx.Value(transactionInProgressKey{}) != nil {
|
||||
return errNestedTransaction
|
||||
}
|
||||
db := c.path()
|
||||
t := &Transaction{
|
||||
c: c,
|
||||
ctx: withResourceHeader(ctx, db),
|
||||
maxAttempts: DefaultTransactionMaxAttempts,
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt.config(t)
|
||||
}
|
||||
var txOpts *pb.TransactionOptions
|
||||
if t.readOnly {
|
||||
txOpts = &pb.TransactionOptions{
|
||||
Mode: &pb.TransactionOptions_ReadOnly_{&pb.TransactionOptions_ReadOnly{}},
|
||||
}
|
||||
}
|
||||
var backoff gax.Backoff
|
||||
// TODO(jba): use other than the standard backoff parameters?
|
||||
// TODO(jba): get backoff time from gRPC trailer metadata? See extractRetryDelay in https://code.googlesource.com/gocloud/+/master/spanner/retry.go.
|
||||
var err error
|
||||
for i := 0; i < t.maxAttempts; i++ {
|
||||
var res *pb.BeginTransactionResponse
|
||||
res, err = t.c.c.BeginTransaction(t.ctx, &pb.BeginTransactionRequest{
|
||||
Database: db,
|
||||
Options: txOpts,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t.id = res.Transaction
|
||||
err = f(context.WithValue(ctx, transactionInProgressKey{}, 1), t)
|
||||
// Read after write can only be checked client-side, so we make sure to check
|
||||
// even if the user does not.
|
||||
if err == nil && t.readAfterWrite {
|
||||
err = errReadAfterWrite
|
||||
}
|
||||
if err != nil {
|
||||
t.rollback()
|
||||
// Prefer f's returned error to rollback error.
|
||||
return err
|
||||
}
|
||||
_, err = t.c.c.Commit(t.ctx, &pb.CommitRequest{
|
||||
Database: t.c.path(),
|
||||
Writes: t.writes,
|
||||
Transaction: t.id,
|
||||
})
|
||||
// If a read-write transaction returns Aborted, retry.
|
||||
// On success or other failures, return here.
|
||||
if t.readOnly || grpc.Code(err) != codes.Aborted {
|
||||
// According to the Firestore team, we should not roll back here
|
||||
// if err != nil. But spanner does.
|
||||
// See https://code.googlesource.com/gocloud/+/master/spanner/transaction.go#740.
|
||||
return err
|
||||
}
|
||||
|
||||
if txOpts == nil {
|
||||
// txOpts can only be nil if this is the first retry of a read-write transaction.
|
||||
// (It is only set here and in the body of "if t.readOnly" above.)
|
||||
// Mention the transaction ID in BeginTransaction so the service
|
||||
// knows it is a retry.
|
||||
txOpts = &pb.TransactionOptions{
|
||||
Mode: &pb.TransactionOptions_ReadWrite_{
|
||||
&pb.TransactionOptions_ReadWrite{RetryTransaction: t.id},
|
||||
},
|
||||
}
|
||||
}
|
||||
// Use exponential backoff to avoid contention with other running
|
||||
// transactions.
|
||||
if cerr := gax.Sleep(ctx, backoff.Pause()); cerr != nil {
|
||||
err = cerr
|
||||
break
|
||||
}
|
||||
}
|
||||
// If we run out of retries, return the last error we saw (which should
|
||||
// be the Aborted from Commit, or a context error).
|
||||
if err != nil {
|
||||
t.rollback()
|
||||
}
|
||||
return err
|
||||
}
|
||||
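// Illustrative sketch, not part of the upstream file: the read-modify-write
// pattern RunTransaction is documented for above. The document path
// "counters/visits" and the field "n" are assumptions; the same shape
// appears in this package's transaction tests.
func incrementCounter(ctx context.Context, c *Client) error {
	ref := c.Doc("counters/visits")
	return c.RunTransaction(ctx, func(ctx context.Context, tx *Transaction) error {
		doc, err := tx.Get(ref)
		if err != nil {
			return err
		}
		n, err := doc.DataAt("n")
		if err != nil {
			return err
		}
		// Reads must precede writes: a tx.Get after this UpdateMap would
		// make RunTransaction fail with errReadAfterWrite.
		return tx.UpdateMap(ref, map[string]interface{}{"n": n.(int64) + 1})
	})
}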
|
||||
func (t *Transaction) rollback() {
|
||||
_ = t.c.c.Rollback(t.ctx, &pb.RollbackRequest{
|
||||
Database: t.c.path(),
|
||||
Transaction: t.id,
|
||||
})
|
||||
// Ignore the rollback error.
|
||||
// TODO(jba): Log it?
|
||||
// Note: Rollback is idempotent so it will be retried by the gapic layer.
|
||||
}
|
||||
|
||||
// Get gets the document in the context of the transaction.
|
||||
func (t *Transaction) Get(dr *DocumentRef) (*DocumentSnapshot, error) {
|
||||
if len(t.writes) > 0 {
|
||||
t.readAfterWrite = true
|
||||
return nil, errReadAfterWrite
|
||||
}
|
||||
docProto, err := t.c.c.GetDocument(t.ctx, &pb.GetDocumentRequest{
|
||||
Name: dr.Path,
|
||||
ConsistencySelector: &pb.GetDocumentRequest_Transaction{t.id},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newDocumentSnapshot(dr, docProto, t.c)
|
||||
}
|
||||
|
||||
// A Queryer is a Query or a CollectionRef. CollectionRefs act as queries whose
|
||||
// results are all the documents in the collection.
|
||||
type Queryer interface {
|
||||
query() *Query
|
||||
}
|
||||
|
||||
// Documents returns a DocumentIterator based on the given Query or CollectionRef. The
|
||||
// results will be in the context of the transaction.
|
||||
func (t *Transaction) Documents(q Queryer) *DocumentIterator {
|
||||
if len(t.writes) > 0 {
|
||||
t.readAfterWrite = true
|
||||
return &DocumentIterator{err: errReadAfterWrite}
|
||||
}
|
||||
return &DocumentIterator{
|
||||
ctx: t.ctx,
|
||||
q: q.query(),
|
||||
tid: t.id,
|
||||
}
|
||||
}
|
||||
|
||||
// Create adds a Create operation to the Transaction.
|
||||
// See DocumentRef.Create for details.
|
||||
func (t *Transaction) Create(dr *DocumentRef, data interface{}) error {
|
||||
return t.addWrites(dr.newReplaceWrites(data, nil, Exists(false)))
|
||||
}
|
||||
|
||||
// Set adds a Set operation to the Transaction.
|
||||
// See DocumentRef.Set for details.
|
||||
func (t *Transaction) Set(dr *DocumentRef, data interface{}, opts ...SetOption) error {
|
||||
return t.addWrites(dr.newReplaceWrites(data, opts, nil))
|
||||
}
|
||||
|
||||
// Delete adds a Delete operation to the Transaction.
|
||||
// See DocumentRef.Delete for details.
|
||||
func (t *Transaction) Delete(dr *DocumentRef, opts ...Precondition) error {
|
||||
return t.addWrites(dr.newDeleteWrites(opts))
|
||||
}
|
||||
|
||||
// UpdateMap adds a new Update operation to the Transaction.
|
||||
// See DocumentRef.UpdateMap for details.
|
||||
func (t *Transaction) UpdateMap(dr *DocumentRef, data map[string]interface{}, opts ...Precondition) error {
|
||||
return t.addWrites(dr.newUpdateMapWrites(data, opts))
|
||||
}
|
||||
|
||||
// UpdateStruct adds a new Update operation to the Transaction.
|
||||
// See DocumentRef.UpdateStruct for details.
|
||||
func (t *Transaction) UpdateStruct(dr *DocumentRef, fieldPaths []string, data interface{}, opts ...Precondition) error {
|
||||
return t.addWrites(dr.newUpdateStructWrites(fieldPaths, data, opts))
|
||||
}
|
||||
|
||||
// UpdatePaths adds a new Update operation to the Transaction.
|
||||
// See DocumentRef.UpdatePaths for details.
|
||||
func (t *Transaction) UpdatePaths(dr *DocumentRef, data []FieldPathUpdate, opts ...Precondition) error {
|
||||
return t.addWrites(dr.newUpdatePathWrites(data, opts))
|
||||
}
|
||||
|
||||
func (t *Transaction) addWrites(ws []*pb.Write, err error) error {
|
||||
if t.readOnly {
|
||||
return errWriteReadOnly
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t.writes = append(t.writes, ws...)
|
||||
return nil
|
||||
}
|
346
vendor/cloud.google.com/go/firestore/transaction_test.go
generated
vendored
Normal file
@ -0,0 +1,346 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package firestore
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
"testing"
|
||||
|
||||
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
|
||||
|
||||
"github.com/golang/protobuf/ptypes/empty"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
func TestRunTransaction(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
const db = "projects/projectID/databases/(default)"
|
||||
tid := []byte{1}
|
||||
c, srv := newMock(t)
|
||||
beginReq := &pb.BeginTransactionRequest{Database: db}
|
||||
beginRes := &pb.BeginTransactionResponse{Transaction: tid}
|
||||
commitReq := &pb.CommitRequest{Database: db, Transaction: tid}
|
||||
// Empty transaction.
|
||||
srv.addRPC(beginReq, beginRes)
|
||||
srv.addRPC(commitReq, &pb.CommitResponse{CommitTime: aTimestamp})
|
||||
err := c.RunTransaction(ctx, func(context.Context, *Transaction) error { return nil })
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Transaction with read and write.
|
||||
srv.reset()
|
||||
srv.addRPC(beginReq, beginRes)
|
||||
aDoc := &pb.Document{
|
||||
Name: db + "/documents/C/a",
|
||||
CreateTime: aTimestamp,
|
||||
UpdateTime: aTimestamp2,
|
||||
Fields: map[string]*pb.Value{"count": intval(1)},
|
||||
}
|
||||
srv.addRPC(
|
||||
&pb.GetDocumentRequest{
|
||||
Name: db + "/documents/C/a",
|
||||
ConsistencySelector: &pb.GetDocumentRequest_Transaction{tid},
|
||||
},
|
||||
aDoc,
|
||||
)
|
||||
aDoc2 := &pb.Document{
|
||||
Name: aDoc.Name,
|
||||
Fields: map[string]*pb.Value{"count": intval(2)},
|
||||
}
|
||||
srv.addRPC(
|
||||
&pb.CommitRequest{
|
||||
Database: db,
|
||||
Transaction: tid,
|
||||
Writes: []*pb.Write{{
|
||||
Operation: &pb.Write_Update{aDoc2},
|
||||
UpdateMask: &pb.DocumentMask{FieldPaths: []string{"count"}},
|
||||
CurrentDocument: &pb.Precondition{
|
||||
ConditionType: &pb.Precondition_Exists{true},
|
||||
},
|
||||
}},
|
||||
},
|
||||
&pb.CommitResponse{CommitTime: aTimestamp3},
|
||||
)
|
||||
err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error {
|
||||
docref := c.Collection("C").Doc("a")
|
||||
doc, err := tx.Get(docref)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
count, err := doc.DataAt("count")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tx.UpdateMap(docref, map[string]interface{}{"count": count.(int64) + 1})
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Query
|
||||
srv.reset()
|
||||
srv.addRPC(beginReq, beginRes)
|
||||
srv.addRPC(
|
||||
&pb.RunQueryRequest{
|
||||
Parent: db,
|
||||
QueryType: &pb.RunQueryRequest_StructuredQuery{
|
||||
&pb.StructuredQuery{
|
||||
From: []*pb.StructuredQuery_CollectionSelector{{CollectionId: "C"}},
|
||||
},
|
||||
},
|
||||
ConsistencySelector: &pb.RunQueryRequest_Transaction{tid},
|
||||
},
|
||||
[]interface{}{},
|
||||
)
|
||||
srv.addRPC(commitReq, &pb.CommitResponse{CommitTime: aTimestamp3})
|
||||
err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error {
|
||||
it := tx.Documents(c.Collection("C"))
|
||||
_, err := it.Next()
|
||||
if err != iterator.Done {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Retry entire transaction.
|
||||
srv.reset()
|
||||
srv.addRPC(beginReq, beginRes)
|
||||
srv.addRPC(commitReq, grpc.Errorf(codes.Aborted, ""))
|
||||
srv.addRPC(
|
||||
&pb.BeginTransactionRequest{
|
||||
Database: db,
|
||||
Options: &pb.TransactionOptions{
|
||||
Mode: &pb.TransactionOptions_ReadWrite_{
|
||||
&pb.TransactionOptions_ReadWrite{tid},
|
||||
},
|
||||
},
|
||||
},
|
||||
beginRes,
|
||||
)
|
||||
srv.addRPC(commitReq, &pb.CommitResponse{CommitTime: aTimestamp})
|
||||
err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error { return nil })
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTransactionErrors(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
const db = "projects/projectID/databases/(default)"
|
||||
c, srv := newMock(t)
|
||||
var (
|
||||
tid = []byte{1}
|
||||
internalErr = grpc.Errorf(codes.Internal, "so sad")
|
||||
beginReq = &pb.BeginTransactionRequest{
|
||||
Database: db,
|
||||
}
|
||||
beginRes = &pb.BeginTransactionResponse{Transaction: tid}
|
||||
getReq = &pb.GetDocumentRequest{
|
||||
Name: db + "/documents/C/a",
|
||||
ConsistencySelector: &pb.GetDocumentRequest_Transaction{tid},
|
||||
}
|
||||
rollbackReq = &pb.RollbackRequest{Database: db, Transaction: tid}
|
||||
commitReq = &pb.CommitRequest{Database: db, Transaction: tid}
|
||||
)
|
||||
|
||||
// BeginTransaction has a permanent error.
|
||||
srv.addRPC(beginReq, internalErr)
|
||||
err := c.RunTransaction(ctx, func(context.Context, *Transaction) error { return nil })
|
||||
if grpc.Code(err) != codes.Internal {
|
||||
t.Errorf("got <%v>, want Internal", err)
|
||||
}
|
||||
|
||||
// Get has a permanent error.
|
||||
get := func(_ context.Context, tx *Transaction) error {
|
||||
_, err := tx.Get(c.Doc("C/a"))
|
||||
return err
|
||||
}
|
||||
srv.reset()
|
||||
srv.addRPC(beginReq, beginRes)
|
||||
srv.addRPC(getReq, internalErr)
|
||||
srv.addRPC(rollbackReq, &empty.Empty{})
|
||||
err = c.RunTransaction(ctx, get)
|
||||
if grpc.Code(err) != codes.Internal {
|
||||
t.Errorf("got <%v>, want Internal", err)
|
||||
}
|
||||
|
||||
// Get has a permanent error, but the rollback fails. We still
|
||||
// return Get's error.
|
||||
srv.reset()
|
||||
srv.addRPC(beginReq, beginRes)
|
||||
srv.addRPC(getReq, internalErr)
|
||||
srv.addRPC(rollbackReq, grpc.Errorf(codes.FailedPrecondition, ""))
|
||||
err = c.RunTransaction(ctx, get)
|
||||
if grpc.Code(err) != codes.Internal {
|
||||
t.Errorf("got <%v>, want Internal", err)
|
||||
}
|
||||
|
||||
// Commit has a permanent error.
|
||||
srv.reset()
|
||||
srv.addRPC(beginReq, beginRes)
|
||||
srv.addRPC(getReq, &pb.Document{
|
||||
Name: "projects/projectID/databases/(default)/documents/C/a",
|
||||
CreateTime: aTimestamp,
|
||||
UpdateTime: aTimestamp2,
|
||||
})
|
||||
srv.addRPC(commitReq, internalErr)
|
||||
err = c.RunTransaction(ctx, get)
|
||||
if grpc.Code(err) != codes.Internal {
|
||||
t.Errorf("got <%v>, want Internal", err)
|
||||
}
|
||||
|
||||
// Read after write.
|
||||
srv.reset()
|
||||
srv.addRPC(beginReq, beginRes)
|
||||
srv.addRPC(rollbackReq, &empty.Empty{})
|
||||
err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error {
|
||||
tx.Delete(c.Doc("C/a"))
|
||||
if _, err := tx.Get(c.Doc("C/a")); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != errReadAfterWrite {
|
||||
t.Errorf("got <%v>, want <%v>", err, errReadAfterWrite)
|
||||
}
|
||||
|
||||
// Read after write, with query.
|
||||
srv.reset()
|
||||
srv.addRPC(beginReq, beginRes)
|
||||
srv.addRPC(rollbackReq, &empty.Empty{})
|
||||
err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error {
|
||||
tx.Delete(c.Doc("C/a"))
|
||||
it := tx.Documents(c.Collection("C").Select("x"))
|
||||
if _, err := it.Next(); err != iterator.Done {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != errReadAfterWrite {
|
||||
t.Errorf("got <%v>, want <%v>", err, errReadAfterWrite)
|
||||
}
|
||||
|
||||
// Read after write fails even if the user ignores the read's error.
|
||||
srv.reset()
|
||||
srv.addRPC(beginReq, beginRes)
|
||||
srv.addRPC(rollbackReq, &empty.Empty{})
|
||||
err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error {
|
||||
tx.Delete(c.Doc("C/a"))
|
||||
tx.Get(c.Doc("C/a"))
|
||||
return nil
|
||||
})
|
||||
if err != errReadAfterWrite {
|
||||
t.Errorf("got <%v>, want <%v>", err, errReadAfterWrite)
|
||||
}
|
||||
|
||||
// Write in read-only transaction.
|
||||
srv.reset()
|
||||
srv.addRPC(
|
||||
&pb.BeginTransactionRequest{
|
||||
Database: db,
|
||||
Options: &pb.TransactionOptions{
|
||||
Mode: &pb.TransactionOptions_ReadOnly_{&pb.TransactionOptions_ReadOnly{}},
|
||||
},
|
||||
},
|
||||
beginRes,
|
||||
)
|
||||
srv.addRPC(rollbackReq, &empty.Empty{})
|
||||
err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error {
|
||||
return tx.Delete(c.Doc("C/a"))
|
||||
}, ReadOnly)
|
||||
if err != errWriteReadOnly {
|
||||
t.Errorf("got <%v>, want <%v>", err, errWriteReadOnly)
|
||||
}
|
||||
|
||||
// Too many retries.
|
||||
srv.reset()
|
||||
srv.addRPC(beginReq, beginRes)
|
||||
srv.addRPC(commitReq, grpc.Errorf(codes.Aborted, ""))
|
||||
srv.addRPC(
|
||||
&pb.BeginTransactionRequest{
|
||||
Database: db,
|
||||
Options: &pb.TransactionOptions{
|
||||
Mode: &pb.TransactionOptions_ReadWrite_{
|
||||
&pb.TransactionOptions_ReadWrite{tid},
|
||||
},
|
||||
},
|
||||
},
|
||||
beginRes,
|
||||
)
|
||||
srv.addRPC(commitReq, grpc.Errorf(codes.Aborted, ""))
|
||||
srv.addRPC(rollbackReq, &empty.Empty{})
|
||||
err = c.RunTransaction(ctx, func(context.Context, *Transaction) error { return nil },
|
||||
MaxAttempts(2))
|
||||
if grpc.Code(err) != codes.Aborted {
|
||||
t.Errorf("got <%v>, want Aborted", err)
|
||||
}
|
||||
|
||||
// Nested transaction.
|
||||
srv.reset()
|
||||
srv.addRPC(beginReq, beginRes)
|
||||
srv.addRPC(rollbackReq, &empty.Empty{})
|
||||
err = c.RunTransaction(ctx, func(ctx context.Context, tx *Transaction) error {
|
||||
return c.RunTransaction(ctx, func(context.Context, *Transaction) error { return nil })
|
||||
})
|
||||
if got, want := err, errNestedTransaction; got != want {
|
||||
t.Errorf("got <%v>, want <%V>", got, want)
|
||||
}
|
||||
|
||||
// Non-transactional operation.
|
||||
dr := c.Doc("C/d")
|
||||
|
||||
for i, op := range []func(ctx context.Context) error{
|
||||
func(ctx context.Context) error { _, err := c.GetAll(ctx, []*DocumentRef{dr}); return err },
|
||||
func(ctx context.Context) error { _, _, err := c.Collection("C").Add(ctx, testData); return err },
|
||||
func(ctx context.Context) error { _, err := dr.Get(ctx); return err },
|
||||
func(ctx context.Context) error { _, err := dr.Create(ctx, testData); return err },
|
||||
func(ctx context.Context) error { _, err := dr.Set(ctx, testData); return err },
|
||||
func(ctx context.Context) error { _, err := dr.Delete(ctx); return err },
|
||||
func(ctx context.Context) error { _, err := dr.UpdateMap(ctx, testData); return err },
|
||||
func(ctx context.Context) error {
|
||||
_, err := dr.UpdateStruct(ctx, []string{"x"}, struct{}{})
|
||||
return err
|
||||
},
|
||||
func(ctx context.Context) error {
|
||||
_, err := dr.UpdatePaths(ctx, []FieldPathUpdate{{Path: []string{"*"}, Value: 1}})
|
||||
return err
|
||||
},
|
||||
func(ctx context.Context) error { it := c.Collections(ctx); _, err := it.Next(); return err },
|
||||
func(ctx context.Context) error { it := dr.Collections(ctx); _, err := it.Next(); return err },
|
||||
func(ctx context.Context) error { _, err := c.Batch().Commit(ctx); return err },
|
||||
func(ctx context.Context) error {
|
||||
it := c.Collection("C").Documents(ctx)
|
||||
_, err := it.Next()
|
||||
return err
|
||||
},
|
||||
} {
|
||||
srv.reset()
|
||||
srv.addRPC(beginReq, beginRes)
|
||||
srv.addRPC(rollbackReq, &empty.Empty{})
|
||||
err = c.RunTransaction(ctx, func(ctx context.Context, _ *Transaction) error {
|
||||
return op(ctx)
|
||||
})
|
||||
if got, want := err, errNonTransactionalOp; got != want {
|
||||
t.Errorf("#%d: got <%v>, want <%v>", i, got, want)
|
||||
}
|
||||
}
|
||||
}
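The error cases above pin down the ordering rules that RunTransaction enforces: every read (Get, GetAll, Documents) must come before any buffered write, writes are rejected under the ReadOnly option, and non-transactional client operations may not be used inside the transaction function. A minimal sketch of a conforming transaction body, for illustration only — it is not part of the vendored package, and it assumes a *Client named client plus the document path "C/a" used by the tests above.
// exampleReadThenWrite is a hypothetical helper showing the read-before-write
// ordering the tests above enforce; it is not part of the vendored file.
func exampleReadThenWrite(ctx context.Context, client *Client) error {
	return client.RunTransaction(ctx, func(ctx context.Context, tx *Transaction) error {
		ref := client.Doc("C/a")
		snap, err := tx.Get(ref) // all reads must happen before any write
		if err != nil {
			return err
		}
		_ = snap // decide what to write based on the snapshot
		return tx.Delete(ref) // writes are buffered and applied at commit time
	})
}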
|
147
vendor/cloud.google.com/go/firestore/util_test.go
generated
vendored
Normal file
147
vendor/cloud.google.com/go/firestore/util_test.go
generated
vendored
Normal file
@ -0,0 +1,147 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package firestore
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
tspb "github.com/golang/protobuf/ptypes/timestamp"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/genproto/googleapis/type/latlng"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
var (
|
||||
aTime = time.Date(2017, 1, 26, 0, 0, 0, 0, time.UTC)
|
||||
aTime2 = time.Date(2017, 2, 5, 0, 0, 0, 0, time.UTC)
|
||||
aTime3 = time.Date(2017, 3, 20, 0, 0, 0, 0, time.UTC)
|
||||
aTimestamp = mustTimestampProto(aTime)
|
||||
aTimestamp2 = mustTimestampProto(aTime2)
|
||||
aTimestamp3 = mustTimestampProto(aTime3)
|
||||
)
|
||||
|
||||
func mustTimestampProto(t time.Time) *tspb.Timestamp {
|
||||
ts, err := ptypes.TimestampProto(t)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return ts
|
||||
}
|
||||
|
||||
// testEqual implements equality for Firestore tests.
|
||||
func testEqual(a, b interface{}) bool {
|
||||
switch a := a.(type) {
|
||||
case time.Time:
|
||||
return a.Equal(b.(time.Time))
|
||||
case proto.Message:
|
||||
return proto.Equal(a, b.(proto.Message))
|
||||
case *DocumentSnapshot:
|
||||
return a.equal(b.(*DocumentSnapshot))
|
||||
case *DocumentRef:
|
||||
return a.equal(b.(*DocumentRef))
|
||||
case *CollectionRef:
|
||||
return a.equal(b.(*CollectionRef))
|
||||
default:
|
||||
return reflect.DeepEqual(a, b)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTestEqual(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
a, b interface{}
|
||||
want bool
|
||||
}{
|
||||
{nil, nil, true},
|
||||
{([]int)(nil), nil, false},
|
||||
{nil, ([]int)(nil), false},
|
||||
{([]int)(nil), ([]int)(nil), true},
|
||||
} {
|
||||
if got := testEqual(test.a, test.b); got != test.want {
|
||||
t.Errorf("testEqual(%#v, %#v) == %t, want %t", test.a, test.b, got, test.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func newMock(t *testing.T) (*Client, *mockServer) {
|
||||
srv, err := newMockServer()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure(), grpc.WithBlock())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
client, err := NewClient(context.Background(), "projectID", option.WithGRPCConn(conn))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return client, srv
|
||||
}
|
||||
|
||||
func intval(i int) *pb.Value {
|
||||
return &pb.Value{&pb.Value_IntegerValue{int64(i)}}
|
||||
}
|
||||
|
||||
func boolval(b bool) *pb.Value {
|
||||
return &pb.Value{&pb.Value_BooleanValue{b}}
|
||||
}
|
||||
|
||||
func floatval(f float64) *pb.Value {
|
||||
return &pb.Value{&pb.Value_DoubleValue{f}}
|
||||
}
|
||||
|
||||
func strval(s string) *pb.Value {
|
||||
return &pb.Value{&pb.Value_StringValue{s}}
|
||||
}
|
||||
|
||||
func bytesval(b []byte) *pb.Value {
|
||||
return &pb.Value{&pb.Value_BytesValue{b}}
|
||||
}
|
||||
|
||||
func tsval(t time.Time) *pb.Value {
|
||||
ts, err := ptypes.TimestampProto(t)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("bad time %s in test: %v", t, err))
|
||||
}
|
||||
return &pb.Value{&pb.Value_TimestampValue{ts}}
|
||||
}
|
||||
|
||||
func geoval(ll *latlng.LatLng) *pb.Value {
|
||||
return &pb.Value{&pb.Value_GeoPointValue{ll}}
|
||||
}
|
||||
|
||||
func arrayval(s ...*pb.Value) *pb.Value {
|
||||
if s == nil {
|
||||
s = []*pb.Value{}
|
||||
}
|
||||
return &pb.Value{&pb.Value_ArrayValue{&pb.ArrayValue{s}}}
|
||||
}
|
||||
|
||||
func mapval(m map[string]*pb.Value) *pb.Value {
|
||||
return &pb.Value{&pb.Value_MapValue{&pb.MapValue{m}}}
|
||||
}
|
||||
|
||||
func refval(path string) *pb.Value {
|
||||
return &pb.Value{&pb.Value_ReferenceValue{path}}
|
||||
}
|
113
vendor/cloud.google.com/go/firestore/writebatch.go
generated
vendored
Normal file
113
vendor/cloud.google.com/go/firestore/writebatch.go
generated
vendored
Normal file
@ -0,0 +1,113 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package firestore
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// A WriteBatch holds multiple database updates. Build a batch with the Create, Set,
|
||||
// Update and Delete methods, then run it with the Commit method. Errors in Create,
|
||||
// Set, Update or Delete are recorded instead of being returned immediately. The
|
||||
// first such error is returned by Commit.
|
||||
type WriteBatch struct {
|
||||
c *Client
|
||||
err error
|
||||
writes []*pb.Write
|
||||
}
|
||||
|
||||
func (b *WriteBatch) add(ws []*pb.Write, err error) *WriteBatch {
|
||||
if b.err != nil {
|
||||
return b
|
||||
}
|
||||
if err != nil {
|
||||
b.err = err
|
||||
return b
|
||||
}
|
||||
b.writes = append(b.writes, ws...)
|
||||
return b
|
||||
}
|
||||
|
||||
// Create adds a Create operation to the batch.
|
||||
// See DocumentRef.Create for details.
|
||||
func (b *WriteBatch) Create(dr *DocumentRef, data interface{}) *WriteBatch {
|
||||
return b.add(dr.newReplaceWrites(data, nil, Exists(false)))
|
||||
}
|
||||
|
||||
// Set adds a Set operation to the batch.
|
||||
// See DocumentRef.Set for details.
|
||||
func (b *WriteBatch) Set(dr *DocumentRef, data interface{}, opts ...SetOption) *WriteBatch {
|
||||
return b.add(dr.newReplaceWrites(data, opts, nil))
|
||||
}
|
||||
|
||||
// Delete adds a Delete operation to the batch.
|
||||
// See DocumentRef.Delete for details.
|
||||
func (b *WriteBatch) Delete(dr *DocumentRef, opts ...Precondition) *WriteBatch {
|
||||
return b.add(dr.newDeleteWrites(opts))
|
||||
}
|
||||
|
||||
// UpdateMap adds an UpdateMap operation to the batch.
|
||||
// See DocumentRef.UpdateMap for details.
|
||||
func (b *WriteBatch) UpdateMap(dr *DocumentRef, data map[string]interface{}, opts ...Precondition) *WriteBatch {
|
||||
return b.add(dr.newUpdateMapWrites(data, opts))
|
||||
}
|
||||
|
||||
// UpdateStruct adds an UpdateStruct operation to the batch.
|
||||
// See DocumentRef.UpdateStruct for details.
|
||||
func (b *WriteBatch) UpdateStruct(dr *DocumentRef, fieldPaths []string, data interface{}, opts ...Precondition) *WriteBatch {
|
||||
return b.add(dr.newUpdateStructWrites(fieldPaths, data, opts))
|
||||
}
|
||||
|
||||
// UpdatePaths adds an UpdatePaths operation to the batch.
|
||||
// See DocumentRef.UpdatePaths for details.
|
||||
func (b *WriteBatch) UpdatePaths(dr *DocumentRef, data []FieldPathUpdate, opts ...Precondition) *WriteBatch {
|
||||
return b.add(dr.newUpdatePathWrites(data, opts))
|
||||
}
|
||||
|
||||
// Commit applies all the writes in the batch to the database atomically. Commit
|
||||
// returns an error if there are no writes in the batch, if any errors occurred in
|
||||
// constructing the writes, or if the Commit operation fails.
|
||||
func (b *WriteBatch) Commit(ctx context.Context) ([]*WriteResult, error) {
|
||||
if err := checkTransaction(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if b.err != nil {
|
||||
return nil, b.err
|
||||
}
|
||||
if len(b.writes) == 0 {
|
||||
return nil, errors.New("firestore: cannot commit empty WriteBatch")
|
||||
}
|
||||
db := b.c.path()
|
||||
res, err := b.c.c.Commit(withResourceHeader(ctx, db), &pb.CommitRequest{
|
||||
Database: db,
|
||||
Writes: b.writes,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var wrs []*WriteResult
|
||||
for _, pwr := range res.WriteResults {
|
||||
wr, err := writeResultFromProto(pwr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
wrs = append(wrs, wr)
|
||||
}
|
||||
return wrs, nil
|
||||
}
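As the doc comments above describe, a WriteBatch is assembled by chaining write methods, and the first error recorded while building the batch only surfaces from Commit. A hypothetical usage sketch follows; it is not part of the vendored file, the document paths and data are made up, and an existing *Client named client is assumed.
// exampleBatch illustrates the chained WriteBatch API described above.
// It is a hypothetical helper, not part of the vendored package.
func exampleBatch(ctx context.Context, client *Client) ([]*WriteResult, error) {
	return client.Batch().
		Create(client.Doc("C/new"), map[string]interface{}{"a": 1}).   // commit fails if the document already exists
		Set(client.Doc("C/existing"), map[string]interface{}{"a": 2}). // create or overwrite
		Delete(client.Doc("C/old")).
		Commit(ctx) // the first error recorded while building the batch is returned here
}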
|
145
vendor/cloud.google.com/go/firestore/writebatch_test.go
generated
vendored
Normal file
145
vendor/cloud.google.com/go/firestore/writebatch_test.go
generated
vendored
Normal file
@ -0,0 +1,145 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package firestore
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
func TestWriteBatch(t *testing.T) {
|
||||
type update struct{ A int }
|
||||
|
||||
c, srv := newMock(t)
|
||||
docPrefix := c.Collection("C").Path + "/"
|
||||
srv.addRPC(
|
||||
&pb.CommitRequest{
|
||||
Database: c.path(),
|
||||
Writes: []*pb.Write{
|
||||
{ // Create
|
||||
Operation: &pb.Write_Update{
|
||||
Update: &pb.Document{
|
||||
Name: docPrefix + "a",
|
||||
Fields: testFields,
|
||||
},
|
||||
},
|
||||
CurrentDocument: &pb.Precondition{
|
||||
ConditionType: &pb.Precondition_Exists{false},
|
||||
},
|
||||
},
|
||||
{ // Set
|
||||
Operation: &pb.Write_Update{
|
||||
Update: &pb.Document{
|
||||
Name: docPrefix + "b",
|
||||
Fields: testFields,
|
||||
},
|
||||
},
|
||||
},
|
||||
{ // Delete
|
||||
Operation: &pb.Write_Delete{
|
||||
Delete: docPrefix + "c",
|
||||
},
|
||||
},
|
||||
{ // UpdateMap
|
||||
Operation: &pb.Write_Update{
|
||||
Update: &pb.Document{
|
||||
Name: docPrefix + "d",
|
||||
Fields: testFields,
|
||||
},
|
||||
},
|
||||
UpdateMask: &pb.DocumentMask{[]string{"a"}},
|
||||
CurrentDocument: &pb.Precondition{
|
||||
ConditionType: &pb.Precondition_Exists{true},
|
||||
},
|
||||
},
|
||||
{ // UpdateStruct
|
||||
Operation: &pb.Write_Update{
|
||||
Update: &pb.Document{
|
||||
Name: docPrefix + "e",
|
||||
Fields: map[string]*pb.Value{"A": intval(3)},
|
||||
},
|
||||
},
|
||||
UpdateMask: &pb.DocumentMask{[]string{"A"}},
|
||||
CurrentDocument: &pb.Precondition{
|
||||
ConditionType: &pb.Precondition_Exists{true},
|
||||
},
|
||||
},
|
||||
{ // UpdatePaths
|
||||
Operation: &pb.Write_Update{
|
||||
Update: &pb.Document{
|
||||
Name: docPrefix + "f",
|
||||
Fields: map[string]*pb.Value{"*": intval(3)},
|
||||
},
|
||||
},
|
||||
UpdateMask: &pb.DocumentMask{[]string{"`*`"}},
|
||||
CurrentDocument: &pb.Precondition{
|
||||
ConditionType: &pb.Precondition_Exists{true},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
&pb.CommitResponse{
|
||||
WriteResults: []*pb.WriteResult{
|
||||
{UpdateTime: aTimestamp},
|
||||
{UpdateTime: aTimestamp2},
|
||||
{UpdateTime: aTimestamp3},
|
||||
},
|
||||
},
|
||||
)
|
||||
gotWRs, err := c.Batch().
|
||||
Create(c.Doc("C/a"), testData).
|
||||
Set(c.Doc("C/b"), testData).
|
||||
Delete(c.Doc("C/c")).
|
||||
UpdateMap(c.Doc("C/d"), testData).
|
||||
UpdateStruct(c.Doc("C/e"), []string{"A"}, update{A: 3}).
|
||||
UpdatePaths(c.Doc("C/f"), []FieldPathUpdate{{Path: []string{"*"}, Value: 3}}).
|
||||
Commit(context.Background())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wantWRs := []*WriteResult{{aTime}, {aTime2}, {aTime3}}
|
||||
if !testEqual(gotWRs, wantWRs) {
|
||||
t.Errorf("got %+v\nwant %+v", gotWRs, wantWRs)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteBatchErrors(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c, _ := newMock(t)
|
||||
for _, test := range []struct {
|
||||
desc string
|
||||
batch *WriteBatch
|
||||
}{
|
||||
{
|
||||
"empty batch",
|
||||
c.Batch(),
|
||||
},
|
||||
{
|
||||
"bad doc reference",
|
||||
c.Batch().Create(c.Doc("a"), testData),
|
||||
},
|
||||
{
|
||||
"bad data",
|
||||
c.Batch().Create(c.Doc("a/b"), 3),
|
||||
},
|
||||
} {
|
||||
if _, err := test.batch.Commit(ctx); err == nil {
|
||||
t.Errorf("%s: got nil, want error", test.desc)
|
||||
}
|
||||
}
|
||||
}
|
24
vendor/cloud.google.com/go/internal/testutil/context.go
generated
vendored
24
vendor/cloud.google.com/go/internal/testutil/context.go
generated
vendored
@ -33,20 +33,24 @@ const (
|
||||
// ProjID returns the project ID to use in integration tests, or the empty
|
||||
// string if none is configured.
|
||||
func ProjID() string {
|
||||
projID := os.Getenv(envProjID)
|
||||
if projID == "" {
|
||||
return ""
|
||||
}
|
||||
return projID
|
||||
return os.Getenv(envProjID)
|
||||
}
|
||||
|
||||
// TokenSource returns the OAuth2 token source to use in integration tests,
|
||||
// or nil if none is configured. If the environment variable is unset,
|
||||
// TokenSource will try to find 'Application Default Credentials'. Else,
|
||||
// TokenSource will return nil.
|
||||
// TokenSource will log.Fatal if the token source is specified but missing or invalid.
|
||||
// or nil if none is configured. It uses the standard environment variable
|
||||
// for tests in this repo.
|
||||
func TokenSource(ctx context.Context, scopes ...string) oauth2.TokenSource {
|
||||
key := os.Getenv(envPrivateKey)
|
||||
return TokenSourceEnv(ctx, envPrivateKey, scopes...)
|
||||
}
|
||||
|
||||
// TokenSourceEnv returns the OAuth2 token source to use in integration tests, or nil
|
||||
// if none is configured. It tries to get credentials from the filename in the
|
||||
// environment variable envVar. If the environment variable is unset, TokenSourceEnv
|
||||
// will try to find 'Application Default Credentials'. Else, TokenSourceEnv will
|
||||
// return nil. TokenSourceEnv will log.Fatal if the token source is specified but
|
||||
// missing or invalid.
|
||||
func TokenSourceEnv(ctx context.Context, envVar string, scopes ...string) oauth2.TokenSource {
|
||||
key := os.Getenv(envVar)
|
||||
if key == "" { // Try for application default credentials.
|
||||
ts, err := google.DefaultTokenSource(ctx, scopes...)
|
||||
if err != nil {
|
||||
|
35
vendor/cloud.google.com/go/old-news.md
generated
vendored
35
vendor/cloud.google.com/go/old-news.md
generated
vendored
@ -1,3 +1,38 @@
|
||||
_July 31, 2017_
|
||||
|
||||
*v0.11.0*
|
||||
|
||||
- Clients for spanner, pubsub and video are now in beta.
|
||||
|
||||
- New client for DLP.
|
||||
|
||||
- spanner: performance and testing improvements.
|
||||
|
||||
- storage: requester-pays buckets are supported.
|
||||
|
||||
- storage, profiler, bigtable, bigquery: bug fixes and other minor improvements.
|
||||
|
||||
- pubsub: bug fixes and other minor improvements
|
||||
|
||||
_June 17, 2017_
|
||||
|
||||
|
||||
*v0.10.0*
|
||||
|
||||
- pubsub: Subscription.ModifyPushConfig replaced with Subscription.Update.
|
||||
|
||||
- pubsub: Subscription.Receive now runs concurrently for higher throughput.
|
||||
|
||||
- vision: cloud.google.com/go/vision is deprecated. Use
|
||||
cloud.google.com/go/vision/apiv1 instead.
|
||||
|
||||
- translation: now stable.
|
||||
|
||||
- trace: several changes to the surface. See the link below.
|
||||
|
||||
[Code changes required from v0.9.0.](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/MIGRATION.md)
|
||||
|
||||
|
||||
_March 17, 2017_
|
||||
|
||||
Breaking Pubsub changes.
|
||||
|
79
vendor/cloud.google.com/go/profiler/busybench/busybench.go
generated
vendored
Normal file
79
vendor/cloud.google.com/go/profiler/busybench/busybench.go
generated
vendored
Normal file
@ -0,0 +1,79 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"cloud.google.com/go/profiler"
|
||||
"compress/gzip"
|
||||
"flag"
|
||||
"log"
|
||||
"math/rand"
|
||||
"time"
|
||||
)
|
||||
|
||||
var service = flag.String("service", "", "service name")
|
||||
|
||||
const duration = time.Minute * 10
|
||||
|
||||
// busywork continuously generates 1MiB of random data and compresses it,
|
||||
// throwing away the result.
|
||||
func busywork() {
|
||||
ticker := time.NewTicker(duration)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
return
|
||||
default:
|
||||
busyworkOnce()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func busyworkOnce() {
|
||||
data := make([]byte, 1024*1024)
|
||||
rand.Read(data)
|
||||
|
||||
var b bytes.Buffer
|
||||
gz := gzip.NewWriter(&b)
|
||||
if _, err := gz.Write(data); err != nil {
|
||||
log.Printf("Failed to write to gzip stream: %v", err)
|
||||
return
|
||||
}
|
||||
if err := gz.Flush(); err != nil {
|
||||
log.Printf("Failed to flush to gzip stream: %v", err)
|
||||
return
|
||||
}
|
||||
if err := gz.Close(); err != nil {
|
||||
log.Printf("Failed to close gzip stream: %v", err)
|
||||
}
|
||||
// Throw away the result.
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
if *service == "" {
|
||||
log.Print("Service name must be configured using --service flag.")
|
||||
} else if err := profiler.Start(profiler.Config{Service: *service, DebugLogging: true}); err != nil {
|
||||
log.Printf("Failed to start the profiler: %v", err)
|
||||
} else {
|
||||
busywork()
|
||||
}
|
||||
|
||||
log.Printf("busybench finished profiling.")
|
||||
select {}
|
||||
}
|
52
vendor/cloud.google.com/go/profiler/integration-test.sh
generated
vendored
Normal file
52
vendor/cloud.google.com/go/profiler/integration-test.sh
generated
vendored
Normal file
@ -0,0 +1,52 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Fail on any error.
|
||||
set -eo pipefail
|
||||
|
||||
# Display commands being run.
|
||||
set -x
|
||||
|
||||
cd git/gocloud
|
||||
|
||||
# Run test only if profiler directory is touched.
|
||||
profiler_test=false
|
||||
for f in $(git diff-tree --no-commit-id --name-only -r HEAD); do
|
||||
if [[ "$(dirname $f)" == "profiler" ]]; then
|
||||
profiler_test=true
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ "$profiler_test" = false ]]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
COMMIT=$(git rev-parse HEAD)
|
||||
|
||||
# Set $GOPATH
|
||||
export GOPATH="$HOME/go"
|
||||
GOCLOUD_HOME=$GOPATH/src/cloud.google.com/go
|
||||
mkdir -p $GOCLOUD_HOME
|
||||
|
||||
# Move code into $GOPATH and get dependencies
|
||||
cp -R ./* $GOCLOUD_HOME
|
||||
cd $GOCLOUD_HOME
|
||||
go get -v ./...
|
||||
|
||||
cd internal/kokoro
|
||||
# Don't print out encryption keys, etc
|
||||
set +x
|
||||
key=$(cat "$KOKORO_ARTIFACTS_DIR/keystore/72523_encrypted_ba2d6f7723ed_key")
|
||||
iv=$(cat "$KOKORO_ARTIFACTS_DIR/keystore/72523_encrypted_ba2d6f7723ed_iv")
|
||||
pass=$(cat "$KOKORO_ARTIFACTS_DIR/keystore/72523_encrypted_ba2d6f7723ed_pass")
|
||||
|
||||
openssl aes-256-cbc -K $key -iv $iv -pass pass:$pass -in kokoro-key.json.enc -out key.json -d
|
||||
set -x
|
||||
|
||||
export GOOGLE_APPLICATION_CREDENTIALS="$(pwd)/key.json"
|
||||
export GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762"
|
||||
export GCLOUD_TESTS_GOLANG_ZONE="us-west1-a"
|
||||
export GCLOUD_TESTS_GOLANG_BUCKET="dulcet-port-762-go-cloud-profiler-test"
|
||||
|
||||
cd $GOCLOUD_HOME/profiler
|
||||
go get -t -tags=integration .
|
||||
go test -timeout=60m -parallel=5 -tags=integration -run TestAgentIntegration -commit="$COMMIT"
|
690
vendor/cloud.google.com/go/profiler/integration_test.go
generated
vendored
Normal file
690
vendor/cloud.google.com/go/profiler/integration_test.go
generated
vendored
Normal file
@ -0,0 +1,690 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build integration,go1.7
|
||||
|
||||
package profiler
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/storage"
|
||||
"golang.org/x/build/kubernetes"
|
||||
k8sapi "golang.org/x/build/kubernetes/api"
|
||||
"golang.org/x/build/kubernetes/gke"
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2/google"
|
||||
cloudbuild "google.golang.org/api/cloudbuild/v1"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
container "google.golang.org/api/container/v1"
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
var (
|
||||
commit = flag.String("commit", "", "git commit to test")
|
||||
runID = time.Now().Unix()
|
||||
)
|
||||
|
||||
const (
|
||||
cloudScope = "https://www.googleapis.com/auth/cloud-platform"
|
||||
monitorWriteScope = "https://www.googleapis.com/auth/monitoring.write"
|
||||
storageReadScope = "https://www.googleapis.com/auth/devstorage.read_only"
|
||||
// benchFinishString should be kept in sync with the finish string in busybench.
|
||||
benchFinishString = "busybench finished profiling"
|
||||
)
|
||||
|
||||
const startupTemplate = `
|
||||
#! /bin/bash
|
||||
|
||||
# Fail on any error.
|
||||
set -eo pipefail
|
||||
|
||||
# Display commands being run.
|
||||
set -x
|
||||
|
||||
# Install git
|
||||
sudo apt-get update
|
||||
sudo apt-get -y -q install git-all
|
||||
|
||||
# Install desired Go version
|
||||
mkdir -p /tmp/bin
|
||||
curl -sL -o /tmp/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme
|
||||
chmod +x /tmp/bin/gimme
|
||||
export PATH=$PATH:/tmp/bin
|
||||
|
||||
eval "$(gimme {{.GoVersion}})"
|
||||
|
||||
# Set $GOPATH
|
||||
export GOPATH="$HOME/go"
|
||||
|
||||
export GOCLOUD_HOME=$GOPATH/src/cloud.google.com/go
|
||||
mkdir -p $GOCLOUD_HOME
|
||||
|
||||
# Install agent
|
||||
git clone https://code.googlesource.com/gocloud $GOCLOUD_HOME
|
||||
|
||||
cd $GOCLOUD_HOME
|
||||
git reset --hard {{.Commit}}
|
||||
go get -v ./...
|
||||
|
||||
# Run benchmark with agent
|
||||
go run profiler/busybench/busybench.go --service="{{.Service}}"
|
||||
`
|
||||
|
||||
const dockerfileFmt = `FROM golang
|
||||
RUN git clone https://code.googlesource.com/gocloud /go/src/cloud.google.com/go \
|
||||
&& cd /go/src/cloud.google.com/go && git reset --hard %s \
|
||||
&& go get -v cloud.google.com/go/... && go install -v cloud.google.com/go/profiler/busybench
|
||||
CMD ["busybench", "--service", "%s"]
|
||||
`
|
||||
|
||||
type testRunner struct {
|
||||
client *http.Client
|
||||
startupTemplate *template.Template
|
||||
containerService *container.Service
|
||||
computeService *compute.Service
|
||||
storageClient *storage.Client
|
||||
}
|
||||
|
||||
type profileResponse struct {
|
||||
Profile profileData `json:"profile"`
|
||||
NumProfiles int32 `json:"numProfiles"`
|
||||
Deployments []interface{} `json:"deployments"`
|
||||
}
|
||||
|
||||
type profileData struct {
|
||||
Samples []int32 `json:"samples"`
|
||||
SampleMetrics interface{} `json:"sampleMetrics"`
|
||||
DefaultMetricType string `json:"defaultMetricType"`
|
||||
TreeNodes interface{} `json:"treeNodes"`
|
||||
Functions functionArray `json:"functions"`
|
||||
SourceFiles interface{} `json:"sourceFiles"`
|
||||
}
|
||||
|
||||
type functionArray struct {
|
||||
Name []string `json:"name"`
|
||||
Sourcefile []int32 `json:"sourceFile"`
|
||||
}
|
||||
|
||||
func validateProfileData(rawData []byte, wantFunctionName string) error {
|
||||
var pr profileResponse
|
||||
if err := json.Unmarshal(rawData, &pr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if pr.NumProfiles == 0 {
|
||||
return fmt.Errorf("profile response contains zero profiles: %v", pr)
|
||||
}
|
||||
|
||||
if len(pr.Deployments) == 0 {
|
||||
return fmt.Errorf("profile response contains zero deployments: %v", pr)
|
||||
}
|
||||
|
||||
if len(pr.Profile.Functions.Name) == 0 {
|
||||
return fmt.Errorf("profile does not have function data")
|
||||
}
|
||||
|
||||
for _, name := range pr.Profile.Functions.Name {
|
||||
if strings.Contains(name, wantFunctionName) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("wanted function name %s not found in profile", wantFunctionName)
|
||||
}
|
||||
|
||||
type instanceConfig struct {
|
||||
name string
|
||||
service string
|
||||
goVersion string
|
||||
}
|
||||
|
||||
func newInstanceConfigs() []instanceConfig {
|
||||
return []instanceConfig{
|
||||
{
|
||||
name: fmt.Sprintf("profiler-test-go19-%d", runID),
|
||||
service: fmt.Sprintf("profiler-test-go19-%d-gce", runID),
|
||||
goVersion: "1.9",
|
||||
},
|
||||
{
|
||||
name: fmt.Sprintf("profiler-test-go18-%d", runID),
|
||||
service: fmt.Sprintf("profiler-test-go18-%d-gce", runID),
|
||||
goVersion: "1.8",
|
||||
},
|
||||
{
|
||||
name: fmt.Sprintf("profiler-test-go17-%d", runID),
|
||||
service: fmt.Sprintf("profiler-test-go17-%d-gce", runID),
|
||||
goVersion: "1.7",
|
||||
},
|
||||
{
|
||||
name: fmt.Sprintf("profiler-test-go16-%d", runID),
|
||||
service: fmt.Sprintf("profiler-test-go16-%d-gce", runID),
|
||||
goVersion: "1.6",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type clusterConfig struct {
|
||||
clusterName string
|
||||
podName string
|
||||
imageSourceName string
|
||||
imageName string
|
||||
service string
|
||||
}
|
||||
|
||||
func newClusterConfig(projectID string) clusterConfig {
|
||||
return clusterConfig{
|
||||
clusterName: fmt.Sprintf("profiler-test-cluster-%d", runID),
|
||||
podName: fmt.Sprintf("profiler-test-pod-%d", runID),
|
||||
imageSourceName: fmt.Sprintf("profiler-test/%d/Dockerfile.zip", runID),
|
||||
imageName: fmt.Sprintf("%s/profiler-test-%d", projectID, runID),
|
||||
service: fmt.Sprintf("profiler-test-%d-gke", runID),
|
||||
}
|
||||
}
|
||||
|
||||
func renderStartupScript(template *template.Template, inst instanceConfig) (string, error) {
|
||||
var buf bytes.Buffer
|
||||
err := template.Execute(&buf,
|
||||
struct {
|
||||
Service string
|
||||
GoVersion string
|
||||
Commit string
|
||||
}{
|
||||
Service: inst.service,
|
||||
GoVersion: inst.goVersion,
|
||||
Commit: *commit,
|
||||
})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to render startup script for %s: %v", inst.name, err)
|
||||
}
|
||||
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
func (tr *testRunner) startInstance(ctx context.Context, inst instanceConfig, projectID, zone string) error {
|
||||
img, err := tr.computeService.Images.GetFromFamily("debian-cloud", "debian-9").Context(ctx).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
startupScript, err := renderStartupScript(tr.startupTemplate, inst)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = tr.computeService.Instances.Insert(projectID, zone, &compute.Instance{
|
||||
MachineType: fmt.Sprintf("zones/%s/machineTypes/n1-standard-1", zone),
|
||||
Name: inst.name,
|
||||
Disks: []*compute.AttachedDisk{{
|
||||
AutoDelete: true, // delete the disk when the VM is deleted.
|
||||
Boot: true,
|
||||
Type: "PERSISTENT",
|
||||
Mode: "READ_WRITE",
|
||||
InitializeParams: &compute.AttachedDiskInitializeParams{
|
||||
SourceImage: img.SelfLink,
|
||||
DiskType: fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/diskTypes/pd-standard", projectID, zone),
|
||||
},
|
||||
}},
|
||||
NetworkInterfaces: []*compute.NetworkInterface{{
|
||||
Network: fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/networks/default", projectID),
|
||||
AccessConfigs: []*compute.AccessConfig{{
|
||||
Name: "External NAT",
|
||||
}},
|
||||
}},
|
||||
Metadata: &compute.Metadata{
|
||||
Items: []*compute.MetadataItems{{
|
||||
Key: "startup-script",
|
||||
Value: googleapi.String(startupScript),
|
||||
}},
|
||||
},
|
||||
ServiceAccounts: []*compute.ServiceAccount{{
|
||||
Email: "default",
|
||||
Scopes: []string{
|
||||
monitorWriteScope,
|
||||
},
|
||||
}},
|
||||
}).Do()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (tr *testRunner) pollForSerialOutput(ctx context.Context, projectID, zone, instanceName string) error {
|
||||
var output string
|
||||
defer func() {
|
||||
log.Printf("Serial port output for %s:\n%s", instanceName, output)
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return fmt.Errorf("timed out waiting for profiling finishing on instance %s", instanceName)
|
||||
|
||||
case <-time.After(20 * time.Second):
|
||||
resp, err := tr.computeService.Instances.GetSerialPortOutput(projectID, zone, instanceName).Context(ctx).Do()
|
||||
if err != nil {
|
||||
// Transient failure.
|
||||
log.Printf("Transient error getting serial port output from instance %s (will retry): %v", instanceName, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if output = resp.Contents; strings.Contains(output, benchFinishString) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (tr *testRunner) queryAndCheckProfile(service, startTime, endTime, profileType, projectID string) error {
|
||||
queryURL := fmt.Sprintf("https://cloudprofiler.googleapis.com/v2/projects/%s/profiles:query", projectID)
|
||||
const queryJsonFmt = `{"endTime": "%s", "profileType": "%s","startTime": "%s", "target": "%s"}`
|
||||
|
||||
queryRequest := fmt.Sprintf(queryJsonFmt, endTime, profileType, startTime, service)
|
||||
|
||||
resp, err := tr.client.Post(queryURL, "application/json", strings.NewReader(queryRequest))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query API: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read response body: %v", err)
|
||||
}
|
||||
|
||||
if err := validateProfileData(body, "busywork"); err != nil {
|
||||
return fmt.Errorf("failed to validate profile %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tr *testRunner) runTestOnGCE(ctx context.Context, t *testing.T, inst instanceConfig, projectID, zone string) {
|
||||
if err := tr.startInstance(ctx, inst, projectID, zone); err != nil {
|
||||
t.Fatalf("startInstance(%s) got error: %v", inst.name, err)
|
||||
}
|
||||
defer func() {
|
||||
if _, err := tr.computeService.Instances.Delete(projectID, zone, inst.name).Context(ctx).Do(); err != nil {
|
||||
t.Errorf("Instances.Delete(%s) got error: %v", inst.name, err)
|
||||
}
|
||||
}()
|
||||
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, time.Minute*25)
|
||||
defer cancel()
|
||||
if err := tr.pollForSerialOutput(timeoutCtx, projectID, zone, inst.name); err != nil {
|
||||
t.Fatalf("pollForSerialOutput(%s) got error: %v", inst.name, err)
|
||||
}
|
||||
|
||||
timeNow := time.Now()
|
||||
endTime := timeNow.Format(time.RFC3339)
|
||||
startTime := timeNow.Add(-1 * time.Hour).Format(time.RFC3339)
|
||||
for _, pType := range []string{"CPU", "HEAP"} {
|
||||
if err := tr.queryAndCheckProfile(inst.service, startTime, endTime, pType, projectID); err != nil {
|
||||
t.Errorf("queryAndCheckProfile(%s, %s, %s, %s) got error: %v", inst.service, startTime, endTime, pType, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// createAndPublishDockerImage creates a docker image from source code in a GCS
|
||||
// bucket and pushes the image to Google Container Registry.
|
||||
func (tr *testRunner) createAndPublishDockerImage(ctx context.Context, projectID, sourceBucket, sourceObject, imageName string) error {
|
||||
cloudbuildService, err := cloudbuild.New(tr.client)
if err != nil {
return fmt.Errorf("failed to create cloudbuild service: %v", err)
}
|
||||
|
||||
build := &cloudbuild.Build{
|
||||
Source: &cloudbuild.Source{
|
||||
StorageSource: &cloudbuild.StorageSource{
|
||||
Bucket: sourceBucket,
|
||||
Object: sourceObject,
|
||||
},
|
||||
},
|
||||
Steps: []*cloudbuild.BuildStep{
|
||||
{
|
||||
Name: "gcr.io/cloud-builders/docker",
|
||||
Args: []string{"build", "-t", imageName, "."},
|
||||
},
|
||||
},
|
||||
Images: []string{imageName},
|
||||
}
|
||||
|
||||
op, err := cloudbuildService.Projects.Builds.Create(projectID, build).Context(ctx).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create image: %v", err)
|
||||
}
|
||||
opID := op.Name
|
||||
|
||||
// Wait for the image to be created.
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return fmt.Errorf("timed out waiting creating image")
|
||||
|
||||
case <-time.After(10 * time.Second):
|
||||
op, err := cloudbuildService.Operations.Get(opID).Context(ctx).Do()
|
||||
if err != nil {
|
||||
log.Printf("Transient error getting operation (will retry): %v", err)
|
||||
break
|
||||
}
|
||||
if op.Done {
|
||||
log.Printf("Published image %s to Google Container Registry.", imageName)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type imageResponse struct {
|
||||
Manifest map[string]interface{} `json:"manifest"`
|
||||
Name string `json:"name"`
|
||||
Tags []string `json:"tags"`
|
||||
}
|
||||
|
||||
// deleteDockerImage deletes a docker image from Google Container Registry.
|
||||
func (tr *testRunner) deleteDockerImage(ctx context.Context, imageName string) []error {
|
||||
queryImageURL := fmt.Sprintf("https://gcr.io/v2/%s/tags/list", imageName)
|
||||
resp, err := tr.client.Get(queryImageURL)
|
||||
if err != nil {
|
||||
return []error{fmt.Errorf("failed to list tags: %v", err)}
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return []error{err}
|
||||
}
|
||||
var ir imageResponse
|
||||
if err := json.Unmarshal(body, &ir); err != nil {
|
||||
return []error{err}
|
||||
}
|
||||
|
||||
const deleteImageURLFmt = "https://gcr.io/v2/%s/manifests/%s"
|
||||
var errs []error
|
||||
for _, tag := range ir.Tags {
|
||||
if err := deleteDockerImageResource(tr.client, fmt.Sprintf(deleteImageURLFmt, imageName, tag)); err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to delete tag %s: %v", tag, err))
|
||||
}
|
||||
}
|
||||
|
||||
for manifest := range ir.Manifest {
|
||||
if err := deleteDockerImageResource(tr.client, fmt.Sprintf(deleteImageURLFmt, imageName, manifest)); err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to delete manifest %s: %v", manifest, err))
|
||||
}
|
||||
}
|
||||
return errs
|
||||
}
|
||||
|
||||
func deleteDockerImageResource(client *http.Client, url string) error {
|
||||
req, err := http.NewRequest("DELETE", url, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get request: %v", err)
|
||||
}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete resource: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted {
|
||||
return fmt.Errorf("failed to delete resource: status code = %d", resp.StatusCode)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tr *testRunner) createCluster(ctx context.Context, client *http.Client, projectID, zone, clusterName string) error {
|
||||
request := &container.CreateClusterRequest{Cluster: &container.Cluster{
|
||||
Name: clusterName,
|
||||
InitialNodeCount: 3,
|
||||
NodeConfig: &container.NodeConfig{
|
||||
OauthScopes: []string{
|
||||
storageReadScope,
|
||||
},
|
||||
},
|
||||
}}
|
||||
op, err := tr.containerService.Projects.Zones.Clusters.Create(projectID, zone, request).Context(ctx).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create cluster %s: %v", clusterName, err)
|
||||
}
|
||||
opID := op.Name
|
||||
|
||||
// Wait for the cluster to be created.
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return fmt.Errorf("timed out waiting creating cluster")
|
||||
|
||||
case <-time.After(10 * time.Second):
|
||||
op, err := tr.containerService.Projects.Zones.Operations.Get(projectID, zone, opID).Context(ctx).Do()
|
||||
if err != nil {
|
||||
log.Printf("Transient error getting operation (will retry): %v", err)
|
||||
break
|
||||
}
|
||||
if op.Status == "DONE" {
|
||||
log.Printf("Created cluster %s.", clusterName)
|
||||
return nil
|
||||
}
|
||||
if op.Status == "ABORTING" {
|
||||
return fmt.Errorf("create cluster operation is aborted")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (tr *testRunner) deployContainer(ctx context.Context, kubernetesClient *kubernetes.Client, podName, imageName string) error {
|
||||
pod := &k8sapi.Pod{
|
||||
ObjectMeta: k8sapi.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: k8sapi.PodSpec{
|
||||
Containers: []k8sapi.Container{
|
||||
{
|
||||
Name: "profiler-test",
|
||||
Image: fmt.Sprintf("gcr.io/%s:latest", imageName),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
if _, err := kubernetesClient.RunLongLivedPod(ctx, pod); err != nil {
|
||||
return fmt.Errorf("failed to run pod %s: %v", podName, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tr *testRunner) pollPodLog(ctx context.Context, kubernetesClient *kubernetes.Client, podName string) error {
|
||||
var output string
|
||||
defer func() {
|
||||
log.Printf("Log for pod %s:\n%s", podName, output)
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return fmt.Errorf("timed out waiting profiling finishing on container")
|
||||
|
||||
case <-time.After(20 * time.Second):
|
||||
var err error
|
||||
output, err = kubernetesClient.PodLog(ctx, podName)
|
||||
if err != nil {
|
||||
// Transient failure.
|
||||
log.Printf("Transient error getting log (will retry): %v", err)
|
||||
continue
|
||||
}
|
||||
if strings.Contains(output, benchFinishString) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (tr *testRunner) runTestOnGKE(ctx context.Context, t *testing.T, cfg clusterConfig, projectID, zone, bucket string) {
|
||||
if err := tr.uploadImageSource(ctx, bucket, cfg.imageSourceName, *commit, cfg.service); err != nil {
|
||||
t.Fatalf("uploadImageSource() got error: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := tr.storageClient.Bucket(bucket).Object(cfg.imageSourceName).Delete(ctx); err != nil {
|
||||
t.Errorf("Bucket(%s).Object(%s).Delete() got error: %v", bucket, cfg.imageSourceName, err)
|
||||
}
|
||||
}()
|
||||
|
||||
createImageCtx, cancel := context.WithTimeout(ctx, 5*time.Minute)
|
||||
defer cancel()
|
||||
if err := tr.createAndPublishDockerImage(createImageCtx, projectID, bucket, cfg.imageSourceName, fmt.Sprintf("gcr.io/%s", cfg.imageName)); err != nil {
|
||||
t.Fatalf("createAndPublishDockerImage(%s) got error: %v", cfg.imageName, err)
|
||||
}
|
||||
defer func() {
|
||||
for _, err := range tr.deleteDockerImage(ctx, cfg.imageName) {
|
||||
t.Errorf("deleteDockerImage(%s) got error: %v", cfg.imageName, err)
|
||||
}
|
||||
}()
|
||||
|
||||
createClusterCtx, cancel := context.WithTimeout(ctx, 5*time.Minute)
|
||||
defer cancel()
|
||||
if err := tr.createCluster(createClusterCtx, tr.client, projectID, zone, cfg.clusterName); err != nil {
|
||||
t.Fatalf("createCluster(%s) got error: %v", cfg.clusterName, err)
|
||||
}
|
||||
defer func() {
|
||||
if _, err := tr.containerService.Projects.Zones.Clusters.Delete(projectID, zone, cfg.clusterName).Context(ctx).Do(); err != nil {
|
||||
t.Errorf("Clusters.Delete(%s) got error: %v", cfg.clusterName, err)
|
||||
}
|
||||
}()
|
||||
|
||||
kubernetesClient, err := gke.NewClient(ctx, cfg.clusterName, gke.OptZone(zone), gke.OptProject(projectID))
|
||||
if err != nil {
|
||||
t.Fatalf("gke.NewClient() got error: %v", err)
|
||||
}
|
||||
|
||||
deployContainerCtx, cancel := context.WithTimeout(ctx, 5*time.Minute)
|
||||
defer cancel()
|
||||
if err := tr.deployContainer(deployContainerCtx, kubernetesClient, cfg.podName, cfg.imageName); err != nil {
|
||||
t.Fatalf("deployContainer(%s, %s) got error: %v", cfg.podName, cfg.imageName, err)
|
||||
}
|
||||
|
||||
pollLogCtx, cancel := context.WithTimeout(ctx, 20*time.Minute)
|
||||
defer cancel()
|
||||
if err := tr.pollPodLog(pollLogCtx, kubernetesClient, cfg.podName); err != nil {
|
||||
t.Fatalf("pollPodLog(%s) got error: %v", cfg.podName, err)
|
||||
}
|
||||
|
||||
timeNow := time.Now()
|
||||
endTime := timeNow.Format(time.RFC3339)
|
||||
startTime := timeNow.Add(-1 * time.Hour).Format(time.RFC3339)
|
||||
for _, pType := range []string{"CPU", "HEAP"} {
|
||||
if err := tr.queryAndCheckProfile(cfg.service, startTime, endTime, pType, projectID); err != nil {
|
||||
t.Errorf("queryAndCheckProfile(%s, %s, %s, %s) got error: %v", cfg.service, startTime, endTime, pType, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// uploadImageSource uploads source code for building docker image to GCS.
|
||||
func (tr *testRunner) uploadImageSource(ctx context.Context, bucket, objectName, commit, service string) error {
|
||||
zipBuf := new(bytes.Buffer)
|
||||
z := zip.NewWriter(zipBuf)
|
||||
f, err := z.Create("Dockerfile")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dockerfile := fmt.Sprintf(dockerfileFmt, commit, service)
|
||||
if _, err := f.Write([]byte(dockerfile)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := z.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
wc := tr.storageClient.Bucket(bucket).Object(objectName).NewWriter(ctx)
|
||||
wc.ContentType = "application/zip"
|
||||
wc.ACL = []storage.ACLRule{{storage.AllUsers, storage.RoleReader}}
|
||||
if _, err := wc.Write(zipBuf.Bytes()); err != nil {
|
||||
return err
|
||||
}
|
||||
return wc.Close()
|
||||
}
|
||||
|
||||
func TestAgentIntegration(t *testing.T) {
|
||||
projectID := os.Getenv("GCLOUD_TESTS_GOLANG_PROJECT_ID")
|
||||
if projectID == "" {
|
||||
t.Fatalf("Getenv(GCLOUD_TESTS_GOLANG_PROJECT_ID) got empty string")
|
||||
}
|
||||
|
||||
zone := os.Getenv("GCLOUD_TESTS_GOLANG_ZONE")
|
||||
if zone == "" {
|
||||
t.Fatalf("Getenv(GCLOUD_TESTS_GOLANG_ZONE) got empty string")
|
||||
}
|
||||
|
||||
bucket := os.Getenv("GCLOUD_TESTS_GOLANG_BUCKET")
|
||||
if bucket == "" {
|
||||
t.Fatalf("Getenv(GCLOUD_TESTS_GOLANG_BUCKET) got empty string")
|
||||
}
|
||||
|
||||
if *commit == "" {
|
||||
t.Fatal("commit flag is not set")
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
client, err := google.DefaultClient(ctx, cloudScope)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get default client: %v", err)
|
||||
}
|
||||
|
||||
storageClient, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("storage.NewClient() error: %v", err)
|
||||
}
|
||||
|
||||
computeService, err := compute.New(client)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to initialize compute service: %v", err)
|
||||
}
|
||||
|
||||
containerService, err := container.New(client)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create container client: %v", err)
|
||||
}
|
||||
|
||||
template, err := template.New("startupScript").Parse(startupTemplate)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse startup script template: %v", err)
|
||||
}
|
||||
tr := testRunner{
|
||||
computeService: computeService,
|
||||
client: client,
|
||||
startupTemplate: template,
|
||||
containerService: containerService,
|
||||
storageClient: storageClient,
|
||||
}
|
||||
|
||||
cluster := newClusterConfig(projectID)
|
||||
t.Run(cluster.service, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
tr.runTestOnGKE(ctx, t, cluster, projectID, zone, bucket)
|
||||
})
|
||||
|
||||
instances := newInstanceConfigs()
|
||||
for _, instance := range instances {
|
||||
inst := instance // capture range variable
|
||||
t.Run(inst.service, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
tr.runTestOnGCE(ctx, t, inst, projectID, zone)
|
||||
})
|
||||
}
|
||||
}
|
40
vendor/cloud.google.com/go/profiler/profiler_test.go
generated
vendored
40
vendor/cloud.google.com/go/profiler/profiler_test.go
generated
vendored
@ -52,7 +52,6 @@ const (
|
||||
testSvcVersion = "test-service-version"
|
||||
testProfileDuration = time.Second * 10
|
||||
testServerTimeout = time.Second * 15
|
||||
wantFunctionName = "profilee"
|
||||
)
|
||||
|
||||
func createTestDeployment() *pb.Deployment {
|
||||
@ -570,8 +569,7 @@ func TestInitializeConfig(t *testing.T) {
|
||||
type fakeProfilerServer struct {
|
||||
pb.ProfilerServiceServer
|
||||
count int
|
||||
gotCPUProfile []byte
|
||||
gotHeapProfile []byte
|
||||
gotProfiles map[string][]byte
|
||||
done chan bool
|
||||
}
|
||||
|
||||
@ -590,9 +588,9 @@ func (fs *fakeProfilerServer) CreateProfile(ctx context.Context, in *pb.CreatePr
|
||||
func (fs *fakeProfilerServer) UpdateProfile(ctx context.Context, in *pb.UpdateProfileRequest) (*pb.Profile, error) {
|
||||
switch in.Profile.ProfileType {
|
||||
case pb.ProfileType_CPU:
|
||||
fs.gotCPUProfile = in.Profile.ProfileBytes
|
||||
fs.gotProfiles["CPU"] = in.Profile.ProfileBytes
|
||||
case pb.ProfileType_HEAP:
|
||||
fs.gotHeapProfile = in.Profile.ProfileBytes
|
||||
fs.gotProfiles["HEAP"] = in.Profile.ProfileBytes
|
||||
fs.done <- true
|
||||
}
|
||||
|
||||
@ -629,16 +627,7 @@ func profileeWork() {
|
||||
}
|
||||
}
|
||||
|
||||
func checkSymbolization(p *profile.Profile) error {
|
||||
for _, l := range p.Location {
|
||||
if len(l.Line) > 0 && l.Line[0].Function != nil && strings.Contains(l.Line[0].Function.Name, wantFunctionName) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("want function name %v not found in profile", wantFunctionName)
|
||||
}
|
||||
|
||||
func validateProfile(rawData []byte) error {
|
||||
func validateProfile(rawData []byte, wantFunctionName string) error {
|
||||
p, err := profile.ParseData(rawData)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ParseData failed: %v", err)
|
||||
@ -656,11 +645,13 @@ func validateProfile(rawData []byte) error {
|
||||
return fmt.Errorf("profile contains zero functions: %v", p)
|
||||
}
|
||||
|
||||
if err := checkSymbolization(p); err != nil {
|
||||
return fmt.Errorf("checkSymbolization failed: %v for %v", err, p)
|
||||
}
|
||||
for _, l := range p.Location {
|
||||
if len(l.Line) > 0 && l.Line[0].Function != nil && strings.Contains(l.Line[0].Function.Name, wantFunctionName) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("wanted function name %s not found in the profile", wantFunctionName)
|
||||
}
|
||||
|
||||
func TestAgentWithServer(t *testing.T) {
|
||||
oldDialGRPC, oldConfig := dialGRPC, config
|
||||
@ -672,7 +663,7 @@ func TestAgentWithServer(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("testutil.NewServer(): %v", err)
|
||||
}
|
||||
fakeServer := &fakeProfilerServer{done: make(chan bool)}
|
||||
fakeServer := &fakeProfilerServer{gotProfiles: map[string][]byte{}, done: make(chan bool)}
|
||||
pb.RegisterProfilerServiceServer(srv.Gsrv, fakeServer)
|
||||
|
||||
srv.Start()
|
||||
@ -698,10 +689,11 @@ func TestAgentWithServer(t *testing.T) {
|
||||
}
|
||||
quitProfilee <- true
|
||||
|
||||
if err := validateProfile(fakeServer.gotCPUProfile); err != nil {
|
||||
t.Errorf("validateProfile(gotCPUProfile): %v", err)
|
||||
}
|
||||
if err := validateProfile(fakeServer.gotHeapProfile); err != nil {
|
||||
t.Errorf("validateProfile(gotHeapProfile): %v", err)
|
||||
for _, pType := range []string{"CPU", "HEAP"} {
|
||||
if profile, ok := fakeServer.gotProfiles[pType]; !ok {
|
||||
t.Errorf("fakeServer.gotProfiles[%s] got no profile, want profile", pType)
|
||||
} else if err := validateProfile(profile, "profilee"); err != nil {
|
||||
t.Errorf("validateProfile(%s) got error: %v", pType, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
1
vendor/github.com/Azure/azure-sdk-for-go/.travis.yml
generated
vendored
1
vendor/github.com/Azure/azure-sdk-for-go/.travis.yml
generated
vendored
@ -2,6 +2,7 @@ sudo: false
|
||||
|
||||
language: go
|
||||
go:
|
||||
- 1.7
|
||||
- 1.8
|
||||
- 1.9
|
||||
|
||||
|
11
vendor/github.com/Azure/azure-sdk-for-go/CHANGELOG.md
generated
vendored
11
vendor/github.com/Azure/azure-sdk-for-go/CHANGELOG.md
generated
vendored
@ -1,5 +1,16 @@
|
||||
# CHANGELOG
|
||||
|
||||
## `v11.1.0-beta`
|
||||
|
||||
### ARM
|
||||
|
||||
- trafficmanager and containerregistry SDKs now reflect the services faithfully
|
||||
- trafficmanager also has a new operation group: user metrics.
|
||||
|
||||
### Generated code notes
|
||||
- [Azure REST API specs](https://github.com/Azure/azure-rest-api-specs) commit: c97a18ed775029207715b09c80761334724740b9
|
||||
- [AutoRest Go Generator](https://github.com/Azure/autorest.go) commit: 5d984152f2e9cff6878ea5060bed7e8d8a2ae1cc
|
||||
|
||||
## `v11.0.0-beta`
|
||||
|
||||
### ARM
|
||||
|
27
vendor/github.com/Azure/azure-sdk-for-go/README.md
generated
vendored
27
vendor/github.com/Azure/azure-sdk-for-go/README.md
generated
vendored
@ -23,17 +23,30 @@ for Go uses [glide](https://github.com/Masterminds/glide).
|
||||
|
||||
# Versioning
|
||||
## SDK Versions
|
||||
The tags in this repository are based on, but do not conform to [SemVer.org's recommendations](http://semver.org/).
|
||||
The entire SDK will
|
||||
continue to be distributed as a single repository, and be labeled with version tags that are applicable to the whole
|
||||
repository. The tags in this repository are based on, but do not conform to [SemVer.org's recommendations](http://semver.org/).
|
||||
For now, the "-beta" tag is an indicator that we are still in preview and still are planning on releasing some breaking
|
||||
changes.
|
||||
|
||||
While in beta, we will only accept contributions to the `dev` or `master` branches. Once the `beta` tag is removed, we'll
|
||||
only contribute new features and Azure API surface space to the most recent major version of our SDK. However, pull requests
|
||||
to older major versions will be evaluated and accepted as appropriate. Any critical bugs will be fixed in old versions as well.
|
||||
To facilitate pull requests, a branch will be created for each of the major versions accepting support. For example,
|
||||
should we have tags denoting the versions, `v11.1.0`, `v11.2.0`, and `v12.0.0`, a branch `v11` would be present for submission
|
||||
of PRs.
|
||||
|
||||
## Azure Versions
|
||||
Azure services _mostly_ do not use SemVer based versions. Rather, they use profiles identified by dates. One will often
|
||||
see this casually referred to as an "APIVersion". At the moment, our SDK only supports the most recent profiles. In
|
||||
order to lock to an API version, one must also lock to an SDK version. However, as discussed in
|
||||
[#517](https://github.com/Azure/azure-sdk-for-go/issues/517), our objective is to reorganize and publish independent
|
||||
packages for each profile. In that way, we'll be able to have parallel support in a single SDK version for all
|
||||
APIVersions supported by Azure.
|
||||
Azure services _mostly_ do not use SemVer based versions. Rather, they stamp a set of REST endpoints with a date identifier. One will often
|
||||
see these stamps casually referred to as "API Versions". At the moment, our SDK only supports the most recent stamp for each service. In
|
||||
order to lock to an API version, one must also lock to an SDK version. However, as discussed in [#517](https://github.com/Azure/azure-sdk-for-go/issues/517),
|
||||
our objective is to reorganize and publish an independent package for each stamped API version of each service. In that way, we'll be able to support all API Versions
|
||||
in a single SDK Version.
|
||||
|
||||
Knowing which API Versions of services are compatbile with one another, and finding which API Versions are available in which environments
|
||||
has been a common source of frustration for users. Along with Azure Stack, these problems have led to the development of "Profiles" which are
|
||||
aggregations of multiple services at particular API Versions. Using profiles with our SDK will be optional, and to opt-in you will need to be
|
||||
running Go 1.9 or higher.
|
||||
|
||||
# Documentation
|
||||
|
||||
|
20 vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/models.go generated vendored
@ -66,24 +66,28 @@ const (
|
||||
type SkuName string
|
||||
|
||||
const (
|
||||
// Basic specifies the basic state for sku name.
|
||||
Basic SkuName = "Basic"
|
||||
// Classic specifies the classic state for sku name.
|
||||
Classic SkuName = "Classic"
|
||||
// ManagedBasic specifies the managed basic state for sku name.
|
||||
ManagedBasic SkuName = "Managed_Basic"
|
||||
// ManagedPremium specifies the managed premium state for sku name.
|
||||
ManagedPremium SkuName = "Managed_Premium"
|
||||
// ManagedStandard specifies the managed standard state for sku name.
|
||||
ManagedStandard SkuName = "Managed_Standard"
|
||||
// Premium specifies the premium state for sku name.
|
||||
Premium SkuName = "Premium"
|
||||
// Standard specifies the standard state for sku name.
|
||||
Standard SkuName = "Standard"
|
||||
)
|
||||
|
||||
// SkuTier enumerates the values for sku tier.
|
||||
type SkuTier string
|
||||
|
||||
const (
|
||||
// SkuTierBasic specifies the sku tier basic state for sku tier.
|
||||
SkuTierBasic SkuTier = "Basic"
|
||||
// SkuTierClassic specifies the sku tier classic state for sku tier.
|
||||
SkuTierClassic SkuTier = "Classic"
|
||||
// SkuTierManaged specifies the sku tier managed state for sku tier.
|
||||
SkuTierManaged SkuTier = "Managed"
|
||||
// SkuTierPremium specifies the sku tier premium state for sku tier.
|
||||
SkuTierPremium SkuTier = "Premium"
|
||||
// SkuTierStandard specifies the sku tier standard state for sku tier.
|
||||
SkuTierStandard SkuTier = "Standard"
|
||||
)
|
||||
|
||||
// WebhookAction enumerates the values for webhook action.
|
||||
|
4 vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/version.go generated vendored
@@ -19,10 +19,10 @@ package containerregistry

// UserAgent returns the UserAgent string to use when sending http.Requests.
func UserAgent() string {
	return "Azure-SDK-For-Go/v11.0.0-beta arm-containerregistry/2017-10-01"
	return "Azure-SDK-For-Go/v11.1.0-beta arm-containerregistry/2017-10-01"
}

// Version returns the semantic version (see http://semver.org) of the client.
func Version() string {
	return "v11.0.0-beta"
	return "v11.1.0-beta"
}
2 vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/client.go generated vendored
@@ -1,4 +1,4 @@
// Package trafficmanager implements the Azure ARM Trafficmanager service API version 2017-09-01-preview.
// Package trafficmanager implements the Azure ARM Trafficmanager service API version .
//
//
package trafficmanager
8 vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/endpoints.go generated vendored
@ -76,7 +76,7 @@ func (client EndpointsClient) CreateOrUpdatePreparer(resourceGroupName string, p
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2017-09-01-preview"
|
||||
const APIVersion = "2017-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
@ -147,7 +147,7 @@ func (client EndpointsClient) DeletePreparer(resourceGroupName string, profileNa
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2017-09-01-preview"
|
||||
const APIVersion = "2017-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
@ -216,7 +216,7 @@ func (client EndpointsClient) GetPreparer(resourceGroupName string, profileName
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2017-09-01-preview"
|
||||
const APIVersion = "2017-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
@ -286,7 +286,7 @@ func (client EndpointsClient) UpdatePreparer(resourceGroupName string, profileNa
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2017-09-01-preview"
|
||||
const APIVersion = "2017-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
2 vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/geographichierarchies.go generated vendored
@ -63,7 +63,7 @@ func (client GeographicHierarchiesClient) GetDefault() (result GeographicHierarc
|
||||
|
||||
// GetDefaultPreparer prepares the GetDefault request.
|
||||
func (client GeographicHierarchiesClient) GetDefaultPreparer() (*http.Request, error) {
|
||||
const APIVersion = "2017-09-01-preview"
|
||||
const APIVersion = "2017-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
14 vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/models.go generated vendored
@ -288,3 +288,17 @@ type TrafficFlow struct {
|
||||
Longitude *float64 `json:"longitude,omitempty"`
|
||||
QueryExperiences *[]QueryExperience `json:"queryExperiences,omitempty"`
|
||||
}
|
||||
|
||||
// UserMetricsKeyModel is class representing a Traffic Manager Real User Metrics key response.
|
||||
type UserMetricsKeyModel struct {
|
||||
autorest.Response `json:"-"`
|
||||
ID *string `json:"id,omitempty"`
|
||||
Name *string `json:"name,omitempty"`
|
||||
Type *string `json:"type,omitempty"`
|
||||
*UserMetricsKeyProperties `json:"properties,omitempty"`
|
||||
}
|
||||
|
||||
// UserMetricsKeyProperties is class representing the properties of a Real User Metrics key operation response.
|
||||
type UserMetricsKeyProperties struct {
|
||||
Key *string `json:"key,omitempty"`
|
||||
}
|
||||
|
14 vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/profiles.go generated vendored
@ -65,7 +65,7 @@ func (client ProfilesClient) CheckTrafficManagerRelativeDNSNameAvailability(para
|
||||
|
||||
// CheckTrafficManagerRelativeDNSNameAvailabilityPreparer prepares the CheckTrafficManagerRelativeDNSNameAvailability request.
|
||||
func (client ProfilesClient) CheckTrafficManagerRelativeDNSNameAvailabilityPreparer(parameters CheckTrafficManagerRelativeDNSNameAvailabilityParameters) (*http.Request, error) {
|
||||
const APIVersion = "2017-09-01-preview"
|
||||
const APIVersion = "2017-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
@ -134,7 +134,7 @@ func (client ProfilesClient) CreateOrUpdatePreparer(resourceGroupName string, pr
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2017-09-01-preview"
|
||||
const APIVersion = "2017-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
@ -202,7 +202,7 @@ func (client ProfilesClient) DeletePreparer(resourceGroupName string, profileNam
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2017-09-01-preview"
|
||||
const APIVersion = "2017-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
@ -268,7 +268,7 @@ func (client ProfilesClient) GetPreparer(resourceGroupName string, profileName s
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2017-09-01-preview"
|
||||
const APIVersion = "2017-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
@ -332,7 +332,7 @@ func (client ProfilesClient) ListByResourceGroupPreparer(resourceGroupName strin
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2017-09-01-preview"
|
||||
const APIVersion = "2017-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
@ -393,7 +393,7 @@ func (client ProfilesClient) ListBySubscriptionPreparer() (*http.Request, error)
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2017-09-01-preview"
|
||||
const APIVersion = "2017-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
@ -460,7 +460,7 @@ func (client ProfilesClient) UpdatePreparer(resourceGroupName string, profileNam
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2017-09-01-preview"
|
||||
const APIVersion = "2017-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
222 vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/usermetricskeys.go generated vendored Normal file
@ -0,0 +1,222 @@
|
||||
package trafficmanager
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// UserMetricsKeysClient is the client for the UserMetricsKeys methods of the Trafficmanager service.
|
||||
type UserMetricsKeysClient struct {
|
||||
ManagementClient
|
||||
}
|
||||
|
||||
// NewUserMetricsKeysClient creates an instance of the UserMetricsKeysClient client.
|
||||
func NewUserMetricsKeysClient(subscriptionID string) UserMetricsKeysClient {
|
||||
return NewUserMetricsKeysClientWithBaseURI(DefaultBaseURI, subscriptionID)
|
||||
}
|
||||
|
||||
// NewUserMetricsKeysClientWithBaseURI creates an instance of the UserMetricsKeysClient client.
|
||||
func NewUserMetricsKeysClientWithBaseURI(baseURI string, subscriptionID string) UserMetricsKeysClient {
|
||||
return UserMetricsKeysClient{NewWithBaseURI(baseURI, subscriptionID)}
|
||||
}
|
||||
|
||||
// CreateOrUpdate create or update a subscription-level key used for Realtime User Metrics collection.
|
||||
func (client UserMetricsKeysClient) CreateOrUpdate() (result UserMetricsKeyModel, err error) {
|
||||
req, err := client.CreateOrUpdatePreparer()
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "trafficmanager.UserMetricsKeysClient", "CreateOrUpdate", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.CreateOrUpdateSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "trafficmanager.UserMetricsKeysClient", "CreateOrUpdate", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.CreateOrUpdateResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "trafficmanager.UserMetricsKeysClient", "CreateOrUpdate", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
|
||||
func (client UserMetricsKeysClient) CreateOrUpdatePreparer() (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2017-09-01-preview"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsPut(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/trafficManagerUserMetricsKeys/default", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare(&http.Request{})
|
||||
}
|
||||
|
||||
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client UserMetricsKeysClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
|
||||
return autorest.SendWithSender(client, req)
|
||||
}
|
||||
|
||||
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client UserMetricsKeysClient) CreateOrUpdateResponder(resp *http.Response) (result UserMetricsKeyModel, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// Delete delete a subscription-level key used for Realtime User Metrics collection.
|
||||
func (client UserMetricsKeysClient) Delete() (result DeleteOperationResult, err error) {
|
||||
req, err := client.DeletePreparer()
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "trafficmanager.UserMetricsKeysClient", "Delete", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.DeleteSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "trafficmanager.UserMetricsKeysClient", "Delete", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.DeleteResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "trafficmanager.UserMetricsKeysClient", "Delete", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// DeletePreparer prepares the Delete request.
|
||||
func (client UserMetricsKeysClient) DeletePreparer() (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2017-09-01-preview"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsDelete(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/trafficManagerUserMetricsKeys/default", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare(&http.Request{})
|
||||
}
|
||||
|
||||
// DeleteSender sends the Delete request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client UserMetricsKeysClient) DeleteSender(req *http.Request) (*http.Response, error) {
|
||||
return autorest.SendWithSender(client, req)
|
||||
}
|
||||
|
||||
// DeleteResponder handles the response to the Delete request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client UserMetricsKeysClient) DeleteResponder(resp *http.Response) (result DeleteOperationResult, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// GetDefault get the subscription-level key used for Realtime User Metrics collection.
|
||||
func (client UserMetricsKeysClient) GetDefault() (result UserMetricsKeyModel, err error) {
|
||||
req, err := client.GetDefaultPreparer()
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "trafficmanager.UserMetricsKeysClient", "GetDefault", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.GetDefaultSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "trafficmanager.UserMetricsKeysClient", "GetDefault", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.GetDefaultResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "trafficmanager.UserMetricsKeysClient", "GetDefault", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// GetDefaultPreparer prepares the GetDefault request.
|
||||
func (client UserMetricsKeysClient) GetDefaultPreparer() (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2017-09-01-preview"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/trafficManagerUserMetricsKeys/default", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare(&http.Request{})
|
||||
}
|
||||
|
||||
// GetDefaultSender sends the GetDefault request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client UserMetricsKeysClient) GetDefaultSender(req *http.Request) (*http.Response, error) {
|
||||
return autorest.SendWithSender(client, req)
|
||||
}
|
||||
|
||||
// GetDefaultResponder handles the response to the GetDefault request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client UserMetricsKeysClient) GetDefaultResponder(resp *http.Response) (result UserMetricsKeyModel, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
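The new UserMetricsKeys operation group above is a plain AutoRest-generated client. As a hedged, hypothetical sketch of how a consumer might call it (the subscription ID is a placeholder, and the authorizer is obtained via the auth-file helper that this same update adds to go-autorest):

```go
package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/arm/trafficmanager"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Hypothetical subscription ID; substitute a real one.
	subscriptionID := "00000000-0000-0000-0000-000000000000"

	// AZURE_AUTH_LOCATION must point at an Azure SDK auth file (see the
	// go-autorest auth package added later in this commit).
	setup, err := auth.GetClientSetup("https://management.azure.com/")
	if err != nil {
		log.Fatal(err)
	}

	client := trafficmanager.NewUserMetricsKeysClient(subscriptionID)
	client.Authorizer = setup.BearerAuthorizer

	// Create (or rotate) the subscription-level Real User Metrics key.
	key, err := client.CreateOrUpdate()
	if err != nil {
		log.Fatal(err)
	}
	if key.UserMetricsKeyProperties != nil && key.UserMetricsKeyProperties.Key != nil {
		fmt.Println("RUM key:", *key.UserMetricsKeyProperties.Key)
	}
}
```

The same client also exposes GetDefault and Delete for reading back and removing the key, as shown in the generated code above.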
4 vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/version.go generated vendored
@@ -19,10 +19,10 @@ package trafficmanager

// UserAgent returns the UserAgent string to use when sending http.Requests.
func UserAgent() string {
	return "Azure-SDK-For-Go/v11.0.0-beta arm-trafficmanager/2017-09-01-preview"
	return "Azure-SDK-For-Go/v11.1.0-beta arm-trafficmanager/"
}

// Version returns the semantic version (see http://semver.org) of the client.
func Version() string {
	return "v11.0.0-beta"
	return "v11.1.0-beta"
}
2 vendor/github.com/Azure/azure-sdk-for-go/management/version.go generated vendored
@@ -17,5 +17,5 @@ package management
// limitations under the License.

var (
	sdkVersion = "v11.0.0-beta"
	sdkVersion = "v11.1.0-beta"
)
2 vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go generated vendored
@@ -104,7 +104,7 @@ type BlobProperties struct {
	CacheControl string `xml:"Cache-Control" header:"x-ms-blob-cache-control"`
	ContentLanguage string `xml:"Cache-Language" header:"x-ms-blob-content-language"`
	ContentDisposition string `xml:"Content-Disposition" header:"x-ms-blob-content-disposition"`
	BlobType BlobType `xml:"x-ms-blob-blob-type"`
	BlobType BlobType `xml:"BlobType"`
	SequenceNumber int64 `xml:"x-ms-blob-sequence-number"`
	CopyID string `xml:"CopyId"`
	CopyStatus string `xml:"CopyStatus"`
1 vendor/github.com/Azure/azure-sdk-for-go/storage/blob_test.go generated vendored
@ -522,6 +522,7 @@ func (b *Blob) putSingleBlockBlob(chunk []byte) error {
|
||||
|
||||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), nil)
|
||||
headers := b.Container.bsc.client.getStandardHeaders()
|
||||
b.Properties.BlobType = BlobTypeBlock
|
||||
headers["x-ms-blob-type"] = string(BlobTypeBlock)
|
||||
headers["Content-Length"] = strconv.Itoa(len(chunk))
|
||||
|
||||
|
13 vendor/github.com/Azure/azure-sdk-for-go/storage/container_test.go generated vendored
@ -230,6 +230,7 @@ func (s *ContainerSuite) TestListBlobsPagination(c *chk.C) {
|
||||
c.Assert(err, chk.IsNil)
|
||||
|
||||
blobs := []string{}
|
||||
types := []BlobType{}
|
||||
const n = 5
|
||||
const pageSize = 2
|
||||
for i := 0; i < n; i++ {
|
||||
@ -237,10 +238,11 @@ func (s *ContainerSuite) TestListBlobsPagination(c *chk.C) {
|
||||
b := cnt.GetBlobReference(name)
|
||||
c.Assert(b.putSingleBlockBlob([]byte("Hello, world!")), chk.IsNil)
|
||||
blobs = append(blobs, name)
|
||||
types = append(types, b.Properties.BlobType)
|
||||
}
|
||||
sort.Strings(blobs)
|
||||
|
||||
listBlobsPagination(c, cnt, pageSize, blobs)
|
||||
listBlobsPagination(c, cnt, pageSize, blobs, types)
|
||||
|
||||
// Service SAS test
|
||||
sasuriOptions := ContainerSASOptions{}
|
||||
@ -260,7 +262,7 @@ func (s *ContainerSuite) TestListBlobsPagination(c *chk.C) {
|
||||
c.Assert(err, chk.IsNil)
|
||||
cntServiceSAS.Client().HTTPClient = cli.client.HTTPClient
|
||||
|
||||
listBlobsPagination(c, cntServiceSAS, pageSize, blobs)
|
||||
listBlobsPagination(c, cntServiceSAS, pageSize, blobs, types)
|
||||
|
||||
// Account SAS test
|
||||
token, err := cli.client.GetAccountSASToken(accountSASOptions)
|
||||
@ -270,12 +272,13 @@ func (s *ContainerSuite) TestListBlobsPagination(c *chk.C) {
|
||||
cntAccountSAS := SAScli.GetContainerReference(cnt.Name)
|
||||
cntAccountSAS.Client().HTTPClient = cli.client.HTTPClient
|
||||
|
||||
listBlobsPagination(c, cntAccountSAS, pageSize, blobs)
|
||||
listBlobsPagination(c, cntAccountSAS, pageSize, blobs, types)
|
||||
}
|
||||
|
||||
func listBlobsPagination(c *chk.C, cnt *Container, pageSize uint, blobs []string) {
|
||||
func listBlobsPagination(c *chk.C, cnt *Container, pageSize uint, blobs []string, types []BlobType) {
|
||||
// Paginate
|
||||
seen := []string{}
|
||||
seenTypes := []BlobType{}
|
||||
marker := ""
|
||||
for {
|
||||
resp, err := cnt.ListBlobs(ListBlobsParameters{
|
||||
@ -285,6 +288,7 @@ func listBlobsPagination(c *chk.C, cnt *Container, pageSize uint, blobs []string
|
||||
|
||||
for _, b := range resp.Blobs {
|
||||
seen = append(seen, b.Name)
|
||||
seenTypes = append(seenTypes, b.Properties.BlobType)
|
||||
c.Assert(b.Container, chk.Equals, cnt)
|
||||
}
|
||||
|
||||
@ -296,6 +300,7 @@ func listBlobsPagination(c *chk.C, cnt *Container, pageSize uint, blobs []string
|
||||
|
||||
// Compare
|
||||
c.Assert(seen, chk.DeepEquals, blobs)
|
||||
c.Assert(seenTypes, chk.DeepEquals, types)
|
||||
}
|
||||
|
||||
// listBlobsAsFiles is a helper function to list blobs as "folders" and "files".
|
||||
|
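The storage changes above (the corrected `xml:"BlobType"` tag plus the extended pagination test) mean blob types now survive a ListBlobs round trip. A rough, hedged sketch of observing that from client code; the account name, key, and container name are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	// Placeholder credentials; replace with a real account name and base64 key.
	client, err := storage.NewBasicClient("myaccount", "bXlrZXk=")
	if err != nil {
		log.Fatal(err)
	}

	cnt := client.GetBlobService().GetContainerReference("mycontainer")

	resp, err := cnt.ListBlobs(storage.ListBlobsParameters{MaxResults: 10})
	if err != nil {
		log.Fatal(err)
	}

	// With the corrected XML tag, Properties.BlobType is now populated
	// from the listing response, which is what the new test asserts.
	for _, b := range resp.Blobs {
		fmt.Printf("%s\t%s\n", b.Name, b.Properties.BlobType)
	}
}
```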
2 vendor/github.com/Azure/go-autorest/.travis.yml generated vendored
@ -3,6 +3,7 @@ sudo: false
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.9
|
||||
- 1.8
|
||||
- 1.7
|
||||
- 1.6
|
||||
@ -15,6 +16,7 @@ install:
|
||||
- glide install
|
||||
|
||||
script:
|
||||
- grep -L -r --include *.go --exclude-dir vendor -P "Copyright (\d{4}|\(c\)) Microsoft" ./ | tee /dev/stderr | test -z "$(< /dev/stdin)"
|
||||
- test -z "$(gofmt -s -l -w ./autorest/. | tee /dev/stderr)"
|
||||
- test -z "$(golint ./autorest/... | tee /dev/stderr)"
|
||||
- go vet ./autorest/...
|
||||
|
58 vendor/github.com/Azure/go-autorest/CHANGELOG.md generated vendored
@ -1,5 +1,63 @@
|
||||
# CHANGELOG
|
||||
|
||||
|
||||
## v9.1.1
|
||||
|
||||
- Fixes a bug regarding the cookie jar on `autorest.Client.Sender`.
|
||||
|
||||
## v9.1.0
|
||||
|
||||
### New Features
|
||||
|
||||
- In cases where there is a non-empty error from the service, attempt to unmarshal it instead of uniformly calling it an "Unknown" error.
|
||||
- Support for loading Azure CLI Authentication files.
|
||||
- Automatically register your subscription with the Azure Resource Provider if it hadn't been previously.
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- RetriableRequest can now tolerate a ReadSeekable body being read but not reset.
|
||||
- Adding missing Apache Headers
|
||||
|
||||
## v9.0.0
|
||||
|
||||
> **IMPORTANT:** This release was intially labeled incorrectly as `v8.4.0`. From the time it was released, it should have been marked `v9.0.0` because it contains breaking changes to the MSI packages. We appologize for any inconvenience this causes.
|
||||
|
||||
Adding MSI Endpoint Support and CLI token rehydration.
|
||||
|
||||
## v8.3.1
|
||||
|
||||
Pick up bug fix in adal for MSI support.
|
||||
|
||||
## v8.3.0
|
||||
|
||||
Updates to Error string formats for clarity. Also, adding a copy of the http.Response to errors for an improved debugging experience.
|
||||
|
||||
## v8.2.0
|
||||
|
||||
### New Features
|
||||
|
||||
- Add support for bearer authentication callbacks
|
||||
- Support 429 response codes that include "Retry-After" header
|
||||
- Support validation constraint "Pattern" for map keys
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- Make RetriableRequest work with multiple versions of Go
|
||||
|
||||
## v8.1.1
|
||||
Updates the RetriableRequest to take advantage of GetBody() added in Go 1.8.
|
||||
|
||||
## v8.1.0
|
||||
Adds RetriableRequest type for more efficient handling of retrying HTTP requests.
|
||||
|
||||
## v8.0.0
|
||||
|
||||
ADAL refactored into its own package.
|
||||
Support for UNIX time.
|
||||
|
||||
## v7.3.1
|
||||
- Version Testing now removed from production bits that are shipped with the library.
|
||||
|
||||
## v7.3.0
|
||||
- Exposing new `RespondDecorator`, `ByDiscardingBody`. This allows operations
|
||||
to acknowledge that they do not need either the entire or a trailing portion
|
||||
|
14 vendor/github.com/Azure/go-autorest/autorest/adal/cmd/adal.go generated vendored
@ -1,5 +1,19 @@
|
||||
package main
|
||||
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
|
14 vendor/github.com/Azure/go-autorest/autorest/adal/config.go generated vendored
@ -1,5 +1,19 @@
|
||||
package adal
|
||||
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
14 vendor/github.com/Azure/go-autorest/autorest/adal/config_test.go generated vendored
@ -1,5 +1,19 @@
|
||||
package adal
|
||||
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
14 vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go generated vendored
@ -1,5 +1,19 @@
|
||||
package adal
|
||||
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
|
||||
This file is largely based on rjw57/oauth2device's code, with the follow differences:
|
||||
* scope -> resource, and only allow a single one
|
||||
|
14 vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken_test.go generated vendored
@ -1,5 +1,19 @@
|
||||
package adal
|
||||
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
14 vendor/github.com/Azure/go-autorest/autorest/adal/msi.go generated vendored
@ -2,5 +2,19 @@
|
||||
|
||||
package adal
|
||||
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// msiPath is the path to the MSI Extension settings file (to discover the endpoint)
|
||||
var msiPath = "/var/lib/waagent/ManagedIdentity-Settings"
|
||||
|
14 vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go generated vendored
@ -2,6 +2,20 @@
|
||||
|
||||
package adal
|
||||
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
|
14 vendor/github.com/Azure/go-autorest/autorest/adal/persist.go generated vendored
@ -1,5 +1,19 @@
|
||||
package adal
|
||||
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
14 vendor/github.com/Azure/go-autorest/autorest/adal/persist_test.go generated vendored
@ -1,5 +1,19 @@
|
||||
package adal
|
||||
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
|
14 vendor/github.com/Azure/go-autorest/autorest/adal/sender.go generated vendored
@ -1,5 +1,19 @@
|
||||
package adal
|
||||
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
)
|
||||
|
14 vendor/github.com/Azure/go-autorest/autorest/adal/token.go generated vendored
@ -1,5 +1,19 @@
|
||||
package adal
|
||||
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
|
14 vendor/github.com/Azure/go-autorest/autorest/adal/token_test.go generated vendored
@ -1,5 +1,19 @@
|
||||
package adal
|
||||
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
|
14 vendor/github.com/Azure/go-autorest/autorest/authorization.go generated vendored
@ -1,5 +1,19 @@
|
||||
package autorest
|
||||
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
14 vendor/github.com/Azure/go-autorest/autorest/authorization_test.go generated vendored
@ -1,5 +1,19 @@
|
||||
package autorest
|
||||
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
14 vendor/github.com/Azure/go-autorest/autorest/autorest.go generated vendored
@ -57,6 +57,20 @@ generated clients, see the Client described below.
|
||||
*/
|
||||
package autorest
|
||||
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
14 vendor/github.com/Azure/go-autorest/autorest/autorest_test.go generated vendored
@ -1,5 +1,19 @@
|
||||
package autorest
|
||||
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"testing"
|
||||
|
14 vendor/github.com/Azure/go-autorest/autorest/azure/async.go generated vendored
@ -1,5 +1,19 @@
|
||||
package azure
|
||||
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
14 vendor/github.com/Azure/go-autorest/autorest/azure/async_test.go generated vendored
@ -1,5 +1,19 @@
|
||||
package azure
|
||||
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
143 vendor/github.com/Azure/go-autorest/autorest/azure/auth/authfile.go generated vendored Normal file
@ -0,0 +1,143 @@
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"unicode/utf16"
|
||||
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/adal"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/dimchansky/utfbom"
|
||||
)
|
||||
|
||||
// ClientSetup includes authentication details and cloud specific
|
||||
// parameters for ARM clients
|
||||
type ClientSetup struct {
|
||||
*autorest.BearerAuthorizer
|
||||
File
|
||||
BaseURI string
|
||||
}
|
||||
|
||||
// File represents the authentication file
|
||||
type File struct {
|
||||
ClientID string `json:"clientId,omitempty"`
|
||||
ClientSecret string `json:"clientSecret,omitempty"`
|
||||
SubscriptionID string `json:"subscriptionId,omitempty"`
|
||||
TenantID string `json:"tenantId,omitempty"`
|
||||
ActiveDirectoryEndpoint string `json:"activeDirectoryEndpointUrl,omitempty"`
|
||||
ResourceManagerEndpoint string `json:"resourceManagerEndpointUrl,omitempty"`
|
||||
GraphResourceID string `json:"activeDirectoryGraphResourceId,omitempty"`
|
||||
SQLManagementEndpoint string `json:"sqlManagementEndpointUrl,omitempty"`
|
||||
GalleryEndpoint string `json:"galleryEndpointUrl,omitempty"`
|
||||
ManagementEndpoint string `json:"managementEndpointUrl,omitempty"`
|
||||
}
|
||||
|
||||
// GetClientSetup provides an authorizer, base URI, subscriptionID and
|
||||
// tenantID parameters from an Azure CLI auth file
|
||||
func GetClientSetup(baseURI string) (auth ClientSetup, err error) {
|
||||
fileLocation := os.Getenv("AZURE_AUTH_LOCATION")
|
||||
if fileLocation == "" {
|
||||
return auth, errors.New("auth file not found. Environment variable AZURE_AUTH_LOCATION is not set")
|
||||
}
|
||||
|
||||
contents, err := ioutil.ReadFile(fileLocation)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Auth file might be encoded
|
||||
decoded, err := decode(contents)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = json.Unmarshal(decoded, &auth.File)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
resource, err := getResourceForToken(auth.File, baseURI)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
auth.BaseURI = resource
|
||||
|
||||
config, err := adal.NewOAuthConfig(auth.ActiveDirectoryEndpoint, auth.TenantID)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
spToken, err := adal.NewServicePrincipalToken(*config, auth.ClientID, auth.ClientSecret, resource)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
auth.BearerAuthorizer = autorest.NewBearerAuthorizer(spToken)
|
||||
return
|
||||
}
|
||||
|
||||
func decode(b []byte) ([]byte, error) {
|
||||
reader, enc := utfbom.Skip(bytes.NewReader(b))
|
||||
|
||||
switch enc {
|
||||
case utfbom.UTF16LittleEndian:
|
||||
u16 := make([]uint16, (len(b)/2)-1)
|
||||
err := binary.Read(reader, binary.LittleEndian, &u16)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return []byte(string(utf16.Decode(u16))), nil
|
||||
case utfbom.UTF16BigEndian:
|
||||
u16 := make([]uint16, (len(b)/2)-1)
|
||||
err := binary.Read(reader, binary.BigEndian, &u16)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return []byte(string(utf16.Decode(u16))), nil
|
||||
}
|
||||
return ioutil.ReadAll(reader)
|
||||
}
|
||||
|
||||
func getResourceForToken(f File, baseURI string) (string, error) {
|
||||
// Compare dafault base URI from the SDK to the endpoints from the public cloud
|
||||
// Base URI and token resource are the same string. This func finds the authentication
|
||||
// file field that matches the SDK base URI. The SDK defines the public cloud
|
||||
// endpoint as its default base URI
|
||||
if !strings.HasSuffix(baseURI, "/") {
|
||||
baseURI += "/"
|
||||
}
|
||||
switch baseURI {
|
||||
case azure.PublicCloud.ServiceManagementEndpoint:
|
||||
return f.ManagementEndpoint, nil
|
||||
case azure.PublicCloud.ResourceManagerEndpoint:
|
||||
return f.ResourceManagerEndpoint, nil
|
||||
case azure.PublicCloud.ActiveDirectoryEndpoint:
|
||||
return f.ActiveDirectoryEndpoint, nil
|
||||
case azure.PublicCloud.GalleryEndpoint:
|
||||
return f.GalleryEndpoint, nil
|
||||
case azure.PublicCloud.GraphEndpoint:
|
||||
return f.GraphResourceID, nil
|
||||
}
|
||||
return "", fmt.Errorf("auth: base URI not found in endpoints")
|
||||
}
|
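GetClientSetup above reads whatever file AZURE_AUTH_LOCATION points to and, via decode(), transparently handles UTF-8 as well as BOM-prefixed UTF-16LE/UTF-16BE encodings before unmarshaling. A minimal, hedged usage sketch; the file path is an example, not a fixed location:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Hypothetical path to an Azure SDK auth file.
	os.Setenv("AZURE_AUTH_LOCATION", "/home/me/my.auth")

	// The base URI selects which endpoint/resource from the file to use;
	// here it matches the public-cloud Resource Manager endpoint.
	setup, err := auth.GetClientSetup("https://management.azure.com/")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("subscription:", setup.SubscriptionID)
	fmt.Println("tenant:", setup.TenantID)
	fmt.Println("base URI:", setup.BaseURI)
	// setup.BearerAuthorizer can be assigned to any generated client's
	// Authorizer field.
}
```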
111 vendor/github.com/Azure/go-autorest/autorest/azure/auth/authfile_test.go generated vendored Normal file
@ -0,0 +1,111 @@
|
||||
// Copyright 2017 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
expectedFile = File{
|
||||
ClientID: "client-id-123",
|
||||
ClientSecret: "client-secret-456",
|
||||
SubscriptionID: "sub-id-789",
|
||||
TenantID: "tenant-id-123",
|
||||
ActiveDirectoryEndpoint: "https://login.microsoftonline.com",
|
||||
ResourceManagerEndpoint: "https://management.azure.com/",
|
||||
GraphResourceID: "https://graph.windows.net/",
|
||||
SQLManagementEndpoint: "https://management.core.windows.net:8443/",
|
||||
GalleryEndpoint: "https://gallery.azure.com/",
|
||||
ManagementEndpoint: "https://management.core.windows.net/",
|
||||
}
|
||||
)
|
||||
|
||||
func TestGetClientSetup(t *testing.T) {
|
||||
os.Setenv("AZURE_AUTH_LOCATION", filepath.Join(getCredsPath(), "credsutf16le.json"))
|
||||
setup, err := GetClientSetup("https://management.azure.com")
|
||||
if err != nil {
|
||||
t.Logf("GetClientSetup failed, got error %v", err)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if setup.BaseURI != "https://management.azure.com/" {
|
||||
t.Logf("auth.BaseURI not set correctly, expected 'https://management.azure.com/', got '%s'", setup.BaseURI)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(expectedFile, setup.File) {
|
||||
t.Logf("auth.File not set correctly, expected %v, got %v", expectedFile, setup.File)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
if setup.BearerAuthorizer == nil {
|
||||
t.Log("auth.Authorizer not set correctly, got nil")
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeAndUnmarshal(t *testing.T) {
|
||||
tests := []string{
|
||||
"credsutf8.json",
|
||||
"credsutf16le.json",
|
||||
"credsutf16be.json",
|
||||
}
|
||||
creds := getCredsPath()
|
||||
for _, test := range tests {
|
||||
b, err := ioutil.ReadFile(filepath.Join(creds, test))
|
||||
if err != nil {
|
||||
t.Logf("error reading file '%s': %s", test, err)
|
||||
t.Fail()
|
||||
}
|
||||
decoded, err := decode(b)
|
||||
if err != nil {
|
||||
t.Logf("error decoding file '%s': %s", test, err)
|
||||
t.Fail()
|
||||
}
|
||||
var got File
|
||||
err = json.Unmarshal(decoded, &got)
|
||||
if err != nil {
|
||||
t.Logf("error unmarshaling file '%s': %s", test, err)
|
||||
t.Fail()
|
||||
}
|
||||
if !reflect.DeepEqual(expectedFile, got) {
|
||||
t.Logf("unmarshaled map expected %v, got %v", expectedFile, got)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getCredsPath() string {
|
||||
gopath := os.Getenv("GOPATH")
|
||||
return filepath.Join(gopath, "src", "github.com", "Azure", "go-autorest", "testdata")
|
||||
}
|
||||
|
||||
func areMapsEqual(a, b map[string]string) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for k := range a {
|
||||
if a[k] != b[k] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
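The new test file above exercises go-autorest's file-based authentication: GetClientSetup reads the credentials file named by AZURE_AUTH_LOCATION (decoding UTF-8 and UTF-16 variants, as TestDecodeAndUnmarshal covers) and returns a setup carrying the parsed File plus a bearer authorizer. A minimal caller-side sketch follows; the auth file path and the printed fields are illustrative only and not part of this diff.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Point the auth package at an SDK auth file; the path here is illustrative.
	os.Setenv("AZURE_AUTH_LOCATION", "/path/to/azure.auth")

	// GetClientSetup parses the file and builds a bearer authorizer for the
	// given management endpoint (the same one used in TestGetClientSetup).
	setup, err := auth.GetClientSetup("https://management.azure.com")
	if err != nil {
		log.Fatalf("reading auth file: %v", err)
	}

	fmt.Println("subscription:", setup.File.SubscriptionID)
	fmt.Println("base URI:", setup.BaseURI)

	// The authorizer is what an SDK client would use to sign requests.
	_ = setup.BearerAuthorizer
}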
22 vendor/github.com/Azure/go-autorest/autorest/azure/azure.go (generated, vendored)
@@ -5,6 +5,20 @@ See the included examples for more detail.
*/
package azure

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import (
	"encoding/json"
	"fmt"
@@ -165,7 +179,13 @@ func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator {
			if decodeErr != nil {
				return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr)
			} else if e.ServiceError == nil {
				e.ServiceError = &ServiceError{Code: "Unknown", Message: "Unknown service error"}
				// Check if error is unwrapped ServiceError
				if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil || e.ServiceError.Message == "" {
					e.ServiceError = &ServiceError{
						Code:    "Unknown",
						Message: "Unknown service error",
					}
				}
			}

			e.RequestID = ExtractRequestID(resp)
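The second azure.go hunk changes how WithErrorUnlessStatusCode fills in the service error: when the response body is not the usual wrapped form ({"error": {...}}), it now tries to unmarshal the body directly as a service error and only falls back to the generic "Unknown" placeholder if that also yields nothing. The following self-contained sketch shows that fallback order in isolation; serviceError, requestError and parse are local stand-ins defined only for illustration (they ignore details, request IDs and other fields the library handles) and are not the library's API.

package main

import (
	"encoding/json"
	"fmt"
)

// serviceError is a pared-down stand-in for the library's service error type.
type serviceError struct {
	Code    string `json:"code"`
	Message string `json:"message"`
}

// requestError models the wrapped envelope: {"error": {...}}.
type requestError struct {
	ServiceError *serviceError `json:"error"`
}

// parse mirrors the fallback order introduced by the hunk above:
// wrapped envelope first, then an unwrapped service error, then "Unknown".
func parse(body []byte) serviceError {
	var wrapped requestError
	if err := json.Unmarshal(body, &wrapped); err == nil && wrapped.ServiceError != nil && wrapped.ServiceError.Message != "" {
		return *wrapped.ServiceError
	}
	var unwrapped serviceError
	if err := json.Unmarshal(body, &unwrapped); err == nil && unwrapped.Message != "" {
		return unwrapped
	}
	return serviceError{Code: "Unknown", Message: "Unknown service error"}
}

func main() {
	fmt.Println(parse([]byte(`{"error": {"code": "Conflict", "message": "wrapped"}}`)))
	fmt.Println(parse([]byte(`{"code": "InternalError", "message": "unwrapped"}`)))
	fmt.Println(parse([]byte(`not json`)))
}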
82 vendor/github.com/Azure/go-autorest/autorest/azure/azure_test.go (generated, vendored)
@@ -1,5 +1,19 @@
package azure

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import (
	"encoding/json"
	"fmt"
@@ -333,6 +347,74 @@ func TestWithErrorUnlessStatusCode_NoAzureError(t *testing.T) {

}

func TestWithErrorUnlessStatusCode_UnwrappedError(t *testing.T) {
	j := `{
		"target": null,
		"code": "InternalError",
		"message": "Azure is having trouble right now.",
		"details": [{"code": "conflict1", "message":"error message1"},
			{"code": "conflict2", "message":"error message2"}],
		"innererror": []
	}`
	uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6"
	r := mocks.NewResponseWithContent(j)
	mocks.SetResponseHeader(r, HeaderRequestID, uuid)
	r.Request = mocks.NewRequest()
	r.StatusCode = http.StatusInternalServerError
	r.Status = http.StatusText(r.StatusCode)

	err := autorest.Respond(r,
		WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByClosing())

	if err == nil {
		t.Fatal("azure: returned nil error for proper error response")
	}

	azErr, ok := err.(*RequestError)
	if !ok {
		t.Fatalf("returned error is not azure.RequestError: %T", err)
	}

	if expected := http.StatusInternalServerError; azErr.StatusCode != expected {
		t.Logf("Incorrect StatusCode got: %v want: %d", azErr.StatusCode, expected)
		t.Fail()
	}

	if expected := "Azure is having trouble right now."; azErr.ServiceError.Message != expected {
		t.Logf("Incorrect Message\n\tgot: %q\n\twant: %q", azErr.Message, expected)
		t.Fail()
	}

	if expected := uuid; azErr.RequestID != expected {
		t.Logf("Incorrect request ID\n\tgot: %q\n\twant: %q", azErr.RequestID, expected)
		t.Fail()
	}

	expectedServiceErrorDetails := `[{"code":"conflict1","message":"error message1"},{"code":"conflict2","message":"error message2"}]`
	if azErr.ServiceError == nil {
		t.Logf("`ServiceError` was nil when it shouldn't have been.")
		t.Fail()
	} else if azErr.ServiceError.Details == nil {
		t.Logf("`ServiceError.Details` was nil when it should have been %q", expectedServiceErrorDetails)
		t.Fail()
	} else if details, _ := json.Marshal(*azErr.ServiceError.Details); expectedServiceErrorDetails != string(details) {
		t.Logf("Error detaisl was not unmarshaled properly.\n\tgot: %q\n\twant: %q", string(details), expectedServiceErrorDetails)
		t.Fail()
	}

	// the error body should still be there
	defer r.Body.Close()
	b, err := ioutil.ReadAll(r.Body)
	if err != nil {
		t.Error(err)
	}
	if string(b) != j {
		t.Fatalf("response body is wrong. got=%q expected=%q", string(b), j)
	}

}

func TestRequestErrorString_WithError(t *testing.T) {
	j := `{
		"error": {
14 vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go (generated, vendored)
@@ -1,5 +1,19 @@
package cli

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import (
	"bytes"
	"encoding/json"
14 vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go (generated, vendored)
@@ -1,5 +1,19 @@
package cli

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import (
	"encoding/json"
	"fmt"
14 vendor/github.com/Azure/go-autorest/autorest/azure/environments.go (generated, vendored)
@@ -1,5 +1,19 @@
package azure

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import (
	"fmt"
	"strings"
14 vendor/github.com/Azure/go-autorest/autorest/azure/environments_test.go (generated, vendored)
@@ -1,6 +1,20 @@
// test
package azure

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import (
	"encoding/json"
	"testing"
14 vendor/github.com/Azure/go-autorest/autorest/azure/example/main.go (generated, vendored)
@@ -1,5 +1,19 @@
package main

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import (
	"crypto/rsa"
	"crypto/x509"
Some files were not shown because too many files have changed in this diff.