From a951e7b1261ac8f5aef5911287812c3ff78ff0cd Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Fri, 30 Mar 2018 11:41:12 +0200 Subject: [PATCH] Update vendored library cloud.google.com/go --- Gopkg.lock | 108 +- vendor/cloud.google.com/go/README.md | 106 +- .../cloud.google.com/go/bigquery/bigquery.go | 7 +- vendor/cloud.google.com/go/bigquery/copy.go | 15 +- .../cloud.google.com/go/bigquery/copy_test.go | 40 +- .../cloud.google.com/go/bigquery/dataset.go | 4 + .../apiv1/ListDataSources_smoke_test.go | 2 +- .../apiv1/data_transfer_client.go | 2 +- .../go/bigquery/datatransfer/apiv1/doc.go | 2 - .../bigquery/datatransfer/apiv1/mock_test.go | 48 +- .../go/bigquery/examples_test.go | 44 + .../cloud.google.com/go/bigquery/extract.go | 2 +- .../go/bigquery/integration_test.go | 270 ++- vendor/cloud.google.com/go/bigquery/job.go | 50 +- .../cloud.google.com/go/bigquery/job_test.go | 55 +- vendor/cloud.google.com/go/bigquery/load.go | 31 +- .../cloud.google.com/go/bigquery/load_test.go | 34 +- vendor/cloud.google.com/go/bigquery/nulls.go | 130 ++ .../go/bigquery/nulls_test.go | 52 +- vendor/cloud.google.com/go/bigquery/params.go | 9 +- .../go/bigquery/params_test.go | 6 +- vendor/cloud.google.com/go/bigquery/query.go | 20 +- .../go/bigquery/query_test.go | 12 +- .../go/bigquery/schema_test.go | 2 +- vendor/cloud.google.com/go/bigquery/table.go | 31 + .../go/bigquery/table_test.go | 6 +- vendor/cloud.google.com/go/bigquery/value.go | 12 + .../go/bigquery/value_test.go | 42 +- vendor/cloud.google.com/go/bigtable/admin.go | 55 +- .../go/bigtable/admin_test.go | 132 ++ .../cloud.google.com/go/bigtable/bigtable.go | 99 +- .../go/bigtable/bigtable_test.go | 107 +- .../go/bigtable/bttest/inmem.go | 15 +- .../go/bigtable/bttest/inmem_test.go | 161 +- .../go/bigtable/cmd/cbt/cbt.go | 55 +- .../go/bigtable/cmd/cbt/cbt_test.go | 54 + vendor/cloud.google.com/go/bigtable/filter.go | 49 +- vendor/cloud.google.com/go/bigtable/gc.go | 8 +- vendor/cloud.google.com/go/bigtable/go18.go | 68 + .../go/bigtable/internal/gax/invoke_test.go | 4 +- .../cloud.google.com/go/bigtable/not_go18.go | 36 + .../go/bigtable/retry_test.go | 21 +- vendor/cloud.google.com/go/cloud.go | 30 +- .../valuecollector/valuecollector_test.go | 24 +- .../go/datastore/datastore.go | 63 +- .../go/datastore/datastore.replay | Bin 3325777 -> 0 bytes .../go/datastore/datastore_test.go | 64 +- vendor/cloud.google.com/go/datastore/doc.go | 49 +- .../go/datastore/example_test.go | 22 + .../go/datastore/integration_test.go | 160 ++ vendor/cloud.google.com/go/datastore/load.go | 43 +- .../go/datastore/load_test.go | 129 ++ .../cloud.google.com/go/datastore/mutation.go | 129 ++ .../go/datastore/mutation_test.go | 150 ++ .../cloud.google.com/go/datastore/oc_test.go | 59 + vendor/cloud.google.com/go/datastore/query.go | 21 +- vendor/cloud.google.com/go/datastore/save.go | 43 +- .../go/datastore/save_test.go | 110 +- .../go/datastore/transaction.go | 130 +- .../go/datastore/transaction_test.go | 78 + .../go/dlp/apiv2/dlp_client.go | 810 +++++++ .../go/dlp/apiv2/dlp_client_example_test.go | 498 +++++ vendor/cloud.google.com/go/dlp/apiv2/doc.go | 45 + .../go/dlp/apiv2/mock_test.go | 1902 +++++++++++++++++ .../apiv1beta1/ReportErrorEvent_smoke_test.go | 4 +- vendor/cloud.google.com/go/firestore/Makefile | 13 + .../cloud.google.com/go/firestore/client.go | 29 +- .../go/firestore/client_test.go | 52 +- .../go/firestore/cross_language_test.go | 136 +- vendor/cloud.google.com/go/firestore/doc.go | 3 + .../cloud.google.com/go/firestore/docref.go | 10 +- 
.../go/firestore/docref_test.go | 26 +- .../cloud.google.com/go/firestore/document.go | 14 +- .../go/firestore/document_test.go | 3 +- .../go/firestore/from_value.go | 8 + .../go/firestore/from_value_test.go | 8 +- .../go/firestore/genproto/README.md | 3 - .../go/firestore/genproto/test.pb.go | 673 +++++- .../go/firestore/integration_test.go | 73 + .../go/firestore/internal/doc.template | 3 + .../go/firestore/mock_test.go | 33 +- vendor/cloud.google.com/go/firestore/order.go | 216 ++ .../go/firestore/order_test.go | 118 + vendor/cloud.google.com/go/firestore/query.go | 251 ++- .../go/firestore/query_test.go | 410 +++- .../go/firestore/testdata/Makefile | 11 - .../go/firestore/testdata/VERSION | 2 +- .../query-cursor-docsnap-order.textproto | 68 + ...uery-cursor-docsnap-orderby-name.textproto | 76 + .../query-cursor-docsnap-where-eq.textproto | 53 + ...cursor-docsnap-where-neq-orderby.textproto | 72 + .../query-cursor-docsnap-where-neq.textproto | 64 + .../testdata/query-cursor-docsnap.textproto | 34 + .../testdata/query-cursor-no-order.textproto | 16 + .../testdata/query-cursor-vals-1a.textproto | 50 + .../testdata/query-cursor-vals-1b.textproto | 48 + .../testdata/query-cursor-vals-2.textproto | 71 + .../query-cursor-vals-docid.textproto | 50 + .../query-cursor-vals-last-wins.textproto | 60 + .../testdata/query-del-cursor.textproto | 23 + .../testdata/query-del-where.textproto | 19 + .../testdata/query-invalid-operator.textproto | 19 + .../query-invalid-path-order.textproto | 19 + .../query-invalid-path-select.textproto | 18 + .../query-invalid-path-where.textproto | 20 + .../query-offset-limit-last-wins.textproto | 30 + .../testdata/query-offset-limit.textproto | 24 + .../firestore/testdata/query-order.textproto | 42 + .../testdata/query-select-empty.textproto | 23 + .../testdata/query-select-last-wins.textproto | 36 + .../firestore/testdata/query-select.textproto | 32 + .../testdata/query-st-cursor.textproto | 23 + .../testdata/query-st-where.textproto | 19 + .../testdata/query-where-2.textproto | 59 + .../firestore/testdata/query-where.textproto | 34 + .../testdata/query-wrong-collection.textproto | 19 + .../cloud.google.com/go/firestore/to_value.go | 18 +- .../go/firestore/to_value_test.go | 82 +- .../go/firestore/transaction.go | 17 +- .../go/firestore/transaction_test.go | 41 +- .../go/firestore/util_test.go | 6 +- vendor/cloud.google.com/go/firestore/watch.go | 115 + .../go/firestore/watch_test.go | 66 + vendor/cloud.google.com/go/iam/iam.go | 48 +- .../go/internal/btree/README.md | 11 + .../go/internal/btree/benchmarks_test.go | 268 +++ .../go/internal/btree/btree.go | 991 +++++++++ .../go/internal/btree/btree_test.go | 396 ++++ .../go/internal/btree/debug.go | 37 + .../go/internal/testutil/server.go | 5 +- .../go/internal/trace/go18.go | 83 + .../go/internal/trace/go18_test.go | 55 + .../go/internal/trace/not_go18.go | 30 + .../go/internal/version/version.go | 2 +- vendor/cloud.google.com/go/issue_template.md | 17 + vendor/cloud.google.com/go/logging/doc.go | 31 +- .../go/logging/examples_test.go | 30 + vendor/cloud.google.com/go/logging/logging.go | 57 +- .../go/logging/logging_test.go | 102 +- .../go/logging/logging_unexported_test.go | 48 + .../go/longrunning/longrunning.go | 6 +- .../monitoring/apiv3/alert_policy_client.go | 274 +++ .../apiv3/alert_policy_client_example_test.go | 128 ++ .../go/monitoring/apiv3/mock_test.go | 955 ++++++++- .../apiv3/notification_channel_client.go | 369 ++++ ...otification_channel_client_example_test.go | 170 ++ 
vendor/cloud.google.com/go/old-news.md | 39 + .../go/privacy/dlp/apiv2beta2/dlp_client.go | 681 ++++++ .../dlp/apiv2beta2/dlp_client_example_test.go | 422 ++++ .../go/privacy/dlp/apiv2beta2/doc.go | 48 + .../go/privacy/dlp/apiv2beta2/mock_test.go | 1596 ++++++++++++++ .../go/profiler/busybench/busybench.go | 7 +- .../go/profiler/integration-test.sh | 13 - .../go/profiler/integration_test.go | 670 +----- .../cloud.google.com/go/profiler/profiler.go | 27 +- .../go/profiler/profiler_test.go | 92 +- .../go/profiler/proftest/proftest.go | 503 +++++ .../pubsub/apiv1/pubsub_pull_example_test.go | 106 + vendor/cloud.google.com/go/pubsub/doc.go | 6 + .../go/pubsub/endtoend_test.go | 60 +- .../cloud.google.com/go/pubsub/fake_test.go | 22 +- vendor/cloud.google.com/go/pubsub/go18.go | 168 ++ vendor/cloud.google.com/go/pubsub/iterator.go | 8 +- vendor/cloud.google.com/go/pubsub/message.go | 7 +- vendor/cloud.google.com/go/pubsub/not_go18.go | 54 + .../cloud.google.com/go/pubsub/pstest/fake.go | 66 +- .../go/pubsub/pstest/fake_test.go | 98 +- .../cloud.google.com/go/pubsub/pstest_test.go | 76 + vendor/cloud.google.com/go/pubsub/pubsub.go | 9 +- .../cloud.google.com/go/pubsub/pullstream.go | 20 + vendor/cloud.google.com/go/pubsub/service.go | 135 -- .../go/pubsub/streaming_pull_test.go | 9 +- .../go/pubsub/subscription.go | 33 +- .../cloud.google.com/go/pubsub/topic_test.go | 3 +- vendor/cloud.google.com/go/regen-gapic.sh | 5 +- .../go/rpcreplay/fake_test.go | 3 +- .../go/spanner/apiv1/mock_test.go | 148 ++ .../go/spanner/apiv1/spanner_client.go | 50 + .../apiv1/spanner_client_example_test.go | 36 + vendor/cloud.google.com/go/spanner/batch.go | 345 +++ .../cloud.google.com/go/spanner/batch_test.go | 73 + vendor/cloud.google.com/go/spanner/client.go | 112 +- vendor/cloud.google.com/go/spanner/doc.go | 2 +- .../go/spanner/errors_test.go | 4 +- .../go/spanner/examples_test.go | 55 + vendor/cloud.google.com/go/spanner/go18.go | 25 +- .../spanner/internal/testutil/mockclient.go | 19 +- .../spanner/internal/testutil/mockserver.go | 72 +- vendor/cloud.google.com/go/spanner/key.go | 10 +- .../cloud.google.com/go/spanner/mutation.go | 22 +- .../cloud.google.com/go/spanner/not_go18.go | 7 +- vendor/cloud.google.com/go/spanner/oc_test.go | 72 + .../cloud.google.com/go/spanner/read_test.go | 17 +- .../cloud.google.com/go/spanner/retry_test.go | 12 +- vendor/cloud.google.com/go/spanner/row.go | 12 +- .../go/spanner/session_test.go | 10 +- .../go/spanner/spanner_test.go | 257 ++- .../cloud.google.com/go/spanner/statement.go | 23 +- .../go/spanner/timestampbound.go | 39 +- .../go/spanner/transaction.go | 2 +- .../cloud.google.com/go/spanner/util_test.go | 3 +- vendor/cloud.google.com/go/storage/acl.go | 16 +- vendor/cloud.google.com/go/storage/bucket.go | 201 +- .../go/storage/bucket_test.go | 70 +- vendor/cloud.google.com/go/storage/copy.go | 12 +- vendor/cloud.google.com/go/storage/doc.go | 9 +- .../go/storage/example_test.go | 19 + vendor/cloud.google.com/go/storage/iam.go | 18 +- .../go/storage/integration_test.go | 404 +++- .../go/storage/notifications.go | 17 +- .../go/storage/notifications_test.go | 3 +- vendor/cloud.google.com/go/storage/oc_test.go | 54 + vendor/cloud.google.com/go/storage/reader.go | 151 ++ .../go/storage/reader_test.go | 112 + vendor/cloud.google.com/go/storage/storage.go | 152 +- vendor/cloud.google.com/go/storage/writer.go | 29 +- .../go/storage/writer_test.go | 28 + vendor/cloud.google.com/go/trace/trace.go | 6 +- .../go/vision/apiv1/client.go | 6 +- .../go/vision/apiv1/client_test.go | 
20 +- .../go/vision/apiv1p1beta1/doc.go | 3 +- 221 files changed, 19911 insertions(+), 2075 deletions(-) create mode 100644 vendor/cloud.google.com/go/bigtable/go18.go create mode 100644 vendor/cloud.google.com/go/bigtable/not_go18.go delete mode 100644 vendor/cloud.google.com/go/datastore/datastore.replay create mode 100644 vendor/cloud.google.com/go/datastore/mutation.go create mode 100644 vendor/cloud.google.com/go/datastore/mutation_test.go create mode 100644 vendor/cloud.google.com/go/datastore/oc_test.go create mode 100644 vendor/cloud.google.com/go/datastore/transaction_test.go create mode 100644 vendor/cloud.google.com/go/dlp/apiv2/dlp_client.go create mode 100644 vendor/cloud.google.com/go/dlp/apiv2/dlp_client_example_test.go create mode 100644 vendor/cloud.google.com/go/dlp/apiv2/doc.go create mode 100644 vendor/cloud.google.com/go/dlp/apiv2/mock_test.go create mode 100644 vendor/cloud.google.com/go/firestore/Makefile delete mode 100644 vendor/cloud.google.com/go/firestore/genproto/README.md create mode 100644 vendor/cloud.google.com/go/firestore/order.go create mode 100644 vendor/cloud.google.com/go/firestore/order_test.go delete mode 100644 vendor/cloud.google.com/go/firestore/testdata/Makefile create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-order.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-orderby-name.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-eq.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-neq-orderby.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-neq.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-cursor-no-order.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-1a.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-1b.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-2.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-docid.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-last-wins.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-del-cursor.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-del-where.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-invalid-operator.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-invalid-path-order.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-invalid-path-select.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-invalid-path-where.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-offset-limit-last-wins.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-offset-limit.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-order.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-select-empty.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-select-last-wins.textproto create mode 100644 
vendor/cloud.google.com/go/firestore/testdata/query-select.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-st-cursor.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-st-where.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-where-2.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-where.textproto create mode 100644 vendor/cloud.google.com/go/firestore/testdata/query-wrong-collection.textproto create mode 100644 vendor/cloud.google.com/go/firestore/watch.go create mode 100644 vendor/cloud.google.com/go/firestore/watch_test.go create mode 100644 vendor/cloud.google.com/go/internal/btree/README.md create mode 100644 vendor/cloud.google.com/go/internal/btree/benchmarks_test.go create mode 100644 vendor/cloud.google.com/go/internal/btree/btree.go create mode 100644 vendor/cloud.google.com/go/internal/btree/btree_test.go create mode 100644 vendor/cloud.google.com/go/internal/btree/debug.go create mode 100644 vendor/cloud.google.com/go/internal/trace/go18.go create mode 100644 vendor/cloud.google.com/go/internal/trace/go18_test.go create mode 100644 vendor/cloud.google.com/go/internal/trace/not_go18.go create mode 100644 vendor/cloud.google.com/go/issue_template.md create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client_example_test.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client_example_test.go create mode 100644 vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/dlp_client.go create mode 100644 vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/dlp_client_example_test.go create mode 100644 vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/doc.go create mode 100644 vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/mock_test.go create mode 100644 vendor/cloud.google.com/go/profiler/proftest/proftest.go create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/pubsub_pull_example_test.go create mode 100644 vendor/cloud.google.com/go/pubsub/go18.go create mode 100644 vendor/cloud.google.com/go/pubsub/not_go18.go create mode 100644 vendor/cloud.google.com/go/pubsub/pstest_test.go create mode 100644 vendor/cloud.google.com/go/spanner/batch.go create mode 100644 vendor/cloud.google.com/go/spanner/batch_test.go create mode 100644 vendor/cloud.google.com/go/spanner/oc_test.go create mode 100644 vendor/cloud.google.com/go/storage/oc_test.go create mode 100644 vendor/cloud.google.com/go/storage/reader_test.go diff --git a/Gopkg.lock b/Gopkg.lock index 92f61a847..df748f198 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -4,18 +4,14 @@ [[projects]] branch = "master" name = "bazil.org/fuse" - packages = [ - ".", - "fs", - "fuseutil" - ] + packages = [".","fs","fuseutil"] revision = "371fbbdaa8987b715bdd21d6adc4c9b20155f748" [[projects]] name = "cloud.google.com/go" packages = ["compute/metadata"] - revision = "767c40d6a2e058483c25fa193e963a22da17236d" - version = "v0.18.0" + revision = "4b98a6370e36d7a85192e7bad08a4ebd82eac2a8" + version = "v0.20.0" [[projects]] name = "github.com/Azure/azure-sdk-for-go" @@ -25,12 +21,7 @@ [[projects]] name = "github.com/Azure/go-autorest" - packages = [ - "autorest", - "autorest/adal", - "autorest/azure", - "autorest/date" - ] + packages = ["autorest","autorest/adal","autorest/azure","autorest/date"] 
revision = "c2a68353555b68de3ee8455a4fd3e890a0ac6d99" version = "v9.8.1" @@ -96,12 +87,7 @@ [[projects]] name = "github.com/kurin/blazer" - packages = [ - "b2", - "base", - "internal/b2types", - "internal/blog" - ] + packages = ["b2","base","internal/b2types","internal/blog"] revision = "cd0304efa98725679cf68422cefa328d3d96f2f4" version = "v0.3.0" @@ -112,15 +98,7 @@ [[projects]] name = "github.com/minio/minio-go" - packages = [ - ".", - "pkg/credentials", - "pkg/encrypt", - "pkg/policy", - "pkg/s3signer", - "pkg/s3utils", - "pkg/set" - ] + packages = [".","pkg/credentials","pkg/encrypt","pkg/policy","pkg/s3signer","pkg/s3utils","pkg/set"] revision = "14f1d472d115bac5ca4804094aa87484a72ced61" version = "4.0.6" @@ -186,10 +164,7 @@ [[projects]] name = "github.com/spf13/cobra" - packages = [ - ".", - "doc" - ] + packages = [".","doc"] revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b" version = "v0.0.1" @@ -202,40 +177,19 @@ [[projects]] branch = "master" name = "golang.org/x/crypto" - packages = [ - "curve25519", - "ed25519", - "ed25519/internal/edwards25519", - "internal/chacha20", - "pbkdf2", - "poly1305", - "scrypt", - "ssh", - "ssh/terminal" - ] + packages = ["curve25519","ed25519","ed25519/internal/edwards25519","internal/chacha20","pbkdf2","poly1305","scrypt","ssh","ssh/terminal"] revision = "3d37316aaa6bd9929127ac9a527abf408178ea7b" [[projects]] branch = "master" name = "golang.org/x/net" - packages = [ - "context", - "context/ctxhttp", - "idna", - "lex/httplex" - ] + packages = ["context","context/ctxhttp","idna","lex/httplex"] revision = "5ccada7d0a7ba9aeb5d3aca8d3501b4c2a509fec" [[projects]] branch = "master" name = "golang.org/x/oauth2" - packages = [ - ".", - "google", - "internal", - "jws", - "jwt" - ] + packages = [".","google","internal","jws","jwt"] revision = "b28fcf2b08a19742b43084fb40ab78ac6c3d8067" [[projects]] @@ -247,58 +201,24 @@ [[projects]] branch = "master" name = "golang.org/x/sys" - packages = [ - "unix", - "windows" - ] + packages = ["unix","windows"] revision = "af50095a40f9041b3b38960738837185c26e9419" [[projects]] branch = "master" name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "internal/colltab", - "internal/gen", - "internal/tag", - "internal/triegen", - "internal/ucd", - "language", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable" - ] + packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"] revision = "e19ae1496984b1c655b8044a65c0300a3c878dd3" [[projects]] branch = "master" name = "google.golang.org/api" - packages = [ - "gensupport", - "googleapi", - "googleapi/internal/uritemplates", - "storage/v1" - ] + packages = ["gensupport","googleapi","googleapi/internal/uritemplates","storage/v1"] revision = "65b0d8655182691ad23b4fac11e6f7b897d9b634" [[projects]] name = "google.golang.org/appengine" - packages = [ - ".", - "internal", - "internal/app_identity", - "internal/base", - "internal/datastore", - "internal/log", - "internal/modules", - "internal/remote_api", - "internal/urlfetch", - "urlfetch" - ] + packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"] revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a" version = "v1.0.0" diff --git 
a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md index ada5f57a4..17d9d3e74 100644 --- a/vendor/cloud.google.com/go/README.md +++ b/vendor/cloud.google.com/go/README.md @@ -33,6 +33,69 @@ make backwards-incompatible changes. ## News +_March 22, 2018_ + +*v0.20.* + +- bigquery: Support SchemaUpdateOptions for load jobs. + +- bigtable: + - Add SampleRowKeys. + - cbt: Support union, intersection GCPolicy. + - Retry admin RPCS. + - Add trace spans to retries. + +- datastore: Add OpenCensus tracing. + +- firestore: + - Fix queries involving Null and NaN. + - Allow Timestamp protobuffers for time values. + +- logging: Add a WriteTimeout option. + +- spanner: Support Batch API. + +- storage: Add OpenCensus tracing. + + +_February 26, 2018_ + +*v0.19.0* + +- bigquery: + - Support customer-managed encryption keys. + +- bigtable: + - Improved emulator support. + - Support GetCluster. + +- datastore: + - Add general mutations. + - Support pointer struct fields. + - Support transaction options. + +- firestore: + - Add Transaction.GetAll. + - Support document cursors. + +- logging: + - Support concurrent RPCs to the service. + - Support per-entry resources. + +- profiler: + - Add config options to disable heap and thread profiling. + - Read the project ID from $GOOGLE_CLOUD_PROJECT when it's set. + +- pubsub: + - BEHAVIOR CHANGE: Release flow control after ack/nack (instead of after the + callback returns). + - Add SubscriptionInProject. + - Add OpenCensus instrumentation for streaming pull. + +- storage: + - Support CORS. + + _January 18, 2018_ *v0.18.0* @@ -97,45 +160,6 @@ _December 11, 2017_ - TimePartitioning supports "Field". -_October 30, 2017_ - -*v0.16.0* - -- Other bigquery changes: - - `JobIterator.Next` returns `*Job`; removed `JobInfo` (BREAKING CHANGE). - - UseStandardSQL is deprecated; set UseLegacySQL to true if you need - Legacy SQL. - - Uploader.Put will generate a random insert ID if you do not provide one. - - Support time partitioning for load jobs. - - Support dry-run queries. - - A `Job` remembers its last retrieved status. - - Support retrieving job configuration. - - Support labels for jobs and tables. - - Support dataset access lists. - - Improve support for external data sources, including data from Bigtable and - Google Sheets, and tables with external data. - - Support updating a table's view configuration. - - Fix uploading civil times with nanoseconds. - -- storage: - - Support PubSub notifications. - - Support Requester Pays buckets. - -- profiler: Support goroutine and mutex profile types. - - -_October 3, 2017_ - -*v0.15.0* - -- firestore: beta release. See the - [announcement](https://firebase.googleblog.com/2017/10/introducing-cloud-firestore.html). - -- errorreporting: The existing package has been redesigned. - -- errors: This package has been removed. Use errorreporting. 
- - [Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md) ## Supported APIs @@ -143,7 +167,7 @@ _October 3, 2017_ Google API | Status | Package ---------------------------------|--------------|----------------------------------------------------------- [BigQuery][cloud-bigquery] | stable | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref] -[Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref] +[Bigtable][cloud-bigtable] | stable | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref] [Container][cloud-container] | alpha | [`cloud.google.com/go/container/apiv1`][cloud-container-ref] [Data Loss Prevention][cloud-dlp]| alpha | [`cloud.google.com/go/dlp/apiv2beta1`][cloud-dlp-ref] [Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref] @@ -153,7 +177,7 @@ Google API | Status | Package [Language][cloud-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-language-ref] [Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`][cloud-logging-ref] [Monitoring][cloud-monitoring] | beta | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref] -[OS Login][cloud-oslogin] | alpha | [`cloud.google.com/compute/docs/oslogin/rest`][cloud-oslogin-ref] +[OS Login][cloud-oslogin] | alpha | [`cloud.google.com/compute/docs/oslogin/rest`][cloud-oslogin-ref] [Pub/Sub][cloud-pubsub] | beta | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref] [Spanner][cloud-spanner] | stable | [`cloud.google.com/go/spanner`][cloud-spanner-ref] [Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref] diff --git a/vendor/cloud.google.com/go/bigquery/bigquery.go b/vendor/cloud.google.com/go/bigquery/bigquery.go index 324a379d5..6427a5ccf 100644 --- a/vendor/cloud.google.com/go/bigquery/bigquery.go +++ b/vendor/cloud.google.com/go/bigquery/bigquery.go @@ -47,6 +47,11 @@ func setClientHeader(headers http.Header) { // Client may be used to perform BigQuery operations. type Client struct { + // Location, if set, will be used as the default location for all subsequent + // dataset creation and job operations. A location specified directly in one of + // those operations will override this value. + Location string + projectID string bqs *bq.Service } @@ -152,5 +157,5 @@ func retryableError(err error) bool { if len(e.Errors) > 0 { reason = e.Errors[0].Reason } - return reason == "backendError" || reason == "rateLimitExceeded" + return e.Code == http.StatusBadGateway || reason == "backendError" || reason == "rateLimitExceeded" } diff --git a/vendor/cloud.google.com/go/bigquery/copy.go b/vendor/cloud.google.com/go/bigquery/copy.go index 33feabb30..4f11ef503 100644 --- a/vendor/cloud.google.com/go/bigquery/copy.go +++ b/vendor/cloud.google.com/go/bigquery/copy.go @@ -37,6 +37,9 @@ type CopyConfig struct { // The labels associated with this job. Labels map[string]string + + // Custom encryption configuration (e.g., Cloud KMS keys). 
+ DestinationEncryptionConfig *EncryptionConfig } func (c *CopyConfig) toBQ() *bq.JobConfiguration { @@ -47,10 +50,11 @@ func (c *CopyConfig) toBQ() *bq.JobConfiguration { return &bq.JobConfiguration{ Labels: c.Labels, Copy: &bq.JobConfigurationTableCopy{ - CreateDisposition: string(c.CreateDisposition), - WriteDisposition: string(c.WriteDisposition), - DestinationTable: c.Dst.toBQ(), - SourceTables: ts, + CreateDisposition: string(c.CreateDisposition), + WriteDisposition: string(c.WriteDisposition), + DestinationTable: c.Dst.toBQ(), + DestinationEncryptionConfiguration: c.DestinationEncryptionConfig.toBQ(), + SourceTables: ts, }, } } @@ -61,6 +65,7 @@ func bqToCopyConfig(q *bq.JobConfiguration, c *Client) *CopyConfig { CreateDisposition: TableCreateDisposition(q.Copy.CreateDisposition), WriteDisposition: TableWriteDisposition(q.Copy.WriteDisposition), Dst: bqToTable(q.Copy.DestinationTable, c), + DestinationEncryptionConfig: bqToEncryptionConfig(q.Copy.DestinationEncryptionConfiguration), } for _, t := range q.Copy.SourceTables { cc.Srcs = append(cc.Srcs, bqToTable(t, c)) @@ -95,7 +100,7 @@ func (c *Copier) Run(ctx context.Context) (*Job, error) { func (c *Copier) newJob() *bq.Job { return &bq.Job{ - JobReference: c.JobIDConfig.createJobRef(c.c.projectID), + JobReference: c.JobIDConfig.createJobRef(c.c), Configuration: c.CopyConfig.toBQ(), } } diff --git a/vendor/cloud.google.com/go/bigquery/copy_test.go b/vendor/cloud.google.com/go/bigquery/copy_test.go index 3f29d18f3..4b2327be2 100644 --- a/vendor/cloud.google.com/go/bigquery/copy_test.go +++ b/vendor/cloud.google.com/go/bigquery/copy_test.go @@ -49,11 +49,12 @@ func defaultCopyJob() *bq.Job { func TestCopy(t *testing.T) { defer fixRandomID("RANDOM")() testCases := []struct { - dst *Table - srcs []*Table - jobID string - config CopyConfig - want *bq.Job + dst *Table + srcs []*Table + jobID string + location string + config CopyConfig + want *bq.Job }{ { dst: &Table{ @@ -84,15 +85,17 @@ func TestCopy(t *testing.T) { }, }, config: CopyConfig{ - CreateDisposition: CreateNever, - WriteDisposition: WriteTruncate, - Labels: map[string]string{"a": "b"}, + CreateDisposition: CreateNever, + WriteDisposition: WriteTruncate, + DestinationEncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"}, + Labels: map[string]string{"a": "b"}, }, want: func() *bq.Job { j := defaultCopyJob() j.Configuration.Labels = map[string]string{"a": "b"} j.Configuration.Copy.CreateDisposition = "CREATE_NEVER" j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE" + j.Configuration.Copy.DestinationEncryptionConfiguration = &bq.EncryptionConfiguration{KmsKeyName: "keyName"} return j }(), }, @@ -116,12 +119,33 @@ func TestCopy(t *testing.T) { return j }(), }, + { + dst: &Table{ + ProjectID: "d-project-id", + DatasetID: "d-dataset-id", + TableID: "d-table-id", + }, + srcs: []*Table{ + { + ProjectID: "s-project-id", + DatasetID: "s-dataset-id", + TableID: "s-table-id", + }, + }, + location: "asia-northeast1", + want: func() *bq.Job { + j := defaultCopyJob() + j.JobReference.Location = "asia-northeast1" + return j + }(), + }, } c := &Client{projectID: "client-project-id"} for i, tc := range testCases { tc.dst.c = c copier := tc.dst.CopierFrom(tc.srcs...) 
copier.JobID = tc.jobID + copier.Location = tc.location tc.config.Srcs = tc.srcs tc.config.Dst = tc.dst copier.CopyConfig = tc.config diff --git a/vendor/cloud.google.com/go/bigquery/dataset.go b/vendor/cloud.google.com/go/bigquery/dataset.go index b712dd699..e3f2a25d9 100644 --- a/vendor/cloud.google.com/go/bigquery/dataset.go +++ b/vendor/cloud.google.com/go/bigquery/dataset.go @@ -91,6 +91,10 @@ func (d *Dataset) Create(ctx context.Context, md *DatasetMetadata) error { return err } ds.DatasetReference = &bq.DatasetReference{DatasetId: d.DatasetID} + // Use Client.Location as a default. + if ds.Location == "" { + ds.Location = d.c.Location + } call := d.c.bqs.Datasets.Insert(d.ProjectID, ds).Context(ctx) setClientHeader(call.Header()) _, err = call.Do() diff --git a/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/ListDataSources_smoke_test.go b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/ListDataSources_smoke_test.go index ba7a8a669..86fdde5e5 100644 --- a/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/ListDataSources_smoke_test.go +++ b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/ListDataSources_smoke_test.go @@ -55,7 +55,7 @@ func TestDataTransferServiceSmoke(t *testing.T) { t.Fatal(err) } - var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", projectId, "us-central1") + var formattedParent string = fmt.Sprintf("projects/%s", projectId) var request = &datatransferpb.ListDataSourcesRequest{ Parent: formattedParent, } diff --git a/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client.go b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client.go index 2e45314c5..5ad6dd4da 100644 --- a/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client.go +++ b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client.go @@ -294,7 +294,7 @@ func (c *Client) ListTransferConfigs(ctx context.Context, req *datatransferpb.Li return it } -// ScheduleTransferRuns creates transfer runs for a time range [range_start_time, range_end_time]. +// ScheduleTransferRuns creates transfer runs for a time range [start_time, end_time]. // For each date - or whatever granularity the data source supports - in the // range, one transfer run is created. // Note that runs are created per UTC time in the time range. diff --git a/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/doc.go b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/doc.go index 3e7eab4ee..856e8f936 100644 --- a/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/doc.go +++ b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/doc.go @@ -42,8 +42,6 @@ func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { // DefaultAuthScopes reports the default set of authentication scopes to use with this package. 
func DefaultAuthScopes() []string { return []string{ - "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", } } diff --git a/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/mock_test.go b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/mock_test.go index 4b04ed64e..5278318eb 100644 --- a/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/mock_test.go +++ b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/mock_test.go @@ -281,7 +281,7 @@ func TestDataTransferServiceGetDataSource(t *testing.T) { mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) - var formattedName string = fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", "[PROJECT]", "[LOCATION]", "[DATA_SOURCE]") + var formattedName string = fmt.Sprintf("projects/%s/dataSources/%s", "[PROJECT]", "[DATA_SOURCE]") var request = &datatransferpb.GetDataSourceRequest{ Name: formattedName, } @@ -310,7 +310,7 @@ func TestDataTransferServiceGetDataSourceError(t *testing.T) { errCode := codes.PermissionDenied mockDataTransfer.err = gstatus.Error(errCode, "test error") - var formattedName string = fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", "[PROJECT]", "[LOCATION]", "[DATA_SOURCE]") + var formattedName string = fmt.Sprintf("projects/%s/dataSources/%s", "[PROJECT]", "[DATA_SOURCE]") var request = &datatransferpb.GetDataSourceRequest{ Name: formattedName, } @@ -343,7 +343,7 @@ func TestDataTransferServiceListDataSources(t *testing.T) { mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) - var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]") + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") var request = &datatransferpb.ListDataSourcesRequest{ Parent: formattedParent, } @@ -382,7 +382,7 @@ func TestDataTransferServiceListDataSourcesError(t *testing.T) { errCode := codes.PermissionDenied mockDataTransfer.err = gstatus.Error(errCode, "test error") - var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]") + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") var request = &datatransferpb.ListDataSourcesRequest{ Parent: formattedParent, } @@ -428,7 +428,7 @@ func TestDataTransferServiceCreateTransferConfig(t *testing.T) { mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) - var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]") + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") var transferConfig *datatransferpb.TransferConfig = &datatransferpb.TransferConfig{} var request = &datatransferpb.CreateTransferConfigRequest{ Parent: formattedParent, @@ -459,7 +459,7 @@ func TestDataTransferServiceCreateTransferConfigError(t *testing.T) { errCode := codes.PermissionDenied mockDataTransfer.err = gstatus.Error(errCode, "test error") - var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]") + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") var transferConfig *datatransferpb.TransferConfig = &datatransferpb.TransferConfig{} var request = &datatransferpb.CreateTransferConfigRequest{ Parent: formattedParent, @@ -567,7 +567,7 @@ func TestDataTransferServiceDeleteTransferConfig(t *testing.T) { mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) - var formattedName string = 
fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]") + var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]") var request = &datatransferpb.DeleteTransferConfigRequest{ Name: formattedName, } @@ -593,7 +593,7 @@ func TestDataTransferServiceDeleteTransferConfigError(t *testing.T) { errCode := codes.PermissionDenied mockDataTransfer.err = gstatus.Error(errCode, "test error") - var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]") + var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]") var request = &datatransferpb.DeleteTransferConfigRequest{ Name: formattedName, } @@ -638,7 +638,7 @@ func TestDataTransferServiceGetTransferConfig(t *testing.T) { mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) - var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]") + var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]") var request = &datatransferpb.GetTransferConfigRequest{ Name: formattedName, } @@ -667,7 +667,7 @@ func TestDataTransferServiceGetTransferConfigError(t *testing.T) { errCode := codes.PermissionDenied mockDataTransfer.err = gstatus.Error(errCode, "test error") - var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]") + var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]") var request = &datatransferpb.GetTransferConfigRequest{ Name: formattedName, } @@ -700,7 +700,7 @@ func TestDataTransferServiceListTransferConfigs(t *testing.T) { mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) - var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]") + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") var request = &datatransferpb.ListTransferConfigsRequest{ Parent: formattedParent, } @@ -739,7 +739,7 @@ func TestDataTransferServiceListTransferConfigsError(t *testing.T) { errCode := codes.PermissionDenied mockDataTransfer.err = gstatus.Error(errCode, "test error") - var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]") + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") var request = &datatransferpb.ListTransferConfigsRequest{ Parent: formattedParent, } @@ -766,7 +766,7 @@ func TestDataTransferServiceScheduleTransferRuns(t *testing.T) { mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) - var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]") + var formattedParent string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]") var startTime *timestamppb.Timestamp = ×tamppb.Timestamp{} var endTime *timestamppb.Timestamp = ×tamppb.Timestamp{} var request = &datatransferpb.ScheduleTransferRunsRequest{ @@ -799,7 +799,7 @@ func TestDataTransferServiceScheduleTransferRunsError(t *testing.T) { errCode := codes.PermissionDenied mockDataTransfer.err = gstatus.Error(errCode, "test error") - var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", 
"[TRANSFER_CONFIG]") + var formattedParent string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]") var startTime *timestamppb.Timestamp = ×tamppb.Timestamp{} var endTime *timestamppb.Timestamp = ×tamppb.Timestamp{} var request = &datatransferpb.ScheduleTransferRunsRequest{ @@ -841,7 +841,7 @@ func TestDataTransferServiceGetTransferRun(t *testing.T) { mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) - var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]") + var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[TRANSFER_CONFIG]", "[RUN]") var request = &datatransferpb.GetTransferRunRequest{ Name: formattedName, } @@ -870,7 +870,7 @@ func TestDataTransferServiceGetTransferRunError(t *testing.T) { errCode := codes.PermissionDenied mockDataTransfer.err = gstatus.Error(errCode, "test error") - var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]") + var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[TRANSFER_CONFIG]", "[RUN]") var request = &datatransferpb.GetTransferRunRequest{ Name: formattedName, } @@ -897,7 +897,7 @@ func TestDataTransferServiceDeleteTransferRun(t *testing.T) { mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) - var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]") + var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[TRANSFER_CONFIG]", "[RUN]") var request = &datatransferpb.DeleteTransferRunRequest{ Name: formattedName, } @@ -923,7 +923,7 @@ func TestDataTransferServiceDeleteTransferRunError(t *testing.T) { errCode := codes.PermissionDenied mockDataTransfer.err = gstatus.Error(errCode, "test error") - var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]") + var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[TRANSFER_CONFIG]", "[RUN]") var request = &datatransferpb.DeleteTransferRunRequest{ Name: formattedName, } @@ -955,7 +955,7 @@ func TestDataTransferServiceListTransferRuns(t *testing.T) { mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) - var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]") + var formattedParent string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]") var request = &datatransferpb.ListTransferRunsRequest{ Parent: formattedParent, } @@ -994,7 +994,7 @@ func TestDataTransferServiceListTransferRunsError(t *testing.T) { errCode := codes.PermissionDenied mockDataTransfer.err = gstatus.Error(errCode, "test error") - var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]") + var formattedParent string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]") var request = &datatransferpb.ListTransferRunsRequest{ Parent: formattedParent, } @@ -1027,7 +1027,7 @@ func TestDataTransferServiceListTransferLogs(t *testing.T) { mockDataTransfer.resps = append(mockDataTransfer.resps[:0], 
expectedResponse) - var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]") + var formattedParent string = fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[TRANSFER_CONFIG]", "[RUN]") var request = &datatransferpb.ListTransferLogsRequest{ Parent: formattedParent, } @@ -1066,7 +1066,7 @@ func TestDataTransferServiceListTransferLogsError(t *testing.T) { errCode := codes.PermissionDenied mockDataTransfer.err = gstatus.Error(errCode, "test error") - var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]") + var formattedParent string = fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[TRANSFER_CONFIG]", "[RUN]") var request = &datatransferpb.ListTransferLogsRequest{ Parent: formattedParent, } @@ -1096,7 +1096,7 @@ func TestDataTransferServiceCheckValidCreds(t *testing.T) { mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) - var formattedName string = fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", "[PROJECT]", "[LOCATION]", "[DATA_SOURCE]") + var formattedName string = fmt.Sprintf("projects/%s/dataSources/%s", "[PROJECT]", "[DATA_SOURCE]") var request = &datatransferpb.CheckValidCredsRequest{ Name: formattedName, } @@ -1125,7 +1125,7 @@ func TestDataTransferServiceCheckValidCredsError(t *testing.T) { errCode := codes.PermissionDenied mockDataTransfer.err = gstatus.Error(errCode, "test error") - var formattedName string = fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", "[PROJECT]", "[LOCATION]", "[DATA_SOURCE]") + var formattedName string = fmt.Sprintf("projects/%s/dataSources/%s", "[PROJECT]", "[DATA_SOURCE]") var request = &datatransferpb.CheckValidCredsRequest{ Name: formattedName, } diff --git a/vendor/cloud.google.com/go/bigquery/examples_test.go b/vendor/cloud.google.com/go/bigquery/examples_test.go index 22233fd90..2750b8d8f 100644 --- a/vendor/cloud.google.com/go/bigquery/examples_test.go +++ b/vendor/cloud.google.com/go/bigquery/examples_test.go @@ -131,6 +131,23 @@ func ExampleClient_Query_parameters() { // TODO: Call Query.Run or Query.Read. } +// This example demonstrates how to run a query job on a table +// with a customer-managed encryption key. The same +// applies to load and copy jobs as well. +func ExampleClient_Query_encryptionKey() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + q := client.Query("select name, num from t1") + // TODO: Replace this key with a key you have created in Cloud KMS. + keyName := "projects/P/locations/L/keyRings/R/cryptoKeys/K" + q.DestinationEncryptionConfig = &bigquery.EncryptionConfig{KMSKeyName: keyName} + // TODO: set other options on the Query. + // TODO: Call Query.Run or Query.Read. +} + func ExampleQuery_Read() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") @@ -453,6 +470,33 @@ func ExampleTable_Create_initialize() { } } +// This example demonstrates how to create a table with +// a customer-managed encryption key. +func ExampleTable_Create_encryptionKey() { + ctx := context.Background() + // Infer table schema from a Go type. + schema, err := bigquery.InferSchema(Item{}) + if err != nil { + // TODO: Handle error. + } + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. 
+ } + t := client.Dataset("my_dataset").Table("new-table") + + // TODO: Replace this key with a key you have created in Cloud KMS. + keyName := "projects/P/locations/L/keyRings/R/cryptoKeys/K" + if err := t.Create(ctx, + &bigquery.TableMetadata{ + Name: "My New Table", + Schema: schema, + EncryptionConfig: &bigquery.EncryptionConfig{KMSKeyName: keyName}, + }); err != nil { + // TODO: Handle error. + } +} + func ExampleTable_Delete() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") diff --git a/vendor/cloud.google.com/go/bigquery/extract.go b/vendor/cloud.google.com/go/bigquery/extract.go index e09736e8c..910008ba7 100644 --- a/vendor/cloud.google.com/go/bigquery/extract.go +++ b/vendor/cloud.google.com/go/bigquery/extract.go @@ -99,7 +99,7 @@ func (e *Extractor) Run(ctx context.Context) (*Job, error) { func (e *Extractor) newJob() *bq.Job { return &bq.Job{ - JobReference: e.JobIDConfig.createJobRef(e.c.projectID), + JobReference: e.JobIDConfig.createJobRef(e.c), Configuration: e.ExtractConfig.toBQ(), } } diff --git a/vendor/cloud.google.com/go/bigquery/integration_test.go b/vendor/cloud.google.com/go/bigquery/integration_test.go index 74dcf3b9a..622998eb0 100644 --- a/vendor/cloud.google.com/go/bigquery/integration_test.go +++ b/vendor/cloud.google.com/go/bigquery/integration_test.go @@ -59,6 +59,9 @@ var ( tableIDs = testutil.NewUIDSpaceSep("table", '_') ) +// Note: integration tests cannot be run in parallel, because TestIntegration_Location +// modifies the client. + func TestMain(m *testing.M) { cleanup := initIntegrationTest() r := m.Run() @@ -691,14 +694,14 @@ func TestIntegration_UploadAndReadStructs(t *testing.T) { table := newTable(t, schema) defer table.Delete(ctx) - d := civil.Date{2016, 3, 20} - tm := civil.Time{15, 4, 5, 6000} + d := civil.Date{Year: 2016, Month: 3, Day: 20} + tm := civil.Time{Hour: 15, Minute: 4, Second: 5, Nanosecond: 6000} ts := time.Date(2016, 3, 20, 15, 4, 5, 6000, time.UTC) - dtm := civil.DateTime{d, tm} - d2 := civil.Date{1994, 5, 15} - tm2 := civil.Time{1, 2, 4, 0} + dtm := civil.DateTime{Date: d, Time: tm} + d2 := civil.Date{Year: 1994, Month: 5, Day: 15} + tm2 := civil.Time{Hour: 1, Minute: 2, Second: 4, Nanosecond: 0} ts2 := time.Date(1994, 5, 15, 1, 2, 4, 0, time.UTC) - dtm2 := civil.DateTime{d2, tm2} + dtm2 := civil.DateTime{Date: d2, Time: tm2} // Populate the table. upl := table.Uploader() @@ -797,8 +800,8 @@ func TestIntegration_UploadAndReadNullable(t *testing.T) { if client == nil { t.Skip("Integration tests skipped") } - ctm := civil.Time{15, 4, 5, 6000} - cdt := civil.DateTime{testDate, ctm} + ctm := civil.Time{Hour: 15, Minute: 4, Second: 5, Nanosecond: 6000} + cdt := civil.DateTime{Date: testDate, Time: ctm} testUploadAndReadNullable(t, testStructNullable{}, make([]Value, len(testStructNullableSchema))) testUploadAndReadNullable(t, testStructNullable{ String: NullString{"x", true}, @@ -943,23 +946,23 @@ func TestIntegration_TableUpdate(t *testing.T) { // Error cases when updating schema. 
for _, test := range []struct { desc string - fields []*FieldSchema + fields Schema }{ - {"change from optional to required", []*FieldSchema{ + {"change from optional to required", Schema{ {Name: "name", Type: StringFieldType, Required: true}, schema3[1], schema3[2], schema3[3], }}, - {"add a required field", []*FieldSchema{ + {"add a required field", Schema{ schema3[0], schema3[1], schema3[2], schema3[3], {Name: "req", Type: StringFieldType, Required: true}, }}, - {"remove a field", []*FieldSchema{schema3[0], schema3[1], schema3[2]}}, - {"remove a nested field", []*FieldSchema{ + {"remove a field", Schema{schema3[0], schema3[1], schema3[2]}}, + {"remove a nested field", Schema{ schema3[0], schema3[1], schema3[2], {Name: "rec2", Type: RecordFieldType, Schema: Schema{nested[0]}}}}, - {"remove all nested fields", []*FieldSchema{ + {"remove all nested fields", Schema{ schema3[0], schema3[1], schema3[2], {Name: "rec2", Type: RecordFieldType, Schema: Schema{}}}}, } { @@ -1085,9 +1088,9 @@ func TestIntegration_TimeTypes(t *testing.T) { table := newTable(t, dtSchema) defer table.Delete(ctx) - d := civil.Date{2016, 3, 20} - tm := civil.Time{12, 30, 0, 6000} - dtm := civil.DateTime{d, tm} + d := civil.Date{Year: 2016, Month: 3, Day: 20} + tm := civil.Time{Hour: 12, Minute: 30, Second: 0, Nanosecond: 6000} + dtm := civil.DateTime{Date: d, Time: tm} ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC) wantRows := [][]Value{ []Value{d, tm, dtm, ts}, @@ -1121,8 +1124,8 @@ func TestIntegration_StandardQuery(t *testing.T) { } ctx := context.Background() - d := civil.Date{2016, 3, 20} - tm := civil.Time{15, 04, 05, 0} + d := civil.Date{Year: 2016, Month: 3, Day: 20} + tm := civil.Time{Hour: 15, Minute: 04, Second: 05, Nanosecond: 0} ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC) dtm := ts.Format("2006-01-02 15:04:05") @@ -1147,7 +1150,7 @@ func TestIntegration_StandardQuery(t *testing.T) { {fmt.Sprintf("SELECT TIMESTAMP '%s'", dtm), []Value{ts}}, {fmt.Sprintf("SELECT [TIMESTAMP '%s', TIMESTAMP '%s']", dtm, dtm), []Value{[]Value{ts, ts}}}, {fmt.Sprintf("SELECT ('hello', TIMESTAMP '%s')", dtm), []Value{[]Value{"hello", ts}}}, - {fmt.Sprintf("SELECT DATETIME(TIMESTAMP '%s')", dtm), []Value{civil.DateTime{d, tm}}}, + {fmt.Sprintf("SELECT DATETIME(TIMESTAMP '%s')", dtm), []Value{civil.DateTime{Date: d, Time: tm}}}, {fmt.Sprintf("SELECT DATE(TIMESTAMP '%s')", dtm), []Value{d}}, {fmt.Sprintf("SELECT TIME(TIMESTAMP '%s')", dtm), []Value{tm}}, {"SELECT (1, 2)", []Value{ints(1, 2)}}, @@ -1206,9 +1209,11 @@ func TestIntegration_QueryParameters(t *testing.T) { } ctx := context.Background() - d := civil.Date{2016, 3, 20} - tm := civil.Time{15, 04, 05, 0} - dtm := civil.DateTime{d, tm} + d := civil.Date{Year: 2016, Month: 3, Day: 20} + tm := civil.Time{Hour: 15, Minute: 04, Second: 05, Nanosecond: 3008} + rtm := tm + rtm.Nanosecond = 3000 // round to microseconds + dtm := civil.DateTime{Date: d, Time: tm} ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC) type ss struct { @@ -1226,20 +1231,93 @@ func TestIntegration_QueryParameters(t *testing.T) { query string parameters []QueryParameter wantRow []Value + wantConfig interface{} }{ - {"SELECT @val", []QueryParameter{{"val", 1}}, []Value{int64(1)}}, - {"SELECT @val", []QueryParameter{{"val", 1.3}}, []Value{1.3}}, - {"SELECT @val", []QueryParameter{{"val", true}}, []Value{true}}, - {"SELECT @val", []QueryParameter{{"val", "ABC"}}, []Value{"ABC"}}, - {"SELECT @val", []QueryParameter{{"val", []byte("foo")}}, []Value{[]byte("foo")}}, - {"SELECT @val", 
[]QueryParameter{{"val", ts}}, []Value{ts}}, - {"SELECT @val", []QueryParameter{{"val", []time.Time{ts, ts}}}, []Value{[]Value{ts, ts}}}, - {"SELECT @val", []QueryParameter{{"val", dtm}}, []Value{dtm}}, - {"SELECT @val", []QueryParameter{{"val", d}}, []Value{d}}, - {"SELECT @val", []QueryParameter{{"val", tm}}, []Value{tm}}, - {"SELECT @val", []QueryParameter{{"val", s{ts, []string{"a", "b"}, ss{"c"}, []ss{{"d"}, {"e"}}}}}, - []Value{[]Value{ts, []Value{"a", "b"}, []Value{"c"}, []Value{[]Value{"d"}, []Value{"e"}}}}}, - {"SELECT @val.Timestamp, @val.SubStruct.String", []QueryParameter{{"val", s{Timestamp: ts, SubStruct: ss{"a"}}}}, []Value{ts, "a"}}, + { + "SELECT @val", + []QueryParameter{{"val", 1}}, + []Value{int64(1)}, + int64(1), + }, + { + "SELECT @val", + []QueryParameter{{"val", 1.3}}, + []Value{1.3}, + 1.3, + }, + { + "SELECT @val", + []QueryParameter{{"val", true}}, + []Value{true}, + true, + }, + { + "SELECT @val", + []QueryParameter{{"val", "ABC"}}, + []Value{"ABC"}, + "ABC", + }, + { + "SELECT @val", + []QueryParameter{{"val", []byte("foo")}}, + []Value{[]byte("foo")}, + []byte("foo"), + }, + { + "SELECT @val", + []QueryParameter{{"val", ts}}, + []Value{ts}, + ts, + }, + { + "SELECT @val", + []QueryParameter{{"val", []time.Time{ts, ts}}}, + []Value{[]Value{ts, ts}}, + []interface{}{ts, ts}, + }, + { + "SELECT @val", + []QueryParameter{{"val", dtm}}, + []Value{civil.DateTime{Date: d, Time: rtm}}, + civil.DateTime{Date: d, Time: rtm}, + }, + { + "SELECT @val", + []QueryParameter{{"val", d}}, + []Value{d}, + d, + }, + { + "SELECT @val", + []QueryParameter{{"val", tm}}, + []Value{rtm}, + rtm, + }, + { + "SELECT @val", + []QueryParameter{{"val", s{ts, []string{"a", "b"}, ss{"c"}, []ss{{"d"}, {"e"}}}}}, + []Value{[]Value{ts, []Value{"a", "b"}, []Value{"c"}, []Value{[]Value{"d"}, []Value{"e"}}}}, + map[string]interface{}{ + "Timestamp": ts, + "StringArray": []interface{}{"a", "b"}, + "SubStruct": map[string]interface{}{"String": "c"}, + "SubStructArray": []interface{}{ + map[string]interface{}{"String": "d"}, + map[string]interface{}{"String": "e"}, + }, + }, + }, + { + "SELECT @val.Timestamp, @val.SubStruct.String", + []QueryParameter{{"val", s{Timestamp: ts, SubStruct: ss{"a"}}}}, + []Value{ts, "a"}, + map[string]interface{}{ + "Timestamp": ts, + "SubStruct": map[string]interface{}{"String": "a"}, + "StringArray": nil, + "SubStructArray": nil, + }, + }, } for _, c := range testCases { q := client.Query(c.query) @@ -1256,6 +1334,15 @@ func TestIntegration_QueryParameters(t *testing.T) { t.Fatal(err) } checkRead(t, "QueryParameters", it, [][]Value{c.wantRow}) + config, err := job.Config() + if err != nil { + t.Fatal(err) + } + got := config.(*QueryConfig).Parameters[0].Value + if !testutil.Equal(got, c.wantConfig) { + t.Errorf("param %[1]v (%[1]T): config:\ngot %[2]v (%[2]T)\nwant %[3]v (%[3]T)", + c.parameters[0].Value, got, c.wantConfig) + } } } @@ -1519,6 +1606,117 @@ func TestIntegration_ListJobs(t *testing.T) { } } +const tokyo = "asia-northeast1" + +func TestIntegration_Location(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + client.Location = "" + testLocation(t, tokyo) + client.Location = tokyo + defer func() { + client.Location = "" + }() + testLocation(t, "") +} + +func testLocation(t *testing.T, loc string) { + ctx := context.Background() + tokyoDataset := client.Dataset("tokyo") + err := tokyoDataset.Create(ctx, &DatasetMetadata{Location: loc}) + if err != nil && !hasStatusCode(err, 409) { // 409 = already exists + t.Fatal(err) + } + md, 
err := tokyoDataset.Metadata(ctx) + if err != nil { + t.Fatal(err) + } + if md.Location != tokyo { + t.Fatalf("dataset location: got %s, want %s", md.Location, tokyo) + } + table := tokyoDataset.Table(tableIDs.New()) + err = table.Create(context.Background(), &TableMetadata{ + Schema: Schema{ + {Name: "name", Type: StringFieldType}, + {Name: "nums", Type: IntegerFieldType}, + }, + ExpirationTime: testTableExpiration, + }) + if err != nil { + t.Fatal(err) + } + defer table.Delete(ctx) + loader := table.LoaderFrom(NewReaderSource(strings.NewReader("a,0\nb,1\nc,2\n"))) + loader.Location = loc + job, err := loader.Run(ctx) + if err != nil { + t.Fatal("loader.Run", err) + } + if job.Location() != tokyo { + t.Fatalf("job location: got %s, want %s", job.Location(), tokyo) + } + _, err = client.JobFromID(ctx, job.ID()) + if client.Location == "" && err == nil { + t.Error("JobFromID with Tokyo job, no client location: want error, got nil") + } + if client.Location != "" && err != nil { + t.Errorf("JobFromID with Tokyo job, with client location: want nil, got %v", err) + } + _, err = client.JobFromIDLocation(ctx, job.ID(), "US") + if err == nil { + t.Error("JobFromIDLocation with US: want error, got nil") + } + job2, err := client.JobFromIDLocation(ctx, job.ID(), loc) + if loc == tokyo && err != nil { + t.Errorf("loc=tokyo: %v", err) + } + if loc == "" && err == nil { + t.Error("loc empty: got nil, want error") + } + if job2 != nil && (job2.ID() != job.ID() || job2.Location() != tokyo) { + t.Errorf("got id %s loc %s, want id%s loc %s", job2.ID(), job2.Location(), job.ID(), tokyo) + } + if err := wait(ctx, job); err != nil { + t.Fatal(err) + } + // Cancel should succeed even if the job is done. + if err := job.Cancel(ctx); err != nil { + t.Fatal(err) + } + + q := client.Query(fmt.Sprintf("SELECT * FROM %s.%s", table.DatasetID, table.TableID)) + q.Location = loc + iter, err := q.Read(ctx) + if err != nil { + t.Fatal(err) + } + wantRows := [][]Value{ + []Value{"a", int64(0)}, + []Value{"b", int64(1)}, + []Value{"c", int64(2)}, + } + checkRead(t, "location", iter, wantRows) + + table2 := tokyoDataset.Table(tableIDs.New()) + copier := table2.CopierFrom(table) + copier.Location = loc + if _, err := copier.Run(ctx); err != nil { + t.Fatal(err) + } + bucketName := testutil.ProjID() + objectName := fmt.Sprintf("bq-test-%s.csv", table.TableID) + uri := fmt.Sprintf("gs://%s/%s", bucketName, objectName) + defer storageClient.Bucket(bucketName).Object(objectName).Delete(ctx) + gr := NewGCSReference(uri) + gr.DestinationFormat = CSV + e := table.ExtractorTo(gr) + e.Location = loc + if _, err := e.Run(ctx); err != nil { + t.Fatal(err) + } +} + // Creates a new, temporary table with a unique name and the given schema. func newTable(t *testing.T, s Schema) *Table { table := dataset.Table(tableIDs.New()) diff --git a/vendor/cloud.google.com/go/bigquery/job.go b/vendor/cloud.google.com/go/bigquery/job.go index b415e9f04..27a3129af 100644 --- a/vendor/cloud.google.com/go/bigquery/job.go +++ b/vendor/cloud.google.com/go/bigquery/job.go @@ -35,6 +35,7 @@ type Job struct { c *Client projectID string jobID string + location string config *bq.JobConfiguration lastStatus *JobStatus @@ -43,8 +44,18 @@ type Job struct { // JobFromID creates a Job which refers to an existing BigQuery job. The job // need not have been created by this package. For example, the job may have // been created in the BigQuery console. +// +// For jobs whose location is other than "US" or "EU", set Client.Location or use +// JobFromIDLocation. 
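+// An illustrative sketch of the two options (myClient, ctx and jobID are placeholders):
+//
+//	// Option 1: set a default location once on the client.
+//	myClient.Location = "asia-northeast1"
+//	job, err := myClient.JobFromID(ctx, jobID)
+//
+//	// Option 2: name the location explicitly for a single lookup.
+//	job, err := myClient.JobFromIDLocation(ctx, jobID, "asia-northeast1")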
func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) { - bqjob, err := c.getJobInternal(ctx, id, "configuration", "jobReference", "status", "statistics") + return c.JobFromIDLocation(ctx, id, c.Location) +} + +// JobFromIDLocation creates a Job which refers to an existing BigQuery job. The job +// need not have been created by this package (for example, it may have +// been created in the BigQuery console), but it must exist in the specified location. +func (c *Client) JobFromIDLocation(ctx context.Context, id, location string) (*Job, error) { + bqjob, err := c.getJobInternal(ctx, id, location, "configuration", "jobReference", "status", "statistics") if err != nil { return nil, err } @@ -56,6 +67,11 @@ func (j *Job) ID() string { return j.jobID } +// Location returns the job's location. +func (j *Job) Location() string { + return j.location +} + // State is one of a sequence of states that a Job progresses through as it is processed. type State int @@ -120,14 +136,20 @@ type JobIDConfig struct { // If AddJobIDSuffix is true, then a random string will be appended to JobID. AddJobIDSuffix bool + + // Location is the location for the job. + Location string } // createJobRef creates a JobReference. -// projectID must be non-empty. -func (j *JobIDConfig) createJobRef(projectID string) *bq.JobReference { +func (j *JobIDConfig) createJobRef(c *Client) *bq.JobReference { // We don't check whether projectID is empty; the server will return an // error when it encounters the resulting JobReference. - jr := &bq.JobReference{ProjectId: projectID} + loc := j.Location + if loc == "" { // Use Client.Location as a default. + loc = c.Location + } + jr := &bq.JobReference{ProjectId: c.projectID, Location: loc} if j.JobID == "" { jr.JobId = randomIDFn() } else if j.AddJobIDSuffix { @@ -176,7 +198,7 @@ func (s *JobStatus) Err() error { // Status retrieves the current status of the job from BigQuery. It fails if the Status could not be determined. func (j *Job) Status(ctx context.Context) (*JobStatus, error) { - bqjob, err := j.c.getJobInternal(ctx, j.jobID, "status", "statistics") + bqjob, err := j.c.getJobInternal(ctx, j.jobID, j.location, "status", "statistics") if err != nil { return nil, err } @@ -204,6 +226,7 @@ func (j *Job) Cancel(ctx context.Context) error { // to poll for the job status to see if the cancel completed // successfully". So it would be misleading to return a status. call := j.c.bqs.Jobs.Cancel(j.projectID, j.jobID). + Location(j.location). Fields(). // We don't need any of the response data. Context(ctx) setClientHeader(call.Header()) @@ -261,14 +284,17 @@ func (j *Job) read(ctx context.Context, waitForQuery func(context.Context, strin } destTable := j.config.Query.DestinationTable // The destination table should only be nil if there was a query error. 
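+	// The nil check on destTable is deferred until after waitForQuery below, so that a
+	// query that failed can report its own error rather than the generic "missing
+	// destination table" message.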
- if destTable == nil { - return nil, errors.New("bigquery: query job missing destination table") + projectID := j.projectID + if destTable != nil && projectID != destTable.ProjectId { + return nil, fmt.Errorf("bigquery: job project ID is %q, but destination table's is %q", projectID, destTable.ProjectId) } - projectID := destTable.ProjectId schema, err := waitForQuery(ctx, projectID) if err != nil { return nil, err } + if destTable == nil { + return nil, errors.New("bigquery: query job missing destination table") + } dt := bqToTable(destTable, j.c) it := newRowIterator(ctx, dt, pf) it.Schema = schema @@ -278,7 +304,7 @@ func (j *Job) read(ctx context.Context, waitForQuery func(context.Context, strin // waitForQuery waits for the query job to complete and returns its schema. func (j *Job) waitForQuery(ctx context.Context, projectID string) (Schema, error) { // Use GetQueryResults only to wait for completion, not to read results. - call := j.c.bqs.Jobs.GetQueryResults(projectID, j.jobID).Context(ctx).MaxResults(0) + call := j.c.bqs.Jobs.GetQueryResults(projectID, j.jobID).Location(j.location).Context(ctx).MaxResults(0) setClientHeader(call.Header()) backoff := gax.Backoff{ Initial: 1 * time.Second, @@ -522,9 +548,12 @@ func convertListedJob(j *bq.JobListJobs, c *Client) (*Job, error) { return bqToJob2(j.JobReference, j.Configuration, j.Status, j.Statistics, c) } -func (c *Client) getJobInternal(ctx context.Context, jobID string, fields ...googleapi.Field) (*bq.Job, error) { +func (c *Client) getJobInternal(ctx context.Context, jobID, location string, fields ...googleapi.Field) (*bq.Job, error) { var job *bq.Job call := c.bqs.Jobs.Get(c.projectID, jobID).Context(ctx) + if location != "" { + call = call.Location(location) + } if len(fields) > 0 { call = call.Fields(fields...) 
} @@ -547,6 +576,7 @@ func bqToJob2(qr *bq.JobReference, qc *bq.JobConfiguration, qs *bq.JobStatus, qt j := &Job{ projectID: qr.ProjectId, jobID: qr.JobId, + location: qr.Location, c: c, } j.setConfig(qc) diff --git a/vendor/cloud.google.com/go/bigquery/job_test.go b/vendor/cloud.google.com/go/bigquery/job_test.go index 564c22aa7..d2d8becc0 100644 --- a/vendor/cloud.google.com/go/bigquery/job_test.go +++ b/vendor/cloud.google.com/go/bigquery/job_test.go @@ -23,37 +23,52 @@ import ( func TestCreateJobRef(t *testing.T) { defer fixRandomID("RANDOM")() + cNoLoc := &Client{projectID: "projectID"} + cLoc := &Client{projectID: "projectID", Location: "defaultLoc"} for _, test := range []struct { - jobID string - addJobIDSuffix bool - want string + in JobIDConfig + client *Client + want *bq.JobReference }{ { - jobID: "foo", - addJobIDSuffix: false, - want: "foo", + in: JobIDConfig{JobID: "foo"}, + want: &bq.JobReference{JobId: "foo"}, }, { - jobID: "", - addJobIDSuffix: false, - want: "RANDOM", + in: JobIDConfig{}, + want: &bq.JobReference{JobId: "RANDOM"}, }, { - jobID: "", - addJobIDSuffix: true, // irrelevant - want: "RANDOM", + in: JobIDConfig{AddJobIDSuffix: true}, + want: &bq.JobReference{JobId: "RANDOM"}, }, { - jobID: "foo", - addJobIDSuffix: true, - want: "foo-RANDOM", + in: JobIDConfig{JobID: "foo", AddJobIDSuffix: true}, + want: &bq.JobReference{JobId: "foo-RANDOM"}, + }, + { + in: JobIDConfig{JobID: "foo", Location: "loc"}, + want: &bq.JobReference{JobId: "foo", Location: "loc"}, + }, + { + in: JobIDConfig{JobID: "foo"}, + client: cLoc, + want: &bq.JobReference{JobId: "foo", Location: "defaultLoc"}, + }, + { + in: JobIDConfig{JobID: "foo", Location: "loc"}, + client: cLoc, + want: &bq.JobReference{JobId: "foo", Location: "loc"}, }, } { - jc := JobIDConfig{JobID: test.jobID, AddJobIDSuffix: test.addJobIDSuffix} - jr := jc.createJobRef("projectID") - got := jr.JobId - if got != test.want { - t.Errorf("%q, %t: got %q, want %q", test.jobID, test.addJobIDSuffix, got, test.want) + client := test.client + if client == nil { + client = cNoLoc + } + got := test.in.createJobRef(client) + test.want.ProjectId = "projectID" + if !testutil.Equal(got, test.want) { + t.Errorf("%+v: got %+v, want %+v", test.in, got, test.want) } } } diff --git a/vendor/cloud.google.com/go/bigquery/load.go b/vendor/cloud.google.com/go/bigquery/load.go index 10493e158..62d77302a 100644 --- a/vendor/cloud.google.com/go/bigquery/load.go +++ b/vendor/cloud.google.com/go/bigquery/load.go @@ -42,16 +42,25 @@ type LoadConfig struct { // If non-nil, the destination table is partitioned by time. TimePartitioning *TimePartitioning + + // Custom encryption configuration (e.g., Cloud KMS keys). + DestinationEncryptionConfig *EncryptionConfig + + // SchemaUpdateOptions allows the schema of the destination table to be + // updated as a side effect of the load job. 
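+	// A rough usage sketch (the bucket, table and key names are placeholders):
+	//
+	//	loader := myTable.LoaderFrom(NewGCSReference("gs://my-bucket/data.csv"))
+	//	loader.SchemaUpdateOptions = []string{"ALLOW_FIELD_ADDITION"}
+	//	loader.DestinationEncryptionConfig = &EncryptionConfig{
+	//		KMSKeyName: "projects/P/locations/L/keyRings/R/cryptoKeys/K",
+	//	}
+	//	job, err := loader.Run(ctx)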
+ SchemaUpdateOptions []string } func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) { config := &bq.JobConfiguration{ Labels: l.Labels, Load: &bq.JobConfigurationLoad{ - CreateDisposition: string(l.CreateDisposition), - WriteDisposition: string(l.WriteDisposition), - DestinationTable: l.Dst.toBQ(), - TimePartitioning: l.TimePartitioning.toBQ(), + CreateDisposition: string(l.CreateDisposition), + WriteDisposition: string(l.WriteDisposition), + DestinationTable: l.Dst.toBQ(), + TimePartitioning: l.TimePartitioning.toBQ(), + DestinationEncryptionConfiguration: l.DestinationEncryptionConfig.toBQ(), + SchemaUpdateOptions: l.SchemaUpdateOptions, }, } media := l.Src.populateLoadConfig(config.Load) @@ -60,11 +69,13 @@ func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) { func bqToLoadConfig(q *bq.JobConfiguration, c *Client) *LoadConfig { lc := &LoadConfig{ - Labels: q.Labels, - CreateDisposition: TableCreateDisposition(q.Load.CreateDisposition), - WriteDisposition: TableWriteDisposition(q.Load.WriteDisposition), - Dst: bqToTable(q.Load.DestinationTable, c), - TimePartitioning: bqToTimePartitioning(q.Load.TimePartitioning), + Labels: q.Labels, + CreateDisposition: TableCreateDisposition(q.Load.CreateDisposition), + WriteDisposition: TableWriteDisposition(q.Load.WriteDisposition), + Dst: bqToTable(q.Load.DestinationTable, c), + TimePartitioning: bqToTimePartitioning(q.Load.TimePartitioning), + DestinationEncryptionConfig: bqToEncryptionConfig(q.Load.DestinationEncryptionConfiguration), + SchemaUpdateOptions: q.Load.SchemaUpdateOptions, } var fc *FileConfig if len(q.Load.SourceUris) == 0 { @@ -120,7 +131,7 @@ func (l *Loader) Run(ctx context.Context) (*Job, error) { func (l *Loader) newJob() (*bq.Job, io.Reader) { config, media := l.LoadConfig.toBQ() return &bq.Job{ - JobReference: l.JobIDConfig.createJobRef(l.c.projectID), + JobReference: l.JobIDConfig.createJobRef(l.c), Configuration: config, }, media } diff --git a/vendor/cloud.google.com/go/bigquery/load_test.go b/vendor/cloud.google.com/go/bigquery/load_test.go index 44c14f296..385269f60 100644 --- a/vendor/cloud.google.com/go/bigquery/load_test.go +++ b/vendor/cloud.google.com/go/bigquery/load_test.go @@ -74,25 +74,38 @@ func TestLoad(t *testing.T) { c := &Client{projectID: "client-project-id"} testCases := []struct { - dst *Table - src LoadSource - jobID string - config LoadConfig - want *bq.Job + dst *Table + src LoadSource + jobID string + location string + config LoadConfig + want *bq.Job }{ { dst: c.Dataset("dataset-id").Table("table-id"), src: NewGCSReference("uri"), want: defaultLoadJob(), }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: NewGCSReference("uri"), + location: "loc", + want: func() *bq.Job { + j := defaultLoadJob() + j.JobReference.Location = "loc" + return j + }(), + }, { dst: c.Dataset("dataset-id").Table("table-id"), jobID: "ajob", config: LoadConfig{ - CreateDisposition: CreateNever, - WriteDisposition: WriteTruncate, - Labels: map[string]string{"a": "b"}, - TimePartitioning: &TimePartitioning{Expiration: 1234 * time.Millisecond}, + CreateDisposition: CreateNever, + WriteDisposition: WriteTruncate, + Labels: map[string]string{"a": "b"}, + TimePartitioning: &TimePartitioning{Expiration: 1234 * time.Millisecond}, + DestinationEncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"}, + SchemaUpdateOptions: []string{"ALLOW_FIELD_ADDITION"}, }, src: NewGCSReference("uri"), want: func() *bq.Job { @@ -104,10 +117,12 @@ func TestLoad(t *testing.T) { Type: "DAY", ExpirationMs: 1234, } + 
j.Configuration.Load.DestinationEncryptionConfiguration = &bq.EncryptionConfiguration{KmsKeyName: "keyName"} j.JobReference = &bq.JobReference{ JobId: "ajob", ProjectId: "client-project-id", } + j.Configuration.Load.SchemaUpdateOptions = []string{"ALLOW_FIELD_ADDITION"} return j }(), }, @@ -224,6 +239,7 @@ func TestLoad(t *testing.T) { for i, tc := range testCases { loader := tc.dst.LoaderFrom(tc.src) loader.JobID = tc.jobID + loader.Location = tc.location tc.config.Src = tc.src tc.config.Dst = tc.dst loader.LoadConfig = tc.config diff --git a/vendor/cloud.google.com/go/bigquery/nulls.go b/vendor/cloud.google.com/go/bigquery/nulls.go index 98a90f086..ae3045527 100644 --- a/vendor/cloud.google.com/go/bigquery/nulls.go +++ b/vendor/cloud.google.com/go/bigquery/nulls.go @@ -15,9 +15,11 @@ package bigquery import ( + "bytes" "encoding/json" "fmt" "reflect" + "strconv" "time" "cloud.google.com/go/civil" @@ -134,6 +136,134 @@ func nulljson(valid bool, v interface{}) ([]byte, error) { return json.Marshal(v) } +func (n *NullInt64) UnmarshalJSON(b []byte) error { + n.Valid = false + n.Int64 = 0 + if bytes.Equal(b, jsonNull) { + return nil + } + + if err := json.Unmarshal(b, &n.Int64); err != nil { + return err + } + n.Valid = true + return nil +} + +func (n *NullFloat64) UnmarshalJSON(b []byte) error { + n.Valid = false + n.Float64 = 0 + if bytes.Equal(b, jsonNull) { + return nil + } + + if err := json.Unmarshal(b, &n.Float64); err != nil { + return err + } + n.Valid = true + return nil +} + +func (n *NullBool) UnmarshalJSON(b []byte) error { + n.Valid = false + n.Bool = false + if bytes.Equal(b, jsonNull) { + return nil + } + + if err := json.Unmarshal(b, &n.Bool); err != nil { + return err + } + n.Valid = true + return nil +} + +func (n *NullString) UnmarshalJSON(b []byte) error { + n.Valid = false + n.StringVal = "" + if bytes.Equal(b, jsonNull) { + return nil + } + + if err := json.Unmarshal(b, &n.StringVal); err != nil { + return err + } + n.Valid = true + return nil +} + +func (n *NullTimestamp) UnmarshalJSON(b []byte) error { + n.Valid = false + n.Timestamp = time.Time{} + if bytes.Equal(b, jsonNull) { + return nil + } + + if err := json.Unmarshal(b, &n.Timestamp); err != nil { + return err + } + n.Valid = true + return nil +} + +func (n *NullDate) UnmarshalJSON(b []byte) error { + n.Valid = false + n.Date = civil.Date{} + if bytes.Equal(b, jsonNull) { + return nil + } + + if err := json.Unmarshal(b, &n.Date); err != nil { + return err + } + n.Valid = true + return nil +} + +func (n *NullTime) UnmarshalJSON(b []byte) error { + n.Valid = false + n.Time = civil.Time{} + if bytes.Equal(b, jsonNull) { + return nil + } + + s, err := strconv.Unquote(string(b)) + if err != nil { + return err + } + + t, err := civil.ParseTime(s) + if err != nil { + return err + } + n.Time = t + + n.Valid = true + return nil +} + +func (n *NullDateTime) UnmarshalJSON(b []byte) error { + n.Valid = false + n.DateTime = civil.DateTime{} + if bytes.Equal(b, jsonNull) { + return nil + } + + s, err := strconv.Unquote(string(b)) + if err != nil { + return err + } + + dt, err := parseCivilDateTime(s) + if err != nil { + return err + } + n.DateTime = dt + + n.Valid = true + return nil +} + var ( typeOfNullInt64 = reflect.TypeOf(NullInt64{}) typeOfNullFloat64 = reflect.TypeOf(NullFloat64{}) diff --git a/vendor/cloud.google.com/go/bigquery/nulls_test.go b/vendor/cloud.google.com/go/bigquery/nulls_test.go index 423ffb3e1..87fcfb25e 100644 --- a/vendor/cloud.google.com/go/bigquery/nulls_test.go +++ 
b/vendor/cloud.google.com/go/bigquery/nulls_test.go @@ -16,7 +16,16 @@ package bigquery import ( "encoding/json" + "reflect" "testing" + + "cloud.google.com/go/civil" + "cloud.google.com/go/internal/testutil" +) + +var ( + nullsTestTime = civil.Time{Hour: 7, Minute: 50, Second: 22, Nanosecond: 1000} + nullsTestDateTime = civil.DateTime{Date: civil.Date{Year: 2016, Month: 11, Day: 5}, Time: nullsTestTime} ) func TestNullsJSON(t *testing.T) { @@ -24,23 +33,23 @@ func TestNullsJSON(t *testing.T) { in interface{} want string }{ - {NullInt64{Valid: true, Int64: 3}, `3`}, - {NullFloat64{Valid: true, Float64: 3.14}, `3.14`}, - {NullBool{Valid: true, Bool: true}, `true`}, - {NullString{Valid: true, StringVal: "foo"}, `"foo"`}, - {NullTimestamp{Valid: true, Timestamp: testTimestamp}, `"2016-11-05T07:50:22.000000008Z"`}, - {NullDate{Valid: true, Date: testDate}, `"2016-11-05"`}, - {NullTime{Valid: true, Time: testTime}, `"07:50:22.000000"`}, - {NullDateTime{Valid: true, DateTime: testDateTime}, `"2016-11-05 07:50:22.000000"`}, + {&NullInt64{Valid: true, Int64: 3}, `3`}, + {&NullFloat64{Valid: true, Float64: 3.14}, `3.14`}, + {&NullBool{Valid: true, Bool: true}, `true`}, + {&NullString{Valid: true, StringVal: "foo"}, `"foo"`}, + {&NullTimestamp{Valid: true, Timestamp: testTimestamp}, `"2016-11-05T07:50:22.000000008Z"`}, + {&NullDate{Valid: true, Date: testDate}, `"2016-11-05"`}, + {&NullTime{Valid: true, Time: nullsTestTime}, `"07:50:22.000001"`}, + {&NullDateTime{Valid: true, DateTime: nullsTestDateTime}, `"2016-11-05 07:50:22.000001"`}, - {NullInt64{}, `null`}, - {NullFloat64{}, `null`}, - {NullBool{}, `null`}, - {NullString{}, `null`}, - {NullTimestamp{}, `null`}, - {NullDate{}, `null`}, - {NullTime{}, `null`}, - {NullDateTime{}, `null`}, + {&NullInt64{}, `null`}, + {&NullFloat64{}, `null`}, + {&NullBool{}, `null`}, + {&NullString{}, `null`}, + {&NullTimestamp{}, `null`}, + {&NullDate{}, `null`}, + {&NullTime{}, `null`}, + {&NullDateTime{}, `null`}, } { bytes, err := json.Marshal(test.in) if err != nil { @@ -49,5 +58,16 @@ func TestNullsJSON(t *testing.T) { if got, want := string(bytes), test.want; got != want { t.Errorf("%#v: got %s, want %s", test.in, got, want) } + + typ := reflect.Indirect(reflect.ValueOf(test.in)).Type() + value := reflect.New(typ).Interface() + err = json.Unmarshal(bytes, value) + if err != nil { + t.Fatal(err) + } + + if !testutil.Equal(value, test.in) { + t.Errorf("%#v: got %#v, want %#v", test.in, value, test.in) + } } } diff --git a/vendor/cloud.google.com/go/bigquery/params.go b/vendor/cloud.google.com/go/bigquery/params.go index bcc859e5a..bb9fa273d 100644 --- a/vendor/cloud.google.com/go/bigquery/params.go +++ b/vendor/cloud.google.com/go/bigquery/params.go @@ -20,7 +20,6 @@ import ( "fmt" "reflect" "regexp" - "strings" "time" "cloud.google.com/go/civil" @@ -205,6 +204,8 @@ func paramValue(v reflect.Value) (bq.QueryParameterValue, error) { case typeOfTime: // civil.Time has nanosecond resolution, but BigQuery TIME only microsecond. + // (If we send nanoseconds, then when we try to read the result we get "query job + // missing destination table"). 
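+		// For example, civil.Time{Hour: 15, Minute: 4, Second: 5, Nanosecond: 3008} is sent
+		// as "15:04:05.000003": the value is rounded to microsecond precision, which is
+		// what the rounded rtm value in the integration test accounts for.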
res.Value = CivilTimeString(v.Interface().(civil.Time)) return res, nil @@ -306,11 +307,7 @@ func convertParamValue(qval *bq.QueryParameterValue, qtype *bq.QueryParameterTyp case "TIMESTAMP": return time.Parse(timestampFormat, qval.Value) case "DATETIME": - parts := strings.Fields(qval.Value) - if len(parts) != 2 { - return nil, fmt.Errorf("bigquery: bad DATETIME value %q", qval.Value) - } - return civil.ParseDateTime(parts[0] + "T" + parts[1]) + return parseCivilDateTime(qval.Value) default: return convertBasicType(qval.Value, paramTypeToFieldType[qtype.Type]) } diff --git a/vendor/cloud.google.com/go/bigquery/params_test.go b/vendor/cloud.google.com/go/bigquery/params_test.go index 95fb9fd7a..327b1c7e0 100644 --- a/vendor/cloud.google.com/go/bigquery/params_test.go +++ b/vendor/cloud.google.com/go/bigquery/params_test.go @@ -45,9 +45,9 @@ var scalarTests = []struct { {time.Date(2016, 3, 20, 4, 22, 9, 5000, time.FixedZone("neg1-2", -3720)), "2016-03-20 04:22:09.000005-01:02", timestampParamType}, - {civil.Date{2016, 3, 20}, "2016-03-20", dateParamType}, - {civil.Time{4, 5, 6, 789000000}, "04:05:06.789000", timeParamType}, - {civil.DateTime{civil.Date{2016, 3, 20}, civil.Time{4, 5, 6, 789000000}}, + {civil.Date{Year: 2016, Month: 3, Day: 20}, "2016-03-20", dateParamType}, + {civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 789000000}, "04:05:06.789000", timeParamType}, + {civil.DateTime{Date: civil.Date{Year: 2016, Month: 3, Day: 20}, Time: civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 789000000}}, "2016-03-20 04:05:06.789000", dateTimeParamType}, } diff --git a/vendor/cloud.google.com/go/bigquery/query.go b/vendor/cloud.google.com/go/bigquery/query.go index 7d5fb603d..197ba15f5 100644 --- a/vendor/cloud.google.com/go/bigquery/query.go +++ b/vendor/cloud.google.com/go/bigquery/query.go @@ -115,17 +115,21 @@ type QueryConfig struct { // call LastStatus on the returned job to get statistics. Calling Status on a // dry-run job will fail. DryRun bool + + // Custom encryption configuration (e.g., Cloud KMS keys). 
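+	// For illustration (the key name is a placeholder):
+	//
+	//	q := client.Query("SELECT name FROM dataset.table")
+	//	q.DestinationEncryptionConfig = &EncryptionConfig{
+	//		KMSKeyName: "projects/P/locations/L/keyRings/R/cryptoKeys/K",
+	//	}
+	//	job, err := q.Run(ctx)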
+ DestinationEncryptionConfig *EncryptionConfig } func (qc *QueryConfig) toBQ() (*bq.JobConfiguration, error) { qconf := &bq.JobConfigurationQuery{ - Query: qc.Q, - CreateDisposition: string(qc.CreateDisposition), - WriteDisposition: string(qc.WriteDisposition), - AllowLargeResults: qc.AllowLargeResults, - Priority: string(qc.Priority), - MaximumBytesBilled: qc.MaxBytesBilled, - TimePartitioning: qc.TimePartitioning.toBQ(), + Query: qc.Q, + CreateDisposition: string(qc.CreateDisposition), + WriteDisposition: string(qc.WriteDisposition), + AllowLargeResults: qc.AllowLargeResults, + Priority: string(qc.Priority), + MaximumBytesBilled: qc.MaxBytesBilled, + TimePartitioning: qc.TimePartitioning.toBQ(), + DestinationEncryptionConfiguration: qc.DestinationEncryptionConfig.toBQ(), } if len(qc.TableDefinitions) > 0 { qconf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration) @@ -274,7 +278,7 @@ func (q *Query) newJob() (*bq.Job, error) { return nil, err } return &bq.Job{ - JobReference: q.JobIDConfig.createJobRef(q.client.projectID), + JobReference: q.JobIDConfig.createJobRef(q.client), Configuration: config, }, nil } diff --git a/vendor/cloud.google.com/go/bigquery/query_test.go b/vendor/cloud.google.com/go/bigquery/query_test.go index fcae3e547..68ed63ea6 100644 --- a/vendor/cloud.google.com/go/bigquery/query_test.go +++ b/vendor/cloud.google.com/go/bigquery/query_test.go @@ -120,9 +120,7 @@ func TestQuery(t *testing.T) { g.MaxBadRecords = 1 g.Quote = "'" g.SkipLeadingRows = 2 - g.Schema = Schema([]*FieldSchema{ - {Name: "name", Type: StringFieldType}, - }) + g.Schema = Schema{{Name: "name", Type: StringFieldType}} return g }(), }, @@ -352,6 +350,7 @@ func TestConfiguringQuery(t *testing.T) { query.DefaultProjectID = "def-project-id" query.DefaultDatasetID = "def-dataset-id" query.TimePartitioning = &TimePartitioning{Expiration: 1234 * time.Second, Field: "f"} + query.DestinationEncryptionConfig = &EncryptionConfig{KMSKeyName: "keyName"} // Note: Other configuration fields are tested in other tests above. // A lot of that can be consolidated once Client.Copy is gone. @@ -363,9 +362,10 @@ func TestConfiguringQuery(t *testing.T) { ProjectId: "def-project-id", DatasetId: "def-dataset-id", }, - UseLegacySql: false, - TimePartitioning: &bq.TimePartitioning{ExpirationMs: 1234000, Field: "f", Type: "DAY"}, - ForceSendFields: []string{"UseLegacySql"}, + UseLegacySql: false, + TimePartitioning: &bq.TimePartitioning{ExpirationMs: 1234000, Field: "f", Type: "DAY"}, + DestinationEncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"}, + ForceSendFields: []string{"UseLegacySql"}, }, }, JobReference: &bq.JobReference{ diff --git a/vendor/cloud.google.com/go/bigquery/schema_test.go b/vendor/cloud.google.com/go/bigquery/schema_test.go index 4182310c1..e30e2c351 100644 --- a/vendor/cloud.google.com/go/bigquery/schema_test.go +++ b/vendor/cloud.google.com/go/bigquery/schema_test.go @@ -179,7 +179,7 @@ func TestSchemaConversion(t *testing.T) { Name: "outer", Required: true, Type: "RECORD", - Schema: []*FieldSchema{ + Schema: Schema{ { Description: "inner field", Name: "inner", diff --git a/vendor/cloud.google.com/go/bigquery/table.go b/vendor/cloud.google.com/go/bigquery/table.go index 86dda6707..b02b0c074 100644 --- a/vendor/cloud.google.com/go/bigquery/table.go +++ b/vendor/cloud.google.com/go/bigquery/table.go @@ -76,6 +76,9 @@ type TableMetadata struct { // Information about a table stored outside of BigQuery. 
ExternalDataConfig *ExternalDataConfig + // Custom encryption configuration (e.g., Cloud KMS keys). + EncryptionConfig *EncryptionConfig + // All the fields below are read-only. FullID string // An opaque ID uniquely identifying the table. @@ -175,6 +178,32 @@ func bqToTimePartitioning(q *bq.TimePartitioning) *TimePartitioning { } } +// EncryptionConfig configures customer-managed encryption on tables. +type EncryptionConfig struct { + // Describes the Cloud KMS encryption key that will be used to protect + // destination BigQuery table. The BigQuery Service Account associated with your + // project requires access to this encryption key. + KMSKeyName string +} + +func (e *EncryptionConfig) toBQ() *bq.EncryptionConfiguration { + if e == nil { + return nil + } + return &bq.EncryptionConfiguration{ + KmsKeyName: e.KMSKeyName, + } +} + +func bqToEncryptionConfig(q *bq.EncryptionConfiguration) *EncryptionConfig { + if q == nil { + return nil + } + return &EncryptionConfig{ + KMSKeyName: q.KmsKeyName, + } +} + // StreamingBuffer holds information about the streaming buffer. type StreamingBuffer struct { // A lower-bound estimate of the number of bytes currently in the streaming @@ -265,6 +294,7 @@ func (tm *TableMetadata) toBQ() (*bq.Table, error) { edc := tm.ExternalDataConfig.toBQ() t.ExternalDataConfiguration = &edc } + t.EncryptionConfiguration = tm.EncryptionConfig.toBQ() if tm.FullID != "" { return nil, errors.New("cannot set FullID on create") } @@ -320,6 +350,7 @@ func bqToTableMetadata(t *bq.Table) (*TableMetadata, error) { CreationTime: unixMillisToTime(t.CreationTime), LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)), ETag: t.Etag, + EncryptionConfig: bqToEncryptionConfig(t.EncryptionConfiguration), } if t.Schema != nil { md.Schema = bqToSchema(t.Schema) diff --git a/vendor/cloud.google.com/go/bigquery/table_test.go b/vendor/cloud.google.com/go/bigquery/table_test.go index 4b1cedc62..553c1e34a 100644 --- a/vendor/cloud.google.com/go/bigquery/table_test.go +++ b/vendor/cloud.google.com/go/bigquery/table_test.go @@ -53,6 +53,7 @@ func TestBQToTableMetadata(t *testing.T) { Type: "DAY", Field: "pfield", }, + EncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"}, Type: "EXTERNAL", View: &bq.ViewDefinition{Query: "view-query"}, Labels: map[string]string{"a": "b"}, @@ -82,7 +83,8 @@ func TestBQToTableMetadata(t *testing.T) { EstimatedRows: 3, OldestEntryTime: aTime, }, - ETag: "etag", + EncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"}, + ETag: "etag", }, }, } { @@ -115,6 +117,7 @@ func TestTableMetadataToBQ(t *testing.T) { ExpirationTime: aTime, Labels: map[string]string{"a": "b"}, ExternalDataConfig: &ExternalDataConfig{SourceFormat: Bigtable}, + EncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"}, }, &bq.Table{ FriendlyName: "n", @@ -127,6 +130,7 @@ func TestTableMetadataToBQ(t *testing.T) { ExpirationTime: aTimeMillis, Labels: map[string]string{"a": "b"}, ExternalDataConfiguration: &bq.ExternalDataConfiguration{SourceFormat: "BIGTABLE"}, + EncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"}, }, }, { diff --git a/vendor/cloud.google.com/go/bigquery/value.go b/vendor/cloud.google.com/go/bigquery/value.go index c3c7c8701..bf1d1f341 100644 --- a/vendor/cloud.google.com/go/bigquery/value.go +++ b/vendor/cloud.google.com/go/bigquery/value.go @@ -21,6 +21,7 @@ import ( "math" "reflect" "strconv" + "strings" "time" "cloud.google.com/go/civil" @@ -540,6 +541,7 @@ func valuesToMap(vs []Value, schema Schema) 
(map[string]Value, error) { type StructSaver struct { // Schema determines what fields of the struct are uploaded. It should // match the table's schema. + // Schema is optional for StructSavers that are passed to Uploader.Put. Schema Schema // If non-empty, BigQuery will use InsertID to de-duplicate insertions @@ -707,6 +709,16 @@ func CivilDateTimeString(dt civil.DateTime) string { return dt.Date.String() + " " + CivilTimeString(dt.Time) } +// parseCivilDateTime parses a date-time represented in a BigQuery SQL +// compatible format and returns a civil.DateTime. +func parseCivilDateTime(s string) (civil.DateTime, error) { + parts := strings.Fields(s) + if len(parts) != 2 { + return civil.DateTime{}, fmt.Errorf("bigquery: bad DATETIME value %q", s) + } + return civil.ParseDateTime(parts[0] + "T" + parts[1]) +} + // convertRows converts a series of TableRows into a series of Value slices. // schema is used to interpret the data from rows; its length must match the // length of each row. diff --git a/vendor/cloud.google.com/go/bigquery/value_test.go b/vendor/cloud.google.com/go/bigquery/value_test.go index c8e2423c8..43c4cb545 100644 --- a/vendor/cloud.google.com/go/bigquery/value_test.go +++ b/vendor/cloud.google.com/go/bigquery/value_test.go @@ -30,7 +30,7 @@ import ( ) func TestConvertBasicValues(t *testing.T) { - schema := []*FieldSchema{ + schema := Schema{ {Type: StringFieldType}, {Type: IntegerFieldType}, {Type: FloatFieldType}, @@ -57,7 +57,7 @@ func TestConvertBasicValues(t *testing.T) { } func TestConvertTime(t *testing.T) { - schema := []*FieldSchema{ + schema := Schema{ {Type: TimestampFieldType}, {Type: DateFieldType}, {Type: TimeFieldType}, @@ -103,9 +103,7 @@ func TestConvertSmallTimes(t *testing.T) { } func TestConvertNullValues(t *testing.T) { - schema := []*FieldSchema{ - {Type: StringFieldType}, - } + schema := Schema{{Type: StringFieldType}} row := &bq.TableRow{ F: []*bq.TableCell{ {V: nil}, @@ -122,7 +120,7 @@ func TestConvertNullValues(t *testing.T) { } func TestBasicRepetition(t *testing.T) { - schema := []*FieldSchema{ + schema := Schema{ {Type: IntegerFieldType, Repeated: true}, } row := &bq.TableRow{ @@ -153,7 +151,7 @@ func TestBasicRepetition(t *testing.T) { } func TestNestedRecordContainingRepetition(t *testing.T) { - schema := []*FieldSchema{ + schema := Schema{ { Type: RecordFieldType, Schema: Schema{ @@ -190,7 +188,7 @@ func TestNestedRecordContainingRepetition(t *testing.T) { } func TestRepeatedRecordContainingRepetition(t *testing.T) { - schema := []*FieldSchema{ + schema := Schema{ { Type: RecordFieldType, Repeated: true, @@ -264,7 +262,7 @@ func TestRepeatedRecordContainingRepetition(t *testing.T) { } func TestRepeatedRecordContainingRecord(t *testing.T) { - schema := []*FieldSchema{ + schema := Schema{ { Type: RecordFieldType, Repeated: true, @@ -399,14 +397,17 @@ func TestValuesSaverConvertsToMap(t *testing.T) { }{ { vs: ValuesSaver{ - Schema: []*FieldSchema{ + Schema: Schema{ {Name: "intField", Type: IntegerFieldType}, {Name: "strField", Type: StringFieldType}, {Name: "dtField", Type: DateTimeFieldType}, }, InsertID: "iid", Row: []Value{1, "a", - civil.DateTime{civil.Date{1, 2, 3}, civil.Time{4, 5, 6, 7000}}}, + civil.DateTime{ + Date: civil.Date{Year: 1, Month: 2, Day: 3}, + Time: civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 7000}}, + }, }, wantInsertID: "iid", wantRow: map[string]Value{"intField": 1, "strField": "a", @@ -414,12 +415,12 @@ func TestValuesSaverConvertsToMap(t *testing.T) { }, { vs: ValuesSaver{ - Schema: []*FieldSchema{ + 
Schema: Schema{ {Name: "intField", Type: IntegerFieldType}, { Name: "recordField", Type: RecordFieldType, - Schema: []*FieldSchema{ + Schema: Schema{ {Name: "nestedInt", Type: IntegerFieldType, Repeated: true}, }, }, @@ -559,8 +560,8 @@ func TestStructSaver(t *testing.T) { } } - ct1 := civil.Time{1, 2, 3, 4000} - ct2 := civil.Time{5, 6, 7, 8000} + ct1 := civil.Time{Hour: 1, Minute: 2, Second: 3, Nanosecond: 4000} + ct2 := civil.Time{Hour: 5, Minute: 6, Second: 7, Nanosecond: 8000} in := T{ S: "x", R: []int{1, 2}, @@ -629,7 +630,7 @@ func TestStructSaverErrors(t *testing.T) { } func TestConvertRows(t *testing.T) { - schema := []*FieldSchema{ + schema := Schema{ {Type: StringFieldType}, {Type: IntegerFieldType}, {Type: FloatFieldType}, @@ -772,9 +773,9 @@ var ( } testTimestamp = time.Date(2016, 11, 5, 7, 50, 22, 8, time.UTC) - testDate = civil.Date{2016, 11, 5} - testTime = civil.Time{7, 50, 22, 8} - testDateTime = civil.DateTime{testDate, testTime} + testDate = civil.Date{Year: 2016, Month: 11, Day: 5} + testTime = civil.Time{Hour: 7, Minute: 50, Second: 22, Nanosecond: 8} + testDateTime = civil.DateTime{Date: testDate, Time: testTime} testValues = []Value{"x", "y", []byte{1, 2, 3}, int64(7), int64(8), 3.14, true, testTimestamp, testDate, testTime, testDateTime, @@ -1069,7 +1070,7 @@ func TestStructLoaderErrors(t *testing.T) { t.Errorf("%T: got nil, want error", bad6) } - // sl.set's error is sticky, with even good input. + // sl.set's error is sticky, even with good input. err2 := sl.set(&repStruct{}, repSchema) if err2 != err { t.Errorf("%v != %v, expected equal", err2, err) @@ -1087,6 +1088,7 @@ func TestStructLoaderErrors(t *testing.T) { {Name: "b", Type: BooleanFieldType}, {Name: "s", Type: StringFieldType}, {Name: "d", Type: DateFieldType}, + {Name: "r", Type: RecordFieldType, Schema: Schema{{Name: "X", Type: IntegerFieldType}}}, } type s struct { I int diff --git a/vendor/cloud.google.com/go/bigtable/admin.go b/vendor/cloud.google.com/go/bigtable/admin.go index 91849d104..2d81b32d8 100644 --- a/vendor/cloud.google.com/go/bigtable/admin.go +++ b/vendor/cloud.google.com/go/bigtable/admin.go @@ -106,10 +106,17 @@ func (ac *AdminClient) Tables(ctx context.Context) ([]string, error) { req := &btapb.ListTablesRequest{ Parent: prefix, } - res, err := ac.tClient.ListTables(ctx, req) + + var res *btapb.ListTablesResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + res, err = ac.tClient.ListTables(ctx, req) + return err + }, retryOptions...) 
if err != nil { return nil, err } + names := make([]string, 0, len(res.Tables)) for _, tbl := range res.Tables { names = append(names, strings.TrimPrefix(tbl.Name, prefix+"/tables/")) @@ -145,13 +152,13 @@ func (ac *AdminClient) CreateTableFromConf(ctx context.Context, conf *TableConf) ctx = mergeOutgoingMetadata(ctx, ac.md) var req_splits []*btapb.CreateTableRequest_Split for _, split := range conf.SplitKeys { - req_splits = append(req_splits, &btapb.CreateTableRequest_Split{[]byte(split)}) + req_splits = append(req_splits, &btapb.CreateTableRequest_Split{Key: []byte(split)}) } var tbl btapb.Table if conf.Families != nil { tbl.ColumnFamilies = make(map[string]*btapb.ColumnFamily) for fam, policy := range conf.Families { - tbl.ColumnFamilies[fam] = &btapb.ColumnFamily{policy.proto()} + tbl.ColumnFamilies[fam] = &btapb.ColumnFamily{GcRule: policy.proto()} } } prefix := ac.instancePrefix() @@ -174,7 +181,7 @@ func (ac *AdminClient) CreateColumnFamily(ctx context.Context, table, family str Name: prefix + "/tables/" + table, Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ Id: family, - Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}}, + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{Create: &btapb.ColumnFamily{}}, }}, } _, err := ac.tClient.ModifyColumnFamilies(ctx, req) @@ -200,7 +207,7 @@ func (ac *AdminClient) DeleteColumnFamily(ctx context.Context, table, family str Name: prefix + "/tables/" + table, Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ Id: family, - Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Drop{true}, + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Drop{Drop: true}, }}, } _, err := ac.tClient.ModifyColumnFamilies(ctx, req) @@ -227,10 +234,18 @@ func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo, req := &btapb.GetTableRequest{ Name: prefix + "/tables/" + table, } - res, err := ac.tClient.GetTable(ctx, req) + + var res *btapb.Table + + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + res, err = ac.tClient.GetTable(ctx, req) + return err + }, retryOptions...) if err != nil { return nil, err } + ti := &TableInfo{} for name, fam := range res.ColumnFamilies { ti.Families = append(ti.Families, name) @@ -249,7 +264,7 @@ func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, po Name: prefix + "/tables/" + table, Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ Id: family, - Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{&btapb.ColumnFamily{GcRule: policy.proto()}}, + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{Update: &btapb.ColumnFamily{GcRule: policy.proto()}}, }}, } _, err := ac.tClient.ModifyColumnFamilies(ctx, req) @@ -262,7 +277,7 @@ func (ac *AdminClient) DropRowRange(ctx context.Context, table, rowKeyPrefix str prefix := ac.instancePrefix() req := &btapb.DropRowRangeRequest{ Name: prefix + "/tables/" + table, - Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte(rowKeyPrefix)}, + Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte(rowKeyPrefix)}, } _, err := ac.tClient.DropRowRange(ctx, req) return err @@ -697,7 +712,7 @@ func (iac *InstanceAdminClient) CreateInstanceWithClusters(ctx context.Context, // DeleteInstance deletes an instance from the project. 
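+// For example (the instance ID is a placeholder):
+//
+//	if err := iac.DeleteInstance(ctx, "my-instance"); err != nil {
+//		// handle error
+//	}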
func (iac *InstanceAdminClient) DeleteInstance(ctx context.Context, instanceId string) error { ctx = mergeOutgoingMetadata(ctx, iac.md) - req := &btapb.DeleteInstanceRequest{"projects/" + iac.project + "/instances/" + instanceId} + req := &btapb.DeleteInstanceRequest{Name: "projects/" + iac.project + "/instances/" + instanceId} _, err := iac.iClient.DeleteInstance(ctx, req) return err } @@ -808,7 +823,7 @@ func (iac *InstanceAdminClient) CreateCluster(ctx context.Context, conf *Cluster // production use. It is not subject to any SLA or deprecation policy. func (iac *InstanceAdminClient) DeleteCluster(ctx context.Context, instanceId, clusterId string) error { ctx = mergeOutgoingMetadata(ctx, iac.md) - req := &btapb.DeleteClusterRequest{"projects/" + iac.project + "/instances/" + instanceId + "/clusters/" + clusterId} + req := &btapb.DeleteClusterRequest{Name: "projects/" + iac.project + "/instances/" + instanceId + "/clusters/" + clusterId} _, err := iac.iClient.DeleteCluster(ctx, req) return err } @@ -848,3 +863,23 @@ func (iac *InstanceAdminClient) Clusters(ctx context.Context, instanceId string) } return cis, nil } + +// GetCluster fetches a cluster in an instance +func (iac *InstanceAdminClient) GetCluster(ctx context.Context, instanceID, clusterID string) (*ClusterInfo, error) { + ctx = mergeOutgoingMetadata(ctx, iac.md) + req := &btapb.GetClusterRequest{Name: "projects/" + iac.project + "/instances/" + instanceID + "/clusters" + clusterID} + c, err := iac.iClient.GetCluster(ctx, req) + if err != nil { + return nil, err + } + + nameParts := strings.Split(c.Name, "/") + locParts := strings.Split(c.Location, "/") + cis := &ClusterInfo{ + Name: nameParts[len(nameParts)-1], + Zone: locParts[len(locParts)-1], + ServeNodes: int(c.ServeNodes), + State: c.State.String(), + } + return cis, nil +} diff --git a/vendor/cloud.google.com/go/bigtable/admin_test.go b/vendor/cloud.google.com/go/bigtable/admin_test.go index f57a3dcdc..052844191 100644 --- a/vendor/cloud.google.com/go/bigtable/admin_test.go +++ b/vendor/cloud.google.com/go/bigtable/admin_test.go @@ -25,6 +25,7 @@ import ( "fmt" "golang.org/x/net/context" "google.golang.org/api/iterator" + btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" "strings" ) @@ -183,6 +184,67 @@ func TestAdminIntegration(t *testing.T) { } } +func TestInstanceUpdate(t *testing.T) { + testEnv, err := NewIntegrationEnv() + if err != nil { + t.Fatalf("IntegrationEnv: %v", err) + } + defer testEnv.Close() + + timeout := 2 * time.Second + if testEnv.Config().UseProd { + timeout = 5 * time.Minute + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + adminClient, err := testEnv.NewAdminClient() + if err != nil { + t.Fatalf("NewAdminClient: %v", err) + } + + defer adminClient.Close() + + iAdminClient, err := testEnv.NewInstanceAdminClient() + if err != nil { + t.Fatalf("NewInstanceAdminClient: %v", err) + } + + if iAdminClient == nil { + return + } + + defer iAdminClient.Close() + + iInfo, err := iAdminClient.InstanceInfo(ctx, adminClient.instance) + if err != nil { + t.Errorf("InstanceInfo: %v", err) + } + + if iInfo.Name != adminClient.instance { + t.Errorf("InstanceInfo returned name %#v, want %#v", iInfo.Name, adminClient.instance) + } + + if iInfo.DisplayName != adminClient.instance { + t.Errorf("InstanceInfo returned name %#v, want %#v", iInfo.Name, adminClient.instance) + } + + const numNodes = 4 + // update cluster nodes + if err := iAdminClient.UpdateCluster(ctx, adminClient.instance, testEnv.Config().Cluster, 
int32(numNodes)); err != nil { + t.Errorf("UpdateCluster: %v", err) + } + + // get cluster after updating + cis, err := iAdminClient.GetCluster(ctx, adminClient.instance, testEnv.Config().Cluster) + if err != nil { + t.Errorf("GetCluster %v", err) + } + if cis.ServeNodes != int(numNodes) { + t.Errorf("ServeNodes returned %d, want %d", cis.ServeNodes, int(numNodes)) + } +} + func TestAdminSnapshotIntegration(t *testing.T) { testEnv, err := NewIntegrationEnv() if err != nil { @@ -299,3 +361,73 @@ func TestAdminSnapshotIntegration(t *testing.T) { t.Fatalf("List after delete len: %d, want: %d", got, want) } } + +func TestGranularity(t *testing.T) { + testEnv, err := NewIntegrationEnv() + if err != nil { + t.Fatalf("IntegrationEnv: %v", err) + } + defer testEnv.Close() + + timeout := 2 * time.Second + if testEnv.Config().UseProd { + timeout = 5 * time.Minute + } + ctx, _ := context.WithTimeout(context.Background(), timeout) + + adminClient, err := testEnv.NewAdminClient() + if err != nil { + t.Fatalf("NewAdminClient: %v", err) + } + defer adminClient.Close() + + list := func() []string { + tbls, err := adminClient.Tables(ctx) + if err != nil { + t.Fatalf("Fetching list of tables: %v", err) + } + sort.Strings(tbls) + return tbls + } + containsAll := func(got, want []string) bool { + gotSet := make(map[string]bool) + + for _, s := range got { + gotSet[s] = true + } + for _, s := range want { + if !gotSet[s] { + return false + } + } + return true + } + + defer adminClient.DeleteTable(ctx, "mytable") + + if err := adminClient.CreateTable(ctx, "mytable"); err != nil { + t.Fatalf("Creating table: %v", err) + } + + tables := list() + if got, want := tables, []string{"mytable"}; !containsAll(got, want) { + t.Errorf("adminClient.Tables returned %#v, want %#v", got, want) + } + + // calling ModifyColumnFamilies to check the granularity of table + prefix := adminClient.instancePrefix() + req := &btapb.ModifyColumnFamiliesRequest{ + Name: prefix + "/tables/" + "mytable", + Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ + Id: "cf", + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}}, + }}, + } + table, err := adminClient.tClient.ModifyColumnFamilies(ctx, req) + if err != nil { + t.Fatalf("Creating column family: %v", err) + } + if table.Granularity != btapb.Table_TimestampGranularity(btapb.Table_MILLIS) { + t.Errorf("ModifyColumnFamilies returned granularity %#v, want %#v", table.Granularity, btapb.Table_TimestampGranularity(btapb.Table_MILLIS)) + } +} diff --git a/vendor/cloud.google.com/go/bigtable/bigtable.go b/vendor/cloud.google.com/go/bigtable/bigtable.go index 09f264cff..69e261d97 100644 --- a/vendor/cloud.google.com/go/bigtable/bigtable.go +++ b/vendor/cloud.google.com/go/bigtable/bigtable.go @@ -33,6 +33,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" ) const prodAddr = "bigtable.googleapis.com:443" @@ -83,11 +84,13 @@ func NewClientWithConfig(ctx context.Context, project, instance string, config C if err != nil { return nil, fmt.Errorf("dialing: %v", err) } + return &Client{ - conn: conn, - client: btpb.NewBigtableClient(conn), - project: project, - instance: instance, + conn: conn, + client: btpb.NewBigtableClient(conn), + project: project, + instance: instance, + appProfile: config.AppProfile, }, nil } @@ -147,7 +150,11 @@ func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts ctx = mergeOutgoingMetadata(ctx, t.md) var prevRowKey 
string - err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + ctx = traceStartSpan(ctx, "cloud.google.com/go/bigtable.ReadRows") + defer func() { traceEndSpan(ctx, err) }() + attrMap := make(map[string]interface{}) + err = gax.Invoke(ctx, func(ctx context.Context) error { if !arg.valid() { // Empty row set, no need to make an API call. // NOTE: we must return early if arg == RowList{} because reading @@ -165,6 +172,7 @@ func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts ctx, cancel := context.WithCancel(ctx) // for aborting the stream defer cancel() + startTime := time.Now() stream, err := t.c.client.ReadRows(ctx, req) if err != nil { return err @@ -178,6 +186,10 @@ func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts if err != nil { // Reset arg for next Invoke call. arg = arg.retainRowsAfter(prevRowKey) + attrMap["rowKey"] = prevRowKey + attrMap["error"] = err.Error() + attrMap["time_secs"] = time.Since(startTime).Seconds() + tracePrintf(ctx, attrMap, "Retry details in ReadRows") return err } @@ -317,10 +329,10 @@ func (r RowRange) String() string { func (r RowRange) proto() *btpb.RowSet { rr := &btpb.RowRange{ - StartKey: &btpb.RowRange_StartKeyClosed{[]byte(r.start)}, + StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte(r.start)}, } if !r.Unbounded() { - rr.EndKey = &btpb.RowRange_EndKeyOpen{[]byte(r.limit)} + rr.EndKey = &btpb.RowRange_EndKeyOpen{EndKeyOpen: []byte(r.limit)} } return &btpb.RowSet{RowRanges: []*btpb.RowRange{rr}} } @@ -462,6 +474,9 @@ func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...Appl } } + var err error + ctx = traceStartSpan(ctx, "cloud.google.com/go/bigtable/Apply") + defer func() { traceEndSpan(ctx, err) }() var callOptions []gax.CallOption if m.cond == nil { req := &btpb.MutateRowRequest{ @@ -507,7 +522,7 @@ func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...Appl callOptions = retryOptions } var cmRes *btpb.CheckAndMutateRowResponse - err := gax.Invoke(ctx, func(ctx context.Context) error { + err = gax.Invoke(ctx, func(ctx context.Context) error { var err error cmRes, err = t.c.client.CheckAndMutateRow(ctx, req) return err @@ -564,7 +579,7 @@ func NewCondMutation(cond Filter, mtrue, mfalse *Mutation) *Mutation { // The timestamp will be truncated to millisecond granularity. // A timestamp of ServerTime means to use the server timestamp. func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) { - m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{ FamilyName: family, ColumnQualifier: []byte(column), TimestampMicros: int64(ts.TruncateToMilliseconds()), @@ -574,7 +589,7 @@ func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) { // DeleteCellsInColumn will delete all the cells whose columns are family:column. func (m *Mutation) DeleteCellsInColumn(family, column string) { - m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{ + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{DeleteFromColumn: &btpb.Mutation_DeleteFromColumn{ FamilyName: family, ColumnQualifier: []byte(column), }}}) @@ -585,7 +600,7 @@ func (m *Mutation) DeleteCellsInColumn(family, column string) { // If end is zero, it will be interpreted as infinity. 
// The timestamps will be truncated to millisecond granularity. func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timestamp) { - m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{ + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{DeleteFromColumn: &btpb.Mutation_DeleteFromColumn{ FamilyName: family, ColumnQualifier: []byte(column), TimeRange: &btpb.TimestampRange{ @@ -597,14 +612,14 @@ func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timest // DeleteCellsInFamily will delete all the cells whose columns are family:*. func (m *Mutation) DeleteCellsInFamily(family string) { - m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromFamily_{&btpb.Mutation_DeleteFromFamily{ + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromFamily_{DeleteFromFamily: &btpb.Mutation_DeleteFromFamily{ FamilyName: family, }}}) } // DeleteRow deletes the entire row. func (m *Mutation) DeleteRow() { - m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromRow_{&btpb.Mutation_DeleteFromRow{}}}) + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromRow_{DeleteFromRow: &btpb.Mutation_DeleteFromRow{}}}) } // entryErr is a container that combines an entry with the error that was returned for it. @@ -642,7 +657,13 @@ func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutatio // entries will be reduced after each invocation to just what needs to be retried. entries := make([]*entryErr, len(rowKeys)) copy(entries, origEntries) - err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + ctx = traceStartSpan(ctx, "cloud.google.com/go/bigtable/ApplyBulk") + defer func() { traceEndSpan(ctx, err) }() + attrMap := make(map[string]interface{}) + err = gax.Invoke(ctx, func(ctx context.Context) error { + attrMap["rowCount"] = len(entries) + tracePrintf(ctx, attrMap, "Row count in ApplyBulk") err := t.doApplyBulk(ctx, entries, opts...) if err != nil { // We want to retry the entire request with the current entries @@ -652,11 +673,10 @@ func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutatio if len(entries) > 0 && len(idempotentRetryCodes) > 0 { // We have at least one mutation that needs to be retried. // Return an arbitrary error that is retryable according to callOptions. - return grpc.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk") + return status.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk") } return nil }, retryOptions...) - if err != nil { return nil, err } @@ -721,11 +741,11 @@ func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ... 
} for i, entry := range res.Entries { - status := entry.Status - if status.Code == int32(codes.OK) { + s := entry.Status + if s.Code == int32(codes.OK) { entryErrs[i].Err = nil } else { - entryErrs[i].Err = grpc.Errorf(codes.Code(status.Code), status.Message) + entryErrs[i].Err = status.Errorf(codes.Code(s.Code), s.Message) } } after(res) @@ -803,7 +823,7 @@ func (m *ReadModifyWrite) AppendValue(family, column string, v []byte) { m.ops = append(m.ops, &btpb.ReadModifyWriteRule{ FamilyName: family, ColumnQualifier: []byte(column), - Rule: &btpb.ReadModifyWriteRule_AppendValue{v}, + Rule: &btpb.ReadModifyWriteRule_AppendValue{AppendValue: v}, }) } @@ -815,7 +835,7 @@ func (m *ReadModifyWrite) Increment(family, column string, delta int64) { m.ops = append(m.ops, &btpb.ReadModifyWriteRule{ FamilyName: family, ColumnQualifier: []byte(column), - Rule: &btpb.ReadModifyWriteRule_IncrementAmount{delta}, + Rule: &btpb.ReadModifyWriteRule_IncrementAmount{IncrementAmount: delta}, }) } @@ -825,3 +845,40 @@ func mergeOutgoingMetadata(ctx context.Context, md metadata.MD) context.Context mdCopy, _ := metadata.FromOutgoingContext(ctx) return metadata.NewOutgoingContext(ctx, metadata.Join(mdCopy, md)) } + +func (t *Table) SampleRowKeys(ctx context.Context) ([]string, error) { + ctx = mergeOutgoingMetadata(ctx, t.md) + var sampledRowKeys []string + err := gax.Invoke(ctx, func(ctx context.Context) error { + sampledRowKeys = nil + req := &btpb.SampleRowKeysRequest{ + TableName: t.c.fullTableName(t.table), + AppProfileId: t.c.appProfile, + } + ctx, cancel := context.WithCancel(ctx) // for aborting the stream + defer cancel() + + stream, err := t.c.client.SampleRowKeys(ctx, req) + if err != nil { + return err + } + for { + res, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + + key := string(res.RowKey) + if key == "" { + continue + } + + sampledRowKeys = append(sampledRowKeys, key) + } + return nil + }, retryOptions...) + return sampledRowKeys, err +} diff --git a/vendor/cloud.google.com/go/bigtable/bigtable_test.go b/vendor/cloud.google.com/go/bigtable/bigtable_test.go index 13c6a6d25..0dec53f2a 100644 --- a/vendor/cloud.google.com/go/bigtable/bigtable_test.go +++ b/vendor/cloud.google.com/go/bigtable/bigtable_test.go @@ -154,10 +154,11 @@ func TestClientIntegration(t *testing.T) { } checkpoint("inserted initial data") - if err := adminClient.WaitForReplication(ctx, table); err != nil { - t.Errorf("Waiting for replication for table %q: %v", table, err) - } - checkpoint("waited for replication") + // TODO(igorbernstein): re-enable this when ready + //if err := adminClient.WaitForReplication(ctx, table); err != nil { + // t.Errorf("Waiting for replication for table %q: %v", table, err) + //} + //checkpoint("waited for replication") // Do a conditional mutation with a complex filter. mutTrue := NewMutation() @@ -1062,3 +1063,101 @@ func clearTimestamps(r Row) { } } } + +func TestSampleRowKeys(t *testing.T) { + start := time.Now() + lastCheckpoint := start + checkpoint := func(s string) { + n := time.Now() + t.Logf("[%s] %v since start, %v since last checkpoint", s, n.Sub(start), n.Sub(lastCheckpoint)) + lastCheckpoint = n + } + ctx := context.Background() + client, adminClient, table, err := doSetup(ctx) + if err != nil { + t.Fatalf("%v", err) + } + defer client.Close() + defer adminClient.Close() + tbl := client.Open(table) + // Delete the table at the end of the test. 
+ // Do this even before creating the table so that if this is running + // against production and CreateTable fails there's a chance of cleaning it up. + defer adminClient.DeleteTable(ctx, table) + + // Insert some data. + initialData := map[string][]string{ + "wmckinley11": {"tjefferson11"}, + "gwashington77": {"jadams77"}, + "tjefferson0": {"gwashington0", "jadams0"}, + } + + for row, ss := range initialData { + mut := NewMutation() + for _, name := range ss { + mut.Set("follows", name, 0, []byte("1")) + } + if err := tbl.Apply(ctx, row, mut); err != nil { + t.Errorf("Mutating row %q: %v", row, err) + } + } + checkpoint("inserted initial data") + sampleKeys, err := tbl.SampleRowKeys(context.Background()) + if err != nil { + t.Errorf("%s: %v", "SampleRowKeys:", err) + } + if len(sampleKeys) == 0 { + t.Error("SampleRowKeys length 0") + } + checkpoint("tested SampleRowKeys.") +} + +func doSetup(ctx context.Context) (*Client, *AdminClient, string, error) { + start := time.Now() + lastCheckpoint := start + checkpoint := func(s string) { + n := time.Now() + fmt.Printf("[%s] %v since start, %v since last checkpoint", s, n.Sub(start), n.Sub(lastCheckpoint)) + lastCheckpoint = n + } + + testEnv, err := NewIntegrationEnv() + if err != nil { + return nil, nil, "", fmt.Errorf("IntegrationEnv: %v", err) + } + + var timeout time.Duration + if testEnv.Config().UseProd { + timeout = 10 * time.Minute + fmt.Printf("Running test against production") + } else { + timeout = 1 * time.Minute + fmt.Printf("bttest.Server running on %s", testEnv.Config().AdminEndpoint) + } + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + client, err := testEnv.NewClient() + if err != nil { + return nil, nil, "", fmt.Errorf("Client: %v", err) + } + checkpoint("dialed Client") + + adminClient, err := testEnv.NewAdminClient() + if err != nil { + return nil, nil, "", fmt.Errorf("AdminClient: %v", err) + } + checkpoint("dialed AdminClient") + + table := testEnv.Config().Table + if err := adminClient.CreateTable(ctx, table); err != nil { + return nil, nil, "", fmt.Errorf("Creating table: %v", err) + } + checkpoint("created table") + if err := adminClient.CreateColumnFamily(ctx, table, "follows"); err != nil { + return nil, nil, "", fmt.Errorf("Creating column family: %v", err) + } + checkpoint(`created "follows" column family`) + + return client, adminClient, table, nil +} diff --git a/vendor/cloud.google.com/go/bigtable/bttest/inmem.go b/vendor/cloud.google.com/go/bigtable/bttest/inmem.go index afabd6974..c925e25f4 100644 --- a/vendor/cloud.google.com/go/bigtable/bttest/inmem.go +++ b/vendor/cloud.google.com/go/bigtable/bttest/inmem.go @@ -121,7 +121,7 @@ func (s *server) CreateTable(ctx context.Context, req *btapb.CreateTableRequest) s.mu.Lock() if _, ok := s.tables[tbl]; ok { s.mu.Unlock() - return nil, grpc.Errorf(codes.AlreadyExists, "table %q already exists", tbl) + return nil, status.Errorf(codes.AlreadyExists, "table %q already exists", tbl) } s.tables[tbl] = newTable(req) s.mu.Unlock() @@ -151,7 +151,7 @@ func (s *server) GetTable(ctx context.Context, req *btapb.GetTableRequest) (*bta tblIns, ok := s.tables[tbl] s.mu.Unlock() if !ok { - return nil, grpc.Errorf(codes.NotFound, "table %q not found", tbl) + return nil, status.Errorf(codes.NotFound, "table %q not found", tbl) } return &btapb.Table{ @@ -177,7 +177,7 @@ func (s *server) ModifyColumnFamilies(ctx context.Context, req *btapb.ModifyColu tbl, ok := s.tables[req.Name] s.mu.Unlock() if !ok { - return nil, grpc.Errorf(codes.NotFound, "table %q not 
found", req.Name) + return nil, status.Errorf(codes.NotFound, "table %q not found", req.Name) } tbl.mu.Lock() @@ -186,7 +186,7 @@ func (s *server) ModifyColumnFamilies(ctx context.Context, req *btapb.ModifyColu for _, mod := range req.Modifications { if create := mod.GetCreate(); create != nil { if _, ok := tbl.families[mod.Id]; ok { - return nil, grpc.Errorf(codes.AlreadyExists, "family %q already exists", mod.Id) + return nil, status.Errorf(codes.AlreadyExists, "family %q already exists", mod.Id) } newcf := &columnFamily{ name: req.Name + "/columnFamilies/" + mod.Id, @@ -218,6 +218,7 @@ func (s *server) ModifyColumnFamilies(ctx context.Context, req *btapb.ModifyColu return &btapb.Table{ Name: tblName, ColumnFamilies: toColumnFamilies(tbl.families), + Granularity: btapb.Table_TimestampGranularity(btapb.Table_MILLIS), }, nil } @@ -415,7 +416,7 @@ func streamRow(stream btpb.Bigtable_ReadRowsServer, r *row, f *btpb.RowFilter) ( // We can't have a cell with just COMMIT set, which would imply a new empty cell. // So modify the last cell to have the COMMIT flag set. if len(rrr.Chunks) > 0 { - rrr.Chunks[len(rrr.Chunks)-1].RowStatus = &btpb.ReadRowsResponse_CellChunk_CommitRow{true} + rrr.Chunks[len(rrr.Chunks)-1].RowStatus = &btpb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: true} } return true, stream.Send(rrr) @@ -429,6 +430,10 @@ func filterRow(f *btpb.RowFilter, r *row) bool { } // Handle filters that apply beyond just including/excluding cells. switch f := f.Filter.(type) { + case *btpb.RowFilter_BlockAllFilter: + return !f.BlockAllFilter + case *btpb.RowFilter_PassAllFilter: + return f.PassAllFilter case *btpb.RowFilter_Chain_: for _, sub := range f.Chain.Filters { if !filterRow(sub, r) { diff --git a/vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go b/vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go index 38ed0fcb0..8b5c6d83f 100644 --- a/vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go +++ b/vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go @@ -46,7 +46,7 @@ func TestConcurrentMutationsReadModifyAndGC(t *testing.T) { Name: name, Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ Id: "cf", - Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}}, + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{Create: &btapb.ColumnFamily{}}, }}, } _, err := s.ModifyColumnFamilies(ctx, req) @@ -57,8 +57,8 @@ func TestConcurrentMutationsReadModifyAndGC(t *testing.T) { Name: name, Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ Id: "cf", - Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{&btapb.ColumnFamily{ - GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}, + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{Update: &btapb.ColumnFamily{ + GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}, }}, }}, } @@ -70,7 +70,7 @@ func TestConcurrentMutationsReadModifyAndGC(t *testing.T) { var ts int64 ms := func() []*btpb.Mutation { return []*btpb.Mutation{{ - Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ + Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{ FamilyName: "cf", ColumnQualifier: []byte(`col`), TimestampMicros: atomic.AddInt64(&ts, 1000), @@ -85,7 +85,7 @@ func TestConcurrentMutationsReadModifyAndGC(t *testing.T) { Rules: []*btpb.ReadModifyWriteRule{{ FamilyName: "cf", ColumnQualifier: []byte("col"), - Rule: &btpb.ReadModifyWriteRule_IncrementAmount{1}, + Rule: 
&btpb.ReadModifyWriteRule_IncrementAmount{IncrementAmount: 1}, }}, } } @@ -139,8 +139,8 @@ func TestCreateTableWithFamily(t *testing.T) { ctx := context.Background() newTbl := btapb.Table{ ColumnFamilies: map[string]*btapb.ColumnFamily{ - "cf1": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{123}}}, - "cf2": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{456}}}, + "cf1": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 123}}}, + "cf2": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 456}}}, }, } cTbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) @@ -184,7 +184,7 @@ func TestSampleRowKeys(t *testing.T) { ctx := context.Background() newTbl := btapb.Table{ ColumnFamilies: map[string]*btapb.ColumnFamily{ - "cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}}, + "cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}}, }, } tbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) @@ -200,7 +200,7 @@ func TestSampleRowKeys(t *testing.T) { TableName: tbl.Name, RowKey: []byte("row-" + strconv.Itoa(i)), Mutations: []*btpb.Mutation{{ - Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ + Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{ FamilyName: "cf", ColumnQualifier: []byte("col"), TimestampMicros: 0, @@ -235,7 +235,7 @@ func TestDropRowRange(t *testing.T) { ctx := context.Background() newTbl := btapb.Table{ ColumnFamilies: map[string]*btapb.ColumnFamily{ - "cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}}, + "cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}}, }, } tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) @@ -255,7 +255,7 @@ func TestDropRowRange(t *testing.T) { TableName: tblInfo.Name, RowKey: []byte(prefix + strconv.Itoa(i)), Mutations: []*btpb.Mutation{{ - Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ + Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{ FamilyName: "cf", ColumnQualifier: []byte("col"), TimestampMicros: 0, @@ -274,7 +274,7 @@ func TestDropRowRange(t *testing.T) { tblSize := tbl.rows.Len() req := &btapb.DropRowRangeRequest{ Name: tblInfo.Name, - Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("AAA")}, + Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte("AAA")}, } if _, err = s.DropRowRange(ctx, req); err != nil { t.Fatalf("Dropping first range: %v", err) @@ -286,7 +286,7 @@ func TestDropRowRange(t *testing.T) { req = &btapb.DropRowRangeRequest{ Name: tblInfo.Name, - Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("DDD")}, + Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte("DDD")}, } if _, err = s.DropRowRange(ctx, req); err != nil { t.Fatalf("Dropping second range: %v", err) @@ -298,7 +298,7 @@ func TestDropRowRange(t *testing.T) { req = &btapb.DropRowRangeRequest{ Name: tblInfo.Name, - Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("XXX")}, + Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte("XXX")}, } if _, err = s.DropRowRange(ctx, req); err != nil { t.Fatalf("Dropping invalid range: %v", err) @@ -310,7 +310,7 @@ func TestDropRowRange(t *testing.T) { req = &btapb.DropRowRangeRequest{ Name: tblInfo.Name, - Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{true}, + Target: 
&btapb.DropRowRangeRequest_DeleteAllDataFromTable{DeleteAllDataFromTable: true}, } if _, err = s.DropRowRange(ctx, req); err != nil { t.Fatalf("Dropping all data: %v", err) @@ -326,7 +326,7 @@ func TestDropRowRange(t *testing.T) { req = &btapb.DropRowRangeRequest{ Name: tblInfo.Name, - Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{true}, + Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{DeleteAllDataFromTable: true}, } if _, err = s.DropRowRange(ctx, req); err != nil { t.Fatalf("Dropping all data: %v", err) @@ -344,7 +344,7 @@ func TestDropRowRange(t *testing.T) { req = &btapb.DropRowRangeRequest{ Name: tblInfo.Name, - Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("BBB")}, + Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte("BBB")}, } if _, err = s.DropRowRange(ctx, req); err != nil { t.Fatalf("Dropping range: %v", err) @@ -373,7 +373,7 @@ func TestReadRows(t *testing.T) { } newTbl := btapb.Table{ ColumnFamilies: map[string]*btapb.ColumnFamily{ - "cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}}, + "cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}}, }, } tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) @@ -384,7 +384,7 @@ func TestReadRows(t *testing.T) { TableName: tblInfo.Name, RowKey: []byte("row"), Mutations: []*btpb.Mutation{{ - Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ + Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{ FamilyName: "cf0", ColumnQualifier: []byte("col"), TimestampMicros: 1000, @@ -398,11 +398,11 @@ func TestReadRows(t *testing.T) { for _, rowset := range []*btpb.RowSet{ {RowKeys: [][]byte{[]byte("row")}}, - {RowRanges: []*btpb.RowRange{{StartKey: &btpb.RowRange_StartKeyClosed{[]byte("")}}}}, - {RowRanges: []*btpb.RowRange{{StartKey: &btpb.RowRange_StartKeyClosed{[]byte("r")}}}}, + {RowRanges: []*btpb.RowRange{{StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("")}}}}, + {RowRanges: []*btpb.RowRange{{StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("r")}}}}, {RowRanges: []*btpb.RowRange{{ - StartKey: &btpb.RowRange_StartKeyClosed{[]byte("")}, - EndKey: &btpb.RowRange_EndKeyOpen{[]byte("s")}, + StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("")}, + EndKey: &btpb.RowRange_EndKeyOpen{EndKeyOpen: []byte("s")}, }}}, } { mock := &MockReadRowsServer{} @@ -423,7 +423,7 @@ func TestReadRowsOrder(t *testing.T) { ctx := context.Background() newTbl := btapb.Table{ ColumnFamilies: map[string]*btapb.ColumnFamily{ - "cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}}, + "cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}}, }, } tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) @@ -436,7 +436,7 @@ func TestReadRowsOrder(t *testing.T) { Name: tblInfo.Name, Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ Id: "cf" + strconv.Itoa(i), - Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}}, + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{Create: &btapb.ColumnFamily{}}, }}, } } @@ -454,7 +454,7 @@ func TestReadRowsOrder(t *testing.T) { TableName: tblInfo.Name, RowKey: []byte("row"), Mutations: []*btpb.Mutation{{ - Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ + Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{ FamilyName: "cf" + strconv.Itoa(fc), 
ColumnQualifier: []byte("col" + strconv.Itoa(cc)), TimestampMicros: int64((tc + 1) * 1000), @@ -512,16 +512,17 @@ func TestReadRowsOrder(t *testing.T) { // Read with interleave filter inter := &btpb.RowFilter_Interleave{} - fnr := &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{"1"}} - cqr := &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte("2")}} + fnr := &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{FamilyNameRegexFilter: "1"}} + cqr := &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{ColumnQualifierRegexFilter: []byte("2")}} inter.Filters = append(inter.Filters, fnr, cqr) req = &btpb.ReadRowsRequest{ TableName: tblInfo.Name, Rows: &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}}, Filter: &btpb.RowFilter{ - Filter: &btpb.RowFilter_Interleave_{inter}, + Filter: &btpb.RowFilter_Interleave_{Interleave: inter}, }, } + mock = &MockReadRowsServer{} if err = s.ReadRows(req, mock); err != nil { t.Errorf("ReadRows error: %v", err) @@ -542,7 +543,7 @@ func TestReadRowsOrder(t *testing.T) { Rules: []*btpb.ReadModifyWriteRule{{ FamilyName: "cf3", ColumnQualifier: []byte("col" + strconv.Itoa(i)), - Rule: &btpb.ReadModifyWriteRule_IncrementAmount{1}, + Rule: &btpb.ReadModifyWriteRule_IncrementAmount{IncrementAmount: 1}, }}, } } @@ -573,7 +574,7 @@ func TestCheckAndMutateRowWithoutPredicate(t *testing.T) { ctx := context.Background() newTbl := btapb.Table{ ColumnFamilies: map[string]*btapb.ColumnFamily{ - "cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}}, + "cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}}, }, } tbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) @@ -587,7 +588,7 @@ func TestCheckAndMutateRowWithoutPredicate(t *testing.T) { TableName: tbl.Name, RowKey: []byte("row-present"), Mutations: []*btpb.Mutation{{ - Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ + Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{ FamilyName: "cf", ColumnQualifier: []byte("col"), TimestampMicros: 0, @@ -619,3 +620,99 @@ func TestCheckAndMutateRowWithoutPredicate(t *testing.T) { t.Errorf("Invalid PredicateMatched value: got %t, want %t", got, want) } } + +// helper function to populate table data +func populateTable(ctx context.Context, s *server) (*btapb.Table, error) { + newTbl := btapb.Table{ + ColumnFamilies: map[string]*btapb.ColumnFamily{ + "cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}}, + }, + } + tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) + if err != nil { + return nil, err + } + count := 3 + mcf := func(i int) *btapb.ModifyColumnFamiliesRequest { + return &btapb.ModifyColumnFamiliesRequest{ + Name: tblInfo.Name, + Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ + Id: "cf" + strconv.Itoa(i), + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}}, + }}, + } + } + for i := 1; i <= count; i++ { + _, err = s.ModifyColumnFamilies(ctx, mcf(i)) + if err != nil { + return nil, err + } + } + // Populate the table + for fc := 0; fc < count; fc++ { + for cc := count; cc > 0; cc-- { + for tc := 0; tc < count; tc++ { + req := &btpb.MutateRowRequest{ + TableName: tblInfo.Name, + RowKey: []byte("row"), + Mutations: []*btpb.Mutation{{ + Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ + FamilyName: "cf" + strconv.Itoa(fc), + ColumnQualifier: []byte("col" + 
strconv.Itoa(cc)), + TimestampMicros: int64((tc + 1) * 1000), + Value: []byte{}, + }}, + }}, + } + if _, err := s.MutateRow(ctx, req); err != nil { + return nil, err + } + } + } + } + + return tblInfo, nil +} + +func TestFilters(t *testing.T) { + tests := []struct { + in *btpb.RowFilter + out int + }{ + {in: &btpb.RowFilter{Filter: &btpb.RowFilter_BlockAllFilter{true}}, out: 0}, + {in: &btpb.RowFilter{Filter: &btpb.RowFilter_BlockAllFilter{false}}, out: 1}, + {in: &btpb.RowFilter{Filter: &btpb.RowFilter_PassAllFilter{true}}, out: 1}, + {in: &btpb.RowFilter{Filter: &btpb.RowFilter_PassAllFilter{false}}, out: 0}, + } + + ctx := context.Background() + + s := &server{ + tables: make(map[string]*table), + } + + tblInfo, err := populateTable(ctx, s) + if err != nil { + t.Fatal(err) + } + + req := &btpb.ReadRowsRequest{ + TableName: tblInfo.Name, + Rows: &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}}, + } + + for _, tc := range tests { + req.Filter = tc.in + + mock := &MockReadRowsServer{} + if err = s.ReadRows(req, mock); err != nil { + t.Errorf("ReadRows error: %v", err) + continue + } + + if len(mock.responses) != tc.out { + t.Errorf("Response count: got %d, want %d", len(mock.responses), tc.out) + continue + } + } +} diff --git a/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go index 93904231a..203564774 100644 --- a/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go +++ b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go @@ -1074,12 +1074,12 @@ func doSet(ctx context.Context, args ...string) { func doSetGCPolicy(ctx context.Context, args ...string) { if len(args) < 3 { - log.Fatalf("usage: cbt setgcpolicy ( maxage= | maxversions= )") + log.Fatalf("usage: cbt setgcpolicy
( maxage= | maxversions= | maxage= (and|or) maxversions= )") } table := args[0] fam := args[1] - pol, err := parseGCPolicy(args[2]) + pol, err := parseGCPolicy(strings.Join(args[2:], " ")) if err != nil { log.Fatal(err) } @@ -1101,24 +1101,55 @@ func doWaitForReplicaiton(ctx context.Context, args ...string) { } func parseGCPolicy(policyStr string) (bigtable.GCPolicy, error) { - var pol bigtable.GCPolicy - switch p := policyStr; { - case strings.HasPrefix(p, "maxage="): - d, err := parseDuration(p[7:]) + words := strings.Fields(policyStr) + switch len(words) { + case 1: + return parseSinglePolicy(words[0]) + case 3: + p1, err := parseSinglePolicy(words[0]) if err != nil { return nil, err } - pol = bigtable.MaxAgePolicy(d) - case strings.HasPrefix(p, "maxversions="): - n, err := strconv.ParseUint(p[12:], 10, 16) + p2, err := parseSinglePolicy(words[2]) if err != nil { return nil, err } - pol = bigtable.MaxVersionsPolicy(int(n)) + switch words[1] { + case "and": + return bigtable.IntersectionPolicy(p1, p2), nil + case "or": + return bigtable.UnionPolicy(p1, p2), nil + default: + return nil, fmt.Errorf("Expected 'and' or 'or', saw %q", words[1]) + } default: - return nil, fmt.Errorf("Bad GC policy %q", p) + return nil, fmt.Errorf("Expected '1' or '3' parameter count, saw %d", len(words)) } - return pol, nil + return nil, nil +} + +func parseSinglePolicy(s string) (bigtable.GCPolicy, error) { + words := strings.Split(s, "=") + if len(words) != 2 { + return nil, fmt.Errorf("Expected 'name=value', got %q", words) + } + switch words[0] { + case "maxage": + d, err := parseDuration(words[1]) + if err != nil { + return nil, err + } + return bigtable.MaxAgePolicy(d), nil + case "maxversions": + n, err := strconv.ParseUint(words[1], 10, 16) + if err != nil { + return nil, err + } + return bigtable.MaxVersionsPolicy(int(n)), nil + default: + return nil, fmt.Errorf("Expected 'maxage' or 'maxversions', got %q", words[1]) + } + return nil, nil } func parseStorageType(storageTypeStr string) (bigtable.StorageType, error) { diff --git a/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go index 350e4f006..2616fb496 100644 --- a/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go +++ b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go @@ -17,6 +17,9 @@ package main import ( "testing" "time" + + "cloud.google.com/go/bigtable" + "github.com/google/go-cmp/cmp" ) func TestParseDuration(t *testing.T) { @@ -57,3 +60,54 @@ func TestParseDuration(t *testing.T) { } } } + +func TestParseGCPolicy(t *testing.T) { + tests := []struct { + in string + out bigtable.GCPolicy + fail bool + }{ + {in: "maxage=1h", out: bigtable.MaxAgePolicy(time.Hour * 1)}, + {in: "maxversions=2", out: bigtable.MaxVersionsPolicy(int(2))}, + {in: "maxversions=2 and maxage=1h", out: bigtable.IntersectionPolicy([]bigtable.GCPolicy{bigtable.MaxVersionsPolicy(int(2)), bigtable.MaxAgePolicy(time.Hour * 1)}...)}, + {in: "maxversions=2 or maxage=1h", out: bigtable.UnionPolicy([]bigtable.GCPolicy{bigtable.MaxVersionsPolicy(int(2)), bigtable.MaxAgePolicy(time.Hour * 1)}...)}, + + {in: "maxage=1", fail: true}, + {in: "maxage = 1h", fail: true}, + {in: "maxage =1h", fail: true}, + {in: "maxage= 1h", fail: true}, + {in: "foomaxage=1h", fail: true}, + {in: "maxversions=1h", fail: true}, + {in: "maxversions= 1", fail: true}, + {in: "maxversions = 1", fail: true}, + {in: "maxversions =1", fail: true}, + {in: "barmaxversions=1", fail: true}, + {in: "maxage = 1h or maxversions=1h", fail: true}, 
+ {in: "foomaxversions=2 or maxage=1h", fail: true}, + {in: "maxversions=2 or barmaxage=1h", fail: true}, + {in: "foomaxversions=2 or barmaxage=1h", fail: true}, + {in: "maxage = 1h and maxversions=1h", fail: true}, + {in: "foomaxage=1h and maxversions=1", fail: true}, + {in: "maxage=1h and barmaxversions=1", fail: true}, + {in: "foomaxage=1h and barmaxversions=1", fail: true}, + } + for _, tc := range tests { + got, err := parseGCPolicy(tc.in) + if !tc.fail && err != nil { + t.Errorf("parseGCPolicy(%q) unexpectedly failed: %v", tc.in, err) + continue + } + if tc.fail && err == nil { + t.Errorf("parseGCPolicy(%q) did not fail", tc.in) + continue + } + if tc.fail { + continue + } + var cmpOpts cmp.Options + cmpOpts = append(cmpOpts, cmp.AllowUnexported(bigtable.IntersectionPolicy([]bigtable.GCPolicy{}...)), cmp.AllowUnexported(bigtable.UnionPolicy([]bigtable.GCPolicy{}...))) + if !cmp.Equal(got, tc.out, cmpOpts) { + t.Errorf("parseGCPolicy(%q) =%v, want %v", tc.in, got, tc.out) + } + } +} diff --git a/vendor/cloud.google.com/go/bigtable/filter.go b/vendor/cloud.google.com/go/bigtable/filter.go index b8e453bb5..1fea1d2f2 100644 --- a/vendor/cloud.google.com/go/bigtable/filter.go +++ b/vendor/cloud.google.com/go/bigtable/filter.go @@ -51,7 +51,7 @@ func (cf chainFilter) proto() *btpb.RowFilter { chain.Filters = append(chain.Filters, sf.proto()) } return &btpb.RowFilter{ - Filter: &btpb.RowFilter_Chain_{chain}, + Filter: &btpb.RowFilter_Chain_{Chain: chain}, } } @@ -77,7 +77,7 @@ func (ilf interleaveFilter) proto() *btpb.RowFilter { inter.Filters = append(inter.Filters, sf.proto()) } return &btpb.RowFilter{ - Filter: &btpb.RowFilter_Interleave_{inter}, + Filter: &btpb.RowFilter_Interleave_{Interleave: inter}, } } @@ -91,7 +91,7 @@ type rowKeyFilter string func (rkf rowKeyFilter) String() string { return fmt.Sprintf("row(%s)", string(rkf)) } func (rkf rowKeyFilter) proto() *btpb.RowFilter { - return &btpb.RowFilter{Filter: &btpb.RowFilter_RowKeyRegexFilter{[]byte(rkf)}} + return &btpb.RowFilter{Filter: &btpb.RowFilter_RowKeyRegexFilter{RowKeyRegexFilter: []byte(rkf)}} } // FamilyFilter returns a filter that matches cells whose family name @@ -104,7 +104,7 @@ type familyFilter string func (ff familyFilter) String() string { return fmt.Sprintf("col(%s:)", string(ff)) } func (ff familyFilter) proto() *btpb.RowFilter { - return &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{string(ff)}} + return &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{FamilyNameRegexFilter: string(ff)}} } // ColumnFilter returns a filter that matches cells whose column name @@ -117,7 +117,7 @@ type columnFilter string func (cf columnFilter) String() string { return fmt.Sprintf("col(.*:%s)", string(cf)) } func (cf columnFilter) proto() *btpb.RowFilter { - return &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte(cf)}} + return &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{ColumnQualifierRegexFilter: []byte(cf)}} } // ValueFilter returns a filter that matches cells whose value @@ -130,7 +130,7 @@ type valueFilter string func (vf valueFilter) String() string { return fmt.Sprintf("value_match(%s)", string(vf)) } func (vf valueFilter) proto() *btpb.RowFilter { - return &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRegexFilter{[]byte(vf)}} + return &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRegexFilter{ValueRegexFilter: []byte(vf)}} } // LatestNFilter returns a filter that matches the most recent N cells in each column. 
@@ -141,7 +141,7 @@ type latestNFilter int32 func (lnf latestNFilter) String() string { return fmt.Sprintf("col(*,%d)", lnf) } func (lnf latestNFilter) proto() *btpb.RowFilter { - return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerColumnLimitFilter{int32(lnf)}} + return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerColumnLimitFilter{CellsPerColumnLimitFilter: int32(lnf)}} } // StripValueFilter returns a filter that replaces each value with the empty string. @@ -151,7 +151,7 @@ type stripValueFilter struct{} func (stripValueFilter) String() string { return "strip_value()" } func (stripValueFilter) proto() *btpb.RowFilter { - return &btpb.RowFilter{Filter: &btpb.RowFilter_StripValueTransformer{true}} + return &btpb.RowFilter{Filter: &btpb.RowFilter_StripValueTransformer{StripValueTransformer: true}} } // TimestampRangeFilter returns a filter that matches any cells whose timestamp is within the given time bounds. A zero @@ -186,11 +186,10 @@ func (trf timestampRangeFilter) String() string { func (trf timestampRangeFilter) proto() *btpb.RowFilter { return &btpb.RowFilter{ - Filter: &btpb.RowFilter_TimestampRangeFilter{ - &btpb.TimestampRange{ - int64(trf.startTime.TruncateToMilliseconds()), - int64(trf.endTime.TruncateToMilliseconds()), - }, + Filter: &btpb.RowFilter_TimestampRangeFilter{TimestampRangeFilter: &btpb.TimestampRange{ + StartTimestampMicros: int64(trf.startTime.TruncateToMilliseconds()), + EndTimestampMicros: int64(trf.endTime.TruncateToMilliseconds()), + }, }} } @@ -213,12 +212,12 @@ func (crf columnRangeFilter) String() string { func (crf columnRangeFilter) proto() *btpb.RowFilter { r := &btpb.ColumnRange{FamilyName: crf.family} if crf.start != "" { - r.StartQualifier = &btpb.ColumnRange_StartQualifierClosed{[]byte(crf.start)} + r.StartQualifier = &btpb.ColumnRange_StartQualifierClosed{StartQualifierClosed: []byte(crf.start)} } if crf.end != "" { - r.EndQualifier = &btpb.ColumnRange_EndQualifierOpen{[]byte(crf.end)} + r.EndQualifier = &btpb.ColumnRange_EndQualifierOpen{EndQualifierOpen: []byte(crf.end)} } - return &btpb.RowFilter{&btpb.RowFilter_ColumnRangeFilter{r}} + return &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnRangeFilter{ColumnRangeFilter: r}} } // ValueRangeFilter returns a filter that matches cells with values that fall within @@ -239,12 +238,12 @@ func (vrf valueRangeFilter) String() string { func (vrf valueRangeFilter) proto() *btpb.RowFilter { r := &btpb.ValueRange{} if vrf.start != nil { - r.StartValue = &btpb.ValueRange_StartValueClosed{vrf.start} + r.StartValue = &btpb.ValueRange_StartValueClosed{StartValueClosed: vrf.start} } if vrf.end != nil { - r.EndValue = &btpb.ValueRange_EndValueOpen{vrf.end} + r.EndValue = &btpb.ValueRange_EndValueOpen{EndValueOpen: vrf.end} } - return &btpb.RowFilter{&btpb.RowFilter_ValueRangeFilter{r}} + return &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRangeFilter{ValueRangeFilter: r}} } // ConditionFilter returns a filter that evaluates to one of two possible filters depending @@ -278,10 +277,10 @@ func (cf conditionFilter) proto() *btpb.RowFilter { ff = cf.falseFilter.proto() } return &btpb.RowFilter{ - &btpb.RowFilter_Condition_{&btpb.RowFilter_Condition{ - cf.predicateFilter.proto(), - tf, - ff, + Filter: &btpb.RowFilter_Condition_{Condition: &btpb.RowFilter_Condition{ + PredicateFilter: cf.predicateFilter.proto(), + TrueFilter: tf, + FalseFilter: ff, }}} } @@ -297,7 +296,7 @@ func (cof cellsPerRowOffsetFilter) String() string { } func (cof cellsPerRowOffsetFilter) proto() *btpb.RowFilter { - return 
&btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowOffsetFilter{int32(cof)}} + return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowOffsetFilter{CellsPerRowOffsetFilter: int32(cof)}} } // CellsPerRowLimitFilter returns a filter that matches only the first N cells of each row. @@ -312,7 +311,7 @@ func (clf cellsPerRowLimitFilter) String() string { } func (clf cellsPerRowLimitFilter) proto() *btpb.RowFilter { - return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowLimitFilter{int32(clf)}} + return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowLimitFilter{CellsPerRowLimitFilter: int32(clf)}} } // TODO(dsymonds): More filters: sampling diff --git a/vendor/cloud.google.com/go/bigtable/gc.go b/vendor/cloud.google.com/go/bigtable/gc.go index e7054ade6..74510daea 100644 --- a/vendor/cloud.google.com/go/bigtable/gc.go +++ b/vendor/cloud.google.com/go/bigtable/gc.go @@ -52,7 +52,7 @@ func (ip intersectionPolicy) proto() *bttdpb.GcRule { inter.Rules = append(inter.Rules, sp.proto()) } return &bttdpb.GcRule{ - Rule: &bttdpb.GcRule_Intersection_{inter}, + Rule: &bttdpb.GcRule_Intersection_{Intersection: inter}, } } @@ -77,7 +77,7 @@ func (up unionPolicy) proto() *bttdpb.GcRule { union.Rules = append(union.Rules, sp.proto()) } return &bttdpb.GcRule{ - Rule: &bttdpb.GcRule_Union_{union}, + Rule: &bttdpb.GcRule_Union_{Union: union}, } } @@ -90,7 +90,7 @@ type maxVersionsPolicy int func (mvp maxVersionsPolicy) String() string { return fmt.Sprintf("versions() > %d", int(mvp)) } func (mvp maxVersionsPolicy) proto() *bttdpb.GcRule { - return &bttdpb.GcRule{Rule: &bttdpb.GcRule_MaxNumVersions{int32(mvp)}} + return &bttdpb.GcRule{Rule: &bttdpb.GcRule_MaxNumVersions{MaxNumVersions: int32(mvp)}} } // MaxAgePolicy returns a GC policy that applies to all cells @@ -123,7 +123,7 @@ func (ma maxAgePolicy) proto() *bttdpb.GcRule { // Fix this if people care about GC policies over 290 years. ns := time.Duration(ma).Nanoseconds() return &bttdpb.GcRule{ - Rule: &bttdpb.GcRule_MaxAge{&durpb.Duration{ + Rule: &bttdpb.GcRule_MaxAge{MaxAge: &durpb.Duration{ Seconds: ns / 1e9, Nanos: int32(ns % 1e9), }}, diff --git a/vendor/cloud.google.com/go/bigtable/go18.go b/vendor/cloud.google.com/go/bigtable/go18.go new file mode 100644 index 000000000..552b7b6f2 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/go18.go @@ -0,0 +1,68 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build go1.8 + +package bigtable + +import ( + "fmt" + + "go.opencensus.io/plugin/ocgrpc" + "go.opencensus.io/trace" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +func openCensusOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithGRPCDialOption(grpc.WithStatsHandler(&ocgrpc.ClientHandler{})), + } +} + +func traceStartSpan(ctx context.Context, name string) context.Context { + ctx, _ = trace.StartSpan(ctx, name) + return ctx +} + +func traceEndSpan(ctx context.Context, err error) { + span := trace.FromContext(ctx) + if err != nil { + span.SetStatus(trace.Status{Message: err.Error()}) + } + + span.End() +} + +func tracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) { + var attrs []trace.Attribute + for k, v := range attrMap { + var a trace.Attribute + switch v := v.(type) { + case string: + a = trace.StringAttribute(k, v) + case bool: + a = trace.BoolAttribute(k, v) + case int: + a = trace.Int64Attribute(k, int64(v)) + case int64: + a = trace.Int64Attribute(k, v) + default: + a = trace.StringAttribute(k, fmt.Sprintf("%#v", v)) + } + attrs = append(attrs, a) + } + trace.FromContext(ctx).Annotatef(attrs, format, args...) +} diff --git a/vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go b/vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go index f32e834d3..6d3c67e96 100644 --- a/vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go +++ b/vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go @@ -20,8 +20,8 @@ import ( "time" "golang.org/x/net/context" - "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) func TestRandomizedDelays(t *testing.T) { @@ -43,7 +43,7 @@ func TestRandomizedDelays(t *testing.T) { } invokeTime = time.Now() // Workaround for `go vet`: https://github.com/grpc/grpc-go/issues/90 - errf := grpc.Errorf + errf := status.Errorf return errf(codes.Unavailable, "") }, settings...) } diff --git a/vendor/cloud.google.com/go/bigtable/not_go18.go b/vendor/cloud.google.com/go/bigtable/not_go18.go new file mode 100644 index 000000000..f86700db2 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/not_go18.go @@ -0,0 +1,36 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.8 + +package bigtable + +import ( + "golang.org/x/net/context" + "google.golang.org/api/option" +) + +// OpenCensus only supports go 1.8 and higher. 
+ +func openCensusOptions() []option.ClientOption { return nil } + +func traceStartSpan(ctx context.Context, _ string) context.Context { + return ctx +} + +func traceEndSpan(context.Context, error) { +} + +func tracePrintf(context.Context, map[string]interface{}, string, ...interface{}) { +} diff --git a/vendor/cloud.google.com/go/bigtable/retry_test.go b/vendor/cloud.google.com/go/bigtable/retry_test.go index 518ce3f66..03a9389e5 100644 --- a/vendor/cloud.google.com/go/bigtable/retry_test.go +++ b/vendor/cloud.google.com/go/bigtable/retry_test.go @@ -30,6 +30,7 @@ import ( rpcpb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) func setupFakeServer(opt ...grpc.ServerOption) (tbl *Table, cleanup func(), err error) { @@ -42,12 +43,12 @@ func setupFakeServer(opt ...grpc.ServerOption) (tbl *Table, cleanup func(), err return nil, nil, err } - client, err := NewClient(context.Background(), "client", "instance", option.WithGRPCConn(conn)) + client, err := NewClient(context.Background(), "client", "instance", option.WithGRPCConn(conn), option.WithGRPCDialOption(grpc.WithBlock())) if err != nil { return nil, nil, err } - adminClient, err := NewAdminClient(context.Background(), "client", "instance", option.WithGRPCConn(conn)) + adminClient, err := NewAdminClient(context.Background(), "client", "instance", option.WithGRPCConn(conn), option.WithGRPCDialOption(grpc.WithBlock())) if err != nil { return nil, nil, err } @@ -76,7 +77,7 @@ func TestRetryApply(t *testing.T) { errInjector := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { if strings.HasSuffix(info.FullMethod, "MutateRow") && errCount < 3 { errCount++ - return nil, grpc.Errorf(code, "") + return nil, status.Errorf(code, "") } return handler(ctx, req) } @@ -156,7 +157,7 @@ func TestRetryApplyBulk(t *testing.T) { f = func(ss grpc.ServerStream) error { if errCount < 3 { errCount++ - return grpc.Errorf(codes.Aborted, "") + return status.Errorf(codes.Aborted, "") } return nil } @@ -182,7 +183,7 @@ func TestRetryApplyBulk(t *testing.T) { switch errCount { case 0: // Retryable request failure - err = grpc.Errorf(codes.Unavailable, "") + err = status.Errorf(codes.Unavailable, "") case 1: // Two mutations fail writeMutateRowsResponse(ss, codes.Unavailable, codes.OK, codes.Aborted) @@ -235,8 +236,8 @@ func TestRetryApplyBulk(t *testing.T) { t.Errorf("unretryable errors: request failed %v", err) } want := []error{ - grpc.Errorf(codes.FailedPrecondition, ""), - grpc.Errorf(codes.Aborted, ""), + status.Errorf(codes.FailedPrecondition, ""), + status.Errorf(codes.Aborted, ""), } if !testutil.Equal(want, errors) { t.Errorf("unretryable errors: got: %v, want: %v", errors, want) @@ -323,20 +324,20 @@ func TestRetryReadRows(t *testing.T) { switch errCount { case 0: // Retryable request failure - err = grpc.Errorf(codes.Unavailable, "") + err = status.Errorf(codes.Unavailable, "") case 1: // Write two rows then error if want, got := "a", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got { t.Errorf("first retry, no data received yet: got %q, want %q", got, want) } writeReadRowsResponse(ss, "a", "b") - err = grpc.Errorf(codes.Unavailable, "") + err = status.Errorf(codes.Unavailable, "") case 2: // Retryable request failure if want, got := "b\x00", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got { t.Errorf("2 range retries: got %q, want %q", got, want) } - err = 
grpc.Errorf(codes.Unavailable, "") + err = status.Errorf(codes.Unavailable, "") case 3: // Write two more rows writeReadRowsResponse(ss, "c", "d") diff --git a/vendor/cloud.google.com/go/cloud.go b/vendor/cloud.google.com/go/cloud.go index 6ba428dc6..0be0df33f 100644 --- a/vendor/cloud.google.com/go/cloud.go +++ b/vendor/cloud.google.com/go/cloud.go @@ -12,9 +12,29 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package cloud is the root of the packages used to access Google Cloud -// Services. See https://godoc.org/cloud.google.com/go for a full list -// of sub-packages. -// -// This package documents how to authorize and authenticate the sub packages. +/* +Package cloud is the root of the packages used to access Google Cloud +Services. See https://godoc.org/cloud.google.com/go for a full list +of sub-packages. + +Examples in this package show ways to authorize and authenticate the +sub packages. + +Connection Pooling + +Connection pooling differs in clients based on their transport. Cloud +clients either rely on HTTP or gRPC transports to communicate +with Google Cloud. + +Cloud clients that use HTTP (bigquery, compute, storage, and translate) rely on the +underlying HTTP transport to cache connections for later re-use. These are cached to +the default http.MaxIdleConns and http.MaxIdleConnsPerHost settings in +http.DefaultTransport. + +For gPRC clients (all others in this repo), connection pooling is configurable. Users +of cloud client libraries may specify option.WithGRPCConnectionPool(n) as a client +option to NewClient calls. This configures the underlying gRPC connections to be +pooled and addressed in a round robin fashion. + +*/ package cloud // import "cloud.google.com/go" diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector_test.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector_test.go index 9d2ab9406..980cbfa93 100644 --- a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector_test.go +++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector_test.go @@ -43,15 +43,15 @@ func TestValueCollector(t *testing.T) { c := NewCollector(&Program{}, 26) // Add some variables of various types, whose values we want the collector to read. 
variablesToAdd := []debug.LocalVar{ - {Name: "a", Var: debug.Var{int16Type, 0x1}}, - {Name: "b", Var: debug.Var{stringType, 0x2}}, - {Name: "c", Var: debug.Var{structType, 0x3}}, - {Name: "d", Var: debug.Var{pointerType, 0x4}}, - {Name: "e", Var: debug.Var{arrayType, 0x5}}, - {Name: "f", Var: debug.Var{debugStringType, 0x6}}, - {Name: "g", Var: debug.Var{mapType, 0x7}}, - {Name: "h", Var: debug.Var{channelType, 0x8}}, - {Name: "i", Var: debug.Var{sliceType, 0x9}}, + {Name: "a", Var: debug.Var{TypeID: int16Type, Address: 0x1}}, + {Name: "b", Var: debug.Var{TypeID: stringType, Address: 0x2}}, + {Name: "c", Var: debug.Var{TypeID: structType, Address: 0x3}}, + {Name: "d", Var: debug.Var{TypeID: pointerType, Address: 0x4}}, + {Name: "e", Var: debug.Var{TypeID: arrayType, Address: 0x5}}, + {Name: "f", Var: debug.Var{TypeID: debugStringType, Address: 0x6}}, + {Name: "g", Var: debug.Var{TypeID: mapType, Address: 0x7}}, + {Name: "h", Var: debug.Var{TypeID: channelType, Address: 0x8}}, + {Name: "i", Var: debug.Var{TypeID: sliceType, Address: 0x9}}, } expectedResults := []*cd.Variable{ &cd.Variable{Name: "a", VarTableIndex: 1}, @@ -195,17 +195,17 @@ func (p *Program) Value(v debug.Var) (debug.Value, error) { Fields: []debug.StructField{ { Name: "x", - Var: debug.Var{int16Type, 0x1}, + Var: debug.Var{TypeID: int16Type, Address: 0x1}, }, { Name: "y", - Var: debug.Var{stringType, 0x2}, + Var: debug.Var{TypeID: stringType, Address: 0x2}, }, }, }, nil case pointerType: // A pointer to the first variable above. - return debug.Pointer{int16Type, 0x1}, nil + return debug.Pointer{TypeID: int16Type, Address: 0x1}, nil case arrayType: // An array of 4 32-bit-wide elements. return debug.Array{ diff --git a/vendor/cloud.google.com/go/datastore/datastore.go b/vendor/cloud.google.com/go/datastore/datastore.go index b35db8718..2d5da5ba8 100644 --- a/vendor/cloud.google.com/go/datastore/datastore.go +++ b/vendor/cloud.google.com/go/datastore/datastore.go @@ -21,6 +21,7 @@ import ( "os" "reflect" + "cloud.google.com/go/internal/trace" "golang.org/x/net/context" "google.golang.org/api/option" gtransport "google.golang.org/api/transport/grpc" @@ -302,11 +303,14 @@ func (c *Client) Close() error { // type than the one it was stored from, or when a field is missing or // unexported in the destination struct. ErrFieldMismatch is only returned if // dst is a struct pointer. -func (c *Client) Get(ctx context.Context, key *Key, dst interface{}) error { +func (c *Client) Get(ctx context.Context, key *Key, dst interface{}) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Get") + defer func() { trace.EndSpan(ctx, err) }() + if dst == nil { // get catches nil interfaces; we need to catch nil ptr here return ErrInvalidEntityType } - err := c.get(ctx, []*Key{key}, []interface{}{dst}, nil) + err = c.get(ctx, []*Key{key}, []interface{}{dst}, nil) if me, ok := err.(MultiError); ok { return me[0] } @@ -323,7 +327,10 @@ func (c *Client) Get(ctx context.Context, key *Key, dst interface{}) error { // As a special case, PropertyList is an invalid type for dst, even though a // PropertyList is a slice of structs. It is treated as invalid to avoid being // mistakenly passed when []PropertyList was intended. 
-func (c *Client) GetMulti(ctx context.Context, keys []*Key, dst interface{}) error { +func (c *Client) GetMulti(ctx context.Context, keys []*Key, dst interface{}) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.GetMulti") + defer func() { trace.EndSpan(ctx, err) }() + return c.get(ctx, keys, dst, nil) } @@ -452,7 +459,11 @@ func (c *Client) Put(ctx context.Context, key *Key, src interface{}) (*Key, erro // PutMulti is a batch version of Put. // // src must satisfy the same conditions as the dst argument to GetMulti. -func (c *Client) PutMulti(ctx context.Context, keys []*Key, src interface{}) ([]*Key, error) { +// TODO(jba): rewrite in terms of Mutate. +func (c *Client) PutMulti(ctx context.Context, keys []*Key, src interface{}) (_ []*Key, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.PutMulti") + defer func() { trace.EndSpan(ctx, err) }() + mutations, err := putMutations(keys, src) if err != nil { return nil, err @@ -540,7 +551,11 @@ func (c *Client) Delete(ctx context.Context, key *Key) error { } // DeleteMulti is a batch version of Delete. -func (c *Client) DeleteMulti(ctx context.Context, keys []*Key) error { +// TODO(jba): rewrite in terms of Mutate. +func (c *Client) DeleteMulti(ctx context.Context, keys []*Key) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.DeleteMulti") + defer func() { trace.EndSpan(ctx, err) }() + mutations, err := deleteMutations(keys) if err != nil { return err @@ -572,3 +587,41 @@ func deleteMutations(keys []*Key) ([]*pb.Mutation, error) { } return mutations, nil } + +// Mutate applies one or more mutations atomically. +// It returns the keys of the argument Mutations, in the same order. +// +// If any of the mutations are invalid, Mutate returns a MultiError with the errors. +// Mutate returns a MultiError in this case even if there is only one Mutation. +func (c *Client) Mutate(ctx context.Context, muts ...*Mutation) (_ []*Key, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Mutate") + defer func() { trace.EndSpan(ctx, err) }() + + pmuts, err := mutationProtos(muts) + if err != nil { + return nil, err + } + req := &pb.CommitRequest{ + ProjectId: c.dataset, + Mutations: pmuts, + Mode: pb.CommitRequest_NON_TRANSACTIONAL, + } + resp, err := c.client.Commit(ctx, req) + if err != nil { + return nil, err + } + // Copy any newly minted keys into the returned keys. + ret := make([]*Key, len(muts)) + for i, mut := range muts { + if mut.key.Incomplete() { + // This key is in the mutation results. 
+			ret[i], err = protoToKey(resp.MutationResults[i].Key)
+			if err != nil {
+				return nil, errors.New("datastore: internal error: server returned an invalid key")
+			}
+		} else {
+			ret[i] = mut.key
+		}
+	}
+	return ret, nil
+}
diff --git a/vendor/cloud.google.com/go/datastore/datastore.replay b/vendor/cloud.google.com/go/datastore/datastore.replay
deleted file mode 100644
index d6ff0c935b26161c63894c8c61482bfffea62931..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

[literal 3325777 reverse-patch data (base85) for the deleted datastore.replay recording omitted; the deletion itself is fully described by the header above]
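As a usage illustration of the Client.Mutate method added to datastore.go above: it batches several mutations into a single commit and returns their keys in argument order. The sketch below is hypothetical; the Task type, the key names, and the "my-project" ID are placeholders, and NewInsert/NewDelete are the Mutation constructors introduced by this patch's new datastore/mutation.go (see the diffstat).

package main

import (
	"context"
	"log"

	"cloud.google.com/go/datastore"
)

// Task is a placeholder entity type for this sketch.
type Task struct {
	Description string
}

func main() {
	ctx := context.Background()
	client, err := datastore.NewClient(ctx, "my-project") // "my-project" is a placeholder
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	insertKey := datastore.IncompleteKey("Task", nil)
	deleteKey := datastore.NameKey("Task", "obsolete", nil)

	// One commit carrying two mutations; the returned keys line up with the
	// arguments, and the incomplete insert key comes back with the ID the
	// server assigned to it.
	keys, err := client.Mutate(ctx,
		datastore.NewInsert(insertKey, &Task{Description: "write docs"}),
		datastore.NewDelete(deleteKey),
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(keys)
}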
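As a usage illustration of the connection-pooling documentation added to cloud.go earlier in this patch: gRPC-based clients accept option.WithGRPCConnectionPool(n) at construction time. The sketch below uses the datastore client purely as an example; the "my-project" ID and the pool size of 4 are arbitrary placeholders, not taken from this patch.

package main

import (
	"context"
	"log"

	"cloud.google.com/go/datastore"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()

	// WithGRPCConnectionPool(4) asks this gRPC-based client to open four
	// connections and spread RPCs across them in round-robin fashion, per the
	// cloud.go documentation above.
	client, err := datastore.NewClient(ctx, "my-project", option.WithGRPCConnectionPool(4))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// RPCs issued through the client now share the pooled connections
	// transparently.
	_, _ = client.Count(ctx, datastore.NewQuery("Task"))
}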
z)Gy7A`jxp+Jq?U%_F})Q@kIo>tEPLeJ80cCa(Cm=FVe++PY|j;`zLw-x-l`(QSNj0 zo5lmmIU`*C*4(IG21a?i0(*O(8-!i?`DpAD=%{81-`kiN=qUFU*thY3_!W4cxlz9} zGs@4);{G7*>H#xVbvN_hn;Z3@xlw;GFsfO?e{6gafnFBg?ty=5Ji2%ebd>*ZIsY7l zQFT{fKkp&Lqwz21M)fx^s@W^>p~e>xq+JbYJfK{3#IDZ$7MW)5yFsXGW?sK1#T zHPYOuzndHN4|AjbX>QcN%#HfDxl#Y|XVfFE@7X-}3m-q{ zS)&Zj8f|da7=yDC49;@>@Z07;TZjzKwUKCW);NQ+k_^roZ*W$!!C4aw&Pp*jYodu+ z%?qyk-P?7Kf=QMjS)Od7%l8+o?wnN5s|d8qHOuoAtNZX%JcrjWSp&Qu`oFKVVC`$F zj}-m-IKZxE_m%E>6@l){y`pKJBg%?`+U1@#-QcVl24`g$oHf(ntW1NmW*MB7WpLJP zgR|zCnC0z0Ki9|LK<`oh-{-RfzRx#%@15s~47AJJIn41KUT(yJ&T`*-=leK(Q1A2J zx^jIC4pP?wABPWWm-`$pG&pOK!C83*XDv23E8oN{|L;*t0=`GNufqZpUCrJPpYXhj zAX#4OIilPG3Cm9!oK$iVV(r%HXW!24_8OaMlWgvz{?HtJvVIXARC;X>ita z24}4@G0WR~(bYZ%2kH5>#>e4_=hyQFXO)3Tr9KW%T!$|hoVCH=tQSqp@^)=(^f5TlYr}tEn|NQFJ&}QSc{_(&JclQI zZS`?@!q-a%XKgb$>tz$O{J-YQ_-lTWCzlm9{efjuifA_06;CU5+ z?#tV?anN&k;+lWk$KivzFK=DdJ_ZMR4&7Th+n6#5rr>t9o86}bi1$++O&#%)yQi6RQe)jvQk36p;&~xZ*UuQgr zzexM~IN*J`x<30Hp7lf$=kOEH;V;rTj1LJy*Jq!@QJ!Cz73eItuhE_(%4rL#FZZl5 z24^K0oHf?qtVDye#u=QIWMY>8&#&>kHVRD-jo7@U=6aMo0Vv(in>^7dYInvcOjy5^_*IDAmwZ+PpP;bU;1UH;#rG6KFw zx!*_4^pO(ObLgIxX<}BhUpvh5yox~gugzKL1QUWZFO zuOdj-VS(p}_;vV%!C6ZU&U(_sEdPBK2Fuqn6J5<-^F^Ll5$HAVnswgK6?w{YcmWXT zEcbn5xsSuw&1!rtL7Mfn!C5Oz%<})5eDpN1IU;^-Ja1x_|JO!Iu&#}@Cc50$;W~q})*GBvYH-#I z24`(BF{|18#*3a;5v2RZM$ZxP`^F}Nvo;%?wZ-79tp;bkWN_9t6SMq(ro0@iXG)og zF84EKyTMuI250RsIIF_MtY+`?uXtWXp!a##tj4b!8-9}LPS4@xrXT1m_jUNHkHZsR zr&Jo8^_q!U{@T zH)=euB1qT#G0ze4YyP;wS??R1Rcmn82L@-IFfq&9^Xo$&g9E+h{l9OV4EVm$tmRXl z$UwWiox{_f!wc*{XSwf(ANe?ZP@k3Fy3Y6*9B7yS*T%;IUmNaoc-BWsu+QOVf1lJR zo>vj*Idpf$hno8xPooUZ8f{{h|JUJ|&>-zA!9-WH_rtNCR}pBJxBEt-=kUb+aGa0B z2ix*_pNl5>7#!$1bXy+pYCu=@ImczpTiV`vnCpxHOb(t$p&Yo8k{x7 z#4P`xDQN*eQ<}ZcPxTZB$#S~qh;nNuEKf5yYr4T%GYrnkFfq&jYknqw&1ZUwgLKW$ z@*EMr=CcgWnr(2_9D}pw8l07FaMnD7vvLg1nr~v3|M#d|{vNf!Qyipw)I!e@@q5%F z6SMr^S6;yTa)0fx*hH6mR=&YmOAO8`Ffpsy_q|VeUPX|6E%h7`_w}TSS^oPf4A@t* z`&#BH4s>7MK94H$9A1_e=q&%w;Zp&hL-&1SxsQ~(uEx)+0_}3odfLRSX0MGEo>vj1 zu4g<)l*@rQhs6eGJ!^2*N`teWGcl{FpN78&yUNGlK(9mp?;ERwb>CRyBPFQUe6yCH z_q>Wg&!PJsRpL1!Zh5VVS^i%e>wkgmfGo+IMd;fn@m zZ8SJ*lZjd0?(>^{3=Yz@vBk&XgL?0E``T)7)=MU4`F}s$7V!PB+55)Jp5j2S4R6pg?BYD~=X-`BBV`8sZ*%YDti@6W8;t{gaG$N-14PV#W`&6xW237FZTPaSJ^&&aN7;muzPy<>L2SE=Crr#>xgsS89l`5 zh>h#$?9#PIkErh5x_0Xv8QJ5e$SzS)H`;90*7x?leQ@-Uf#LD>|FO0!nOvT^nJ*4A% zsKED7f$yOL-$MnyhvoPlmg9R^j_+YPzK2?T54HFnYVkeP;(JKJ_mG0`AqC$<3ciP} z_#U?6d)SKaVJp6e0(=hz_#O)IJrv-3ID+rt2)>6S_#Tend&t7~kcICd3*SQ)zK7lT z9(Ln<*p2UDH@=5e_#Rf_dsv0RM&o-JjqhPJzK3=A z9@gP|ScmUn9lnPgd=EMJ9&+$Kd9`@jS z*n{t3559*r_#W2adsu_-VGX{Av-lp);(IuY@8K-Ihf(+*M&WxHh3{b$zK6B=9@gS} zSc~srExw0&_#WordzgpsVIIDRz4#vX;(OSO?_n>#hbj0Trr>*+g70ApzK8Aj9=79q z*pBaEJHCfy_#T$wdsv3=VHv)MWB4A9;d?lS@8KA}hw=Cx#^ZY!kMChTzK2ct9yZ~7 z*o5z46TXMV_#PJHdsvL`VKKgkL--yJ;d?lQ@8J->hne^uX5xF8iSJ=1zK7TFJ-mkR z;Wc~@ui<-m7T?3O_#U3c_wX#fhg0|-PT_kvh40}MzK5~+9>(H(7>n;=EWU>q@IAbM z@8Jb}4=><*Sb*d1>eIf z_#R%t_wWk7ho|v9JdN++X?zb)<9ql3-@^y^9zMYL@BzMuiTEBS;(M5g?_naohnMg@ zyoB%JC43Jr;d^)j-@_C59-hGW@C3exckw;Ei|^rGd=Ky9dzg*yVK%;p+4vr2<9m1m z-@_aD9^SzB@CLqz)%YG(<9k?*?_o8*hmY|+e2nkmV|))E<9ir`?_mtShcWmb#^8Hc zkMChUzK8Yr9@gV~n2+yaKE8+f_#Wotdw2`q!&~?s-op3r7QTn6_#UR>dzgyvVJg0d z9rzx0;CtAC?_meNho|s8JcaM!DSQu4;d^)=-^2U(9^S|I@IJnW3HTl+;Cq;W?_mPI zhb{OXw%~i%g70AqzK13F9+u#HSc30i3BHGS@IAbP@8KPM5AWc6n1%0Q7QTmB_#S5A zdw3n+!|V7SUdQ+FI=+YJ@I5?-@8LOo56|Iy_z2&_NBAB-!uRkIzK3!69>(E&7>Dm+ z9KMGa@jbkV@8Ly!4=>_-ScLCk5x$2-_#PJFdw3h)!`t{C-p2RvHok`$_#S59dzgXm zVFtd3SMfc(itpi7d=IbUdw2%l!!!6Ep27F<48DgC@jZNq@8Lsy4+Aae-Wff_>4=T%=oRwx`mjCZJrd}54wbAU?4(XoaKxet{8`C^T#NTgBH#lpCiCNz68yP+Z2f8o+ 
zufv)Ab(rai47AIA9nSI`5x)+zOw96sU$X<=m-`&fG129oHP^(fX77jDo>vj*b?D9V zJkR0fq6~DF`yA%@IDAmw19ehd<+isI`rRHLBPJ;*Txe*Qi6INHfwpQ=T!uH4!!N` zNzdVfYT5sN6$Wcx%Y3Am-B*$4RRp>(_jUM`=ZLbRpmw=uEjKvpX@j#?n3(1NHUCV& z*L<`0jbcx6px2?d`^K}L!^=e(=q&%w;Y$7-KIe%Hw9D;lmFI|Z+6c?5P0aFtUuy#1 zm-`$(Z=%aRtHj`}wFYOcGcl{#d(?W*s|b>>QqK`_UoV)L<-e~D!SeN@iLPd^jg6jH z5u|Hlljn%|wXxaYtSts-Z8bRSC4;lJ8JzX9iCNy>vz7T69H=kV!`ER$|BiH(kHLX<`F}s$%ij<8c_IVtazDS`@*EMrAMQ6e>wv*o z2Mx}8+r+Gvj*zPw!< z?|Kd|H<3VR`G0L34fxt<_8h+Fi43&M+c~W996qS$(0v^q^Ktl~Ui03%j{6uK=s9$E zz3=1jLG5zSsx>(41B0_pn3(15+W64N;6Sep|L+?o`TNExPh_B7?(6Wh=ZJEvAg=k3 z49+@ZaMs5LXPq@M%bTxHd<+isI`rRHe9J&*xw^c6qiw?*(@`yh_Pu7_&f#d!;RRcu zU2e-`d>o#zoM3R)Sc9_?4bB>8a8{DRS>p}PN;Wae|9jMgfbUUm%PA(hn!O)R^t_57 z-J>RXjwrW4;vO~G#4P`>jnrUW8&gblxv#@CgR`a@oRw~3R~e zU<-7X``Vb{~8l07BaMmn?v$71%nr(2_9D}pw8l07FVwV5U%6S1lE1SLd z=6H&ObU&Q$IilQdiTh!$!C4DT%<})*SQzlN;eLKCGSTIpm1kmBv)BA$&#MU1HJ|S} zB7V&;F*vKh;H)Q1%<|vY(qQ>|(nMFY*LkZB- zH8|@9gR?dmob{r?SsM+`+GKFnW)rjge=oWvSkIKLCc2t^ro7~N6~r@To9FPvGv#F; zhY#vA#oK%AG9QBjJ%{e+*LEL=4{DctR=L4hI}FaMFfq&j`^GE5x^L_>(dF%$f7Qp} zAYJp7J`PV@^RF44waehF*G0*(y$(P26bE`8dV5x$^&DO<%0OrNe-1wh_#C>QmGP|_zw=5` zi0ASsgR@2(oHfSatOOIYn)Nl-^C|+p=H1s}qUVVCbvVx8tR#c8#v7cKY;e{DgR@c$ z&YEa&)+B?oCYzY$|Fbf+RgmscQ%rON&jJhy$JF|Gv_LwXbPD zQi%69(+$p=VPaOZ*I|a|RRnr%c)Jc~dJZ4d>(KpvBh$y>iECq)!C6@bXU#S^YmUKL zbN!ih+m!=H3>n~Xb{rlP>+IMqD)KvVEn0+y*k>HHSfeiV_uR(~j~N!{u)Ti3V*iW% zKI>JsPapgOfA77m@%P?`$JhO5{}gMxvID0wcbu&Vp(eD!C$yy}Y!AaH;CtAP?_oQ> zhh_L4mf?F?hVNk+zK3J@9**IAIEL@x7`}(`_#VdNdl--JVLZNvP52%*;d|JG?_m?Z zhsF3F7UO$ZjPGGFzK28j9uDDqIE3%v5Wa_*_#S5BdzgvuVJ5zZ*YG{OhVS7ud=IbT zdw3S#!?XAvp2hd@EWU?R_#RH-dpL#f;S|1yvG^Xw;(Hj2?_n&yhZpcYynyfF1$+-L z;Conr?_mMHhXwc^7T|k0fbZb|zJ~+&9uDApn1=6R8oq~V_#UR=dw2!k!z=h6UcvY9 z3ciP@@jX0^@8M~D4^QKJ_yFI-2lyU7!1wS0zK4nU9wy>@n27ITBEE;0@IAbQ@8KnU z4=>?+cmm(U6ZjsU!1wS3zK3`5J-mzW;az+W@8WxyjqhPLzK7ZP9%kcvcmvn1JtL0=|bW_#U?4 zd)R{SVGF*8CHNke;Cont?_mkPhj;Kjyo2xI9efY(;Cq;b?_n0chgtX@X5o8y9pA(2 z_#R%z_wYKthv)D;JcsY$IeZV#;d}T9-@`}v9zMeN@DaX;arhp_;d>Z|?_nIihZpfZ zyom4NMSKr0;(J(x?_m+Xheh}v7U6q%8{fm*_#WQI_wY8phZ*=DX5f36f$w1kzK2)w zJ-mwV;Z=MOui|@n2H(Ro_#U3Y_wWq9hY#^Re2DMiLwpY(;(M5k?_n~&hspRJCgXc} z8Q;Up_#R%y_wX{lhbQqpJc;k&Nqi4a;(K@x-@|+O9^S+E@E*R0x%eLD;(M5j?_n;! 
zhd1#(yovAOO?(e;;(K@=-^26x9-hbd@I1bUPw+i_g74uId=H<{dq|I`_mCb>?;$;& z-a~pkzK0Tg4<+~>O7K0D;Csl%_mGY6AsgRAHok`{d=FLl9;)y?RN;F_#rKek?;#c6 zLn^+9GJFqZ_#VpeJ(S^lD8%!u2aMEw*br4T^~wG{g}x zu)ni^oHHiY5%GAJh&vkop;Mm`!+MW!#E!I=S*+ds{Z*O`f2fb+u@R0qr!C!b-f!r% z$6IV|t!-?r`}KR&F|uDjdz;2zliB`3>jSP==K95()=-!>U_AGmZ5Lc>Zfw_M=!U`GIVf*D@!1Y<7l^Ptzjx`*QF3%ng} z4d5iCdN~RIx~R9qOYe$Wez7lrFLwR03&g)Z0Q}>;;CGt-a?o|hvwXqd0Di&2U%KPB z{GtQ6^!~rP^%woCz}nEz3w-zgN&wHknO>fKV=wZ%|JqR3=YqANS(mv!9SyNhYh&$x z;UD|P4UZWX=de|VSzieY3$?l49^7xe&+7U_ci@O2102qd!((Ed9lJ$EUf74XHe2Io z#NqMv|FPN!TOYQzwpx1HLc&_u?tkQ}xPO0YM(k~EZDIHH?$tlmG0fS%U0>G+(sxD= zaXMn-Iy$>_?a?Eud$+FLI!8wKxGAzrRMd@^x3#stxA*OXqlXNaw4p27HmwMOD%!#oC1Fh~LYaz^FuFoqTn1HG&sDT_pdnHT3wQbNBzF<)HyQ0191yx}ZGt(j!2h zFMYgzT169y2vc-iJs-Rs|iHo*OMHTI$N?cqHRnRV~#Kjd*1?{3r zTzmqEK91169y23dP0Op$ghXp}4R^6|{>Qaq$hN zqK0-+BQCxPRnRVK#KpIu3fe`DxcD|yLAyv27vEtjl4uu6;-Vc?LAyv27vF^{XctN1 zqCHeWyVxi$zQ5j zB7=63Auc*Y6|{>CanS{;pj~8$i%6(~c2OxVZe%JdX&05^qAOHEyQmZwH$fG&i%M}3 z1y#^4ip51YrlOd3Q7kUHLlv}(VsX&}s-RsIi;J6~3fjd4ad8V%kwCjh5Enm(Drgr8;^J1Qf_9N0E^dP=Xcwj8;&!H@ zly*@nF7ALTXcwj8;ulZ_?V?m%+zD0CE^@`iFPVy5+C{Fo_!U$^yT}z6J)sKPMXtEG z3#y=9>=zezGZp)37yHG>n*e@=A169y2(#6GZnTmAUMY_1?1y#^4 z(#1t@sDgHpE-v~&6|{>Aad9tGQ9-+?5Ep%+3fe`5xVR6hpj}jmi{C*Nw2S59;(n%L zIqhP(xOf1npj|8%7r%!pXcx=H#e+}grrKc*suc99}3{sL9dE>grrf2e|Xks>Z0f+}biTgAlyreZ7YVyn0q2vyK7 zwu%b}R6)DgDlQ&|Drgr4;$jd}Q9!#W5Ep}?3fe`1xQK=-XcqVmIw#x43XZ6|{@p;$j3;LA%&3E*^&}Xcw!*#b23m1?^&$ zxEKjl&@NVqi@!q^w2L$1;vY=K8QR4eaq&;6f_8C6T>J~Fpk1617ypJTXcwc!#ebNJ z(QX%|)UQAqZM8>R2U)}Feg#r!SUmLGgGReya2aiyX1IPSJo2lYM!RKj8Eu9h|GH4rVVk3>pply_kjj2#ZIc=j{Y@|~ew2gAHF^$Tg zZIp|R=~M=7qeyJbfHI0`8%1IxgUX<76p4+QR0eIMNNi+M8MKY#Vq+GRah$esTx?`f z8MKY#Vq-RyLEAVkHs(+nw2fr3F&D~6rfnpPjch7|wvjA0=2026jbyQrLuJr5Hj9n< zP{wB3#%8gROJ&eDHj9k~R0eHhv)EWjWzaVA#l|8iBcHaBFE;Y14BAG%*jP+u&^GeL zMn086+c+#XmOvSYX&Z;dMgf&U+c+#Xo}e;l8;8ZlQYwSCktsHwgfcQ|8<}FGkjkKK zWQvVtR0eG$Q*0Db8MKXEV&f?&V;5~>m)KZNWzaTuiH)bJ4BEynv9W^6plz%a8_z%) zD`^`m#YQoeLEBg^@c+Qw<3M6t1s%Ajp*5F6{Ej19Dn4Pv8|%Ajp* z5F0O08MKWJVq*i9LEBg;HeQ4>7Sc8rij9p_25n=Z*w{p6&^8u|jm=aBZR4QW*aBr7 zq-`7&8(XOi+Qvb#@e-9m+c+pTwow_hjp<_JWhi4hZDYFFD5ElH8`H(cb}ECmFh2%4BEyDv9XKFplzHG8?QqdCukcd#KvwagSK%(Y`j5b&^AtpjXhKbZDW$ycoWK) zMBA7oHmax$+QuZYv6srAZA=my`=|`s#x}9>7L>7#wy{lY?58ql8{5Rj0V;#Gu}y3o zq%vq5OU1_9P{vZ)#!|6SO=ZwFmWqu-R0eHhsn|G7WzaT`ij8-mjH9%TqhjL-l|kD$ zDmLDwGH4q|#l}%8gSIh8Y`h0$%%N?}5gRpB25n=G*f>UI&^G3XjpI}XZDWtvcpu8x zL)+LRHfpI1+QuHS@d1@V+t?#EPEZ-NjWuH9LnvbnZDWntI7wyDHr9xZQ&a|RV~yB2 zO=ZwF&WeqXpp3J$jk99o43$CKI4d?jrZQ+7XT`=@DucE$N^E=rWsIV2j1n91AHpW01YiS#6#YO^^LEBg>HpWsJw2if5BazCWZOjuJ zgim#uVDd6tR&?WzaUJh>a;!25n=C*hr%?XdBzb##AU{J8fgT*hr@` zXdBzb#xyE}wy|AoOs6tv8_UGT3@Bq6ZDX0($e=Q48_UGTOe%x6u}o}aQW><3V`5_# zlyQuoV8r4zW*bW9@$7XEAPg%&<6ztuoB|N?2H^ zZN0^Mzx6(=>jZu3q+Zy$u-O_#oL-uolc;GKzQKOu33SO)H?k z@fGjBmvR+tn^r*gpm_JK6ssr+Yg!S?Tu~+NcNtuTxVQ|esCUu6i9mKzhY-)>pwAyK z@Nuv-fXBgc%lVIkJr?6n0AnZmp^z}k*MD>F{y!zl62Jps$1IEWgad8Dy zLA$6D7hi%ZXcwvC;!36>m3EOTF2bP-+C{3kxC*MEU8IT&8&pBNC=(Z7W-7{P7iHq& zD^LaPqD)+T6{?_Jl!=S0p$ghXp}6=OQ&C8}C=?ghKozu$LUHkRsDgG;C@$LGR6|{>Qaq%svf_70OF1`&_&@Ph1#dnyBB-%xixM&Ae&@Ph1#do0! 
z+C`GMXb)A;E;fpb?=ck{X%`#C#rL5K+Qmk3@dK!WcCk@h{1B?3UF3<2Ynh5X+C`qY z=m1sFF7m|1kDvC zanXsX$e>+hh>Olp1??h3Ty%jdXcrmcA`+^gT~vyT8<~nq+C`O}+I7z!WDK35j zRnRU@ii@8@6|{?!;^Jpe1??h1T-?G`B+xDr#Kq5{3fe`2xVROnpj{-0i`$?I+C{0j zxSgperCpSYi#wnS+C{0j_ytr!yC@YGcS04ki(GN>OQs^1c9APCeg##~E^@_1PpE=+ zkt;6lf+}bi`^ClGOvQfM#eQ*d4^%R&gh-=ceJf?M|&8mpzmk}az`7)R20xp`T}t=7^8FBG%sDgGeT3q~xsTl2cQA+*ts?qYR0OO(G`!w1OgUgU#1vrYzaLeE_%>L^l|kEBCpN}X8MKXcVk42tpl#%cjd4&$4s9bxY$Q<` zw2d6GF`mkxZRCiJWGaKUu}^GFfHL;cHui~)6e@$Zu}^GFq%vq5`^3g1DucF>CN?HR z8ELeQG_jFNWzaU##KsgVgSL?-HqxjJ+D5t9mBV*Mmm*2+b99w2k9pV-A%;+ej80bD@l6+D5Y2$fh!A8_8m09+g4cNERD8R0eHhv)GsqWo)Kx zY!(~2R0eHhv)EWbWzaS@i;aa;25lo>Y%GE@@@X6SVk3{rpl#%fjm1<3Z6jZ7hv_b5O=<+QwcbTTSyrDll9t1TL)Rg>L!JT!BHJTIuGn# zcHmUzj6@z9`6#-XT-4HBOI|K`#9o;#|(>e z*yg$Z^z0p>VWBqH4>!EudY|n~Xb{rlP>+IMqD$;g?HSC_=z52&GhB@u+ z`a0sAcSa9!I%4BGI=gi3(IcvRx31kfM@IIzDY8pc)QvWqwe`KdZyy{zWMFvwx&M^N zw;Z=*zRk77ee>Zes%RHg;^K0sf_70QF0OzoXctxD;!98k?IKlNT**|V(k@cPML1MJ zyGRumS3woDi&Sx8gDPkjW#Zz?Ohp;(qD)+T1*)K3l!=S4LKU=&GI4P=R6)Ba6c=A( zDhg>Ah2r8GsDgG;C@#JZRnRU9#f2TJpk366i*GO$HMENwaq&&4f_70OF1`g-&@O7k z#kZjf+C`GM_zqK%M7u~57ww=5+C`GM_%2jIyGRlj?V$?V#YS=QJ*HwK?P8<2_&!uY zyVxi$egIX_E;fpbA3_zhi#&00EmM(4yT}t49iR%@MV`3$5mZ6D$P*XWK^3%%YH@Kr zQ&CO3s1_GDKozu$YH`sKs-Rs|i;D=Tf_9N1E;=z48MKQGanTv7pj~8$i!M+F?IJ^5 zL_!s`i%M~EBU4dHyQmZwU7-rvMWwj7396u7REmozsDgG;EH1h+6~(lRVsX(Os-RsI zi;Esm1?{3(T-*#*&@N7jiyt!;CutWa#l=sc3fjdK2Gpj{-0i(8nA z1lmP{xcE6#LAyv07q>zcw2K6BaT`=YyC@YGw=)%`w2M-4aR*dEyC@YGzkn)e7p3Cj zPN;%*kt;5K$yDUhE^@`iub>LrMXtE$302T8a>d16PzCK`zqq)Ysn}1u*e@>bfhuSh z`^Cktp$gi?esS>|sDgHpE-rq{RHV}`(#1tDsDgHpE-rdQ6|{?VanT2=pj}jmi+h=h z3fe`5xabR2&@L*(#eGl(?V>_l{0^$1T`U(D_cImCX&1}I#RE_U?P9sO_&roXyI3wR z9)v1r7q#Nz4@^ZZ?V?s({1K|4UDS$;KS33=i&}BboF%>DaixhG37pQ`E zks>boLlv}(6mjtoR6)DgDlXU`oW=d*rLE#(AhV0Dw2Q6c!U0v#F1Ct`hoK7EMS-{& z#8ec}&x-iaq$>bLA%Hj7qL(U?P9mMh+`^t(=K+43nx@T zyVxx*MnDy`i{0Ylaj1fJu}WP0m8n=oyI3VI{svXhE>?+)kx&KgVwJe~J5)itI3q6p z!Bm`~U7QgY|AZ=N7iYx9zn}`*#Tjw&Z>WNHF<3bz&ot z%AjrJh>dYjMh(KhypjT9<_wy{ra zOr$bs8~en@Br1cpktQ}KLm6qbjWn^5N@dVC(!|CTDucF>CN|Qj4BAGy*q91sl+!lK z#YQ@nLE9)78`G!^+D5t9m`-KTHj2c?3@D?BwoxQDGN=sNMv>TY~)iJw2i}JV+oXTn6`0PY!px#w2i}J;|VH*wsBZ&ETuAN8<}F` zNhl+ewvj0|3aJd*MyA+UMrF`8GQ~y_l|kFsB{rUdGIr56c8QJUR0eHhm)Llk%AjrR z5*sV14BEy@vGEL)v68m2Qfw4c8MKX+V&hpVgSN3!Y^^`d+QwE&^8W=jV(~dLE6Sav9XoPpluu!8!u5Aw2gyeV;hx0+n6pkUWPKJ(>A7yjWQ~O zwlQ67Y^O448`H%`Ih8@%*eN!4Kp8t}8#~2D1(iYD*eNz%p)zP2JH^IMDucGMLTtPW zWvrlWtPmTOR0eHhh1htF%Ajqm5F5Lw4BEyCvGF>Tae}sSLTv1&GH4qo#Ks#`25sYn z*w{m5&^9KCjW?l;NwkegVxx-6plwVN8+)k?+QuZYv5(52ZEO=8Z$TN`XdBza#(pY; zwy{lY9H2628{5RjK`Mi`u~cll4P`8)Z7dZV)l>#;W2x9UL}k!6mWqwTR0eJ1sMvT1 z$~a2fI4U-dP#Lt1qhjM-DucFhRBRljGH4rf#KwD2#vIzl9I;VDWzaU}h>c@Z25n=G z*f>sQ&^GpnjrXC9J+zHIVxyMIpl$3C8y`>^w2eJt;{=sK+gKwuK7=yX&^FeHjgwRc zZDWntI7Ma9Hr9xZ(^Lj+HWH}}+QvMwF%HU@N86YuHj=0e+QvMwF`mkxZOjuJ$y5eyW3Skl0A=i@ZR`~r zDO3h+W3SklNM+DA_KJ;3R0eHhirAP8WlW)MOc5KYR0eHhirAP!WzaUJh>bKVgSN3< zY)pkRw$nDYi;Z+DgSN3(7L`HUI3_k`QyH|4V`5_tl|kDWFE-{v8RKaiVCS#$vINPi4?H4vCE=P{twJ#v!p$KxNQ24vCE?s0`Z1 zA+fQP%Ajq`6dO-M88c}cGsQ+Bl~HfQKG^!OwYAmK(-snT8S^){O6n)IkG2l7hSg09 z4TGaPSUR`tUUuMA=8m%!Asu@C(bA$tn8kK&r$I3>gN8UF2KIOMk8{SvIwBtL5^+bv zKXmFdVp#7Hj@Xg*r6JaC&XL0%5%vH3{==i=A_l|^_4>yf{!kysVQMGtg~acs7U)Bi@lAt^}W4s9~?boVEYHH54c{;Rkkawp*Bn3@Rs&g;Y%wf zZfR+|)pm=ujjeURevdjv_Ujkk*53KL2-^+TuzPy<>L2SE=Crr#>zZ(9^bn^bHm;+y zOV=JfqPlnM+O2bBWRII7yF^9ZcwOtdWA+TU+-z*C4~2wT9zWzdzajRc<)PN@7k=L2 zhQ|zxbJz-9f4ZPNG%VD%-eSGqdY{#GhCX#7FYIL4Y>nqUJih)v)}~i`3+8H%cRzEu ziZ)FvpuaR4?|$TP6>XbVK=-(K_Y()JC<$v?5z1UqCGH0fS0OIgrvRtBL|n9lc0s!+ 
z5f`nX3fe`sxMm$yB7$E>gusI8;HqNEH`XK^3%%RB>U0DrgsF;^NCp zMH%g)Ok8{gs-RtziHom76|{>oad9pf_70TF1`*`&@Kwa zg&nG(UDSw+Z!i@#w2K;X@lB|Lc2OfPz6Dj#E^5TZx1kE!MUuGq4pWgtyGRlj?Vt+U zMUuGqE>uCgND>$Ap$gi?Mse{yreY)QVxzeDK2$-w*eEW309DW~Hj0ZMLKU=&JaKU? zQ;|oz$P*VGpbFYWp1AlCR6)DQ6BpM(6|{?LadACUQBAw378f@_6|{?LanTW~pj}jp ziwLNKc99`2Ix!U)w2KUJ(HW|sU1W%hE>H#SB12q6LKU=&N^x-`Q&CB~s1z4np$ghX zrMS2Ws-Rs|ii;?yf_70XF1j%l#k7lJanT*Bpj{M;iylw~?V?y*+zeIFE>4PzA2Ssv zX%{EO#ZRCL+Qmt6@l&XRc5zZ%{0yp~T_lK$TbPOj+C_r6_&HQTyGRfhw?Y-Piv)3T z8&pBNC>0mCGZm$@i&AlM2UJ13C>0mKfGTJgrQ+gFsDgHpD=vP?ROHewa>d23pbFYW zuDIw4RnRVS#l>Av1?^(LxVW3C*iXCIFD~wZDrgt`#l^3o3fjefaq%0df_9NEE`G~Y zq|+|a#YHcuf_9NEE_y>1w2O3c(FdxaT~vsRdzp#~+C_!9=nGZQE-J*ueNYAMqC#B! z4yvGCEEgB|GZo8e7t6)P15gF+V!62ZJyb!vSS~Iegeqtkwc_FrOhqm2qE=k|5vrhF z)QXEgK^3%%T5<7bsDgHpA};zd6)Cif6mjtvsDgHpA};zv6|{>Kaq$pTLA%&0E(S0a zTWJ?t#l=9Vf_AZ0TsWW#+Qn9J@i0_DyC@JBgP4i}+C_o57z|a=E(*j&G*m&mC=eHq zKozu$BjVywrs4?g;)u8y0#(p1j);q)PzCMch`1OARnRW7#6=8Kkwv@65*Ndv3fe`M zxOfbzpj~8%i&&_FcClMr#4#1SX&1Z2g%hfvUF;SYBcKY}#cpx&I8;HqSS2q0%2ceP zU91upe}gJ$7pug@NT`B#u}WP09jc&RoDmoQU@FegF3yOHe?k?si!Q<8S+b)MpGGX8d%0U z`K3!^pp132jdfxpfy$t5tP>k!sSMi2IICO{ecXdC;)MhcZd+t?>ICQ=!+jeTNc5|u&QND~{Ap^P-zMw-}2 zr7~z6X<}mvl|kD`6B}t%25qBUY)pkR%4r+rVk4c(ply_kjcHT{ZKGUlOs6tv8%1Jc z29!}m+b9wn8B_*sqeyJbq%vq5MPeh9%Ajo=7aOynjN`P8<6^pfYG1o5jXLDucF>FE$oI8Tqu0e6f*7WzaVA#l~VPgSL?`Hu9+q+QwnAu>{IE zOxrjtHVUW=+QwnA@dTAY+c+#XmQoqCjZCreB$Sa!+sG6fg;WM@BU5ZFqcUh4nPQ`e z%AjrR5*trJ8M|m3yTrzFDucGMOKdz%WzaTuiH#Lh25n=d*mwrYSV`MhDK?6!4BEy@ zvGFXGLEBga0P;6|ZGH4qM#l|KogSN3yY;2}7Xd4H`#ug~!AZ_EI*w{*C&^8W= zjhCnl+Qvb#v5m^0ZA=#%FGCsAX&ckUMj4es+n6pkwo@6jjp<^eoXVhW>=YY2pp2cg zjh$kng36$6>=YZXP#Lt1onm7rl|kEBAvRuxGFH$wR)~#CDucGMLTtQ7WzaTOh>cxT z25sYn*mxbvI6>PuAvShX8MKWPV&e@egSK%(Z0w;jXd9En#+y*aB-+L#u~9{3&^9KC zjlEO`ZDW$y*hgj1Hnxe4x1fw|w2f_IV?UKa+t?;H4p14ijcsD%AeBMeSSmK&hBB7Y zHkOKwYAS=au~cjvqB3Y3OU1@vDucFhRBXHhWgMk#92FZ!s0`Z1QL*tZl|kD$DmIQ% z8MKW#V&gq1V-9U&j@YQ7GH4rf#KtiygSIh8Y#gUDXd8RP#`{pl9@@qpu~AE9&^Gpn zjSr{{+QuHSae~UAZLAR+A3_;xXd7$9#z`uJwy{QRoT4&l8*9YIX)1%ZaaL@61ZAA1 zZJZSwXQ&L?##yoPF_l5vI4d^JQW><3QDWm0C}R|DW0cs44`F_d$SB&zD6uh$%AjqG z5*wqb4BEz8u`ve9SWDYjD>f3S4BEz8u`!m)plz%b8;Mi~ZDXF;7zbs{qixI+8%b0K zZDXF;7*A!;Hs**>V+gK(xWOinjb&mZlgglN91|O}pp0X*jbmaXi^`yF z91|O}sSMi2F|jd+%AjqG7aMb-jPbOM@nR#J%AjqG7aQ}a4BEzcv5`Y%&^9)SjrmZ< zCfdd(v5`w<&^9)SjRjN&ZDW(zSV(2iHWrJGMNr0K+QwqBkw<0FHWrJG#Z(4uW3kxC zr!r_8hs4GbDC1DQjrP&jLDn#QPIJ7GW0KwVejV z#0(nZh#1)4*+0%16YGe0yi3F#4gb)o&xm2YM>t|f+E<5IyE#V=cSO|x@B0srj*A!& zGt}!JZ}>xf9FL7~#5rxFLTFYI*KY>j6C3aX%8q>2k0R6)Ba6Bl1*D#~aVW#Zy1PzCLxOk8{w zs-RtziHobD3fe`XxcC}VQAoQe6c^V(6|{>&aq)Gif_70TF6>YR?V?6pe1oZ|pcbJMK+C`GMXa`l$E|SE>ccBW}MUuE^4^_}E zHj0byF%=tW7aPUJ_n`{f#YS=Q1E_*_u~A(75UQYE1?{3*Ttq+>w2KUJ(TS{MMX9*>1yn)1C>0lXLKU=&TygPBrXrVikt;5K1y#^4a>YeYsDgHpD=zMWDrgt` z#l_uB#eUkwesOURR6)DgFD`x!RnRW>i;Let6|{?Vaq(NGBAs@TE-rdO6|{?VanT#9 zpk1Vki#|{V?V>_l+{;u{&@L*(MPI0bc2OZN?t?057Zu{-cTffGV!61upQ%_*yI3wR z9)K!n7t6)P@1Y9X#d2}+AXGuSs1+A~U@B^97q#Nzk5C2eqE=k|396u7)QXEgLlv}( z6mijysYsz+q=<{ZKozu$6miiXs-Rt@h>M4y3fje1aWR0Y*h;(DDlP^>6|{@3;=%z{ z&@Q%$i-(~K+C_o57{pW*&@KwZ#bBs{c2OWMqM-`fMS-|@1gfB291$0fG8IQ?7e~a! 
z5U7H7aYS4Ug(_$lN5sW2sDgHpB`#u^iY(eimbe%WRnRW7#KmJ!1??hBT*N{Zw2R&1 zB95upO}p4FE}T#W?P9mM7y(t#E_REH$Ds<^#VT>}SEgbW?P8U<_#0F~yI3VIMnV;| zi&f&{?@$Hp;*7ZX2UBr|c5y~r{1d96U7QgY|AH!L7iYx9zo81+#b|NyAEsio+eIn$ z`!7bzFBFQ0es#oXHw-RAexcAPD#I;<%aC6vG@8nA)4(#;$uATd17)nEZLAX;2~-Ac zW1ZL-OJ&eD)`^WoDucF>BR0lC89B6#9I=r^WzaTq#Kw3kgSL?)Hj=3f+QvSyF#*ch zN88vZHd3ez+QvSyF_FrkZR`^plc)^ZMw-}|3}vLzHqyjKDwRRoND~`Vs0`Xhn%GFA zGH4s+Vq+?lQBK<^7aQqR25qBUY)qpvXdC5XV>*>V+b9wnGoXwj+D4Js$e=Q48%1Jc zCY3?kC=wf)R0eJ1xY(ElWgMq%92Xl|R0eJ1xY(FYWzaT`i;X!{25lo*Y|Mo+l4%>s zVk4W%plu|Jjd@fCZ6jH15jm=_X0hK}9*eo^{QW><3 ze6g_z%E+f}FE$oa8MKXjv5`+@&^8W>jU`aVVcN!Fu~9%}&^8W>jVGuK z+QwnAv6RZ7ZDfj#C!vf?+D4|>D5NrI8<}Eb8I?iX$P^nzR0eHhm)Lj;%GgEQ*d;cW zQyH|4U1H;DDucGMOKhy5GH4qs#l|yG#!A}8O0iK)WzaTOij8Nf4BEy@v9XfMplzHM z8_z)*r)e9f#l|WsgSK&6Y^=YY2sSMi23bFAjl(B-gu|jNAQW><36=LHxDucGMLTv1!GH4qo#K!AT#tGWS39+%8 z%AjqW5F2k$8MKWPVq*`LLED%lHr|9XCebz~iH#~MgSIhAZ0w~nXd9En#y%>8wy{lY zyai=!qit*x8~dpY+Qv4qae&I8ZEO=82dNC&#!|8IHk7fHwy{)fR8twWjiqAa5S2mO zSSmISQyH|4qhjM7DB~z?BQ}mv8MKW#V&gcKLEG3PHr|Ia_Ru!=h>cn*gSN3pY7TVdU25n=l*hr)@XdCmy#yBWr9&KZu*hr!>XdCmy#&{}&wlPm^BvTo* zjlE)H0+g|rwy{@iq)-{OjlE)HB9%eg*ef<3DPm&^ zl|kE>A~w>f4BEzau`w0O*iPHnE;iDs4BEzau`!Lxplxgy8`G%_+Qu@mF$2n2M%!2> zHZrIT+Qu@mF_X%mZ7dTTnN$XC4~1jXWxYwy{`jET%GO8;iw8K9xb+I3zZfKpBVXZM2WJ z4zh+>Ej?|aVeqI9mhNQ-PG#;mTM^Qs@1vF$Ey66eYdZ~!i5WD+5izj8vwxg3Ce{)0 zc$bJf8vdbEpAo})k8s3}w66}ac5{v#?ue-W-}fIL9TzbmW~kRc-tdR|I363}h;!OT zh1_=Kz!5_RIGi1a$HY22c8iL%yS@>lt+kDkA%0icfKydc7rwSp5DFs$2x{N?d|%y z#@!h`#Oa8Q>*(y#wMUPr?%ld}>l_)`+0fg@&zaxw7tb zhuEJAwRXSovlur#W>}oVb|S?3eu(SLoU&NQSgiM1U1#W1XZ6BPhRxP^&cox+{imdj zy{*-9pUr*3?X9hryKEuhE$aSDPuG8GS@-|$+xY(;Y<<{T|NjYV$^4C^lKNX#yOPP} z=dRoK#zuAs@nCu2@k8gfceFf^^%c7QbU}G&Sg38i#d<%ozj&*?$q8z~>?Gd(G~_DU zG_8RC%3i$tVaQdqZCU|6MB?4gLad@BtZ79kb48W7-zIVu;^H!>f_CA$J2%nI-f_8m zREfA~1?_@%ku5G-GZopii)?Wb23625vc*LksDgf8WQz+cR6)C_5*KZmiYnSgmAJSZ zs-Rs|iHj?s3fe`LxcCxOLAyv57gsVBskDn!aS;wx&@NKN#Z^!R?IKlN*q{p9MVYwx zGE-4TyC@SEUx6xU7iHq&t55~)qD)*|4OP%C3dP0On2JK$MWML32CAT46pD+lLlv}( zLUCb-Drgrq;^G@jMGftuMqGRos-Rueh>LGQ6|{>Qaq(@af_9N4F22K5B+)LC#6>%( zf_9N4F1`y@&@Ph1MSG}%cCk@he2=NvNW0i5F1`;{&@MKLiyuH0w2O`6;)hTL?IKTH zT+3AC(Ju1DMF*&Yc9AD8egsv}F7m|1bx;NEqFP*B&s0>?E~>@F4NwK`qFP*Zgeqtk z)#4%os-RtDh>K24MF#C6LtJ!*DrgrO;-U*wLA%Hh7m-i}?V?g#+{jc^(k?2+MOUbT zc2OxVZh|Uk7nR~73aX%86pM>)Ohqy6qF7vXhbm|n#p0p|R6)Ba78f@|6|{?!;^N0l z#Yx)5NpbNLsDgHJQe6BLs-Rt*6c;~(Drgr8;^G#jB7t_1ATE9mRnRUH#Ko;p1??h1 zT-*j#&@M{F#qCT*DeabW-9j6F7}Iyd!P#1#eQ+|Yp8;Dv0q&L2CAT4q>GE+ zG8O5xi*#|(3#y=9q>GE*PzCKGU0n2mDrgrK;^JPWqJnl&Aujqt6|{>Aad971LA$6B z7r%olXcx=H#r;gha@xgmaq$3DLAzKkE`ASH&@PsXiwB_!+C{Cn_ybc>OS`BQ7k`8* zXcx8O;!jWo?V?s({28jCU8IPMeoRFQ?IJ~7`~|9@U8IPM{!j(&B1K$01Xa*3wu*}Z zOvP5(#a3}K5UQYEY!w#{sDgH}Ra`s_RnRU9#Kj<{qJVZ$AT9<&6|{>2aS;tw&@KwZ z#UoG!?c#{Ic$BF)Lc2I3E`~rAw2LF+VklHWyEq~)hCvmyi!5;w!&GF^F0#bMaHxWI zktHr3gDPkjS>hrVs-Ru$78h|$#ctZgZgJs+DrgtG#l;Ayf_AZ6Ts#g{&@NVqi@!1z zt7sRi#KqsB3fjdgaWN9Apk1sI7k`H;XcuS1#Xp#eGqj5{;^LoB1?}RDxcC=TLAy93 zF8&Qw&@M)ci~lecqunk_so#GwT7Df=JoKw0M!R8f8S?9(Mo}4V8C-_^I;hc9hMNYK zu}*#+)EFpZ9c^Qs*hruI-3p^S3cM!DEXr!r_8 zD55fG8@t5DQ&7e(+Qu%iv7E}FZR`>oPg5DRja_161(iYDSSdE1fihOoHdcy_Vk(2S zu~KY2OJ&eDR*H?4R0eJ1wAgqK$~aBiI4w3-Q5m$2(_&*al|kD$EjHFr8MKW=vGF{V zkx1J}6dNT}25lozY^Y)xFsN-+>x&OdB%YQo5yfiM0`z7?*TOvox*urfoE)Nz;34 zjZTz=2!SE@dRG=jKtVu6L_|PEKtw=AMMOYEK}A4BMMObFK}11CLB#JE(axL15PyBV z=JELPS9rw3@cNwdJm<_gpW!6aHj>50Au5Bmkt{Y!sSMgive+o2GH4skV&gEB(M;QD z78^&X4BAGs*f>gM&^DUIMmd#1+lUq$$DoX8+D5e4sGu@v8_{B;lFFcMM2n3oDucFB zBQ}mh88x(x8nIDLWzaTi#Ks9KgSJs4HfpF0+D4ApI0mFxp0#*f>vR 
z&^E%vMgx^W+XxdIjZ_A0qg-rUfHKNy8|7l7iOQgDl#7jvR0eIMTx>K`8MKWIv2h8? z$e?Xxh>gos25lolY_w1rw2ch0(Mn~|HrmC;6)2;fw$Uy&+NccLM!VR!N@dVC+QmjY zl|kEx7aP~0jCk5cyx6!-WzaU_#YP8}LEDHI8=X`JZKGan+<-FbX&d!o<0h3s+o%^C zx2O!-M!neRqB3Y3`C{WXl#x%{$QK)Ts0`XhzS!udGH4t5Vxx!3pl$Stjk{1rA8n&g zZ1hqYw2eNoagWNNZS;waJ}QH@;U_lkLm7Uw4L`Br>%crl#E-V&CpP@34BCdD*zl(^ zXd8uMBLK=Mq-_+6jYU)jZKF_ZET%GO8--#ckjkKKB#MnCP(~tcBT;MwQ5m$2M6nS} zWzaSf#YPB~LEC5$8%v>#2HHl0*jPqo&^8*xMktj*+h`CQVN?cfBSLH}hcY5)8xdk7 zoXVhWM2L+DDucEWAvPkZ4BAGe*jNE&RMIvo#YPmBLEESl8!M>{+D4_=h^8`V8(Csw z6_k-h+sG0dF;oU^BTHbN+Mh9)9Lu|xR8MKWKv9XrQplx)B zjd&`9wh=5g)M1rD9_Pl|kDm z6&o9=4BAGi*hr=_Xd9_wV-u8-O4~>k8!1!%3~@4;UK%^k!(*Pi*>&!0%WQ9p zhnLy)tMRUHKl*0u44(zleav19T{0YulPwE9&8`ps{cKM+Z`U~ZKd!%f3x&HCKDaMmp65ia9KyP94$I=wsnz1d#o1s0bVXIfw3 z9XEH2*~@#3W&FgcQzuQCJaO{42@|HiI$`{zNw16^_TXnub2hv--}tF<*n^RFaQY2q z0!_ig9mOXOPKK{qtmEt8(q83gobvSJ=I!aRz}sxfv@V@q<>=&S+HWv^X#Buv9iagO z`E-L}GCdx1XWxfkMi-lL8N!TKxNMFQ<2L0mXN74-2UL0k-n zDrgr8;=%}3&@Rr4ixEu4dD_K!aq%ovLAy9FE`AfLpk1677rzBn&@RHo#dAzWIPD@_ zTsT7&w2N?Y@jO&Py9gH-Ca8jTQ6VmVo2jUvT~vsR-+?M<7Zu{-ccBW}MTNNdJ*a|q zktr^IpQ*^CU1W-j7oZB-MW(p;1E_*_ktr@*pbFZ>b#d{BOvQED#dUGDaixhD&3aX%8q=<{ZhALOaWR&uh@oA?h>LMh1??h6 zT#Sb*XcsZ!Vggh_yEr8-USTRu(JoGji-}ML?c$WUconLkU7QjZlb{OP#V&C%nW@-C zyVxZzra%?6i(TSkDpWzc*d;DrgDPkjcf`ftG8K1d7k9+P-$50$i#y`t@1Y9X#T{|+ z4^Rc|Vv)FbovBzvyI3SH{t>F6T`Uq8Z$K5ai$&t%O{ju)Q7kUrVk(Mh7scY@ZK#5F zQ7kV0396u76pM>@pbFZ>266GvOvMJ;#RhTlFHi;TVuQGt23625Hi(NKKozu$CUNmY zrlN^<(IhV3g(_$lP2%ETp$ghXleqXdsDgG8B`*G*sfeOoM2U;{pbFYWl(?7igCPP^DHEJ>Cpk3S)7e9t7Xcsrd#eYK;w2Nip zVir@ejCQe1T>KAILAzKcE@nd&w2Nip;uENXc5y^p%wZ~y&@PUMi@8t*?c#{IFhdoz zizDLVQ>cP=kuEOgF%{{wi*#`@AF7~Tq>BqTsDgHpE-pTUDrgsN;^K3rqK$UZCNA8e z3fe`RxcCC9pk1_yiv>^x?IKQGcrX=lw2L@#;R#jHF5<++mrw=mB2HX*K^3%%I&tC6 zRMgQf>coWws-Ruei3=a7f_70SF1~^)Xcu|n;wMZ+9_=DeT>KQOpk3sNi-k}H?IKTH z{0yp~UG$2JpEDJ`w2NMG@e8PecF`*?ehF33E_%hqub>Lrg}=D?HB;f=@1mG`{)N9h zLC6<+bcBCD3@$^SAmm47^vmEf1~5-1~ywvi+@f~XAIMv~YFrZQ+7Nn#^}%AjpDijAdEMk8&bQEV)u zGH4r(Vk4BwplvjYjW8;Mwh<{dmO~klw2ery5l&^$HX_AF1eHPCh!h)p8sD5I0M(J40Ks0`Xhr`T9aWzaS{#YQ}pLE8us8|$Eq5ZXqF*hrud3jDucF>D>imh8MKXDv5`w<&^CI+#vUl6hqlopHu9(p z+D4Dq*h^*5HhRQHK9xb+2oxLppo~D;MxfXzpfYG1fnuYO%Ajooij5*FgSJs3Huggq zCA5tau~AHA&^AiM#sMmWwoxKBN~jFlMzYvA2xTPGHj>50Au5Bmkt{Y!sSMgive+o2 zGH4skV&gEB(M;QD78^&X4BAGs*f>gM&^DUIMmd#1+lUq$$DoX8+D5e4sGu@v8_{B; zlFFcMM2n3oDucFBBQ}mh88x(x8nIDLWzaTi#Ks9KgSJs4HfpF0+D4ApI0mFxp0#*f>vR&^E%vMgx^W+XxdIjZ_A0qg-rUfHKNy8|7l7iOQgDl#7jvR0eIM zTx>K`8MKWIv2h8?$e?Xxh>gos25lolY_w1rw2ch0(Mn~|HrmC;6)2;fw$Uy&+NccL zM!VR!N@dVC+QmjYl|kEx7aP~0jCk5cyx6!-WzaU_#YP8}LEDHI8=X`JZKGan+<-Fb zX&d!o<0h3s+o%^Cx2O!-M!neRqB3Y3`C{WXl#x%{$QK)Ts0`XhzS!udGH4t5Vxx!3 zpl$Stjk{1rA8n&gZ1hqYw2eNoagWNNZS;waJ}QH@;U_lkLm7Uw4L`Br>%crl#E-V& zCpP@34BCdD*zl(^Xd8uMBLK=Mq-_+6jYU)jZKF_ZET%GO8--#ckjkKKB#MnCP(~tc zBT;MwQ5m$2M6nS}WzaSf#YPB~LEC5$8%v>#2HHl0*jPqo&^8*xMktj*+h`CQVN?cf zBSLH}hcY5)8xdk7oXVhWM2L+DDucEWAvPkZ4BAGe*jNE&RMIvo#YPmBLEESl8!M>{ z+D4_=h^8`V8(Csw6_k-h+sG0dF;oU^BTHbN+Mh9)9Lu|xR z8MKWKv9XrQplx)Bjd&`9wh=5g)M1rD9_Pl|kDm6&o9=4BAGi*hr=_Xd9_wV-u8-O4~>k8!1!)kG>f@!)L*CAG6m&mt79V$(DtlX4i-RezvEZx9c2_ zFTQp0M+?m`f9YfPwwQViZ$3BI$9<03GRD)x%Q9y2qzNw8&svQz4mS;(HS2Tp!dbIi zh8u^yKmD!wZtinOer){6`kOs(dd}!*GR$-y>hg?pMs?`np{6%XuiGr*Ji=w%XjjwA zMyGeDzc<^^JOlTdgDQ;8IuZXq@u&Bk1kvvB2AGinlHuTkYuNXeu@sKQw+|v<}aJA$_`$ zFqs|?xU=truO_|bf720}V`P1mXI~id;t()H_NBL|40vDKq}Tkrs0?^t+N9U~Z$laM zHUB2P=6{FEpl#?i|86RSwxQSjd#DWBhFenltEwfZ_;c2zQ4hoQ+3cb^qRjPl|kFkYySRJ25m#H`3FE5^fmt`z2?7& z%AjrNHUGs_25m#H`3F)Nv<8+y%u zDU?B9^Ka5?{>!Kg+J;{952Z3_8+y$@jLM*G=r#Z4P)7eXf9~REi`)+jr!r_8EpiPv 
zg36$6w8;IiNGgN2v0Cnjt$;FC!}p4tR*Q`&DucGMT5PPOGH4sC#YQxhLEAVjHda9y zr)e9f#YPO3LEAVjHda#^w2jkZBbLgbZR{2sYoLtXw2j?jBaX_TZR{2sYpD#{#%{3@ zPi4?Hy2ZviD5IOU(JeL-s0`Xhx7bLeGH4s!Vk3#lplvJ`8|$Hr#k7sZVq*i9LEBg? zHa1cjw2j4LBbmyeZ5$9Ao1lyXw2cE|BZbPKZ5$9AsZ<7SZLAa#tE^p zo64YVoDdtiR0eHhhuGKyW$d7B><}AyR0eHhhuGLlWzaTuh>d(IgSK%?Z0v(FZqYVw ziH!m(gSK%?Y!p%%w2fP0qln6&ZG?)A{ZK|IZ6j1{6jK?rjZm?1fXbk4go=$4DucFh zRBRlCGLF(Vj*5*#R0eJ1sMsi_GH4q|#YP#GLEG3OHV#7>TWA|w#KsXSgSN3nY#gOB zXd7F^Mmd#1+qfz=jzJk$X&YC?Mg^5Y+qfz=DyajeBC_9F%d7wsB8voToBq8~4OU1C>GB zxFnlWl|kDG5E~b%4BAG3*l4CQXdC;*#w93YKW$^b*tkq( z&^GpqjTS0{wy|Gqv{D(gjrC&V3Y4*)wy|Dpv{4zfjrC&VDwRRoST8o(sSMi21+j4r z%D6z=xF9yJQyH|43u2>#%Ajpr5F4FT25n=7*th{@te|bI5F0nC4BEyDv2lyaplz%W z8(mZeZR5DuxD90-r)?Y;8+WJ-+QxCQ(M@I0Hjayp9x8*ju}y5;g)+9$Hnxe4UMhpO zu}y5;qcUh4+r&m6l|kFMAvW$q88>JfH^heTQ0Dm?H)tC-#D*W0LEE??HvFj!+Qw3` z5ddW@rEM$~8;ht6+Qw3`v6#xBZ7dZVfm8-<)T8*9YI3MgX@ZDWnth@vuR8*9YIN-Bf4u|{k}QyH|4Gh$;E zlyQc(aYk&!P#Lt1Gh$;kl|kD$BQ|2G4BEyXv9Siq*hAacBR1lw4BEyXv9XrQpl$3C z8}U>IZR4)kSO;a?rET058wpefZR4)kNTf1o8+XM<5|u&QSRyvoLm5kG8%xB-1}cNL zu|#Zaq%vq5OTLQyH|4RbpcYl|kEBB{p)X4BEy?v9S}%I7!<$DK>Ud8MKX) zVq-UzLEAVfHgc&9+Qv??u?Nc7N!!>dHu9(p+Qv??v6srAZR`{q`BVmN9(A8MKXcV&gcJv5vN} zPHa?D8MKXcV&eprLEBg-HfpF0+QvDtaT3ZnN830jHcn9)w2gCO<202)+c+mSYN-s` z#y+ue2Fln++t?>I>ZlCb#y+uemdc=Q>=PUHR0eJ1zSuYiW!$H2+!q_?sSMi2eX-F% zWzaV6i;YGq1GW+6D>g1L8P->M_JtuY4lxxQj2{|5Fg|a3Zmy5}9J6JNr-zqi%;ZTE zOfMUq-ktv5Y%lWyi_42M&EA%G+}tf@FYhsy@e`*`oit_g#L43(OqlxWgz=Lmy<##M zhrK`jt@&>5bDe!3d`0<+jV3Aswh`qkHZD>bu#G5RvC&Lr&^8Lh#w94DfVNQ}HZD^c zw2cC>(L!a=HVVW>E0sapNDv!Wpo|3CMuOOAqcUh431Z_al|kD`5F71O25sZK*tiB| zoTqJ^7aP~94BEzdvC% zxJPBsHZsLVAC*DdxGpyCLmAg;8`s5#?=#5rH=?f7Hm-{eKPrQ^ab0ZqQyH|4Ah8hu zWdzYSg2cumDucEWBsLaP8MKWcu@OjR&^8W<36tNLTWzaS*i;d+_#%0>ZWw8-XWzaS* zi;V~>gSK&5Y(!ESw2c_Cu>#77p>4#7jVLOEwh<#XR#F+XjTo^JO=ZwFPKk|GP{t|R z#woE8LuJr5PKk}xR0eJ1l-P)+GH4sS#KsyZV;5~>m)MA-GH4sS#Ku}GgSN3tY{XL; zw2eDrV;z)n=b;T3H{(2`lhH8E6kv1;{SB-KUchhl7#l`N}<9*c* zqyFR8kRd~y45pXH&hzk?=WceLJKHkb+v4G6cKvF+>)VgM89T#g!E_(9*Fu+r2IFMQ zLQk{n!+$^9)6Ls;j>i|@y7;4oW|+V9u}nc;uvxP{H!qwu%Xx&$xY4dp_ep`#!yf$1Y0ifC zZaRMA=ycQZQj0a*!R6d>N8^;I5Ayc(Sm13oC0dtWd)(2<(X=1>K!dM!w)1@Br^aCq zHbe&}=9ZrIMIL@EgR2-}TfxBfE$H9>U=`N6&Nk~}7_(1!G}W1_Fxpl)K^6T+X0Ae9 z42LRc7uJc+Hd~Yts-Rt1=Q-O{j9@ArO>@S!DC;z5n~G`zfO~vm(6|@WM>}H#a--Rk@7Zu{-_n->eg>`PT&ARw~rsC1mX3T|k zYO_tn3s42^!aB3rrs5Bv3fe`cxNw0gXcyLb%{J@e51EQb)0#0C)@jW)6@LU(&@QaA znr$ln7^wIRLia%#69!+P)Tv(?w+f@7oR6)D2&Sti$_)DmQc43{&Y*X=9PzCM6 zI+xj|;w7fy(Nt#4g>@>kO~ojvf_7n@$!t^c*H8uR!a9-JreZWyLA$WdW45XI8>ZsX zG-k|&bsDox#mi6y?ZP^X*`{I)R6)D2PGYvHaD^&p7uGqatD66sUrBVV${bQ!y2)pj}ueF56VR z23625tn-#_D*l$K=%2RC%|yK;F8&Uxpk3S%7k>{`&@S$Xi+_MBXcvpb#p_JPqW+J% zV8@F^;^H5n3fjdYaq$LJLAzKaF5ZMHXcxue;w`44n08StF5ZSJXcxue;-8=j+C{Or zcn7MWU2G5+|IAcupj~Vb7yklP&@MKJi)l~=?P7zt_yJTwyJ!*@KV&MJXctZ5;$5hM zcF`m*{uQdAT{MY{e}gJ$7g6Hk-O>yyKsDgHJQ(XKvR6)B~CN5?%70YNB%f!Y1 zKozu$W#VEsR6)B~CN4gKDrgr+#Kjz@;t1{Hh`5*wRnRVuhzm1RLAy92EiYHsDgHpCoX=%ROHbv^2EhYp$ghXp14>DRnRW-#Kq5`3fe`lxcE6! 
z(M!AN6&JsNDrgtI;^LQ31?{3&T>J{Epk4Tji(fMp{{1eBslSl#FOM4WH9#5tFt`kP z)QBII(JzC`kVlR9QyKjDucGsC^nWt8I81!MzOJs%AjpDij7bzgSOEq zHo~Y3+D4?<3DzUKw%BZ4kREdo!DucFB zB{o)48MKWmu@OyW&^EHg#wsWyo3@cHHe#p@+D5k6SWRWnHnPP=ER{jq=oA}kpo~u1 zMyJ?_qcUh4onm7xl|kF+6dUnW25lomY^;MaLTDQyVk3ddplyVRjYKMgwh<3GO>|NWzaU##KtBlBaOC^CN@&24BAGT*hr-^ zXd7u_BaOk-58L_mDSh0~w zWzaTa#YPsDLEDHG8`)F_ZKGCfY=bgtX&bd-V>^{W+o%;AJE#oWMy=S$p)zP2xng4{ zl#xr@$Q2vAs0`XhuGrX3WzaTq#YQfbLEGpN8+)LP9@<8a*vO+YXd69ZV=t9K+vpJ+ z`BVmNBT#JYgE9hX8-Ze@fXbk41d5G9DucEWC^m|y4BAGC*w_zcl+ZRx#6~fdLE9)1 z8waQi+D3`kD4{ZF8_8nhAe50z+ej80ho}tNMzYu_r7~z6$zr37%AjpDi;crjMl)@r zS!^7kGH4skV&f>4LEC5+8|736Z6jK29D_2VX&cdEqk_tyZA6QWN-Bf45iK^Vs0`Xh zjo3I2Wz^6%YQ#o0l|kF65gR9{4BAGG*r=g0Xd5|V<0O=kL)*v^8>grY+D4ApI89~H zHgd#9EtNsr=n@-epo}isMwi&AqcUh4U1H-bl|kF+5*zhY25louY@CBK!e|>|V&goO zLE8uu8x2$jZ6i!++ZIp|RCMturQ7$$vQW><3a0GU90)@nYjTl|kEx7aJW^25lo=Y;;l?w2gYPaRbVzr)|`Wjhj>kZKGan z+@dmQ8}(wNi^`yF)^9Lk8GZA6HT za4Lhg5g|4rs0`XhgxH9rGH4r>Vq*oAQAyjV6dO@g25qBKY^8*|Mgo;V+XxmLiBtw{BUo%CQ5m$2Qn9ff$|$96l!}cFR0eIM zRBUXdGH4s6Vk4Q#plzgzjZIKSDs3ZGY@|>bw2f4;kxFIIHd4h#8kIrYXb~Hmp^O&V zMvK@;r!r_8En;H}l|kER5gQp)25n=t*w_kXtfpq-Hh{$P7hwm(Fu+kWf(VfO2x(Q*yDZG4x`@xkzvRXCxhvwvGY7U=DC|) z=gzjw_O^ISuowl?6uG(!@)S&ve47)`taY+_H^@ho#XMvw=Vu@ zp&8~ceazk#Q>Wq0=jQsj&oNuZczSqQ#!Q|x!KL2dGTb=q{poMbcXOXR@?+yi)?em% z({n~glVPUwP?u+%Gpa)m4>i4EdfhnOG;G$a&&>;G&2k>$GH$f1>1CtSyVKvB?PXqI zad~m3^%dT6bGMkiyvJC^PnyI*Z8;k)4;|E6T_zW1vryB~B z>G6m=`+ogZFx+K?(J<50Kim(#`QY(4RslaSIXDk-c}T&!R%}LaC^Lcu4@dCDg5_0T zZ>S^f_L8F<{#VD3=C*#l_UfwtFRL%!x^!%{qm!el*kJsSSbt4=+xMm;vQn(C^6U#k zUK|2u$SK1uDg!=cXwuuhT~r2q%Fv{@eQ!e<^ljfJz3qF4%AjrNZQpJxgSMf!eS4@3 z+J@fty$fZ~w|$%Rwr?+$LEF&VzW1mM+J@ft?V~bi8+zOKK9oV<_HEMJzP`*GK22R-R0eHBZ~KN)8MF<( z?HfjA&^Gk8?{X-EzU|wjw|&E@4BCd?_Kl!2Xd8OlH4LEG3OHp;0C+QwC}aSY11O53<9HY%tL z+QwC}QAuUcHm-_|Dk_7vu~uvxhcedEHr9%bYAS=au~uxHpfYG1YsE$ll|kD$D>hC- z8E0u5XT`=TDucFhR&1Q6GH4rT#YQcaLEG3XHqJm9dubbc#YP>KLEG3XHqKHRw2i%D zqn^s3ZQK(Z=b(&xw2gaW<2;o?+qfq-8mJ7~#yzpoNM+DA0>s7zC?kNj5g<02s0`Xh zfY`W5WzaSP#6~lfLEG3bHZDOK`)M2d#l~eSgSN3>Y_w1rw2l2@qm{~_ZLAj?SD=jb zw2k#*qm9a-ZLAj?SE&rz#(J^QPG!(GE{Kh5P{sw?#s#r)oywqXTo4-_R0eJ1g4pP! zGH4qs#KsLMV+Czvh1j@BWzaTOh>crR25n=7*yy4%XdB1H#%(C$IBny&*tkPw&^C^X zjczJ~wsBl+^iUbJjcsD%E|js2wy{lY^imnLjcsD%9+g4c*d{jms0`Z14Y6?_%D6$> zxFI%tnFq}^-JosU5F36}25sYp*zl(^Xd6q#MgWwtl(w-{Y%HQOXd6q##$qaiwy{)f z1X3Bajl*JN36yb|wsBZ&1W_5Zjl*Iin987S92OfPR0eHhv)EV)Wo)KxY!(~Ks0`Z1 zX0Z`UWzaS@i;XZUgSK%+Y%GT|uFy8Fh>dV6gSK%+Y(!8Qw2dobBa+IXZLAR+E1--u zw2d`lBZ|tPZLAR+E2#|H#u~8^O=ZwF&WMdwP{tYB#u>2@LuJr5&WMfGR0eJ1jM#{! 
zGH4rn#KsyZV-Ia(kJyN#GH4rn#Ku}GgSN3pY{XL;w2ixBV;z)nm$q?NY$Q+_w2ixB zBazCWZQKkH0#uBlyk;BWzaU3i;WU0gSK%@Y#f9#j?p%biH$>425sY**eInk zXdB1GMj4es+t?~L4nrARX&YO`#t|xmwy{-g9Hla78(YOjIh8@%xF$A^K^fO*8`s1} z1(iYDxF$9#sSMi2HL+1eWzaU(iH+k>#yZ-@IRH&^FGAjnh;HZR4ESsHHM!8~en@87N~PZDXIbIg`8o*rJ7F_R}vFuiPadUyJJv%SmaGH4s;#YP8}LEAVlHae*c z+D5q8xB+E^(>B7z#!V`Nwh=BiZc!Pujc~EiMP<-7D#XTZD5HY5Q6V<&P#Lt13bD~m zWzaS%#6}O5LEFd_8+W0MOxi}K*yyD)Xd9Vg;~tek+sG6feN+Z*aCcMhtBuMr=e;8MKWUv9XfMpl!s6jc6)^wsA^q ztb#I5(Kb$rjTkC}wsA^qtfn$(8>hrZER{jq*d;dBKpDGe8@t3t9F;-a*d;dBQW><3 zU1B4i%AjrB5gY5Ej5`l)xVRbT8J&!VX(mUf8;;052;csX7>=}gDY$BsmSW{!}PjwxM|p|S)ZF1&YI;s!e!iO*Qfia!02HQ ze&#f1!)rE!4gdMSPaK>KhPBoKc5vz5?O>ep^aJPZ>9N4uY}#X8dgpEjCr9jq6TU67 z!%lZ#Hux5Jhwa-UJM1k~2E4=eZIK;z29*Ktuzgp{4tpzeUMhpO(JeOesSMi2 zVzIFg%2-U>SS&UQs0`Z1VzE(3WzaSji;W^GgSK%%Z0v_J4$w9Zh>c<@gSK%%Y#g96 zXd4H_MhTTc+t?^J4ni3lX&W2G#vv+$wy{xclu{YAjg4ZXjLM*GTofCJp^S^Pjf-OA z2$ezGxF|M`QW><3i(;dk%Ajqm6dT8&jFq&Fm13iU%Ajqm6dRRP25n=d*r=j1Xd5TQ z#&Ia)1a0Gl*r=v5Xd5TQ#tABewsAsi)KD3;jU8g+B$Tm(wy{HOoT4&l8#~0tX)1%Z zu|sUsQW><3TVmr3lyQr;aZ7B}Q5m$2TVmrZl|kFMB{u4*4BAGh*f)1t{YvZR4oeXreM`8%M>)MJj`~aa3$HQyH|4En?#m zl(B`ju|;fLrZQ+7Tf{~Sl|kFsA~ss74BEz3v2g{;xJui&DmL1v4BEz3v2m5kplw_g z8|_pEZDXz2xCUjcrERPg8`r4}+QwS3(LrUb8RgSN3kY%GT| zR?s$9h>dV6gSN3kY(!8Qw2c*FBa+IXZ5$UHE1-kH0#$mCsk;hJ;25n=H*vO?aXd8FM#vUl+E^Xtk*vO+YXd8FM#$GCewsBW%425n=L*eInkXd9cvMj4es+qfh)4nrB2Xd9Qr z#t|xmwsA>p9Hla78<)gJIh8@%SS2=&K^d!P8>_@d1(iYDSS2RH&^C68jnh;H zZDXg{sHHM!8@I*A87Sj6ZR57ssG~Ay8@I*ASt^6Jaa(NEQyH|4%_)&DucGMPHc2g8MKXcVxyDFplzHJ8#kbgbFhu32(fXK z$*{i4vo8#JamYwF<2<92(J;;A=mfviD2FK(7rSGR_fIc9v*6X!}PjwxM|p|S)ZF1&YI;s!e!iOSJTTzr+25nH`~j+ zz~b`aOzR80ZB=?Cr%zWVZzi`Cybvo>6OvL9{kK{&W2AMoo+ei z8m!e0F54W9Q=UG?+tXu#x7pO?V7%dA{dKzy#sGuy1EclV9uW6*zoE(Wcp#j8zy2y1 z?lQt?_`uXZ3NFKph98(5oQFL4k7?Hb82aFU&wTv9=NmsYKKwsULrouk_PqD62K+u= zZ;miM9_)zg~OEhYbIR)pz-z z!Ps=rfUdwJ#_wK%bDy{Z=Q1ns$itn@;}r;QyhnzAV|z7gu58;jTap=Se|Yq^OZ$jN zL_htGe0&2IP_qIX2FL+}60;t`7-s8UZ{=e@!zH`Ut;bOC|^I+QJ?B_jh z5bvH(T<@MxyZyY!JwCpFsg>Wp0obhfFQIn(d5`k&wJh1@89KG+gIPO z{VS;5e%|vQzkmP9cO-w?wTquH?Roa|p7%7~e`@Rfr%=27yytz>`@-+o0BpQ3gxc-r zJ@1>|fA;;_e+ISN&wKA6$M>J>K-jGJpEK>f_VeER_mQc@gNsdl090ROKlepXx^CAWylB9NU{k+{s<)r}qOaW# zb`_(4C>G1QUrg28&wbH1-3Ja>-A{5KNY&fVebST8?)z8wk^w_uv*j;=>XYo}KFR3P z-x)NZQ&#!o6MUQcAgbPe?vtLzeXzI>rt0nIKIxn8L%wSxuvzyZRK5M&H#$Aq@|S*B z^tWyKOQHHk`?+uYUf1#ZcMZ#A%U?#-+s}QY&6T|V#?bHD2yC|eP^#X3?i;_?y}JH! 
z36pgnM%CNTeWcC1`~Jq|-*eqBhw3Bk=RWeA@9)FEXMH$TZ$I~ugS@|w7%&tz?jxvr z`?-&N(lxz)29Zy$kEH7D=e}x?^ZXT0u3rJwSJ}^f)i>Qo4G5MKew!^nimJDt`>H|Q zuN*KGHuWp1di%Mr8pM6{lk20Ydi%N0ewyogtK@8F6;z*XKlj;#T-S?v^16?q>h0$~ z`X|K1Ms<)r}>_Obe4j2lXypq^Ux1alvLC*8nJ$c=)gX%-<=RV}|_56eZ zopPZ2wyT;6RK5M&hYWvoO*ipr>l3MZ`?(JpNisL_H$o0i2LLx*C$i;_H&>1 z&FlG_1_VE8%ijdmr`gYa+92=mQw9u$&6b}+)!WZ~+92*zpIo0x)!WZ~+92-Jo?M?s z)!WZ~>*M?Pn+J3X_isDX-3-;Y+RuIKAnwx#jERl=bgJHd?pq(9=WiL%Iq16ILe<;P zed{3ZGX{)_&AQK^>h0$~c98e?Tc2FN6{?T5pZnNB+-E+yK9j1qpZnOakN4kmo^F=B z$IGJX?dLxBYxm!?K3nRuse1dluYG*H-!@vO4k`?>FVn(O&{FV{QiFLfX+epdV8sQ`?>FVn)Cd8+4A$Ldi%K#e0-k2 z?`hZlKBzv>e(nPYabGZCOl*#L1ysHL+y@SFyf1umeIZqEKlg!y9Pf*sTwg@h+s}Q; zAm{n}pIpBmsxPsh`;y1!`Nab|2R-5yQ}y<9UowdM0|UmyX0LaEs<)r}k`a$)EtL%D z9CY26Q1$k6pZv}H_Xodg7;M)4L8v}?fP33NV>gHSBR5ALzNGClZs5964&&zj?^AC6 z=l>tid+~kJ>5=sU$b&zuKScjjka^A2|NBFoZhXr_ zoz5BT{ZyydY<8URV1HtL=+jp%Uq6AUz3Tt+0I5vt(&<%>PL8Ji2IGgw6Qrzr6ua#u zz>49v70_YP_u(lKSTVx3!U5U$`1(FPwc#oXoNOzYXX5!5JlunV72@JIm~~O`@aPFv zhzs_aZ(xPEV2;VY1+PJ+1%13o5En+Mf_8CUT#R5U z&eJZ=i;HKW3fjeaaq*i_1?}RzxcDuof_4!uE}mm5!f6-b;=&oKpk0KEi|3&V+C{jy zFhLcxiwbe^+e}3T?V>_l{0>w>yQmNszYA5+E-J*u??Dx`i%fCx`%Fb9?IKfLyZ}|u zE;7Z%A3znfi%fCh0#(p1u8WI5WGb%HF0PAG$_b`c~lUW6)W7eV6U&!7t0MUc1{302T84vC9DXDSZSE)I!{zkn)e7l*{fUqThM zi$mh#ub>LrMT)q1iK$4TU8IPMQBVc#B1K&MHB>>nND&vKp$gi?WpVL0OvPo|#bt5v zGE_mkxGXNlKozu$%i_Wns-RuOh>NjIMGWmCMqG@8Drgrm;$l2hLA!_%7Zacg+Qlhx z@d{IMigs~ITug*2Xcwo%#j8*S?c$WUm;_bOE_R8F$xOvA+Qlw$F$JoiUF;GUQ=tml z#V&F28dO2MxFat9mZ|9fL;^R5^NzUqJE($oaYtPIJyb!vxFat90ji)~ED{&5GZl;I zMBJG8IjuCg zXc8Cy3RTc9n#9GwK^3%%C~@)cOhpv!B1&Am2UXB6qQu2?sDgG8B`#(_6|{?Laq&J= zQBAw378f(23fe`rxcC67pj}jpi~oQsXcybX#fMDAcG|^uaq$sULA%&4F8&j$pj~Vi z7av0vw2PbK;=h=Ro3x9Y;^Ie81?}RdxcD(tLA$sqF8&*;pj|8z7qgg(WweWB;^Kdx z3fjdoaWNaJpj|8z7oR{Cw2LF+g8h4w-2Lt&;$kkdE{@PHj))60R6)BqA}&6KDrgt! z;$j|Ckxm~k(#6GmsDgHpE-u`l3fe`wxcCgJpk1_yi_e*gHrhp-xNwImXcukb;tQyP zcF`s-7C;rWi#T!N!BoW2F5<+6CsaYZh!YoILKU=&IC0?xRnRW##DzCgQAfL|6Bib! 
zf_70SE_|R0+C`nX_zJ3^UF3<2pD-19w2M4(@l&XRc9AD87D5%Yi#&1hGpK@g(JL-~ z&Q$c$E_%hqFQ5wAMX$K{B~(GX=oJ^gf+}bi{^H`-Ooe~Hi(=~e7yd@u2fp}1kB;!~ zhrwkGx6SaQGWun386#{n{Hct78dye=lWj%-lu<<6C=wfss0`Xhk=R&FWzaT?#6}>M zLEA_Y8%v;!B-%!j*a)IBXd6jlBbdsdZ6t|}5GsSV(I_^SLK%&;jYhGtjLM*GG>VN- zDucGsC^o{V4BAGd*jNr_MA9}Q#YQ-lLEDHF8xd3nZ6i`_L{b^FjViIR0?MeOZB&Vk zC@O=tQ6)B3QW><3DzOnwWzaUV#l|WqBb&C7EjD7P4BAGv*jPb)lgSHVO zHj=0e+D4h!SPx~C(KgD&#s(^bwoxWFHc}b1jWV&3Ol8nE(!|ClC?k!wktQ}$s0`Xh zn%GFCGH4rVVk3>ppl!5@jm=O-D{Z4yY@|~ew2fA=v4zT@ZM2Gw3@U@R5i2&fLK(5N zjaadfNoCMBV#P)ll|kEx6&u-925qBOY;1!vYH1s_Vq-g%LEESm8#|~B+D5I|$e}W5 z8@Xa*CzO#(+sG9gyQmD>My}Y{O=ZwFa>Yh2l|kF+5gU7;j2_xXkJ!kgGH4q;Vq-6r zLEGpN8~IcQZ6i=@?1M4_X&ZrJqkzhwZ3K#qLMnr{5hyl_s0`XhiP+c=Wt7l1O2kGn zl|kDm5gP}n4BAGC*eIbgXdB65;~eVH zP)0LtqgiYmp)zP2&0^yyl|kER78~VM25lo+Y#f6!qG=n^Vxxk}plw8pjY=wmwh=8h zs;CUwMvd4w4rSEPHfqF1HI+fzs1X|{s0`Xhjo7H6GH4q)V&f!~kwe?a5gVtd4BAGH z*f>pP&^B_!MlF>=+vpM-XP}HO+D4bysG~Ay8(m`KER{jq=n@an^jdHQkOl8nE zGQ`FuC?kWmks&rNQyH|446)HdWzaS<#6~NXLEC5-8&{x=cG^a}*l43NXdCTf<0_Rw z+h`XX?NkPBBVKG=gEHc28}VY}I+a1&h!-0jR0eG$UTkzy8MKXhv2g>+sHbhzi;bI9 z25qBWY}}$UXdCrnql?O*ZRCrM+fYV6Z6jZ7+@UgP8~I|Ro64YVkKC#hDWzaVI#Kt`;gSOEpHu|Ux+J>LlxDRFc(Kh_VhOYzj7!g0(hM(B*qcUh4 zeqzI)%Ajo&ij4p$qmZ^yC^i;R8MKW;v9XxSpluY2jX)}cwvi|{mOvSaw2efu5kzIs zHWI}~FqJ{uNE90(R0eIML2N9AG8$+b4Ps*%l|kER5F4RX25qB3Y=lu6w2cU{u^h^X zplw8mjc_W1whVk4T$ zplxJ{ja5)a7HuO-Y{XC*w2dsWv6{-DZDfg!SSo|I(IGb0Kp7phjSjI9M`h49I>g3W zDucGsAvWTv4BAGp*jNW;1k*Nx#YO^^LE8ux8;Mi~Z6jE0BvBc(jZ(3(9?B@CZIp_Q z4O9kgqf~5cq%vq5rD7wQ%Ajqeij7TBMk;M1RcxeC8MKX5v5`t;&^A)VMjDkt+h`FR zo1u)Bhc-sK8Rr?DjD~3@M<@8DMj575Tfb&!3>o5NFugQ(o`=UgceCr< z*_PSf77s78>sRAl-+uJX*cm=c zUgiZBmltPRU+x_@cZ=D}dyHlL#Hmv!O_@A#^0)~TroK91{G>^*j2`yjXHIiAyk@^) z|Ek40(he@|RgT6fPd|d*o*oOl&8AH2(&<%>PL8Ji2IGgu4~*8~88D5Es7zRnRU9#Klmkf_70LE}nrZXcr0MVi;4AK)Xl~7fw(G?IJ;342LRc z7YX9R2vyK7&Wno?OvQQH#d&e@EL1_eI4>@K6RMzHoEI0r1y#^4!o|gNOhq{DB3xWJ zLlv}(aB=ZGR6)B47Z)a|f_70ME`FP-sGwa`h>PEWDrgrK;^KFq3fe`5xcEJ&f_9N9 zE`Fb>$fR9lii;PZ3fe`cxcCF8f_9N9E?l4r+QoHo@rO*sb=t*saq&k`1?}RxxcFnJ zf_8CTT>J@CLAwYN7k|oB1ko;n#KntH1??h8T>KeSLAwYN7bBqx+QlJp@#jp%A=KSOLAyv17cVgtDYT0eaWM+2pk1Vhi@$~{XcsBsVl-4i zySOYa{)VZzOuM)&E?$NzXcw2o#Tcl9c5zu;xIz`Qix_b+mZ^xLUBrlsaZm;AB1T+{ zhbm|nG2&tZR6)BqB`#iJDo)WZPKk?&PzCMcl(={ms-Rt*5*L%83fjdkaWR>x*hRb8 zB`&5w6|{?8;$kXPLA%%`E?$EwXcu?H#osa&cW4)P#Kqr16|{>x;^Ob23fjdTaq$mO z1?^&yxOknZSVX&6Brg6Ds-Rsg5*Kek6|{>*;^IxHf_70XF5Y4)ifI?c;^J+nf_70X zF8&Fspj{M;i+7+3+QkNO@y|@f2HM32aq%xu1?^&kxR?f2&@MKJiyuH0w2LNj@k6Gf ziFVN>F5ZPIXctZ5;$NW(+C`JN_&2D6b`d2m{++3aqFqFZi}#=k+C`MOm=0CYE~3Q6 z45)&3Q7tasXDX^`7uDioCR9PYs1_F=Kozu$YH{%&PzCK`ySVs}sn|}t*e)(Uf+}bi z+r`CyLKU=&?c(BNsDgHJQ(XKPQ*o1aaZ_CU2&$l6+!Pl-hALZ z;^I@Nf_9NEF6J>6>9mV+qSIC0?#RnRWt#Ko6T1??hETzEkhw2L}%;muUk z(Jt!5g$1ghUDSySAE<(MQ710Gf+}bidE(+HOhq2;B2QfW6sn+IHj2c?A}WKnQ6x4NQyH|4 zBC!!jWzaT~#KsaRBZ;<=BsPMm4BAGL*a)UFXd6jlBZSJJZ8VCFrBFsAZKF|aETb}L z8;xQkl**uOG>VNdDucEWDK?fv8IiP&NU;%4WzaSv#YO~`LEDHF8EjCtD8MKXTu@OsU z&^9{7#u_N2leW<*HsYuZ+D50?SW9KlHaf*dJe5J)2oW3Wpo|dOMu^x*pfYG1Az~wu z%Ajq8h>avFgSJs7Hr7KKWweblv9W>5ply_ijg3?WZKF(VBvTo*jWn^b3Cc*LZKR2f z6e@$ZktQ}$sSMgin%GFAGH4sEVq-Iu(MsEB6&vYP25qBNY;2)2XdA6!BZJDIZN!R= ztx!fRZ6j7}WKtQljaadfMP<-7V#P)_l|kF66&u^2j9S`8t=QO3WzaTi#l{XQgSJsC zHgc#8+D5L}*a>Ci(l&C%#x5#@wvj6~c2gO&ja;#jOJ&eDdc?*aD5HnA(IYnUs0`Xh zkJ#8tWzaTy#6~`qLE8uv8~dP)K-xy2*eIYfXd8iIqmas=Z3K#qA}WKnQ6e_>Lm4Hs zjS{g@Ol8nEO2oziDucFBA~s5>4BAGr*fT|d zDucF>BQ{P`8MKWYu~AE9&^Efn#u+H1i?-1vHtMJh+D4byI7?;FHoC+{J(WS*2ooFU zpo}ouMwr+*Pi4?H!o)@cl|kDG6B~_G25qBUY+Qgc%4r+rVxx)5ply_kjf+$UZKGUl 
zG*cP0jSR7I3ChTzZDfdz%Txw!BSUPoP#Lt146)HlWzaU-#l{sVqn)3+lUt%*Px7e+D5$CxK3rzHsZxb2bDqFh!-23R0eIMUToZeGU{m? z^<3KCy9+%AjrZiH$xggSO!(Hts_iezXlgvEl2$JVwNiw&5o> z{HP4thM(B*r!r_8g<>NB$|$656pD>SR0eIMP;4xwGH4rxVk3~splu|IjU`YAqp^OIFMuXT`MrF`88pK8@l|kER5F24s25lok zY%GT|B4`^CVk4Z&plw8mjR-1(wh18My1$T0cBLuHY&wN6qP~Ss1zG3sSMgi zrPzq3GH4rFVq+DQkwx3c5*sm825lotY^6b8NsxTV6l-vWzaT)#YQ5PLE8ux8%b0KZKG6dtcNm6 zX&a?tV*`~z+b9(q8>tN1Myc3HrZQ+7sbXUjl#xo?NEI6?R0eG$RcxeE8MKX5v5`h) z&^B7c#%3s^<)Mv{ZpL{=C!=AS$kM~tOjGF$bVaO0CgXyKQ^E^D} zxtm?*&bG|E`V^$K#7{ zUHs8PGt6K5n7u8gUc;Nu&Gm7gW44U(^zgEbnLKHNi}kZsBaFjM!)DF;+`MqsESKTN zVee0WYrdQN+>sv}KeGO2&zqhzI+_eKork(S?E z-RbYm_A)Q9xV$*i`f~5Mxm(O$-eWA|Cr+I@Y0Bh@lgCY%F!j|5<0nmeW%RHIKXaP1 z;WhgW`#Y`Hk#=y&uXZ#}dHNCb_VifbZ8rH@m+n38=;UbHZ!msn{J>}(o&iJpbR%Ih zJsxmp-><)p7+vNYKQ#_}uq`?`&1I$-9qHedauvgEE6h+u|DKeq7-3uSDOAzFBgHDx zoop-SF%{|k=NMdtxR?)B&@R%&g&R~syGR!opFtJ0i#BobIaAR_yJ!;^?ob8oqD@?U z0aeg0+Qh{IsDgG8CoVjgia6RuoVf6WDrgsR;^IrFf_4!nF1(-$+C`nX@MbFNXcu+j z!U9#$F6zXE4^%P4;@b7m~OkJn-mwR5m&_`PR z`(bbya?i_;%IKHDWyn1*e=4J&29{AI_q+n2j3U}bk=R&7WzaT?#KvMOgSJs5HUg;( z+D4MtSOR4v(KeFAMi7-j+ei`{!BhrqBS~z8P#Lt1MzOIJ%4no*G>VO7R0eIMQEY@# z8MKW?u@OdP&^98)#&RellC}{kHo~b4+D4?CN?%f8ELeQG_jFFWzaU# z#6~KWLEA_Z8);MqZKG9eY=$ygX&bF#Bb~~iZM2GwEmQ_=qg8BVP#Lt1Sh2Ac%7~?H z#EOkfDucEWD>kyI4BAGl*vO_bXdAU+V;huFOWUXw8{4T2+D5I|*g<8`HfqI24wXUM z$Q2tqp^RMGMy}Y{MP<-7a>d4ODucF>D>ibe4BAGI*w_PQ^w2hX#6}*KLEGpN8+)k? z+D4Dq$fq)B8-ZeDACwVD+XxgJ1ylxYBT#G<3K(SFoWzaTC#KwLoqlC6mA~uSt z4BAGC*f>CC&^AiMMhTTc+ej802ce8)+D5Y2I7DU8Hj>3gDV0InNERDqR0eIMS!^7J zGMZ@{&0^ySl|kER78^&Y4BAGs*eItmXdBUD;~10?P1}eT8x>RrZ6jK2R8kqVjcBn^ zMP<-7YQ)BID5Hk9Q6o00sSMgijo3IrWzaTi#6}I3LEFd?8z-TR9NI>X*f>RH&^B_! z#%U^pwvi(?YN-s`Mwi$)17&p4HoC+{9hE`b=n@-esSMgim)NMMGH4rNV&fc?5k}hx z6C3BL4BAGR*l3_KXd7W-qmjy>ZIp|R3s6QmZKGUlG*KC}jdHPZk; zAvP{S85y*V46$*U%Ajpzh>aF1gSL?&Hd?6++D5zBxB_Lg(>B`0MjMqu+h`XXSE&rz zM!VQ(r!r_8@nYi|lo3zch!-2zsSMgiyx8cVGH4s|VxyDFpl#HPjT=x#J#C|2Y}}+W zXdCrn;}(@c+o%^CT~r2bBVTOXhBESL8~I}64wXUM$QK*kR0eG$Uu^VH8MKW)v2hp5 z=%a1)iH%+=gSOEpHttaww2eNo(MM&_HvGiKeJI0^w&5o>e4)pK`_VT1#D*W0LEG>X z8~#)VZKF_Z1V9;uw2eZsv53l`Z4`=)#Z(4uqfl%FQW><3M6t01%1ESbB#Mn7DucF> zC^mwr4BAGb*a)FAXd4Y;V=0u;K-*{#8_TE++D3!e2&FP;8x3M3jLM*GM2L;$P(}o8 zBSLJ1QyH|42(b}CWzaSv#6~2QLEESl8!MoUO4>%H*odMsXd9JcVbWZgSOEjHr7%Z zw2cn25l>~%HiE^*Iw&KUwh=5g5~vK?MzGjOq%vq5!D1tc%AjqOijDP9Mk#HhRBUXZ zGH4s6Vq+tfLE9)58_84#Z6j4|Y=SaUX&b3xBZbPKZKR5gR4RkEkt#OQs0`Xhi`du< zWwg*XTEs>=l|kER5gS{m4BAGE*vOzVXdA1=##Sg}HEmdaw>ziu~KXtgECgqHdcy_3MzxPu~KYQQW><3m13ic%AjqW5F5v#j1#nt z6Jn#9%AjqW5F00`4BEyCu~9>1&^C66jgwHu4%)^Jv2lvZpl$3B8>guZ+QtsCQA=ge zHg1WHGf>7Y+Qu!hQAcIaHg1WHvs4CcHae*c+QwP2aRbUYOWQarHf~ZGw2iZ3;}(@c z+c+yWx~L4=#$K^;8_L*A+t@2M?ob)DjlE){o64YV>=he5R0eJ1p4hkxW!$4}+!GtU zR0eJ1p4hlYWzaV6iH$xggSHVMHts_i0kn+(vEl1z`tY;oy?-^=$9<03GRD)x%Q9y2 zqzPVcjxagBJN>=cUgiaskuT0Pdt2UdbGMkiyvJC^Pn`*iYNoFE$oY8MKZ4 zVq-CtLEG3bHUg;(+QxdZu>{IkPuo~8HiD=O+QxdZ5lm&!Hr9)c5GsSVaY1Y>g)%PC zHZF*bWmE=j5+5PzCKGPF#3F6|{>wapBEW)X^^L#DxW_pk3673m>S0c2OrTzJe-f z7ypm9dyk9q%5dxM?J65)bJi&0*8JP1~55CQZ9*-8gG(jJSYl z?Qs+l5fKqlQ4tkUQ4tYQ5%Gkmh^PpNsCWWYR8&M%RLpOpID5YbhLPX*@w)O?eCRm7 zuKRx8_dWOUnb{^T{++4VM!VQ1F8%|mpj~Ve7Zafh+Ql|;@t;ry?c#yB_%Ej70qx>} zxcG0Vf_CvhT>KAILA!V$F8&v)pj}KB7yrjpOm20thq_NYS>9xG`av13Ft`kPlWh`} z(JF(>kT=;TQyHx^u#DaECR;d^v75HBTWn0BGH4sS#YO~`LEG3ZHl|V;w2ftABNED3 zM%!2>Hl|S-w2ftABZ|tPZ7dTT)2R&F#(A+34P~6CZJZYyGpG#O#(A+3LuJr5&Wnwi zR0eHhw%CY;GG@~@W{ZtkR0eHhw%CZHGH4sK#l~zZgSK&0Y{WwuM`;^J#l{>egSK&0 zY$Q+_w2h-;V=k3J+t?sB=0O=7Xd4^E#(XM+wy{BMBvKi)jSXUB0hK}9xG6RkLK!z{ z8#l$qA}WKnaZ_w0Q5m$2n_^=zl|kE>E;f>(jOnzE>0)CEl|kE>E;g1@8MKY*Vq+PV 
zLEAVWHd3IB1GJ3;Vq-a#LEAVWHd3h!+QtE~v4YB=ZLAg>E1`_lw2jqbV-=M_+gL3& z(x?pD#%i&#n#!PUToxN^pp473jmu(VEtNsrxGXl(sSMi2WwEi2%Ajp55E~g##sb>L z0<33u2>~%Ajpr5F3Z64BEzAv2hs6m`mH3D>jZ$8MKYLVxxr0 zpl!?*8%L=O+Qu=lQ3_=oqiq}$8)Z}mZR42OD5o-L8^^@PF)D+$u}N%HKpC588=J(& zaVmqhu}N%HQW><3O=9B&l|kFMEjCU<8MkR0x5dUODucFhTWnNO8MKYtV&gQGLED%q zHqJm9Gie($#l~4GgSIhKY@DMqXd5%d#(64(wsA;oR6`kuXd8#bMh%rg+c+dPYN-s` z#v!qBfy$t5tP>j-p^SC3jdfz<5|u&QSSL2>s0`Z1I0jK#E##bVr#zV33fXbk4JQN!(R0eJ1q1bpxWzaSziH+}|j7hYO zNn*pf{a25lo#Y|Mi)GHDx`Vq-p)LEFd_8;Mi~Z6i}`ETA%I z8x3M(A(YWT+h`CQi>M6RMuXT$qB3Y34Ps+4l|kEx5*x`-Migx$N^C5lGH4r7Vq+y*U+b9$psZ<7Sqfl(DpfYG1X<}m~l#xc;ND~{Y zs0`Xhn%GFAGH4rVVq-OxLEESk8*8A9I@(5^*jP(t&^GGCMmm*2+o%&8>!=LcMxxlr zfHD$k8;N3LJ(WS*NE92HR0eG$QEY6WGH4r>Vj~O6sHAOFij9p_25qBKY-Cdzw2ex! zv5Cr{ZRCiJ%}_=TZ6il)Y@sq}8#!VlhsvOB;D8plw8mjh$2mZ6iW#?4mMg8xdk-Hg8e25lo%Z0v(FQfV8hVq-s*LEA_b8--K`Z6j4|9H262 z8?|Di2+F9XZPbd5gH#4>qgHGbQyH|4TCs76%Ajo|h>gQgMgnakL2MkMGH4qKVxxr0 zplu|GjiXcsZKGUlltLNhw2gAHQATCZHp<0DIh8@%C>I;Ys0`Xhw%DkEGO}qK*<#~3 zl|kFc78{jR25lo-Y@DDnXd6vp<0O>PMB8W*8>grY+D4PusG>4x8%<*4G?hWyh!GoS zpo|#WMvT}vOJ&eDV#LNdDucEWBR0-c8MKXJu~7|W6w@|}#YPR4LE9)68?{shZKGIh zT%a;&8|h->B9xI%+ejB1m#7TdM!MLjqcUh4>0;wDl|kF67aLcgjC$Hez1X-)WzaV2 z#YR1qLEESo8`r1|+D4MtxDI6`(KeFA#tkZiwvi+@8mJ7~Mv~aLNoCMBs>DVkluY~neX2Sr)`9b zjY(7nZ6jQ4Or|nu8~I`*9LmV2ZRCrMDO3h+BVTMpP#Lt1e6cZ=%Ajqeh>b`nBZan+ zA~vQ`8MKWQu@OaO&^A)U#&jx!woxrMqM?jx+D5h5m_cRGHmb!&43$CKs1_SDsSMgi zyx53^GU6ZG2>i=-}YdV;w=GMuv?H4GRu&1by2-==~?ZcC&S+RYRuSCy}z)1?)sO#Xn(=xZ}%JC^Vz`X zdaf&rDSFoaj{R-RLwfcI?DtBL{Z(7Qhr>P^8R8fh7T9aJ>*+oiGd9c-652Pc{~JSw z3>-Y@jY0he3>fm}fc^sqzWz$L@BhxBJ^kJ?xA5QAxmLPs;I*Uvw!xiW!O-!+<3b(w z^{z*+J?bCeZ_oF$eP;XA=2{+)CGFfw*zN8G@9BK}pUt}8cVYH>XX{~`tLSd6fW8;h z*?QFGDtcHept~#1)`K=yu`|G00UfhGzB#ZGbYEL1_e*eNcagDPkj zOT0Vu`p2fGTJgOTKVOv7UCZUR=BcRnRWhi;LfeDrgt$#YG@g zLA$siE`EopxIw$PAufIws-Rum5Es7(RnRVOh>PEcDrgte#KjMoifOcqY2u<6R6)C# zCNBN}s-Rs=6BoUq3fjefaq)*t#eUkwesS?fPzCK`zqt5gsDgH}UtIhNR6)B~B`#iO zDpt`hR*8!~PzCK`mALpJR6)B~B`#ipDrgs%#KoU76_;oim&C=ZPzCMclDOy#RnRUj ziHjhpf_5=qT)f6q%%@$<7Z?4Y3fjedanT>Dpk2%t7XzRQ+Qo5k@j6p+oOW?sT)Y8Q z&@PUPi#MSP+Qo5kF%YVtU2G8-gP4jfw2Lj`VlY%eyVxQwhCmgxi!I{fEvSNaaaUaY z8B=kWc5zo+{5e!XySOVZ{sO9?UECEHe+gC4E~bc!x0#A5w2LX?;;*0z+Qk%c@eWi$ zyO<&_-i0b?7kk9TdrZY1+QlAm@jg^RyVxTxegsv}F7}9v51|!=MV<#T;?*F;qdjC=(Z-FcoFAi!yOB9IBvQl!=Q^ zp$ghXnYj2zsDgH}QCxh+RBWVOY!nxtLlv}(jpE{;pbFZ>Mse{4R6)DAB`*G%sklYE zxFs(B1*)K3+!7Z*fhuShx5ULyp$gi?3~@1nshB~#m?19y6{?_J%n%nNp$gi?3~})@ zsDgHJP+YK|#K(QE+CgzKirE$iX%`2@g#)UfT^tk_KZh!47i-1EXr^K3#fv2u}EBmKozu$Q{p0&sW?TuI3+H^pbFZ>DRD6Ys-Rt* z5*Ob>6|{?O;^N<#ify!uZQ|lTpbFZ>HgPc#s-RtL6BqvpRnRUTh>QPXDjv`-9*B$o zhALknb&;Ol7pvz%q8r_ZEdi8M|p4yT!&7DucGMTWmy78MKYvVq+?mLEBg+HX@;n zWwecDVq+SWLEBg+HlnBu+Qu@mF`dewZJZYy(NM;D+QxaYF@wsWZJZYyF;oU^Tfwr+hY|N)JXd4^EMk19#+t?sB7El?qjhkX)A(U~G zwsBKzETS@K8#l#95|u&QxG6RkQyH|4>0%=p%9u{um@YP!P#Lt1>0)Ckl|kE>E;g1? 
z8MKW9Vj~60I6&JtAU2j$8MKW9Vk4Expluuw8!M;`+Qw?Ju@cHyP1{&4Hdav?w2jqb zBaOtEmjy#$~aw2Fkch+qf(?)>0X?jmu&qoywqXToxPas0`Z10ZeX@25sYn*vO_bXd5TQ#wIF* zwy{-gY=$zn(l)k=jV)9LZDXt0$e}W58(YQ3Rw{$GaZhaILK*jH8~4PDmHdN8B=K+Q^m$kDucE$Rc!2{GH4r9#l~(bgSN3(Y~({3dubbc z#l{{ggSN3(Y!px#w2i%DV=t9K+gKqs_CXmdXd5fU#(pY;wy{EN6jB+qjTK_!0F^=8 zxF9x)po|N&jSFJqAeBMexF9x)sSMi21+j66%Ajq`6&r`4jJdRpxnko8l|kE>D>h1~ z4BEzAv2m2jpluuz8>LXjG1|s4u~9~4&^C^VjdCi3wsA~s9HTO58=J&N1(dOgwy{ZU z9H%m98=J&NC6z(j*d#ViP#Lt1+hXG+lyRH3aa(MhqB3Y3x5Y*kl|kFMEjCV58MKX= zV&e>yF_X42Q*4~2GH4q!#l|@*gSIhKY@DYuXd8#bMm3aih_-P^Y}8O0w2ebzqn65` zZ5$FC7pM%{#yYWa5z1Ic+gK+yE>Rh@jdfz9j>@2ItP>lTsSMi2HL-C8%D6_`xF$BP zQW><3Yht6G%Ajpr6C2m44BEzGv2h*BSWMekEH-XX8MKYXVxxh|plvJ`8#k#8+QwR0eJ1q1d<&Wjv&9JQN!bs0`Z1L$T39WzaSrij9X<25n=K*!T|0m_*x{BsQE~ znXeI#8DZvjX1F}o64YVl!%RZD5He7Q6e_x zP#Lt160wm$WzaTC#Kv4IgSL?=Hs(PYnY4{eu`!>@plxJ|jYKMgwvj0|7El?qjRvu? z5XxwvZ8V6DMN|fDqd{yWQ5m$22C=c2%AjpTiH&3^BZ{^WB{r5&8MKWkv9XlOplw8n zjb&5@ZKF_Zq(B*kw2eZsv7E}FZ4`=)R4RkEQ7AT6P#Lt1G_kP~%1EPaq=}7HR0eG$ zO>Cr58MKWwv9X%Upl#HNjWtk4-D4ZQ$Jj>O0&IRm?fwDqQ}yv1e5AH{LFvP?u6>4n z?boGCfS>*4*G2~ij~?p?8Z|O(WN27$h$HCR{z30Q`NeA=PZ&3Bf+J*NU|v_-ps=~_POg{_M-g-o4?&}c+Y18pX<4_ETv%YQ;jX9qV9eMsM@VSju>Nlh88UG2pf?8f z8!%wVn*;g}9QgVx-M;@jhxYV)%iOa6%s*g`?caXr*Cn89;Q6D^*amlg8AHbhj|+9! zm%ARl z<@^5``sjaLzdhXj+h5s!ZhQPY0bQ6MN^(9v`1aZvd-Tz+OSX4^Qt9{oBK)$l{S~pa zLqAvMe0;<<(%z%v?cP;a#L^BOADxd6^CsGN20YpB{#xmCyZbZy{LaVc1|#i0yg#EW zV%hG`K<(zd@9f_3KL4!O_Gh7XbKZCQ@cx|F_UE8>bKaM@y?4_}pWeGM?MuvgUt)W5 ztPAL5djQmK&ifMg`TY_6PMqJnL+$3gFY)31(P`k9D`Gj$+n{!H-p@YT)xBTSK3bY~ z+j}tWXU%y(>*M_XydDKC+x>Z{-JJKcKF;sI;!y|izNP)wp>}iLkNEKZ8@dpd_TPZo z&3QlK!~1V~ZU0TE-JJLJKCb6~%WL~@G41Qkd0+2-{r-|x3Ul{ed&@rm64Y+a`+E25 z_uuYh`)@<-=De?WU(W}2vON%LH|PCEXU^}xV?DqB4%2?aoc9}U@4ws0?f$z^yE*SS z9(k8*o8RkT5iG~L--FuCdB5?1 zeZM*H``zCEsFUq~1ht#pFCarpFr*AysvV5f4PI? 
z_KtNgGwrL)d0*w@{(K)@5z9W`2WmIxeU}iLS9!e2+}iLw>&z(|MCZ=Wh=XrTd!?~SkG}opmuZKx3s-q_f`ix zfW`Y;P`f$rciq?Xf7U^=dk3(z{~6PM*PQpeZts8I$@V{o+Rb^t+xGna7ac5uWxM|c z)NaoET_5N7ztn}WwErd4ZqEA@AJ_A5du@N4X`f=w`xGD7^MB>F{jZ>QbKa-4_5P0L z${F9X&%Xn;oAW-!hxd1NAuR3hLha_f@A2XNJ+JNWG3|TIdEev1`}-oR#Ai2Hk`M+k`mz(px+ro!_vJpk57ia1oa2T+^Iqfj z{$VHEKZM%Nd9U%|{qJ-|EZ+YPYB%S-#{KiOf3KB3_xZnv+Rb^NypmuZK=eWIptd%~!e+;#o^Iqom{z)g>KVjO- z%y}<+SZq9pI+xvB&cCZ6jj&+|x?dH6f`M93{M_mX@`#(bM=Dct8 zalh^}ukD{P?HkQ`-{|A~{<+uo&!Kj6-Z!@O{!f}KCp*hN|4&f6Iqw_W-mm-O>Ds@5 z+Rb^t<-_|w>q1zz`#&@7x6FCJ<-_~Gcy0d|sNJ0RTaT{ipYqz~C)R75pFr*Ayx(f; z{inLAy&da*3bmW_KEsFi5xNi-?<1J@8RoptaNn={SFM!I-QK;*e}&r3d7sf4?<1|= zM?&r9ywC7)J^wS^8!Y?$&!Bd5-VZ){{VwxyUuv%G{Fe4Fnf8O`ydU&&ejlX^VQC)) zwVU&P(8v8chu3xo)NaoELHGT-pKGP;>g^rpe-5>q^S;){`F*smh-IH2&9tvI=Y6dY z?_YUs{|ah1=Y6dY?_<2SkAd3Fd0*@H{%>08)BC?c?dH5+_2K<%T@j1-ubK9%=Dc5h zbUiQEHe)qcb^uHJSg75c_p3g~7s|AsGUxr&Bkxank2=hH&ngURH|PD75APFnZ?Nq16QFi;-cR|s zU-zxo_HUtfbKbYLJ-`3E=6XBU{X5gX&7Ajb?)&rq(Lr*1$GZQ3+Rb_2=EM6$T@j1- ziBP*a@7vm*-~Ur{z3ub=3ALN^{=j{H|E~^`+uP^=i)nvg&iex&*Yp3aD`MH_{~KyI z=lwz3pSSrR&Goj={}0q|&iex&=lB2Bg|KY*|ApGkd7td|{(o92o4Y;l|HrgX_Vj+= z&o+hm{`bk2`*i`|f91P>PiIHphCaqN+V=R*1vsH^L7(g?9<29vhHqVpNghjKsh>pE zdzuI9y_s*VpX|MUGF9)Xo~z&O?Jj+5eYp4faHxK_IrqE0o#$KYr+BZQLe-mdzuSlV z2=Da~RJ}R(yM4Hy>b-s{Rd3GyG9T_Ez1K%V^~=n;U*^O8H1GA(sCskmm-%oX<-ItTMnN3dM+TCR4cQ}yQDpZDQD+G8m!_0drMd2{a1`*1(Qd;JWm-kkgM z-2+>9_!y5)+2P$U@hsauhN?H`{=AR#{Y;NFvDD9`>dm>I{m8wqcdS}>k66DzOMNU< zKii!9**@IQ^1kh7QT68B&$isrYu&H<|ux$G{s@|OY*=^7Fv!AklHdSxV{ZSw1 z`*@F~ux$HysQ#!q_ea~_*PG)J>}}7VL)Dvef3&Upgr}@epz6)JKkDOrKi6X^EPMW3 zs@|OY4FONC_2zkW$`0S&wcb3aeuFvp8y=nSpYm#FzFh6hr|QkQ-_X{5Vn;iHWzSEf z>dm>|(BsLTzo4V&_V)Y*RJ}R(H{I?RcD8;YRDaW)`ZhA?Ki!A>CEn|o zQ1#~APxo;@f2sHSrBuB+_tV|y`(+-TK973KsCskm5BPAO;;|+c_bE{Q0dwvT_;A16 zd;M~%-kkdbZLjlFJ%WXRW!tAx_2%3kaJyg8QFMFP`75Y;bM9An=6?Q4x!PF?)vq?^ zezon%YrIvR-S(@fdUNhq`#9gHd8~=WeHvA7&i!g1_w`nLuU}2on{$8J{rY~5N2eU> z+dJy5f$A@tbAQ>#`F^d(nppPywN$-1_m>~Jf67<5>2l0Zr|QkQzwAEWuk+Z-KKJ}} zRJ}R(3w&JXXLziMWzWxm>KB-EzrcOIU+>ZBbK9?{>dm=d(Dph%v!i9OZ2L^A-kkdd zKF;?WJeI;zzk#YZ=l+EIe4pjf>2uG|g6dD0bAQ6^eq(3rH&XTH+@EmY&(H2`eKu8Z z&i#qDKew}~qh+wT-$d1$bHCLxZQuGcTbrMdm>| z+L`P89I4Nt>dm>|+L`z9wo3h0s@|OYdp@r7b3G0MmZM%SRDaK$`+M&5{WgzIpI1BE zsCskm@AF2xdb>M{ZtsrvZmQm#`@L@W`JJuL zhwAs5bHBIk&*$&yXc;Vf{vN8{ocq1**Y^b-MYp%-7f|))-0yX}-`m;xy;Qw9_bYr{ z=kN1a6U&~z52{~b&ixAab^d;jPC3!Fx99Jt>dm=d;dWox+4@4N-kkdtKCbf*c&v$K z&p$xbn{$7`hx;P$^+izq1#|8%__)qL=)L|RRd3Gy1s|{Pi@nzuQ}yQDUub(j|By$p z9Q-Zr4^j2z+|O-$KmYJk)*puI=bCdr*N6Kf9!p`__D86CbMEK*a9`rRzJ#hb=YDQm z_eVW~y}3V1)thsF%*Xk@)MF_udwwZYf6ScwWA5wxGLKH#nou8P3GKha=Sn7(dpCuajM>&`%OO1 z_mv)NV%hU6sd{tnH~DaX!h8J*s@|OY+wS}MCp|iS?)fL7`rGE*-*&q{)!F(}RJ}R( zw|%&;@>mm#`zos5ocr58+@JPdf10W{=YHm+^Zir)#M&9T);j~$&ot+LrrZ5lkFD(U zsCSmCH|Kt4+xPF!b+il?_vfg3bM9yQaDU!oDJ=Epsd{tn4|T?UwYaZ_>JOQ7f5`2= z#$zk{bYDZ&n{$82{XSl8XX|UJdUNg%wY{%*p`&H6xW7Qvn{&U;$Lsrx9!p`VzX;W@ zGv|JtkNf$Tyw_i%>dm=d=i@rR&U<|wRd3GyIv?lz%iilRQ}yQDUvs~{zv9v9^Ne=| zs=sE={WbUb{%U9IuTu5q++S;ZonPP4GFXoJ^;Eq%_t)G%uXnAZ==R>rxklBSbHCW_ z{(5KYuS4~V&ADIf<9vU^V@)i3{tc?$ocqN-&i4)8>l>(gbM6rvqv-aIdW}&1X>;yRKXU(+cQtRx-HlsRy*c-%-LLPPI=k(gsCskmPy4ve zzwNOm7WcQQdUNi#`*?kS$9w%9sD8US_uJdf<=piM_IAv_OVyimzum|AzS(0bEPH-4 zRd3GycK3DuJ&#V=2uF-q3X@Kf9T_U|IlMiEPMV#s@|OYNj}_v=e_3$MbZ_fQB_xXNuXX__Z_2%5~YCD$`-qA8x_WW?DewR7-yL`Bx;;|H#`YBYs zIrqCBUFZMusMfyr)=yuH=xY5=)Cj8HocmpF_ftK#a{KFG+4fVZdUNiV`Z(W5daQ}1 zJ`$>5YR>&qAMU4lub)QMn{&Uk?fZC99>GGuvhAa&dUNiV`nb-Y?y(e>`sq}?Irry$ zxR3T;9}U%?Gw1%C5BD>?*UzBp&AC74zRr*F=oIel-Pwwv>dm=7=fnL>k2SHlpGno5 
zb06o!eXRHTSg1bEocp-8_w#3Y1PcMnwx31Sn{yx6_Iw}rl=X2`y*c-BkK8}ytDM>L zDrYuTZ_a&*5BKpNJA!4;kB90@%(*XV>wb<$us8Q}sCskmOMJYKm*BA!mTjLv)thr) z;(mQU*P~N5d3#qobE$fB?lXP3pXaeAmTf-|s?Rj%KGVl_{(SHC^Qn4s?lXP3PxM}& zNY$HjpV^u7{Q^1PFQDqpxo>EDKYyXe=9a^s#r;C4zQLUP1|RMhc`Svcei2n~&V7UX z^?i~@r)=`}&Ui^wy*c*{K3?B1_E;0kwqH!un{yxK<9wg&y*?SLk22>z>XG}We2uq6 zj`>TddUNih-0qipY-OMBms0iS+(-FvzszGzEbf<4_2%3c`f#7(y*>r1FEr=AuruzL zi~HqNy*c-VZSU))dTeev09f|?RI1*b`$G5Y`xPBUw|BL(f~q&?KCSI_{>qM)!LsdF zLiK6p+^4zca94E{-QKoeMb(>gpVroWT1U%Z+4gBvy*c-3KF;^6J(j{!znZEy=f1A3 z`!ycH-j4Zep!zxw_q{DE4nJofzrjapn-`QmEbH1QY}Dm0T>`oWo+|UV4Gx<)-Vro9 zIC%6}$H?(xLW8~x{^qsEzYH2RGHhgMSa66V=-d85A5R!JY=R?X;>V8A@xkLl9rnep zM=vPz5Ae6|@w0tq`_%TL{RNL7?X|yZbN^&-pwFM|wcBk^ezLdcqaW<8alIL7S)p70 z$j=YEp5^(My7cM-W^}h^G*KCk4=i9t4{OG4D&z4Hg3EBd&u7`G?m!u>Z{2|zV&g8A zLECV>^Jdu^%~S?$!}S)LCF35ILECV>7iP)04`sBzy~WvZy{To%ctB;)HeBycSu$Fv z4BCe4Z6-^`Ln?!|;d;NwlJOms(fSq;XT$Zzk0rzT40BD@K-+M=gJa2-j*ZU5Z zj7TV>^{oWXhU*OkOU5)RgSO#%C%}>sMP<-7T(|Ts8Pll@+J@`iyd@(V%4oga&e?F? zWVd9@pfYG1uDj)yj2J3|w&A+%ZONEPWzaTU_pdD(u~0_qEosh1o!FQ~WzaV2#6}#I zLEESk8?&hl+D4+-h=(!~AHQA0*+>){bEpj3MxxkApfYG1iDF|el|kF66dUuPj7r)@ zrP!EHWzaS%#YQ5PLEESl8w;om+D4ApSO{h0&^B_!#v&?%wvi(?lBf*YMvmB6Ol8nE zn#D#kl+jGvXcikws0`Xhv)EWlWzaU7#l|u!gSHVNHd3IB2--%3*jP?w&^98(Mk6UdPTMFK8(XLh+D5t9$e}W58|7kS zE0sap$QB#9P)0UwBU@~2qcUh4*4LEA_Y8>LW25^W<%Y?M(Mw2dUOQBGyh zHj>1~F)D+$Q6)Aipo}WoMwQq&PG!(Gs>DVml|kF65*sI|4BAGX*f@2Iw1|!KR0eG$Tx?WB z8R4{zaIsNCWzaUl#YQcaLE8uy8yBby+D5+ExCmwB(>C(O#w99)wvjJ3>ZlCbM!wj% zOl8nEQpCm;C?kcoks>y(QW><36tPiHWzaTK#Ktu$gSJsEHm*Y%)wGRjv2laSplwu( zjRq=%woxrMZc-Vvjd-!q2xY|6HsZy`Eh>Yy5id5Hs0`Xhyx6!+WzaTC#l{^dqm;H$ zDmLy?8MKX3vC&Lr&^AiN#yu*7wvi<^?n4<_w2dsW@qo&pZDfg!7Ak|bktH@BQW><3 zMzQf7l+j4rXcQaHXPNKcXryg4ij7HB25qBJY)qyyXdBUDBOJq}W(aWzaSv#YQTXLEDHF8!M;`+QvSy zu@cJIN88vZHdav?w2ggYBaO0X?jg?{}oywqX ztP~sTs0`Z1MX`|qWn83fTofDYsSMi2MX`}dWzaS*ij56a25n=W*vNu1=Fv9hiH(g^ z25n=W*vO_bXdCmy#wIF*woxHAHbWT|w2ca}v4zT@ZB&Sj94dphQ6V<3&0-@L z%GgZX*eo`-Q5m$2&0-^u%Ajp*78~2C4BEyWv9SZnxI^2xBQ|zY8MKW%Vq+JTLEE?^ zHg;1Pw2fG?kq>3W(l%nn#vUqzwh=2f3aAX)My%M_OJ&eD4vUR_P{v`}#$mCspUR+Z z92OgeR0eJ1u-G_2WzaS<#6}U6kwM$Y5E}=n4BAG9*eIqlXd4+~;}DfW+qf<^4nrB& zX&cwY#t|xmwsBo-lu#M8jq76LD3w9mNEREVP)0IsBUx;eQ5m$2WU*0BWzaT~#l|rz zgSK%-Y*auQXJ{K|#Kv(dgSK%-Y*bPiw2d=j;{=sK+t?vCPC^+wXd64k#wjX;wy{HO zR8bkUjU8g+G?hWy_)ct`fik|MZG0y-&Qck)jqk+9IVyv;@txQ>Pi4S1<~YShHIw0b zmgisU(yNPokDu)`+o!e{?Jta)F!oDFSl{u%Az^(74IE&9)fVvKu#ZNDIL3tq_8RU8 z4f|ls*f2*(Xy35@ZwwhSaPXiv2K5^-V91*T`VSoVy4`N;_Q|mKz8W)jR8Qyk|IKlV zjT$NgwlT*kHfpI1*v1^E*tkGt&^C68jf+smPTIy!v2lsYpl$3F8+B9$ZDXg{xJ+fx zHkOEuD^SJ~+Qt&Gah1xTZ7dNR^;8CJV~N##ym(gUX<7oD~}l zR0eJ1tk}3oWzaTeiH$}mV-{^=me{yOWzaTeiH#;IgSIhCY}}?YXd6ew#vLf*2yNqt z*tkn&&^C^Ujbetp)zP2 z)5Jyul|kE>CN`#08MKZ4Vj~jD*iYNoFE*x88MKZ4Vk3&mpl$3I8`G%_+Qure5e;Rm zqHU}a8#Aa3+Qure5kqCrHdcv^nN$XC#PX$-?@$YxbfAEpo<^`n>%ewaY(Ty%$x&-*y zUw&-ppUfuVuj-9N(L`wQFWu78VbD8S}#_Z!~x z*}&&|t}BaieP+fx_P1@_?cGL<_}VdX#E70f0{guZ)VWE4SGs-wcMk38_t7o?pZayK zd41Ra|IN4BX+G;`I{Gck4z{K%voF^@X7e-ufI`3iU%FPfYhc=5KilBW?=7L@gU5wB z>{kl>Y!?gsTzf>bpDo;v7#DH6#zlJA{64j}?r4GCY<@qsckS8b`~MmG=zm31< zU)g?cd;B{AU6@sLK0c%M+8KNFkx;U|`;$t)?-${hmF=&HrQHu|e|#D=(%z%v?cP;a z#L^DwaXvnGnrL@Tv$XEr{#xmCyF+Ke*1;De?T@_s+SOZ^=b1puc7KN1=XaX(?wXr% zg~{RV*$#F9OFQ#}B+i}Yyt{^Np5hSKy4|0%Zg=J))49`}ch`hY`?6beyXn$cwtF{b zpKl%JG2z`c*7Nk+J;1u%1E6+u-d(dm9d7qWzx~TYp=G;whuY0~cMTA=?%hwd16bN^ zP`f$ruBoHe_NO}NS=xIr?X3e!CcL{wm0H`Me;W9f_UEB?bKYI^O&xmw6#Ol z=DfRxqFmcuu5Er*b7g5P?Y|1OoAd6Pq-x#gU+5s&y*!ro7nt_ep(_*KUE^1>VcnIj zE7DU}#M0gqYB%TIHM7-P`Jz^~zap0Q7om1@-e*O>O!K{6BGT_a^3w*R`<_FsqE&3Sjtp?MzOenWF*X)N3QH=uTN-jBGw|7Hit 
z?&Yzx|0dLK&bw>kO*X8%veo-<>55p|e~W2v9gZ{M-8ClHVf#y7+h2m(&3Sjt)^*ta z+g{s$8)`S_ef?9tbZrF?XuWR}2(_E@?waE3aJ&DG*X{m0Ond9Vp9$}-(ZAM>`ny`$ z{)$+Rb-xR>oAd6P7xX-||6T`+U}^t7sNJ0R8}9S_?{|>wULH&P??dh8yt^h7+uyre zCHz2F#M1r)roDBD(S&!`IAe$Hy}Y*fg4)e_cg;YyZs9-B%Jx^pvfcjxYB%TIH8|<2 z{NcvNh}vpJov(+x-uj_SV5n6W(1TnjLQHKhhPkwEq#* zZqB=FuCv4TKla-G$56XD@2+9c)_wj@w6gsbv26E0f!fV^cTJGCU)j1MFYAg}+Fxec zTZc_ecz2DRcG%v>YkME4-JEyVtg34Z%X?Bk)LdB_%Xa@E)NanZYrwU2?|!9&WcTt| z+Fya%&3SiC#k#{<7vWDkSOiP^pEB*O1GFZ*yGChUJM~ju*S%`Ju6q?~H|O0of7`mb z`##MgShjm#sNI}**U+x#J|EPl>rk)>@2>G+If;2L zL_b{!%XaSvwVU(qnknwEy}#G?{!qI)@2)}Q4%-KKZ65%&oAd6PUhc5{b+7HOGwrQ| z&L+IOMxI;Gac^j4`zvDc{sz=;&bw<)y8X)56?s!v#M1sI)NanZYq+|@_JLm82SV-U zyt^i{J8U21wS5rN-a5Q(!n5-hAuR2G#-oRxWcy!1?dH5s@o_!>j;@GhpMM8xH|KqdkL&q&y|%v#wVU(4$L;++t(3!id&j!> znD#y9yzg;)f4`IM??dh8yzhDB{V89|{K)!R=0{MwIq!QOd4J0G53KDUK<(zdFZXdh z|JN$Pa-9EbrhU0N@5|lZ|3)i)9_#)FYB%S7xsUVvP+bwrb{`70oAbWheLeqUt@OFw ze+;#o^IqdVzyEC~+y9nnuQBJnrZe~JKC~X^KZM%Nd9P{v`u*=Z*a0lw{|;(5=e@?q z_59!KLRi}W9%?t|eU97vKWL@TW8FV6?Q_g|pW{Bif7HqLkDzvQ-siaQ&kyTl`!J~8 zocB3x@6UhS!6I0^e+;#o^IrDIyZl_`Cr{V@3DaI?&U=}U`}4zfAuQW{IMi;=dzp{x z`A@yJe+spm^Iq20`#)-~oa`*y{U4!rbKW=lc>VsFE`+81Gp2o`Iqw_Y=l9RGQZ{#c z_bNY!+Rb_2=sv&yQzzU132HazeWQ=-`7d-uEc^TyP`f$rw|sd2XRq!5%(UM!=lz!3 z`@d+VPw)Q%wVU&P%kBLqooxRJ)NaoEEg#;0sw-mg{!^&koc9@RujfZ-uI&7l_7P0` z40GOR__&_`S6v88`@cf%=Dg4FaXmlMYx_v3-JJIsKD__TYx~cjc5~hjK03cY<$IA| zTJKSR$+REz@ZQ_PjD5;JU4QviEI++n_vlMAu9f=#KYVG%de@`Zmiha~&25lopY~)ZGw2d6Gv6af8Z8VFGTqvWNw$Us$wow_hjb^ctM`h49n#IO; zDucEWAvShE84cxT25lokZ0x2oXd4A$BOl5rpluY0jXhKbZKFVJ z6i^wojRLW;m&%}Rq>7DwP(~_kBUNndr!r_8sbZs$%Ajqeij4zQ25qBOY!pEmwX}^| zv2l>fpl#HOjbbW;woxlK4pAAjjRdiA7|KYXZ6t_|BUA=$BSCDGP#Lt11hH|H%AjqO zi;Yq!qnx%;E;h=j4BAGy*eItmXdC5X;~14e+sGCh6;MVtZ6jN39H%m98`)x`lFFcM zWQ&axR0eIMNo<^iGMZ=`O=9B|l|kER5*t-i25qBBY@DVtXd5wN;|!D$L)(ZE8)vBu z+D44nI7emBHe$rac`AdpQ7ks9p^ReMMzPqap)zP2#bTqD%Ajo&i;W9Z25loY=lD@`LvCEu`z|ppl#%fjR-1(wvjJ3rcxQSjTEsF31y_vHd4gKG%AC(ks>yt zs0`XhirAP=WzaUN#YQxgQBB*Z78^6D4BAGu*odJrXdBgHV@2I#EXsDR0eIMRBXgU8Ktz1Qn4|I%AjqOij4#+gSJsBHs(?pw2dsW zF%QbfqHSb}jrmjtZ6ix;BvKi)jV!UTfXbk4G>VOdP(~wdqfu-uqB3Y3jbbB-%AjpD zijBoo25lo+Y$QV&(X@?dv9W~8plw8pjipouZ6jK2ETb}L8%1Ix1<3BC)Z8%Ajqm5gRL^j5V~4HDY5Gl|kEBBR0~g4BEyTv9X%Uplw_c8*8A9 zE3}O(Vq-0pLEE?@HqxmK+Qt>Jv5v~1Z7dWU8BoST+Qve$v7XAHZ7dWUnN$XCW1-mC zKxNQ2PKu2zDB~n;V;hx0+qf?_@~8~j#(lA|oywqXM2d|aP(~ze zBT{Vaq%vq5kz!*Pl|kEx6dSv#4BEy%v5^mD?4xb$6B~P|4BEy%u~9%}&^GpojlEO` zZDXa_*au~-q;0Ge8~dpY+Qv$;QAlObHdcy_15^fWdgH#4>AvWr$4BAG9*tkq(&^E4%jVn;bb=t;tv2m5kplw_i8}(EMZR5JwxJG5rHj>50 zbtog5wvjA0ZcrJtjbyRWKxNQ2lEubNDucFhMr<@f8E0S{HF09&7L(z6mgisU(yNPo zkDu)`+o!e{?Jta)F!oDFSl{u%Az^(74IE&9)fVvKu#ZNDIL3tq_8RU84f|ls*f2*( zXy35@ZwwhSaPXiv2K5^-V91*T`VSoVy4`N;_Q|mKz8W)jR8Qyk|JB5ajV3Aswowx& zHf~cHu#K7$v2h2=D4}hXh>g2c25qB6Y&26Dw2cz6agWNNZDfj#`%p$EZ6i}`JfJdY z8<}FGh0369WQvW4R0eIML2P^nWi-$>8pMY48RYvnY8q%84Ps*wl|kER5F3-J4BAGN z*a(L*qG%gYVq*%GLEDHD8xd3nZ6iu-OrX0hK}9$PpV0p^O~bMvmB6L}k!6a>Pawl|kFc5gUuC z4BAGs*hq#lnrR!&Vq*!FLEC5+8%wDS+D5b3SVm>gHX_7E3X~B++lUYw%c%_7MugZ% zr7~z65n^Knl|kDm5F0C@i~`z5f!J6@WzaSX#6}vGLE9)08>^`d+D59_SOaCG(l%1X z##$I+!R0eIM zTx@KmGH4swVj~yI$fj*%i;Znm25lo-Y~)cHw2f@Bv7O4GZ8V9E9Z*ISZKFwS?4&Yi z8%<(k7nMQVXc8N{sSMgijM&JBGGb^OF=As6l|kEx5gP?m25looZ0w~nXdA_1V;_`J zOxq|H8~dpY+D5V1D5NrI8^vPd0F^=8NEaJLP)0g!BVBA9q%vq5>0+ap%Ajqei;Y86 z25qBWY#fF%>S-JGV&e#vLEESo8zoc*ZKGan9Hla78%biL6v{}VZ6t|}GAe_%kt82pTmqY-DIyaEK%5+x|iCKl#OLA5R!JY=R?XV&Impwn1SN$2)=^ z|M`*Q$Akub8T?JV4}bEYj~&05;QFjK`vbpsJwMSc@RFashpoH4+lUcgJ0^}85!l`R znQgtluzl|Om$^O#&gO6T8{YHTz~_3dD~l<5*8YzDZOcP?_6Y3vN>JxM8t#>D-~XLM 
zd-}a)ZrSs?xK_Gr;3hxY;La~#==k7qp$_|h+WsTJ&$_E5KZks5%$&QA_sTy0zuZ+O z-mZHTaIMt;|Dj;L>(OgV{R8~5Px*7|pzGSM$O^fhh5fKXrw+QNQyK8dz^Q|->!=L) zWZ=|6*9<6w9&~l;pzC@ngSMfAu9;K@Z9@lLH&7Y04IOmNf->kqSEmlTZlp438#?Hk zO=ZwFbkKDZl|kFkLD$Vt20iHN)IrxRR0eHB2VHZh4BCbcx^AU1Xd61{nhRyngRV{; zblpZ}&^C0?HIK@mZRnuub}ECmp@XhFpbUD@)v1H7JE;uXh7P*!qB3Y3I_SEa%AjrN zpld#qK@Yk*brpC$wxNTrrBDVv=<3u#*D@-DwxNTryxQyH|4YOxUwWmMBPs>Q|( zDucFBEjD7P4BAGu*qBLW&^F@5Ml6&OPuqwW8?&ek+D5$Ch@&!S8}VXeHkCo!C>0y= zP(~?jqf~6np)zP2rD7w2%AjqOijBEc25lotY|Mi)vS=GwVq-p)LEFd@8;Mi~Z6ix; zETA%I8;xRPA(YWb+h`OUi>M6RMx)qBqB3Y3jbdXll|kEx78}V>Ml@|BT5K$#GH4so zVq+y*U+b9wnsZ<7SqeyJ5pfYG1YsAJ%C}Rz6 zV~yBYMP<-7)`*QXDucGMMr^F6GH4rD#KsyZ;|gu#ir837WzaURh>dhAgSK%+Y^_Is0`XhuGrWNW#rN}a>d3LDucF>D>ibd4BAGn*w{*C&^GRija(?>K5gT^ z*w{v8&^GRijXWxYwsBu+lUk!JE;uXMx@x-MP<-7BE`mTDucGM zPi*8v8T)7(`^3f`DucGMPiz!W8MKXkVq-6rLEBgcPxqk^_kAvVgW4BAG8*eItmXd4w`;~14e+t@5N zDxi$bw2jST<2aQ;+t@5NDyaAvP{k8MKY-V&e*w zahoY-ihGGH4uabn{(l>ytRDG?iY zpo|jQMv2(COJ&eDO2kGpl|kDm5gYfY4BAGf*tid6WYRV=#l{0FgSL?=Hd?3*+D4|> zct~Z?HX6jncTh$HZKFYKIG;hjf1{>>w$UIqCQ%u*jRvtXnaZGTM2U@XC?krt5hXUJ zP#Lt1D6tViWzaUF#Ku%AgSJs9HX@;nLfS^5*qBCT&^8LiMiiAn+b9$p)2R&FMw-}& zhBDG<8);%=29-hEND~_|R0eG$O>E4hGH4rhVj~vHsH1JviH%uQ25qBGY{XF+w2eBk zF`LSuZ6u0~cqk*0wvi|{=1>{5jYP4LKxNQ262-<`DucFBDK_Ro8I`n+O0hAY%Ajpj zij71ngSJsAHWpABw2d6Gu@K70p>5=djYU)jZ6il)BvBc(jU2JDn987SG>eU7D5IIS z(JVHWP#Lt1X0fr9%AjpDi;ZPe25lokY@|RL5wwj6v9X-Wplw8mjZ`XwwhAU3k84BAG5*w{p6&^F4&#%3s^oVHOeHnvb1w2gAHkwazBHp<1uRw{$Gku5fI zp^R+WMz+}4MrF`8vc*Ool|kFc78~2C4BAGM*w_JOG|@Ji#Kul4gSOEmHg-`Nw2daQ zv75@EZN!L;d?+J^wh<#X_D~tLjTo^}KxNQ2V#LN?DucFBEH?H*8O5}XVzIHG%Ajo& zi;Y4mgSJsDHV#l3w2gGJQ3PeA(>Bt@#z88BwvjG2im43RM!MKIL}k!6>cz%kD5IXX zQ7<-*P#Lt1da+SLWzaV2#l}%8gSL?*HcFw4B-%!j*eIhiXd6jlqnyg1Z6t|}V^juh zqe^U4Kp9n!ZS)>v8*K})`3<%E2f$C&$8YeF+U5nN56imt`JbQrb?Fk|XMg#%(ZRu^ z$2x*WjSL$Z8WtSl2>P~v(ECq*@!H1|#toa`2$>kTrK@dF*u?RUpvQlHb!upO64hic!XyAarOMdnqw(jVEq4~Km;GQ=@1EU?#b*VBD4W^9-vB(!f>|2KvV88~>*8-w}{7%=3`0sRLKeEpSf z-~XLMd-}a)ZrShDxmLPs;I&eJ+u+WxVCeYZaiI?Tde@`Zmih|)H%_$U6IeF zaXkzBS$|HQ6HTWw;A5#%=S0_08St^xsdJ(kPzF6G>eM;W^;8CJL+3;@sSMhN&WUcI zGH4q*Cz=Ii&~u_rofF+iWzaTsPBfd!pl#@!=q4(IwxM&Po1qMPPSmM$qFbm8+J?@F z=1>{54V@F+N@dVCbWSuE%An^&ojNDFjmn^H=$vRCl|kFkInnJ@25m#8MFIQW>-j zofF*$Wzch?PMs6oPi4?HbWXI8%AjrNoag~6gSMe_qD4>!JtykaInjeu25m#v`q zp$vLX)Twi#r>G3thR%srQ5m!iofADxWzaTsPV@|vLC=Xgbx!mwl|kFkIni@e25m#< zM9)(hv<;mTt%fq_IZ>z1iPlgVv<;mTt)(((8#*U?fy$t5=$z<9D1)98b?ThxB`Slq zp>v{jR0eHB=R_}48MFvmXWQO_{l|kF6k{RkIDucFB zB{S5wsSMgip3G3+fim*o&lhm!iH*Bd25loxY&26Dw2eHmagWNNZM2Au`%p#;ZKFkO zJfJdY8!cj^h0369w1|y|R0eG$Tx@&?WrWiaN0(=*qB6R&^E%w#$+mk zwvjJ3!l8_O+D5+Em_lXHHuA+r1eHPC$QK(^sSMgiir9#RGE!(8DPm(9l|kD`5gSod z25lonY)q#zXdBgHBO1!6rfpP5id4o zQ5m$2c(D;jWzaU_#l~zZgSJsBHsYa-QrbqT*qB3Q&^AiNMgo;V+b9(qbEyp4MwZx^ z2W4c@HnPOVd@6&sktH?~sSMgime^Q8WzaSn#l}J?qmj1JC^i;R8MKW?v5`b&&^8*y z#$qaiwh=8hlA(-f+D5e4SVCpcHloGGQYwSC5iK^BQ5m$2BC(MIWfajiip0ipDucFB zBsNm14BAGK*jPbj&^FeHjg?Tw8rsGhv9XHEplz%X8);MqZDWntSWRWnHm-<`HBiPC z+Qt>Jv6jl9ZCnu>=~M=7Ue8MKW^v9XKFplw8ojonlRZDXI<$cHla(KhypjXhKbZDXI^Jp9M#KsXSgSIhGY?M$Lw2gUU<0zFu+o%v5rBFr%ZKFbL zlu;S9jS8_*PG!(GD#XSyDucGMS!`558JlSvo5jX)DucGMS!`5N8MKYfV&eprLEE?^ zHcmnrcW4`T#KtKqgSK%;Y*bMhw2eDr<202)+lUn#XP}H&+D5F{I7?;FHe$ubIVyv; z5i2&%QyH|4!(yWv$~a8hI4m}5s0`Z1VX;w5WzaSbi;W9Z25lolY+QsgGH4qaV&f8( zLEFd>8+B9$Z6iZ$T&6N;8`s6g6)596ZR5JwxJqTvHm-|}dMbmqab0X&qcUh4$ztO= zl#xu^NERD6s0`Xhve;;#GH4sgV&f*2LEAVZHX5OfGq8=CII(ey$#6Z(^DlMj)dl++ zk(xNM(L`myHfrL;#%(GCwoy|eHts+fCA5tav2mBmply_hjbh={j0W09gV=CBgM9x+O#^MCL2OK- zGH4qOVq-FuLEDHD8{tq!6m26)Y)qjtXd6*tBZA7HZA6KUsZ<7Sqfl%_LK%g$jY6?8 
zjmn^H6pD=~DucFBC^n{38MKWwu@Mbrq|r9g#KsINgSL?-He#p@+D4k#m`P>OHtNJi zER<14+o%&8v#1Q(MxEG*qcUh4bz);Sl|kD`6dUnSMj~w^QEbeiGH4r#Vk3ddplu|I zjk#0?ZKG0b%!4v2X&aSdV?LEZ+o%*9iBtw{qf%@vpfYG1IbvfWl#xT*$PpWhs0`Xh zj@U?|GH4q)Vq-CtLEC5+8_7^cGi{?;Y%HNNXdBI9V=0wE+h`UW%cu<6MugZ%fifa! z8xdk-Ih8@%h!7j8R0eG$LTs#{GH4qGVq+zgQ9#=$5F4wg4BAG4*hr%?Xd4A$V>OjQ z+ej4~YoLr&+D59_SW9KlHd4h#I+a1&NEI9Fs0`Xht=PzbGHPiXwPIsEl|kF66&smU z25qBOY;2%1Xd4M)BMZt%plu|Gjg3?WZ6iT!WK$WmjRdi=iOQgDl#7kcP)0dzqg-rk zp)zP20+Y@%1Ebeq>GJ%R0eG$ zU2GIn8MKXbv2lpXpl#HPjl)nzJ#C|2Y#gC7XdCrnqlC(!ZPbg6qf`cMBS~zOLK#W4 zjU=&AMrF`8lEg+il|kD`5*x>;4BAGO*rGWeTzAO7S)A3J_A!4Vo}pVal;7e-AO`=ukS@A%-5 zu)c!^4hXFGv-hxdw|5&c;%mpm5hDV-+q!)+?7gqXj2+ec3)|eOk`^;8CJL#IVEsSMhNPK$1!GH4q*Et&;o(9@z$ofh3lWzaTsS~Q!= zpl#^1=q4(IwxQFao1qMPTGXl2qFbm8+J;Vx=1>{54V@ORe25m#9MR!0M^t7l`r$u*C8MF-jofh2(Wzf^2PMsFrPi4?HbXv5K%AjrN zwCDjUgSMg5qD4>!JuT|gY0-mJ25m#9MT@Bn+J;Vx9-=a68#*m|7|NihMV&e=dW6cL zZRoUU36(+H&}q@5R0eHBr$tMl40>ABsneomR0eHBr$x)D4BCcHiyosgXd5~$S^;Iy z)1pqD7Cla7&^B~hw35o8ZRoV<2`YoOq0^!#p$vLj)Tz^=r>G3thE9uCQ5m!iofbV! zWzaTsTJ#K*K~IZ1bz1Z+l|kFkY0-0325m#9MbA?iv<;mWt%fq_X;G(6i`GyXv<;mW zt)(((8#*m|fy$t5=(OlXD1)9Bb?UU}B`Slqq0^#uR0eHBr$sMQ8MFvmXWRm(8l|kF6l1b_&DucFBC6m;*sSMgio=j5Tfim*oPZ@CLiH*Bd z25loxY&26Dw2eHmagWNNZM2Au`%p#;ZKFkOJfJdY8!cj^h0369w1|y|R0eG$Tx@&? zWrWiaN0(=*qB6R&^E%w#$+mkwvjJ3!l8_O+D5+Em_lXHHuA+r1eHPC z$QK(^sSMgiir9#RGE!(8DPm(9l|kD`5gSod25lonY)q#zXdBgHBO1!6rfpP5id4oQ5m$2c(D;jWzaU_#l~zZgSJsBHsYa- zQrbqT*qB3Q&^AiNMgo;V+b9(qbEyp4MwZx^2W4c@HnPOVd@6&sktH?~sSMgime^Q8 zWzaSn#l}J?qmj1JC^i;R8MKW?v5`b&&^8*y#$qaiwh=8hlA(-f+D5e4SVCpcHloGG zQYwSC5iK^BQ5m$2BC(MIWfajiip0ipDucFBBsNm14BAGK*jPbj&^FeHjg?Tw8rsGh zv9XHEplz%X8);MqZDWntSWRWnHm-<`HBiPC+Qt>Jv6jl9ZCnu>=~M=7Ue8MKW^v9XKFplw8ojonlRZDXI< z$cHla(KhypjXhKbZDXI^Jp9M#KsXS zgSIhGY?M$Lw2gUU<0zFu+o%v5rBFr%ZKFbLlu;S9jS8_*PG!(GD#XSyDucGMS!`55 z8JlSvo5jX)DucGMS!`5N8MKYfV&eprLEE?^HcmnrcW4`T#KtKqgSK%;Y*bMhw2eDr z<202)+lUn#XP}H&+D5F{I7?;FHe$ubIVyv;5i2&%QyH|4!(yWv$~a8hI4m}5s0`Z1 zVX;w5WzaSbi;W9Z25lolY+QsgGH4qaV&f8(LEFd>8+B9$Z6iZ$T&6N;8`s6g6)596 zZR5JwxJqTvHm-|}dMbmqab0X&qcUh4$ztO=l#xu^NERD6s0`Xhve;;#GH4sgV&f*2 zLEAVZHX5OfGq8=CII(ey$#6Z(^DlMj)dl++k(xNM(L`myHfrL;#%(GCwoy|eHts+f zCA5tav2mBmply_hjbh={j0W09gV=CBgM9x+O#^MCL2OK-GH4qOVq-FuLEDHD8{tq!6m26)Y)qjt zXd6*tBZA7HZA6KUsZ<7Sqfl%_LK%g$jY6?8jmn^H6pD=~DucFBC^n{38MKWwu@Mbr zq|r9g#KsINgSL?-He#p@+D4k#m`P>OHtNJiER<14+o%&8v#1Q(MxEG*qcUh4bz);S zl|kD`6dUnSMj~w^QEbeiGH4r#Vk3ddplu|Ijk#0?ZKG0b%!4v2X&aSdV?LEZ+o%*9 ziBtw{qf%@vpfYG1IbvfWl#xT*$PpWhs0`Xhj@U?|GH4q)Vq-CtLEC5+8_7^cGi{?; zY%HNNXdBI9V=0wE+h`UW%cu<6MugZ%fifa!8xdk-Ih8@%h!7j8R0eG$LTs#{GH4qG zVq+zgQ9#=$5F4wg4BAG4*hr%?Xd4A$V>OjQ+ej4~YoLr&+D59_SW9KlHd4h#I+a1& zNEI9Fs0`Xht=PzbGHPiXwPIsEl|kF66&smU25qBOY;2%1Xd4M)BMZt%plu|Gjg3?W zZ6iT!WK$WmjRdi=iOQgDl#7kcP)0dzqg-rkp)zP20+Y@%1Ebeq>GJ%R0eG$U2GIn8MKXbv2lpXpl#HPjl)nzJ#C|2 zY#gC7XdCrnqlC(!ZPbg6qf`cMBS~zOLK#W4jU=&AMrF`8lEg+il|kD`5*x>;4BAGO z*r9Ho*}xF)*X6ZBW?6@s6O!e}3fnF`+?U27lA; z!=F6pW5+KhI6}khjehUGFlxfsFCAfh#|MXm^&K>DK;Rj_!0xtgpA38Nt1)9o_5Q;4 zx$7VEqWuM%zuj+m&u0Ul>$$Ehrs!GwJNCD2-R<2*jQH9yam0w8Jp%i^5@dhX7VzP) zk4A<##)Sp;8t!_A55|lQbA*KU4eS5LkRbyH4|-!zzX1b=yg8u%z=5y7((U`db7)V$ zw=63Se<0t_x(4`d{eFSF24?x&26uklLdOS>3w79UceORT{wjO3pDoo@BHb~>5qwE1@0 zj+vyHPA}Gpd^65C>IhDoUZyA_Dk3T(A|fWlpg}}ML`1xyq9URqA|l=p5fM>Q5fS~J z=<&?{>~q0SaKUZS#{XF}%pS|}v`!VgrlOuJgu^;(nWG6+?5$n&7r}QeEy{HzEy}~MFTypO;pcfb zdjRU5E)PT2N5)uAVe2EADQtgwvE!RUvi`}*=joZn+gKPGVn8hQg;c%m^h4GUj`2os z2o>!+_gd;VQ1v#xV7=!B^1w@scT9?GV`8ZCej`C6#o$gJxU>P0D+;5`l&AIoylH|b5d z_qkK?7minz! 
zy*c-u7ykoiddH;HHYS$(QmEd$OMnUYp8Wzs)^7`5zm2Ll=ial;fT!N+*@E{#D3iOK zGOFI3d(Z9!-n8wuSq^+iSdRSdRJ}R(o=pt`XL`q^+%_hb`f{k=y90s=_ny5Fyq!C2 zoq-3%Qon<$H|O57g@U)fBHa24s@|M?&&~_p`kmp{@1*L@x%X_?5cm-Hj>#_Dm{^Yd zT~NJucLx*hJ^MU_tlu5Hem7Nb&b?=Q2v7Y0*ZV!P-tVF6&AIpNDiLz-_XeN)y;Qw9 z_nyrt20r(dGWV5Gy?5sd6Yf2GScIJWeZl8`A60M8y=Uu-koEh6*YBt5&AIpNm=Uu6 zK=AqlRJ}R(o{cu_k8`SQ!7@6Q^}Y(K_wLGJ!o6pI4)1l%!BEk@5wg@Dr0UJN_iXDS zbL-3Wj>#e0m{{r$QT68Bdv*f}S${Zq{b8!!oO{nEBHmf9wsi&`6wBOKL-pPrM@+c) z>^J5vtytd(W07fhXBJCN;J(vCMr9Rd3F{XQz{p^+$u(AEoNex%X_4Vn5B- z+Ja?tEOTEA)q8hSG2z~`uZq;!hvJxRC@l5IsCskmJ=?K(g&((d1|Aek{c)<^oO{nM zE`c+>V{*baCYJgWRJ}R(p3PspoprX(z=L9`uY>BnJH?oA@7XiPTYoa#`jb?>IrpBe zW&%&LcTDPSV`7>6daB->d(RFv-p*6D&cK6WsXs;4n{)5k*v4Dm5N>?~RPWsd$Ao*& zemLIx)8W>irs~bP_iUpR_z?GwNuzB{EJuDLRd3F{XZIa%=NVgP;6bs}pP}l_x%X_^ z6FAd5CQY_6vD7y~_1+zROt|;#^<%%ZKWht?(XrH@rRvSO_iPa)b@riXwhe`)zL}~w z=ialkkoOorXX^|+D3M*c z-kf{S_D9}XzG&+VJSdjAzev@abMM(TDR8EDOfK2R#8Q8WsyFA}v)PjUG~Z$imeH}) zw?OsYotaFy_w3Q+o#o4+qJ1M|slQCsn{)5k+R0nr8g6|nRd3F{XGbV+{grU*uTb^o z+k~(K=l<*cR)1ZzQX7Jez^7bsd{tnD;(VS+Q!7RG(MPB;uMFpT z{_H60e!8=%dUNh8eV^kcg**32RJ}R(mHyZHb3zS+#r+(r-kke9pZnxc(SfdZlA-!M zbMEsTuJh;G#>6uBbE$fB?(-b(@8<=tpGVc3bD!t$e!eYOmOjhe&!_6mxo`2gPYD$r z$bAY_-(t>vOE~NO0$J}DQ1#~Aw>Y>@wH*~Md-Jb3+bs@|OY;&494%aXHR7FBP~eX;*~zrr@% z!Pff~RJ}R(-M;Jm>`>8x&U)EUeYZLH-M;JmmEqQ}r0UJN?{>J(&#{e(<(!{G)thtQ z?OX3x**b-Lpd)`3Rd3FHio<%JYa0{G+~-2|Ddya#_+_)cYIE+Z9j^1g3|{{wRd3FH zwa@(;Tc@1t0=Zv9)thr)?OX4^3b+0%s@|OY0{{1L^Fs}T<*b(v)fbp^U*NyquN`3h zTB_ch`vQmS{B^dWu+05Bs@|OY0*Cef>)`cYQ}yQDclhq_3v8W^=llYwzQdgR4u|!A zy=_b^NB(-M-kkdm2ls`+>kFxRbM8AF+;0e8zk#YZ=RVuvI=?7*eGycjZO(nR@44Pa zTc_h$ZzEN2&V9E3=Xje!4TI&Zw~4AZ=RVuP{bt)xSn4-Z_2%3+Ik+zlUSAB=H<@$a z_2%3s`>*%ALLCH_BYziEpKQ*3vV;5GwxO`p@22X_xleww-VgA|-y=u<9;)7) z`(%gf{Jp{FelJyT&V7~dd48p>)A5{N3DsAbb6@4)exGejEbjMF_2%4HIb7%O4_?2Y zsyFAp%EA4C;PnTndUNjc!@16{l5>6)RG)9oeZKE`{z2O;JD&9pQuXHC=leg;KNM;h zEN8t#RJ}R(`3~+6+lInYf0(K_=f2J1d46^9`f8}Y&7Au-hxPtQ@cJWEy*c-74(okQ z@cJ66-kkfkaPIGq%8GZCsyF98)4_dh@VT#r>NCx`&wS#3fOojZWbThq_2%4X`kw0@ zx6QKS)y{FM-kkePhxPu1ZA>id{Ryhxocjj<=X!OvU^xgZ^>t8vgE{vN{-58U9AN!P zs@|OY2LJ2)`T^G0Q}yQDH#oRIWg801k$;M+H|M_8_gt^R)+yWreTvxt)t8!cUmDJO ze_Gc2(^S1V_ocq|zA@anZ=~wYxi58af5tW@7WZeUdUNi3{MY*?Tddm?D@n7$oLk)vv?whH4bMAW_+@G@zg{A%+Rd3FHn#2A5`QY{E zq53p)?$aEe>s<(5e}Squ=RVEf{Y6`Fuyg)Js@|OYGza&WY(rsje~GF$=f2j#eM|8A z7O1|~ocmhedVks0DF=L@_5LzdZ_a&fIN#f8mAjl)s@|OYTHp6{uY^1ISEzb(?u#7U zx7o(Ta@K2u>Wj>|FY>v+YU^}#f0e2?=f22yf8QQ%eLGce&V7;p{(9F!4TI&#zed%Y zbKm9QzQZ;Ymii8;zRR5ZE{E&<>%r@`K$p7U=| z_2%5?K5;+5Pq4e>TCWSL&o$>hH=O7BH>Lh2Rd3FHu7mqqHV>9_{w=EBocmmd`}^C$ z>u*!_=G>q6f1clM3zpNLW$wG7`t!!zUta96-runeg{A%uQ}6jBz4H2?HwL|NthIMV z-J_F{L(TonHzUV9`>AgxN8D>4G$=YU=0;tVYfSuS(_)8zI(6!&lVc}Ln-n+vlc~Rb z`{^%-Pn-}xAufLE^w{CQA2Ixc8B@m1h@Jk~2eEO}rcQ~Ab#L_i?)7z1(NXSw5w4G2 zAGuz0zdCWoAwY@jlpo*=o5tY~XS5tNbj^h^h4h>eX@25lotY;2-3Xd78# zV>6XO+h`OU#ZX2gZKF|aY@sq}8;xS4gvy|8G>VO_R0eIMOl*`w8D+GMGO@9Z%AjqO ziH$NUgSJs7Hnvk4w2fY|Q4VGF(l&a<#ttfjw$Up#DyR(FMz7e|NoCMBlElU?C?kos zkt8;DQyH|4B(brF%Ajo|iH*Hf25n=%*rcz$>DucFBA~qVJj1t;LiP$(zWzaTC#6}~PLE9)18)v8t+Qwb6 z(FA4OrET058)vBu+Qwb6(M)B~Htvdzb5sUxBUNmihcZ%W8>wRB0+m7ANEI6wsSMgi zs@S+hWzaTi#6}C0QA69P5gV7O4BAGG*l49PXd5+R;|i5Q+b9$pZBRxbZKF_ZT%|H- z8--${oywqX6pD>&R0eIMQ*3lV8J)C^PO)*F%Ajp@ij7VxgSOErHf~TEw2d6G(FJAX z&^B_!#!V`Nwvi(?Zc!PujU2IYo64YVG>eUHD5IIS(JVIZP#Lt1X0dUX%AjpDi;a6! 
GIT binary patch (base85-encoded hunk data omitted; original line structure lost)
z$8ni3t6m%yJ)QRng8ltMZTf{+^mN{rckk~-C%K-I;Z2i=SG=Ovi$|g_r}MsCQ1278 z$36jzp3eJnJ$-(nV(b&K=;^#Km!I2N#Er6|%sZ=ii?HbFywBF-eUf7AlaT1M>AcUD z>-}QAW4{=Sp3eJh`8@bP{je!(GuL6+g4jU4`(;~$=RHE`lY z{}A)Zl?Lg#kg1a`!|i|FZ?b>z@Nt0?oqo0bYsOmsG{yGbVN=pdgOAbowHIIM<@<_j z=Sv{3G8p7lhCsZ^QBROp0mkJCE~h|p*$$G+c92}QgXFRuB$qajT-rc#X#>fn z4J4PDAi2y0$z>);E;B)L*$0x#K9F4Yf#k9eB$rhnxvT=oWfe#+t3YzO0FuiEkX$Z+ z7!mkl7fYyin+14u3#KytYXlFL<) zT&{xTaup<(B_O#h0m)?vNG?l2a;XH#r4l5UN|0PCL2}szlFKfTTy}xvvI``a4v<_r zKyv8-$)y7%mr#&gLP2r~1<558B$s@UT=GG3$p^_LA0(F*Ai1mn$z=sdE-OHCsR7BQ z1|*jnkX&j&a)|-SB?csy7?50IKyoPs$)yw|mr{^iNHn1IZ-~B$qUhT+%>tX#mNk z0VJ0OkX#x-a!CToB?%;#B#>N^Kys-7$)y4$mkN+vDnN3{0m&r?B$phJTyj8iX$Q%r z9VC}_kX+h9atQ;;B@85&FpylrKyoPn$)x}!mjaMn3P5s61<559B$rf>Tv9=DsRPNS z4kVX4kX-6Oa)}4YB_1S~c#vG;L2@Yr$)yY=mokuC%0P0-0?8!{B$q6ZT(UrNX$8rp z6(pBdkX%|pa)|=TB?=^$D3DyDKyoPt$)y-1mtv4yia~No2gxNJB$srMT+%^uX#~lo z5hRyJkX#x;a!CfsB^e}_WRP5vL2{`A$)yS;mnx84sz7qd1<55BB$r%}TyjBj=>*B8 z6C{^TkX$+;aIb5kXM1o#rAv1A%2!TkX-UWa>)b9B@ZN*6p&m}Kypa| z$t49Omuiq)szGw82FaxwB$sHAT%tj8i3Z6f8YGtzkX%YYaw!4Hr356G43JzhKyt|d z$t43MmnM*0nm}@C0?DNbB$sfIT*5(e2?xm~93+=QkX#Buaw!DKr4S^SH6XdH0m)?z zNG@wYa;XQ&r5+@gdXQY|L2^j|$t3|KmjsYp5J`ccVT?!;?jG^jeURY z%b{-)^vTe-67;QzedVC9EcW#TeZ5g%W7yXf|HF61-YZA?95#%at>;7gXDu*3=!f(%OJOU>rLYy`QrHS|DQpF~6t;p~3R^)gg{>f$ z!d8$=VJpa`uodJ|*a~tfYz4U#wt`#=TR|>`tss}eR**|!E6Am=735Ob3UVoI1-TTq zf?Nt)K`w=@AeX{ckV|1J$fd9q(%OJOU>rLYy` zQrHS|DQpF~6t;p~3R^)gg{>f$!d8$=VJpa`uodJ|*a~tfYz4U#wt`#=TR|>`tss}e zR**|!E6Am=735Ob3UVoI1-TTqf?Nt)K`w=@w;?Xet+ydA%dNLT&KYh)T$WpBfaEd* zB$pW=xy%5`WiLoBdqHy93zExTkX%-RlgmF2_M~IS!J`agbcLfaJ0TB$q89xoiQ+j`4E@wb;IRlc*8IW9dg5kVGF2_J}IR=u;F_2s~gXFRqB$v$~xoif>lUiE~i0qISrD_X^>oYfaJ0RB$pi^x$FSRk(SE+;{9ISG=>NswH&f#k9cB$sUl6aE@wel2g&7gkX$|o z$>np9T<(G7at|bzdmy>o1Ic9?NG{Vra+wB_%QTQ&c7x=y8zh(AAi3-Y$z>TxF3Ui2 zSq75JGLT%(f#h-yB$sm_xts&ZWj;tQ^FeZ%50cA#kX(*}%m&Hj07xzeKyo<%lFI>*T-Ji* zvKAzlwII2y1jn_E*C&@xd4*O1(00gKyrx#$t4aXmpG7IPJrZc0wk9cAi10X z$z>}@E?Yrz*$R@&R*+n7g5+`&B$t~Yx!eTFB@!f;NRV72L2`)%$>lIeE{8#KISi7^ zVUS!lfaJ0PB$o{!xoiN*Q3X;oJkX)94;yFhZ;1(M4ykX$-Ia_Iocr2{0F4v<_zL2?NN$t4sdmr#&g@azJv)0m-EuB$sxOT-rf$X$Q$A3?!E@kX*t*atQ;;r2r(C0+3t^ zKyoPn$t4vemsF5kQbBS_1<9ojB$qmnTfw2@WycHu@NSrwj|0UN=22dA|4V zJAOfyNg?L%zi<1GuYYFy;}pMuS8b=l{=p%ZN#mwI9WwIS7hV|s{FrCQjPUh+;W^)t zqeqW24VDJI_wK6``~$|D2T6uEO-8SNU4Qsl_aBBxhWAYay*3_?EE-_8{ll*u{{Z=I z+1GXa3v1LU_ALppvKqw1VKs%wOe<}i;3KBqrqGzXadq$f=L6+kbB(j*V1m(~71 z$=jN6);_lhzT05Z?>0pKZr(5Z95op2(tTGT-PkFU-ko9znrc37kjA*Ho#TtfTI}K_ z7$l>odRCaN2GbB}uxZdoAAM?>`q4+``)&>Uqcq;OB-n;q@-P|R_ZndCvC8yI)6bbN z@fu=&>8TeVd=C7wU-!g(Lwa66Th9%WBzbyz3^Pjod^~(g4S~AWvV7x%u4S{k*0OhA zeCxr{O-`mi_$v+uf5joH#bvyG4FBta*OqVDPB_-&)jMjMlRw&wIbm3C)KjKz^?%Tz z-PzwZv;o6DkzO)A!<4CSb(N`if&xFWj0^D(oFuGHnzhKWOv^Q6JivGBsf9*I9U9+N z{xoXg#9^QIlC9^v^*4KVRbCITLIcIO^K^W>^+wch*8}D1yEw+sOJ&0kyJo4;ysiHO z>(K6D9%ug0S*g`M9i1#O-fJ4CFX`Sw;C&OD&iLPC2*EeW^Q8DDlKf4){e8*|on7mx z!Q^&laI_blL7YJ~fz8tlrU6}5|7UDJFj4x{c|Y(a(_`I#*~`Q1;kD3Udd>8TvOgI0 z)JWay=vyA$673(@i@sp?Kxx)MPowFiLHeVFrPC0z)U^T*m~84=0|TT_B>UpwS!gi* z_LC=q|IR%y6ZHBJlf3g8=1xg@Y>3IR;d!#BOCIvrll{E=p@QXk-UqX-gACYuBN|wq zCjprBFuReHSZK-SaND%sCxr%Hdm zzo<#}Q&rFI|Af|ISbJJ0OiU~8qcN0fL6HSE#Yyww>l+7HRyd=PHO z13w67^{_o*@RJu`2N*}I1oc9Lp8bbY!vp zCSAuLZ8yA^47=?&yiW<9{rYi0K;SsP5X+n6gU!(f=|y)Rkz&5i@dVgdX-LOmg(@V@_N+Y@+Q~HS|#1`HM{vi|I z@V5kv|D}KMM8A-66P!P@G*lX5|NHMx@SpUtm(=xa%ZIkdY5%F~55KYf;onqa|CU{! 
zpFFTWkK6RI)?e<{$9sU!)qnKRhB!j|iX*gd;Dna3@wOwh#((tC9&=>n?MUyRaYD=9 z61ueOktf?sYc`y<>*@C6H5t9Q37?|kG5^8Vcd@<=d)M@~@`0HjJ@v!h8J%dq5}Mze zE1}~9rK1CFPy93*q|c;3I9s5x7oDz$PE6ySJwx+0`wWfl_0IM{4V(Xy_EUU$HKHPR zvSLsX+b_Yq9ZLxnu@l{?h#%{I{>Iy}4s<<;Lov1N-`4hYgyS3gv=|z6?J;s6c#ko& zYmf08-w&kGrteCF-v6`3GCsg!{(6Z056(M*A(F>y7K^$6c#CC3*OJ`z?iz#npCpeD zCQLOwKF1*a!GDrp(9{tlMvV0RKfb@w`_fY;+Fp9f#F37dHk$ug8t_hlt?5v__{b5x zcyV9Hi}!c>;v+}&=EX-jUfk2!i`!m%glfGHw0BAq9=KB)ZtIjXwk3DhFX11(|H+%&>l>a_oR1CKX+E8M%y0T?9pzROYLk0sXL->?d%TA1)LX7S53$IVK8Hiu)IzT z!+3Re{ei-5LN_N9E%YpgL;X8sFddL2XXur3mPU?3r!$l?M2S z>?`(8-X*H<>_Brd2sQ4|xfsmt#bWTTdNKH6@BFW|`+uf47a7Teq{V~2VEcRE8ZSPO z1ulx%ElW=arg;PmjDy{>I5{QnI%{@xV4lD@)GfUhBmwwb{v=lb0$# z2Wi+mgA`$~tr>5ce!?DF4=_Lfl+N1}$Bx$YMLYj?fC0}vyGp=avi(KbU2vFjsLm;O zt`}2om6Itq-|)!IvZ?md^yc22rf)Gyo4QWZVfWPHOFfinh_!1|&&S!ulR6ub2pQz1zfb z%R%3sPI2TT*BIcE*|(>2thzxDbeMv@r|a3GYCcE=+|W?(=^~A1bS_#Od$DK*s~4@` zD;t`3rMGQe>EpJ4Y5NeUH*FthYxL^hyFJ*deQq$Z-YLvaW}D- zY>7SZG%>$`0Pnz{@s^-qZ_8iCStf^g_k4n}w_or$%cSwPCkky@&Nr^J&p|ieiSfS= z`bLa5e2l&$hdm>CnZ6`F_I{Axq~KQrr%bYqY1awOTefE?C9|pP!&o05JH-C^HeQ|0 z+3F_zzwP-O#;yrIM`b-LWBJW!p zUmlrjetnpKKjWt(Q*(2-uH5jyfBW6(yu@ELjX3?)E&HFHbjN4xtH&l?_x|X=D}Gu2 zZ$F55u3*Z!DbK7le)_GF;5ROhy>}zB#N%5(dMViJ#E>(wi=O)F-%Np7q2t#bed$L- z|9Iy3b3dB0JmHUL-uwNBAAk3Q@YT1*{MaYXDA`XLL=*5Afq|b+nQT6*7hWHziQH&U8*r%WFON72KQ&U1G8r&Ka_rLdwNsX9M^QP{z~>lSpc7S{lnPz8GH=BFZw<&X(z`o7hON9gG2rK+Ye`UgS-W-@Orv^{ps(M zXpFY6KW@rmaeuXTOd@;!-~Wf1vO}EB18+xWK1qG`&Hh`covlEd=hh}l-q|M$9rbO= z7rVZF_Flo&zx?e_Cwl7J#nNw{_{=zP=8W)d9TU!(KFR;9_iue?yl43D-=BK_--Et6 z`0M50nE&nN*9OJxGAD;!c=mfK|MPP6Y)kX2ufF)$TeZJ=D(d^2GQa$-k%x!m4Igx6 z<(SjOzy8Ya@4h~#!*9Z-Hwymqr=x!I$9>AKP1w7j-;{E2G$Z z4xbf$%dCrK_I%wm+cu);wqu!H^~GLhzqO|G`LeCcpYB;^7afZDwa;&hQj@m+;Fnp0 z+EzEdJmNp%|DJt)WX!ydSgvJ zKx!BHK5380hDs5I(~epdg{^iy1O@(LFI9o=9*@1@DVbgg2@Ld}IAz=f z?{R^)1E)z{znI9|_J1K0EWs9U`RK*F>%WBfTY`ssPY$s7+5S^V&{S`~kNx~74YwVX zZTMoZ*+2FiTefZTtRp>Z_M|6g+RaWlukK#oS!hWfcD7=Ef$8@{cBQq&y%kh4 ze*240`aZX4WpVSZx8DBpn;EsY%xTh0t-zJWme&zDg56Aj!r><`g0n&MW>z-W?eX;2N zDV&uEd7mN+;FI%c>`ClDa?g#Z(~_)86{OH)uP=2Jnq7}brB_7Z_MYS+DDd$1B)MPg zH9OJo;NNav8!*0S&Hmrkm!{;t*6(evy`wf|93T0OBdY^H@LrbkQU9zrfA+-aS?~II z#(m@P<>%J@;O#Hp@Qi7mVE*&?wKx5bf46Ysw^J^>vTyR&c6_a4)}n~zqkNtlyyHjD z{N<(bCBOX2Z)dHl@RNRPdgi&rCw_nAFFw1e*X)M?`RK3Nt`x|6b;w&JUQHeC5v16Z?&t^V7`uHOWV=XTH4owb3~tr&~sSJ!a>B{m*|+sQX>{ zr*{W`P?R@f+uGgH^JX6(R5am}^46A7om>BD`K-sk6dfKl`}Ou|SH89LTvgjep99ou z_CtXDhgq|oE`Z*SwaTaH5nQvq=+^9#>EX+I&i)TSifi^zx1z9}s67M)9^RT=Y>d@) zVdd}sV^_+@-bUE&tG@Q4^Jm_=4s385iRiV__DMOw=jbCq>inb;WSe_?S)7Bk^id$~ zdj1M$OcS;yI!I4E0;JtfQsaKCMoQl0o6@_pa`OuY%U|F<(C3sf$-QuU9@L)f{vxP+_H>N- z2^0U5tN+zt)oj{p4|1oDARXVhC&+0A>4(xN$(<|*o7q8?bCO<`o^|W%CFO*XU%;or z7_KYk|9a4vzpc(xy%tTkXZp7Xei;47y-%dlxXisodL9?~%>6_hBH15${((*EFTbds ze9r!*diwpw9@SX)qjG%rp4s=L?USpkm+!U(3e180(}YeCGZcQfVr!k@9GA+_D2DMlRj4U?jXB+>(axHo!~j+ ga$P;+pC7bg`k}2~`reD~_DVS1w-23maMu|BA8g1sGXMYp diff --git a/vendor/cloud.google.com/go/datastore/datastore_test.go b/vendor/cloud.google.com/go/datastore/datastore_test.go index a3b7a6ebb..7b184fd57 100644 --- a/vendor/cloud.google.com/go/datastore/datastore_test.go +++ b/vendor/cloud.google.com/go/datastore/datastore_test.go @@ -263,6 +263,43 @@ type Y2 struct { F []int64 } +type Pointers struct { + Pi *int + Ps *string + Pb *bool + Pf *float64 + Pg *GeoPoint + Pt *time.Time +} + +type PointersOmitEmpty struct { + Pi *int `datastore:",omitempty"` + Ps *string `datastore:",omitempty"` + Pb *bool `datastore:",omitempty"` + Pf *float64 `datastore:",omitempty"` + Pg *GeoPoint `datastore:",omitempty"` + Pt *time.Time 
`datastore:",omitempty"` +} + +func populatedPointers() *Pointers { + var ( + i int + s string + b bool + f float64 + g GeoPoint + t time.Time + ) + return &Pointers{ + Pi: &i, + Ps: &s, + Pb: &b, + Pf: &f, + Pg: &g, + Pt: &t, + } +} + type Tagged struct { A int `datastore:"a,noindex"` B []int `datastore:"b"` @@ -406,10 +443,6 @@ type PtrToStructField struct { var two int = 2 -type PtrToInt struct { - I *int -} - type EmbeddedTime struct { time.Time } @@ -1645,15 +1678,6 @@ var testCases = []testCase{ "", "", }, - { - "save struct with pointer to int field", - &PtrToInt{ - I: &two, - }, - &PtrToInt{}, - "unsupported struct field", - "", - }, { "struct with nil ptr to struct fields", &PtrToStructField{ @@ -1903,6 +1927,20 @@ var testCases = []testCase{ "", "", }, + { + "pointer fields: nil", + &Pointers{}, + &Pointers{}, + "", + "", + }, + { + "pointer fields: populated with zeroes", + populatedPointers(), + populatedPointers(), + "", + "", + }, } // checkErr returns the empty string if either both want and err are zero, diff --git a/vendor/cloud.google.com/go/datastore/doc.go b/vendor/cloud.google.com/go/datastore/doc.go index df86cd2cc..1a05abb41 100644 --- a/vendor/cloud.google.com/go/datastore/doc.go +++ b/vendor/cloud.google.com/go/datastore/doc.go @@ -15,8 +15,6 @@ /* Package datastore provides a client for Google Cloud Datastore. -Note: This package is in beta. Some backwards-incompatible changes may occur. - Basic Operations @@ -43,7 +41,8 @@ Valid value types are: - time.Time (stored with microsecond precision), - structs whose fields are all valid value types, - pointers to structs whose fields are all valid value types, - - slices of any of the above. + - slices of any of the above, + - pointers to a signed integer, bool, string, float32, or float64. Slices of structs are valid, as are structs that contain slices. @@ -86,6 +85,10 @@ GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and Delete functions. They take a []*Key instead of a *Key, and may return a datastore.MultiError when encountering partial failure. +Mutate generalizes PutMulti and DeleteMulti to a sequence of any Datastore mutations. +It takes a series of mutations created with NewInsert, NewUpdate, NewUpsert and +NewDelete and applies them atomically. + Properties @@ -118,9 +121,10 @@ field name. A "-" tag name means that the datastore will ignore that field. The only valid options are "omitempty", "noindex" and "flatten". -If the options include "omitempty" and the value of the field is empty, then the field will be omitted on Save. -The empty values are false, 0, any nil interface value, and any array, slice, map, or string of length zero. -Struct field values will never be empty. +If the options include "omitempty" and the value of the field is empty, then the +field will be omitted on Save. The empty values are false, 0, any nil pointer or +interface value, and any array, slice, map, or string of length zero. Struct field +values will never be empty, except for nil pointers. If options include "noindex" then the field will not be indexed. All fields are indexed by default. Strings or byte slices longer than 1500 bytes cannot be indexed; @@ -154,6 +158,36 @@ Example code: } +Slice Fields + +A field of slice type corresponds to a Datastore array property, except for []byte, which corresponds +to a Datastore blob. + +Zero-length slice fields are not saved. Slice fields of length 1 or greater are saved +as Datastore arrays. 
When a zero-length Datastore array is loaded into a slice field, +the slice field remains unchanged. + +If a non-array value is loaded into a slice field, the result will be a slice with +one element, containing the value. + +Loading Nulls + +Loading a Datastore Null into a basic type (int, float, etc.) results in a zero value. +Loading a Null into a slice of basic type results in a slice of size 1 containing the zero value. +Loading a Null into a pointer field results in nil. +Loading a Null into a field of struct type is an error. + +Pointer Fields + +A struct field can be a pointer to a signed integer, floating-point number, string or +bool. Putting a non-nil pointer will store its dereferenced value. Putting a nil +pointer will store a Datastore Null property, unless the field is marked omitempty, +in which case no property will be stored. + +Loading a Null into a pointer field sets the pointer to nil. Loading any other value +allocates new storage with the value, and sets the field to point to it. + + Key Field If the struct contains a *datastore.Key field tagged with the name "__key__", @@ -436,6 +470,9 @@ Example code: fmt.Printf("Count=%d\n", count) } +Pass the ReadOnly option to RunInTransaction if your transaction is used only for Get, +GetMulti or queries. Read-only transactions are more efficient. + Google Cloud Datastore Emulator This package supports the Cloud Datastore emulator, which is useful for testing and diff --git a/vendor/cloud.google.com/go/datastore/example_test.go b/vendor/cloud.google.com/go/datastore/example_test.go index c6f81e13b..88edbe980 100644 --- a/vendor/cloud.google.com/go/datastore/example_test.go +++ b/vendor/cloud.google.com/go/datastore/example_test.go @@ -396,6 +396,28 @@ func ExampleClient_GetAll() { } } +func ExampleClient_Mutate() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + key1 := datastore.NameKey("Post", "post1", nil) + key2 := datastore.NameKey("Post", "post2", nil) + key3 := datastore.NameKey("Post", "post3", nil) + key4 := datastore.NameKey("Post", "post4", nil) + + _, err = client.Mutate(ctx, + datastore.NewInsert(key1, Post{Title: "Post 1"}), + datastore.NewUpsert(key2, Post{Title: "Post 2"}), + datastore.NewUpdate(key3, Post{Title: "Post 3"}), + datastore.NewDelete(key4)) + if err != nil { + // TODO: Handle error. + } +} + func ExampleCommit_Key() { ctx := context.Background() client, err := datastore.NewClient(ctx, "") diff --git a/vendor/cloud.google.com/go/datastore/integration_test.go b/vendor/cloud.google.com/go/datastore/integration_test.go index 58a044c5b..334585751 100644 --- a/vendor/cloud.google.com/go/datastore/integration_test.go +++ b/vendor/cloud.google.com/go/datastore/integration_test.go @@ -35,6 +35,8 @@ import ( "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) // TODO(djd): Make test entity clean up more robust: some test entities may @@ -1051,6 +1053,51 @@ func TestTransaction(t *testing.T) { } } +func TestReadOnlyTransaction(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t, nil) + defer client.Close() + + type value struct{ N int } + + // Put a value. 
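A minimal sketch of the pointer-field save rules documented in doc.go above. The Task type, its field names, and the helper name are illustrative assumptions, and the snippet presumes import "cloud.google.com/go/datastore":

    // Task is illustrative only; it does not appear in the vendored code.
    type Task struct {
        Done     *bool `datastore:",omitempty"` // nil pointer with omitempty: property omitted entirely
        Priority *int                           // nil pointer without omitempty: saved as a Null property
    }

    func sketchSaveTask() ([]datastore.Property, error) {
        p := 3
        // Done stays nil and is omitted; Priority is dereferenced and saved as int64(3).
        return datastore.SaveStruct(&Task{Priority: &p})
    }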
+ const n = 5 + v := &value{N: n} + key, err := client.Put(ctx, IncompleteKey("roTxn", nil), v) + if err != nil { + t.Fatal(err) + } + defer client.Delete(ctx, key) + + // Read it from a read-only transaction. + _, err = client.RunInTransaction(ctx, func(tx *Transaction) error { + if err := tx.Get(key, v); err != nil { + return err + } + return nil + }, ReadOnly) + if err != nil { + t.Fatal(err) + } + if v.N != n { + t.Fatalf("got %d, want %d", v.N, n) + } + + // Attempting to write from a read-only transaction is an error. + _, err = client.RunInTransaction(ctx, func(tx *Transaction) error { + if _, err := tx.Put(key, v); err != nil { + return err + } + return nil + }, ReadOnly) + if err == nil { + t.Fatal("got nil, want error") + } +} + func TestNilPointers(t *testing.T) { ctx := context.Background() client := newTestClient(ctx, t) @@ -1115,3 +1162,116 @@ func TestNestedRepeatedElementNoIndex(t *testing.T) { t.Fatalf("client.Delete: %v", err) } } + +func TestPointerFields(t *testing.T) { + ctx := context.Background() + client := newTestClient(ctx, t) + defer client.Close() + + want := populatedPointers() + key, err := client.Put(ctx, IncompleteKey("pointers", nil), want) + if err != nil { + t.Fatal(err) + } + var got Pointers + if err := client.Get(ctx, key, &got); err != nil { + t.Fatal(err) + } + if got.Pi == nil || *got.Pi != *want.Pi { + t.Errorf("Pi: got %v, want %v", got.Pi, *want.Pi) + } + if got.Ps == nil || *got.Ps != *want.Ps { + t.Errorf("Ps: got %v, want %v", got.Ps, *want.Ps) + } + if got.Pb == nil || *got.Pb != *want.Pb { + t.Errorf("Pb: got %v, want %v", got.Pb, *want.Pb) + } + if got.Pf == nil || *got.Pf != *want.Pf { + t.Errorf("Pf: got %v, want %v", got.Pf, *want.Pf) + } + if got.Pg == nil || *got.Pg != *want.Pg { + t.Errorf("Pg: got %v, want %v", got.Pg, *want.Pg) + } + if got.Pt == nil || !got.Pt.Equal(*want.Pt) { + t.Errorf("Pt: got %v, want %v", got.Pt, *want.Pt) + } +} + +func TestMutate(t *testing.T) { + // test Client.Mutate + testMutate(t, func(ctx context.Context, client *Client, muts ...*Mutation) ([]*Key, error) { + return client.Mutate(ctx, muts...) + }) + // test Transaction.Mutate + testMutate(t, func(ctx context.Context, client *Client, muts ...*Mutation) ([]*Key, error) { + var pkeys []*PendingKey + commit, err := client.RunInTransaction(ctx, func(tx *Transaction) error { + var err error + pkeys, err = tx.Mutate(muts...) 
+ return err + }) + if err != nil { + return nil, err + } + var keys []*Key + for _, pk := range pkeys { + keys = append(keys, commit.Key(pk)) + } + return keys, nil + }) +} + +func testMutate(t *testing.T, mutate func(ctx context.Context, client *Client, muts ...*Mutation) ([]*Key, error)) { + ctx := context.Background() + client := newTestClient(ctx, t) + defer client.Close() + + type T struct{ I int } + + check := func(k *Key, want interface{}) { + var x T + err := client.Get(ctx, k, &x) + switch want := want.(type) { + case error: + if err != want { + t.Errorf("key %s: got error %v, want %v", k, err, want) + } + case int: + if err != nil { + t.Fatalf("key %s: %v", k, err) + } + if x.I != want { + t.Errorf("key %s: got %d, want %d", k, x.I, want) + } + default: + panic("check: bad arg") + } + } + + keys, err := mutate(ctx, client, + NewInsert(IncompleteKey("t", nil), &T{1}), + NewUpsert(IncompleteKey("t", nil), &T{2}), + ) + if err != nil { + t.Fatal(err) + } + check(keys[0], 1) + check(keys[1], 2) + + _, err = mutate(ctx, client, + NewUpdate(keys[0], &T{3}), + NewDelete(keys[1]), + ) + check(keys[0], 3) + check(keys[1], ErrNoSuchEntity) + + _, err = mutate(ctx, client, NewInsert(keys[0], &T{4})) + if got, want := status.Code(err), codes.AlreadyExists; got != want { + t.Errorf("Insert existing key: got %s, want %s", got, want) + } + + _, err = mutate(ctx, client, NewUpdate(keys[1], &T{4})) + if got, want := status.Code(err), codes.NotFound; got != want { + t.Errorf("Update non-existing key: got %s, want %s", got, want) + } +} diff --git a/vendor/cloud.google.com/go/datastore/load.go b/vendor/cloud.google.com/go/datastore/load.go index 03bde8102..652b0da57 100644 --- a/vendor/cloud.google.com/go/datastore/load.go +++ b/vendor/cloud.google.com/go/datastore/load.go @@ -60,6 +60,10 @@ func typeMismatchReason(p Property, v reflect.Value) string { return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type()) } +func overflowReason(x interface{}, v reflect.Value) string { + return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type()) +} + type propertyLoader struct { // m holds the number of times a substruct field like "Foo.Bar.Baz" has // been seen so far. The map is constructed lazily. @@ -243,7 +247,7 @@ func plsFieldLoad(v reflect.Value, p Property, subfields []string) (ok bool, err } // setVal sets 'v' to the value of the Property 'p'. -func setVal(v reflect.Value, p Property) string { +func setVal(v reflect.Value, p Property) (s string) { pValue := p.Value switch v.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: @@ -252,7 +256,7 @@ func setVal(v reflect.Value, p Property) string { return typeMismatchReason(p, v) } if v.OverflowInt(x) { - return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type()) + return overflowReason(x, v) } v.SetInt(x) case reflect.Bool: @@ -273,12 +277,12 @@ func setVal(v reflect.Value, p Property) string { return typeMismatchReason(p, v) } if v.OverflowFloat(x) { - return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type()) + return overflowReason(x, v) } v.SetFloat(x) case reflect.Ptr: - // v must be either a pointer to a Key or Entity. - if v.Type() != typeOfKeyPtr && v.Type().Elem().Kind() != reflect.Struct { + // v must be a pointer to either a Key, an Entity, or one of the supported basic types. 
+ if v.Type() != typeOfKeyPtr && v.Type().Elem().Kind() != reflect.Struct && !isValidPointerType(v.Type().Elem()) { return typeMismatchReason(p, v) } @@ -290,21 +294,38 @@ func setVal(v reflect.Value, p Property) string { return "" } - switch x := pValue.(type) { - case *Key: + if x, ok := p.Value.(*Key); ok { if _, ok := v.Interface().(*Key); !ok { return typeMismatchReason(p, v) } v.Set(reflect.ValueOf(x)) + return "" + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + switch x := pValue.(type) { case *Entity: - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } err := loadEntity(v.Interface(), x) if err != nil { return err.Error() } - + case int64: + if v.Elem().OverflowInt(x) { + return overflowReason(x, v.Elem()) + } + v.Elem().SetInt(x) + case float64: + if v.Elem().OverflowFloat(x) { + return overflowReason(x, v.Elem()) + } + v.Elem().SetFloat(x) + case bool: + v.Elem().SetBool(x) + case string: + v.Elem().SetString(x) + case GeoPoint, time.Time: + v.Elem().Set(reflect.ValueOf(x)) default: return typeMismatchReason(p, v) } diff --git a/vendor/cloud.google.com/go/datastore/load_test.go b/vendor/cloud.google.com/go/datastore/load_test.go index 0bbabb565..1755c1c1c 100644 --- a/vendor/cloud.google.com/go/datastore/load_test.go +++ b/vendor/cloud.google.com/go/datastore/load_test.go @@ -17,6 +17,7 @@ package datastore import ( "reflect" "testing" + "time" "cloud.google.com/go/internal/testutil" @@ -755,3 +756,131 @@ func TestKeyLoader(t *testing.T) { } } } + +func TestLoadPointers(t *testing.T) { + for _, test := range []struct { + desc string + in []Property + want Pointers + }{ + { + desc: "nil properties load as nil pointers", + in: []Property{ + Property{Name: "Pi", Value: nil}, + Property{Name: "Ps", Value: nil}, + Property{Name: "Pb", Value: nil}, + Property{Name: "Pf", Value: nil}, + Property{Name: "Pg", Value: nil}, + Property{Name: "Pt", Value: nil}, + }, + want: Pointers{}, + }, + { + desc: "missing properties load as nil pointers", + in: []Property(nil), + want: Pointers{}, + }, + { + desc: "non-nil properties load as the appropriate values", + in: []Property{ + Property{Name: "Pi", Value: int64(1)}, + Property{Name: "Ps", Value: "x"}, + Property{Name: "Pb", Value: true}, + Property{Name: "Pf", Value: 3.14}, + Property{Name: "Pg", Value: GeoPoint{Lat: 1, Lng: 2}}, + Property{Name: "Pt", Value: time.Unix(100, 0)}, + }, + want: func() Pointers { + p := populatedPointers() + *p.Pi = 1 + *p.Ps = "x" + *p.Pb = true + *p.Pf = 3.14 + *p.Pg = GeoPoint{Lat: 1, Lng: 2} + *p.Pt = time.Unix(100, 0) + return *p + }(), + }, + } { + var got Pointers + if err := LoadStruct(&got, test.in); err != nil { + t.Fatalf("%s: %v", test.desc, err) + } + if !testutil.Equal(got, test.want) { + t.Errorf("%s:\ngot %+v\nwant %+v", test.desc, got, test.want) + } + } +} + +func TestLoadNonArrayIntoSlice(t *testing.T) { + // Loading a non-array value into a slice field results in a slice of size 1. + var got struct{ S []string } + if err := LoadStruct(&got, []Property{{Name: "S", Value: "x"}}); err != nil { + t.Fatal(err) + } + if want := []string{"x"}; !testutil.Equal(got.S, want) { + t.Errorf("got %#v, want %#v", got.S, want) + } +} + +func TestLoadEmptyArrayIntoSlice(t *testing.T) { + // Loading an empty array into a slice field is a no-op. 
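A companion sketch for the load path handled by setVal above: a Null property leaves a pointer field nil, while a concrete value allocates new storage. The Row type and helper name are illustrative assumptions; import "cloud.google.com/go/datastore" is presumed:

    type Row struct {
        N *int64
        S *string
    }

    func sketchLoadRow() (Row, error) {
        var r Row
        err := datastore.LoadStruct(&r, []datastore.Property{
            {Name: "N", Value: int64(7)}, // allocates new storage; r.N ends up pointing at 7
            {Name: "S", Value: nil},      // Datastore Null: r.S is left nil
        })
        return r, err
    }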
+ var got = struct{ S []string }{[]string{"x"}} + if err := LoadStruct(&got, []Property{{Name: "S", Value: []interface{}{}}}); err != nil { + t.Fatal(err) + } + if want := []string{"x"}; !testutil.Equal(got.S, want) { + t.Errorf("got %#v, want %#v", got.S, want) + } +} + +func TestLoadNull(t *testing.T) { + // Loading a Datastore Null into a basic type (int, float, etc.) results in a zero value. + // Loading a Null into a slice of basic type results in a slice of size 1 containing the zero value. + // (As expected from the behavior of slices and nulls with basic types.) + type S struct { + I int64 + F float64 + S string + B bool + A []string + } + got := S{ + I: 1, + F: 1.0, + S: "1", + B: true, + A: []string{"X"}, + } + want := S{A: []string{""}} + props := []Property{{Name: "I"}, {Name: "F"}, {Name: "S"}, {Name: "B"}, {Name: "A"}} + if err := LoadStruct(&got, props); err != nil { + t.Fatal(err) + } + if !testutil.Equal(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + + // Loading a Null into a pointer to struct field results in a nil field. + got2 := struct{ X *S }{X: &S{}} + if err := LoadStruct(&got2, []Property{{Name: "X"}}); err != nil { + t.Fatal(err) + } + if got2.X != nil { + t.Errorf("got %v, want nil", got2.X) + } + + // Loading a Null into a struct field is an error. + got3 := struct{ X S }{} + err := LoadStruct(&got3, []Property{{Name: "X"}}) + if err == nil { + t.Error("got nil, want error") + } +} + +// var got2 struct{ S []Pet } +// if err := LoadStruct(&got2, []Property{{Name: "S", Value: nil}}); err != nil { +// t.Fatal(err) +// } + +// } diff --git a/vendor/cloud.google.com/go/datastore/mutation.go b/vendor/cloud.google.com/go/datastore/mutation.go new file mode 100644 index 000000000..894c80d9d --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/mutation.go @@ -0,0 +1,129 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "fmt" + + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +// A Mutation represents a change to a Datastore entity. +type Mutation struct { + key *Key // needed for transaction PendingKeys and to dedup deletions + mut *pb.Mutation + err error +} + +func (m *Mutation) isDelete() bool { + _, ok := m.mut.Operation.(*pb.Mutation_Delete) + return ok +} + +// NewInsert creates a mutation that will save the entity src into the datastore with +// key k, returning an error if k already exists. +// See Client.Put for valid values of src. +func NewInsert(k *Key, src interface{}) *Mutation { + if !k.valid() { + return &Mutation{err: ErrInvalidKey} + } + p, err := saveEntity(k, src) + if err != nil { + return &Mutation{err: err} + } + return &Mutation{ + key: k, + mut: &pb.Mutation{Operation: &pb.Mutation_Insert{Insert: p}}, + } +} + +// NewUpsert creates a mutation that saves the entity src into the datastore with key +// k, whether or not k exists. See Client.Put for valid values of src. 
+func NewUpsert(k *Key, src interface{}) *Mutation { + if !k.valid() { + return &Mutation{err: ErrInvalidKey} + } + p, err := saveEntity(k, src) + if err != nil { + return &Mutation{err: err} + } + return &Mutation{ + key: k, + mut: &pb.Mutation{Operation: &pb.Mutation_Upsert{Upsert: p}}, + } +} + +// NewUpdate creates a mutation that replaces the entity in the datastore with key k, +// returning an error if k does not exist. See Client.Put for valid values of src. +func NewUpdate(k *Key, src interface{}) *Mutation { + if !k.valid() { + return &Mutation{err: ErrInvalidKey} + } + if k.Incomplete() { + return &Mutation{err: fmt.Errorf("datastore: can't update the incomplete key: %v", k)} + } + p, err := saveEntity(k, src) + if err != nil { + return &Mutation{err: err} + } + return &Mutation{ + key: k, + mut: &pb.Mutation{Operation: &pb.Mutation_Update{Update: p}}, + } +} + +// NewDelete creates a mutation that deletes the entity with key k. +func NewDelete(k *Key) *Mutation { + if !k.valid() { + return &Mutation{err: ErrInvalidKey} + } + if k.Incomplete() { + return &Mutation{err: fmt.Errorf("datastore: can't delete the incomplete key: %v", k)} + } + return &Mutation{ + key: k, + mut: &pb.Mutation{Operation: &pb.Mutation_Delete{Delete: keyToProto(k)}}, + } +} + +func mutationProtos(muts []*Mutation) ([]*pb.Mutation, error) { + // If any of the mutations have errors, collect and return them. + var merr MultiError + for i, m := range muts { + if m.err != nil { + if merr == nil { + merr = make(MultiError, len(muts)) + } + merr[i] = m.err + } + } + if merr != nil { + return nil, merr + } + var protos []*pb.Mutation + // Collect protos. Remove duplicate deletions (see deleteMutations). + seen := map[string]bool{} + for _, m := range muts { + if m.isDelete() { + ks := m.key.String() + if seen[ks] { + continue + } + seen[ks] = true + } + protos = append(protos, m.mut) + } + return protos, nil +} diff --git a/vendor/cloud.google.com/go/datastore/mutation_test.go b/vendor/cloud.google.com/go/datastore/mutation_test.go new file mode 100644 index 000000000..a434bb1ad --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/mutation_test.go @@ -0,0 +1,150 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
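A sketch of how per-mutation validation errors surface, based on mutationProtos above. The helper name is an assumption, Post is the type used by the package examples, and it is assumed here that Client.Mutate reports these errors as a MultiError in the same way Transaction.Mutate documents; imports of context, log and cloud.google.com/go/datastore are presumed:

    func sketchInvalidMutations(ctx context.Context, client *datastore.Client) {
        _, err := client.Mutate(ctx,
            datastore.NewInsert(nil, &Post{Title: "no key"}),          // a nil key is invalid
            datastore.NewDelete(datastore.IncompleteKey("Post", nil)), // deleting an incomplete key is invalid
        )
        if merr, ok := err.(datastore.MultiError); ok {
            for i, e := range merr {
                if e != nil {
                    log.Printf("mutation %d rejected: %v", i, e)
                }
            }
        }
    }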
+ +package datastore + +import ( + "testing" + + "cloud.google.com/go/internal/testutil" + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +func TestMutationProtos(t *testing.T) { + var keys []*Key + for i := 1; i <= 4; i++ { + k := IDKey("kind", int64(i), nil) + keys = append(keys, k) + } + entity := &PropertyList{{Name: "n", Value: "v"}} + entityForKey := func(k *Key) *pb.Entity { + return &pb.Entity{ + Key: keyToProto(k), + Properties: map[string]*pb.Value{ + "n": &pb.Value{ValueType: &pb.Value_StringValue{StringValue: "v"}}, + }, + } + } + for _, test := range []struct { + desc string + in []*Mutation + want []*pb.Mutation + }{ + { + desc: "nil", + in: nil, + want: nil, + }, + { + desc: "empty", + in: []*Mutation{}, + want: nil, + }, + { + desc: "various", + in: []*Mutation{ + NewInsert(keys[0], entity), + NewUpsert(keys[1], entity), + NewUpdate(keys[2], entity), + NewDelete(keys[3]), + }, + want: []*pb.Mutation{ + &pb.Mutation{Operation: &pb.Mutation_Insert{Insert: entityForKey(keys[0])}}, + &pb.Mutation{Operation: &pb.Mutation_Upsert{Upsert: entityForKey(keys[1])}}, + &pb.Mutation{Operation: &pb.Mutation_Update{Update: entityForKey(keys[2])}}, + &pb.Mutation{Operation: &pb.Mutation_Delete{Delete: keyToProto(keys[3])}}, + }, + }, + { + desc: "duplicate deletes", + in: []*Mutation{ + NewDelete(keys[0]), + NewInsert(keys[1], entity), + NewDelete(keys[0]), + NewDelete(keys[2]), + NewDelete(keys[0]), + }, + want: []*pb.Mutation{ + &pb.Mutation{Operation: &pb.Mutation_Delete{Delete: keyToProto(keys[0])}}, + &pb.Mutation{Operation: &pb.Mutation_Insert{Insert: entityForKey(keys[1])}}, + &pb.Mutation{Operation: &pb.Mutation_Delete{Delete: keyToProto(keys[2])}}, + }, + }, + } { + got, err := mutationProtos(test.in) + if err != nil { + t.Errorf("%s: %v", test.desc, err) + continue + } + if diff := testutil.Diff(got, test.want); diff != "" { + t.Errorf("%s: %s", test.desc, diff) + } + } +} + +func TestMutationProtosErrors(t *testing.T) { + entity := &PropertyList{{Name: "n", Value: "v"}} + k := IDKey("kind", 1, nil) + ik := IncompleteKey("kind", nil) + for _, test := range []struct { + desc string + in []*Mutation + want []int // non-nil indexes of MultiError + }{ + { + desc: "invalid key", + in: []*Mutation{ + NewInsert(nil, entity), + NewUpdate(nil, entity), + NewUpsert(nil, entity), + NewDelete(nil), + }, + want: []int{0, 1, 2, 3}, + }, + { + desc: "incomplete key", + in: []*Mutation{ + NewInsert(ik, entity), + NewUpdate(ik, entity), + NewUpsert(ik, entity), + NewDelete(ik), + }, + want: []int{1, 3}, + }, + { + desc: "bad entity", + in: []*Mutation{ + NewInsert(k, 1), + NewUpdate(k, 2), + NewUpsert(k, 3), + }, + want: []int{0, 1, 2}, + }, + } { + _, err := mutationProtos(test.in) + if err == nil { + t.Errorf("%s: got nil, want error", test.desc) + continue + } + var got []int + for i, err := range err.(MultiError) { + if err != nil { + got = append(got, i) + } + } + if !testutil.Equal(got, test.want) { + t.Errorf("%s: got errors at %v, want at %v", test.desc, got, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/datastore/oc_test.go b/vendor/cloud.google.com/go/datastore/oc_test.go new file mode 100644 index 000000000..40a00d259 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/oc_test.go @@ -0,0 +1,59 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package datastore + +import ( + "testing" + + "go.opencensus.io/trace" + "golang.org/x/net/context" +) + +func TestOCTracing(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + + te := &testExporter{} + trace.RegisterExporter(te) + defer trace.UnregisterExporter(te) + trace.SetDefaultSampler(trace.AlwaysSample()) + + ctx := context.Background() + client := newTestClient(ctx, t) + defer client.Close() + + type SomeValue struct { + S string + } + _, err := client.Put(ctx, IncompleteKey("SomeKey", nil), &SomeValue{"foo"}) + if err != nil { + t.Fatalf("client.Put: %v", err) + } + + if len(te.spans) != 1 { + t.Fatalf("Expected 1 span to be created, but got %d", len(te.spans)) + } +} + +type testExporter struct { + spans []*trace.SpanData +} + +func (te *testExporter) ExportSpan(s *trace.SpanData) { + te.spans = append(te.spans, s) +} diff --git a/vendor/cloud.google.com/go/datastore/query.go b/vendor/cloud.google.com/go/datastore/query.go index 09f083040..2ccab2314 100644 --- a/vendor/cloud.google.com/go/datastore/query.go +++ b/vendor/cloud.google.com/go/datastore/query.go @@ -23,6 +23,7 @@ import ( "strconv" "strings" + "cloud.google.com/go/internal/trace" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" "golang.org/x/net/context" "google.golang.org/api/iterator" @@ -445,7 +446,10 @@ func (q *Query) toProto(req *pb.RunQueryRequest) error { // with the sum of the query's offset and limit. Unless the result count is // expected to be small, it is best to specify a limit; otherwise Count will // continue until it finishes counting or the provided context expires. -func (c *Client) Count(ctx context.Context, q *Query) (int, error) { +func (c *Client) Count(ctx context.Context, q *Query) (_ int, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Query.Count") + defer func() { trace.EndSpan(ctx, err) }() + // Check that the query is well-formed. if q.err != nil { return 0, q.err @@ -492,7 +496,10 @@ func (c *Client) Count(ctx context.Context, q *Query) (int, error) { // expected to be small, it is best to specify a limit; otherwise GetAll will // continue until it finishes collecting results or the provided context // expires. 
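The oc_test.go file above registers an OpenCensus exporter to observe the spans that the new trace.StartSpan calls emit; a minimal sketch of doing the same in an application follows. The logExporter type is an illustrative assumption; trace is go.opencensus.io/trace and log is the standard library:

    type logExporter struct{}

    func (logExporter) ExportSpan(s *trace.SpanData) {
        log.Printf("span %q took %v", s.Name, s.EndTime.Sub(s.StartTime))
    }

    func init() {
        trace.RegisterExporter(logExporter{})
        // Sample everything, as oc_test.go does; suitable for development only.
        trace.SetDefaultSampler(trace.AlwaysSample())
    }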
-func (c *Client) GetAll(ctx context.Context, q *Query, dst interface{}) ([]*Key, error) { +func (c *Client) GetAll(ctx context.Context, q *Query, dst interface{}) (_ []*Key, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Query.GetAll") + defer func() { trace.EndSpan(ctx, err) }() + var ( dv reflect.Value mat multiArgType @@ -575,6 +582,9 @@ func (c *Client) Run(ctx context.Context, q *Query) *Iterator { ProjectId: c.dataset, }, } + + ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Query.Run") + defer func() { trace.EndSpan(ctx, t.err) }() if q.namespace != "" { t.req.PartitionId = &pb.PartitionId{ NamespaceId: q.namespace, @@ -622,7 +632,7 @@ type Iterator struct { // If the query is not keys only and dst is non-nil, it also loads the entity // stored for that key into the struct pointer or PropertyLoadSaver dst, with // the same semantics and possible errors as for the Get function. -func (t *Iterator) Next(dst interface{}) (*Key, error) { +func (t *Iterator) Next(dst interface{}) (_ *Key, err error) { k, e, err := t.next() if err != nil { return nil, err @@ -725,7 +735,10 @@ func (t *Iterator) nextBatch() error { } // Cursor returns a cursor for the iterator's current location. -func (t *Iterator) Cursor() (Cursor, error) { +func (t *Iterator) Cursor() (_ Cursor, err error) { + t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Query.Cursor") + defer func() { trace.EndSpan(t.ctx, err) }() + // If there is still an offset, we need to the skip those results first. for t.err == nil && t.offset > 0 { t.err = t.nextBatch() diff --git a/vendor/cloud.google.com/go/datastore/save.go b/vendor/cloud.google.com/go/datastore/save.go index 5cc557782..b96d07c90 100644 --- a/vendor/cloud.google.com/go/datastore/save.go +++ b/vendor/cloud.google.com/go/datastore/save.go @@ -88,9 +88,19 @@ func saveStructProperty(props *[]Property, name string, opts saveOpts, v reflect return saveSliceProperty(props, name, opts, v) } case reflect.Ptr: + if isValidPointerType(v.Type().Elem()) { + if v.IsNil() { + // Nil pointer becomes a nil property value (unless omitempty, handled above). + p.Value = nil + *props = append(*props, p) + return nil + } + return saveStructProperty(props, name, opts, v.Elem()) + } if v.Type().Elem().Kind() != reflect.Struct { return fmt.Errorf("datastore: unsupported struct field type: %s", v.Type()) } + // Pointer to struct is a special case. if v.IsNil() { return nil } @@ -395,10 +405,18 @@ func interfaceToProto(iv interface{}, noIndex bool) (*pb.Value, error) { // than the top-level value. val.ExcludeFromIndexes = false default: - if iv != nil { - return nil, fmt.Errorf("invalid Value type %t", iv) + rv := reflect.ValueOf(iv) + if !rv.IsValid() { + val.ValueType = &pb.Value_NullValue{} + } else if rv.Kind() == reflect.Ptr { // non-nil pointer: dereference + if rv.IsNil() { + val.ValueType = &pb.Value_NullValue{} + return val, nil + } + return interfaceToProto(rv.Elem().Interface(), noIndex) + } else { + return nil, fmt.Errorf("invalid Value type %T", iv) } - val.ValueType = &pb.Value_NullValue{} } // TODO(jbd): Support EntityValue. return val, nil @@ -423,3 +441,22 @@ func isEmptyValue(v reflect.Value) bool { } return false } + +// isValidPointerType reports whether a struct field can be a pointer to type t +// for the purposes of saving and loading. 
+func isValidPointerType(t reflect.Type) bool { + if t == typeOfTime || t == typeOfGeoPoint { + return true + } + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return true + case reflect.Bool: + return true + case reflect.String: + return true + case reflect.Float32, reflect.Float64: + return true + } + return false +} diff --git a/vendor/cloud.google.com/go/datastore/save_test.go b/vendor/cloud.google.com/go/datastore/save_test.go index cd28a7cc1..fbef3b79f 100644 --- a/vendor/cloud.google.com/go/datastore/save_test.go +++ b/vendor/cloud.google.com/go/datastore/save_test.go @@ -16,22 +16,32 @@ package datastore import ( "testing" + "time" "cloud.google.com/go/internal/testutil" pb "google.golang.org/genproto/googleapis/datastore/v1" ) -func TestInterfaceToProtoNilKey(t *testing.T) { - var iv *Key - pv, err := interfaceToProto(iv, false) - if err != nil { - t.Fatalf("nil key: interfaceToProto: %v", err) - } - - _, ok := pv.ValueType.(*pb.Value_NullValue) - if !ok { - t.Errorf("nil key: type:\ngot: %T\nwant: %T", pv.ValueType, &pb.Value_NullValue{}) +func TestInterfaceToProtoNil(t *testing.T) { + // A nil *Key, or a nil value of any other pointer type, should convert to a NullValue. + for _, in := range []interface{}{ + (*Key)(nil), + (*int)(nil), + (*string)(nil), + (*bool)(nil), + (*float64)(nil), + (*GeoPoint)(nil), + (*time.Time)(nil), + } { + got, err := interfaceToProto(in, false) + if err != nil { + t.Fatalf("%T: %v", in, err) + } + _, ok := got.ValueType.(*pb.Value_NullValue) + if !ok { + t.Errorf("%T: got: %T\nwant: %T", in, got.ValueType, &pb.Value_NullValue{}) + } } } @@ -193,3 +203,83 @@ func TestSaveEntityNested(t *testing.T) { } } } + +func TestSavePointers(t *testing.T) { + for _, test := range []struct { + desc string + in interface{} + want []Property + }{ + { + desc: "nil pointers save as nil-valued properties", + in: &Pointers{}, + want: []Property{ + Property{Name: "Pi", Value: nil}, + Property{Name: "Ps", Value: nil}, + Property{Name: "Pb", Value: nil}, + Property{Name: "Pf", Value: nil}, + Property{Name: "Pg", Value: nil}, + Property{Name: "Pt", Value: nil}, + }, + }, + { + desc: "nil omitempty pointers not saved", + in: &PointersOmitEmpty{}, + want: []Property(nil), + }, + { + desc: "non-nil zero-valued pointers save as zero values", + in: populatedPointers(), + want: []Property{ + Property{Name: "Pi", Value: int64(0)}, + Property{Name: "Ps", Value: ""}, + Property{Name: "Pb", Value: false}, + Property{Name: "Pf", Value: 0.0}, + Property{Name: "Pg", Value: GeoPoint{}}, + Property{Name: "Pt", Value: time.Time{}}, + }, + }, + { + desc: "non-nil non-zero-valued pointers save as the appropriate values", + in: func() *Pointers { + p := populatedPointers() + *p.Pi = 1 + *p.Ps = "x" + *p.Pb = true + *p.Pf = 3.14 + *p.Pg = GeoPoint{Lat: 1, Lng: 2} + *p.Pt = time.Unix(100, 0) + return p + }(), + want: []Property{ + Property{Name: "Pi", Value: int64(1)}, + Property{Name: "Ps", Value: "x"}, + Property{Name: "Pb", Value: true}, + Property{Name: "Pf", Value: 3.14}, + Property{Name: "Pg", Value: GeoPoint{Lat: 1, Lng: 2}}, + Property{Name: "Pt", Value: time.Unix(100, 0)}, + }, + }, + } { + got, err := SaveStruct(test.in) + if err != nil { + t.Fatalf("%s: %v", test.desc, err) + } + if !testutil.Equal(got, test.want) { + t.Errorf("%s\ngot %#v\nwant %#v\n", test.desc, got, test.want) + } + } +} + +func TestSaveEmptySlice(t *testing.T) { + // Zero-length slice fields are not saved. 
+ for _, slice := range [][]string{nil, {}} { + got, err := SaveStruct(&struct{ S []string }{S: slice}) + if err != nil { + t.Fatal(err) + } + if len(got) != 0 { + t.Errorf("%#v: got %d properties, wanted zero", slice, len(got)) + } + } +} diff --git a/vendor/cloud.google.com/go/datastore/transaction.go b/vendor/cloud.google.com/go/datastore/transaction.go index af5c427e7..ca25d2ae6 100644 --- a/vendor/cloud.google.com/go/datastore/transaction.go +++ b/vendor/cloud.google.com/go/datastore/transaction.go @@ -17,6 +17,7 @@ package datastore import ( "errors" + "cloud.google.com/go/internal/trace" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -32,6 +33,8 @@ var errExpiredTransaction = errors.New("datastore: transaction expired") type transactionSettings struct { attempts int + readOnly bool + prevID []byte // ID of the transaction to retry } // newTransactionSettings creates a transactionSettings with a given TransactionOption slice. @@ -62,6 +65,19 @@ func (w maxAttempts) apply(s *transactionSettings) { } } +// ReadOnly is a TransactionOption that marks the transaction as read-only. +var ReadOnly TransactionOption + +func init() { + ReadOnly = readOnly{} +} + +type readOnly struct{} + +func (readOnly) apply(s *transactionSettings) { + s.readOnly = true +} + // Transaction represents a set of datastore operations to be committed atomically. // // Operations are enqueued by calling the Put and Delete methods on Transaction @@ -80,20 +96,35 @@ type Transaction struct { } // NewTransaction starts a new transaction. -func (c *Client) NewTransaction(ctx context.Context, opts ...TransactionOption) (*Transaction, error) { +func (c *Client) NewTransaction(ctx context.Context, opts ...TransactionOption) (_ *Transaction, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.NewTransaction") + defer func() { trace.EndSpan(ctx, err) }() + for _, o := range opts { if _, ok := o.(maxAttempts); ok { return nil, errors.New("datastore: NewTransaction does not accept MaxAttempts option") } } - req := &pb.BeginTransactionRequest{ - ProjectId: c.dataset, + return c.newTransaction(ctx, newTransactionSettings(opts)) +} + +func (c *Client) newTransaction(ctx context.Context, s *transactionSettings) (*Transaction, error) { + req := &pb.BeginTransactionRequest{ProjectId: c.dataset} + if s.readOnly { + req.TransactionOptions = &pb.TransactionOptions{ + Mode: &pb.TransactionOptions_ReadOnly_{ReadOnly: &pb.TransactionOptions_ReadOnly{}}, + } + } else if s.prevID != nil { + req.TransactionOptions = &pb.TransactionOptions{ + Mode: &pb.TransactionOptions_ReadWrite_{ReadWrite: &pb.TransactionOptions_ReadWrite{ + PreviousTransaction: s.prevID, + }}, + } } resp, err := c.client.BeginTransaction(ctx, req) if err != nil { return nil, err } - return &Transaction{ id: resp.Transaction, ctx: ctx, @@ -125,10 +156,13 @@ func (c *Client) NewTransaction(ctx context.Context, opts ...TransactionOption) // is, it should have the same result when called multiple times. Note that // Transaction.Get will append when unmarshalling slice fields, so it is not // necessarily idempotent. 
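A short sketch of the ReadOnly option defined above; the counter type, key and helper name are illustrative assumptions, and the transaction body performs only reads, as doc.go requires:

    func sketchReadOnly(ctx context.Context, client *datastore.Client, key *datastore.Key) error {
        type counter struct{ N int }
        var c counter
        // Only Get, GetMulti and queries belong here; TestReadOnlyTransaction above
        // shows that a write in a read-only transaction returns an error.
        _, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error {
            return tx.Get(key, &c)
        }, datastore.ReadOnly)
        return err
    }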
-func (c *Client) RunInTransaction(ctx context.Context, f func(tx *Transaction) error, opts ...TransactionOption) (*Commit, error) { +func (c *Client) RunInTransaction(ctx context.Context, f func(tx *Transaction) error, opts ...TransactionOption) (_ *Commit, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.RunInTransaction") + defer func() { trace.EndSpan(ctx, err) }() + settings := newTransactionSettings(opts) for n := 0; n < settings.attempts; n++ { - tx, err := c.NewTransaction(ctx) + tx, err := c.newTransaction(ctx, settings) if err != nil { return nil, err } @@ -139,12 +173,20 @@ func (c *Client) RunInTransaction(ctx context.Context, f func(tx *Transaction) e if cmt, err := tx.Commit(); err != ErrConcurrentTransaction { return cmt, err } + // Pass this transaction's ID to the retry transaction to preserve + // transaction priority. + if !settings.readOnly { + settings.prevID = tx.id + } } return nil, ErrConcurrentTransaction } // Commit applies the enqueued operations atomically. -func (t *Transaction) Commit() (*Commit, error) { +func (t *Transaction) Commit() (_ *Commit, err error) { + t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.Commit") + defer func() { trace.EndSpan(t.ctx, err) }() + if t.id == nil { return nil, errExpiredTransaction } @@ -181,13 +223,16 @@ func (t *Transaction) Commit() (*Commit, error) { } // Rollback abandons a pending transaction. -func (t *Transaction) Rollback() error { +func (t *Transaction) Rollback() (err error) { + t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.Rollback") + defer func() { trace.EndSpan(t.ctx, err) }() + if t.id == nil { return errExpiredTransaction } id := t.id t.id = nil - _, err := t.client.client.Rollback(t.ctx, &pb.RollbackRequest{ + _, err = t.client.client.Rollback(t.ctx, &pb.RollbackRequest{ ProjectId: t.client.dataset, Transaction: id, }) @@ -199,11 +244,14 @@ func (t *Transaction) Rollback() error { // snapshot. Furthermore, if the transaction is set to a serializable isolation // level, another transaction cannot concurrently modify the data that is read // or modified by this transaction. -func (t *Transaction) Get(key *Key, dst interface{}) error { +func (t *Transaction) Get(key *Key, dst interface{}) (err error) { + t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.Get") + defer func() { trace.EndSpan(t.ctx, err) }() + opts := &pb.ReadOptions{ ConsistencyType: &pb.ReadOptions_Transaction{Transaction: t.id}, } - err := t.client.get(t.ctx, []*Key{key}, []interface{}{dst}, opts) + err = t.client.get(t.ctx, []*Key{key}, []interface{}{dst}, opts) if me, ok := err.(MultiError); ok { return me[0] } @@ -211,7 +259,10 @@ func (t *Transaction) Get(key *Key, dst interface{}) error { } // GetMulti is a batch version of Get. -func (t *Transaction) GetMulti(keys []*Key, dst interface{}) error { +func (t *Transaction) GetMulti(keys []*Key, dst interface{}) (err error) { + t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.GetMulti") + defer func() { trace.EndSpan(t.ctx, err) }() + if t.id == nil { return errExpiredTransaction } @@ -240,7 +291,11 @@ func (t *Transaction) Put(key *Key, src interface{}) (*PendingKey, error) { // PutMulti is a batch version of Put. One PendingKey is returned for each // element of src in the same order. -func (t *Transaction) PutMulti(keys []*Key, src interface{}) ([]*PendingKey, error) { +// TODO(jba): rewrite in terms of Mutate. 
+func (t *Transaction) PutMulti(keys []*Key, src interface{}) (_ []*PendingKey, err error) { + t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.PutMulti") + defer func() { trace.EndSpan(t.ctx, err) }() + if t.id == nil { return nil, errExpiredTransaction } @@ -279,7 +334,11 @@ func (t *Transaction) Delete(key *Key) error { } // DeleteMulti is a batch version of Delete. -func (t *Transaction) DeleteMulti(keys []*Key) error { +// TODO(jba): rewrite in terms of Mutate. +func (t *Transaction) DeleteMulti(keys []*Key) (err error) { + t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.DeleteMulti") + defer func() { trace.EndSpan(t.ctx, err) }() + if t.id == nil { return errExpiredTransaction } @@ -291,12 +350,53 @@ func (t *Transaction) DeleteMulti(keys []*Key) error { return nil } +// Mutate adds the mutations to the transaction. They will all be applied atomically +// upon calling Commit. Mutate returns a PendingKey for each Mutation in the argument +// list, in the same order. PendingKeys for Delete mutations are always nil. +// +// If any of the mutations are invalid, Mutate returns a MultiError with the errors. +// Mutate returns a MultiError in this case even if there is only one Mutation. +// +// For an example, see Client.Mutate. +func (t *Transaction) Mutate(muts ...*Mutation) ([]*PendingKey, error) { + if t.id == nil { + return nil, errExpiredTransaction + } + pmuts, err := mutationProtos(muts) + if err != nil { + return nil, err + } + origin := len(t.mutations) + t.mutations = append(t.mutations, pmuts...) + // Prepare the returned handles, pre-populating where possible. + ret := make([]*PendingKey, len(muts)) + for i, mut := range muts { + if mut.isDelete() { + continue + } + p := &PendingKey{} + if mut.key.Incomplete() { + // This key will be in the final commit result. + t.pending[origin+i] = p + } else { + p.key = mut.key + } + ret[i] = p + } + return ret, nil +} + // Commit represents the result of a committed transaction. type Commit struct{} // Key resolves a pending key handle into a final key. func (c *Commit) Key(p *PendingKey) *Key { - if c != p.commit { + if p == nil { // if called on a *PendingKey from a Delete mutation + return nil + } + // If p.commit is nil, the PendingKey did not come from an incomplete key, + // so p.key is valid. + if p.commit != nil && c != p.commit { panic("PendingKey was not created by corresponding transaction") } return p.key diff --git a/vendor/cloud.google.com/go/datastore/transaction_test.go b/vendor/cloud.google.com/go/datastore/transaction_test.go new file mode 100644 index 000000000..1655f5f35 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/transaction_test.go @@ -0,0 +1,78 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
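A sketch of Transaction.Mutate together with Commit.Key, following the doc comments above. The helper name is an assumption and Post is the type used by the package examples:

    func sketchTxMutate(ctx context.Context, client *datastore.Client) (*datastore.Key, error) {
        var pkeys []*datastore.PendingKey
        commit, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error {
            var err error
            pkeys, err = tx.Mutate(
                datastore.NewInsert(datastore.IncompleteKey("Post", nil), &Post{Title: "draft"}),
                datastore.NewDelete(datastore.NameKey("Post", "old", nil)),
            )
            return err
        })
        if err != nil {
            return nil, err
        }
        // pkeys[1] is nil because it came from a Delete mutation.
        return commit.Key(pkeys[0]), nil
    }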
+ +package datastore + +import ( + "testing" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +func TestNewTransaction(t *testing.T) { + var got *pb.BeginTransactionRequest + client := &Client{ + dataset: "project", + client: &fakeDatastoreClient{ + beginTransaction: func(req *pb.BeginTransactionRequest) (*pb.BeginTransactionResponse, error) { + got = req + return &pb.BeginTransactionResponse{ + Transaction: []byte("tid"), + }, nil + }, + }, + } + ctx := context.Background() + for _, test := range []struct { + settings *transactionSettings + want *pb.BeginTransactionRequest + }{ + { + &transactionSettings{}, + &pb.BeginTransactionRequest{ProjectId: "project"}, + }, + { + &transactionSettings{readOnly: true}, + &pb.BeginTransactionRequest{ + ProjectId: "project", + TransactionOptions: &pb.TransactionOptions{ + Mode: &pb.TransactionOptions_ReadOnly_{ReadOnly: &pb.TransactionOptions_ReadOnly{}}, + }, + }, + }, + { + &transactionSettings{prevID: []byte("tid")}, + &pb.BeginTransactionRequest{ + ProjectId: "project", + TransactionOptions: &pb.TransactionOptions{ + Mode: &pb.TransactionOptions_ReadWrite_{ReadWrite: &pb.TransactionOptions_ReadWrite{ + PreviousTransaction: []byte("tid"), + }, + }, + }, + }, + }, + } { + _, err := client.newTransaction(ctx, test.settings) + if err != nil { + t.Fatal(err) + } + if !proto.Equal(got, test.want) { + t.Errorf("%+v:\ngot %+v\nwant %+v", test.settings, got, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/dlp/apiv2/dlp_client.go b/vendor/cloud.google.com/go/dlp/apiv2/dlp_client.go new file mode 100644 index 000000000..b27b5dc65 --- /dev/null +++ b/vendor/cloud.google.com/go/dlp/apiv2/dlp_client.go @@ -0,0 +1,810 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package dlp + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. 
+type CallOptions struct { + InspectContent []gax.CallOption + RedactImage []gax.CallOption + DeidentifyContent []gax.CallOption + ReidentifyContent []gax.CallOption + ListInfoTypes []gax.CallOption + CreateInspectTemplate []gax.CallOption + UpdateInspectTemplate []gax.CallOption + GetInspectTemplate []gax.CallOption + ListInspectTemplates []gax.CallOption + DeleteInspectTemplate []gax.CallOption + CreateDeidentifyTemplate []gax.CallOption + UpdateDeidentifyTemplate []gax.CallOption + GetDeidentifyTemplate []gax.CallOption + ListDeidentifyTemplates []gax.CallOption + DeleteDeidentifyTemplate []gax.CallOption + CreateDlpJob []gax.CallOption + ListDlpJobs []gax.CallOption + GetDlpJob []gax.CallOption + DeleteDlpJob []gax.CallOption + CancelDlpJob []gax.CallOption + ListJobTriggers []gax.CallOption + GetJobTrigger []gax.CallOption + DeleteJobTrigger []gax.CallOption + UpdateJobTrigger []gax.CallOption + CreateJobTrigger []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("dlp.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &CallOptions{ + InspectContent: retry[[2]string{"default", "idempotent"}], + RedactImage: retry[[2]string{"default", "idempotent"}], + DeidentifyContent: retry[[2]string{"default", "idempotent"}], + ReidentifyContent: retry[[2]string{"default", "idempotent"}], + ListInfoTypes: retry[[2]string{"default", "idempotent"}], + CreateInspectTemplate: retry[[2]string{"default", "non_idempotent"}], + UpdateInspectTemplate: retry[[2]string{"default", "non_idempotent"}], + GetInspectTemplate: retry[[2]string{"default", "idempotent"}], + ListInspectTemplates: retry[[2]string{"default", "idempotent"}], + DeleteInspectTemplate: retry[[2]string{"default", "idempotent"}], + CreateDeidentifyTemplate: retry[[2]string{"default", "non_idempotent"}], + UpdateDeidentifyTemplate: retry[[2]string{"default", "non_idempotent"}], + GetDeidentifyTemplate: retry[[2]string{"default", "idempotent"}], + ListDeidentifyTemplates: retry[[2]string{"default", "idempotent"}], + DeleteDeidentifyTemplate: retry[[2]string{"default", "idempotent"}], + CreateDlpJob: retry[[2]string{"default", "non_idempotent"}], + ListDlpJobs: retry[[2]string{"default", "idempotent"}], + GetDlpJob: retry[[2]string{"default", "idempotent"}], + DeleteDlpJob: retry[[2]string{"default", "idempotent"}], + CancelDlpJob: retry[[2]string{"default", "non_idempotent"}], + ListJobTriggers: retry[[2]string{"default", "idempotent"}], + GetJobTrigger: retry[[2]string{"default", "idempotent"}], + DeleteJobTrigger: retry[[2]string{"default", "idempotent"}], + UpdateJobTrigger: retry[[2]string{"default", "non_idempotent"}], + CreateJobTrigger: retry[[2]string{"default", "non_idempotent"}], + } +} + +// Client is a client for interacting with DLP API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client dlppb.DlpServiceClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. 
+ xGoogMetadata metadata.MD +} + +// NewClient creates a new dlp service client. +// +// The DLP API is a service that allows clients +// to detect the presence of Personally Identifiable Information (PII) and other +// privacy-sensitive data in user-supplied, unstructured data streams, like text +// blocks or images. +// The service also includes methods for sensitive data redaction and +// scheduling of data scans on Google Cloud Platform based data sets. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: dlppb.NewDlpServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// InspectContent finds potentially sensitive info in content. +// This method has limits on input size, processing time, and output size. +// How-to guide for text (at /dlp/docs/inspecting-text), How-to guide for +// images (at /dlp/docs/inspecting-images) +func (c *Client) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest, opts ...gax.CallOption) (*dlppb.InspectContentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.InspectContent[0:len(c.CallOptions.InspectContent):len(c.CallOptions.InspectContent)], opts...) + var resp *dlppb.InspectContentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.InspectContent(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// RedactImage redacts potentially sensitive info from an image. +// This method has limits on input size, processing time, and output size. +// How-to guide (at /dlp/docs/redacting-sensitive-data-images) +func (c *Client) RedactImage(ctx context.Context, req *dlppb.RedactImageRequest, opts ...gax.CallOption) (*dlppb.RedactImageResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.RedactImage[0:len(c.CallOptions.RedactImage):len(c.CallOptions.RedactImage)], opts...) + var resp *dlppb.RedactImageResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.RedactImage(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeidentifyContent de-identifies potentially sensitive info from a ContentItem. +// This method has limits on input size and output size. 
+// How-to guide (at /dlp/docs/deidentify-sensitive-data) +func (c *Client) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest, opts ...gax.CallOption) (*dlppb.DeidentifyContentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeidentifyContent[0:len(c.CallOptions.DeidentifyContent):len(c.CallOptions.DeidentifyContent)], opts...) + var resp *dlppb.DeidentifyContentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.DeidentifyContent(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ReidentifyContent re-identify content that has been de-identified. +func (c *Client) ReidentifyContent(ctx context.Context, req *dlppb.ReidentifyContentRequest, opts ...gax.CallOption) (*dlppb.ReidentifyContentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ReidentifyContent[0:len(c.CallOptions.ReidentifyContent):len(c.CallOptions.ReidentifyContent)], opts...) + var resp *dlppb.ReidentifyContentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ReidentifyContent(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListInfoTypes returns sensitive information types DLP supports. +func (c *Client) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest, opts ...gax.CallOption) (*dlppb.ListInfoTypesResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListInfoTypes[0:len(c.CallOptions.ListInfoTypes):len(c.CallOptions.ListInfoTypes)], opts...) + var resp *dlppb.ListInfoTypesResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListInfoTypes(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateInspectTemplate creates an inspect template for re-using frequently used configuration +// for inspecting content, images, and storage. +func (c *Client) CreateInspectTemplate(ctx context.Context, req *dlppb.CreateInspectTemplateRequest, opts ...gax.CallOption) (*dlppb.InspectTemplate, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateInspectTemplate[0:len(c.CallOptions.CreateInspectTemplate):len(c.CallOptions.CreateInspectTemplate)], opts...) + var resp *dlppb.InspectTemplate + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateInspectTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateInspectTemplate updates the inspect template. +func (c *Client) UpdateInspectTemplate(ctx context.Context, req *dlppb.UpdateInspectTemplateRequest, opts ...gax.CallOption) (*dlppb.InspectTemplate, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateInspectTemplate[0:len(c.CallOptions.UpdateInspectTemplate):len(c.CallOptions.UpdateInspectTemplate)], opts...) + var resp *dlppb.InspectTemplate + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.UpdateInspectTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +// GetInspectTemplate gets an inspect template. +func (c *Client) GetInspectTemplate(ctx context.Context, req *dlppb.GetInspectTemplateRequest, opts ...gax.CallOption) (*dlppb.InspectTemplate, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetInspectTemplate[0:len(c.CallOptions.GetInspectTemplate):len(c.CallOptions.GetInspectTemplate)], opts...) + var resp *dlppb.InspectTemplate + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetInspectTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListInspectTemplates lists inspect templates. +func (c *Client) ListInspectTemplates(ctx context.Context, req *dlppb.ListInspectTemplatesRequest, opts ...gax.CallOption) *InspectTemplateIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListInspectTemplates[0:len(c.CallOptions.ListInspectTemplates):len(c.CallOptions.ListInspectTemplates)], opts...) + it := &InspectTemplateIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.InspectTemplate, string, error) { + var resp *dlppb.ListInspectTemplatesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListInspectTemplates(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.InspectTemplates, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// DeleteInspectTemplate deletes inspect templates. +func (c *Client) DeleteInspectTemplate(ctx context.Context, req *dlppb.DeleteInspectTemplateRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteInspectTemplate[0:len(c.CallOptions.DeleteInspectTemplate):len(c.CallOptions.DeleteInspectTemplate)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteInspectTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// CreateDeidentifyTemplate creates an Deidentify template for re-using frequently used configuration +// for Deidentifying content, images, and storage. +func (c *Client) CreateDeidentifyTemplate(ctx context.Context, req *dlppb.CreateDeidentifyTemplateRequest, opts ...gax.CallOption) (*dlppb.DeidentifyTemplate, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateDeidentifyTemplate[0:len(c.CallOptions.CreateDeidentifyTemplate):len(c.CallOptions.CreateDeidentifyTemplate)], opts...) + var resp *dlppb.DeidentifyTemplate + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateDeidentifyTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateDeidentifyTemplate updates the inspect template. +func (c *Client) UpdateDeidentifyTemplate(ctx context.Context, req *dlppb.UpdateDeidentifyTemplateRequest, opts ...gax.CallOption) (*dlppb.DeidentifyTemplate, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateDeidentifyTemplate[0:len(c.CallOptions.UpdateDeidentifyTemplate):len(c.CallOptions.UpdateDeidentifyTemplate)], opts...) + var resp *dlppb.DeidentifyTemplate + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.UpdateDeidentifyTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetDeidentifyTemplate gets an inspect template. +func (c *Client) GetDeidentifyTemplate(ctx context.Context, req *dlppb.GetDeidentifyTemplateRequest, opts ...gax.CallOption) (*dlppb.DeidentifyTemplate, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetDeidentifyTemplate[0:len(c.CallOptions.GetDeidentifyTemplate):len(c.CallOptions.GetDeidentifyTemplate)], opts...) + var resp *dlppb.DeidentifyTemplate + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetDeidentifyTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListDeidentifyTemplates lists inspect templates. +func (c *Client) ListDeidentifyTemplates(ctx context.Context, req *dlppb.ListDeidentifyTemplatesRequest, opts ...gax.CallOption) *DeidentifyTemplateIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListDeidentifyTemplates[0:len(c.CallOptions.ListDeidentifyTemplates):len(c.CallOptions.ListDeidentifyTemplates)], opts...) + it := &DeidentifyTemplateIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.DeidentifyTemplate, string, error) { + var resp *dlppb.ListDeidentifyTemplatesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListDeidentifyTemplates(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.DeidentifyTemplates, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// DeleteDeidentifyTemplate deletes inspect templates. +func (c *Client) DeleteDeidentifyTemplate(ctx context.Context, req *dlppb.DeleteDeidentifyTemplateRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteDeidentifyTemplate[0:len(c.CallOptions.DeleteDeidentifyTemplate):len(c.CallOptions.DeleteDeidentifyTemplate)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteDeidentifyTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ return err +} + +// CreateDlpJob create a new job to inspect storage or calculate risk metrics How-to +// guide (at /dlp/docs/compute-risk-analysis). +func (c *Client) CreateDlpJob(ctx context.Context, req *dlppb.CreateDlpJobRequest, opts ...gax.CallOption) (*dlppb.DlpJob, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateDlpJob[0:len(c.CallOptions.CreateDlpJob):len(c.CallOptions.CreateDlpJob)], opts...) + var resp *dlppb.DlpJob + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateDlpJob(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListDlpJobs lists DlpJobs that match the specified filter in the request. +func (c *Client) ListDlpJobs(ctx context.Context, req *dlppb.ListDlpJobsRequest, opts ...gax.CallOption) *DlpJobIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListDlpJobs[0:len(c.CallOptions.ListDlpJobs):len(c.CallOptions.ListDlpJobs)], opts...) + it := &DlpJobIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.DlpJob, string, error) { + var resp *dlppb.ListDlpJobsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListDlpJobs(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Jobs, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetDlpJob gets the latest state of a long-running DlpJob. +func (c *Client) GetDlpJob(ctx context.Context, req *dlppb.GetDlpJobRequest, opts ...gax.CallOption) (*dlppb.DlpJob, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetDlpJob[0:len(c.CallOptions.GetDlpJob):len(c.CallOptions.GetDlpJob)], opts...) + var resp *dlppb.DlpJob + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetDlpJob(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteDlpJob deletes a long-running DlpJob. This method indicates that the client is +// no longer interested in the DlpJob result. The job will be cancelled if +// possible. +func (c *Client) DeleteDlpJob(ctx context.Context, req *dlppb.DeleteDlpJobRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteDlpJob[0:len(c.CallOptions.DeleteDlpJob):len(c.CallOptions.DeleteDlpJob)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteDlpJob(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// CancelDlpJob starts asynchronous cancellation on a long-running DlpJob. The server +// makes a best effort to cancel the DlpJob, but success is not +// guaranteed. 
+func (c *Client) CancelDlpJob(ctx context.Context, req *dlppb.CancelDlpJobRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CancelDlpJob[0:len(c.CallOptions.CancelDlpJob):len(c.CallOptions.CancelDlpJob)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.CancelDlpJob(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListJobTriggers lists job triggers. +func (c *Client) ListJobTriggers(ctx context.Context, req *dlppb.ListJobTriggersRequest, opts ...gax.CallOption) *JobTriggerIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListJobTriggers[0:len(c.CallOptions.ListJobTriggers):len(c.CallOptions.ListJobTriggers)], opts...) + it := &JobTriggerIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.JobTrigger, string, error) { + var resp *dlppb.ListJobTriggersResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListJobTriggers(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.JobTriggers, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetJobTrigger gets a job trigger. +func (c *Client) GetJobTrigger(ctx context.Context, req *dlppb.GetJobTriggerRequest, opts ...gax.CallOption) (*dlppb.JobTrigger, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetJobTrigger[0:len(c.CallOptions.GetJobTrigger):len(c.CallOptions.GetJobTrigger)], opts...) + var resp *dlppb.JobTrigger + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetJobTrigger(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteJobTrigger deletes a job trigger. +func (c *Client) DeleteJobTrigger(ctx context.Context, req *dlppb.DeleteJobTriggerRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteJobTrigger[0:len(c.CallOptions.DeleteJobTrigger):len(c.CallOptions.DeleteJobTrigger)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteJobTrigger(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// UpdateJobTrigger updates a job trigger. +func (c *Client) UpdateJobTrigger(ctx context.Context, req *dlppb.UpdateJobTriggerRequest, opts ...gax.CallOption) (*dlppb.JobTrigger, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateJobTrigger[0:len(c.CallOptions.UpdateJobTrigger):len(c.CallOptions.UpdateJobTrigger)], opts...) + var resp *dlppb.JobTrigger + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.UpdateJobTrigger(ctx, req, settings.GRPC...) 
+ return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateJobTrigger creates a job to run DLP actions such as scanning storage for sensitive +// information on a set schedule. +func (c *Client) CreateJobTrigger(ctx context.Context, req *dlppb.CreateJobTriggerRequest, opts ...gax.CallOption) (*dlppb.JobTrigger, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateJobTrigger[0:len(c.CallOptions.CreateJobTrigger):len(c.CallOptions.CreateJobTrigger)], opts...) + var resp *dlppb.JobTrigger + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateJobTrigger(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeidentifyTemplateIterator manages a stream of *dlppb.DeidentifyTemplate. +type DeidentifyTemplateIterator struct { + items []*dlppb.DeidentifyTemplate + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*dlppb.DeidentifyTemplate, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *DeidentifyTemplateIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *DeidentifyTemplateIterator) Next() (*dlppb.DeidentifyTemplate, error) { + var item *dlppb.DeidentifyTemplate + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *DeidentifyTemplateIterator) bufLen() int { + return len(it.items) +} + +func (it *DeidentifyTemplateIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// DlpJobIterator manages a stream of *dlppb.DlpJob. +type DlpJobIterator struct { + items []*dlppb.DlpJob + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*dlppb.DlpJob, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *DlpJobIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *DlpJobIterator) Next() (*dlppb.DlpJob, error) { + var item *dlppb.DlpJob + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *DlpJobIterator) bufLen() int { + return len(it.items) +} + +func (it *DlpJobIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// InspectTemplateIterator manages a stream of *dlppb.InspectTemplate. +type InspectTemplateIterator struct { + items []*dlppb.InspectTemplate + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*dlppb.InspectTemplate, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *InspectTemplateIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *InspectTemplateIterator) Next() (*dlppb.InspectTemplate, error) { + var item *dlppb.InspectTemplate + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *InspectTemplateIterator) bufLen() int { + return len(it.items) +} + +func (it *InspectTemplateIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// JobTriggerIterator manages a stream of *dlppb.JobTrigger. +type JobTriggerIterator struct { + items []*dlppb.JobTrigger + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*dlppb.JobTrigger, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *JobTriggerIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *JobTriggerIterator) Next() (*dlppb.JobTrigger, error) { + var item *dlppb.JobTrigger + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *JobTriggerIterator) bufLen() int { + return len(it.items) +} + +func (it *JobTriggerIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/dlp/apiv2/dlp_client_example_test.go b/vendor/cloud.google.com/go/dlp/apiv2/dlp_client_example_test.go new file mode 100644 index 000000000..9a0f129e4 --- /dev/null +++ b/vendor/cloud.google.com/go/dlp/apiv2/dlp_client_example_test.go @@ -0,0 +1,498 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package dlp_test + +import ( + "cloud.google.com/go/dlp/apiv2" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_InspectContent() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.InspectContentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.InspectContent(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_RedactImage() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.RedactImageRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.RedactImage(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_DeidentifyContent() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.DeidentifyContentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.DeidentifyContent(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ReidentifyContent() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ReidentifyContentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ReidentifyContent(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListInfoTypes() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ListInfoTypesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListInfoTypes(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} + +func ExampleClient_CreateInspectTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.CreateInspectTemplateRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateInspectTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_UpdateInspectTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.UpdateInspectTemplateRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateInspectTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_GetInspectTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.GetInspectTemplateRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetInspectTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListInspectTemplates() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ListInspectTemplatesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListInspectTemplates(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_DeleteInspectTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.DeleteInspectTemplateRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteInspectTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_CreateDeidentifyTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.CreateDeidentifyTemplateRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateDeidentifyTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_UpdateDeidentifyTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.UpdateDeidentifyTemplateRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateDeidentifyTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_GetDeidentifyTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.GetDeidentifyTemplateRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetDeidentifyTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListDeidentifyTemplates() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ListDeidentifyTemplatesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListDeidentifyTemplates(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp + } +} + +func ExampleClient_DeleteDeidentifyTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.DeleteDeidentifyTemplateRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteDeidentifyTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_CreateDlpJob() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.CreateDlpJobRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateDlpJob(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListDlpJobs() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ListDlpJobsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListDlpJobs(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_GetDlpJob() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.GetDlpJobRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetDlpJob(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_DeleteDlpJob() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.DeleteDlpJobRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteDlpJob(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_CancelDlpJob() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.CancelDlpJobRequest{ + // TODO: Fill request struct fields. + } + err = c.CancelDlpJob(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_ListJobTriggers() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ListJobTriggersRequest{ + // TODO: Fill request struct fields. + } + it := c.ListJobTriggers(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_GetJobTrigger() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.GetJobTriggerRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetJobTrigger(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_DeleteJobTrigger() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.DeleteJobTriggerRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteJobTrigger(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_UpdateJobTrigger() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.UpdateJobTriggerRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateJobTrigger(ctx, req) + if err != nil { + // TODO: Handle error. 
+ } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_CreateJobTrigger() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.CreateJobTriggerRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateJobTrigger(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/dlp/apiv2/doc.go b/vendor/cloud.google.com/go/dlp/apiv2/doc.go new file mode 100644 index 000000000..0387c17ef --- /dev/null +++ b/vendor/cloud.google.com/go/dlp/apiv2/doc.go @@ -0,0 +1,45 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package dlp is an auto-generated package for the +// DLP API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +package dlp // import "cloud.google.com/go/dlp/apiv2" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/vendor/cloud.google.com/go/dlp/apiv2/mock_test.go b/vendor/cloud.google.com/go/dlp/apiv2/mock_test.go new file mode 100644 index 000000000..cda3a74b7 --- /dev/null +++ b/vendor/cloud.google.com/go/dlp/apiv2/mock_test.go @@ -0,0 +1,1902 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
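NewClient assembles defaultClientOptions and defaultCallOptions, and each method appends caller-supplied gax options after the defaults held in CallOptions, so retry behaviour can be tuned per call. Below is a minimal sketch of a one-off retry override for InspectContent; the package and function names are placeholders, and only the options, types, and request fields that appear in this client are relied on.

package dlpsketch // hypothetical

import (
	"time"

	"cloud.google.com/go/dlp/apiv2"
	gax "github.com/googleapis/gax-go"
	"golang.org/x/net/context"
	"google.golang.org/api/option"
	dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2"
	"google.golang.org/grpc/codes"
)

func inspectWithTightRetry(ctx context.Context, parent string) (*dlppb.InspectContentResponse, error) {
	// DefaultAuthScopes is what NewClient requests anyway; it is passed
	// explicitly here only to show where a custom scope list would go.
	c, err := dlp.NewClient(ctx, option.WithScopes(dlp.DefaultAuthScopes()...))
	if err != nil {
		return nil, err
	}
	defer c.Close()

	// A per-call retry option: it is appended after the defaults from
	// CallOptions.InspectContent, so it applies to this invocation only.
	retry := gax.WithRetry(func() gax.Retryer {
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
			Initial:    50 * time.Millisecond,
			Max:        2 * time.Second,
			Multiplier: 1.3,
		})
	})
	return c.InspectContent(ctx, &dlppb.InspectContentRequest{Parent: parent}, retry)
}

Passing the option to a single call leaves c.CallOptions untouched, so other calls keep the generated defaults.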
+ +package dlp + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockDlpServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + dlppb.DlpServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockDlpServer) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest) (*dlppb.InspectContentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.InspectContentResponse), nil +} + +func (s *mockDlpServer) RedactImage(ctx context.Context, req *dlppb.RedactImageRequest) (*dlppb.RedactImageResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.RedactImageResponse), nil +} + +func (s *mockDlpServer) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest) (*dlppb.DeidentifyContentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DeidentifyContentResponse), nil +} + +func (s *mockDlpServer) ReidentifyContent(ctx context.Context, req *dlppb.ReidentifyContentRequest) (*dlppb.ReidentifyContentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ReidentifyContentResponse), nil +} + +func (s *mockDlpServer) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest) (*dlppb.ListInfoTypesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ListInfoTypesResponse), nil +} + +func (s *mockDlpServer) CreateInspectTemplate(ctx context.Context, req *dlppb.CreateInspectTemplateRequest) (*dlppb.InspectTemplate, error) { + md, _ := 
metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.InspectTemplate), nil +} + +func (s *mockDlpServer) UpdateInspectTemplate(ctx context.Context, req *dlppb.UpdateInspectTemplateRequest) (*dlppb.InspectTemplate, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.InspectTemplate), nil +} + +func (s *mockDlpServer) GetInspectTemplate(ctx context.Context, req *dlppb.GetInspectTemplateRequest) (*dlppb.InspectTemplate, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.InspectTemplate), nil +} + +func (s *mockDlpServer) ListInspectTemplates(ctx context.Context, req *dlppb.ListInspectTemplatesRequest) (*dlppb.ListInspectTemplatesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ListInspectTemplatesResponse), nil +} + +func (s *mockDlpServer) DeleteInspectTemplate(ctx context.Context, req *dlppb.DeleteInspectTemplateRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockDlpServer) CreateDeidentifyTemplate(ctx context.Context, req *dlppb.CreateDeidentifyTemplateRequest) (*dlppb.DeidentifyTemplate, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DeidentifyTemplate), nil +} + +func (s *mockDlpServer) UpdateDeidentifyTemplate(ctx context.Context, req *dlppb.UpdateDeidentifyTemplateRequest) (*dlppb.DeidentifyTemplate, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DeidentifyTemplate), nil +} + +func (s *mockDlpServer) GetDeidentifyTemplate(ctx context.Context, req *dlppb.GetDeidentifyTemplateRequest) (*dlppb.DeidentifyTemplate, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { 
+ return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DeidentifyTemplate), nil +} + +func (s *mockDlpServer) ListDeidentifyTemplates(ctx context.Context, req *dlppb.ListDeidentifyTemplatesRequest) (*dlppb.ListDeidentifyTemplatesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ListDeidentifyTemplatesResponse), nil +} + +func (s *mockDlpServer) DeleteDeidentifyTemplate(ctx context.Context, req *dlppb.DeleteDeidentifyTemplateRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockDlpServer) CreateJobTrigger(ctx context.Context, req *dlppb.CreateJobTriggerRequest) (*dlppb.JobTrigger, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.JobTrigger), nil +} + +func (s *mockDlpServer) UpdateJobTrigger(ctx context.Context, req *dlppb.UpdateJobTriggerRequest) (*dlppb.JobTrigger, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.JobTrigger), nil +} + +func (s *mockDlpServer) GetJobTrigger(ctx context.Context, req *dlppb.GetJobTriggerRequest) (*dlppb.JobTrigger, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.JobTrigger), nil +} + +func (s *mockDlpServer) ListJobTriggers(ctx context.Context, req *dlppb.ListJobTriggersRequest) (*dlppb.ListJobTriggersResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ListJobTriggersResponse), nil +} + +func (s *mockDlpServer) DeleteJobTrigger(ctx context.Context, req *dlppb.DeleteJobTriggerRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + 
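The mock embeds dlppb.DlpServiceServer so that only the methods a test exercises need bodies, and the test binary reaches it over a real local gRPC connection handed to the client through option.WithGRPCConn. The same pattern can serve application-level tests; the sketch below is illustrative only, with the fake type, test name, and canned empty response all assumed rather than taken from this code.

package dlpsketch_test // hypothetical

import (
	"net"
	"testing"

	"cloud.google.com/go/dlp/apiv2"
	"golang.org/x/net/context"
	"google.golang.org/api/option"
	dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2"
	"google.golang.org/grpc"
)

// fakeDlp implements only InspectContent; the embedded interface supplies
// the rest, so the fake keeps compiling as methods are added to the service.
type fakeDlp struct {
	dlppb.DlpServiceServer
}

func (fakeDlp) InspectContent(context.Context, *dlppb.InspectContentRequest) (*dlppb.InspectContentResponse, error) {
	return &dlppb.InspectContentResponse{}, nil
}

func TestInspectContentAgainstFake(t *testing.T) {
	// Serve the fake on an ephemeral local port, as TestMain below does for
	// the generated mock.
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatal(err)
	}
	srv := grpc.NewServer()
	dlppb.RegisterDlpServiceServer(srv, fakeDlp{})
	go srv.Serve(lis)
	defer srv.Stop()

	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
	if err != nil {
		t.Fatal(err)
	}
	ctx := context.Background()
	c, err := dlp.NewClient(ctx, option.WithGRPCConn(conn))
	if err != nil {
		t.Fatal(err)
	}
	if _, err := c.InspectContent(ctx, &dlppb.InspectContentRequest{Parent: "projects/p"}); err != nil {
		t.Fatal(err)
	}
}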
+func (s *mockDlpServer) CreateDlpJob(ctx context.Context, req *dlppb.CreateDlpJobRequest) (*dlppb.DlpJob, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DlpJob), nil +} + +func (s *mockDlpServer) ListDlpJobs(ctx context.Context, req *dlppb.ListDlpJobsRequest) (*dlppb.ListDlpJobsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ListDlpJobsResponse), nil +} + +func (s *mockDlpServer) GetDlpJob(ctx context.Context, req *dlppb.GetDlpJobRequest) (*dlppb.DlpJob, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DlpJob), nil +} + +func (s *mockDlpServer) DeleteDlpJob(ctx context.Context, req *dlppb.DeleteDlpJobRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockDlpServer) CancelDlpJob(ctx context.Context, req *dlppb.CancelDlpJobRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
+var clientOpt option.ClientOption + +var ( + mockDlp mockDlpServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + dlppb.RegisterDlpServiceServer(serv, &mockDlp) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestDlpServiceInspectContent(t *testing.T) { + var expectedResponse *dlppb.InspectContentResponse = &dlppb.InspectContentResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.InspectContentRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.InspectContent(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceInspectContentError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.InspectContentRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.InspectContent(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceRedactImage(t *testing.T) { + var redactedImage []byte = []byte("28") + var extractedText string = "extractedText998260012" + var expectedResponse = &dlppb.RedactImageResponse{ + RedactedImage: redactedImage, + ExtractedText: extractedText, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.RedactImageRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.RedactImage(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceRedactImageError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.RedactImageRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.RedactImage(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != 
errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceDeidentifyContent(t *testing.T) { + var expectedResponse *dlppb.DeidentifyContentResponse = &dlppb.DeidentifyContentResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.DeidentifyContentRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.DeidentifyContent(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceDeidentifyContentError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.DeidentifyContentRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.DeidentifyContent(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceReidentifyContent(t *testing.T) { + var expectedResponse *dlppb.ReidentifyContentResponse = &dlppb.ReidentifyContentResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.ReidentifyContentRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ReidentifyContent(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceReidentifyContentError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.ReidentifyContentRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ReidentifyContent(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceListInfoTypes(t *testing.T) { + var expectedResponse *dlppb.ListInfoTypesResponse = &dlppb.ListInfoTypesResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var request *dlppb.ListInfoTypesRequest = &dlppb.ListInfoTypesRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err 
!= nil { + t.Fatal(err) + } + + resp, err := c.ListInfoTypes(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceListInfoTypesError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var request *dlppb.ListInfoTypesRequest = &dlppb.ListInfoTypesRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInfoTypes(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceCreateInspectTemplate(t *testing.T) { + var name string = "name3373707" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.InspectTemplate{ + Name: name, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.CreateInspectTemplateRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateInspectTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceCreateInspectTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.CreateInspectTemplateRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateInspectTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceUpdateInspectTemplate(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.InspectTemplate{ + Name: name2, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("organizations/%s/inspectTemplates/%s", "[ORGANIZATION]", "[INSPECT_TEMPLATE]") + var request = &dlppb.UpdateInspectTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateInspectTemplate(context.Background(), request) + + if err != 
nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceUpdateInspectTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("organizations/%s/inspectTemplates/%s", "[ORGANIZATION]", "[INSPECT_TEMPLATE]") + var request = &dlppb.UpdateInspectTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateInspectTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceGetInspectTemplate(t *testing.T) { + var name string = "name3373707" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.InspectTemplate{ + Name: name, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var request *dlppb.GetInspectTemplateRequest = &dlppb.GetInspectTemplateRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetInspectTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceGetInspectTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var request *dlppb.GetInspectTemplateRequest = &dlppb.GetInspectTemplateRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetInspectTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceListInspectTemplates(t *testing.T) { + var nextPageToken string = "" + var inspectTemplatesElement *dlppb.InspectTemplate = &dlppb.InspectTemplate{} + var inspectTemplates = []*dlppb.InspectTemplate{inspectTemplatesElement} + var expectedResponse = &dlppb.ListInspectTemplatesResponse{ + NextPageToken: nextPageToken, + InspectTemplates: inspectTemplates, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.ListInspectTemplatesRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInspectTemplates(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong 
request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.InspectTemplates[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceListInspectTemplatesError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.ListInspectTemplatesRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInspectTemplates(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceDeleteInspectTemplate(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("organizations/%s/inspectTemplates/%s", "[ORGANIZATION]", "[INSPECT_TEMPLATE]") + var request = &dlppb.DeleteInspectTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteInspectTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDlpServiceDeleteInspectTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("organizations/%s/inspectTemplates/%s", "[ORGANIZATION]", "[INSPECT_TEMPLATE]") + var request = &dlppb.DeleteInspectTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteInspectTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDlpServiceCreateDeidentifyTemplate(t *testing.T) { + var name string = "name3373707" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.DeidentifyTemplate{ + Name: name, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.CreateDeidentifyTemplateRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateDeidentifyTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong 
response %q, want %q)", got, want) + } +} + +func TestDlpServiceCreateDeidentifyTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.CreateDeidentifyTemplateRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateDeidentifyTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceUpdateDeidentifyTemplate(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.DeidentifyTemplate{ + Name: name2, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]") + var request = &dlppb.UpdateDeidentifyTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateDeidentifyTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceUpdateDeidentifyTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]") + var request = &dlppb.UpdateDeidentifyTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateDeidentifyTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceGetDeidentifyTemplate(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.DeidentifyTemplate{ + Name: name2, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]") + var request = &dlppb.GetDeidentifyTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDeidentifyTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + 
t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceGetDeidentifyTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]") + var request = &dlppb.GetDeidentifyTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDeidentifyTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceListDeidentifyTemplates(t *testing.T) { + var nextPageToken string = "" + var deidentifyTemplatesElement *dlppb.DeidentifyTemplate = &dlppb.DeidentifyTemplate{} + var deidentifyTemplates = []*dlppb.DeidentifyTemplate{deidentifyTemplatesElement} + var expectedResponse = &dlppb.ListDeidentifyTemplatesResponse{ + NextPageToken: nextPageToken, + DeidentifyTemplates: deidentifyTemplates, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.ListDeidentifyTemplatesRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDeidentifyTemplates(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.DeidentifyTemplates[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceListDeidentifyTemplatesError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.ListDeidentifyTemplatesRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDeidentifyTemplates(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceDeleteDeidentifyTemplate(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]") + var request = &dlppb.DeleteDeidentifyTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = 
c.DeleteDeidentifyTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDlpServiceDeleteDeidentifyTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]") + var request = &dlppb.DeleteDeidentifyTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteDeidentifyTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDlpServiceCreateDlpJob(t *testing.T) { + var name string = "name3373707" + var jobTriggerName string = "jobTriggerName1819490804" + var expectedResponse = &dlppb.DlpJob{ + Name: name, + JobTriggerName: jobTriggerName, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.CreateDlpJobRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateDlpJob(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceCreateDlpJobError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.CreateDlpJobRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateDlpJob(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceListDlpJobs(t *testing.T) { + var nextPageToken string = "" + var jobsElement *dlppb.DlpJob = &dlppb.DlpJob{} + var jobs = []*dlppb.DlpJob{jobsElement} + var expectedResponse = &dlppb.ListDlpJobsResponse{ + NextPageToken: nextPageToken, + Jobs: jobs, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.ListDlpJobsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDlpJobs(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Jobs[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { 
+ case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceListDlpJobsError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.ListDlpJobsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDlpJobs(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceGetDlpJob(t *testing.T) { + var name2 string = "name2-1052831874" + var jobTriggerName string = "jobTriggerName1819490804" + var expectedResponse = &dlppb.DlpJob{ + Name: name2, + JobTriggerName: jobTriggerName, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]") + var request = &dlppb.GetDlpJobRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDlpJob(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceGetDlpJobError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]") + var request = &dlppb.GetDlpJobRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDlpJob(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceDeleteDlpJob(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]") + var request = &dlppb.DeleteDlpJobRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteDlpJob(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDlpServiceDeleteDlpJobError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]") + var request = &dlppb.DeleteDlpJobRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil 
{ + t.Fatal(err) + } + + err = c.DeleteDlpJob(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDlpServiceCancelDlpJob(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]") + var request = &dlppb.CancelDlpJobRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.CancelDlpJob(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDlpServiceCancelDlpJobError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]") + var request = &dlppb.CancelDlpJobRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.CancelDlpJob(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDlpServiceListJobTriggers(t *testing.T) { + var nextPageToken string = "" + var jobTriggersElement *dlppb.JobTrigger = &dlppb.JobTrigger{} + var jobTriggers = []*dlppb.JobTrigger{jobTriggersElement} + var expectedResponse = &dlppb.ListJobTriggersResponse{ + NextPageToken: nextPageToken, + JobTriggers: jobTriggers, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.ListJobTriggersRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListJobTriggers(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.JobTriggers[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceListJobTriggersError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.ListJobTriggersRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListJobTriggers(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} 
+func TestDlpServiceGetJobTrigger(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.JobTrigger{ + Name: name2, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/jobTriggers/%s", "[PROJECT]", "[JOB_TRIGGER]") + var request = &dlppb.GetJobTriggerRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetJobTrigger(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceGetJobTriggerError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/jobTriggers/%s", "[PROJECT]", "[JOB_TRIGGER]") + var request = &dlppb.GetJobTriggerRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetJobTrigger(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceDeleteJobTrigger(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var name string = "name3373707" + var request = &dlppb.DeleteJobTriggerRequest{ + Name: name, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteJobTrigger(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDlpServiceDeleteJobTriggerError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var name string = "name3373707" + var request = &dlppb.DeleteJobTriggerRequest{ + Name: name, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteJobTrigger(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDlpServiceUpdateJobTrigger(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.JobTrigger{ + Name: name2, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/jobTriggers/%s", "[PROJECT]", "[JOB_TRIGGER]") + var request = 
&dlppb.UpdateJobTriggerRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateJobTrigger(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceUpdateJobTriggerError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/jobTriggers/%s", "[PROJECT]", "[JOB_TRIGGER]") + var request = &dlppb.UpdateJobTriggerRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateJobTrigger(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceCreateJobTrigger(t *testing.T) { + var name string = "name3373707" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.JobTrigger{ + Name: name, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.CreateJobTriggerRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateJobTrigger(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceCreateJobTriggerError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.CreateJobTriggerRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateJobTrigger(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/ReportErrorEvent_smoke_test.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/ReportErrorEvent_smoke_test.go index b18c61ba7..4aadfbd44 100644 --- a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/ReportErrorEvent_smoke_test.go +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/ReportErrorEvent_smoke_test.go @@ -69,13 +69,13 @@ func TestReportErrorsServiceSmoke(t *testing.T) { LineNumber: lineNumber, FunctionName: functionName, } - var context = &clouderrorreportingpb.ErrorContext{ + var context_ = &clouderrorreportingpb.ErrorContext{ 
ReportLocation: reportLocation, } var event = &clouderrorreportingpb.ReportedErrorEvent{ Message: message, ServiceContext: serviceContext, - Context: context, + Context: context_, } var request = &clouderrorreportingpb.ReportErrorEventRequest{ ProjectName: formattedProjectName, diff --git a/vendor/cloud.google.com/go/firestore/Makefile b/vendor/cloud.google.com/go/firestore/Makefile new file mode 100644 index 000000000..b1f9ff79a --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/Makefile @@ -0,0 +1,13 @@ +# Copy textproto files in this directory from the source of truth. + +SRC=$(GOPATH)/src/github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore + +.PHONY: refresh-tests + +refresh-tests: + -rm genproto/*.pb.go + cp $(SRC)/genproto/*.pb.go genproto + -rm testdata/*.textproto + cp $(SRC)/testdata/*.textproto testdata + openssl dgst -sha1 $(SRC)/testdata/test-suite.binproto > testdata/VERSION + diff --git a/vendor/cloud.google.com/go/firestore/client.go b/vendor/cloud.google.com/go/firestore/client.go index 058491aa0..6eb05fcb9 100644 --- a/vendor/cloud.google.com/go/firestore/client.go +++ b/vendor/cloud.google.com/go/firestore/client.go @@ -29,6 +29,7 @@ import ( pb "google.golang.org/genproto/googleapis/firestore/v1beta1" "github.com/golang/protobuf/ptypes" + tspb "github.com/golang/protobuf/ptypes/timestamp" "golang.org/x/net/context" "google.golang.org/api/option" "google.golang.org/grpc/metadata" @@ -128,6 +129,10 @@ func (c *Client) GetAll(ctx context.Context, docRefs []*DocumentRef) ([]*Documen if err := checkTransaction(ctx); err != nil { return nil, err } + return c.getAll(ctx, docRefs, nil) +} + +func (c *Client) getAll(ctx context.Context, docRefs []*DocumentRef, tid []byte) ([]*DocumentSnapshot, error) { var docNames []string for _, dr := range docRefs { if dr == nil { @@ -139,13 +144,21 @@ func (c *Client) GetAll(ctx context.Context, docRefs []*DocumentRef) ([]*Documen Database: c.path(), Documents: docNames, } + if tid != nil { + req.ConsistencySelector = &pb.BatchGetDocumentsRequest_Transaction{tid} + } streamClient, err := c.c.BatchGetDocuments(withResourceHeader(ctx, req.Database), req) if err != nil { return nil, err } // Read results from the stream and add them to a map. - docMap := map[string]*pb.Document{} + type result struct { + doc *pb.Document + readTime *tspb.Timestamp + } + + docMap := map[string]result{} for { res, err := streamClient.Recv() if err == io.EOF { @@ -156,13 +169,13 @@ func (c *Client) GetAll(ctx context.Context, docRefs []*DocumentRef) ([]*Documen } switch x := res.Result.(type) { case *pb.BatchGetDocumentsResponse_Found: - docMap[x.Found.Name] = x.Found + docMap[x.Found.Name] = result{x.Found, res.ReadTime} case *pb.BatchGetDocumentsResponse_Missing: - if docMap[x.Missing] != nil { - return nil, fmt.Errorf("firestore: %q both missing and present", x.Missing) + if _, ok := docMap[x.Missing]; ok { + return nil, fmt.Errorf("firestore: %q seen twice", x.Missing) } - docMap[x.Missing] = nil + docMap[x.Missing] = result{nil, res.ReadTime} default: return nil, errors.New("firestore: unknown BatchGetDocumentsResponse result type") } @@ -172,12 +185,12 @@ func (c *Client) GetAll(ctx context.Context, docRefs []*DocumentRef) ([]*Documen // DocumentRefs. 
docs := make([]*DocumentSnapshot, len(docNames)) for i, name := range docNames { - pbDoc, ok := docMap[name] + r, ok := docMap[name] if !ok { return nil, fmt.Errorf("firestore: passed %q to BatchGetDocuments but never saw response", name) } - if pbDoc != nil { - doc, err := newDocumentSnapshot(docRefs[i], pbDoc, c) + if r.doc != nil { + doc, err := newDocumentSnapshot(docRefs[i], r.doc, c, r.readTime) if err != nil { return nil, err } diff --git a/vendor/cloud.google.com/go/firestore/client_test.go b/vendor/cloud.google.com/go/firestore/client_test.go index 23b492cc9..ce3ab784c 100644 --- a/vendor/cloud.google.com/go/firestore/client_test.go +++ b/vendor/cloud.google.com/go/firestore/client_test.go @@ -17,10 +17,11 @@ package firestore import ( "testing" + tspb "github.com/golang/protobuf/ptypes/timestamp" "golang.org/x/net/context" pb "google.golang.org/genproto/googleapis/firestore/v1beta1" - "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) var testClient = &Client{ @@ -92,10 +93,23 @@ func TestClientCollDocErrors(t *testing.T) { } func TestGetAll(t *testing.T) { - ctx := context.Background() - const dbPath = "projects/projectID/databases/(default)" c, srv := newMock(t) defer c.Close() + const dbPath = "projects/projectID/databases/(default)" + req := &pb.BatchGetDocumentsRequest{ + Database: dbPath, + Documents: []string{ + dbPath + "/documents/C/a", + dbPath + "/documents/C/b", + dbPath + "/documents/C/c", + }, + } + testGetAll(t, c, srv, dbPath, func(drs []*DocumentRef) ([]*DocumentSnapshot, error) { + return c.GetAll(context.Background(), drs) + }, req) +} + +func testGetAll(t *testing.T, c *Client, srv *mockServer, dbPath string, getAll func([]*DocumentRef) ([]*DocumentSnapshot, error), req *pb.BatchGetDocumentsRequest) { wantPBDocs := []*pb.Document{ { Name: dbPath + "/documents/C/a", @@ -111,25 +125,21 @@ func TestGetAll(t *testing.T) { Fields: map[string]*pb.Value{"f": intval(1)}, }, } - srv.addRPC( - &pb.BatchGetDocumentsRequest{ - Database: dbPath, - Documents: []string{ - dbPath + "/documents/C/a", - dbPath + "/documents/C/b", - dbPath + "/documents/C/c", - }, - }, + wantReadTimes := []*tspb.Timestamp{aTimestamp, aTimestamp2, aTimestamp3} + srv.addRPC(req, []interface{}{ // deliberately put these out of order &pb.BatchGetDocumentsResponse{ - Result: &pb.BatchGetDocumentsResponse_Found{wantPBDocs[2]}, + Result: &pb.BatchGetDocumentsResponse_Found{wantPBDocs[2]}, + ReadTime: aTimestamp3, }, &pb.BatchGetDocumentsResponse{ - Result: &pb.BatchGetDocumentsResponse_Found{wantPBDocs[0]}, + Result: &pb.BatchGetDocumentsResponse_Found{wantPBDocs[0]}, + ReadTime: aTimestamp, }, &pb.BatchGetDocumentsResponse{ - Result: &pb.BatchGetDocumentsResponse_Missing{dbPath + "/documents/C/b"}, + Result: &pb.BatchGetDocumentsResponse_Missing{dbPath + "/documents/C/b"}, + ReadTime: aTimestamp2, }, }, ) @@ -138,7 +148,7 @@ func TestGetAll(t *testing.T) { for _, name := range []string{"a", "b", "c"} { docRefs = append(docRefs, coll.Doc(name)) } - docs, err := c.GetAll(ctx, docRefs) + docs, err := getAll(docRefs) if err != nil { t.Fatal(err) } @@ -148,7 +158,7 @@ func TestGetAll(t *testing.T) { for i, got := range docs { var want *DocumentSnapshot if wantPBDocs[i] != nil { - want, err = newDocumentSnapshot(docRefs[i], wantPBDocs[i], c) + want, err = newDocumentSnapshot(docRefs[i], wantPBDocs[i], c, wantReadTimes[i]) if err != nil { t.Fatal(err) } @@ -176,7 +186,7 @@ func TestGetAllErrors(t *testing.T) { Database: dbPath, Documents: []string{docPath}, }, - 
[]interface{}{grpc.Errorf(codes.Internal, "")}, + []interface{}{status.Errorf(codes.Internal, "")}, ) _, err := c.GetAll(ctx, []*DocumentRef{c.Doc("C/a")}) codeEq(t, "GetAll #1", codes.Internal, err) @@ -190,10 +200,12 @@ func TestGetAllErrors(t *testing.T) { }, []interface{}{ &pb.BatchGetDocumentsResponse{ - Result: &pb.BatchGetDocumentsResponse_Found{&pb.Document{Name: docPath}}, + Result: &pb.BatchGetDocumentsResponse_Found{&pb.Document{Name: docPath}}, + ReadTime: aTimestamp, }, &pb.BatchGetDocumentsResponse{ - Result: &pb.BatchGetDocumentsResponse_Missing{docPath}, + Result: &pb.BatchGetDocumentsResponse_Missing{docPath}, + ReadTime: aTimestamp, }, }, ) diff --git a/vendor/cloud.google.com/go/firestore/cross_language_test.go b/vendor/cloud.google.com/go/firestore/cross_language_test.go index 8575ac9f1..a06b7eab2 100644 --- a/vendor/cloud.google.com/go/firestore/cross_language_test.go +++ b/vendor/cloud.google.com/go/firestore/cross_language_test.go @@ -18,6 +18,7 @@ package firestore import ( "encoding/json" + "errors" "fmt" "io/ioutil" "path" @@ -63,22 +64,34 @@ func runTestFromFile(t *testing.T, filename string) { } func runTest(t *testing.T, msg string, test *pb.Test) { - check := func(gotErr error, wantErr bool) { + check := func(gotErr error, wantErr bool) bool { if wantErr && gotErr == nil { t.Errorf("%s: got nil, want error", msg) + return false } else if !wantErr && gotErr != nil { t.Errorf("%s: %v", msg, gotErr) + return false } + return true } ctx := context.Background() c, srv := newMock(t) - switch tt := test.Test.(type) { case *pb.Test_Get: - srv.addRPC(tt.Get.Request, &fspb.Document{ - CreateTime: &ts.Timestamp{}, - UpdateTime: &ts.Timestamp{}, + req := &fspb.BatchGetDocumentsRequest{ + Database: c.path(), + Documents: []string{tt.Get.DocRefPath}, + } + srv.addRPC(req, []interface{}{ + &fspb.BatchGetDocumentsResponse{ + Result: &fspb.BatchGetDocumentsResponse_Found{&fspb.Document{ + Name: tt.Get.DocRefPath, + CreateTime: &ts.Timestamp{}, + UpdateTime: &ts.Timestamp{}, + }}, + ReadTime: &ts.Timestamp{}, + }, }) ref := docRefFromPath(tt.Get.DocRefPath, c) _, err := ref.Get(ctx) @@ -126,14 +139,13 @@ func runTest(t *testing.T, msg string, test *pb.Test) { paths := convertFieldPaths(tt.UpdatePaths.FieldPaths) var ups []Update for i, path := range paths { - jsonValue := tt.UpdatePaths.JsonValues[i] - var val interface{} - if err := json.Unmarshal([]byte(jsonValue), &val); err != nil { - t.Fatalf("%s: %q: %v", msg, jsonValue, err) + val, err := convertJSONValue(tt.UpdatePaths.JsonValues[i]) + if err != nil { + t.Fatalf("%s: %v", msg, err) } ups = append(ups, Update{ FieldPath: path, - Value: convertTestValue(val), + Value: val, }) } _, err := ref.Update(ctx, ups, preconds...) @@ -146,6 +158,15 @@ func runTest(t *testing.T, msg string, test *pb.Test) { _, err := ref.Delete(ctx, preconds...) 
check(err, tt.Delete.IsError) + case *pb.Test_Query: + q := convertQuery(t, tt.Query) + got, err := q.toProto() + if check(err, tt.Query.IsError) && err == nil { + if want := tt.Query.Query; !proto.Equal(got, want) { + t.Errorf("%s\ngot: %s\nwant: %s", msg, proto.MarshalTextString(got), proto.MarshalTextString(want)) + } + } + default: t.Fatalf("unknown test type %T", tt) } @@ -159,6 +180,14 @@ func docRefFromPath(p string, c *Client) *DocumentRef { } } +func convertJSONValue(jv string) (interface{}, error) { + var val interface{} + if err := json.Unmarshal([]byte(jv), &val); err != nil { + return nil, err + } + return convertTestValue(val), nil +} + func convertData(jsonData string) (map[string]interface{}, error) { var m map[string]interface{} if err := json.Unmarshal([]byte(jsonData), &m); err != nil { @@ -236,3 +265,90 @@ func convertPrecondition(t *testing.T, fp *fspb.Precondition) []Precondition { } return []Precondition{pc} } + +func convertQuery(t *testing.T, qt *pb.QueryTest) Query { + parts := strings.Split(qt.CollPath, "/") + q := Query{ + parentPath: strings.Join(parts[:len(parts)-2], "/"), + collectionID: parts[len(parts)-1], + } + for _, c := range qt.Clauses { + switch c := c.Clause.(type) { + case *pb.Clause_Select: + q = q.SelectPaths(convertFieldPaths(c.Select.Fields)...) + case *pb.Clause_OrderBy: + var dir Direction + switch c.OrderBy.Direction { + case "asc": + dir = Asc + case "desc": + dir = Desc + default: + t.Fatalf("bad direction: %q", c.OrderBy.Direction) + } + q = q.OrderByPath(FieldPath(c.OrderBy.Path.Field), dir) + case *pb.Clause_Where: + val, err := convertJSONValue(c.Where.JsonValue) + if err != nil { + t.Fatal(err) + } + q = q.WherePath(FieldPath(c.Where.Path.Field), c.Where.Op, val) + case *pb.Clause_Offset: + q = q.Offset(int(c.Offset)) + case *pb.Clause_Limit: + q = q.Limit(int(c.Limit)) + case *pb.Clause_StartAt: + q = q.StartAt(convertCursor(t, c.StartAt)...) + case *pb.Clause_StartAfter: + q = q.StartAfter(convertCursor(t, c.StartAfter)...) + case *pb.Clause_EndAt: + q = q.EndAt(convertCursor(t, c.EndAt)...) + case *pb.Clause_EndBefore: + q = q.EndBefore(convertCursor(t, c.EndBefore)...) + default: + t.Fatalf("bad clause type %T", c) + } + } + return q +} + +// Returns args to a cursor method (StartAt, etc.). +func convertCursor(t *testing.T, c *pb.Cursor) []interface{} { + if c.DocSnapshot != nil { + ds, err := convertDocSnapshot(c.DocSnapshot) + if err != nil { + t.Fatal(err) + } + return []interface{}{ds} + } + var vals []interface{} + for _, jv := range c.JsonValues { + v, err := convertJSONValue(jv) + if err != nil { + t.Fatal(err) + } + vals = append(vals, v) + } + return vals +} + +func convertDocSnapshot(ds *pb.DocSnapshot) (*DocumentSnapshot, error) { + data, err := convertData(ds.JsonData) + if err != nil { + return nil, err + } + doc, transformPaths, err := toProtoDocument(data) + if err != nil { + return nil, err + } + if len(transformPaths) > 0 { + return nil, errors.New("saw transform paths in DocSnapshot") + } + return &DocumentSnapshot{ + Ref: &DocumentRef{ + Path: ds.Path, + Parent: &CollectionRef{Path: path.Dir(ds.Path)}, + }, + proto: doc, + }, nil +} diff --git a/vendor/cloud.google.com/go/firestore/doc.go b/vendor/cloud.google.com/go/firestore/doc.go index d0f45fa03..1ed24060a 100644 --- a/vendor/cloud.google.com/go/firestore/doc.go +++ b/vendor/cloud.google.com/go/firestore/doc.go @@ -21,6 +21,9 @@ database. 
See https://cloud.google.com/firestore/docs for an introduction to Cloud Firestore and additional help on using the Firestore API. +Note: you can't use both Cloud Firestore and Cloud Datastore in the same +project. + Creating a Client To start working with this package, create a client with a project ID: diff --git a/vendor/cloud.google.com/go/firestore/docref.go b/vendor/cloud.google.com/go/firestore/docref.go index 6cf93329f..81178a08d 100644 --- a/vendor/cloud.google.com/go/firestore/docref.go +++ b/vendor/cloud.google.com/go/firestore/docref.go @@ -22,6 +22,8 @@ import ( "golang.org/x/net/context" "google.golang.org/api/iterator" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" vkit "cloud.google.com/go/firestore/apiv1beta1" pb "google.golang.org/genproto/googleapis/firestore/v1beta1" @@ -64,12 +66,14 @@ func (d *DocumentRef) Get(ctx context.Context) (*DocumentSnapshot, error) { if d == nil { return nil, errNilDocRef } - doc, err := d.Parent.c.c.GetDocument(withResourceHeader(ctx, d.Parent.c.path()), - &pb.GetDocumentRequest{Name: d.Path}) + docsnaps, err := d.Parent.c.getAll(ctx, []*DocumentRef{d}, nil) if err != nil { return nil, err } - return newDocumentSnapshot(d, doc, d.Parent.c) + if docsnaps[0] == nil { + return nil, status.Errorf(codes.NotFound, "%q not found", d.Path) + } + return docsnaps[0], nil } // Create creates the document with the given data. diff --git a/vendor/cloud.google.com/go/firestore/docref_test.go b/vendor/cloud.google.com/go/firestore/docref_test.go index 6b8415d74..92d31fdf5 100644 --- a/vendor/cloud.google.com/go/firestore/docref_test.go +++ b/vendor/cloud.google.com/go/firestore/docref_test.go @@ -45,7 +45,15 @@ func TestDocGet(t *testing.T) { UpdateTime: aTimestamp, Fields: map[string]*pb.Value{"f": intval(1)}, } - srv.addRPC(&pb.GetDocumentRequest{Name: path}, pdoc) + srv.addRPC(&pb.BatchGetDocumentsRequest{ + Database: c.path(), + Documents: []string{path}, + }, []interface{}{ + &pb.BatchGetDocumentsResponse{ + Result: &pb.BatchGetDocumentsResponse_Found{pdoc}, + ReadTime: aTimestamp2, + }, + }) ref := c.Collection("C").Doc("a") gotDoc, err := ref.Get(ctx) if err != nil { @@ -55,6 +63,7 @@ func TestDocGet(t *testing.T) { Ref: ref, CreateTime: aTime, UpdateTime: aTime, + ReadTime: aTime2, proto: pdoc, c: c, } @@ -62,12 +71,17 @@ func TestDocGet(t *testing.T) { t.Fatalf("\ngot %+v\nwant %+v", gotDoc, wantDoc) } + path2 := "projects/projectID/databases/(default)/documents/C/b" srv.addRPC( - &pb.GetDocumentRequest{ - Name: "projects/projectID/databases/(default)/documents/C/b", - }, - grpc.Errorf(codes.NotFound, "not found"), - ) + &pb.BatchGetDocumentsRequest{ + Database: c.path(), + Documents: []string{path2}, + }, []interface{}{ + &pb.BatchGetDocumentsResponse{ + Result: &pb.BatchGetDocumentsResponse_Missing{path2}, + ReadTime: aTimestamp3, + }, + }) _, err = c.Collection("C").Doc("b").Get(ctx) if grpc.Code(err) != codes.NotFound { t.Errorf("got %v, want NotFound", err) diff --git a/vendor/cloud.google.com/go/firestore/document.go b/vendor/cloud.google.com/go/firestore/document.go index 4a650f4cf..093e00612 100644 --- a/vendor/cloud.google.com/go/firestore/document.go +++ b/vendor/cloud.google.com/go/firestore/document.go @@ -23,6 +23,7 @@ import ( pb "google.golang.org/genproto/googleapis/firestore/v1beta1" "github.com/golang/protobuf/ptypes" + tspb "github.com/golang/protobuf/ptypes/timestamp" ) // A DocumentSnapshot contains document data and metadata. 
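With the docref.go change above, DocumentRef.Get is built on the BatchGetDocuments RPC rather than GetDocument: a missing document is reported as a gRPC NotFound status, and successful snapshots now carry the server read time (the ReadTime field added to DocumentSnapshot in the next hunk). A minimal caller-side sketch, assuming a project ID, collection, and document name that are purely illustrative:

// Illustrative only: how a caller sees the new Get behavior. The project ID
// and document path are assumptions, not part of the vendored change.
package firestoreexample

import (
	"fmt"

	"cloud.google.com/go/firestore"
	"golang.org/x/net/context"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func getState(ctx context.Context) error {
	client, err := firestore.NewClient(ctx, "my-project-id")
	if err != nil {
		return err
	}
	defer client.Close()

	snap, err := client.Collection("States").Doc("NewYork").Get(ctx)
	switch {
	case status.Code(err) == codes.NotFound:
		// Missing documents now surface as a NotFound status, since Get is
		// implemented on top of BatchGetDocuments.
		fmt.Println("document does not exist")
		return nil
	case err != nil:
		return err
	}
	fmt.Println("read at", snap.ReadTime) // ReadTime is the new snapshot field.
	return nil
}
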
@@ -42,6 +43,9 @@ type DocumentSnapshot struct { // documents and the read time of a query. UpdateTime time.Time + // Read-only. The time at which the document was read. + ReadTime time.Time + c *Client proto *pb.Document } @@ -241,7 +245,7 @@ func extractTransformPathsFromStruct(v reflect.Value, prefix FieldPath) ([]Field return paths, nil } -func newDocumentSnapshot(ref *DocumentRef, proto *pb.Document, c *Client) (*DocumentSnapshot, error) { +func newDocumentSnapshot(ref *DocumentRef, proto *pb.Document, c *Client, readTime *tspb.Timestamp) (*DocumentSnapshot, error) { d := &DocumentSnapshot{ Ref: ref, c: c, @@ -257,5 +261,13 @@ func newDocumentSnapshot(ref *DocumentRef, proto *pb.Document, c *Client) (*Docu return nil, err } d.UpdateTime = ts + // TODO(jba): remove nil check when all callers pass a read time. + if readTime != nil { + ts, err = ptypes.Timestamp(readTime) + if err != nil { + return nil, err + } + d.ReadTime = ts + } return d, nil } diff --git a/vendor/cloud.google.com/go/firestore/document_test.go b/vendor/cloud.google.com/go/firestore/document_test.go index df3ffda07..982b41a2c 100644 --- a/vendor/cloud.google.com/go/firestore/document_test.go +++ b/vendor/cloud.google.com/go/firestore/document_test.go @@ -69,10 +69,11 @@ func TestNewDocumentSnapshot(t *testing.T) { Ref: docRef, CreateTime: time.Unix(10, 0).UTC(), UpdateTime: time.Unix(20, 0).UTC(), + ReadTime: aTime, proto: in, c: c, } - got, err := newDocumentSnapshot(docRef, in, c) + got, err := newDocumentSnapshot(docRef, in, c, aTimestamp) if err != nil { t.Fatal(err) } diff --git a/vendor/cloud.google.com/go/firestore/from_value.go b/vendor/cloud.google.com/go/firestore/from_value.go index cc940b681..e3d79bf3c 100644 --- a/vendor/cloud.google.com/go/firestore/from_value.go +++ b/vendor/cloud.google.com/go/firestore/from_value.go @@ -73,6 +73,14 @@ func setReflectFromProtoValue(v reflect.Value, vproto *pb.Value, c *Client) erro v.Set(reflect.ValueOf(t)) return nil + case typeOfProtoTimestamp: + x, ok := val.(*pb.Value_TimestampValue) + if !ok { + return typeErr() + } + v.Set(reflect.ValueOf(x.TimestampValue)) + return nil + case typeOfLatLng: x, ok := val.(*pb.Value_GeoPointValue) if !ok { diff --git a/vendor/cloud.google.com/go/firestore/from_value_test.go b/vendor/cloud.google.com/go/firestore/from_value_test.go index c5a1b8b07..1327fb476 100644 --- a/vendor/cloud.google.com/go/firestore/from_value_test.go +++ b/vendor/cloud.google.com/go/firestore/from_value_test.go @@ -24,14 +24,16 @@ import ( "testing" "time" + ts "github.com/golang/protobuf/ptypes/timestamp" pb "google.golang.org/genproto/googleapis/firestore/v1beta1" "google.golang.org/genproto/googleapis/type/latlng" ) var ( - tm = time.Date(2016, 12, 25, 0, 0, 0, 123456789, time.UTC) - ll = &latlng.LatLng{Latitude: 20, Longitude: 30} + tm = time.Date(2016, 12, 25, 0, 0, 0, 123456789, time.UTC) + ll = &latlng.LatLng{Latitude: 20, Longitude: 30} + ptm = &ts.Timestamp{12345, 67890} ) func TestCreateFromProtoValue(t *testing.T) { @@ -187,6 +189,7 @@ func TestSetFromProtoValueNoJSON(t *testing.T) { bs []byte tmi time.Time lli *latlng.LatLng + tmp *ts.Timestamp ) bytes := []byte{1, 2, 3} @@ -197,6 +200,7 @@ func TestSetFromProtoValueNoJSON(t *testing.T) { }{ {&bs, bytesval(bytes), bytes}, {&tmi, tsval(tm), tm}, + {&tmp, &pb.Value{&pb.Value_TimestampValue{ptm}}, ptm}, {&lli, geoval(ll), ll}, } { if err := setFromProtoValue(test.in, test.val, &Client{}); err != nil { diff --git a/vendor/cloud.google.com/go/firestore/genproto/README.md 
b/vendor/cloud.google.com/go/firestore/genproto/README.md deleted file mode 100644 index 146e7f465..000000000 --- a/vendor/cloud.google.com/go/firestore/genproto/README.md +++ /dev/null @@ -1,3 +0,0 @@ -The contents of this directory are copied from -github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/genproto. - diff --git a/vendor/cloud.google.com/go/firestore/genproto/test.pb.go b/vendor/cloud.google.com/go/firestore/genproto/test.pb.go index 39f35e49d..fe6973d81 100644 --- a/vendor/cloud.google.com/go/firestore/genproto/test.pb.go +++ b/vendor/cloud.google.com/go/firestore/genproto/test.pb.go @@ -9,6 +9,7 @@ It is generated from these files: test.proto It has these top-level messages: + TestSuite Test GetTest CreateTest @@ -17,6 +18,13 @@ It has these top-level messages: UpdatePathsTest DeleteTest SetOption + QueryTest + Clause + Select + Where + OrderBy + Cursor + DocSnapshot FieldPath */ package tests @@ -26,6 +34,7 @@ import fmt "fmt" import math "math" import google_firestore_v1beta14 "google.golang.org/genproto/googleapis/firestore/v1beta1" import google_firestore_v1beta1 "google.golang.org/genproto/googleapis/firestore/v1beta1" +import google_firestore_v1beta12 "google.golang.org/genproto/googleapis/firestore/v1beta1" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -38,6 +47,23 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +// A collection of tests. +type TestSuite struct { + Tests []*Test `protobuf:"bytes,1,rep,name=tests" json:"tests,omitempty"` +} + +func (m *TestSuite) Reset() { *m = TestSuite{} } +func (m *TestSuite) String() string { return proto.CompactTextString(m) } +func (*TestSuite) ProtoMessage() {} +func (*TestSuite) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *TestSuite) GetTests() []*Test { + if m != nil { + return m.Tests + } + return nil +} + // A Test describes a single client method call and its expected result. type Test struct { Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` @@ -48,13 +74,14 @@ type Test struct { // *Test_Update // *Test_UpdatePaths // *Test_Delete + // *Test_Query Test isTest_Test `protobuf_oneof:"test"` } func (m *Test) Reset() { *m = Test{} } func (m *Test) String() string { return proto.CompactTextString(m) } func (*Test) ProtoMessage() {} -func (*Test) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Test) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } type isTest_Test interface { isTest_Test() @@ -78,6 +105,9 @@ type Test_UpdatePaths struct { type Test_Delete struct { Delete *DeleteTest `protobuf:"bytes,7,opt,name=delete,oneof"` } +type Test_Query struct { + Query *QueryTest `protobuf:"bytes,8,opt,name=query,oneof"` +} func (*Test_Get) isTest_Test() {} func (*Test_Create) isTest_Test() {} @@ -85,6 +115,7 @@ func (*Test_Set) isTest_Test() {} func (*Test_Update) isTest_Test() {} func (*Test_UpdatePaths) isTest_Test() {} func (*Test_Delete) isTest_Test() {} +func (*Test_Query) isTest_Test() {} func (m *Test) GetTest() isTest_Test { if m != nil { @@ -142,6 +173,13 @@ func (m *Test) GetDelete() *DeleteTest { return nil } +func (m *Test) GetQuery() *QueryTest { + if x, ok := m.GetTest().(*Test_Query); ok { + return x.Query + } + return nil +} + // XXX_OneofFuncs is for the internal use of the proto package. 
func (*Test) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _Test_OneofMarshaler, _Test_OneofUnmarshaler, _Test_OneofSizer, []interface{}{ @@ -151,6 +189,7 @@ func (*Test) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, f (*Test_Update)(nil), (*Test_UpdatePaths)(nil), (*Test_Delete)(nil), + (*Test_Query)(nil), } } @@ -188,6 +227,11 @@ func _Test_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { if err := b.EncodeMessage(x.Delete); err != nil { return err } + case *Test_Query: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Query); err != nil { + return err + } case nil: default: return fmt.Errorf("Test.Test has unexpected type %T", x) @@ -246,6 +290,14 @@ func _Test_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) ( err := b.DecodeMessage(msg) m.Test = &Test_Delete{msg} return true, err + case 8: // test.query + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(QueryTest) + err := b.DecodeMessage(msg) + m.Test = &Test_Query{msg} + return true, err default: return false, nil } @@ -285,6 +337,11 @@ func _Test_OneofSizer(msg proto.Message) (n int) { n += proto.SizeVarint(7<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s + case *Test_Query: + s := proto.Size(x.Query) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) @@ -303,7 +360,7 @@ type GetTest struct { func (m *GetTest) Reset() { *m = GetTest{} } func (m *GetTest) String() string { return proto.CompactTextString(m) } func (*GetTest) ProtoMessage() {} -func (*GetTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (*GetTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } func (m *GetTest) GetDocRefPath() string { if m != nil { @@ -337,7 +394,7 @@ type CreateTest struct { func (m *CreateTest) Reset() { *m = CreateTest{} } func (m *CreateTest) String() string { return proto.CompactTextString(m) } func (*CreateTest) ProtoMessage() {} -func (*CreateTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (*CreateTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } func (m *CreateTest) GetDocRefPath() string { if m != nil { @@ -379,7 +436,7 @@ type SetTest struct { func (m *SetTest) Reset() { *m = SetTest{} } func (m *SetTest) String() string { return proto.CompactTextString(m) } func (*SetTest) ProtoMessage() {} -func (*SetTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (*SetTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } func (m *SetTest) GetDocRefPath() string { if m != nil { @@ -429,7 +486,7 @@ type UpdateTest struct { func (m *UpdateTest) Reset() { *m = UpdateTest{} } func (m *UpdateTest) String() string { return proto.CompactTextString(m) } func (*UpdateTest) ProtoMessage() {} -func (*UpdateTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (*UpdateTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } func (m *UpdateTest) GetDocRefPath() string { if m != nil { @@ -481,7 +538,7 @@ type UpdatePathsTest struct { func (m *UpdatePathsTest) Reset() { *m = UpdatePathsTest{} } func (m *UpdatePathsTest) String() string { return proto.CompactTextString(m) } func (*UpdatePathsTest) 
ProtoMessage() {} -func (*UpdatePathsTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (*UpdatePathsTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } func (m *UpdatePathsTest) GetDocRefPath() string { if m != nil { @@ -536,7 +593,7 @@ type DeleteTest struct { func (m *DeleteTest) Reset() { *m = DeleteTest{} } func (m *DeleteTest) String() string { return proto.CompactTextString(m) } func (*DeleteTest) ProtoMessage() {} -func (*DeleteTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (*DeleteTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (m *DeleteTest) GetDocRefPath() string { if m != nil { @@ -575,7 +632,7 @@ type SetOption struct { func (m *SetOption) Reset() { *m = SetOption{} } func (m *SetOption) String() string { return proto.CompactTextString(m) } func (*SetOption) ProtoMessage() {} -func (*SetOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (*SetOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } func (m *SetOption) GetAll() bool { if m != nil { @@ -591,7 +648,495 @@ func (m *SetOption) GetFields() []*FieldPath { return nil } -// A field path. +type QueryTest struct { + CollPath string `protobuf:"bytes,1,opt,name=coll_path,json=collPath" json:"coll_path,omitempty"` + Clauses []*Clause `protobuf:"bytes,2,rep,name=clauses" json:"clauses,omitempty"` + Query *google_firestore_v1beta12.StructuredQuery `protobuf:"bytes,3,opt,name=query" json:"query,omitempty"` + IsError bool `protobuf:"varint,4,opt,name=is_error,json=isError" json:"is_error,omitempty"` +} + +func (m *QueryTest) Reset() { *m = QueryTest{} } +func (m *QueryTest) String() string { return proto.CompactTextString(m) } +func (*QueryTest) ProtoMessage() {} +func (*QueryTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *QueryTest) GetCollPath() string { + if m != nil { + return m.CollPath + } + return "" +} + +func (m *QueryTest) GetClauses() []*Clause { + if m != nil { + return m.Clauses + } + return nil +} + +func (m *QueryTest) GetQuery() *google_firestore_v1beta12.StructuredQuery { + if m != nil { + return m.Query + } + return nil +} + +func (m *QueryTest) GetIsError() bool { + if m != nil { + return m.IsError + } + return false +} + +type Clause struct { + // Types that are valid to be assigned to Clause: + // *Clause_Select + // *Clause_Where + // *Clause_OrderBy + // *Clause_Offset + // *Clause_Limit + // *Clause_StartAt + // *Clause_StartAfter + // *Clause_EndAt + // *Clause_EndBefore + Clause isClause_Clause `protobuf_oneof:"clause"` +} + +func (m *Clause) Reset() { *m = Clause{} } +func (m *Clause) String() string { return proto.CompactTextString(m) } +func (*Clause) ProtoMessage() {} +func (*Clause) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +type isClause_Clause interface { + isClause_Clause() +} + +type Clause_Select struct { + Select *Select `protobuf:"bytes,1,opt,name=select,oneof"` +} +type Clause_Where struct { + Where *Where `protobuf:"bytes,2,opt,name=where,oneof"` +} +type Clause_OrderBy struct { + OrderBy *OrderBy `protobuf:"bytes,3,opt,name=order_by,json=orderBy,oneof"` +} +type Clause_Offset struct { + Offset int32 `protobuf:"varint,4,opt,name=offset,oneof"` +} +type Clause_Limit struct { + Limit int32 `protobuf:"varint,5,opt,name=limit,oneof"` +} +type Clause_StartAt struct { + StartAt *Cursor `protobuf:"bytes,6,opt,name=start_at,json=startAt,oneof"` +} +type Clause_StartAfter struct { + 
StartAfter *Cursor `protobuf:"bytes,7,opt,name=start_after,json=startAfter,oneof"` +} +type Clause_EndAt struct { + EndAt *Cursor `protobuf:"bytes,8,opt,name=end_at,json=endAt,oneof"` +} +type Clause_EndBefore struct { + EndBefore *Cursor `protobuf:"bytes,9,opt,name=end_before,json=endBefore,oneof"` +} + +func (*Clause_Select) isClause_Clause() {} +func (*Clause_Where) isClause_Clause() {} +func (*Clause_OrderBy) isClause_Clause() {} +func (*Clause_Offset) isClause_Clause() {} +func (*Clause_Limit) isClause_Clause() {} +func (*Clause_StartAt) isClause_Clause() {} +func (*Clause_StartAfter) isClause_Clause() {} +func (*Clause_EndAt) isClause_Clause() {} +func (*Clause_EndBefore) isClause_Clause() {} + +func (m *Clause) GetClause() isClause_Clause { + if m != nil { + return m.Clause + } + return nil +} + +func (m *Clause) GetSelect() *Select { + if x, ok := m.GetClause().(*Clause_Select); ok { + return x.Select + } + return nil +} + +func (m *Clause) GetWhere() *Where { + if x, ok := m.GetClause().(*Clause_Where); ok { + return x.Where + } + return nil +} + +func (m *Clause) GetOrderBy() *OrderBy { + if x, ok := m.GetClause().(*Clause_OrderBy); ok { + return x.OrderBy + } + return nil +} + +func (m *Clause) GetOffset() int32 { + if x, ok := m.GetClause().(*Clause_Offset); ok { + return x.Offset + } + return 0 +} + +func (m *Clause) GetLimit() int32 { + if x, ok := m.GetClause().(*Clause_Limit); ok { + return x.Limit + } + return 0 +} + +func (m *Clause) GetStartAt() *Cursor { + if x, ok := m.GetClause().(*Clause_StartAt); ok { + return x.StartAt + } + return nil +} + +func (m *Clause) GetStartAfter() *Cursor { + if x, ok := m.GetClause().(*Clause_StartAfter); ok { + return x.StartAfter + } + return nil +} + +func (m *Clause) GetEndAt() *Cursor { + if x, ok := m.GetClause().(*Clause_EndAt); ok { + return x.EndAt + } + return nil +} + +func (m *Clause) GetEndBefore() *Cursor { + if x, ok := m.GetClause().(*Clause_EndBefore); ok { + return x.EndBefore + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Clause) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Clause_OneofMarshaler, _Clause_OneofUnmarshaler, _Clause_OneofSizer, []interface{}{ + (*Clause_Select)(nil), + (*Clause_Where)(nil), + (*Clause_OrderBy)(nil), + (*Clause_Offset)(nil), + (*Clause_Limit)(nil), + (*Clause_StartAt)(nil), + (*Clause_StartAfter)(nil), + (*Clause_EndAt)(nil), + (*Clause_EndBefore)(nil), + } +} + +func _Clause_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Clause) + // clause + switch x := m.Clause.(type) { + case *Clause_Select: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Select); err != nil { + return err + } + case *Clause_Where: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Where); err != nil { + return err + } + case *Clause_OrderBy: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.OrderBy); err != nil { + return err + } + case *Clause_Offset: + b.EncodeVarint(4<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Offset)) + case *Clause_Limit: + b.EncodeVarint(5<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Limit)) + case *Clause_StartAt: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StartAt); err != nil { + return err + } + case *Clause_StartAfter: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StartAfter); err != nil { + return err + } + case *Clause_EndAt: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.EndAt); err != nil { + return err + } + case *Clause_EndBefore: + b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.EndBefore); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Clause.Clause has unexpected type %T", x) + } + return nil +} + +func _Clause_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Clause) + switch tag { + case 1: // clause.select + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Select) + err := b.DecodeMessage(msg) + m.Clause = &Clause_Select{msg} + return true, err + case 2: // clause.where + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Where) + err := b.DecodeMessage(msg) + m.Clause = &Clause_Where{msg} + return true, err + case 3: // clause.order_by + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(OrderBy) + err := b.DecodeMessage(msg) + m.Clause = &Clause_OrderBy{msg} + return true, err + case 4: // clause.offset + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Clause = &Clause_Offset{int32(x)} + return true, err + case 5: // clause.limit + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Clause = &Clause_Limit{int32(x)} + return true, err + case 6: // clause.start_at + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Cursor) + err := b.DecodeMessage(msg) + m.Clause = &Clause_StartAt{msg} + return true, err + case 7: // clause.start_after + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Cursor) + err := b.DecodeMessage(msg) + m.Clause = &Clause_StartAfter{msg} + return true, err + case 8: // clause.end_at + if wire != 
proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Cursor) + err := b.DecodeMessage(msg) + m.Clause = &Clause_EndAt{msg} + return true, err + case 9: // clause.end_before + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Cursor) + err := b.DecodeMessage(msg) + m.Clause = &Clause_EndBefore{msg} + return true, err + default: + return false, nil + } +} + +func _Clause_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Clause) + // clause + switch x := m.Clause.(type) { + case *Clause_Select: + s := proto.Size(x.Select) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Clause_Where: + s := proto.Size(x.Where) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Clause_OrderBy: + s := proto.Size(x.OrderBy) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Clause_Offset: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Offset)) + case *Clause_Limit: + n += proto.SizeVarint(5<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Limit)) + case *Clause_StartAt: + s := proto.Size(x.StartAt) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Clause_StartAfter: + s := proto.Size(x.StartAfter) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Clause_EndAt: + s := proto.Size(x.EndAt) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Clause_EndBefore: + s := proto.Size(x.EndBefore) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Select struct { + Fields []*FieldPath `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty"` +} + +func (m *Select) Reset() { *m = Select{} } +func (m *Select) String() string { return proto.CompactTextString(m) } +func (*Select) ProtoMessage() {} +func (*Select) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *Select) GetFields() []*FieldPath { + if m != nil { + return m.Fields + } + return nil +} + +type Where struct { + Path *FieldPath `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + Op string `protobuf:"bytes,2,opt,name=op" json:"op,omitempty"` + JsonValue string `protobuf:"bytes,3,opt,name=json_value,json=jsonValue" json:"json_value,omitempty"` +} + +func (m *Where) Reset() { *m = Where{} } +func (m *Where) String() string { return proto.CompactTextString(m) } +func (*Where) ProtoMessage() {} +func (*Where) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *Where) GetPath() *FieldPath { + if m != nil { + return m.Path + } + return nil +} + +func (m *Where) GetOp() string { + if m != nil { + return m.Op + } + return "" +} + +func (m *Where) GetJsonValue() string { + if m != nil { + return m.JsonValue + } + return "" +} + +type OrderBy struct { + Path *FieldPath `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + Direction string `protobuf:"bytes,2,opt,name=direction" json:"direction,omitempty"` +} + +func (m *OrderBy) Reset() { *m = OrderBy{} } +func (m *OrderBy) String() string { return proto.CompactTextString(m) } +func (*OrderBy) ProtoMessage() {} +func (*OrderBy) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m 
*OrderBy) GetPath() *FieldPath { + if m != nil { + return m.Path + } + return nil +} + +func (m *OrderBy) GetDirection() string { + if m != nil { + return m.Direction + } + return "" +} + +type Cursor struct { + // one of: + DocSnapshot *DocSnapshot `protobuf:"bytes,1,opt,name=doc_snapshot,json=docSnapshot" json:"doc_snapshot,omitempty"` + JsonValues []string `protobuf:"bytes,2,rep,name=json_values,json=jsonValues" json:"json_values,omitempty"` +} + +func (m *Cursor) Reset() { *m = Cursor{} } +func (m *Cursor) String() string { return proto.CompactTextString(m) } +func (*Cursor) ProtoMessage() {} +func (*Cursor) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *Cursor) GetDocSnapshot() *DocSnapshot { + if m != nil { + return m.DocSnapshot + } + return nil +} + +func (m *Cursor) GetJsonValues() []string { + if m != nil { + return m.JsonValues + } + return nil +} + +type DocSnapshot struct { + Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + JsonData string `protobuf:"bytes,2,opt,name=json_data,json=jsonData" json:"json_data,omitempty"` +} + +func (m *DocSnapshot) Reset() { *m = DocSnapshot{} } +func (m *DocSnapshot) String() string { return proto.CompactTextString(m) } +func (*DocSnapshot) ProtoMessage() {} +func (*DocSnapshot) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *DocSnapshot) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *DocSnapshot) GetJsonData() string { + if m != nil { + return m.JsonData + } + return "" +} + type FieldPath struct { Field []string `protobuf:"bytes,1,rep,name=field" json:"field,omitempty"` } @@ -599,7 +1144,7 @@ type FieldPath struct { func (m *FieldPath) Reset() { *m = FieldPath{} } func (m *FieldPath) String() string { return proto.CompactTextString(m) } func (*FieldPath) ProtoMessage() {} -func (*FieldPath) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (*FieldPath) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } func (m *FieldPath) GetField() []string { if m != nil { @@ -609,6 +1154,7 @@ func (m *FieldPath) GetField() []string { } func init() { + proto.RegisterType((*TestSuite)(nil), "tests.TestSuite") proto.RegisterType((*Test)(nil), "tests.Test") proto.RegisterType((*GetTest)(nil), "tests.GetTest") proto.RegisterType((*CreateTest)(nil), "tests.CreateTest") @@ -617,46 +1163,81 @@ func init() { proto.RegisterType((*UpdatePathsTest)(nil), "tests.UpdatePathsTest") proto.RegisterType((*DeleteTest)(nil), "tests.DeleteTest") proto.RegisterType((*SetOption)(nil), "tests.SetOption") + proto.RegisterType((*QueryTest)(nil), "tests.QueryTest") + proto.RegisterType((*Clause)(nil), "tests.Clause") + proto.RegisterType((*Select)(nil), "tests.Select") + proto.RegisterType((*Where)(nil), "tests.Where") + proto.RegisterType((*OrderBy)(nil), "tests.OrderBy") + proto.RegisterType((*Cursor)(nil), "tests.Cursor") + proto.RegisterType((*DocSnapshot)(nil), "tests.DocSnapshot") proto.RegisterType((*FieldPath)(nil), "tests.FieldPath") } func init() { proto.RegisterFile("test.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 559 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xc4, 0x55, 0x4d, 0x6f, 0xd3, 0x40, - 0x10, 0xc5, 0x71, 0xe2, 0x24, 0x93, 0x08, 0xca, 0x0a, 0x21, 0x53, 0x0e, 0x18, 0x4b, 0x40, 0x24, - 0x50, 0xaa, 0xc0, 0x91, 0x13, 0x34, 0xb4, 0x88, 0x0b, 0xd5, 0x16, 0xb8, 0x46, 0xae, 0x3d, 0x09, - 0x46, 0x8e, 0xd7, 0xec, 0xae, 0xfb, 0x9f, 0x38, 0x72, 
0xe7, 0x47, 0x70, 0xe4, 0x8f, 0x70, 0x47, - 0xfb, 0xe1, 0xda, 0x06, 0x59, 0xca, 0xa1, 0xb4, 0xb7, 0xf5, 0x9b, 0x37, 0x1f, 0xef, 0xcd, 0x6e, - 0x02, 0x20, 0x51, 0xc8, 0x79, 0xc1, 0x99, 0x64, 0x64, 0xa0, 0xce, 0x62, 0x7f, 0xb6, 0x61, 0x6c, - 0x93, 0xe1, 0xc1, 0x3a, 0xe5, 0x28, 0x24, 0xe3, 0x78, 0x70, 0xbe, 0x38, 0x43, 0x19, 0x2d, 0x6a, - 0xc4, 0x24, 0xec, 0x3f, 0xea, 0x64, 0xc6, 0x6c, 0xbb, 0x65, 0xb9, 0xa1, 0x85, 0x3f, 0x7a, 0xd0, - 0xff, 0x80, 0x42, 0x92, 0x00, 0x26, 0x09, 0x8a, 0x98, 0xa7, 0x85, 0x4c, 0x59, 0xee, 0x3b, 0x81, - 0x33, 0x1b, 0xd3, 0x26, 0x44, 0x42, 0x70, 0x37, 0x28, 0xfd, 0x5e, 0xe0, 0xcc, 0x26, 0xcf, 0x6f, - 0xce, 0xf5, 0x40, 0xf3, 0x63, 0x94, 0x2a, 0xfd, 0xed, 0x0d, 0xaa, 0x82, 0xe4, 0x29, 0x78, 0x31, - 0xc7, 0x48, 0xa2, 0xef, 0x6a, 0xda, 0x6d, 0x4b, 0x3b, 0xd4, 0xa0, 0x65, 0x5a, 0x8a, 0x2a, 0x28, - 0x50, 0xfa, 0xfd, 0x56, 0xc1, 0xd3, 0xba, 0xa0, 0x30, 0x05, 0xcb, 0x22, 0x51, 0x05, 0x07, 0xad, - 0x82, 0x1f, 0x35, 0x58, 0x15, 0x34, 0x14, 0xf2, 0x12, 0xa6, 0xe6, 0xb4, 0x2a, 0x22, 0xf9, 0x59, - 0xf8, 0x9e, 0x4e, 0xb9, 0xdb, 0x4a, 0x39, 0x51, 0x11, 0x9b, 0x37, 0x29, 0x6b, 0x48, 0x75, 0x4a, - 0x30, 0x43, 0x89, 0xfe, 0xb0, 0xd5, 0x69, 0xa9, 0xc1, 0xaa, 0x93, 0xa1, 0xbc, 0xf6, 0xa0, 0xaf, - 0xa2, 0xa1, 0x80, 0xa1, 0x75, 0x80, 0x04, 0x30, 0x4d, 0x58, 0xbc, 0xe2, 0xb8, 0xd6, 0xdd, 0xad, - 0x83, 0x90, 0xb0, 0x98, 0xe2, 0x5a, 0xb5, 0x20, 0x47, 0x30, 0xe4, 0xf8, 0xb5, 0x44, 0x51, 0x99, - 0xf8, 0x6c, 0x6e, 0x96, 0x34, 0xaf, 0x97, 0x67, 0x97, 0xa4, 0x7c, 0x5d, 0xb2, 0xb8, 0xdc, 0x62, - 0x2e, 0xa9, 0xc9, 0xa1, 0x55, 0x72, 0xf8, 0xcd, 0x01, 0xa8, 0x0d, 0xdd, 0xa1, 0xf1, 0x7d, 0x18, - 0x7f, 0x11, 0x2c, 0x5f, 0x25, 0x91, 0x8c, 0x74, 0xeb, 0x31, 0x1d, 0x29, 0x60, 0x19, 0xc9, 0x88, - 0xbc, 0xaa, 0xa7, 0x32, 0x3b, 0x7b, 0xd2, 0x3d, 0xd5, 0x21, 0xdb, 0x6e, 0xd3, 0x7f, 0x06, 0x22, - 0xf7, 0x60, 0x94, 0x8a, 0x15, 0x72, 0xce, 0xb8, 0xde, 0xe6, 0x88, 0x0e, 0x53, 0xf1, 0x46, 0x7d, - 0x86, 0x3f, 0x1d, 0x18, 0x9e, 0xee, 0xec, 0xd0, 0x0c, 0x3c, 0x66, 0xee, 0x9f, 0x31, 0x68, 0xaf, - 0xbe, 0x14, 0xef, 0x35, 0x4e, 0x6d, 0xbc, 0x2d, 0xc9, 0xed, 0x96, 0xd4, 0xbf, 0x04, 0x49, 0x83, - 0xb6, 0xa4, 0xdf, 0x0e, 0x40, 0x7d, 0xfd, 0x76, 0x50, 0xf5, 0x0e, 0xa6, 0x05, 0xc7, 0x98, 0xe5, - 0x49, 0xda, 0xd0, 0xf6, 0xb8, 0x7b, 0xa6, 0x93, 0x06, 0x9b, 0xb6, 0x72, 0xaf, 0x53, 0xf7, 0xf7, - 0x1e, 0xdc, 0xfa, 0xeb, 0x0d, 0x5d, 0xb1, 0xf8, 0x05, 0x4c, 0xd6, 0x29, 0x66, 0x89, 0x7d, 0xde, - 0x6e, 0xe0, 0x36, 0xee, 0xc8, 0x91, 0x8a, 0xa8, 0x96, 0x14, 0xd6, 0xd5, 0x51, 0x90, 0x07, 0x30, - 0xd1, 0x7e, 0x9d, 0x47, 0x59, 0x89, 0xc2, 0xef, 0x07, 0xae, 0x9a, 0x4f, 0x41, 0x9f, 0x34, 0xd2, - 0xf4, 0x6c, 0x70, 0x09, 0x9e, 0x79, 0x6d, 0xcf, 0x7e, 0x39, 0x00, 0xf5, 0x0f, 0xc8, 0x15, 0xdb, - 0xf5, 0x7f, 0x5f, 0xf6, 0x31, 0x8c, 0x2f, 0x9e, 0x25, 0xd9, 0x03, 0x37, 0xca, 0x32, 0xad, 0x67, - 0x44, 0xd5, 0x51, 0x3d, 0x65, 0xbd, 0x06, 0xe1, 0xf7, 0x3a, 0xd6, 0x64, 0xe3, 0xe1, 0x43, 0x18, - 0x5f, 0x80, 0xe4, 0x0e, 0x0c, 0x34, 0xec, 0x3b, 0x7a, 0x53, 0xe6, 0xe3, 0xcc, 0xd3, 0x7f, 0x56, - 0x2f, 0xfe, 0x04, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x8e, 0x38, 0xdd, 0x12, 0x07, 0x00, 0x00, + // 994 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xc4, 0x56, 0x5f, 0x6f, 0xdc, 0x44, + 0x10, 0xaf, 0x7d, 0x67, 0x9f, 0x3d, 0x0e, 0xa5, 0xac, 0x50, 0x65, 0x0a, 0x88, 0xab, 0x15, 0x92, + 0x83, 0xa2, 0x2b, 0x09, 0xe2, 0x09, 0x09, 0x94, 0x4b, 0x48, 0x2a, 0xa4, 0xaa, 0xc1, 0x57, 0xe0, + 0x05, 0xe9, 0x70, 0xec, 0x71, 0x62, 0xe4, 0xf3, 0x5e, 0x77, 0xd7, 0x45, 0xfd, 0x48, 0x20, 0xf1, + 0xc0, 0x37, 0xe1, 0x91, 0x4f, 0xc0, 
0x37, 0x80, 0x67, 0xb4, 0x7f, 0x7c, 0xb6, 0xd3, 0x5c, 0xc9, + 0x43, 0x29, 0x6f, 0xbb, 0x33, 0xbf, 0x99, 0x9d, 0xf9, 0xcd, 0xec, 0xec, 0x02, 0x08, 0xe4, 0x62, + 0xba, 0x62, 0x54, 0x50, 0xe2, 0xc8, 0x35, 0xbf, 0x33, 0x39, 0xa7, 0xf4, 0xbc, 0xc4, 0xfb, 0x79, + 0xc1, 0x90, 0x0b, 0xca, 0xf0, 0xfe, 0xd3, 0xbd, 0x33, 0x14, 0xc9, 0x5e, 0x2b, 0xd1, 0x06, 0x77, + 0xde, 0xdf, 0x88, 0x4c, 0xe9, 0x72, 0x49, 0x2b, 0x03, 0xdb, 0xde, 0x08, 0x7b, 0x52, 0x23, 0x7b, + 0xa6, 0x51, 0xd1, 0x14, 0xfc, 0xc7, 0xc8, 0xc5, 0xbc, 0x2e, 0x04, 0x92, 0xbb, 0xa0, 0x83, 0x09, + 0xad, 0xf1, 0x60, 0x12, 0xec, 0x07, 0x53, 0xb5, 0x9b, 0x4a, 0x40, 0xac, 0x35, 0xd1, 0x9f, 0x36, + 0x0c, 0xe5, 0x9e, 0x8c, 0x21, 0xc8, 0x90, 0xa7, 0xac, 0x58, 0x89, 0x82, 0x56, 0xa1, 0x35, 0xb6, + 0x26, 0x7e, 0xdc, 0x15, 0x91, 0x08, 0x06, 0xe7, 0x28, 0x42, 0x7b, 0x6c, 0x4d, 0x82, 0xfd, 0x9b, + 0xc6, 0xd7, 0x09, 0x0a, 0x69, 0xfe, 0xe0, 0x46, 0x2c, 0x95, 0xe4, 0x1e, 0xb8, 0x29, 0xc3, 0x44, + 0x60, 0x38, 0x50, 0xb0, 0x37, 0x0c, 0xec, 0x50, 0x09, 0x0d, 0xd2, 0x40, 0xa4, 0x43, 0x8e, 0x22, + 0x1c, 0xf6, 0x1c, 0xce, 0x5b, 0x87, 0x5c, 0x3b, 0xac, 0x57, 0x99, 0x74, 0xe8, 0xf4, 0x1c, 0x7e, + 0xa3, 0x84, 0x8d, 0x43, 0x0d, 0x21, 0x9f, 0xc1, 0x96, 0x5e, 0x2d, 0x56, 0x89, 0xb8, 0xe0, 0xa1, + 0xab, 0x4c, 0x6e, 0xf7, 0x4c, 0x4e, 0xa5, 0xc6, 0xd8, 0x05, 0x75, 0x2b, 0x92, 0x27, 0x65, 0x58, + 0xa2, 0xc0, 0x70, 0xd4, 0x3b, 0xe9, 0x48, 0x09, 0x9b, 0x93, 0x34, 0x84, 0x4c, 0xc0, 0x51, 0xac, + 0x87, 0x9e, 0xc2, 0xde, 0x32, 0xd8, 0xaf, 0xa5, 0xcc, 0x40, 0x35, 0x60, 0xe6, 0xc2, 0x50, 0xea, + 0x22, 0x0e, 0x23, 0xc3, 0x15, 0x19, 0xc3, 0x56, 0x46, 0xd3, 0x05, 0xc3, 0x5c, 0xc5, 0x69, 0xb8, + 0x86, 0x8c, 0xa6, 0x31, 0xe6, 0x32, 0x18, 0x72, 0x0c, 0x23, 0x86, 0x4f, 0x6a, 0xe4, 0x0d, 0xdd, + 0x1f, 0x4d, 0x75, 0xf5, 0xa7, 0x6d, 0xf3, 0x98, 0xea, 0xcb, 0x0a, 0x1c, 0xd1, 0xb4, 0x5e, 0x62, + 0x25, 0x62, 0x6d, 0x13, 0x37, 0xc6, 0xd1, 0xcf, 0x16, 0x40, 0x4b, 0xfd, 0x35, 0x0e, 0x7e, 0x1b, + 0xfc, 0x1f, 0x39, 0xad, 0x16, 0x59, 0x22, 0x12, 0x75, 0xb4, 0x1f, 0x7b, 0x52, 0x70, 0x94, 0x88, + 0x84, 0x1c, 0xb4, 0x51, 0xe9, 0xea, 0xee, 0x6e, 0x8e, 0xea, 0x90, 0x2e, 0x97, 0xc5, 0x73, 0x01, + 0x91, 0xb7, 0xc0, 0x2b, 0xf8, 0x02, 0x19, 0xa3, 0x4c, 0xd5, 0xdd, 0x8b, 0x47, 0x05, 0xff, 0x52, + 0x6e, 0xa3, 0xdf, 0x2d, 0x18, 0xcd, 0xaf, 0xcd, 0xd0, 0x04, 0x5c, 0xaa, 0x3b, 0xd5, 0xee, 0x55, + 0x60, 0x8e, 0xe2, 0x91, 0x92, 0xc7, 0x46, 0xdf, 0x4f, 0x69, 0xb0, 0x39, 0xa5, 0xe1, 0x4b, 0x48, + 0xc9, 0xe9, 0xa7, 0xf4, 0x97, 0x05, 0xd0, 0x36, 0xea, 0x35, 0xb2, 0xfa, 0x0a, 0xb6, 0x56, 0x0c, + 0x53, 0x5a, 0x65, 0x45, 0x27, 0xb7, 0x9d, 0xcd, 0x31, 0x9d, 0x76, 0xd0, 0x71, 0xcf, 0xf6, 0xff, + 0xcc, 0xfb, 0x37, 0x1b, 0x5e, 0xbf, 0x74, 0xdb, 0x5e, 0x71, 0xf2, 0x7b, 0x10, 0xe4, 0x05, 0x96, + 0x99, 0x19, 0x04, 0x03, 0x35, 0xff, 0x9a, 0x1e, 0x39, 0x96, 0x1a, 0x79, 0x64, 0x0c, 0x79, 0xb3, + 0xe4, 0xe4, 0x3d, 0x08, 0x14, 0x5f, 0x4f, 0x93, 0xb2, 0x46, 0x1e, 0x0e, 0xc7, 0x03, 0x19, 0x9f, + 0x14, 0x7d, 0xab, 0x24, 0x5d, 0xce, 0x9c, 0x97, 0xc0, 0x99, 0xdb, 0xe7, 0xec, 0x0f, 0x0b, 0xa0, + 0x1d, 0x35, 0xaf, 0x98, 0xae, 0xff, 0xf6, 0x66, 0x9f, 0x80, 0xbf, 0xbe, 0x96, 0xe4, 0x16, 0x0c, + 0x92, 0xb2, 0x54, 0xf9, 0x78, 0xb1, 0x5c, 0xca, 0xab, 0xac, 0xca, 0xc0, 0x43, 0x7b, 0x43, 0x99, + 0x8c, 0x3e, 0xfa, 0xd5, 0x02, 0x7f, 0x3d, 0x62, 0x65, 0x83, 0xa7, 0xb4, 0x2c, 0xbb, 0xfc, 0x78, + 0x52, 0xa0, 0xd8, 0xd9, 0x85, 0x51, 0x5a, 0x26, 0x35, 0xc7, 0xc6, 0xeb, 0x6b, 0xcd, 0x4b, 0xa4, + 0xa4, 0x71, 0xa3, 0x25, 0x5f, 0x34, 0x93, 0x5c, 0x27, 0xfe, 0xc1, 0xe6, 0xc4, 0xe7, 0x82, 0xd5, + 0xa9, 0xa8, 0x19, 0x66, 0x2a, 0x06, 0x33, 0xe0, 0x5f, 0x94, 
0xf8, 0xdf, 0x36, 0xb8, 0xfa, 0x3c, + 0xb2, 0x0b, 0x2e, 0xc7, 0x12, 0x53, 0xa1, 0x22, 0x6d, 0xc3, 0x99, 0x2b, 0xa1, 0x7c, 0x59, 0xb4, + 0x9a, 0x6c, 0x83, 0xf3, 0xd3, 0x05, 0x32, 0x34, 0xf5, 0xdc, 0x32, 0xb8, 0xef, 0xa4, 0x4c, 0xbe, + 0x2a, 0x4a, 0x49, 0xee, 0x81, 0x47, 0x59, 0x86, 0x6c, 0x71, 0xd6, 0x04, 0xde, 0xbc, 0x9f, 0x8f, + 0xa4, 0x78, 0xf6, 0xec, 0xc1, 0x8d, 0x78, 0x44, 0xf5, 0x92, 0x84, 0xe0, 0xd2, 0x3c, 0x6f, 0x9e, + 0x5a, 0x47, 0x1e, 0xa6, 0xf7, 0xe4, 0x36, 0x38, 0x65, 0xb1, 0x2c, 0x74, 0x43, 0x4b, 0x85, 0xde, + 0x92, 0x0f, 0xc1, 0xe3, 0x22, 0x61, 0x62, 0x91, 0x08, 0xf3, 0x88, 0xae, 0xe9, 0xab, 0x19, 0xa7, + 0x4c, 0x7a, 0x57, 0x80, 0x03, 0x41, 0x3e, 0x86, 0xc0, 0x60, 0x73, 0x81, 0xcc, 0x3c, 0x9e, 0xcf, + 0xc1, 0x41, 0xc3, 0x25, 0x84, 0xec, 0x80, 0x8b, 0x55, 0x26, 0x7d, 0x7b, 0x57, 0x83, 0x1d, 0xac, + 0xb2, 0x03, 0x41, 0xa6, 0x00, 0x12, 0x77, 0x86, 0x39, 0x65, 0x18, 0xfa, 0x57, 0x63, 0x7d, 0xac, + 0xb2, 0x99, 0x42, 0xcc, 0x3c, 0x70, 0x75, 0x55, 0xa3, 0x7d, 0x70, 0x35, 0xb1, 0x9d, 0xe6, 0xb2, + 0xfe, 0xa5, 0xb9, 0xbe, 0x07, 0x47, 0x91, 0x4c, 0xb6, 0x61, 0xb8, 0x6e, 0xa9, 0xab, 0x0c, 0x94, + 0x96, 0xdc, 0x04, 0x9b, 0xae, 0xcc, 0x13, 0x69, 0xd3, 0x15, 0x79, 0x17, 0xa0, 0x1d, 0x1f, 0x66, + 0xde, 0xfa, 0xeb, 0xe9, 0x11, 0x3d, 0x84, 0x91, 0xa9, 0xcc, 0x35, 0xfd, 0xbf, 0x03, 0x7e, 0x56, + 0x30, 0x4c, 0xd7, 0x77, 0xdb, 0x8f, 0x5b, 0x41, 0xf4, 0x03, 0xb8, 0x9a, 0x01, 0xf2, 0xa9, 0x1e, + 0x14, 0xbc, 0x4a, 0x56, 0xfc, 0x82, 0x36, 0xed, 0x45, 0x9a, 0xcf, 0x0b, 0x4d, 0xe7, 0x46, 0x13, + 0x07, 0x59, 0xbb, 0xb9, 0x3c, 0xed, 0xec, 0xcb, 0xd3, 0x2e, 0xfa, 0x1c, 0x82, 0x8e, 0x31, 0x21, + 0x9d, 0xa0, 0x7d, 0x13, 0xe2, 0x8b, 0x3e, 0x0b, 0xd1, 0x5d, 0xf0, 0xd7, 0x29, 0x91, 0x37, 0xc1, + 0x51, 0x2c, 0xab, 0x22, 0xf8, 0xb1, 0xde, 0xcc, 0x1e, 0xc2, 0x4e, 0x4a, 0x97, 0xcd, 0x85, 0x4b, + 0x4b, 0x5a, 0x67, 0x9d, 0x6b, 0x97, 0xd2, 0x2a, 0xa7, 0x6c, 0x99, 0x54, 0x29, 0xfe, 0x62, 0x47, + 0x27, 0x1a, 0x74, 0xa8, 0x40, 0xc7, 0x6b, 0xd0, 0x63, 0x95, 0xe5, 0xa9, 0xfc, 0xfa, 0x9e, 0xb9, + 0xea, 0x07, 0xfc, 0xc9, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x8d, 0x96, 0x46, 0xb3, 0x8d, 0x0b, + 0x00, 0x00, } diff --git a/vendor/cloud.google.com/go/firestore/integration_test.go b/vendor/cloud.google.com/go/firestore/integration_test.go index bc3433781..24bd061c0 100644 --- a/vendor/cloud.google.com/go/firestore/integration_test.go +++ b/vendor/cloud.google.com/go/firestore/integration_test.go @@ -19,6 +19,7 @@ import ( "flag" "fmt" "log" + "math" "os" "sort" "testing" @@ -259,6 +260,7 @@ func TestIntegration_GetAll(t *testing.T) { for i := 0; i < 5; i++ { doc := coll.NewDoc() docRefs = append(docRefs, doc) + // TODO(jba): omit one create so we can test missing doc behavior. mustCreate("GetAll #1", t, doc, getAll{N: i}) } docSnapshots, err := iClient.GetAll(ctx, docRefs) @@ -277,6 +279,9 @@ func TestIntegration_GetAll(t *testing.T) { if got != want { t.Errorf("%d: got %+v, want %+v", i, got, want) } + if ds.ReadTime.IsZero() { + t.Errorf("%d: got zero read time", i) + } } } @@ -686,6 +691,38 @@ func TestIntegration_Query(t *testing.T) { } } +// Test unary filters. 
+func TestIntegration_QueryUnary(t *testing.T) { + ctx := context.Background() + coll := integrationColl(t) + mustCreate("q", t, coll.NewDoc(), map[string]interface{}{"x": 2, "q": "a"}) + mustCreate("q", t, coll.NewDoc(), map[string]interface{}{"x": 2, "q": nil}) + mustCreate("q", t, coll.NewDoc(), map[string]interface{}{"x": 2, "q": math.NaN()}) + wantNull := map[string]interface{}{"q": nil} + wantNaN := map[string]interface{}{"q": math.NaN()} + + base := coll.Select("q").Where("x", "==", 2) + for _, test := range []struct { + q Query + want map[string]interface{} + }{ + {base.Where("q", "==", nil), wantNull}, + {base.Where("q", "==", math.NaN()), wantNaN}, + } { + got, err := test.q.Documents(ctx).GetAll() + if err != nil { + t.Fatal(err) + } + if len(got) != 1 { + t.Errorf("got %d responses, want 1", len(got)) + continue + } + if g, w := got[0].Data(), test.want; !testEqual(g, w) { + t.Errorf("%v: got %v, want %v", test.q, g, w) + } + } +} + // Test the special DocumentID field in queries. func TestIntegration_QueryName(t *testing.T) { ctx := context.Background() @@ -783,6 +820,7 @@ func TestIntegration_RunTransaction(t *testing.T) { } return anError } + mustCreate("RunTransaction", t, patDoc, pat) err := client.RunTransaction(ctx, incPat) if err != nil { @@ -813,6 +851,41 @@ func TestIntegration_RunTransaction(t *testing.T) { } } +func TestIntegration_TransactionGetAll(t *testing.T) { + ctx := context.Background() + type Player struct { + Name string + Score int + } + lee := Player{Name: "Lee", Score: 3} + sam := Player{Name: "Sam", Score: 1} + client := integrationClient(t) + leeDoc := iColl.Doc("lee") + samDoc := iColl.Doc("sam") + mustCreate("TransactionGetAll", t, leeDoc, lee) + mustCreate("TransactionGetAll", t, samDoc, sam) + + err := client.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error { + docs, err := tx.GetAll([]*DocumentRef{samDoc, leeDoc}) + if err != nil { + return err + } + for i, want := range []Player{sam, lee} { + var got Player + if err := docs[i].DataTo(&got); err != nil { + return err + } + if !testutil.Equal(got, want) { + return fmt.Errorf("got %+v, want %+v", got, want) + } + } + return nil + }) + if err != nil { + t.Fatal(err) + } +} + func codeEq(t *testing.T, msg string, code codes.Code, err error) { if grpc.Code(err) != code { t.Fatalf("%s:\ngot <%v>\nwant code %s", msg, err, code) diff --git a/vendor/cloud.google.com/go/firestore/internal/doc.template b/vendor/cloud.google.com/go/firestore/internal/doc.template index 705f6bf15..f5e196d3b 100644 --- a/vendor/cloud.google.com/go/firestore/internal/doc.template +++ b/vendor/cloud.google.com/go/firestore/internal/doc.template @@ -21,6 +21,9 @@ database. See https://cloud.google.com/firestore/docs for an introduction to Cloud Firestore and additional help on using the Firestore API. +Note: you can't use both Cloud Firestore and Cloud Datastore in the same +project. 
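The TestIntegration_QueryUnary function above exercises Firestore's unary filters, where an equality comparison against nil or NaN selects documents whose field is null or NaN. A minimal sketch of the same queries as ordinary application code, with an assumed client and illustrative collection and field names:

// Illustrative only: the unary-filter queries from TestIntegration_QueryUnary,
// written outside the test harness. Collection and field names are assumptions.
package firestoreexample

import (
	"fmt"
	"math"

	"cloud.google.com/go/firestore"
	"golang.org/x/net/context"
)

func queryUnary(ctx context.Context, client *firestore.Client) error {
	base := client.Collection("Things").Select("q").Where("x", "==", 2)

	// Equality against nil and NaN is handled specially by the query, matching
	// documents whose "q" field is null or NaN respectively.
	for _, q := range []firestore.Query{
		base.Where("q", "==", nil),
		base.Where("q", "==", math.NaN()),
	} {
		docs, err := q.Documents(ctx).GetAll()
		if err != nil {
			return err
		}
		fmt.Println(len(docs), "matches")
	}
	return nil
}
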
+ Creating a Client To start working with this package, create a client with a project ID: diff --git a/vendor/cloud.google.com/go/firestore/mock_test.go b/vendor/cloud.google.com/go/firestore/mock_test.go index d0cdba9e9..7c5014728 100644 --- a/vendor/cloud.google.com/go/firestore/mock_test.go +++ b/vendor/cloud.google.com/go/firestore/mock_test.go @@ -18,9 +18,12 @@ package firestore import ( "fmt" + "strings" "cloud.google.com/go/internal/testutil" pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes/empty" @@ -54,7 +57,10 @@ func newMockServer() (*mockServer, error) { // addRPC adds a (request, response) pair to the server's list of expected // interactions. The server will compare the incoming request with wantReq -// using proto.Equal. +// using proto.Equal. The response can be a message or an error. +// +// For the Listen RPC, resp should be a []interface{}, where each element +// is either ListenResponse or an error. // // Passing nil for wantReq disables the request check. func (s *mockServer) addRPC(wantReq proto.Message, resp interface{}) { @@ -174,3 +180,28 @@ func (s *mockServer) Rollback(_ context.Context, req *pb.RollbackRequest) (*empt } return res.(*empty.Empty), nil } + +func (s *mockServer) Listen(stream pb.Firestore_ListenServer) error { + req, err := stream.Recv() + if err != nil { + return err + } + responses, err := s.popRPC(req) + if err != nil { + if status.Code(err) == codes.Unknown && strings.Contains(err.Error(), "mockServer") { + // The stream will retry on Unknown, but we don't want that to happen if + // the error comes from us. + panic(err) + } + return err + } + for _, res := range responses.([]interface{}) { + if err, ok := res.(error); ok { + return err + } + if err := stream.Send(res.(*pb.ListenResponse)); err != nil { + return err + } + } + return nil +} diff --git a/vendor/cloud.google.com/go/firestore/order.go b/vendor/cloud.google.com/go/firestore/order.go new file mode 100644 index 000000000..262c1ad9c --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/order.go @@ -0,0 +1,216 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "bytes" + "fmt" + "math" + "sort" + "strings" + + tspb "github.com/golang/protobuf/ptypes/timestamp" + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" +) + +// Returns a negative number, zero, or a positive number depending on whether a is +// less than, equal to, or greater than b according to Firestore's ordering of +// values. 
+func compareValues(a, b *pb.Value) int { + ta := typeOrder(a) + tb := typeOrder(b) + if ta != tb { + return compareInt64s(int64(ta), int64(tb)) + } + switch a := a.ValueType.(type) { + case *pb.Value_NullValue: + return 0 // nulls are equal + + case *pb.Value_BooleanValue: + av := a.BooleanValue + bv := b.GetBooleanValue() + switch { + case av && !bv: + return 1 + case bv && !av: + return -1 + default: + return 0 + } + + case *pb.Value_IntegerValue: + return compareNumbers(float64(a.IntegerValue), toFloat(b)) + + case *pb.Value_DoubleValue: + return compareNumbers(a.DoubleValue, toFloat(b)) + + case *pb.Value_TimestampValue: + return compareTimestamps(a.TimestampValue, b.GetTimestampValue()) + + case *pb.Value_StringValue: + return strings.Compare(a.StringValue, b.GetStringValue()) + + case *pb.Value_BytesValue: + return bytes.Compare(a.BytesValue, b.GetBytesValue()) + + case *pb.Value_ReferenceValue: + return compareReferences(a.ReferenceValue, b.GetReferenceValue()) + + case *pb.Value_GeoPointValue: + ag := a.GeoPointValue + bg := b.GetGeoPointValue() + if ag.Latitude != bg.Latitude { + return compareFloat64s(ag.Latitude, bg.Latitude) + } + return compareFloat64s(ag.Longitude, bg.Longitude) + + case *pb.Value_ArrayValue: + return compareArrays(a.ArrayValue.Values, b.GetArrayValue().Values) + + case *pb.Value_MapValue: + return compareMaps(a.MapValue.Fields, b.GetMapValue().Fields) + + default: + panic(fmt.Sprintf("bad value type: %v", a)) + } +} + +// Treats NaN as less than any non-NaN. +func compareNumbers(a, b float64) int { + switch { + case math.IsNaN(a): + if math.IsNaN(b) { + return 0 + } + return -1 + case math.IsNaN(b): + return 1 + default: + return compareFloat64s(a, b) + } +} + +// Return v as a float64, assuming it's an Integer or Double. +func toFloat(v *pb.Value) float64 { + if x, ok := v.ValueType.(*pb.Value_IntegerValue); ok { + return float64(x.IntegerValue) + } + return v.GetDoubleValue() +} + +func compareTimestamps(a, b *tspb.Timestamp) int { + if c := compareInt64s(a.Seconds, b.Seconds); c != 0 { + return c + } + return compareInt64s(int64(a.Nanos), int64(b.Nanos)) +} + +func compareReferences(a, b string) int { + // Compare path components lexicographically. 
+ pa := strings.Split(a, "/") + pb := strings.Split(b, "/") + for i := 0; i < len(pa) && i < len(pb); i++ { + if c := strings.Compare(pa[i], pb[i]); c != 0 { + return c + } + } + return compareInt64s(int64(len(pa)), int64(len(pb))) +} + +func compareArrays(a, b []*pb.Value) int { + for i := 0; i < len(a) && i < len(b); i++ { + if c := compareValues(a[i], b[i]); c != 0 { + return c + } + } + return compareInt64s(int64(len(a)), int64(len(b))) +} + +func compareMaps(a, b map[string]*pb.Value) int { + sortedKeys := func(m map[string]*pb.Value) []string { + var ks []string + for k := range m { + ks = append(ks, k) + } + sort.Strings(ks) + return ks + } + + aks := sortedKeys(a) + bks := sortedKeys(b) + for i := 0; i < len(aks) && i < len(bks); i++ { + if c := strings.Compare(aks[i], bks[i]); c != 0 { + return c + } + k := aks[i] + if c := compareValues(a[k], b[k]); c != 0 { + return c + } + } + return compareInt64s(int64(len(aks)), int64(len(bks))) +} + +func compareFloat64s(a, b float64) int { + switch { + case a < b: + return -1 + case a > b: + return 1 + default: + return 0 + } +} + +func compareInt64s(a, b int64) int { + switch { + case a < b: + return -1 + case a > b: + return 1 + default: + return 0 + } +} + +// Return an integer corresponding to the type of value stored in v, such that +// comparing the resulting integers gives the Firestore ordering for types. +func typeOrder(v *pb.Value) int { + switch v.ValueType.(type) { + case *pb.Value_NullValue: + return 0 + case *pb.Value_BooleanValue: + return 1 + case *pb.Value_IntegerValue: + return 2 + case *pb.Value_DoubleValue: + return 2 + case *pb.Value_TimestampValue: + return 3 + case *pb.Value_StringValue: + return 4 + case *pb.Value_BytesValue: + return 5 + case *pb.Value_ReferenceValue: + return 6 + case *pb.Value_GeoPointValue: + return 7 + case *pb.Value_ArrayValue: + return 8 + case *pb.Value_MapValue: + return 9 + default: + panic(fmt.Sprintf("bad value type: %v", v)) + } +} diff --git a/vendor/cloud.google.com/go/firestore/order_test.go b/vendor/cloud.google.com/go/firestore/order_test.go new file mode 100644 index 000000000..070237464 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/order_test.go @@ -0,0 +1,118 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "math" + "testing" + "time" + + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + "google.golang.org/genproto/googleapis/type/latlng" +) + +func TestCompareValues(t *testing.T) { + // Ordered list of values. 
+ vals := []*pb.Value{ + nullValue, + boolval(false), + boolval(true), + floatval(math.NaN()), + floatval(math.Inf(-1)), + floatval(-math.MaxFloat64), + int64val(math.MinInt64), + floatval(-1.1), + intval(-1), + intval(0), + floatval(math.SmallestNonzeroFloat64), + intval(1), + floatval(1.1), + intval(2), + int64val(math.MaxInt64), + floatval(math.MaxFloat64), + floatval(math.Inf(1)), + tsval(time.Date(2016, 5, 20, 10, 20, 0, 0, time.UTC)), + tsval(time.Date(2016, 10, 21, 15, 32, 0, 0, time.UTC)), + strval(""), + strval("\u0000\ud7ff\ue000\uffff"), + strval("(╯°□°)╯︵ ┻━┻"), + strval("a"), + strval("abc def"), + strval("e\u0301b"), + strval("æ"), + strval("\u00e9a"), + bytesval([]byte{}), + bytesval([]byte{0}), + bytesval([]byte{0, 1, 2, 3, 4}), + bytesval([]byte{0, 1, 2, 4, 3}), + bytesval([]byte{255}), + refval("projects/p1/databases/d1/documents/c1/doc1"), + refval("projects/p1/databases/d1/documents/c1/doc2"), + refval("projects/p1/databases/d1/documents/c1/doc2/c2/doc1"), + refval("projects/p1/databases/d1/documents/c1/doc2/c2/doc2"), + refval("projects/p1/databases/d1/documents/c10/doc1"), + refval("projects/p1/databases/dkkkkklkjnjkkk1/documents/c2/doc1"), + refval("projects/p2/databases/d2/documents/c1/doc1"), + refval("projects/p2/databases/d2/documents/c1-/doc1"), + geopoint(-90, -180), + geopoint(-90, 0), + geopoint(-90, 180), + geopoint(0, -180), + geopoint(0, 0), + geopoint(0, 180), + geopoint(1, -180), + geopoint(1, 0), + geopoint(1, 180), + geopoint(90, -180), + geopoint(90, 0), + geopoint(90, 180), + arrayval(), + arrayval(strval("bar")), + arrayval(strval("foo")), + arrayval(strval("foo"), intval(1)), + arrayval(strval("foo"), intval(2)), + arrayval(strval("foo"), strval("0")), + mapval(map[string]*pb.Value{"bar": intval(0)}), + mapval(map[string]*pb.Value{"bar": intval(0), "foo": intval(1)}), + mapval(map[string]*pb.Value{"foo": intval(1)}), + mapval(map[string]*pb.Value{"foo": intval(2)}), + mapval(map[string]*pb.Value{"foo": strval("0")}), + } + + for i, v1 := range vals { + if got := compareValues(v1, v1); got != 0 { + t.Errorf("compare(%v, %v) == %d, want 0", v1, v1, got) + } + for _, v2 := range vals[i+1:] { + if got := compareValues(v1, v2); got != -1 { + t.Errorf("compare(%v, %v) == %d, want -1", v1, v2, got) + } + if got := compareValues(v2, v1); got != 1 { + t.Errorf("compare(%v, %v) == %d, want 1", v1, v2, got) + } + } + } + + // Integers and Doubles order the same. + n1 := intval(17) + n2 := floatval(17) + if got := compareValues(n1, n2); got != 0 { + t.Errorf("compare(%v, %v) == %d, want 0", n1, n2, got) + } +} + +func geopoint(lat, lng float64) *pb.Value { + return geoval(&latlng.LatLng{Latitude: lat, Longitude: lng}) +} diff --git a/vendor/cloud.google.com/go/firestore/query.go b/vendor/cloud.google.com/go/firestore/query.go index af9b7dc96..5fe5f0d09 100644 --- a/vendor/cloud.google.com/go/firestore/query.go +++ b/vendor/cloud.google.com/go/firestore/query.go @@ -20,6 +20,7 @@ import ( "io" "math" "reflect" + "strings" "golang.org/x/net/context" @@ -43,10 +44,15 @@ type Query struct { offset int32 limit *wrappers.Int32Value startVals, endVals []interface{} + startDoc, endDoc *DocumentSnapshot startBefore, endBefore bool err error } +func (q *Query) collectionPath() string { + return q.parentPath + "/documents/" + q.collectionID +} + // DocumentID is the special field name representing the ID of a document // in queries. 
const DocumentID = "__name__" @@ -65,18 +71,17 @@ func (q Query) Select(paths ...string) Query { } fps = append(fps, fp) } - if fps == nil { - q.selection = []FieldPath{{DocumentID}} - } else { - q.selection = fps - } - return q + return q.SelectPaths(fps...) } // SelectPaths returns a new Query that specifies the field paths // to return from the result documents. func (q Query) SelectPaths(fieldPaths ...FieldPath) Query { - q.selection = fieldPaths + if len(fieldPaths) == 0 { + q.selection = []FieldPath{{DocumentID}} + } else { + q.selection = fieldPaths + } return q } @@ -128,7 +133,7 @@ func (q Query) OrderBy(path string, dir Direction) Query { q.err = err return q } - q.orders = append(append([]order(nil), q.orders...), order{fp, dir}) + q.orders = append(q.copyOrders(), order{fp, dir}) return q } @@ -136,10 +141,14 @@ func (q Query) OrderBy(path string, dir Direction) Query { // returned. A Query can have multiple OrderBy/OrderByPath specifications. // OrderByPath appends the specification to the list of existing ones. func (q Query) OrderByPath(fp FieldPath, dir Direction) Query { - q.orders = append(append([]order(nil), q.orders...), order{fp, dir}) + q.orders = append(q.copyOrders(), order{fp, dir}) return q } +func (q *Query) copyOrders() []order { + return append([]order(nil), q.orders...) +} + // Offset returns a new Query that specifies the number of initial results to skip. // It must not be negative. func (q Query) Offset(n int) Query { @@ -155,8 +164,13 @@ func (q Query) Limit(n int) Query { } // StartAt returns a new Query that specifies that results should start at -// the document with the given field values. The field path corresponding to -// each value is taken from the corresponding OrderBy call. For example, in +// the document with the given field values. +// +// If StartAt is called with a single DocumentSnapshot, its field values are used. +// The DocumentSnapshot must have all the fields mentioned in the OrderBy clauses. +// +// Otherwise, StartAt should be called with one field value for each OrderBy clause, +// in the order that they appear. For example, in // q.OrderBy("X", Asc).OrderBy("Y", Desc).StartAt(1, 2) // results will begin at the first document where X = 1 and Y = 2. // @@ -167,8 +181,9 @@ func (q Query) Limit(n int) Query { // client.Collection("States").OrderBy(DocumentID, firestore.Asc).StartAt("NewYork") // // Calling StartAt overrides a previous call to StartAt or StartAfter. -func (q Query) StartAt(fieldValues ...interface{}) Query { - q.startVals, q.startBefore = fieldValues, true +func (q Query) StartAt(docSnapshotOrFieldValues ...interface{}) Query { + q.startBefore = true + q.startVals, q.startDoc, q.err = q.processCursorArg("StartAt", docSnapshotOrFieldValues) return q } @@ -176,8 +191,9 @@ func (q Query) StartAt(fieldValues ...interface{}) Query { // the document with the given field values. See Query.StartAt for more information. // // Calling StartAfter overrides a previous call to StartAt or StartAfter. -func (q Query) StartAfter(fieldValues ...interface{}) Query { - q.startVals, q.startBefore = fieldValues, false +func (q Query) StartAfter(docSnapshotOrFieldValues ...interface{}) Query { + q.startBefore = false + q.startVals, q.startDoc, q.err = q.processCursorArg("StartAfter", docSnapshotOrFieldValues) return q } @@ -185,8 +201,9 @@ func (q Query) StartAfter(fieldValues ...interface{}) Query { // document with the given field values. See Query.StartAt for more information. 
// // Calling EndAt overrides a previous call to EndAt or EndBefore. -func (q Query) EndAt(fieldValues ...interface{}) Query { - q.endVals, q.endBefore = fieldValues, false +func (q Query) EndAt(docSnapshotOrFieldValues ...interface{}) Query { + q.endBefore = false + q.endVals, q.endDoc, q.err = q.processCursorArg("EndAt", docSnapshotOrFieldValues) return q } @@ -194,11 +211,24 @@ func (q Query) EndAt(fieldValues ...interface{}) Query { // the document with the given field values. See Query.StartAt for more information. // // Calling EndBefore overrides a previous call to EndAt or EndBefore. -func (q Query) EndBefore(fieldValues ...interface{}) Query { - q.endVals, q.endBefore = fieldValues, true +func (q Query) EndBefore(docSnapshotOrFieldValues ...interface{}) Query { + q.endBefore = true + q.endVals, q.endDoc, q.err = q.processCursorArg("EndBefore", docSnapshotOrFieldValues) return q } +func (q *Query) processCursorArg(name string, docSnapshotOrFieldValues []interface{}) ([]interface{}, *DocumentSnapshot, error) { + for _, e := range docSnapshotOrFieldValues { + if ds, ok := e.(*DocumentSnapshot); ok { + if len(docSnapshotOrFieldValues) == 1 { + return nil, ds, nil + } + return nil, nil, fmt.Errorf("firestore: a document snapshot must be the only argument to %s", name) + } + } + return docSnapshotOrFieldValues, nil, nil +} + func (q Query) query() *Query { return &q } func (q Query) toProto() (*pb.StructuredQuery, error) { @@ -245,33 +275,79 @@ func (q Query) toProto() (*pb.StructuredQuery, error) { cf.Filters = append(cf.Filters, pf) } } - for _, ord := range q.orders { + orders := q.orders + if q.startDoc != nil || q.endDoc != nil { + orders = q.adjustOrders() + } + for _, ord := range orders { po, err := ord.toProto() if err != nil { return nil, err } p.OrderBy = append(p.OrderBy, po) } - // StartAt and EndAt must have values that correspond exactly to the explicit order-by fields. - if len(q.startVals) != 0 { - vals, err := q.toPositionValues(q.startVals) - if err != nil { - return nil, err - } - p.StartAt = &pb.Cursor{Values: vals, Before: q.startBefore} + + cursor, err := q.toCursor(q.startVals, q.startDoc, q.startBefore, orders) + if err != nil { + return nil, err } - if len(q.endVals) != 0 { - vals, err := q.toPositionValues(q.endVals) - if err != nil { - return nil, err - } - p.EndAt = &pb.Cursor{Values: vals, Before: q.endBefore} + p.StartAt = cursor + cursor, err = q.toCursor(q.endVals, q.endDoc, q.endBefore, orders) + if err != nil { + return nil, err } + p.EndAt = cursor return p, nil } +// If there is a start/end that uses a Document Snapshot, we may need to adjust the OrderBy +// clauses that the user provided: we add OrderBy(__name__) if it isn't already present, and +// we make sure we don't invalidate the original query by adding an OrderBy for inequality filters. +func (q *Query) adjustOrders() []order { + // If the user is already ordering by document ID, don't change anything. + for _, ord := range q.orders { + if ord.isDocumentID() { + return q.orders + } + } + // If there are OrderBy clauses, append an OrderBy(DocumentID), using the direction of the last OrderBy clause. + if len(q.orders) > 0 { + return append(q.copyOrders(), order{ + fieldPath: FieldPath{DocumentID}, + dir: q.orders[len(q.orders)-1].dir, + }) + } + // If there are no OrderBy clauses but there is an inequality, add an OrderBy clause + // for the field of the first inequality. 
+ var orders []order + for _, f := range q.filters { + if f.op != "==" { + orders = []order{{fieldPath: f.fieldPath, dir: Asc}} + break + } + } + // Add an ascending OrderBy(DocumentID). + return append(orders, order{fieldPath: FieldPath{DocumentID}, dir: Asc}) +} + +func (q *Query) toCursor(fieldValues []interface{}, ds *DocumentSnapshot, before bool, orders []order) (*pb.Cursor, error) { + var vals []*pb.Value + var err error + if ds != nil { + vals, err = q.docSnapshotToCursorValues(ds, orders) + } else if len(fieldValues) != 0 { + vals, err = q.fieldValuesToCursorValues(fieldValues) + } else { + return nil, nil + } + if err != nil { + return nil, err + } + return &pb.Cursor{Values: vals, Before: before}, nil +} + // toPositionValues converts the field values to protos. -func (q *Query) toPositionValues(fieldValues []interface{}) ([]*pb.Value, error) { +func (q *Query) fieldValuesToCursorValues(fieldValues []interface{}) ([]*pb.Value, error) { if len(fieldValues) != len(q.orders) { return nil, errors.New("firestore: number of field values in StartAt/StartAfter/EndAt/EndBefore does not match number of OrderBy fields") } @@ -279,12 +355,14 @@ func (q *Query) toPositionValues(fieldValues []interface{}) ([]*pb.Value, error) var err error for i, ord := range q.orders { fval := fieldValues[i] - if len(ord.fieldPath) == 1 && ord.fieldPath[0] == DocumentID { + if ord.isDocumentID() { + // TODO(jba): support DocumentRefs as well as strings. + // TODO(jba): error if document ref does not belong to the right collection. docID, ok := fval.(string) if !ok { return nil, fmt.Errorf("firestore: expected doc ID for DocumentID field, got %T", fval) } - vals[i] = &pb.Value{&pb.Value_ReferenceValue{q.parentPath + "/documents/" + q.collectionID + "/" + docID}} + vals[i] = &pb.Value{&pb.Value_ReferenceValue{q.collectionPath() + "/" + docID}} } else { var sawTransform bool vals[i], sawTransform, err = toProtoValue(reflect.ValueOf(fval)) @@ -299,6 +377,62 @@ func (q *Query) toPositionValues(fieldValues []interface{}) ([]*pb.Value, error) return vals, nil } +func (q *Query) docSnapshotToCursorValues(ds *DocumentSnapshot, orders []order) ([]*pb.Value, error) { + // TODO(jba): error if doc snap does not belong to the right collection. + vals := make([]*pb.Value, len(orders)) + for i, ord := range orders { + if ord.isDocumentID() { + dp, qp := ds.Ref.Parent.Path, q.collectionPath() + if dp != qp { + return nil, fmt.Errorf("firestore: document snapshot for %s passed to query on %s", dp, qp) + } + vals[i] = &pb.Value{&pb.Value_ReferenceValue{ds.Ref.Path}} + } else { + val, err := valueAtPath(ord.fieldPath, ds.proto.Fields) + if err != nil { + return nil, err + } + vals[i] = val + } + } + return vals, nil +} + +// Returns a function that compares DocumentSnapshots according to q's ordering. +func (q Query) compareFunc() func(d1, d2 *DocumentSnapshot) (int, error) { + // Add implicit sorting by name, using the last specified direction. 
+ lastDir := Asc + if len(q.orders) > 0 { + lastDir = q.orders[len(q.orders)-1].dir + } + orders := append(q.copyOrders(), order{[]string{DocumentID}, lastDir}) + return func(d1, d2 *DocumentSnapshot) (int, error) { + for _, ord := range orders { + var cmp int + if len(ord.fieldPath) == 1 && ord.fieldPath[0] == DocumentID { + cmp = strings.Compare(d1.Ref.Path, d2.Ref.Path) + } else { + v1, err := valueAtPath(ord.fieldPath, d1.proto.Fields) + if err != nil { + return 0, err + } + v2, err := valueAtPath(ord.fieldPath, d2.proto.Fields) + if err != nil { + return 0, err + } + cmp = compareValues(v1, v2) + } + if cmp != 0 { + if ord.dir == Desc { + cmp = -cmp + } + return cmp, nil + } + } + return 0, nil + } +} + type filter struct { fieldPath FieldPath op string @@ -309,6 +443,21 @@ func (f filter) toProto() (*pb.StructuredQuery_Filter, error) { if err := f.fieldPath.validate(); err != nil { return nil, err } + if uop, ok := unaryOpFor(f.value); ok { + if f.op != "==" { + return nil, fmt.Errorf("firestore: must use '==' when comparing %v", f.value) + } + return &pb.StructuredQuery_Filter{ + FilterType: &pb.StructuredQuery_Filter_UnaryFilter{ + UnaryFilter: &pb.StructuredQuery_UnaryFilter{ + OperandType: &pb.StructuredQuery_UnaryFilter_Field{ + Field: fref(f.fieldPath), + }, + Op: uop, + }, + }, + }, nil + } var op pb.StructuredQuery_FieldFilter_Operator switch f.op { case "<": @@ -333,7 +482,7 @@ func (f filter) toProto() (*pb.StructuredQuery_Filter, error) { } return &pb.StructuredQuery_Filter{ FilterType: &pb.StructuredQuery_Filter_FieldFilter{ - &pb.StructuredQuery_FieldFilter{ + FieldFilter: &pb.StructuredQuery_FieldFilter{ Field: fref(f.fieldPath), Op: op, Value: val, @@ -342,11 +491,37 @@ func (f filter) toProto() (*pb.StructuredQuery_Filter, error) { }, nil } +func unaryOpFor(value interface{}) (pb.StructuredQuery_UnaryFilter_Operator, bool) { + switch { + case value == nil: + return pb.StructuredQuery_UnaryFilter_IS_NULL, true + case isNaN(value): + return pb.StructuredQuery_UnaryFilter_IS_NAN, true + default: + return pb.StructuredQuery_UnaryFilter_OPERATOR_UNSPECIFIED, false + } +} + +func isNaN(x interface{}) bool { + switch x := x.(type) { + case float32: + return math.IsNaN(float64(x)) + case float64: + return math.IsNaN(x) + default: + return false + } +} + type order struct { fieldPath FieldPath dir Direction } +func (r order) isDocumentID() bool { + return len(r.fieldPath) == 1 && r.fieldPath[0] == DocumentID +} + func (r order) toProto() (*pb.StructuredQuery_Order, error) { if err := r.fieldPath.validate(); err != nil { return nil, err @@ -433,7 +608,7 @@ func (it *DocumentIterator) Next() (*DocumentSnapshot, error) { it.err = err return nil, err } - doc, err := newDocumentSnapshot(docRef, res.Document, client) + doc, err := newDocumentSnapshot(docRef, res.Document, client, res.ReadTime) if err != nil { it.err = err return nil, err diff --git a/vendor/cloud.google.com/go/firestore/query_test.go b/vendor/cloud.google.com/go/firestore/query_test.go index 0fea1147f..add4027df 100644 --- a/vendor/cloud.google.com/go/firestore/query_test.go +++ b/vendor/cloud.google.com/go/firestore/query_test.go @@ -15,6 +15,8 @@ package firestore import ( + "math" + "sort" "testing" "golang.org/x/net/context" @@ -22,34 +24,87 @@ import ( "cloud.google.com/go/internal/pretty" pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + tspb "github.com/golang/protobuf/ptypes/timestamp" "github.com/golang/protobuf/ptypes/wrappers" ) +func TestFilterToProto(t *testing.T) { + for _, test := range 
[]struct { + in filter + want *pb.StructuredQuery_Filter + }{ + { + filter{[]string{"a"}, ">", 1}, + &pb.StructuredQuery_Filter{FilterType: &pb.StructuredQuery_Filter_FieldFilter{ + FieldFilter: &pb.StructuredQuery_FieldFilter{ + Field: &pb.StructuredQuery_FieldReference{FieldPath: "a"}, + Op: pb.StructuredQuery_FieldFilter_GREATER_THAN, + Value: intval(1), + }, + }}, + }, + { + filter{[]string{"a"}, "==", nil}, + &pb.StructuredQuery_Filter{FilterType: &pb.StructuredQuery_Filter_UnaryFilter{ + UnaryFilter: &pb.StructuredQuery_UnaryFilter{ + OperandType: &pb.StructuredQuery_UnaryFilter_Field{ + Field: &pb.StructuredQuery_FieldReference{FieldPath: "a"}, + }, + Op: pb.StructuredQuery_UnaryFilter_IS_NULL, + }, + }}, + }, + { + filter{[]string{"a"}, "==", math.NaN()}, + &pb.StructuredQuery_Filter{FilterType: &pb.StructuredQuery_Filter_UnaryFilter{ + UnaryFilter: &pb.StructuredQuery_UnaryFilter{ + OperandType: &pb.StructuredQuery_UnaryFilter_Field{ + Field: &pb.StructuredQuery_FieldReference{FieldPath: "a"}, + }, + Op: pb.StructuredQuery_UnaryFilter_IS_NAN, + }, + }}, + }, + } { + got, err := test.in.toProto() + if err != nil { + t.Fatal(err) + } + if !testEqual(got, test.want) { + t.Errorf("%+v:\ngot\n%v\nwant\n%v", test.in, pretty.Value(got), pretty.Value(test.want)) + } + } +} + func TestQueryToProto(t *testing.T) { - c := &Client{} + filtr := func(path []string, op string, val interface{}) *pb.StructuredQuery_Filter { + f, err := filter{path, op, val}.toProto() + if err != nil { + t.Fatal(err) + } + return f + } + + c := &Client{projectID: "P", databaseID: "DB"} coll := c.Collection("C") q := coll.Query - aFilter, err := filter{[]string{"a"}, ">", 5}.toProto() - if err != nil { - t.Fatal(err) - } - bFilter, err := filter{[]string{"b"}, "<", "foo"}.toProto() - if err != nil { - t.Fatal(err) - } - slashStarFilter, err := filter{[]string{"/", "*"}, ">", 5}.toProto() - if err != nil { - t.Fatal(err) - } type S struct { A int `firestore:"a"` } + docsnap := &DocumentSnapshot{ + Ref: coll.Doc("D"), + proto: &pb.Document{ + Fields: map[string]*pb.Value{"a": intval(7), "b": intval(8)}, + }, + } for _, test := range []struct { + desc string in Query want *pb.StructuredQuery }{ { - in: q.Select(), + desc: "q.Select()", + in: q.Select(), want: &pb.StructuredQuery{ Select: &pb.StructuredQuery_Projection{ Fields: []*pb.StructuredQuery_FieldReference{fref1("__name__")}, @@ -57,7 +112,8 @@ func TestQueryToProto(t *testing.T) { }, }, { - in: q.Select("a", "b"), + desc: `q.Select("a", "b")`, + in: q.Select("a", "b"), want: &pb.StructuredQuery{ Select: &pb.StructuredQuery_Projection{ Fields: []*pb.StructuredQuery_FieldReference{fref1("a"), fref1("b")}, @@ -65,7 +121,8 @@ func TestQueryToProto(t *testing.T) { }, }, { - in: q.Select("a", "b").Select("c"), // last wins + desc: `q.Select("a", "b").Select("c")`, + in: q.Select("a", "b").Select("c"), // last wins want: &pb.StructuredQuery{ Select: &pb.StructuredQuery_Projection{ Fields: []*pb.StructuredQuery_FieldReference{fref1("c")}, @@ -73,7 +130,8 @@ func TestQueryToProto(t *testing.T) { }, }, { - in: q.SelectPaths([]string{"*"}, []string{"/"}), + desc: `q.SelectPaths([]string{"*"}, []string{"/"})`, + in: q.SelectPaths([]string{"*"}, []string{"/"}), want: &pb.StructuredQuery{ Select: &pb.StructuredQuery_Projection{ Fields: []*pb.StructuredQuery_FieldReference{fref1("*"), fref1("/")}, @@ -81,18 +139,35 @@ func TestQueryToProto(t *testing.T) { }, }, { + desc: `q.Where("a", ">", 5)`, in: q.Where("a", ">", 5), - want: &pb.StructuredQuery{Where: aFilter}, + want: 
&pb.StructuredQuery{Where: filtr([]string{"a"}, ">", 5)}, }, { - in: q.Where("a", ">", 5).Where("b", "<", "foo"), + desc: `q.Where("a", "==", nil)`, + in: q.Where("a", "==", nil), + want: &pb.StructuredQuery{Where: filtr([]string{"a"}, "==", nil)}, + }, + { + desc: `q.Where("a", "==", NaN)`, + in: q.Where("a", "==", math.NaN()), + want: &pb.StructuredQuery{Where: filtr([]string{"a"}, "==", math.NaN())}, + }, + { + desc: `q.Where("a", "==", NaN)`, + in: q.Where("a", "==", float32(math.NaN())), + want: &pb.StructuredQuery{Where: filtr([]string{"a"}, "==", math.NaN())}, + }, + { + desc: `q.Where("a", ">", 5).Where("b", "<", "foo")`, + in: q.Where("a", ">", 5).Where("b", "<", "foo"), want: &pb.StructuredQuery{ Where: &pb.StructuredQuery_Filter{ &pb.StructuredQuery_Filter_CompositeFilter{ &pb.StructuredQuery_CompositeFilter{ Op: pb.StructuredQuery_CompositeFilter_AND, Filters: []*pb.StructuredQuery_Filter{ - aFilter, bFilter, + filtr([]string{"a"}, ">", 5), filtr([]string{"b"}, "<", "foo"), }, }, }, @@ -100,11 +175,13 @@ func TestQueryToProto(t *testing.T) { }, }, { + desc: ` q.WherePath([]string{"/", "*"}, ">", 5)`, in: q.WherePath([]string{"/", "*"}, ">", 5), - want: &pb.StructuredQuery{Where: slashStarFilter}, + want: &pb.StructuredQuery{Where: filtr([]string{"/", "*"}, ">", 5)}, }, { - in: q.OrderBy("b", Asc).OrderBy("a", Desc).OrderByPath([]string{"~"}, Asc), + desc: `q.OrderBy("b", Asc).OrderBy("a", Desc).OrderByPath([]string{"~"}, Asc)`, + in: q.OrderBy("b", Asc).OrderBy("a", Desc).OrderByPath([]string{"~"}, Asc), want: &pb.StructuredQuery{ OrderBy: []*pb.StructuredQuery_Order{ {fref1("b"), pb.StructuredQuery_ASCENDING}, @@ -114,21 +191,24 @@ func TestQueryToProto(t *testing.T) { }, }, { - in: q.Offset(2).Limit(3), + desc: `q.Offset(2).Limit(3)`, + in: q.Offset(2).Limit(3), want: &pb.StructuredQuery{ Offset: 2, Limit: &wrappers.Int32Value{3}, }, }, { - in: q.Offset(2).Limit(3).Limit(4).Offset(5), // last wins + desc: `q.Offset(2).Limit(3).Limit(4).Offset(5)`, + in: q.Offset(2).Limit(3).Limit(4).Offset(5), // last wins want: &pb.StructuredQuery{ Offset: 5, Limit: &wrappers.Int32Value{4}, }, }, { - in: q.OrderBy("a", Asc).StartAt(7).EndBefore(9), + desc: `q.OrderBy("a", Asc).StartAt(7).EndBefore(9)`, + in: q.OrderBy("a", Asc).StartAt(7).EndBefore(9), want: &pb.StructuredQuery{ OrderBy: []*pb.StructuredQuery_Order{ {fref1("a"), pb.StructuredQuery_ASCENDING}, @@ -144,7 +224,8 @@ func TestQueryToProto(t *testing.T) { }, }, { - in: q.OrderBy("a", Asc).StartAt(7).EndBefore(9), + desc: `q.OrderBy("a", Asc).StartAt(7).EndAt(9)`, + in: q.OrderBy("a", Asc).StartAt(7).EndAt(9), want: &pb.StructuredQuery{ OrderBy: []*pb.StructuredQuery_Order{ {fref1("a"), pb.StructuredQuery_ASCENDING}, @@ -155,12 +236,13 @@ func TestQueryToProto(t *testing.T) { }, EndAt: &pb.Cursor{ Values: []*pb.Value{intval(9)}, - Before: true, + Before: false, }, }, }, { - in: q.OrderBy("a", Asc).StartAfter(7).EndAt(9), + desc: `q.OrderBy("a", Asc).StartAfter(7).EndAt(9)`, + in: q.OrderBy("a", Asc).StartAfter(7).EndAt(9), want: &pb.StructuredQuery{ OrderBy: []*pb.StructuredQuery_Order{ {fref1("a"), pb.StructuredQuery_ASCENDING}, @@ -176,7 +258,25 @@ func TestQueryToProto(t *testing.T) { }, }, { - in: q.OrderBy("a", Asc).OrderBy("b", Desc).StartAfter(7, 8).EndAt(9, 10), + desc: `q.OrderBy(DocumentID, Asc).StartAfter("foo").EndBefore("bar")`, + in: q.OrderBy(DocumentID, Asc).StartAfter("foo").EndBefore("bar"), + want: &pb.StructuredQuery{ + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("__name__"), pb.StructuredQuery_ASCENDING}, + 
}, + StartAt: &pb.Cursor{ + Values: []*pb.Value{refval(coll.parentPath + "/documents/C/foo")}, + Before: false, + }, + EndAt: &pb.Cursor{ + Values: []*pb.Value{refval(coll.parentPath + "/documents/C/bar")}, + Before: true, + }, + }, + }, + { + desc: `q.OrderBy("a", Asc).OrderBy("b", Desc).StartAfter(7, 8).EndAt(9, 10)`, + in: q.OrderBy("a", Asc).OrderBy("b", Desc).StartAfter(7, 8).EndAt(9, 10), want: &pb.StructuredQuery{ OrderBy: []*pb.StructuredQuery_Order{ {fref1("a"), pb.StructuredQuery_ASCENDING}, @@ -194,6 +294,7 @@ func TestQueryToProto(t *testing.T) { }, { // last of StartAt/After wins, same for End + desc: `q.OrderBy("a", Asc).StartAfter(1).StartAt(2).EndAt(3).EndBefore(4)`, in: q.OrderBy("a", Asc). StartAfter(1).StartAt(2). EndAt(3).EndBefore(4), @@ -211,14 +312,128 @@ func TestQueryToProto(t *testing.T) { }, }, }, + // Start/End with DocumentSnapshot + // These tests are from the "Document Snapshot Cursors" doc. + { + desc: `q.StartAt(docsnap)`, + in: q.StartAt(docsnap), + want: &pb.StructuredQuery{ + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("__name__"), pb.StructuredQuery_ASCENDING}, + }, + StartAt: &pb.Cursor{ + Values: []*pb.Value{refval(coll.parentPath + "/documents/C/D")}, + Before: true, + }, + }, + }, + { + desc: `q.OrderBy("a", Asc).StartAt(docsnap)`, + in: q.OrderBy("a", Asc).StartAt(docsnap), + want: &pb.StructuredQuery{ + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("a"), pb.StructuredQuery_ASCENDING}, + {fref1("__name__"), pb.StructuredQuery_ASCENDING}, + }, + StartAt: &pb.Cursor{ + Values: []*pb.Value{intval(7), refval(coll.parentPath + "/documents/C/D")}, + Before: true, + }, + }, + }, + + { + desc: `q.OrderBy("a", Desc).StartAt(docsnap)`, + in: q.OrderBy("a", Desc).StartAt(docsnap), + want: &pb.StructuredQuery{ + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("a"), pb.StructuredQuery_DESCENDING}, + {fref1("__name__"), pb.StructuredQuery_DESCENDING}, + }, + StartAt: &pb.Cursor{ + Values: []*pb.Value{intval(7), refval(coll.parentPath + "/documents/C/D")}, + Before: true, + }, + }, + }, + { + desc: `q.OrderBy("a", Desc).OrderBy("b", Asc).StartAt(docsnap)`, + in: q.OrderBy("a", Desc).OrderBy("b", Asc).StartAt(docsnap), + want: &pb.StructuredQuery{ + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("a"), pb.StructuredQuery_DESCENDING}, + {fref1("b"), pb.StructuredQuery_ASCENDING}, + {fref1("__name__"), pb.StructuredQuery_ASCENDING}, + }, + StartAt: &pb.Cursor{ + Values: []*pb.Value{intval(7), intval(8), refval(coll.parentPath + "/documents/C/D")}, + Before: true, + }, + }, + }, + { + desc: `q.Where("a", "==", 3).StartAt(docsnap)`, + in: q.Where("a", "==", 3).StartAt(docsnap), + want: &pb.StructuredQuery{ + Where: filtr([]string{"a"}, "==", 3), + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("__name__"), pb.StructuredQuery_ASCENDING}, + }, + StartAt: &pb.Cursor{ + Values: []*pb.Value{refval(coll.parentPath + "/documents/C/D")}, + Before: true, + }, + }, + }, + { + desc: `q.Where("a", "<", 3).StartAt(docsnap)`, + in: q.Where("a", "<", 3).StartAt(docsnap), + want: &pb.StructuredQuery{ + Where: filtr([]string{"a"}, "<", 3), + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("a"), pb.StructuredQuery_ASCENDING}, + {fref1("__name__"), pb.StructuredQuery_ASCENDING}, + }, + StartAt: &pb.Cursor{ + Values: []*pb.Value{intval(7), refval(coll.parentPath + "/documents/C/D")}, + Before: true, + }, + }, + }, + { + desc: `q.Where("b", "==", 1).Where("a", "<", 3).StartAt(docsnap)`, + in: q.Where("b", "==", 1).Where("a", "<", 3).StartAt(docsnap), + want: &pb.StructuredQuery{ + Where: 
&pb.StructuredQuery_Filter{ + &pb.StructuredQuery_Filter_CompositeFilter{ + &pb.StructuredQuery_CompositeFilter{ + Op: pb.StructuredQuery_CompositeFilter_AND, + Filters: []*pb.StructuredQuery_Filter{ + filtr([]string{"b"}, "==", 1), + filtr([]string{"a"}, "<", 3), + }, + }, + }, + }, + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("a"), pb.StructuredQuery_ASCENDING}, + {fref1("__name__"), pb.StructuredQuery_ASCENDING}, + }, + StartAt: &pb.Cursor{ + Values: []*pb.Value{intval(7), refval(coll.parentPath + "/documents/C/D")}, + Before: true, + }, + }, + }, } { got, err := test.in.toProto() if err != nil { - t.Fatalf("%+v: %v", test.in, err) + t.Errorf("%s: %v", test.desc, err) + continue } test.want.From = []*pb.StructuredQuery_CollectionSelector{{CollectionId: "C"}} if !testEqual(got, test.want) { - t.Errorf("%+v: got\n%v\nwant\n%v", test.in, pretty.Value(got), pretty.Value(test.want)) + t.Errorf("%s:\ngot\n%v\nwant\n%v", test.desc, pretty.Value(got), pretty.Value(test.want)) } } } @@ -230,7 +445,15 @@ func fref1(s string) *pb.StructuredQuery_FieldReference { func TestQueryToProtoErrors(t *testing.T) { st := map[string]interface{}{"a": ServerTimestamp} del := map[string]interface{}{"a": Delete} - q := (&Client{}).Collection("C").Query + c := &Client{projectID: "P", databaseID: "DB"} + coll := c.Collection("C") + docsnap := &DocumentSnapshot{ + Ref: coll.Doc("D"), + proto: &pb.Document{ + Fields: map[string]*pb.Value{"a": intval(7)}, + }, + } + q := coll.Query for _, query := range []Query{ Query{}, // no collection ID q.Where("x", "!=", 1), // invalid operator @@ -248,6 +471,11 @@ func TestQueryToProtoErrors(t *testing.T) { q.Where("x", "==", del), // Delete in filter q.OrderBy("a", Asc).StartAt(del), // Delete in Start q.OrderBy("a", Asc).EndAt(del), // Delete in End + q.OrderBy(DocumentID, Asc).StartAt(7), // wrong type for __name__ + q.OrderBy(DocumentID, Asc).EndAt(7), // wrong type for __name__ + q.OrderBy("b", Asc).StartAt(docsnap), // doc snapshot does not have order-by field + q.StartAt(docsnap).EndAt("x"), // mixed doc snapshot and fields + q.StartAfter("x").EndBefore(docsnap), // mixed doc snapshot and fields } { _, err := query.toProto() if err == nil { @@ -362,10 +590,10 @@ func TestQueryGetAll(t *testing.T) { Fields: map[string]*pb.Value{"f": intval(1)}, }, } - + wantReadTimes := []*tspb.Timestamp{aTimestamp, aTimestamp2} srv.addRPC(nil, []interface{}{ - &pb.RunQueryResponse{Document: wantPBDocs[0]}, - &pb.RunQueryResponse{Document: wantPBDocs[1]}, + &pb.RunQueryResponse{Document: wantPBDocs[0], ReadTime: aTimestamp}, + &pb.RunQueryResponse{Document: wantPBDocs[1], ReadTime: aTimestamp2}, }) gotDocs, err := c.Collection("C").Documents(ctx).GetAll() if err != nil { @@ -375,7 +603,7 @@ func TestQueryGetAll(t *testing.T) { t.Errorf("got %d docs, wanted %d", got, want) } for i, got := range gotDocs { - want, err := newDocumentSnapshot(c.Doc(docNames[i]), wantPBDocs[i], c) + want, err := newDocumentSnapshot(c.Doc(docNames[i]), wantPBDocs[i], c, wantReadTimes[i]) if err != nil { t.Fatal(err) } @@ -387,3 +615,113 @@ func TestQueryGetAll(t *testing.T) { } } } + +func TestQueryCompareFunc(t *testing.T) { + mv := func(fields ...interface{}) map[string]*pb.Value { + m := map[string]*pb.Value{} + for i := 0; i < len(fields); i += 2 { + m[fields[i].(string)] = fields[i+1].(*pb.Value) + } + return m + } + snap := func(ref *DocumentRef, fields map[string]*pb.Value) *DocumentSnapshot { + return &DocumentSnapshot{Ref: ref, proto: &pb.Document{Fields: fields}} + } + + c := &Client{} + coll := 
c.Collection("C") + doc1 := coll.Doc("doc1") + doc2 := coll.Doc("doc2") + doc3 := coll.Doc("doc3") + doc4 := coll.Doc("doc4") + for _, test := range []struct { + q Query + in []*DocumentSnapshot + want []*DocumentSnapshot + }{ + { + q: coll.OrderBy("foo", Asc), + in: []*DocumentSnapshot{ + snap(doc3, mv("foo", intval(2))), + snap(doc4, mv("foo", intval(1))), + snap(doc2, mv("foo", intval(2))), + }, + want: []*DocumentSnapshot{ + snap(doc4, mv("foo", intval(1))), + snap(doc2, mv("foo", intval(2))), + snap(doc3, mv("foo", intval(2))), + }, + }, + { + q: coll.OrderBy("foo", Desc), + in: []*DocumentSnapshot{ + snap(doc3, mv("foo", intval(2))), + snap(doc4, mv("foo", intval(1))), + snap(doc2, mv("foo", intval(2))), + }, + want: []*DocumentSnapshot{ + snap(doc3, mv("foo", intval(2))), + snap(doc2, mv("foo", intval(2))), + snap(doc4, mv("foo", intval(1))), + }, + }, + { + q: coll.OrderBy("foo.bar", Asc), + in: []*DocumentSnapshot{ + snap(doc1, mv("foo", mapval(mv("bar", intval(1))))), + snap(doc2, mv("foo", mapval(mv("bar", intval(2))))), + snap(doc3, mv("foo", mapval(mv("bar", intval(2))))), + }, + want: []*DocumentSnapshot{ + snap(doc1, mv("foo", mapval(mv("bar", intval(1))))), + snap(doc2, mv("foo", mapval(mv("bar", intval(2))))), + snap(doc3, mv("foo", mapval(mv("bar", intval(2))))), + }, + }, + { + q: coll.OrderBy("foo.bar", Desc), + in: []*DocumentSnapshot{ + snap(doc1, mv("foo", mapval(mv("bar", intval(1))))), + snap(doc2, mv("foo", mapval(mv("bar", intval(2))))), + snap(doc3, mv("foo", mapval(mv("bar", intval(2))))), + }, + want: []*DocumentSnapshot{ + snap(doc3, mv("foo", mapval(mv("bar", intval(2))))), + snap(doc2, mv("foo", mapval(mv("bar", intval(2))))), + snap(doc1, mv("foo", mapval(mv("bar", intval(1))))), + }, + }, + } { + got := append([]*DocumentSnapshot(nil), test.in...) + sort.Sort(byQuery{test.q.compareFunc(), got}) + if diff := testDiff(got, test.want); diff != "" { + t.Errorf("%+v: %s", test.q, diff) + } + } + + // Want error on missing field. + q := coll.OrderBy("bar", Asc) + if q.err != nil { + t.Fatalf("bad query: %v", q.err) + } + cf := q.compareFunc() + s := snap(doc1, mv("foo", intval(1))) + if _, err := cf(s, s); err == nil { + t.Error("got nil, want error") + } +} + +type byQuery struct { + compare func(d1, d2 *DocumentSnapshot) (int, error) + docs []*DocumentSnapshot +} + +func (b byQuery) Len() int { return len(b.docs) } +func (b byQuery) Swap(i, j int) { b.docs[i], b.docs[j] = b.docs[j], b.docs[i] } +func (b byQuery) Less(i, j int) bool { + c, err := b.compare(b.docs[i], b.docs[j]) + if err != nil { + panic(err) + } + return c < 0 +} diff --git a/vendor/cloud.google.com/go/firestore/testdata/Makefile b/vendor/cloud.google.com/go/firestore/testdata/Makefile deleted file mode 100644 index 499aa05cf..000000000 --- a/vendor/cloud.google.com/go/firestore/testdata/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -# Copy textproto files in this directory from the source of truth. - -SRC=$(GOPATH)/src/github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/testdata - -.PHONY: refresh - -refresh: - -rm *.textproto - cp $(SRC)/*.textproto . 
- openssl dgst -sha1 $(SRC)/tests.binprotos > VERSION - diff --git a/vendor/cloud.google.com/go/firestore/testdata/VERSION b/vendor/cloud.google.com/go/firestore/testdata/VERSION index e1149bbb7..c02df2163 100644 --- a/vendor/cloud.google.com/go/firestore/testdata/VERSION +++ b/vendor/cloud.google.com/go/firestore/testdata/VERSION @@ -1 +1 @@ -SHA1(/usr/local/google/home/jba/go/src/github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/testdata/tests.binprotos)= b0fbaaac8664945cb4f5667da092a6f9ededc57e +SHA1(/usr/local/google/home/jba/go/src/github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/testdata/test-suite.binproto)= 3047565564b81726a57d7db719704ea8bf17a9ab diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-order.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-order.textproto new file mode 100644 index 000000000..bab8601e8 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-order.textproto @@ -0,0 +1,68 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# When a document snapshot is used, the client appends a __name__ order-by clause +# with the direction of the last order-by clause. + +description: "query: cursor methods with a document snapshot, existing orderBy" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + order_by: < + path: < + field: "a" + > + direction: "asc" + > + > + clauses: < + order_by: < + path: < + field: "b" + > + direction: "desc" + > + > + clauses: < + start_after: < + doc_snapshot: < + path: "projects/projectID/databases/(default)/documents/C/D" + json_data: "{\"a\": 7, \"b\": 8}" + > + > + > + query: < + from: < + collection_id: "C" + > + order_by: < + field: < + field_path: "a" + > + direction: ASCENDING + > + order_by: < + field: < + field_path: "b" + > + direction: DESCENDING + > + order_by: < + field: < + field_path: "__name__" + > + direction: DESCENDING + > + start_at: < + values: < + integer_value: 7 + > + values: < + integer_value: 8 + > + values: < + reference_value: "projects/projectID/databases/(default)/documents/C/D" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-orderby-name.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-orderby-name.textproto new file mode 100644 index 000000000..d0ce3df45 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-orderby-name.textproto @@ -0,0 +1,76 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If there is an existing orderBy clause on __name__, no changes are made to the +# list of orderBy clauses. 
+ +description: "query: cursor method, doc snapshot, existing orderBy __name__" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + order_by: < + path: < + field: "a" + > + direction: "desc" + > + > + clauses: < + order_by: < + path: < + field: "__name__" + > + direction: "asc" + > + > + clauses: < + start_at: < + doc_snapshot: < + path: "projects/projectID/databases/(default)/documents/C/D" + json_data: "{\"a\": 7, \"b\": 8}" + > + > + > + clauses: < + end_at: < + doc_snapshot: < + path: "projects/projectID/databases/(default)/documents/C/D" + json_data: "{\"a\": 7, \"b\": 8}" + > + > + > + query: < + from: < + collection_id: "C" + > + order_by: < + field: < + field_path: "a" + > + direction: DESCENDING + > + order_by: < + field: < + field_path: "__name__" + > + direction: ASCENDING + > + start_at: < + values: < + integer_value: 7 + > + values: < + reference_value: "projects/projectID/databases/(default)/documents/C/D" + > + before: true + > + end_at: < + values: < + integer_value: 7 + > + values: < + reference_value: "projects/projectID/databases/(default)/documents/C/D" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-eq.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-eq.textproto new file mode 100644 index 000000000..8b1e217df --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-eq.textproto @@ -0,0 +1,53 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A Where clause using equality doesn't change the implicit orderBy clauses. + +description: "query: cursor methods with a document snapshot and an equality where clause" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + where: < + path: < + field: "a" + > + op: "==" + json_value: "3" + > + > + clauses: < + end_at: < + doc_snapshot: < + path: "projects/projectID/databases/(default)/documents/C/D" + json_data: "{\"a\": 7, \"b\": 8}" + > + > + > + query: < + from: < + collection_id: "C" + > + where: < + field_filter: < + field: < + field_path: "a" + > + op: EQUAL + value: < + integer_value: 3 + > + > + > + order_by: < + field: < + field_path: "__name__" + > + direction: ASCENDING + > + end_at: < + values: < + reference_value: "projects/projectID/databases/(default)/documents/C/D" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-neq-orderby.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-neq-orderby.textproto new file mode 100644 index 000000000..a69edfc50 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-neq-orderby.textproto @@ -0,0 +1,72 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If there is an OrderBy clause, the inequality Where clause does not result in a +# new OrderBy clause. 
We still add a __name__ OrderBy clause + +description: "query: cursor method, doc snapshot, inequality where clause, and existing orderBy clause" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + order_by: < + path: < + field: "a" + > + direction: "desc" + > + > + clauses: < + where: < + path: < + field: "a" + > + op: "<" + json_value: "4" + > + > + clauses: < + start_at: < + doc_snapshot: < + path: "projects/projectID/databases/(default)/documents/C/D" + json_data: "{\"a\": 7, \"b\": 8}" + > + > + > + query: < + from: < + collection_id: "C" + > + where: < + field_filter: < + field: < + field_path: "a" + > + op: LESS_THAN + value: < + integer_value: 4 + > + > + > + order_by: < + field: < + field_path: "a" + > + direction: DESCENDING + > + order_by: < + field: < + field_path: "__name__" + > + direction: DESCENDING + > + start_at: < + values: < + integer_value: 7 + > + values: < + reference_value: "projects/projectID/databases/(default)/documents/C/D" + > + before: true + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-neq.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-neq.textproto new file mode 100644 index 000000000..871dd0ba3 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap-where-neq.textproto @@ -0,0 +1,64 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A Where clause with an inequality results in an OrderBy clause on that clause's +# path, if there are no other OrderBy clauses. + +description: "query: cursor method with a document snapshot and an inequality where clause" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + where: < + path: < + field: "a" + > + op: "<=" + json_value: "3" + > + > + clauses: < + end_before: < + doc_snapshot: < + path: "projects/projectID/databases/(default)/documents/C/D" + json_data: "{\"a\": 7, \"b\": 8}" + > + > + > + query: < + from: < + collection_id: "C" + > + where: < + field_filter: < + field: < + field_path: "a" + > + op: LESS_THAN_OR_EQUAL + value: < + integer_value: 3 + > + > + > + order_by: < + field: < + field_path: "a" + > + direction: ASCENDING + > + order_by: < + field: < + field_path: "__name__" + > + direction: ASCENDING + > + end_at: < + values: < + integer_value: 7 + > + values: < + reference_value: "projects/projectID/databases/(default)/documents/C/D" + > + before: true + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap.textproto new file mode 100644 index 000000000..184bffc2d --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-docsnap.textproto @@ -0,0 +1,34 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# When a document snapshot is used, the client appends a __name__ order-by clause. 
+ +description: "query: cursor methods with a document snapshot" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + start_at: < + doc_snapshot: < + path: "projects/projectID/databases/(default)/documents/C/D" + json_data: "{\"a\": 7, \"b\": 8}" + > + > + > + query: < + from: < + collection_id: "C" + > + order_by: < + field: < + field_path: "__name__" + > + direction: ASCENDING + > + start_at: < + values: < + reference_value: "projects/projectID/databases/(default)/documents/C/D" + > + before: true + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-cursor-no-order.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-no-order.textproto new file mode 100644 index 000000000..fb999ddab --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-no-order.textproto @@ -0,0 +1,16 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If a cursor method with a list of values is provided, there must be at least as +# many explicit orderBy clauses as values. + +description: "query: cursor method without orderBy" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + start_at: < + json_values: "2" + > + > + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-1a.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-1a.textproto new file mode 100644 index 000000000..bb08ab7d4 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-1a.textproto @@ -0,0 +1,50 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Cursor methods take the same number of values as there are OrderBy clauses. + +description: "query: StartAt/EndBefore with values" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + order_by: < + path: < + field: "a" + > + direction: "asc" + > + > + clauses: < + start_at: < + json_values: "7" + > + > + clauses: < + end_before: < + json_values: "9" + > + > + query: < + from: < + collection_id: "C" + > + order_by: < + field: < + field_path: "a" + > + direction: ASCENDING + > + start_at: < + values: < + integer_value: 7 + > + before: true + > + end_at: < + values: < + integer_value: 9 + > + before: true + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-1b.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-1b.textproto new file mode 100644 index 000000000..41e69e9e6 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-1b.textproto @@ -0,0 +1,48 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Cursor methods take the same number of values as there are OrderBy clauses. 
+ +description: "query: StartAfter/EndAt with values" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + order_by: < + path: < + field: "a" + > + direction: "asc" + > + > + clauses: < + start_after: < + json_values: "7" + > + > + clauses: < + end_at: < + json_values: "9" + > + > + query: < + from: < + collection_id: "C" + > + order_by: < + field: < + field_path: "a" + > + direction: ASCENDING + > + start_at: < + values: < + integer_value: 7 + > + > + end_at: < + values: < + integer_value: 9 + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-2.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-2.textproto new file mode 100644 index 000000000..8e37ad003 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-2.textproto @@ -0,0 +1,71 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Cursor methods take the same number of values as there are OrderBy clauses. + +description: "query: Start/End with two values" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + order_by: < + path: < + field: "a" + > + direction: "asc" + > + > + clauses: < + order_by: < + path: < + field: "b" + > + direction: "desc" + > + > + clauses: < + start_at: < + json_values: "7" + json_values: "8" + > + > + clauses: < + end_at: < + json_values: "9" + json_values: "10" + > + > + query: < + from: < + collection_id: "C" + > + order_by: < + field: < + field_path: "a" + > + direction: ASCENDING + > + order_by: < + field: < + field_path: "b" + > + direction: DESCENDING + > + start_at: < + values: < + integer_value: 7 + > + values: < + integer_value: 8 + > + before: true + > + end_at: < + values: < + integer_value: 9 + > + values: < + integer_value: 10 + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-docid.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-docid.textproto new file mode 100644 index 000000000..91af3486c --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-docid.textproto @@ -0,0 +1,50 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Cursor values corresponding to a __name__ field take the document path relative +# to the query's collection. 
+ +description: "query: cursor methods with __name__" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + order_by: < + path: < + field: "__name__" + > + direction: "asc" + > + > + clauses: < + start_after: < + json_values: "\"D1\"" + > + > + clauses: < + end_before: < + json_values: "\"D2\"" + > + > + query: < + from: < + collection_id: "C" + > + order_by: < + field: < + field_path: "__name__" + > + direction: ASCENDING + > + start_at: < + values: < + reference_value: "projects/projectID/databases/(default)/documents/C/D1" + > + > + end_at: < + values: < + reference_value: "projects/projectID/databases/(default)/documents/C/D2" + > + before: true + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-last-wins.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-last-wins.textproto new file mode 100644 index 000000000..9e8fbb19f --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-cursor-vals-last-wins.textproto @@ -0,0 +1,60 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# When multiple Start* or End* calls occur, the values of the last one are used. + +description: "query: cursor methods, last one wins" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + order_by: < + path: < + field: "a" + > + direction: "asc" + > + > + clauses: < + start_after: < + json_values: "1" + > + > + clauses: < + start_at: < + json_values: "2" + > + > + clauses: < + end_at: < + json_values: "3" + > + > + clauses: < + end_before: < + json_values: "4" + > + > + query: < + from: < + collection_id: "C" + > + order_by: < + field: < + field_path: "a" + > + direction: ASCENDING + > + start_at: < + values: < + integer_value: 2 + > + before: true + > + end_at: < + values: < + integer_value: 4 + > + before: true + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-del-cursor.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-del-cursor.textproto new file mode 100644 index 000000000..c9d4adb7c --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-del-cursor.textproto @@ -0,0 +1,23 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Sentinel values are not permitted in queries. + +description: "query: Delete in cursor method" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + order_by: < + path: < + field: "a" + > + direction: "asc" + > + > + clauses: < + end_before: < + json_values: "\"Delete\"" + > + > + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-del-where.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-del-where.textproto new file mode 100644 index 000000000..8e9252949 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-del-where.textproto @@ -0,0 +1,19 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Sentinel values are not permitted in queries. 
+ +description: "query: Delete in Where" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + where: < + path: < + field: "a" + > + op: "==" + json_value: "\"Delete\"" + > + > + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-invalid-operator.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-invalid-operator.textproto new file mode 100644 index 000000000..e580c64a7 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-invalid-operator.textproto @@ -0,0 +1,19 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The != operator is not supported. + +description: "query: invalid operator in Where clause" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + where: < + path: < + field: "a" + > + op: "!=" + json_value: "4" + > + > + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-invalid-path-order.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-invalid-path-order.textproto new file mode 100644 index 000000000..e0a720576 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-invalid-path-order.textproto @@ -0,0 +1,19 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The path has an empty component. + +description: "query: invalid path in OrderBy clause" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + order_by: < + path: < + field: "*" + field: "" + > + direction: "asc" + > + > + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-invalid-path-select.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-invalid-path-select.textproto new file mode 100644 index 000000000..944f984f7 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-invalid-path-select.textproto @@ -0,0 +1,18 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The path has an empty component. + +description: "query: invalid path in Where clause" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + select: < + fields: < + field: "*" + field: "" + > + > + > + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-invalid-path-where.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-invalid-path-where.textproto new file mode 100644 index 000000000..527923b09 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-invalid-path-where.textproto @@ -0,0 +1,20 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The path has an empty component. 
+ +description: "query: invalid path in Where clause" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + where: < + path: < + field: "*" + field: "" + > + op: "==" + json_value: "4" + > + > + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-offset-limit-last-wins.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-offset-limit-last-wins.textproto new file mode 100644 index 000000000..dc301f439 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-offset-limit-last-wins.textproto @@ -0,0 +1,30 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# With multiple Offset or Limit clauses, the last one wins. + +description: "query: multiple Offset and Limit clauses" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + offset: 2 + > + clauses: < + limit: 3 + > + clauses: < + limit: 4 + > + clauses: < + offset: 5 + > + query: < + from: < + collection_id: "C" + > + offset: 5 + limit: < + value: 4 + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-offset-limit.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-offset-limit.textproto new file mode 100644 index 000000000..136d9d46a --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-offset-limit.textproto @@ -0,0 +1,24 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Offset and Limit clauses. + +description: "query: Offset and Limit clauses" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + offset: 2 + > + clauses: < + limit: 3 + > + query: < + from: < + collection_id: "C" + > + offset: 2 + limit: < + value: 3 + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-order.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-order.textproto new file mode 100644 index 000000000..7ed4c4ead --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-order.textproto @@ -0,0 +1,42 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Multiple OrderBy clauses combine. + +description: "query: basic OrderBy clauses" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + order_by: < + path: < + field: "b" + > + direction: "asc" + > + > + clauses: < + order_by: < + path: < + field: "a" + > + direction: "desc" + > + > + query: < + from: < + collection_id: "C" + > + order_by: < + field: < + field_path: "b" + > + direction: ASCENDING + > + order_by: < + field: < + field_path: "a" + > + direction: DESCENDING + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-select-empty.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-select-empty.textproto new file mode 100644 index 000000000..def8b55ac --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-select-empty.textproto @@ -0,0 +1,23 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# An empty Select clause selects just the document ID. 
+ +description: "query: empty Select clause" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + select: < + > + > + query: < + select: < + fields: < + field_path: "__name__" + > + > + from: < + collection_id: "C" + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-select-last-wins.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-select-last-wins.textproto new file mode 100644 index 000000000..bd78d09eb --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-select-last-wins.textproto @@ -0,0 +1,36 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The last Select clause is the only one used. + +description: "query: two Select clauses" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + select: < + fields: < + field: "a" + > + fields: < + field: "b" + > + > + > + clauses: < + select: < + fields: < + field: "c" + > + > + > + query: < + select: < + fields: < + field_path: "c" + > + > + from: < + collection_id: "C" + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-select.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-select.textproto new file mode 100644 index 000000000..15e112497 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-select.textproto @@ -0,0 +1,32 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# An ordinary Select clause. + +description: "query: Select clause with some fields" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + select: < + fields: < + field: "a" + > + fields: < + field: "b" + > + > + > + query: < + select: < + fields: < + field_path: "a" + > + fields: < + field_path: "b" + > + > + from: < + collection_id: "C" + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-st-cursor.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-st-cursor.textproto new file mode 100644 index 000000000..66885d0dd --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-st-cursor.textproto @@ -0,0 +1,23 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Sentinel values are not permitted in queries. + +description: "query: ServerTimestamp in cursor method" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + order_by: < + path: < + field: "a" + > + direction: "asc" + > + > + clauses: < + end_before: < + json_values: "\"ServerTimestamp\"" + > + > + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-st-where.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-st-where.textproto new file mode 100644 index 000000000..05da28d54 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-st-where.textproto @@ -0,0 +1,19 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Sentinel values are not permitted in queries. 
+ +description: "query: ServerTimestamp in Where" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + where: < + path: < + field: "a" + > + op: "==" + json_value: "\"ServerTimestamp\"" + > + > + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-where-2.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-where-2.textproto new file mode 100644 index 000000000..103446307 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-where-2.textproto @@ -0,0 +1,59 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Multiple Where clauses are combined into a composite filter. + +description: "query: two Where clauses" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + where: < + path: < + field: "a" + > + op: ">=" + json_value: "5" + > + > + clauses: < + where: < + path: < + field: "b" + > + op: "<" + json_value: "\"foo\"" + > + > + query: < + from: < + collection_id: "C" + > + where: < + composite_filter: < + op: AND + filters: < + field_filter: < + field: < + field_path: "a" + > + op: GREATER_THAN_OR_EQUAL + value: < + integer_value: 5 + > + > + > + filters: < + field_filter: < + field: < + field_path: "b" + > + op: LESS_THAN + value: < + string_value: "foo" + > + > + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-where.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-where.textproto new file mode 100644 index 000000000..045c2befa --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-where.textproto @@ -0,0 +1,34 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A simple Where clause. + +description: "query: Where clause" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + where: < + path: < + field: "a" + > + op: ">" + json_value: "5" + > + > + query: < + from: < + collection_id: "C" + > + where: < + field_filter: < + field: < + field_path: "a" + > + op: GREATER_THAN + value: < + integer_value: 5 + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/query-wrong-collection.textproto b/vendor/cloud.google.com/go/firestore/testdata/query-wrong-collection.textproto new file mode 100644 index 000000000..ad6f353d5 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/query-wrong-collection.textproto @@ -0,0 +1,19 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If a document snapshot is passed to a Start*/End* method, it must be in the same +# collection as the query. 
+ +description: "query: doc snapshot with wrong collection in cursor method" +query: < + coll_path: "projects/projectID/databases/(default)/documents/C" + clauses: < + end_before: < + doc_snapshot: < + path: "projects/projectID/databases/(default)/documents/C2/D" + json_data: "{\"a\": 7, \"b\": 8}" + > + > + > + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/to_value.go b/vendor/cloud.google.com/go/firestore/to_value.go index 0c6fe93bb..1c97ecf8e 100644 --- a/vendor/cloud.google.com/go/firestore/to_value.go +++ b/vendor/cloud.google.com/go/firestore/to_value.go @@ -22,6 +22,7 @@ import ( "cloud.google.com/go/internal/fields" "github.com/golang/protobuf/ptypes" + ts "github.com/golang/protobuf/ptypes/timestamp" pb "google.golang.org/genproto/googleapis/firestore/v1beta1" "google.golang.org/genproto/googleapis/type/latlng" ) @@ -29,10 +30,11 @@ import ( var nullValue = &pb.Value{&pb.Value_NullValue{}} var ( - typeOfByteSlice = reflect.TypeOf([]byte{}) - typeOfGoTime = reflect.TypeOf(time.Time{}) - typeOfLatLng = reflect.TypeOf((*latlng.LatLng)(nil)) - typeOfDocumentRef = reflect.TypeOf((*DocumentRef)(nil)) + typeOfByteSlice = reflect.TypeOf([]byte{}) + typeOfGoTime = reflect.TypeOf(time.Time{}) + typeOfLatLng = reflect.TypeOf((*latlng.LatLng)(nil)) + typeOfDocumentRef = reflect.TypeOf((*DocumentRef)(nil)) + typeOfProtoTimestamp = reflect.TypeOf((*ts.Timestamp)(nil)) ) // toProtoValue converts a Go value to a Firestore Value protobuf. @@ -64,6 +66,12 @@ func toProtoValue(v reflect.Value) (pbv *pb.Value, sawServerTimestamp bool, err return nil, false, err } return &pb.Value{&pb.Value_TimestampValue{ts}}, false, nil + case *ts.Timestamp: + if x == nil { + // gRPC doesn't like nil oneofs. Use NullValue. + return nullValue, false, nil + } + return &pb.Value{&pb.Value_TimestampValue{x}}, false, nil case *latlng.LatLng: if x == nil { // gRPC doesn't like nil oneofs. Use NullValue. @@ -240,7 +248,7 @@ func parseTag(t reflect.StructTag) (name string, keep bool, other interface{}, e // isLeafType determines whether or not a type is a 'leaf type' // and should not be recursed into, but considered one field. 
func isLeafType(t reflect.Type) bool { - return t == typeOfGoTime || t == typeOfLatLng + return t == typeOfGoTime || t == typeOfLatLng || t == typeOfProtoTimestamp } var fieldCache = fields.NewCache(parseTag, nil, isLeafType) diff --git a/vendor/cloud.google.com/go/firestore/to_value_test.go b/vendor/cloud.google.com/go/firestore/to_value_test.go index ae58a1354..eac26a364 100644 --- a/vendor/cloud.google.com/go/firestore/to_value_test.go +++ b/vendor/cloud.google.com/go/firestore/to_value_test.go @@ -20,54 +20,58 @@ import ( "testing" "time" + ts "github.com/golang/protobuf/ptypes/timestamp" pb "google.golang.org/genproto/googleapis/firestore/v1beta1" "google.golang.org/genproto/googleapis/type/latlng" ) type testStruct1 struct { - B bool - I int - U uint32 - F float64 - S string - Y []byte - T time.Time - G *latlng.LatLng - L []int - M map[string]int - P *int + B bool + I int + U uint32 + F float64 + S string + Y []byte + T time.Time + Ts *ts.Timestamp + G *latlng.LatLng + L []int + M map[string]int + P *int } var ( p = new(int) testVal1 = testStruct1{ - B: true, - I: 1, - U: 2, - F: 3.0, - S: "four", - Y: []byte{5}, - T: tm, - G: ll, - L: []int{6}, - M: map[string]int{"a": 7}, - P: p, + B: true, + I: 1, + U: 2, + F: 3.0, + S: "four", + Y: []byte{5}, + T: tm, + Ts: ptm, + G: ll, + L: []int{6}, + M: map[string]int{"a": 7}, + P: p, } mapVal1 = mapval(map[string]*pb.Value{ - "B": boolval(true), - "I": intval(1), - "U": intval(2), - "F": floatval(3), - "S": &pb.Value{&pb.Value_StringValue{"four"}}, - "Y": bytesval([]byte{5}), - "T": tsval(tm), - "G": geoval(ll), - "L": arrayval(intval(6)), - "M": mapval(map[string]*pb.Value{"a": intval(7)}), - "P": intval(8), + "B": boolval(true), + "I": intval(1), + "U": intval(2), + "F": floatval(3), + "S": &pb.Value{&pb.Value_StringValue{"four"}}, + "Y": bytesval([]byte{5}), + "T": tsval(tm), + "Ts": &pb.Value{&pb.Value_TimestampValue{ptm}}, + "G": geoval(ll), + "L": arrayval(intval(6)), + "M": mapval(map[string]*pb.Value{"a": intval(7)}), + "P": intval(8), }) ) @@ -81,6 +85,7 @@ func TestToProtoValue(t *testing.T) { {[]int(nil), nullValue}, {map[string]int(nil), nullValue}, {(*testStruct1)(nil), nullValue}, + {(*ts.Timestamp)(nil), nullValue}, {(*latlng.LatLng)(nil), nullValue}, {(*DocumentRef)(nil), nullValue}, {true, boolval(true)}, @@ -90,6 +95,7 @@ func TestToProtoValue(t *testing.T) { {"str", strval("str")}, {[]byte{1, 2}, bytesval([]byte{1, 2})}, {tm, tsval(tm)}, + {ptm, &pb.Value{&pb.Value_TimestampValue{ptm}}}, {ll, geoval(ll)}, {[]int{1, 2}, arrayval(intval(1), intval(2))}, {&[]int{1, 2}, arrayval(intval(1), intval(2))}, @@ -234,19 +240,21 @@ func TestToProtoValueTags(t *testing.T) { } func TestToProtoValueEmbedded(t *testing.T) { - // Embedded time.Time or LatLng should behave like non-embedded. + // Embedded time.Time, LatLng, or Timestamp should behave like non-embedded. 
type embed struct { time.Time *latlng.LatLng + *ts.Timestamp } - got, _, err := toProtoValue(reflect.ValueOf(embed{tm, ll})) + got, _, err := toProtoValue(reflect.ValueOf(embed{tm, ll, ptm})) if err != nil { t.Fatal(err) } want := mapval(map[string]*pb.Value{ - "Time": tsval(tm), - "LatLng": geoval(ll), + "Time": tsval(tm), + "LatLng": geoval(ll), + "Timestamp": &pb.Value{&pb.Value_TimestampValue{ptm}}, }) if !testEqual(got, want) { t.Errorf("got %+v, want %+v", got, want) diff --git a/vendor/cloud.google.com/go/firestore/transaction.go b/vendor/cloud.google.com/go/firestore/transaction.go index 0fa7e7c06..27ed1b979 100644 --- a/vendor/cloud.google.com/go/firestore/transaction.go +++ b/vendor/cloud.google.com/go/firestore/transaction.go @@ -195,7 +195,8 @@ func (t *Transaction) rollback() { // Note: Rollback is idempotent so it will be retried by the gapic layer. } -// Get gets the document in the context of the transaction. +// Get gets the document in the context of the transaction. The transaction holds a +// pessimistic lock on the returned document. func (t *Transaction) Get(dr *DocumentRef) (*DocumentSnapshot, error) { if len(t.writes) > 0 { t.readAfterWrite = true @@ -208,7 +209,19 @@ func (t *Transaction) Get(dr *DocumentRef) (*DocumentSnapshot, error) { if err != nil { return nil, err } - return newDocumentSnapshot(dr, docProto, t.c) + return newDocumentSnapshot(dr, docProto, t.c, nil) +} + +// GetAll retrieves multiple documents with a single call. The DocumentSnapshots are +// returned in the order of the given DocumentRefs. If a document is not present, the +// corresponding DocumentSnapshot will be nil. The transaction holds a pessimistic +// lock on all of the returned documents. +func (t *Transaction) GetAll(drs []*DocumentRef) ([]*DocumentSnapshot, error) { + if len(t.writes) > 0 { + t.readAfterWrite = true + return nil, errReadAfterWrite + } + return t.c.getAll(t.ctx, drs, t.id) } // A Queryer is a Query or a CollectionRef. CollectionRefs act as queries whose diff --git a/vendor/cloud.google.com/go/firestore/transaction_test.go b/vendor/cloud.google.com/go/firestore/transaction_test.go index fad528d95..2fc864419 100644 --- a/vendor/cloud.google.com/go/firestore/transaction_test.go +++ b/vendor/cloud.google.com/go/firestore/transaction_test.go @@ -18,6 +18,7 @@ import ( "testing" "golang.org/x/net/context" + "google.golang.org/grpc/status" pb "google.golang.org/genproto/googleapis/firestore/v1beta1" @@ -125,7 +126,7 @@ func TestRunTransaction(t *testing.T) { // Retry entire transaction. srv.reset() srv.addRPC(beginReq, beginRes) - srv.addRPC(commitReq, grpc.Errorf(codes.Aborted, "")) + srv.addRPC(commitReq, status.Errorf(codes.Aborted, "")) srv.addRPC( &pb.BeginTransactionRequest{ Database: db, @@ -150,7 +151,7 @@ func TestTransactionErrors(t *testing.T) { c, srv := newMock(t) var ( tid = []byte{1} - internalErr = grpc.Errorf(codes.Internal, "so sad") + internalErr = status.Errorf(codes.Internal, "so sad") beginReq = &pb.BeginTransactionRequest{ Database: db, } @@ -189,7 +190,7 @@ func TestTransactionErrors(t *testing.T) { srv.reset() srv.addRPC(beginReq, beginRes) srv.addRPC(getReq, internalErr) - srv.addRPC(rollbackReq, grpc.Errorf(codes.FailedPrecondition, "")) + srv.addRPC(rollbackReq, status.Errorf(codes.FailedPrecondition, "")) err = c.RunTransaction(ctx, get) if grpc.Code(err) != codes.Internal { t.Errorf("got <%v>, want Internal", err) @@ -275,7 +276,7 @@ func TestTransactionErrors(t *testing.T) { // Too many retries. 
srv.reset() srv.addRPC(beginReq, beginRes) - srv.addRPC(commitReq, grpc.Errorf(codes.Aborted, "")) + srv.addRPC(commitReq, status.Errorf(codes.Aborted, "")) srv.addRPC( &pb.BeginTransactionRequest{ Database: db, @@ -287,7 +288,7 @@ func TestTransactionErrors(t *testing.T) { }, beginRes, ) - srv.addRPC(commitReq, grpc.Errorf(codes.Aborted, "")) + srv.addRPC(commitReq, status.Errorf(codes.Aborted, "")) srv.addRPC(rollbackReq, &empty.Empty{}) err = c.RunTransaction(ctx, func(context.Context, *Transaction) error { return nil }, MaxAttempts(2)) @@ -343,3 +344,33 @@ func TestTransactionErrors(t *testing.T) { } } } + +func TestTransactionGetAll(t *testing.T) { + c, srv := newMock(t) + defer c.Close() + const dbPath = "projects/projectID/databases/(default)" + tid := []byte{1} + beginReq := &pb.BeginTransactionRequest{Database: dbPath} + beginRes := &pb.BeginTransactionResponse{Transaction: tid} + srv.addRPC(beginReq, beginRes) + req := &pb.BatchGetDocumentsRequest{ + Database: dbPath, + Documents: []string{ + dbPath + "/documents/C/a", + dbPath + "/documents/C/b", + dbPath + "/documents/C/c", + }, + ConsistencySelector: &pb.BatchGetDocumentsRequest_Transaction{tid}, + } + err := c.RunTransaction(context.Background(), func(_ context.Context, tx *Transaction) error { + testGetAll(t, c, srv, dbPath, + func(drs []*DocumentRef) ([]*DocumentSnapshot, error) { return tx.GetAll(drs) }, + req) + commitReq := &pb.CommitRequest{Database: dbPath, Transaction: tid} + srv.addRPC(commitReq, &pb.CommitResponse{CommitTime: aTimestamp}) + return nil + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/cloud.google.com/go/firestore/util_test.go b/vendor/cloud.google.com/go/firestore/util_test.go index db8e1513f..34313c929 100644 --- a/vendor/cloud.google.com/go/firestore/util_test.go +++ b/vendor/cloud.google.com/go/firestore/util_test.go @@ -99,7 +99,11 @@ func newMock(t *testing.T) (*Client, *mockServer) { } func intval(i int) *pb.Value { - return &pb.Value{&pb.Value_IntegerValue{int64(i)}} + return int64val(int64(i)) +} + +func int64val(i int64) *pb.Value { + return &pb.Value{&pb.Value_IntegerValue{i}} } func boolval(b bool) *pb.Value { diff --git a/vendor/cloud.google.com/go/firestore/watch.go b/vendor/cloud.google.com/go/firestore/watch.go new file mode 100644 index 000000000..9a0f3253d --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/watch.go @@ -0,0 +1,115 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "io" + "time" + + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// Implementation of realtime updates (a.k.a. watch). +// This code is closely based on the Node.js implementation, +// https://github.com/googleapis/nodejs-firestore/blob/master/src/watch.js. 
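Nothing exported consumes the stream in this change; the sketch below is a hypothetical helper inside this package, not API from this commit. It shows roughly how recv (defined later in this file) is meant to be driven: retryable errors and io.EOF are absorbed with backoff, so a caller only ever sees permanent failures or context cancellation, and the handling of the response variants is an assumption.

func consumeTarget(ctx context.Context, c *Client, target *pb.Target) error {
	ws := newWatchStream(ctx, c, target)
	for {
		res, err := ws.recv() // reopens the stream and backs off on retryable errors
		if err != nil {
			return err // permanent error, or ctx cancellation surfaced via gax.Sleep
		}
		switch res.ResponseType.(type) {
		case *pb.ListenResponse_DocumentChange:
			// fold the changed document into the local result set
		case *pb.ListenResponse_TargetChange:
			// a CURRENT target change marks a consistent snapshot
		}
	}
}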
+ +var defaultBackoff = gax.Backoff{ + // Values from https://github.com/googleapis/nodejs-firestore/blob/master/src/backoff.js. + Initial: 1 * time.Second, + Max: 60 * time.Second, + Multiplier: 1.5, +} + +type watchStream struct { + ctx context.Context + c *Client + target *pb.Target // document or query being watched + lc pb.Firestore_ListenClient + backoff gax.Backoff +} + +func newWatchStream(ctx context.Context, c *Client, target *pb.Target) *watchStream { + return &watchStream{ + ctx: ctx, + c: c, + target: target, + backoff: defaultBackoff, + } +} + +// recv receives the next message from the stream. It also handles opening the stream +// initially, and reopening it on non-permanent errors. +// recv doesn't have to be goroutine-safe. +func (s *watchStream) recv() (*pb.ListenResponse, error) { + var err error + for { + if s.lc == nil { + s.lc, err = s.open() + if err != nil { + // Do not retry if open fails. + return nil, err + } + } + res, err := s.lc.Recv() + if err == nil || isPermanentWatchError(err) { + return res, err + } + // Non-permanent error. Sleep and retry. + // TODO: from node: + // request.addTarget.resumeToken = resumeToken; + // changeMap.clear(); + dur := s.backoff.Pause() + // If we're out of quota, wait a long time before retrying. + if status.Code(err) == codes.ResourceExhausted { + dur = s.backoff.Max + } + if err := gax.Sleep(s.ctx, dur); err != nil { + return nil, err + } + s.lc = nil + } +} + +func (s *watchStream) open() (pb.Firestore_ListenClient, error) { + lc, err := s.c.c.Listen(s.ctx) + if err == nil { + err = lc.Send(&pb.ListenRequest{ + Database: s.c.path(), + TargetChange: &pb.ListenRequest_AddTarget{AddTarget: s.target}, + }) + } + if err != nil { + return nil, err + } + return lc, nil +} + +func isPermanentWatchError(err error) bool { + if err == io.EOF { + // Retry on normal end-of-stream. + return false + } + switch status.Code(err) { + case codes.Canceled, codes.Unknown, codes.DeadlineExceeded, codes.ResourceExhausted, + codes.Internal, codes.Unavailable, codes.Unauthenticated: + return false + default: + return true + } +} diff --git a/vendor/cloud.google.com/go/firestore/watch_test.go b/vendor/cloud.google.com/go/firestore/watch_test.go new file mode 100644 index 000000000..bab7fa78e --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/watch_test.go @@ -0,0 +1,66 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package firestore + +import ( + "testing" + + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestWatchRecv(t *testing.T) { + ctx := context.Background() + c, srv := newMock(t) + db := defaultBackoff + defaultBackoff = gax.Backoff{Initial: 1, Max: 1, Multiplier: 1} + defer func() { defaultBackoff = db }() + + ws := newWatchStream(ctx, c, &pb.Target{}) + request := &pb.ListenRequest{ + Database: "projects/projectID/databases/(default)", + TargetChange: &pb.ListenRequest_AddTarget{&pb.Target{}}, + } + response := &pb.ListenResponse{ResponseType: &pb.ListenResponse_DocumentChange{&pb.DocumentChange{}}} + // Stream should retry on non-permanent errors, returning only the responses. + srv.addRPC(request, []interface{}{response, status.Error(codes.Unknown, "")}) + srv.addRPC(request, []interface{}{response}) // stream will return io.EOF + srv.addRPC(request, []interface{}{response, status.Error(codes.DeadlineExceeded, "")}) + srv.addRPC(request, []interface{}{status.Error(codes.ResourceExhausted, "")}) + srv.addRPC(request, []interface{}{status.Error(codes.Internal, "")}) + srv.addRPC(request, []interface{}{status.Error(codes.Unavailable, "")}) + srv.addRPC(request, []interface{}{status.Error(codes.Unauthenticated, "")}) + srv.addRPC(request, []interface{}{response}) + for i := 0; i < 4; i++ { + res, err := ws.recv() + if err != nil { + t.Fatal(err) + } + if !proto.Equal(res, response) { + t.Fatalf("got %v, want %v", res, response) + } + } + + // Stream should not retry on a permanent error. + srv.addRPC(request, []interface{}{status.Error(codes.AlreadyExists, "")}) + _, err := ws.recv() + if got, want := status.Code(err), codes.AlreadyExists; got != want { + t.Fatalf("got %s, want %s", got, want) + } +} diff --git a/vendor/cloud.google.com/go/iam/iam.go b/vendor/cloud.google.com/go/iam/iam.go index 8722ee883..37720aa2d 100644 --- a/vendor/cloud.google.com/go/iam/iam.go +++ b/vendor/cloud.google.com/go/iam/iam.go @@ -22,9 +22,13 @@ package iam import ( + "time" + + gax "github.com/googleapis/gax-go" "golang.org/x/net/context" pb "google.golang.org/genproto/googleapis/iam/v1" "google.golang.org/grpc" + "google.golang.org/grpc/codes" ) // client abstracts the IAMPolicy API to allow multiple implementations. 
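The hunk below puts these new imports to use: each IAMPolicy RPC is wrapped in gax.Invoke with a retryer that retries DeadlineExceeded and Unavailable under exponential backoff. Callers keep going through the exported Handle and pick the retries up transparently; a rough sketch, using Pub/Sub's Topic.IAM() as one source of a Handle:

package example

import (
	"cloud.google.com/go/iam"
	"cloud.google.com/go/pubsub"
	"golang.org/x/net/context"
)

// grantViewer adds member as a viewer on the topic's IAM policy.
func grantViewer(ctx context.Context, topic *pubsub.Topic, member string) error {
	h := topic.IAM() // *iam.Handle; its Get/Set RPCs now retry transient errors
	policy, err := h.Policy(ctx)
	if err != nil {
		return err
	}
	policy.Add(member, iam.Viewer)
	return h.SetPolicy(ctx, policy)
}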
@@ -39,26 +43,50 @@ type grpcClient struct { c pb.IAMPolicyClient } +var withRetry = gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60 * time.Second, + Multiplier: 1.3, + }) +}) + func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) { - proto, err := g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource}) + var proto *pb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { + var err error + proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource}) + return err + }, withRetry) if err != nil { return nil, err } return proto, nil } + func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error { - _, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{ - Resource: resource, - Policy: p, - }) - return err + return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { + _, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{ + Resource: resource, + Policy: p, + }) + return err + }, withRetry) } func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) { - res, err := g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{ - Resource: resource, - Permissions: perms, - }) + var res *pb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { + var err error + res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{ + Resource: resource, + Permissions: perms, + }) + return err + }, withRetry) if err != nil { return nil, err } diff --git a/vendor/cloud.google.com/go/internal/btree/README.md b/vendor/cloud.google.com/go/internal/btree/README.md new file mode 100644 index 000000000..d32371510 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/btree/README.md @@ -0,0 +1,11 @@ +This package is a fork of github.com/jba/btree at commit +aa53f88384b4d43de7e047ebe8d2c0fbb84fce89, which itself was a fork of +github.com/google/btree at 316fb6d3f031ae8f4d457c6c5186b9e3ded70435. + +This directory makes the following modifications: + +- Updated copyright notice. +- removed LICENSE (it is the same as the repo-wide license, Apache 2.0) +- Removed examples_test.go and .travis.yml. +- Added this file. + diff --git a/vendor/cloud.google.com/go/internal/btree/benchmarks_test.go b/vendor/cloud.google.com/go/internal/btree/benchmarks_test.go new file mode 100644 index 000000000..850eb583b --- /dev/null +++ b/vendor/cloud.google.com/go/internal/btree/benchmarks_test.go @@ -0,0 +1,268 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build go1.7 + +package btree + +import ( + "fmt" + "sort" + "testing" +) + +const benchmarkTreeSize = 10000 + +var degrees = []int{2, 8, 32, 64} + +func BenchmarkInsert(b *testing.B) { + insertP := perm(benchmarkTreeSize) + for _, d := range degrees { + b.Run(fmt.Sprintf("degree=%d", d), func(b *testing.B) { + i := 0 + for i < b.N { + tr := New(d, less) + for _, m := range insertP { + tr.Set(m.Key, m.Value) + i++ + if i >= b.N { + return + } + } + } + }) + } +} + +func BenchmarkDeleteInsert(b *testing.B) { + insertP := perm(benchmarkTreeSize) + for _, d := range degrees { + b.Run(fmt.Sprintf("degree=%d", d), func(b *testing.B) { + tr := New(d, less) + for _, m := range insertP { + tr.Set(m.Key, m.Value) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + m := insertP[i%benchmarkTreeSize] + tr.Delete(m.Key) + tr.Set(m.Key, m.Value) + } + }) + } +} + +func BenchmarkDeleteInsertCloneOnce(b *testing.B) { + insertP := perm(benchmarkTreeSize) + for _, d := range degrees { + b.Run(fmt.Sprintf("degree=%d", d), func(b *testing.B) { + tr := New(d, less) + for _, m := range insertP { + tr.Set(m.Key, m.Value) + } + tr = tr.Clone() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m := insertP[i%benchmarkTreeSize] + tr.Delete(m.Key) + tr.Set(m.Key, m.Value) + } + }) + } +} + +func BenchmarkDeleteInsertCloneEachTime(b *testing.B) { + insertP := perm(benchmarkTreeSize) + for _, d := range degrees { + b.Run(fmt.Sprintf("degree=%d", d), func(b *testing.B) { + tr := New(d, less) + for _, m := range insertP { + tr.Set(m.Key, m.Value) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + tr = tr.Clone() + m := insertP[i%benchmarkTreeSize] + tr.Delete(m.Key) + tr.Set(m.Key, m.Value) + } + }) + } +} + +func BenchmarkDelete(b *testing.B) { + insertP := perm(benchmarkTreeSize) + removeP := perm(benchmarkTreeSize) + for _, d := range degrees { + b.Run(fmt.Sprintf("degree=%d", d), func(b *testing.B) { + i := 0 + for i < b.N { + b.StopTimer() + tr := New(d, less) + for _, v := range insertP { + tr.Set(v.Key, v.Value) + } + b.StartTimer() + for _, m := range removeP { + tr.Delete(m.Key) + i++ + if i >= b.N { + return + } + } + if tr.Len() > 0 { + panic(tr.Len()) + } + } + }) + } +} + +func BenchmarkGet(b *testing.B) { + insertP := perm(benchmarkTreeSize) + getP := perm(benchmarkTreeSize) + for _, d := range degrees { + b.Run(fmt.Sprintf("degree=%d", d), func(b *testing.B) { + i := 0 + for i < b.N { + b.StopTimer() + tr := New(d, less) + for _, v := range insertP { + tr.Set(v.Key, v.Value) + } + b.StartTimer() + for _, m := range getP { + tr.Get(m.Key) + i++ + if i >= b.N { + return + } + } + } + }) + } +} + +func BenchmarkGetWithIndex(b *testing.B) { + insertP := perm(benchmarkTreeSize) + getP := perm(benchmarkTreeSize) + for _, d := range degrees { + b.Run(fmt.Sprintf("degree=%d", d), func(b *testing.B) { + i := 0 + for i < b.N { + b.StopTimer() + tr := New(d, less) + for _, v := range insertP { + tr.Set(v.Key, v.Value) + } + b.StartTimer() + for _, m := range getP { + tr.GetWithIndex(m.Key) + i++ + if i >= b.N { + return + } + } + } + }) + } +} + +func BenchmarkGetCloneEachTime(b *testing.B) { + insertP := perm(benchmarkTreeSize) + getP := perm(benchmarkTreeSize) + for _, d := range degrees { + b.Run(fmt.Sprintf("degree=%d", d), func(b *testing.B) { + i := 0 + for i < b.N { + b.StopTimer() + tr := New(d, less) + for _, m := range insertP { + tr.Set(m.Key, m.Value) + } + b.StartTimer() + for _, m := range getP { + tr = tr.Clone() + tr.Get(m.Key) + i++ + if i >= b.N { + return + } + } + } + }) + } +} + +func 
BenchmarkFind(b *testing.B) { + for _, d := range degrees { + var items []item + for i := 0; i < 2*d; i++ { + items = append(items, item{i, i}) + } + b.Run(fmt.Sprintf("size=%d", len(items)), func(b *testing.B) { + for _, alg := range []struct { + name string + fun func(Key, []item) (int, bool) + }{ + {"binary", findBinary}, + {"linear", findLinear}, + } { + b.Run(alg.name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + for j := 0; j < len(items); j++ { + alg.fun(items[j].key, items) + } + } + }) + } + }) + } +} + +func findBinary(k Key, s []item) (int, bool) { + i := sort.Search(len(s), func(i int) bool { return less(k, s[i].key) }) + // i is the smallest index of s for which key.Less(s[i].Key), or len(s). + if i > 0 && !less(s[i-1], k) { + return i - 1, true + } + return i, false +} + +func findLinear(k Key, s []item) (int, bool) { + var i int + for i = 0; i < len(s); i++ { + if less(k, s[i].key) { + break + } + } + if i > 0 && !less(s[i-1].key, k) { + return i - 1, true + } + return i, false +} + +type byInts []item + +func (a byInts) Len() int { + return len(a) +} + +func (a byInts) Less(i, j int) bool { + return a[i].key.(int) < a[j].key.(int) +} + +func (a byInts) Swap(i, j int) { + a[i], a[j] = a[j], a[i] +} diff --git a/vendor/cloud.google.com/go/internal/btree/btree.go b/vendor/cloud.google.com/go/internal/btree/btree.go new file mode 100644 index 000000000..4c81e6f91 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/btree/btree.go @@ -0,0 +1,991 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// This implementation is based on google/btree (http://github.com/google/btree), and +// much of the code is taken from there. But the API has been changed significantly, +// particularly around iteration, and support for indexing by position has been +// added. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. +// +// It has a flatter structure than an equivalent red-black or other binary tree, +// which in some cases yields better memory usage and/or performance. +// See some discussion on the matter here: +// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html +// Note, though, that this project is in no way related to the C++ B-Tree +// implementation written about there. +// +// Within this tree, each node contains a slice of items and a (possibly nil) +// slice of children. For basic numeric values or raw structs, this can cause +// efficiency differences when compared to equivalent C++ template code that +// stores values in arrays within the node: +// * Due to the overhead of storing values as interfaces (each +// value needs to be stored as the value itself, then 2 words for the +// interface pointing to that value and its type), resulting in higher +// memory use. 
+// * Since interfaces can point to values anywhere in memory, values are +// most likely not stored in contiguous blocks, resulting in a higher +// number of cache misses. +// These issues don't tend to matter, though, when working with strings or other +// heap-allocated structures, since C++-equivalent structures also must store +// pointers and also distribute their values across the heap. +package btree + +import ( + "fmt" + "sort" + "sync" +) + +// Key represents a key into the tree. +type Key interface{} + +type Value interface{} + +// item is a key-value pair. +type item struct { + key Key + value Value +} + +type lessFunc func(interface{}, interface{}) bool + +// New creates a new B-Tree with the given degree and comparison function. +// +// New(2, less), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +// +// The less function tests whether the current item is less than the given argument. +// It must provide a strict weak ordering. +// If !less(a, b) && !less(b, a), we treat this to mean a == b (i.e. the tree +// can hold only one of a or b). +func New(degree int, less func(interface{}, interface{}) bool) *BTree { + if degree <= 1 { + panic("bad degree") + } + return &BTree{ + degree: degree, + less: less, + cow: ©OnWriteContext{}, + } +} + +// items stores items in a node. +type items []item + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *items) insertAt(index int, m item) { + *s = append(*s, item{}) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = m +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *items) removeAt(index int) item { + m := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + (*s)[len(*s)-1] = item{} + *s = (*s)[:len(*s)-1] + return m +} + +// pop removes and returns the last element in the list. +func (s *items) pop() item { + index := len(*s) - 1 + out := (*s)[index] + (*s)[index] = item{} + *s = (*s)[:index] + return out +} + +var nilItems = make(items, 16) + +// truncate truncates this instance at index so that it contains only the +// first index items. index must be less than or equal to length. +func (s *items) truncate(index int) { + var toClear items + *s, toClear = (*s)[:index], (*s)[index:] + for len(toClear) > 0 { + toClear = toClear[copy(toClear, nilItems):] + } +} + +// find returns the index where an item with key should be inserted into this +// list. 'found' is true if the item already exists in the list at the given +// index. +func (s items) find(k Key, less lessFunc) (index int, found bool) { + i := sort.Search(len(s), func(i int) bool { return less(k, s[i].key) }) + // i is the smallest index of s for which k.Less(s[i].Key), or len(s). + if i > 0 && !less(s[i-1].key, k) { + return i - 1, true + } + return i, false +} + +// children stores child nodes in a node. +type children []*node + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *children) insertAt(index int, n *node) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = n +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *children) removeAt(index int) *node { + n := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + (*s)[len(*s)-1] = nil + *s = (*s)[:len(*s)-1] + return n +} + +// pop removes and returns the last element in the list. 
+func (s *children) pop() (out *node) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +var nilChildren = make(children, 16) + +// truncate truncates this instance at index so that it contains only the +// first index children. index must be less than or equal to length. +func (s *children) truncate(index int) { + var toClear children + *s, toClear = (*s)[:index], (*s)[index:] + for len(toClear) > 0 { + toClear = toClear[copy(toClear, nilChildren):] + } +} + +// node is an internal node in a tree. +// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node struct { + items items + children children + size int // number of items in the subtree: len(items) + sum over i of children[i].size + cow *copyOnWriteContext +} + +func (n *node) computeSize() int { + sz := len(n.items) + for _, c := range n.children { + sz += c.size + } + return sz +} + +func (n *node) checkSize() { + sz := n.computeSize() + if n.size != sz { + panic(fmt.Sprintf("n.size = %d, computed size = %d", n.size, sz)) + } +} + +func (n *node) mutableFor(cow *copyOnWriteContext) *node { + if n.cow == cow { + return n + } + out := cow.newNode() + if cap(out.items) >= len(n.items) { + out.items = out.items[:len(n.items)] + } else { + out.items = make(items, len(n.items), cap(n.items)) + } + copy(out.items, n.items) + // Copy children + if cap(out.children) >= len(n.children) { + out.children = out.children[:len(n.children)] + } else { + out.children = make(children, len(n.children), cap(n.children)) + } + copy(out.children, n.children) + out.size = n.size + return out +} + +func (n *node) mutableChild(i int) *node { + c := n.children[i].mutableFor(n.cow) + n.children[i] = c + return c +} + +// split splits the given node at the given index. The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. +func (n *node) split(i int) (item, *node) { + item := n.items[i] + next := n.cow.newNode() + next.items = append(next.items, n.items[i+1:]...) + n.items.truncate(i) + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + n.children.truncate(i + 1) + } + n.size = n.computeSize() + next.size = next.computeSize() + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. +func (n *node) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.mutableChild(i) + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + // The size of n doesn't change. + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. Should an equivalent item be +// be found/replaced by insert, its value will be returned. 
+func (n *node) insert(m item, maxItems int, less lessFunc) (old Value, present bool) { + i, found := n.items.find(m.key, less) + if found { + out := n.items[i] + n.items[i] = m + return out.value, true + } + if len(n.children) == 0 { + n.items.insertAt(i, m) + n.size++ + return old, false + } + if n.maybeSplitChild(i, maxItems) { + inTree := n.items[i] + switch { + case less(m.key, inTree.key): + // no change, we want first split node + case less(inTree.key, m.key): + i++ // we want second split node + default: + out := n.items[i] + n.items[i] = m + return out.value, true + } + } + old, present = n.mutableChild(i).insert(m, maxItems, less) + if !present { + n.size++ + } + return old, present +} + +// get finds the given key in the subtree and returns the corresponding item, along with a boolean reporting +// whether it was found. +// If withIndex is true, it also returns the index of the key relative to the node's subtree. +func (n *node) get(k Key, withIndex bool, less lessFunc) (item, bool, int) { + i, found := n.items.find(k, less) + if found { + idx := i + if withIndex && len(n.children) > 0 { + idx = n.partialSize(i+1) - 1 + } + return n.items[i], true, idx + } + if len(n.children) > 0 { + m, found, idx := n.children[i].get(k, withIndex, less) + if withIndex && found { + idx += n.partialSize(i) + } + return m, found, idx + } + return item{}, false, -1 +} + +// Returns the size of the non-leaf node up to but not including child i. +func (n *node) partialSize(i int) int { + var sz int + for j, c := range n.children { + if j == i { + break + } + sz += c.size + 1 + } + return sz +} + +// cursorStackForKey returns a stack of cursors for the key, along with whether the key was found and the index. +func (n *node) cursorStackForKey(k Key, cs cursorStack, less lessFunc) (cursorStack, bool, int) { + i, found := n.items.find(k, less) + cs.push(cursor{n, i}) + idx := i + if found { + if len(n.children) > 0 { + idx = n.partialSize(i+1) - 1 + } + return cs, true, idx + } + if len(n.children) > 0 { + cs, found, idx := n.children[i].cursorStackForKey(k, cs, less) + return cs, found, idx + n.partialSize(i) + } + return cs, false, idx +} + +// at returns the item at the i'th position in the subtree rooted at n. +// It assumes i is in range. +func (n *node) at(i int) item { + if len(n.children) == 0 { + return n.items[i] + } + for j, c := range n.children { + if i < c.size { + return c.at(i) + } + i -= c.size + if i == 0 { + return n.items[j] + } + i-- + } + panic("impossible") +} + +// cursorStackForIndex returns a stack of cursors for the index. +// It assumes i is in range. +func (n *node) cursorStackForIndex(i int, cs cursorStack) cursorStack { + if len(n.children) == 0 { + return cs.push(cursor{n, i}) + } + for j, c := range n.children { + if i < c.size { + return c.cursorStackForIndex(i, cs.push(cursor{n, j})) + } + i -= c.size + if i == 0 { + return cs.push(cursor{n, j}) + } + i-- + } + panic("impossible") +} + +// toRemove details what item to remove in a node.remove call. +type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. 
+func (n *node) remove(key Key, minItems int, typ toRemove, less lessFunc) (item, bool) { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + n.size-- + return n.items.pop(), true + + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + n.size-- + return n.items.removeAt(0), true + } + i = 0 + case removeItem: + i, found = n.items.find(key, less) + if len(n.children) == 0 { + if found { + n.size-- + return n.items.removeAt(i), true + } + return item{}, false + } + default: + panic("invalid type") + } + // If we get to here, we have children. + if len(n.children[i].items) <= minItems { + return n.growChildAndRemove(i, key, minItems, typ, less) + } + child := n.mutableChild(i) + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. + if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. + out := n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + n.items[i], _ = child.remove(nil, minItems, removeMax, less) + n.size-- + return out, true + } + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + m, removed := child.remove(key, minItems, typ, less) + if removed { + n.size-- + } + return m, removed +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. +// +// Most documentation says we have to do two sets of special casing: +// 1) item is in this node +// 2) item is in child +// In both cases, we need to handle the two subcases: +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// For the latter, we have to check: +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// To simplify our code here, we handle cases #1 and #2 the same: +// If a node doesn't have enough items, we make sure it does (using a,b,c). +// We then simply redo our remove call, and the second time (regardless of +// whether we're in case 1 or 2), we'll have enough items and can guarantee +// that we hit case A. 
+func (n *node) growChildAndRemove(i int, key Key, minItems int, typ toRemove, less lessFunc) (item, bool) { + if i > 0 && len(n.children[i-1].items) > minItems { + // Steal from left child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i - 1) + stolenItem := stealFrom.items.pop() + stealFrom.size-- + child.items.insertAt(0, n.items[i-1]) + child.size++ + n.items[i-1] = stolenItem + if len(stealFrom.children) > 0 { + c := stealFrom.children.pop() + stealFrom.size -= c.size + child.children.insertAt(0, c) + child.size += c.size + } + } else if i < len(n.items) && len(n.children[i+1].items) > minItems { + // steal from right child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i + 1) + stolenItem := stealFrom.items.removeAt(0) + stealFrom.size-- + child.items = append(child.items, n.items[i]) + child.size++ + n.items[i] = stolenItem + if len(stealFrom.children) > 0 { + c := stealFrom.children.removeAt(0) + stealFrom.size -= c.size + child.children = append(child.children, c) + child.size += c.size + } + } else { + if i >= len(n.items) { + i-- + } + child := n.mutableChild(i) + // merge with right child + mergeItem := n.items.removeAt(i) + mergeChild := n.children.removeAt(i + 1) + child.items = append(child.items, mergeItem) + child.items = append(child.items, mergeChild.items...) + child.children = append(child.children, mergeChild.children...) + child.size = child.computeSize() + n.cow.freeNode(mergeChild) + } + return n.remove(key, minItems, typ, less) +} + +// BTree is an implementation of a B-Tree. +// +// BTree stores item instances in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTree struct { + degree int + less lessFunc + root *node + cow *copyOnWriteContext +} + +// copyOnWriteContext pointers determine node ownership. A tree with a cow +// context equivalent to a node's cow context is allowed to modify that node. +// A tree whose write context does not match a node's is not allowed to modify +// it, and must create a new, writable copy (IE: it's a Clone). +// +// When doing any write operation, we maintain the invariant that the current +// node's context is equal to the context of the tree that requested the write. +// We do this by, before we descend into any node, creating a copy with the +// correct context if the contexts don't match. +// +// Since the node we're currently visiting on any write has the requesting +// tree's context, that node is modifiable in place. Children of that node may +// not share context, but before we descend into them, we'll make a mutable +// copy. +type copyOnWriteContext struct{ byte } // non-empty, because empty structs may have same addr + +// Clone clones the btree, lazily. Clone should not be called concurrently, +// but the original tree (t) and the new tree (t2) can be used concurrently +// once the Clone call completes. +// +// The internal tree structure of b is marked read-only and shared between t and +// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes +// whenever one of b's original nodes would have been modified. Read operations +// should have no performance degredation. Write operations for both t and t2 +// will initially experience minor slow-downs caused by additional allocs and +// copies due to the aforementioned copy-on-write logic, but should converge to +// the original performance characteristics of the original tree. 
+func (t *BTree) Clone() *BTree { + // Create two entirely new copy-on-write contexts. + // This operation effectively creates three trees: + // the original, shared nodes (old b.cow) + // the new b.cow nodes + // the new out.cow nodes + cow1, cow2 := *t.cow, *t.cow + out := *t + t.cow = &cow1 + out.cow = &cow2 + return &out +} + +// maxItems returns the max number of items to allow per node. +func (t *BTree) maxItems() int { + return t.degree*2 - 1 +} + +// minItems returns the min number of items to allow per node (ignored for the +// root node). +func (t *BTree) minItems() int { + return t.degree - 1 +} + +var nodePool = sync.Pool{New: func() interface{} { return new(node) }} + +func (c *copyOnWriteContext) newNode() *node { + n := nodePool.Get().(*node) + n.cow = c + return n +} + +func (c *copyOnWriteContext) freeNode(n *node) { + if n.cow == c { + // clear to allow GC + n.items.truncate(0) + n.children.truncate(0) + n.cow = nil + nodePool.Put(n) + } +} + +// Set sets the given key to the given value in the tree. If the key is present in +// the tree, its value is changed and the old value is returned along with a second +// return value of true. If the key is not in the tree, it is added, and the second +// return value is false. +func (t *BTree) Set(k Key, v Value) (old Value, present bool) { + if t.root == nil { + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item{k, v}) + t.root.size = 1 + return old, false + } + t.root = t.root.mutableFor(t.cow) + if len(t.root.items) >= t.maxItems() { + sz := t.root.size + item2, second := t.root.split(t.maxItems() / 2) + oldroot := t.root + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item2) + t.root.children = append(t.root.children, oldroot, second) + t.root.size = sz + } + + return t.root.insert(item{k, v}, t.maxItems(), t.less) +} + +// Delete removes the item with the given key, returning its value. The second return value +// reports whether the key was found. +func (t *BTree) Delete(k Key) (Value, bool) { + m, removed := t.deleteItem(k, removeItem) + return m.value, removed +} + +// DeleteMin removes the smallest item in the tree and returns its key and value. +// If the tree is empty, it returns zero values. +func (t *BTree) DeleteMin() (Key, Value) { + item, _ := t.deleteItem(nil, removeMin) + return item.key, item.value +} + +// DeleteMax removes the largest item in the tree and returns its key and value. +// If the tree is empty, it returns zero values. +func (t *BTree) DeleteMax() (Key, Value) { + item, _ := t.deleteItem(nil, removeMax) + return item.key, item.value +} + +func (t *BTree) deleteItem(key Key, typ toRemove) (item, bool) { + if t.root == nil || len(t.root.items) == 0 { + return item{}, false + } + t.root = t.root.mutableFor(t.cow) + out, removed := t.root.remove(key, t.minItems(), typ, t.less) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.cow.freeNode(oldroot) + } + return out, removed +} + +// Get returns the value for the given key in the tree, or the zero value if the +// key is not in the tree. +// +// To distinguish a zero value from a key that is not present, use GetWithIndex. +func (t *BTree) Get(k Key) Value { + var z Value + if t.root == nil { + return z + } + item, ok, _ := t.root.get(k, false, t.less) + if !ok { + return z + } + return item.value +} + +// GetWithIndex returns the value and index for the given key in the tree, or the +// zero value and -1 if the key is not in the tree. 
+func (t *BTree) GetWithIndex(k Key) (Value, int) { + var z Value + if t.root == nil { + return z, -1 + } + item, _, index := t.root.get(k, true, t.less) + return item.value, index +} + +// At returns the key and value at index i. The minimum item has index 0. +// If i is outside the range [0, t.Len()), At panics. +func (t *BTree) At(i int) (Key, Value) { + if i < 0 || i >= t.Len() { + panic("btree: index out of range") + } + item := t.root.at(i) + return item.key, item.value +} + +// Has reports whether the given key is in the tree. +func (t *BTree) Has(k Key) bool { + if t.root == nil { + return false + } + _, ok, _ := t.root.get(k, false, t.less) + return ok +} + +// Min returns the smallest key in the tree and its value. If the tree is empty, it +// returns zero values. +func (t *BTree) Min() (Key, Value) { + var k Key + var v Value + if t.root == nil { + return k, v + } + n := t.root + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return k, v + } + return n.items[0].key, n.items[0].value +} + +// Max returns the largest key in the tree and its value. If the tree is empty, both +// return values are zero values. +func (t *BTree) Max() (Key, Value) { + var k Key + var v Value + if t.root == nil { + return k, v + } + n := t.root + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return k, v + } + m := n.items[len(n.items)-1] + return m.key, m.value +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + if t.root == nil { + return 0 + } + return t.root.size +} + +// Before returns an iterator positioned just before k. After the first call to Next, +// the Iterator will be at k, or at the key just greater than k if k is not in the tree. +// Subsequent calls to Next will traverse the tree's items in ascending order. +func (t *BTree) Before(k Key) *Iterator { + if t.root == nil { + return &Iterator{} + } + var cs cursorStack + cs, found, idx := t.root.cursorStackForKey(k, cs, t.less) + // If we found the key, the cursor stack is pointing to it. Since that is + // the first element we want, don't advance the iterator on the initial call to Next. + // If we haven't found the key, then the top of the cursor stack is either pointing at the + // item just after k, in which case we do not want to move the iterator; or the index + // is past the end of the items slice, in which case we do. + var stay bool + top := cs[len(cs)-1] + if found { + stay = true + } else if top.index < len(top.node.items) { + stay = true + } else { + idx-- + } + return &Iterator{ + cursors: cs, + stay: stay, + descending: false, + Index: idx, + } +} + +// After returns an iterator positioned just after k. After the first call to Next, +// the Iterator will be at k, or at the key just less than k if k is not in the tree. +// Subsequent calls to Next will traverse the tree's items in descending order. +func (t *BTree) After(k Key) *Iterator { + if t.root == nil { + return &Iterator{} + } + var cs cursorStack + cs, found, idx := t.root.cursorStackForKey(k, cs, t.less) + // If we found the key, the cursor stack is pointing to it. Since that is + // the first element we want, don't advance the iterator on the initial call to Next. + // If we haven't found the key, the the cursor stack is pointing just after the first item, + // so we do want to advance. 
+ return &Iterator{ + cursors: cs, + stay: found, + descending: true, + Index: idx, + } +} + +// BeforeIndex returns an iterator positioned just before the item with the given index. +// The iterator will traverse the tree's items in ascending order. +// If i is not in the range [0, tr.Len()], BeforeIndex panics. +// Note that it is not an error to provide an index of tr.Len(). +func (t *BTree) BeforeIndex(i int) *Iterator { + return t.indexIterator(i, false) +} + +// AfterIndex returns an iterator positioned just after the item with the given index. +// The iterator will traverse the tree's items in descending order. +// If i is not in the range [0, tr.Len()], AfterIndex panics. +// Note that it is not an error to provide an index of tr.Len(). +func (t *BTree) AfterIndex(i int) *Iterator { + return t.indexIterator(i, true) +} + +func (t *BTree) indexIterator(i int, descending bool) *Iterator { + if i < 0 || i > t.Len() { + panic("btree: index out of range") + } + if i == t.Len() { + return &Iterator{} + } + var cs cursorStack + return &Iterator{ + cursors: t.root.cursorStackForIndex(i, cs), + stay: true, + descending: descending, + Index: i, + } +} + +// An Iterator supports traversing the items in the tree. +type Iterator struct { + Key Key + Value Value + // Index is the position of the item in the tree viewed as a sequence. + // The minimum item has index zero. + Index int + + cursors cursorStack // stack of nodes with indices; last element is the top + stay bool // don't do anything on the first call to Next. + descending bool // traverse the items in descending order +} + +// Next advances the Iterator to the next item in the tree. If Next returns true, +// the Iterator's Key, Value and Index fields refer to the next item. If Next returns +// false, there are no more items and the values of Key, Value and Index are undefined. +// +// If the tree is modified during iteration, the behavior is undefined. +func (it *Iterator) Next() bool { + var more bool + switch { + case len(it.cursors) == 0: + more = false + case it.stay: + it.stay = false + more = true + case it.descending: + more = it.dec() + default: + more = it.inc() + } + if !more { + return false + } + top := it.cursors[len(it.cursors)-1] + item := top.node.items[top.index] + it.Key = item.key + it.Value = item.value + return true +} + +// When inc returns true, the top cursor on the stack refers to the new current item. +func (it *Iterator) inc() bool { + // Useful invariants for understanding this function: + // - Leaf nodes have zero children, and zero or more items. + // - Nonleaf nodes have one more child than item, and children[i] < items[i] < children[i+1]. + // - The current item in the iterator is top.node.items[top.index]. + + it.Index++ + // If we are at a non-leaf node, the current item is items[i], so + // now we want to continue with children[i+1], which must exist + // by the node invariant. We want the minimum item in that child's subtree. + top := it.cursors.incTop(1) + for len(top.node.children) > 0 { + top = cursor{top.node.children[top.index], 0} + it.cursors.push(top) + } + // Here, we are at a leaf node. top.index points to + // the new current item, if it's within the items slice. + for top.index >= len(top.node.items) { + // We've gone through everything in this node. Pop it off the stack. + it.cursors.pop() + // If the stack is now empty,we're past the last item in the tree. 
+ if it.cursors.empty() { + return false + } + top = it.cursors.top() + // The new top's index points to a child, which we've just finished + // exploring. The next item is the one at the same index in the items slice. + } + // Here, the top cursor on the stack points to the new current item. + return true +} + +func (it *Iterator) dec() bool { + // See the invariants for inc, above. + it.Index-- + top := it.cursors.top() + // If we are at a non-leaf node, the current item is items[i], so + // now we want to continue with children[i]. We want the maximum item in that child's subtree. + for len(top.node.children) > 0 { + c := top.node.children[top.index] + top = cursor{c, len(c.items)} + it.cursors.push(top) + } + top = it.cursors.incTop(-1) + // Here, we are at a leaf node. top.index points to + // the new current item, if it's within the items slice. + for top.index < 0 { + // We've gone through everything in this node. Pop it off the stack. + it.cursors.pop() + // If the stack is now empty,we're past the last item in the tree. + if it.cursors.empty() { + return false + } + // The new top's index points to a child, which we've just finished + // exploring. That child is to the right of the item we want to advance to, + // so decrement the index. + top = it.cursors.incTop(-1) + } + return true +} + +// A cursor is effectively a pointer into a node. A stack of cursors identifies an item in the tree, +// and makes it possible to move to the next or previous item efficiently. +// +// If the cursor is on the top of the stack, its index points into the node's items slice, selecting +// the current item. Otherwise, the index points into the children slice and identifies the child +// that is next in the stack. +type cursor struct { + node *node + index int +} + +// A cursorStack is a stack of cursors, representing a path of nodes from the root of the tree. +type cursorStack []cursor + +func (s *cursorStack) push(c cursor) cursorStack { + *s = append(*s, c) + return *s +} + +func (s *cursorStack) pop() cursor { + last := len(*s) - 1 + t := (*s)[last] + *s = (*s)[:last] + return t +} + +func (s *cursorStack) top() cursor { + return (*s)[len(*s)-1] +} + +func (s *cursorStack) empty() bool { + return len(*s) == 0 +} + +// incTop increments top's index by n and returns it. +func (s *cursorStack) incTop(n int) cursor { + (*s)[len(*s)-1].index += n // Don't call top: modify the original, not a copy. + return s.top() +} diff --git a/vendor/cloud.google.com/go/internal/btree/btree_test.go b/vendor/cloud.google.com/go/internal/btree/btree_test.go new file mode 100644 index 000000000..0381edc9c --- /dev/null +++ b/vendor/cloud.google.com/go/internal/btree/btree_test.go @@ -0,0 +1,396 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
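Before the tests, a small usage sketch of the API defined in btree.go above; this is hypothetical code assumed to sit inside this package, and the integer keys and less function mirror the tests that follow.

func exampleBTree() {
	less := func(a, b interface{}) bool { return a.(int) < b.(int) }
	tr := New(16, less) // a B-Tree of degree 16 with int keys

	for i := 0; i < 5; i++ {
		tr.Set(i, i*i) // Set reports the old value and whether the key was already present
	}
	v, idx := tr.GetWithIndex(3)
	fmt.Println(v, idx) // 9 3 — key 3 holds 9 and is the fourth-smallest key

	// Iterate in ascending order starting at key 2: visits (2,4), (3,9), (4,16).
	for it := tr.Before(2); it.Next(); {
		fmt.Println(it.Index, it.Key, it.Value)
	}

	snapshot := tr.Clone() // lazy copy-on-write clone of the tree
	snapshot.Delete(4)     // removing from the clone does not affect tr
}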
+ +package btree + +import ( + "flag" + "fmt" + "math/rand" + "os" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" +) + +func init() { + seed := time.Now().Unix() + fmt.Println(seed) + rand.Seed(seed) +} + +type itemWithIndex struct { + Key Key + Value Value + Index int +} + +// perm returns a random permutation of n Int items in the range [0, n). +func perm(n int) []itemWithIndex { + var out []itemWithIndex + for _, v := range rand.Perm(n) { + out = append(out, itemWithIndex{v, v, v}) + } + return out +} + +// rang returns an ordered list of Int items in the range [0, n). +func rang(n int) []itemWithIndex { + var out []itemWithIndex + for i := 0; i < n; i++ { + out = append(out, itemWithIndex{i, i, i}) + } + return out +} + +// all extracts all items from an iterator. +func all(it *Iterator) []itemWithIndex { + var out []itemWithIndex + for it.Next() { + out = append(out, itemWithIndex{it.Key, it.Value, it.Index}) + } + return out +} + +// rangerev returns a reversed ordered list of Int items in the range [0, n). +func rangrev(n int) []itemWithIndex { + var out []itemWithIndex + for i := n - 1; i >= 0; i-- { + out = append(out, itemWithIndex{i, i, i}) + } + return out +} + +func reverse(s []itemWithIndex) { + for i := 0; i < len(s)/2; i++ { + s[i], s[len(s)-i-1] = s[len(s)-i-1], s[i] + } +} + +var btreeDegree = flag.Int("degree", 32, "B-Tree degree") + +func TestBTree(t *testing.T) { + tr := New(*btreeDegree, less) + const treeSize = 10000 + for i := 0; i < 10; i++ { + if min, _ := tr.Min(); min != nil { + t.Fatalf("empty min, got %+v", min) + } + if max, _ := tr.Max(); max != nil { + t.Fatalf("empty max, got %+v", max) + } + for _, m := range perm(treeSize) { + if _, ok := tr.Set(m.Key, m.Value); ok { + t.Fatal("set found item", m) + } + } + for _, m := range perm(treeSize) { + if _, ok := tr.Set(m.Key, m.Value); !ok { + t.Fatal("set didn't find item", m) + } + } + mink, minv := tr.Min() + if want := 0; mink != want || minv != want { + t.Fatalf("min: want %+v, got %+v, %+v", want, mink, minv) + } + maxk, maxv := tr.Max() + if want := treeSize - 1; maxk != want || maxv != want { + t.Fatalf("max: want %+v, got %+v, %+v", want, maxk, maxv) + } + got := all(tr.BeforeIndex(0)) + want := rang(treeSize) + if !cmp.Equal(got, want) { + t.Fatalf("mismatch:\n got: %v\nwant: %v", got, want) + } + + for _, m := range perm(treeSize) { + if _, removed := tr.Delete(m.Key); !removed { + t.Fatalf("didn't find %v", m) + } + } + if got = all(tr.BeforeIndex(0)); len(got) > 0 { + t.Fatalf("some left!: %v", got) + } + } +} + +func TestAt(t *testing.T) { + tr := New(*btreeDegree, less) + for _, m := range perm(100) { + tr.Set(m.Key, m.Value) + } + for i := 0; i < tr.Len(); i++ { + gotk, gotv := tr.At(i) + if want := i; gotk != want || gotv != want { + t.Fatalf("At(%d) = (%v, %v), want (%v, %v)", i, gotk, gotv, want, want) + } + } +} + +func TestGetWithIndex(t *testing.T) { + tr := New(*btreeDegree, less) + for _, m := range perm(100) { + tr.Set(m.Key, m.Value) + } + for i := 0; i < tr.Len(); i++ { + gotv, goti := tr.GetWithIndex(i) + wantv, wanti := i, i + if gotv != wantv || goti != wanti { + t.Errorf("GetWithIndex(%d) = (%v, %v), want (%v, %v)", + i, gotv, goti, wantv, wanti) + } + } + _, got := tr.GetWithIndex(100) + if want := -1; got != want { + t.Errorf("got %d, want %d", got, want) + } +} + +func TestDeleteMin(t *testing.T) { + tr := New(3, less) + for _, m := range perm(100) { + tr.Set(m.Key, m.Value) + } + var got []itemWithIndex + for i := 0; tr.Len() > 0; i++ { + k, v := 
tr.DeleteMin() + got = append(got, itemWithIndex{k, v, i}) + } + if want := rang(100); !cmp.Equal(got, want) { + t.Fatalf("got: %v\nwant: %v", got, want) + } +} + +func TestDeleteMax(t *testing.T) { + tr := New(3, less) + for _, m := range perm(100) { + tr.Set(m.Key, m.Value) + } + var got []itemWithIndex + for tr.Len() > 0 { + k, v := tr.DeleteMax() + got = append(got, itemWithIndex{k, v, tr.Len()}) + } + reverse(got) + if want := rang(100); !cmp.Equal(got, want) { + t.Fatalf("got: %v\nwant: %v", got, want) + } +} + +func TestIterator(t *testing.T) { + const size = 10 + + tr := New(2, less) + // Empty tree. + for i, it := range []*Iterator{ + tr.BeforeIndex(0), + tr.Before(3), + tr.After(3), + } { + if got, want := it.Next(), false; got != want { + t.Errorf("empty, #%d: got %t, want %t", i, got, want) + } + } + + // Root with zero children. + tr.Set(1, nil) + tr.Delete(1) + if !(tr.root != nil && len(tr.root.children) == 0 && len(tr.root.items) == 0) { + t.Fatal("wrong shape tree") + } + for i, it := range []*Iterator{ + tr.BeforeIndex(0), + tr.Before(3), + tr.After(3), + } { + if got, want := it.Next(), false; got != want { + t.Errorf("zero root, #%d: got %t, want %t", i, got, want) + } + } + + // Tree with size elements. + p := perm(size) + for _, v := range p { + tr.Set(v.Key, v.Value) + } + + it := tr.BeforeIndex(0) + got := all(it) + want := rang(size) + if !cmp.Equal(got, want) { + t.Fatalf("got %+v\nwant %+v\n", got, want) + } + + for i, w := range want { + it := tr.Before(w.Key) + got = all(it) + wn := want[w.Key.(int):] + if !cmp.Equal(got, wn) { + t.Fatalf("got %+v\nwant %+v\n", got, wn) + } + + it = tr.BeforeIndex(i) + got = all(it) + if !cmp.Equal(got, wn) { + t.Fatalf("got %+v\nwant %+v\n", got, wn) + } + + it = tr.After(w.Key) + got = all(it) + wn = append([]itemWithIndex(nil), want[:w.Key.(int)+1]...) + reverse(wn) + if !cmp.Equal(got, wn) { + t.Fatalf("got %+v\nwant %+v\n", got, wn) + } + + it = tr.AfterIndex(i) + got = all(it) + if !cmp.Equal(got, wn) { + t.Fatalf("got %+v\nwant %+v\n", got, wn) + } + } + + // Non-existent keys. + tr = New(2, less) + for _, v := range p { + tr.Set(v.Key.(int)*2, v.Value) + } + // tr has only even keys: 0, 2, 4, ... Iterate from odd keys. + for i := -1; i <= size+1; i += 2 { + it := tr.Before(i) + got := all(it) + var want []itemWithIndex + for j := (i + 1) / 2; j < size; j++ { + want = append(want, itemWithIndex{j * 2, j, j}) + } + if !cmp.Equal(got, want) { + tr.print(os.Stdout) + t.Fatalf("%d: got %+v\nwant %+v\n", i, got, want) + } + + it = tr.After(i) + got = all(it) + want = nil + for j := (i - 1) / 2; j >= 0; j-- { + want = append(want, itemWithIndex{j * 2, j, j}) + } + if !cmp.Equal(got, want) { + t.Fatalf("%d: got %+v\nwant %+v\n", i, got, want) + } + } +} + +func TestMixed(t *testing.T) { + // Test random, mixed insertions and deletions. + const maxSize = 1000 + tr := New(3, less) + has := map[int]bool{} + for i := 0; i < 10000; i++ { + r := rand.Intn(maxSize) + if r >= tr.Len() { + old, ok := tr.Set(r, r) + if has[r] != ok { + t.Fatalf("%d: has=%t, ok=%t", r, has[r], ok) + } + if ok && old.(int) != r { + t.Fatalf("%d: bad old", r) + } + has[r] = true + if got, want := tr.Get(r), r; got != want { + t.Fatalf("Get(%d) = %d, want %d", r, got, want) + } + } else { + // Expoit random map iteration order. 
+ var d int + for d = range has { + break + } + old, removed := tr.Delete(d) + if !removed { + t.Fatalf("%d not found", d) + } + if old.(int) != d { + t.Fatalf("%d: bad old", d) + } + delete(has, d) + } + } +} + +const cloneTestSize = 10000 + +func cloneTest(t *testing.T, b *BTree, start int, p []itemWithIndex, wg *sync.WaitGroup, treec chan<- *BTree) { + treec <- b + for i := start; i < cloneTestSize; i++ { + b.Set(p[i].Key, p[i].Value) + if i%(cloneTestSize/5) == 0 { + wg.Add(1) + go cloneTest(t, b.Clone(), i+1, p, wg, treec) + } + } + wg.Done() +} + +func TestCloneConcurrentOperations(t *testing.T) { + b := New(*btreeDegree, less) + treec := make(chan *BTree) + p := perm(cloneTestSize) + var wg sync.WaitGroup + wg.Add(1) + go cloneTest(t, b, 0, p, &wg, treec) + var trees []*BTree + donec := make(chan struct{}) + go func() { + for t := range treec { + trees = append(trees, t) + } + close(donec) + }() + wg.Wait() + close(treec) + <-donec + want := rang(cloneTestSize) + for i, tree := range trees { + if !cmp.Equal(want, all(tree.BeforeIndex(0))) { + t.Errorf("tree %v mismatch", i) + } + } + toRemove := rang(cloneTestSize)[cloneTestSize/2:] + for i := 0; i < len(trees)/2; i++ { + tree := trees[i] + wg.Add(1) + go func() { + for _, m := range toRemove { + tree.Delete(m.Key) + } + wg.Done() + }() + } + wg.Wait() + for i, tree := range trees { + var wantpart []itemWithIndex + if i < len(trees)/2 { + wantpart = want[:cloneTestSize/2] + } else { + wantpart = want + } + if got := all(tree.BeforeIndex(0)); !cmp.Equal(wantpart, got) { + t.Errorf("tree %v mismatch, want %v got %v", i, len(want), len(got)) + } + } +} + +func less(a, b interface{}) bool { return a.(int) < b.(int) } diff --git a/vendor/cloud.google.com/go/internal/btree/debug.go b/vendor/cloud.google.com/go/internal/btree/debug.go new file mode 100644 index 000000000..b983cd046 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/btree/debug.go @@ -0,0 +1,37 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
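TestCloneConcurrentOperations above leans on Clone producing a tree that can be modified independently of the original, even from other goroutines. A rough sketch of that contract, inferred from the test rather than stated elsewhere in the package:

	// Illustrative sketch, not part of the patch.
	less := func(a, b interface{}) bool { return a.(int) < b.(int) }
	orig := New(4, less)
	orig.Set(1, "one")
	cl := orig.Clone() // cl starts with the same contents as orig
	cl.Set(2, "two")   // visible only through cl
	orig.Delete(1)     // visible only through orig
	// orig is now empty; cl still holds {1:"one", 2:"two"}.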
+ +package btree + +import ( + "fmt" + "io" + "strings" +) + +func (t *BTree) print(w io.Writer) { + t.root.print(w, 0) +} + +func (n *node) print(w io.Writer, level int) { + indent := strings.Repeat(" ", level) + if n == nil { + fmt.Fprintf(w, "%s\n", indent) + return + } + fmt.Fprintf(w, "%s%v\n", indent, n.items) + for _, c := range n.children { + c.print(w, level+1) + } +} diff --git a/vendor/cloud.google.com/go/internal/testutil/server.go b/vendor/cloud.google.com/go/internal/testutil/server.go index 508cef7cf..66300fa14 100644 --- a/vendor/cloud.google.com/go/internal/testutil/server.go +++ b/vendor/cloud.google.com/go/internal/testutil/server.go @@ -20,8 +20,9 @@ import ( "net" "strconv" - grpc "google.golang.org/grpc" + "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) // A Server is an in-process gRPC server, listening on a system-chosen port on @@ -90,7 +91,7 @@ func PageBounds(pageSize int, pageToken string, length int) (from, to int, nextP if pageToken != "" { from, err = strconv.Atoi(pageToken) if err != nil { - return 0, 0, "", grpc.Errorf(codes.InvalidArgument, "bad page token: %v", err) + return 0, 0, "", status.Errorf(codes.InvalidArgument, "bad page token: %v", err) } if from >= length { return length, length, "", nil diff --git a/vendor/cloud.google.com/go/internal/trace/go18.go b/vendor/cloud.google.com/go/internal/trace/go18.go new file mode 100644 index 000000000..1da412604 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/trace/go18.go @@ -0,0 +1,83 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package trace + +import ( + "go.opencensus.io/trace" + "golang.org/x/net/context" + "google.golang.org/api/googleapi" + "google.golang.org/genproto/googleapis/rpc/code" + "google.golang.org/grpc/status" +) + +func StartSpan(ctx context.Context, name string) context.Context { + ctx, _ = trace.StartSpan(ctx, name) + return ctx +} + +func EndSpan(ctx context.Context, err error) { + span := trace.FromContext(ctx) + if err != nil { + span.SetStatus(toStatus(err)) + } + span.End() +} + +// ToStatus interrogates an error and converts it to an appropriate +// OpenCensus status. +func toStatus(err error) trace.Status { + if err2, ok := err.(*googleapi.Error); ok { + return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message} + } else if s, ok := status.FromError(err); ok { + return trace.Status{Code: int32(s.Code()), Message: s.Message()} + } else { + return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()} + } +} + +// TODO (deklerk): switch to using OpenCensus function when it becomes available. 
+// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto +func httpStatusCodeToOCCode(httpStatusCode int) int32 { + switch httpStatusCode { + case 200: + return int32(code.Code_OK) + case 499: + return int32(code.Code_CANCELLED) + case 500: + return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS + case 400: + return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE + case 504: + return int32(code.Code_DEADLINE_EXCEEDED) + case 404: + return int32(code.Code_NOT_FOUND) + case 409: + return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED + case 403: + return int32(code.Code_PERMISSION_DENIED) + case 401: + return int32(code.Code_UNAUTHENTICATED) + case 429: + return int32(code.Code_RESOURCE_EXHAUSTED) + case 501: + return int32(code.Code_UNIMPLEMENTED) + case 503: + return int32(code.Code_UNAVAILABLE) + default: + return int32(code.Code_UNKNOWN) + } +} diff --git a/vendor/cloud.google.com/go/internal/trace/go18_test.go b/vendor/cloud.google.com/go/internal/trace/go18_test.go new file mode 100644 index 000000000..3f9f03037 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/trace/go18_test.go @@ -0,0 +1,55 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package trace + +import ( + "errors" + "net/http" + "testing" + + "cloud.google.com/go/internal/testutil" + octrace "go.opencensus.io/trace" + "google.golang.org/api/googleapi" + "google.golang.org/genproto/googleapis/rpc/code" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestToStatus(t *testing.T) { + for _, testcase := range []struct { + input error + want octrace.Status + }{ + { + errors.New("some random error"), + octrace.Status{Code: int32(code.Code_UNKNOWN), Message: "some random error"}, + }, + { + &googleapi.Error{Code: http.StatusConflict, Message: "some specific googleapi http error"}, + octrace.Status{Code: int32(code.Code_ALREADY_EXISTS), Message: "some specific googleapi http error"}, + }, + { + status.Error(codes.DataLoss, "some specific grpc error"), + octrace.Status{Code: int32(code.Code_DATA_LOSS), Message: "some specific grpc error"}, + }, + } { + got := toStatus(testcase.input) + if r := testutil.Diff(got, testcase.want); r != "" { + t.Errorf("got -, want +:\n%s", r) + } + } +} diff --git a/vendor/cloud.google.com/go/internal/trace/not_go18.go b/vendor/cloud.google.com/go/internal/trace/not_go18.go new file mode 100644 index 000000000..50c1657ee --- /dev/null +++ b/vendor/cloud.google.com/go/internal/trace/not_go18.go @@ -0,0 +1,30 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.8 + +package trace + +import ( + "golang.org/x/net/context" +) + +// OpenCensus only supports go 1.8 and higher. + +func StartSpan(ctx context.Context, _ string) context.Context { + return ctx +} + +func EndSpan(context.Context, error) { +} diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go index 1d2ad51bb..f5c23a564 100644 --- a/vendor/cloud.google.com/go/internal/version/version.go +++ b/vendor/cloud.google.com/go/internal/version/version.go @@ -26,7 +26,7 @@ import ( // Repo is the current version of the client libraries in this // repo. It should be a date in YYYYMMDD format. -const Repo = "20180118" +const Repo = "20180226" // Go returns the Go runtime version. The returned string // has no whitespace. diff --git a/vendor/cloud.google.com/go/issue_template.md b/vendor/cloud.google.com/go/issue_template.md new file mode 100644 index 000000000..e2ccef3e7 --- /dev/null +++ b/vendor/cloud.google.com/go/issue_template.md @@ -0,0 +1,17 @@ +(delete this for feature requests) + +## Client + +e.g. PubSub + +## Describe Your Environment + +e.g. Alpine Docker on GKE + +## Expected Behavior + +e.g. Messages arrive really fast. + +## Actual Behavior + +e.g. Messages arrive really slowly. \ No newline at end of file diff --git a/vendor/cloud.google.com/go/logging/doc.go b/vendor/cloud.google.com/go/logging/doc.go index 32ca717f4..d5f425e55 100644 --- a/vendor/cloud.google.com/go/logging/doc.go +++ b/vendor/cloud.google.com/go/logging/doc.go @@ -38,7 +38,7 @@ Use a Client to interact with the Stackdriver Logging API. Basic Usage -For most use-cases, you'll want to add log entries to a buffer to be periodically +For most use cases, you'll want to add log entries to a buffer to be periodically flushed (automatically and asynchronously) to the Stackdriver Logging service. // Initialize a logger @@ -63,11 +63,28 @@ Synchronous Logging For critical errors, you may want to send your log entries immediately. LogSync is slow and will block until the log entry has been sent, so it is -not recommended for basic use. +not recommended for normal use. lg.LogSync(ctx, logging.Entry{Payload: "ALERT! Something critical happened!"}) +Payloads + +An entry payload can be a string, as in the examples above. It can also be any value +that can be marshaled to a JSON object, like a map[string]interface{} or a struct: + + type MyEntry struct { + Name string + Count int + } + lg.Log(logging.Entry{Payload: MyEntry{Name: "Bob", Count: 3}}) + +If you have a []byte of JSON, wrap it in json.RawMessage: + + j := []byte(`{"Name": "Bob", "Count": 3}`) + lg.Log(logging.Entry{Payload: json.RawMessage(j)}) + + The Standard Logger Interface You may want use a standard log.Logger in your program. @@ -86,5 +103,15 @@ An Entry may have one of a number of severity levels associated with it. Severity: logging.Critical, } + +Viewing Logs + +You can view Stackdriver logs for projects at +https://console.cloud.google.com/logs/viewer. Use the dropdown at the top left. 
When +running from a Google Cloud Platform VM, select "GCE VM Instance". Otherwise, select +"Google Project" and then the project ID. Logs for organizations, folders and billing +accounts can be viewed on the command line with the "gcloud logging read" command. + + */ package logging // import "cloud.google.com/go/logging" diff --git a/vendor/cloud.google.com/go/logging/examples_test.go b/vendor/cloud.google.com/go/logging/examples_test.go index 10ceee92a..dcee76db1 100644 --- a/vendor/cloud.google.com/go/logging/examples_test.go +++ b/vendor/cloud.google.com/go/logging/examples_test.go @@ -15,6 +15,7 @@ package logging_test import ( + "encoding/json" "fmt" "os" @@ -107,6 +108,35 @@ func ExampleLogger_Log() { lg.Log(logging.Entry{Payload: "something happened"}) } +// An Entry payload can be anything that marshals to a +// JSON object, like a struct. +func ExampleLogger_Log_struct() { + type MyEntry struct { + Name string + Count int + } + + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + lg := client.Logger("my-log") + lg.Log(logging.Entry{Payload: MyEntry{Name: "Bob", Count: 3}}) +} + +// To log a JSON value, wrap it in json.RawMessage. +func ExampleLogger_Log_json() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + lg := client.Logger("my-log") + j := []byte(`{"Name": "Bob", "Count": 3}`) + lg.Log(logging.Entry{Payload: json.RawMessage(j)}) +} + func ExampleLogger_Flush() { ctx := context.Background() client, err := logging.NewClient(ctx, "my-project") diff --git a/vendor/cloud.google.com/go/logging/logging.go b/vendor/cloud.google.com/go/logging/logging.go index 7b33c971d..b341f61f7 100644 --- a/vendor/cloud.google.com/go/logging/logging.go +++ b/vendor/cloud.google.com/go/logging/logging.go @@ -79,6 +79,12 @@ const ( // DefaultBufferedByteLimit is the default value for the BufferedByteLimit LoggerOption. DefaultBufferedByteLimit = 1 << 30 // 1GiB + + // defaultWriteTimeout is the timeout for the underlying write API calls. As + // write API calls are not idempotent, they are not retried on timeout. This + // timeout is to allow clients to degrade gracefully if underlying logging + // service is temporarily impaired for some reason. + defaultWriteTimeout = 10 * time.Minute ) // For testing: @@ -228,6 +234,7 @@ type Logger struct { // Options commonResource *mrpb.MonitoredResource commonLabels map[string]string + writeTimeout time.Duration } // A LoggerOption is a configuration option for a Logger. @@ -321,6 +328,15 @@ type commonLabels map[string]string func (c commonLabels) set(l *Logger) { l.commonLabels = c } +// ConcurrentWriteLimit determines how many goroutines will send log entries to the +// underlying service. The default is 1. Set ConcurrentWriteLimit to a higher value to +// increase throughput. +func ConcurrentWriteLimit(n int) LoggerOption { return concurrentWriteLimit(n) } + +type concurrentWriteLimit int + +func (c concurrentWriteLimit) set(l *Logger) { l.bundler.HandlerLimit = int(c) } + // DelayThreshold is the maximum amount of time that an entry should remain // buffered in memory before a call to the logging service is triggered. 
Larger // values of DelayThreshold will generally result in fewer calls to the logging @@ -397,10 +413,8 @@ func (c *Client) Logger(logID string, opts ...LoggerOption) *Logger { logName: internal.LogPath(c.parent, logID), commonResource: r, } - // TODO(jba): determine the right context for the bundle handler. - ctx := context.TODO() l.bundler = bundler.NewBundler(&logpb.LogEntry{}, func(entries interface{}) { - l.writeLogEntries(ctx, entries.([]*logpb.LogEntry)) + l.writeLogEntries(entries.([]*logpb.LogEntry)) }) l.bundler.DelayThreshold = DefaultDelayThreshold l.bundler.BundleCountThreshold = DefaultEntryCountThreshold @@ -409,12 +423,14 @@ func (c *Client) Logger(logID string, opts ...LoggerOption) *Logger { for _, opt := range opts { opt.set(l) } - l.stdLoggers = map[Severity]*log.Logger{} for s := range severityName { l.stdLoggers[s] = log.New(severityWriter{l, s}, "", 0) } + c.loggers.Add(1) + // Start a goroutine that cleans up the bundler, its channel + // and the writer goroutines when the client is closed. go func() { defer c.loggers.Done() <-c.donec @@ -445,7 +461,7 @@ func (c *Client) Close() error { c.loggers.Wait() // wait for all bundlers to flush and close // Now there can be no more errors. close(c.errc) // terminate error goroutine - // Prefer logging errors to close errors. + // Prefer errors arising from logging to the error returned from Close. err := c.extractErrorInfo() err2 := c.client.Close() if err == nil { @@ -525,9 +541,8 @@ type Entry struct { // The zero value is Default. Severity Severity - // Payload must be either a string or something that - // marshals via the encoding/json package to a JSON object - // (and not any other type of JSON value). + // Payload must be either a string, or something that marshals via the + // encoding/json package to a JSON object (and not any other type of JSON value). Payload interface{} // Labels optionally specifies key/value labels for the log entry. @@ -556,9 +571,7 @@ type Entry struct { // reading entries. It is an error to set it when writing entries. LogName string - // Resource is the monitored resource associated with the entry. It is set - // by the client when reading entries. It is an error to set it when - // writing entries. + // Resource is the monitored resource associated with the entry. Resource *mrpb.MonitoredResource // Trace is the resource name of the trace associated with the log entry, @@ -642,13 +655,19 @@ func toProtoStruct(v interface{}) (*structpb.Struct, error) { if s, ok := v.(*structpb.Struct); ok { return s, nil } - // v is a Go struct that supports JSON marshalling. We want a Struct + // v is a Go value that supports JSON marshalling. We want a Struct // protobuf. Some day we may have a more direct way to get there, but right - // now the only way is to marshal the Go struct to JSON, unmarshal into a + // now the only way is to marshal the Go value to JSON, unmarshal into a // map, and then build the Struct proto from the map. 
- jb, err := json.Marshal(v) - if err != nil { - return nil, fmt.Errorf("logging: json.Marshal: %v", err) + var jb []byte + var err error + if raw, ok := v.(json.RawMessage); ok { // needed for Go 1.7 and below + jb = []byte(raw) + } else { + jb, err = json.Marshal(v) + if err != nil { + return nil, fmt.Errorf("logging: json.Marshal: %v", err) + } } var m map[string]interface{} err = json.Unmarshal(jb, &m) @@ -730,13 +749,15 @@ func (l *Logger) Flush() error { return l.client.extractErrorInfo() } -func (l *Logger) writeLogEntries(ctx context.Context, entries []*logpb.LogEntry) { +func (l *Logger) writeLogEntries(entries []*logpb.LogEntry) { req := &logpb.WriteLogEntriesRequest{ LogName: l.logName, Resource: l.commonResource, Labels: l.commonLabels, Entries: entries, } + ctx, cancel := context.WithTimeout(context.Background(), defaultWriteTimeout) + defer cancel() _, err := l.client.client.WriteLogEntries(ctx, req) if err != nil { l.client.error(err) @@ -777,8 +798,8 @@ func toLogEntry(e Entry) (*logpb.LogEntry, error) { Operation: e.Operation, Labels: e.Labels, Trace: e.Trace, + Resource: e.Resource, } - switch p := e.Payload.(type) { case string: ent.Payload = &logpb.LogEntry_TextPayload{TextPayload: p} diff --git a/vendor/cloud.google.com/go/logging/logging_test.go b/vendor/cloud.google.com/go/logging/logging_test.go index af6ab2921..66964a28b 100644 --- a/vendor/cloud.google.com/go/logging/logging_test.go +++ b/vendor/cloud.google.com/go/logging/logging_test.go @@ -20,8 +20,10 @@ import ( "flag" "fmt" "log" + "math/rand" "os" "strings" + "sync" "testing" "time" @@ -139,7 +141,6 @@ func TestMain(m *testing.M) { client.OnError = func(e error) { errorc <- e } exit := m.Run() - client.Close() os.Exit(exit) } @@ -424,7 +425,9 @@ func TestPing(t *testing.T) { t.Errorf("project %s, #2: got %v, expected nil", testProjectID, err) } // nonexistent project - c, _ := newClients(ctx, testProjectID+"-BAD") + c, a := newClients(ctx, testProjectID+"-BAD") + defer c.Close() + defer a.Close() if err := c.Ping(ctx); err == nil { t.Errorf("nonexistent project: want error pinging logging api, got nil") } @@ -469,9 +472,14 @@ func TestLogsAndDelete(t *testing.T) { } if strings.HasPrefix(logID, testLogIDPrefix) { if err := aclient.DeleteLog(ctx, logID); err != nil { - t.Fatalf("deleting %q: %v", logID, err) + // Ignore NotFound. Sometimes, amazingly, DeleteLog cannot find + // a log that is returned by Logs. + if status.Code(err) != codes.NotFound { + t.Fatalf("deleting %q: %v", logID, err) + } + } else { + nDeleted++ } - nDeleted++ } } t.Logf("deleted %d logs", nDeleted) @@ -483,6 +491,8 @@ func TestNonProjectParent(t *testing.T) { const orgID = "433637338589" // org ID for google.com parent := "organizations/" + orgID c, a := newClients(ctx, parent) + defer c.Close() + defer a.Close() lg := c.Logger(testLogID) err := lg.LogSync(ctx, logging.Entry{Payload: "hello"}) if integrationTest { @@ -534,3 +544,87 @@ func waitFor(f func() bool) bool { func() (bool, error) { return f(), nil }) return err == nil } + +// Interleave a lot of Log and Flush calls, to induce race conditions. 
+// Run this test with: +// go test -run LogFlushRace -race -count 100 +func TestLogFlushRace(t *testing.T) { + initLogs(ctx) // Generate new testLogID + lg := client.Logger(testLogID, + logging.ConcurrentWriteLimit(5), // up to 5 concurrent log writes + logging.EntryCountThreshold(100)) // small bundle size to increase interleaving + var wgf, wgl sync.WaitGroup + donec := make(chan struct{}) + for i := 0; i < 10; i++ { + wgl.Add(1) + go func() { + defer wgl.Done() + for j := 0; j < 1e4; j++ { + lg.Log(logging.Entry{Payload: "the payload"}) + } + }() + } + for i := 0; i < 5; i++ { + wgf.Add(1) + go func() { + defer wgf.Done() + for { + select { + case <-donec: + return + case <-time.After(time.Duration(rand.Intn(5)) * time.Millisecond): + lg.Flush() + } + } + }() + } + wgl.Wait() + close(donec) + wgf.Wait() +} + +// Test the throughput of concurrent writers. +// TODO(jba): when 1.8 is out, use sub-benchmarks. +func BenchmarkConcurrentWrites1(b *testing.B) { + benchmarkConcurrentWrites(b, 1) +} + +func BenchmarkConcurrentWrites2(b *testing.B) { + benchmarkConcurrentWrites(b, 2) +} + +func BenchmarkConcurrentWrites4(b *testing.B) { + benchmarkConcurrentWrites(b, 4) +} + +func BenchmarkConcurrentWrites8(b *testing.B) { + benchmarkConcurrentWrites(b, 8) +} + +func BenchmarkConcurrentWrites16(b *testing.B) { + benchmarkConcurrentWrites(b, 16) +} + +func BenchmarkConcurrentWrites32(b *testing.B) { + benchmarkConcurrentWrites(b, 32) +} + +func benchmarkConcurrentWrites(b *testing.B, c int) { + if !integrationTest { + b.Skip("only makes sense when running against production service") + } + b.StopTimer() + lg := client.Logger(testLogID, logging.ConcurrentWriteLimit(c), logging.EntryCountThreshold(1000)) + const ( + nEntries = 1e5 + payload = "the quick brown fox jumps over the lazy dog" + ) + b.SetBytes(int64(nEntries * len(payload))) + b.StartTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < nEntries; j++ { + lg.Log(logging.Entry{Payload: payload}) + } + lg.Flush() + } +} diff --git a/vendor/cloud.google.com/go/logging/logging_unexported_test.go b/vendor/cloud.google.com/go/logging/logging_unexported_test.go index 5dacf1625..4fa42cb85 100644 --- a/vendor/cloud.google.com/go/logging/logging_unexported_test.go +++ b/vendor/cloud.google.com/go/logging/logging_unexported_test.go @@ -17,6 +17,7 @@ package logging import ( + "encoding/json" "net/http" "net/url" "testing" @@ -179,6 +180,53 @@ func TestToProtoStruct(t *testing.T) { } } +func TestToLogEntryPayload(t *testing.T) { + for _, test := range []struct { + in interface{} + wantText string + wantStruct *structpb.Struct + }{ + { + in: "string", + wantText: "string", + }, + { + in: map[string]interface{}{"a": 1, "b": true}, + wantStruct: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "a": {Kind: &structpb.Value_NumberValue{NumberValue: 1}}, + "b": {Kind: &structpb.Value_BoolValue{BoolValue: true}}, + }, + }, + }, + { + in: json.RawMessage([]byte(`{"a": 1, "b": true}`)), + wantStruct: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "a": {Kind: &structpb.Value_NumberValue{NumberValue: 1}}, + "b": {Kind: &structpb.Value_BoolValue{BoolValue: true}}, + }, + }, + }, + } { + e, err := toLogEntry(Entry{Payload: test.in}) + if err != nil { + t.Fatalf("%+v: %v", test.in, err) + } + if test.wantStruct != nil { + got := e.GetJsonPayload() + if !proto.Equal(got, test.wantStruct) { + t.Errorf("%+v: got %s, want %s", test.in, got, test.wantStruct) + } + } else { + got := e.GetTextPayload() + if got != test.wantText { + t.Errorf("%+v: got 
%s, want %s", test.in, got, test.wantText) + } + } + } +} + func TestFromHTTPRequest(t *testing.T) { const testURL = "http:://example.com/path?q=1" u, err := url.Parse(testURL) diff --git a/vendor/cloud.google.com/go/longrunning/longrunning.go b/vendor/cloud.google.com/go/longrunning/longrunning.go index 3054d2305..20d3b75ac 100644 --- a/vendor/cloud.google.com/go/longrunning/longrunning.go +++ b/vendor/cloud.google.com/go/longrunning/longrunning.go @@ -28,13 +28,13 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" - gax "github.com/googleapis/gax-go" + "github.com/googleapis/gax-go" + "google.golang.org/grpc/status" "golang.org/x/net/context" autogen "cloud.google.com/go/longrunning/autogen" pb "google.golang.org/genproto/googleapis/longrunning" - "google.golang.org/grpc" "google.golang.org/grpc/codes" ) @@ -108,7 +108,7 @@ func (op *Operation) Poll(ctx context.Context, resp proto.Message, opts ...gax.C switch r := op.proto.Result.(type) { case *pb.Operation_Error: // TODO (pongad): r.Details may contain further information - return grpc.Errorf(codes.Code(r.Error.Code), "%s", r.Error.Message) + return status.Errorf(codes.Code(r.Error.Code), "%s", r.Error.Message) case *pb.Operation_Response: if resp == nil { return nil diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go new file mode 100644 index 000000000..7cb10a51f --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go @@ -0,0 +1,274 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// AlertPolicyCallOptions contains the retry settings for each method of AlertPolicyClient. 
+type AlertPolicyCallOptions struct { + ListAlertPolicies []gax.CallOption + GetAlertPolicy []gax.CallOption + CreateAlertPolicy []gax.CallOption + DeleteAlertPolicy []gax.CallOption + UpdateAlertPolicy []gax.CallOption +} + +func defaultAlertPolicyClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultAlertPolicyCallOptions() *AlertPolicyCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &AlertPolicyCallOptions{ + ListAlertPolicies: retry[[2]string{"default", "idempotent"}], + GetAlertPolicy: retry[[2]string{"default", "idempotent"}], + CreateAlertPolicy: retry[[2]string{"default", "non_idempotent"}], + DeleteAlertPolicy: retry[[2]string{"default", "idempotent"}], + UpdateAlertPolicy: retry[[2]string{"default", "non_idempotent"}], + } +} + +// AlertPolicyClient is a client for interacting with Stackdriver Monitoring API. +type AlertPolicyClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + alertPolicyClient monitoringpb.AlertPolicyServiceClient + + // The call options for this service. + CallOptions *AlertPolicyCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewAlertPolicyClient creates a new alert policy service client. +// +// The AlertPolicyService API is used to manage (list, create, delete, +// edit) alert policies in Stackdriver Monitoring. An alerting policy is +// a description of the conditions under which some aspect of your +// system is considered to be "unhealthy" and the ways to notify +// people or services about this state. In addition to using this API, alert +// policies can also be managed through +// Stackdriver Monitoring (at https://cloud.google.com/monitoring/docs/), +// which can be reached by clicking the "Monitoring" tab in +// Cloud Console (at https://console.cloud.google.com/). +func NewAlertPolicyClient(ctx context.Context, opts ...option.ClientOption) (*AlertPolicyClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultAlertPolicyClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &AlertPolicyClient{ + conn: conn, + CallOptions: defaultAlertPolicyCallOptions(), + + alertPolicyClient: monitoringpb.NewAlertPolicyServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *AlertPolicyClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *AlertPolicyClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *AlertPolicyClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) 
+ kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListAlertPolicies lists the existing alerting policies for the project. +func (c *AlertPolicyClient) ListAlertPolicies(ctx context.Context, req *monitoringpb.ListAlertPoliciesRequest, opts ...gax.CallOption) *AlertPolicyIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListAlertPolicies[0:len(c.CallOptions.ListAlertPolicies):len(c.CallOptions.ListAlertPolicies)], opts...) + it := &AlertPolicyIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.AlertPolicy, string, error) { + var resp *monitoringpb.ListAlertPoliciesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.ListAlertPolicies(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.AlertPolicies, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetAlertPolicy gets a single alerting policy. +func (c *AlertPolicyClient) GetAlertPolicy(ctx context.Context, req *monitoringpb.GetAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetAlertPolicy[0:len(c.CallOptions.GetAlertPolicy):len(c.CallOptions.GetAlertPolicy)], opts...) + var resp *monitoringpb.AlertPolicy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.GetAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateAlertPolicy creates a new alerting policy. +func (c *AlertPolicyClient) CreateAlertPolicy(ctx context.Context, req *monitoringpb.CreateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateAlertPolicy[0:len(c.CallOptions.CreateAlertPolicy):len(c.CallOptions.CreateAlertPolicy)], opts...) + var resp *monitoringpb.AlertPolicy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.CreateAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteAlertPolicy deletes an alerting policy. +func (c *AlertPolicyClient) DeleteAlertPolicy(ctx context.Context, req *monitoringpb.DeleteAlertPolicyRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteAlertPolicy[0:len(c.CallOptions.DeleteAlertPolicy):len(c.CallOptions.DeleteAlertPolicy)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.alertPolicyClient.DeleteAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ return err +} + +// UpdateAlertPolicy updates an alerting policy. You can either replace the entire policy with +// a new one or replace only certain fields in the current alerting policy by +// specifying the fields to be updated via updateMask. Returns the +// updated alerting policy. +func (c *AlertPolicyClient) UpdateAlertPolicy(ctx context.Context, req *monitoringpb.UpdateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateAlertPolicy[0:len(c.CallOptions.UpdateAlertPolicy):len(c.CallOptions.UpdateAlertPolicy)], opts...) + var resp *monitoringpb.AlertPolicy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.alertPolicyClient.UpdateAlertPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AlertPolicyIterator manages a stream of *monitoringpb.AlertPolicy. +type AlertPolicyIterator struct { + items []*monitoringpb.AlertPolicy + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.AlertPolicy, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *AlertPolicyIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *AlertPolicyIterator) Next() (*monitoringpb.AlertPolicy, error) { + var item *monitoringpb.AlertPolicy + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *AlertPolicyIterator) bufLen() int { + return len(it.items) +} + +func (it *AlertPolicyIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client_example_test.go b/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client_example_test.go new file mode 100644 index 000000000..4ddc33a78 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client_example_test.go @@ -0,0 +1,128 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
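Beyond the generated examples that follow, the variadic gax.CallOption parameters on the methods above allow per-call overrides of the default retry policy. A hedged sketch (the backoff values here are illustrative, not this package's defaults):

	// Illustrative sketch, not part of the patch: retry this one call on
	// Unavailable with a custom backoff, using the same gax helpers as
	// defaultAlertPolicyCallOptions above.
	retry := gax.WithRetry(func() gax.Retryer {
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
			Initial:    200 * time.Millisecond,
			Max:        10 * time.Second,
			Multiplier: 2,
		})
	})
	resp, err := c.GetAlertPolicy(ctx, req, retry)
	if err != nil {
		// TODO: Handle error.
	}
	_ = resp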
+ +package monitoring_test + +import ( + "cloud.google.com/go/monitoring/apiv3" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +func ExampleNewAlertPolicyClient() { + ctx := context.Background() + c, err := monitoring.NewAlertPolicyClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleAlertPolicyClient_ListAlertPolicies() { + ctx := context.Background() + c, err := monitoring.NewAlertPolicyClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.ListAlertPoliciesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListAlertPolicies(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleAlertPolicyClient_GetAlertPolicy() { + ctx := context.Background() + c, err := monitoring.NewAlertPolicyClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.GetAlertPolicyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetAlertPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleAlertPolicyClient_CreateAlertPolicy() { + ctx := context.Background() + c, err := monitoring.NewAlertPolicyClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.CreateAlertPolicyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateAlertPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleAlertPolicyClient_DeleteAlertPolicy() { + ctx := context.Background() + c, err := monitoring.NewAlertPolicyClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.DeleteAlertPolicyRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteAlertPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleAlertPolicyClient_UpdateAlertPolicy() { + ctx := context.Background() + c, err := monitoring.NewAlertPolicyClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.UpdateAlertPolicyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateAlertPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/mock_test.go b/vendor/cloud.google.com/go/monitoring/apiv3/mock_test.go index 66e152231..be38e7fcd 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/mock_test.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/mock_test.go @@ -48,6 +48,81 @@ var _ = io.EOF var _ = ptypes.MarshalAny var _ status.Status +type mockAlertPolicyServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + monitoringpb.AlertPolicyServiceServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockAlertPolicyServer) ListAlertPolicies(ctx context.Context, req *monitoringpb.ListAlertPoliciesRequest) (*monitoringpb.ListAlertPoliciesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.ListAlertPoliciesResponse), nil +} + +func (s *mockAlertPolicyServer) GetAlertPolicy(ctx context.Context, req *monitoringpb.GetAlertPolicyRequest) (*monitoringpb.AlertPolicy, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.AlertPolicy), nil +} + +func (s *mockAlertPolicyServer) CreateAlertPolicy(ctx context.Context, req *monitoringpb.CreateAlertPolicyRequest) (*monitoringpb.AlertPolicy, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.AlertPolicy), nil +} + +func (s *mockAlertPolicyServer) DeleteAlertPolicy(ctx context.Context, req *monitoringpb.DeleteAlertPolicyRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockAlertPolicyServer) UpdateAlertPolicy(ctx context.Context, req *monitoringpb.UpdateAlertPolicyRequest) (*monitoringpb.AlertPolicy, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.AlertPolicy), nil +} + type mockGroupServer struct { // Embed for forward compatibility. // Tests will keep working if more methods are added @@ -246,6 +321,105 @@ func (s *mockMetricServer) CreateTimeSeries(ctx context.Context, req *monitoring return s.resps[0].(*emptypb.Empty), nil } +type mockNotificationChannelServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + monitoringpb.NotificationChannelServiceServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockNotificationChannelServer) ListNotificationChannelDescriptors(ctx context.Context, req *monitoringpb.ListNotificationChannelDescriptorsRequest) (*monitoringpb.ListNotificationChannelDescriptorsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.ListNotificationChannelDescriptorsResponse), nil +} + +func (s *mockNotificationChannelServer) GetNotificationChannelDescriptor(ctx context.Context, req *monitoringpb.GetNotificationChannelDescriptorRequest) (*monitoringpb.NotificationChannelDescriptor, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.NotificationChannelDescriptor), nil +} + +func (s *mockNotificationChannelServer) ListNotificationChannels(ctx context.Context, req *monitoringpb.ListNotificationChannelsRequest) (*monitoringpb.ListNotificationChannelsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.ListNotificationChannelsResponse), nil +} + +func (s *mockNotificationChannelServer) GetNotificationChannel(ctx context.Context, req *monitoringpb.GetNotificationChannelRequest) (*monitoringpb.NotificationChannel, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.NotificationChannel), nil +} + +func (s *mockNotificationChannelServer) CreateNotificationChannel(ctx context.Context, req *monitoringpb.CreateNotificationChannelRequest) (*monitoringpb.NotificationChannel, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.NotificationChannel), nil +} + +func (s *mockNotificationChannelServer) UpdateNotificationChannel(ctx context.Context, req *monitoringpb.UpdateNotificationChannelRequest) (*monitoringpb.NotificationChannel, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.NotificationChannel), nil +} + +func (s *mockNotificationChannelServer) DeleteNotificationChannel(ctx context.Context, req 
*monitoringpb.DeleteNotificationChannelRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + type mockUptimeCheckServer struct { // Embed for forward compatibility. // Tests will keep working if more methods are added @@ -338,17 +512,21 @@ func (s *mockUptimeCheckServer) ListUptimeCheckIps(ctx context.Context, req *mon var clientOpt option.ClientOption var ( - mockGroup mockGroupServer - mockMetric mockMetricServer - mockUptimeCheck mockUptimeCheckServer + mockAlertPolicy mockAlertPolicyServer + mockGroup mockGroupServer + mockMetric mockMetricServer + mockNotificationChannel mockNotificationChannelServer + mockUptimeCheck mockUptimeCheckServer ) func TestMain(m *testing.M) { flag.Parse() serv := grpc.NewServer() + monitoringpb.RegisterAlertPolicyServiceServer(serv, &mockAlertPolicy) monitoringpb.RegisterGroupServiceServer(serv, &mockGroup) monitoringpb.RegisterMetricServiceServer(serv, &mockMetric) + monitoringpb.RegisterNotificationChannelServiceServer(serv, &mockNotificationChannel) monitoringpb.RegisterUptimeCheckServiceServer(serv, &mockUptimeCheck) lis, err := net.Listen("tcp", "localhost:0") @@ -366,6 +544,317 @@ func TestMain(m *testing.M) { os.Exit(m.Run()) } +func TestAlertPolicyServiceListAlertPolicies(t *testing.T) { + var nextPageToken string = "" + var alertPoliciesElement *monitoringpb.AlertPolicy = &monitoringpb.AlertPolicy{} + var alertPolicies = []*monitoringpb.AlertPolicy{alertPoliciesElement} + var expectedResponse = &monitoringpb.ListAlertPoliciesResponse{ + NextPageToken: nextPageToken, + AlertPolicies: alertPolicies, + } + + mockAlertPolicy.err = nil + mockAlertPolicy.reqs = nil + + mockAlertPolicy.resps = append(mockAlertPolicy.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &monitoringpb.ListAlertPoliciesRequest{ + Name: formattedName, + } + + c, err := NewAlertPolicyClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListAlertPolicies(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockAlertPolicy.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.AlertPolicies[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestAlertPolicyServiceListAlertPoliciesError(t *testing.T) { + errCode := codes.PermissionDenied + mockAlertPolicy.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &monitoringpb.ListAlertPoliciesRequest{ + Name: formattedName, + } + + c, err := NewAlertPolicyClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListAlertPolicies(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = 
resp +} +func TestAlertPolicyServiceGetAlertPolicy(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var expectedResponse = &monitoringpb.AlertPolicy{ + Name: name2, + DisplayName: displayName, + } + + mockAlertPolicy.err = nil + mockAlertPolicy.reqs = nil + + mockAlertPolicy.resps = append(mockAlertPolicy.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/alertPolicies/%s", "[PROJECT]", "[ALERT_POLICY]") + var request = &monitoringpb.GetAlertPolicyRequest{ + Name: formattedName, + } + + c, err := NewAlertPolicyClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetAlertPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockAlertPolicy.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestAlertPolicyServiceGetAlertPolicyError(t *testing.T) { + errCode := codes.PermissionDenied + mockAlertPolicy.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/alertPolicies/%s", "[PROJECT]", "[ALERT_POLICY]") + var request = &monitoringpb.GetAlertPolicyRequest{ + Name: formattedName, + } + + c, err := NewAlertPolicyClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetAlertPolicy(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestAlertPolicyServiceCreateAlertPolicy(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var expectedResponse = &monitoringpb.AlertPolicy{ + Name: name2, + DisplayName: displayName, + } + + mockAlertPolicy.err = nil + mockAlertPolicy.reqs = nil + + mockAlertPolicy.resps = append(mockAlertPolicy.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var alertPolicy *monitoringpb.AlertPolicy = &monitoringpb.AlertPolicy{} + var request = &monitoringpb.CreateAlertPolicyRequest{ + Name: formattedName, + AlertPolicy: alertPolicy, + } + + c, err := NewAlertPolicyClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateAlertPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockAlertPolicy.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestAlertPolicyServiceCreateAlertPolicyError(t *testing.T) { + errCode := codes.PermissionDenied + mockAlertPolicy.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var alertPolicy *monitoringpb.AlertPolicy = &monitoringpb.AlertPolicy{} + var request = &monitoringpb.CreateAlertPolicyRequest{ + Name: formattedName, + AlertPolicy: alertPolicy, + } + + c, err := NewAlertPolicyClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateAlertPolicy(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { 
+ t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestAlertPolicyServiceDeleteAlertPolicy(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockAlertPolicy.err = nil + mockAlertPolicy.reqs = nil + + mockAlertPolicy.resps = append(mockAlertPolicy.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/alertPolicies/%s", "[PROJECT]", "[ALERT_POLICY]") + var request = &monitoringpb.DeleteAlertPolicyRequest{ + Name: formattedName, + } + + c, err := NewAlertPolicyClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteAlertPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockAlertPolicy.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestAlertPolicyServiceDeleteAlertPolicyError(t *testing.T) { + errCode := codes.PermissionDenied + mockAlertPolicy.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/alertPolicies/%s", "[PROJECT]", "[ALERT_POLICY]") + var request = &monitoringpb.DeleteAlertPolicyRequest{ + Name: formattedName, + } + + c, err := NewAlertPolicyClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteAlertPolicy(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestAlertPolicyServiceUpdateAlertPolicy(t *testing.T) { + var name string = "name3373707" + var displayName string = "displayName1615086568" + var expectedResponse = &monitoringpb.AlertPolicy{ + Name: name, + DisplayName: displayName, + } + + mockAlertPolicy.err = nil + mockAlertPolicy.reqs = nil + + mockAlertPolicy.resps = append(mockAlertPolicy.resps[:0], expectedResponse) + + var alertPolicy *monitoringpb.AlertPolicy = &monitoringpb.AlertPolicy{} + var request = &monitoringpb.UpdateAlertPolicyRequest{ + AlertPolicy: alertPolicy, + } + + c, err := NewAlertPolicyClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateAlertPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockAlertPolicy.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestAlertPolicyServiceUpdateAlertPolicyError(t *testing.T) { + errCode := codes.PermissionDenied + mockAlertPolicy.err = gstatus.Error(errCode, "test error") + + var alertPolicy *monitoringpb.AlertPolicy = &monitoringpb.AlertPolicy{} + var request = &monitoringpb.UpdateAlertPolicyRequest{ + AlertPolicy: alertPolicy, + } + + c, err := NewAlertPolicyClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateAlertPolicy(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} func TestGroupServiceListGroups(t *testing.T) { var nextPageToken string = "" var groupElement *monitoringpb.Group = 
&monitoringpb.Group{} @@ -1308,6 +1797,466 @@ func TestMetricServiceCreateTimeSeriesError(t *testing.T) { t.Errorf("got error code %q, want %q", c, errCode) } } +func TestNotificationChannelServiceListNotificationChannelDescriptors(t *testing.T) { + var nextPageToken string = "" + var channelDescriptorsElement *monitoringpb.NotificationChannelDescriptor = &monitoringpb.NotificationChannelDescriptor{} + var channelDescriptors = []*monitoringpb.NotificationChannelDescriptor{channelDescriptorsElement} + var expectedResponse = &monitoringpb.ListNotificationChannelDescriptorsResponse{ + NextPageToken: nextPageToken, + ChannelDescriptors: channelDescriptors, + } + + mockNotificationChannel.err = nil + mockNotificationChannel.reqs = nil + + mockNotificationChannel.resps = append(mockNotificationChannel.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &monitoringpb.ListNotificationChannelDescriptorsRequest{ + Name: formattedName, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListNotificationChannelDescriptors(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockNotificationChannel.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.ChannelDescriptors[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestNotificationChannelServiceListNotificationChannelDescriptorsError(t *testing.T) { + errCode := codes.PermissionDenied + mockNotificationChannel.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &monitoringpb.ListNotificationChannelDescriptorsRequest{ + Name: formattedName, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListNotificationChannelDescriptors(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestNotificationChannelServiceGetNotificationChannelDescriptor(t *testing.T) { + var name2 string = "name2-1052831874" + var type_ string = "type3575610" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &monitoringpb.NotificationChannelDescriptor{ + Name: name2, + Type: type_, + DisplayName: displayName, + Description: description, + } + + mockNotificationChannel.err = nil + mockNotificationChannel.reqs = nil + + mockNotificationChannel.resps = append(mockNotificationChannel.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/notificationChannelDescriptors/%s", "[PROJECT]", "[CHANNEL_DESCRIPTOR]") + var request = &monitoringpb.GetNotificationChannelDescriptorRequest{ + Name: formattedName, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetNotificationChannelDescriptor(context.Background(), request) + + if err != nil { + t.Fatal(err) + 
} + + if want, got := request, mockNotificationChannel.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestNotificationChannelServiceGetNotificationChannelDescriptorError(t *testing.T) { + errCode := codes.PermissionDenied + mockNotificationChannel.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/notificationChannelDescriptors/%s", "[PROJECT]", "[CHANNEL_DESCRIPTOR]") + var request = &monitoringpb.GetNotificationChannelDescriptorRequest{ + Name: formattedName, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetNotificationChannelDescriptor(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestNotificationChannelServiceListNotificationChannels(t *testing.T) { + var nextPageToken string = "" + var notificationChannelsElement *monitoringpb.NotificationChannel = &monitoringpb.NotificationChannel{} + var notificationChannels = []*monitoringpb.NotificationChannel{notificationChannelsElement} + var expectedResponse = &monitoringpb.ListNotificationChannelsResponse{ + NextPageToken: nextPageToken, + NotificationChannels: notificationChannels, + } + + mockNotificationChannel.err = nil + mockNotificationChannel.reqs = nil + + mockNotificationChannel.resps = append(mockNotificationChannel.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &monitoringpb.ListNotificationChannelsRequest{ + Name: formattedName, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListNotificationChannels(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockNotificationChannel.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.NotificationChannels[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestNotificationChannelServiceListNotificationChannelsError(t *testing.T) { + errCode := codes.PermissionDenied + mockNotificationChannel.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &monitoringpb.ListNotificationChannelsRequest{ + Name: formattedName, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListNotificationChannels(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestNotificationChannelServiceGetNotificationChannel(t *testing.T) { + var type_ string = "type3575610" + var name2 string = "name2-1052831874" + var displayName string = 
"displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &monitoringpb.NotificationChannel{ + Type: type_, + Name: name2, + DisplayName: displayName, + Description: description, + } + + mockNotificationChannel.err = nil + mockNotificationChannel.reqs = nil + + mockNotificationChannel.resps = append(mockNotificationChannel.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/notificationChannels/%s", "[PROJECT]", "[NOTIFICATION_CHANNEL]") + var request = &monitoringpb.GetNotificationChannelRequest{ + Name: formattedName, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetNotificationChannel(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockNotificationChannel.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestNotificationChannelServiceGetNotificationChannelError(t *testing.T) { + errCode := codes.PermissionDenied + mockNotificationChannel.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/notificationChannels/%s", "[PROJECT]", "[NOTIFICATION_CHANNEL]") + var request = &monitoringpb.GetNotificationChannelRequest{ + Name: formattedName, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetNotificationChannel(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestNotificationChannelServiceCreateNotificationChannel(t *testing.T) { + var type_ string = "type3575610" + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &monitoringpb.NotificationChannel{ + Type: type_, + Name: name2, + DisplayName: displayName, + Description: description, + } + + mockNotificationChannel.err = nil + mockNotificationChannel.reqs = nil + + mockNotificationChannel.resps = append(mockNotificationChannel.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var notificationChannel *monitoringpb.NotificationChannel = &monitoringpb.NotificationChannel{} + var request = &monitoringpb.CreateNotificationChannelRequest{ + Name: formattedName, + NotificationChannel: notificationChannel, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateNotificationChannel(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockNotificationChannel.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestNotificationChannelServiceCreateNotificationChannelError(t *testing.T) { + errCode := codes.PermissionDenied + mockNotificationChannel.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]") + var 
notificationChannel *monitoringpb.NotificationChannel = &monitoringpb.NotificationChannel{} + var request = &monitoringpb.CreateNotificationChannelRequest{ + Name: formattedName, + NotificationChannel: notificationChannel, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateNotificationChannel(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestNotificationChannelServiceUpdateNotificationChannel(t *testing.T) { + var type_ string = "type3575610" + var name string = "name3373707" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &monitoringpb.NotificationChannel{ + Type: type_, + Name: name, + DisplayName: displayName, + Description: description, + } + + mockNotificationChannel.err = nil + mockNotificationChannel.reqs = nil + + mockNotificationChannel.resps = append(mockNotificationChannel.resps[:0], expectedResponse) + + var notificationChannel *monitoringpb.NotificationChannel = &monitoringpb.NotificationChannel{} + var request = &monitoringpb.UpdateNotificationChannelRequest{ + NotificationChannel: notificationChannel, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateNotificationChannel(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockNotificationChannel.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestNotificationChannelServiceUpdateNotificationChannelError(t *testing.T) { + errCode := codes.PermissionDenied + mockNotificationChannel.err = gstatus.Error(errCode, "test error") + + var notificationChannel *monitoringpb.NotificationChannel = &monitoringpb.NotificationChannel{} + var request = &monitoringpb.UpdateNotificationChannelRequest{ + NotificationChannel: notificationChannel, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateNotificationChannel(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestNotificationChannelServiceDeleteNotificationChannel(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockNotificationChannel.err = nil + mockNotificationChannel.reqs = nil + + mockNotificationChannel.resps = append(mockNotificationChannel.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/notificationChannels/%s", "[PROJECT]", "[NOTIFICATION_CHANNEL]") + var request = &monitoringpb.DeleteNotificationChannelRequest{ + Name: formattedName, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteNotificationChannel(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockNotificationChannel.reqs[0]; !proto.Equal(want, got) { + 
t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestNotificationChannelServiceDeleteNotificationChannelError(t *testing.T) { + errCode := codes.PermissionDenied + mockNotificationChannel.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/notificationChannels/%s", "[PROJECT]", "[NOTIFICATION_CHANNEL]") + var request = &monitoringpb.DeleteNotificationChannelRequest{ + Name: formattedName, + } + + c, err := NewNotificationChannelClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteNotificationChannel(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} func TestUptimeCheckServiceListUptimeCheckConfigs(t *testing.T) { var nextPageToken string = "" var uptimeCheckConfigsElement *monitoringpb.UptimeCheckConfig = &monitoringpb.UptimeCheckConfig{} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go new file mode 100644 index 000000000..54fdbc617 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go @@ -0,0 +1,369 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// NotificationChannelCallOptions contains the retry settings for each method of NotificationChannelClient. 
+type NotificationChannelCallOptions struct { + ListNotificationChannelDescriptors []gax.CallOption + GetNotificationChannelDescriptor []gax.CallOption + ListNotificationChannels []gax.CallOption + GetNotificationChannel []gax.CallOption + CreateNotificationChannel []gax.CallOption + UpdateNotificationChannel []gax.CallOption + DeleteNotificationChannel []gax.CallOption +} + +func defaultNotificationChannelClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultNotificationChannelCallOptions() *NotificationChannelCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &NotificationChannelCallOptions{ + ListNotificationChannelDescriptors: retry[[2]string{"default", "idempotent"}], + GetNotificationChannelDescriptor: retry[[2]string{"default", "idempotent"}], + ListNotificationChannels: retry[[2]string{"default", "idempotent"}], + GetNotificationChannel: retry[[2]string{"default", "idempotent"}], + CreateNotificationChannel: retry[[2]string{"default", "non_idempotent"}], + UpdateNotificationChannel: retry[[2]string{"default", "non_idempotent"}], + DeleteNotificationChannel: retry[[2]string{"default", "idempotent"}], + } +} + +// NotificationChannelClient is a client for interacting with Stackdriver Monitoring API. +type NotificationChannelClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + notificationChannelClient monitoringpb.NotificationChannelServiceClient + + // The call options for this service. + CallOptions *NotificationChannelCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewNotificationChannelClient creates a new notification channel service client. +// +// The Notification Channel API provides access to configuration that +// controls how messages related to incidents are sent. +func NewNotificationChannelClient(ctx context.Context, opts ...option.ClientOption) (*NotificationChannelClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultNotificationChannelClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &NotificationChannelClient{ + conn: conn, + CallOptions: defaultNotificationChannelCallOptions(), + + notificationChannelClient: monitoringpb.NewNotificationChannelServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *NotificationChannelClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *NotificationChannelClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *NotificationChannelClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) 
+ kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListNotificationChannelDescriptors lists the descriptors for supported channel types. The use of descriptors +// makes it possible for new channel types to be dynamically added. +func (c *NotificationChannelClient) ListNotificationChannelDescriptors(ctx context.Context, req *monitoringpb.ListNotificationChannelDescriptorsRequest, opts ...gax.CallOption) *NotificationChannelDescriptorIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListNotificationChannelDescriptors[0:len(c.CallOptions.ListNotificationChannelDescriptors):len(c.CallOptions.ListNotificationChannelDescriptors)], opts...) + it := &NotificationChannelDescriptorIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannelDescriptor, string, error) { + var resp *monitoringpb.ListNotificationChannelDescriptorsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.ListNotificationChannelDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.ChannelDescriptors, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetNotificationChannelDescriptor gets a single channel descriptor. The descriptor indicates which fields +// are expected / permitted for a notification channel of the given type. +func (c *NotificationChannelClient) GetNotificationChannelDescriptor(ctx context.Context, req *monitoringpb.GetNotificationChannelDescriptorRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetNotificationChannelDescriptor[0:len(c.CallOptions.GetNotificationChannelDescriptor):len(c.CallOptions.GetNotificationChannelDescriptor)], opts...) + var resp *monitoringpb.NotificationChannelDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.GetNotificationChannelDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListNotificationChannels lists the notification channels that have been created for the project. +func (c *NotificationChannelClient) ListNotificationChannels(ctx context.Context, req *monitoringpb.ListNotificationChannelsRequest, opts ...gax.CallOption) *NotificationChannelIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListNotificationChannels[0:len(c.CallOptions.ListNotificationChannels):len(c.CallOptions.ListNotificationChannels)], opts...) 
+ it := &NotificationChannelIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannel, string, error) { + var resp *monitoringpb.ListNotificationChannelsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.ListNotificationChannels(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.NotificationChannels, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetNotificationChannel gets a single notification channel. The channel includes the relevant +// configuration details with which the channel was created. However, the +// response may truncate or omit passwords, API keys, or other private key +// matter and thus the response may not be 100% identical to the information +// that was supplied in the call to the create method. +func (c *NotificationChannelClient) GetNotificationChannel(ctx context.Context, req *monitoringpb.GetNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetNotificationChannel[0:len(c.CallOptions.GetNotificationChannel):len(c.CallOptions.GetNotificationChannel)], opts...) + var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.GetNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateNotificationChannel creates a new notification channel, representing a single notification +// endpoint such as an email address, SMS number, or pagerduty service. +func (c *NotificationChannelClient) CreateNotificationChannel(ctx context.Context, req *monitoringpb.CreateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateNotificationChannel[0:len(c.CallOptions.CreateNotificationChannel):len(c.CallOptions.CreateNotificationChannel)], opts...) + var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.CreateNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateNotificationChannel updates a notification channel. Fields not specified in the field mask +// remain unchanged. 
+func (c *NotificationChannelClient) UpdateNotificationChannel(ctx context.Context, req *monitoringpb.UpdateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateNotificationChannel[0:len(c.CallOptions.UpdateNotificationChannel):len(c.CallOptions.UpdateNotificationChannel)], opts...) + var resp *monitoringpb.NotificationChannel + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.notificationChannelClient.UpdateNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteNotificationChannel deletes a notification channel. +func (c *NotificationChannelClient) DeleteNotificationChannel(ctx context.Context, req *monitoringpb.DeleteNotificationChannelRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteNotificationChannel[0:len(c.CallOptions.DeleteNotificationChannel):len(c.CallOptions.DeleteNotificationChannel)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.notificationChannelClient.DeleteNotificationChannel(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// NotificationChannelDescriptorIterator manages a stream of *monitoringpb.NotificationChannelDescriptor. +type NotificationChannelDescriptorIterator struct { + items []*monitoringpb.NotificationChannelDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannelDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *NotificationChannelDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *NotificationChannelDescriptorIterator) Next() (*monitoringpb.NotificationChannelDescriptor, error) { + var item *monitoringpb.NotificationChannelDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *NotificationChannelDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *NotificationChannelDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// NotificationChannelIterator manages a stream of *monitoringpb.NotificationChannel. +type NotificationChannelIterator struct { + items []*monitoringpb.NotificationChannel + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. 
+ // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannel, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *NotificationChannelIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *NotificationChannelIterator) Next() (*monitoringpb.NotificationChannel, error) { + var item *monitoringpb.NotificationChannel + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *NotificationChannelIterator) bufLen() int { + return len(it.items) +} + +func (it *NotificationChannelIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client_example_test.go b/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client_example_test.go new file mode 100644 index 000000000..eab117903 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client_example_test.go @@ -0,0 +1,170 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring_test + +import ( + "cloud.google.com/go/monitoring/apiv3" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +func ExampleNewNotificationChannelClient() { + ctx := context.Background() + c, err := monitoring.NewNotificationChannelClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleNotificationChannelClient_ListNotificationChannelDescriptors() { + ctx := context.Background() + c, err := monitoring.NewNotificationChannelClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.ListNotificationChannelDescriptorsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListNotificationChannelDescriptors(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleNotificationChannelClient_GetNotificationChannelDescriptor() { + ctx := context.Background() + c, err := monitoring.NewNotificationChannelClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.GetNotificationChannelDescriptorRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetNotificationChannelDescriptor(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} + +func ExampleNotificationChannelClient_ListNotificationChannels() { + ctx := context.Background() + c, err := monitoring.NewNotificationChannelClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.ListNotificationChannelsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListNotificationChannels(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleNotificationChannelClient_GetNotificationChannel() { + ctx := context.Background() + c, err := monitoring.NewNotificationChannelClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.GetNotificationChannelRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetNotificationChannel(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleNotificationChannelClient_CreateNotificationChannel() { + ctx := context.Background() + c, err := monitoring.NewNotificationChannelClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.CreateNotificationChannelRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateNotificationChannel(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleNotificationChannelClient_UpdateNotificationChannel() { + ctx := context.Background() + c, err := monitoring.NewNotificationChannelClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.UpdateNotificationChannelRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateNotificationChannel(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleNotificationChannelClient_DeleteNotificationChannel() { + ctx := context.Background() + c, err := monitoring.NewNotificationChannelClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.DeleteNotificationChannelRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteNotificationChannel(ctx, req) + if err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/old-news.md b/vendor/cloud.google.com/go/old-news.md index 0c8a224d0..a0bd83be9 100644 --- a/vendor/cloud.google.com/go/old-news.md +++ b/vendor/cloud.google.com/go/old-news.md @@ -1,3 +1,42 @@ +_October 30, 2017_ + +*v0.16.0* + +- Other bigquery changes: + - `JobIterator.Next` returns `*Job`; removed `JobInfo` (BREAKING CHANGE). + - UseStandardSQL is deprecated; set UseLegacySQL to true if you need + Legacy SQL. + - Uploader.Put will generate a random insert ID if you do not provide one. + - Support time partitioning for load jobs. + - Support dry-run queries. + - A `Job` remembers its last retrieved status. + - Support retrieving job configuration. + - Support labels for jobs and tables. + - Support dataset access lists. + - Improve support for external data sources, including data from Bigtable and + Google Sheets, and tables with external data. + - Support updating a table's view configuration. + - Fix uploading civil times with nanoseconds. + +- storage: + - Support PubSub notifications. + - Support Requester Pays buckets. + +- profiler: Support goroutine and mutex profile types. + + +_October 3, 2017_ + +*v0.15.0* + +- firestore: beta release. See the + [announcement](https://firebase.googleblog.com/2017/10/introducing-cloud-firestore.html). 
+ +- errorreporting: The existing package has been redesigned. + +- errors: This package has been removed. Use errorreporting. + + _September 28, 2017_ *v0.14.0* diff --git a/vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/dlp_client.go b/vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/dlp_client.go new file mode 100644 index 000000000..4c3282d68 --- /dev/null +++ b/vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/dlp_client.go @@ -0,0 +1,681 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package dlp + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. +type CallOptions struct { + InspectContent []gax.CallOption + RedactImage []gax.CallOption + DeidentifyContent []gax.CallOption + ReidentifyContent []gax.CallOption + InspectDataSource []gax.CallOption + AnalyzeDataSourceRisk []gax.CallOption + ListInfoTypes []gax.CallOption + CreateInspectTemplate []gax.CallOption + UpdateInspectTemplate []gax.CallOption + GetInspectTemplate []gax.CallOption + ListInspectTemplates []gax.CallOption + DeleteInspectTemplate []gax.CallOption + CreateDeidentifyTemplate []gax.CallOption + UpdateDeidentifyTemplate []gax.CallOption + GetDeidentifyTemplate []gax.CallOption + ListDeidentifyTemplates []gax.CallOption + DeleteDeidentifyTemplate []gax.CallOption + ListDlpJobs []gax.CallOption + GetDlpJob []gax.CallOption + DeleteDlpJob []gax.CallOption + CancelDlpJob []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("dlp.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &CallOptions{ + InspectContent: retry[[2]string{"default", "idempotent"}], + RedactImage: retry[[2]string{"default", "idempotent"}], + DeidentifyContent: retry[[2]string{"default", "idempotent"}], + ReidentifyContent: retry[[2]string{"default", "idempotent"}], + InspectDataSource: retry[[2]string{"default", "non_idempotent"}], + AnalyzeDataSourceRisk: retry[[2]string{"default", "non_idempotent"}], + ListInfoTypes: retry[[2]string{"default", "idempotent"}], + CreateInspectTemplate: retry[[2]string{"default", "non_idempotent"}], + UpdateInspectTemplate: retry[[2]string{"default", 
"non_idempotent"}], + GetInspectTemplate: retry[[2]string{"default", "idempotent"}], + ListInspectTemplates: retry[[2]string{"default", "idempotent"}], + DeleteInspectTemplate: retry[[2]string{"default", "idempotent"}], + CreateDeidentifyTemplate: retry[[2]string{"default", "non_idempotent"}], + UpdateDeidentifyTemplate: retry[[2]string{"default", "non_idempotent"}], + GetDeidentifyTemplate: retry[[2]string{"default", "idempotent"}], + ListDeidentifyTemplates: retry[[2]string{"default", "idempotent"}], + DeleteDeidentifyTemplate: retry[[2]string{"default", "idempotent"}], + ListDlpJobs: retry[[2]string{"default", "idempotent"}], + GetDlpJob: retry[[2]string{"default", "idempotent"}], + DeleteDlpJob: retry[[2]string{"default", "idempotent"}], + CancelDlpJob: retry[[2]string{"default", "non_idempotent"}], + } +} + +// Client is a client for interacting with DLP API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client dlppb.DlpServiceClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new dlp service client. +// +// The DLP API is a service that allows clients +// to detect the presence of Personally Identifiable Information (PII) and other +// privacy-sensitive data in user-supplied, unstructured data streams, like text +// blocks or images. +// The service also includes methods for sensitive data redaction and +// scheduling of data scans on Google Cloud Platform based data sets. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: dlppb.NewDlpServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// InspectContent finds potentially sensitive info in content. +// This method has limits on input size, processing time, and output size. +// How-to guide for text (at /dlp/docs/inspecting-text), How-to guide for +// images (at /dlp/docs/inspecting-images) +func (c *Client) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest, opts ...gax.CallOption) (*dlppb.InspectContentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.InspectContent[0:len(c.CallOptions.InspectContent):len(c.CallOptions.InspectContent)], opts...) 
+ var resp *dlppb.InspectContentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.InspectContent(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// RedactImage redacts potentially sensitive info from an image. +// This method has limits on input size, processing time, and output size. +// How-to guide (at /dlp/docs/redacting-sensitive-data-images) +func (c *Client) RedactImage(ctx context.Context, req *dlppb.RedactImageRequest, opts ...gax.CallOption) (*dlppb.RedactImageResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.RedactImage[0:len(c.CallOptions.RedactImage):len(c.CallOptions.RedactImage)], opts...) + var resp *dlppb.RedactImageResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.RedactImage(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeidentifyContent de-identifies potentially sensitive info from a ContentItem. +// This method has limits on input size and output size. +// How-to guide (at /dlp/docs/deidentify-sensitive-data) +func (c *Client) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest, opts ...gax.CallOption) (*dlppb.DeidentifyContentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeidentifyContent[0:len(c.CallOptions.DeidentifyContent):len(c.CallOptions.DeidentifyContent)], opts...) + var resp *dlppb.DeidentifyContentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.DeidentifyContent(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ReidentifyContent re-identify content that has been de-identified. +func (c *Client) ReidentifyContent(ctx context.Context, req *dlppb.ReidentifyContentRequest, opts ...gax.CallOption) (*dlppb.ReidentifyContentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ReidentifyContent[0:len(c.CallOptions.ReidentifyContent):len(c.CallOptions.ReidentifyContent)], opts...) + var resp *dlppb.ReidentifyContentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ReidentifyContent(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// InspectDataSource schedules a job scanning content in a Google Cloud Platform data +// repository. How-to guide (at /dlp/docs/inspecting-storage) +func (c *Client) InspectDataSource(ctx context.Context, req *dlppb.InspectDataSourceRequest, opts ...gax.CallOption) (*dlppb.DlpJob, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.InspectDataSource[0:len(c.CallOptions.InspectDataSource):len(c.CallOptions.InspectDataSource)], opts...) + var resp *dlppb.DlpJob + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.InspectDataSource(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +// AnalyzeDataSourceRisk schedules a job to compute risk analysis metrics over content in a Google +// Cloud Platform repository. [How-to guide}(/dlp/docs/compute-risk-analysis) +func (c *Client) AnalyzeDataSourceRisk(ctx context.Context, req *dlppb.AnalyzeDataSourceRiskRequest, opts ...gax.CallOption) (*dlppb.DlpJob, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnalyzeDataSourceRisk[0:len(c.CallOptions.AnalyzeDataSourceRisk):len(c.CallOptions.AnalyzeDataSourceRisk)], opts...) + var resp *dlppb.DlpJob + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnalyzeDataSourceRisk(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListInfoTypes returns sensitive information types DLP supports. +func (c *Client) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest, opts ...gax.CallOption) (*dlppb.ListInfoTypesResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListInfoTypes[0:len(c.CallOptions.ListInfoTypes):len(c.CallOptions.ListInfoTypes)], opts...) + var resp *dlppb.ListInfoTypesResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListInfoTypes(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateInspectTemplate creates an inspect template for re-using frequently used configuration +// for inspecting content, images, and storage. +func (c *Client) CreateInspectTemplate(ctx context.Context, req *dlppb.CreateInspectTemplateRequest, opts ...gax.CallOption) (*dlppb.InspectTemplate, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateInspectTemplate[0:len(c.CallOptions.CreateInspectTemplate):len(c.CallOptions.CreateInspectTemplate)], opts...) + var resp *dlppb.InspectTemplate + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateInspectTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateInspectTemplate updates the inspect template. +func (c *Client) UpdateInspectTemplate(ctx context.Context, req *dlppb.UpdateInspectTemplateRequest, opts ...gax.CallOption) (*dlppb.InspectTemplate, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateInspectTemplate[0:len(c.CallOptions.UpdateInspectTemplate):len(c.CallOptions.UpdateInspectTemplate)], opts...) + var resp *dlppb.InspectTemplate + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.UpdateInspectTemplate(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetInspectTemplate gets an inspect template. +func (c *Client) GetInspectTemplate(ctx context.Context, req *dlppb.GetInspectTemplateRequest, opts ...gax.CallOption) (*dlppb.InspectTemplate, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetInspectTemplate[0:len(c.CallOptions.GetInspectTemplate):len(c.CallOptions.GetInspectTemplate)], opts...) 
+	var resp *dlppb.InspectTemplate
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.GetInspectTemplate(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// ListInspectTemplates lists inspect templates.
+func (c *Client) ListInspectTemplates(ctx context.Context, req *dlppb.ListInspectTemplatesRequest, opts ...gax.CallOption) *InspectTemplateIterator {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.ListInspectTemplates[0:len(c.CallOptions.ListInspectTemplates):len(c.CallOptions.ListInspectTemplates)], opts...)
+	it := &InspectTemplateIterator{}
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.InspectTemplate, string, error) {
+		var resp *dlppb.ListInspectTemplatesResponse
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.client.ListInspectTemplates(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+		return resp.InspectTemplates, resp.NextPageToken, nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	return it
+}
+
+// DeleteInspectTemplate deletes an inspect template.
+func (c *Client) DeleteInspectTemplate(ctx context.Context, req *dlppb.DeleteInspectTemplateRequest, opts ...gax.CallOption) error {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.DeleteInspectTemplate[0:len(c.CallOptions.DeleteInspectTemplate):len(c.CallOptions.DeleteInspectTemplate)], opts...)
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		_, err = c.client.DeleteInspectTemplate(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	return err
+}
+
+// CreateDeidentifyTemplate creates a de-identify template for re-using frequently used configuration
+// for de-identifying content, images, and storage.
+func (c *Client) CreateDeidentifyTemplate(ctx context.Context, req *dlppb.CreateDeidentifyTemplateRequest, opts ...gax.CallOption) (*dlppb.DeidentifyTemplate, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.CreateDeidentifyTemplate[0:len(c.CallOptions.CreateDeidentifyTemplate):len(c.CallOptions.CreateDeidentifyTemplate)], opts...)
+	var resp *dlppb.DeidentifyTemplate
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.CreateDeidentifyTemplate(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// UpdateDeidentifyTemplate updates the de-identify template.
+func (c *Client) UpdateDeidentifyTemplate(ctx context.Context, req *dlppb.UpdateDeidentifyTemplateRequest, opts ...gax.CallOption) (*dlppb.DeidentifyTemplate, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.UpdateDeidentifyTemplate[0:len(c.CallOptions.UpdateDeidentifyTemplate):len(c.CallOptions.UpdateDeidentifyTemplate)], opts...)
+	var resp *dlppb.DeidentifyTemplate
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.UpdateDeidentifyTemplate(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// GetDeidentifyTemplate gets a de-identify template.
+func (c *Client) GetDeidentifyTemplate(ctx context.Context, req *dlppb.GetDeidentifyTemplateRequest, opts ...gax.CallOption) (*dlppb.DeidentifyTemplate, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.GetDeidentifyTemplate[0:len(c.CallOptions.GetDeidentifyTemplate):len(c.CallOptions.GetDeidentifyTemplate)], opts...)
+	var resp *dlppb.DeidentifyTemplate
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.GetDeidentifyTemplate(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// ListDeidentifyTemplates lists de-identify templates.
+func (c *Client) ListDeidentifyTemplates(ctx context.Context, req *dlppb.ListDeidentifyTemplatesRequest, opts ...gax.CallOption) *DeidentifyTemplateIterator {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.ListDeidentifyTemplates[0:len(c.CallOptions.ListDeidentifyTemplates):len(c.CallOptions.ListDeidentifyTemplates)], opts...)
+	it := &DeidentifyTemplateIterator{}
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.DeidentifyTemplate, string, error) {
+		var resp *dlppb.ListDeidentifyTemplatesResponse
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.client.ListDeidentifyTemplates(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+		return resp.DeidentifyTemplates, resp.NextPageToken, nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	return it
+}
+
+// DeleteDeidentifyTemplate deletes a de-identify template.
+func (c *Client) DeleteDeidentifyTemplate(ctx context.Context, req *dlppb.DeleteDeidentifyTemplateRequest, opts ...gax.CallOption) error {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.DeleteDeidentifyTemplate[0:len(c.CallOptions.DeleteDeidentifyTemplate):len(c.CallOptions.DeleteDeidentifyTemplate)], opts...)
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		_, err = c.client.DeleteDeidentifyTemplate(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	return err
+}
+
+// ListDlpJobs lists DlpJobs that match the specified filter in the request.
+func (c *Client) ListDlpJobs(ctx context.Context, req *dlppb.ListDlpJobsRequest, opts ...gax.CallOption) *DlpJobIterator {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.ListDlpJobs[0:len(c.CallOptions.ListDlpJobs):len(c.CallOptions.ListDlpJobs)], opts...)
+ it := &DlpJobIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.DlpJob, string, error) { + var resp *dlppb.ListDlpJobsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListDlpJobs(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Jobs, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetDlpJob gets the latest state of a long-running DlpJob. +func (c *Client) GetDlpJob(ctx context.Context, req *dlppb.GetDlpJobRequest, opts ...gax.CallOption) (*dlppb.DlpJob, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetDlpJob[0:len(c.CallOptions.GetDlpJob):len(c.CallOptions.GetDlpJob)], opts...) + var resp *dlppb.DlpJob + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetDlpJob(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteDlpJob deletes a long-running DlpJob. This method indicates that the client is +// no longer interested in the DlpJob result. The job will be cancelled if +// possible. +func (c *Client) DeleteDlpJob(ctx context.Context, req *dlppb.DeleteDlpJobRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteDlpJob[0:len(c.CallOptions.DeleteDlpJob):len(c.CallOptions.DeleteDlpJob)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteDlpJob(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// CancelDlpJob starts asynchronous cancellation on a long-running DlpJob. The server +// makes a best effort to cancel the DlpJob, but success is not +// guaranteed. +func (c *Client) CancelDlpJob(ctx context.Context, req *dlppb.CancelDlpJobRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CancelDlpJob[0:len(c.CallOptions.CancelDlpJob):len(c.CallOptions.CancelDlpJob)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.CancelDlpJob(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// DeidentifyTemplateIterator manages a stream of *dlppb.DeidentifyTemplate. +type DeidentifyTemplateIterator struct { + items []*dlppb.DeidentifyTemplate + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. 
+ InternalFetch func(pageSize int, pageToken string) (results []*dlppb.DeidentifyTemplate, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *DeidentifyTemplateIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *DeidentifyTemplateIterator) Next() (*dlppb.DeidentifyTemplate, error) { + var item *dlppb.DeidentifyTemplate + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *DeidentifyTemplateIterator) bufLen() int { + return len(it.items) +} + +func (it *DeidentifyTemplateIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// DlpJobIterator manages a stream of *dlppb.DlpJob. +type DlpJobIterator struct { + items []*dlppb.DlpJob + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*dlppb.DlpJob, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *DlpJobIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *DlpJobIterator) Next() (*dlppb.DlpJob, error) { + var item *dlppb.DlpJob + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *DlpJobIterator) bufLen() int { + return len(it.items) +} + +func (it *DlpJobIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// InspectTemplateIterator manages a stream of *dlppb.InspectTemplate. +type InspectTemplateIterator struct { + items []*dlppb.InspectTemplate + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*dlppb.InspectTemplate, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *InspectTemplateIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *InspectTemplateIterator) Next() (*dlppb.InspectTemplate, error) { + var item *dlppb.InspectTemplate + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *InspectTemplateIterator) bufLen() int { + return len(it.items) +} + +func (it *InspectTemplateIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/dlp_client_example_test.go b/vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/dlp_client_example_test.go new file mode 100644 index 000000000..17527d7a6 --- /dev/null +++ b/vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/dlp_client_example_test.go @@ -0,0 +1,422 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package dlp_test + +import ( + "cloud.google.com/go/privacy/dlp/apiv2beta2" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta2" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_InspectContent() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.InspectContentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.InspectContent(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_RedactImage() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.RedactImageRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.RedactImage(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_DeidentifyContent() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.DeidentifyContentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.DeidentifyContent(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ReidentifyContent() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ReidentifyContentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ReidentifyContent(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_InspectDataSource() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.InspectDataSourceRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.InspectDataSource(ctx, req) + if err != nil { + // TODO: Handle error. 
+ } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AnalyzeDataSourceRisk() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.AnalyzeDataSourceRiskRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnalyzeDataSourceRisk(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListInfoTypes() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ListInfoTypesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListInfoTypes(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_CreateInspectTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.CreateInspectTemplateRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateInspectTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_UpdateInspectTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.UpdateInspectTemplateRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateInspectTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_GetInspectTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.GetInspectTemplateRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetInspectTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListInspectTemplates() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ListInspectTemplatesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListInspectTemplates(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_DeleteInspectTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.DeleteInspectTemplateRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteInspectTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_CreateDeidentifyTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.CreateDeidentifyTemplateRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateDeidentifyTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_UpdateDeidentifyTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.UpdateDeidentifyTemplateRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateDeidentifyTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} + +func ExampleClient_GetDeidentifyTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.GetDeidentifyTemplateRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetDeidentifyTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListDeidentifyTemplates() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ListDeidentifyTemplatesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListDeidentifyTemplates(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_DeleteDeidentifyTemplate() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.DeleteDeidentifyTemplateRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteDeidentifyTemplate(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_ListDlpJobs() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ListDlpJobsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListDlpJobs(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_GetDlpJob() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.GetDlpJobRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetDlpJob(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_DeleteDlpJob() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.DeleteDlpJobRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteDlpJob(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_CancelDlpJob() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.CancelDlpJobRequest{ + // TODO: Fill request struct fields. + } + err = c.CancelDlpJob(ctx, req) + if err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/doc.go b/vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/doc.go new file mode 100644 index 000000000..43eb41e42 --- /dev/null +++ b/vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/doc.go @@ -0,0 +1,48 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package dlp is an auto-generated package for the +// DLP API. 
+// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// The Google Data Loss Prevention API provides methods for detection of +// privacy-sensitive fragments in text, images, and Google Cloud Platform +// storage repositories. +package dlp // import "cloud.google.com/go/privacy/dlp/apiv2beta2" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/mock_test.go b/vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/mock_test.go new file mode 100644 index 000000000..64517378a --- /dev/null +++ b/vendor/cloud.google.com/go/privacy/dlp/apiv2beta2/mock_test.go @@ -0,0 +1,1596 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package dlp + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta2" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockDlpServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + dlppb.DlpServiceServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockDlpServer) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest) (*dlppb.InspectContentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.InspectContentResponse), nil +} + +func (s *mockDlpServer) RedactImage(ctx context.Context, req *dlppb.RedactImageRequest) (*dlppb.RedactImageResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.RedactImageResponse), nil +} + +func (s *mockDlpServer) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest) (*dlppb.DeidentifyContentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DeidentifyContentResponse), nil +} + +func (s *mockDlpServer) ReidentifyContent(ctx context.Context, req *dlppb.ReidentifyContentRequest) (*dlppb.ReidentifyContentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ReidentifyContentResponse), nil +} + +func (s *mockDlpServer) InspectDataSource(ctx context.Context, req *dlppb.InspectDataSourceRequest) (*dlppb.DlpJob, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DlpJob), nil +} + +func (s *mockDlpServer) AnalyzeDataSourceRisk(ctx context.Context, req *dlppb.AnalyzeDataSourceRiskRequest) (*dlppb.DlpJob, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DlpJob), nil +} + +func (s *mockDlpServer) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest) (*dlppb.ListInfoTypesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ListInfoTypesResponse), nil +} + +func (s *mockDlpServer) CreateInspectTemplate(ctx context.Context, req 
*dlppb.CreateInspectTemplateRequest) (*dlppb.InspectTemplate, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.InspectTemplate), nil +} + +func (s *mockDlpServer) UpdateInspectTemplate(ctx context.Context, req *dlppb.UpdateInspectTemplateRequest) (*dlppb.InspectTemplate, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.InspectTemplate), nil +} + +func (s *mockDlpServer) GetInspectTemplate(ctx context.Context, req *dlppb.GetInspectTemplateRequest) (*dlppb.InspectTemplate, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.InspectTemplate), nil +} + +func (s *mockDlpServer) ListInspectTemplates(ctx context.Context, req *dlppb.ListInspectTemplatesRequest) (*dlppb.ListInspectTemplatesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ListInspectTemplatesResponse), nil +} + +func (s *mockDlpServer) DeleteInspectTemplate(ctx context.Context, req *dlppb.DeleteInspectTemplateRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockDlpServer) CreateDeidentifyTemplate(ctx context.Context, req *dlppb.CreateDeidentifyTemplateRequest) (*dlppb.DeidentifyTemplate, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DeidentifyTemplate), nil +} + +func (s *mockDlpServer) UpdateDeidentifyTemplate(ctx context.Context, req *dlppb.UpdateDeidentifyTemplateRequest) (*dlppb.DeidentifyTemplate, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DeidentifyTemplate), nil +} + +func (s *mockDlpServer) GetDeidentifyTemplate(ctx context.Context, req *dlppb.GetDeidentifyTemplateRequest) (*dlppb.DeidentifyTemplate, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg 
:= md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DeidentifyTemplate), nil +} + +func (s *mockDlpServer) ListDeidentifyTemplates(ctx context.Context, req *dlppb.ListDeidentifyTemplatesRequest) (*dlppb.ListDeidentifyTemplatesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ListDeidentifyTemplatesResponse), nil +} + +func (s *mockDlpServer) DeleteDeidentifyTemplate(ctx context.Context, req *dlppb.DeleteDeidentifyTemplateRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockDlpServer) ListDlpJobs(ctx context.Context, req *dlppb.ListDlpJobsRequest) (*dlppb.ListDlpJobsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ListDlpJobsResponse), nil +} + +func (s *mockDlpServer) GetDlpJob(ctx context.Context, req *dlppb.GetDlpJobRequest) (*dlppb.DlpJob, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DlpJob), nil +} + +func (s *mockDlpServer) DeleteDlpJob(ctx context.Context, req *dlppb.DeleteDlpJobRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockDlpServer) CancelDlpJob(ctx context.Context, req *dlppb.CancelDlpJobRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
+var clientOpt option.ClientOption + +var ( + mockDlp mockDlpServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + dlppb.RegisterDlpServiceServer(serv, &mockDlp) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestDlpServiceInspectContent(t *testing.T) { + var expectedResponse *dlppb.InspectContentResponse = &dlppb.InspectContentResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.InspectContentRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.InspectContent(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceInspectContentError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.InspectContentRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.InspectContent(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceRedactImage(t *testing.T) { + var redactedImage []byte = []byte("28") + var extractedText string = "extractedText998260012" + var expectedResponse = &dlppb.RedactImageResponse{ + RedactedImage: redactedImage, + ExtractedText: extractedText, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.RedactImageRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.RedactImage(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceRedactImageError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.RedactImageRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.RedactImage(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != 
errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceDeidentifyContent(t *testing.T) { + var expectedResponse *dlppb.DeidentifyContentResponse = &dlppb.DeidentifyContentResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.DeidentifyContentRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.DeidentifyContent(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceDeidentifyContentError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.DeidentifyContentRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.DeidentifyContent(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceReidentifyContent(t *testing.T) { + var expectedResponse *dlppb.ReidentifyContentResponse = &dlppb.ReidentifyContentResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.ReidentifyContentRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ReidentifyContent(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceReidentifyContentError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.ReidentifyContentRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ReidentifyContent(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceInspectDataSource(t *testing.T) { + var name string = "name3373707" + var expectedResponse = &dlppb.DlpJob{ + Name: name, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.InspectDataSourceRequest{ + Parent: 
formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.InspectDataSource(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceInspectDataSourceError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.InspectDataSourceRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.InspectDataSource(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceAnalyzeDataSourceRisk(t *testing.T) { + var name string = "name3373707" + var expectedResponse = &dlppb.DlpJob{ + Name: name, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.AnalyzeDataSourceRiskRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeDataSourceRisk(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceAnalyzeDataSourceRiskError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.AnalyzeDataSourceRiskRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeDataSourceRisk(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceListInfoTypes(t *testing.T) { + var expectedResponse *dlppb.ListInfoTypesResponse = &dlppb.ListInfoTypesResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var request *dlppb.ListInfoTypesRequest = &dlppb.ListInfoTypesRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInfoTypes(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceListInfoTypesError(t *testing.T) { 
+ errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var request *dlppb.ListInfoTypesRequest = &dlppb.ListInfoTypesRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInfoTypes(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceCreateInspectTemplate(t *testing.T) { + var name string = "name3373707" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.InspectTemplate{ + Name: name, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.CreateInspectTemplateRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateInspectTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceCreateInspectTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.CreateInspectTemplateRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateInspectTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceUpdateInspectTemplate(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.InspectTemplate{ + Name: name2, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("organizations/%s/inspectTemplates/%s", "[ORGANIZATION]", "[INSPECT_TEMPLATE]") + var request = &dlppb.UpdateInspectTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateInspectTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceUpdateInspectTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var 
formattedName string = fmt.Sprintf("organizations/%s/inspectTemplates/%s", "[ORGANIZATION]", "[INSPECT_TEMPLATE]") + var request = &dlppb.UpdateInspectTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateInspectTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceGetInspectTemplate(t *testing.T) { + var name string = "name3373707" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.InspectTemplate{ + Name: name, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var request *dlppb.GetInspectTemplateRequest = &dlppb.GetInspectTemplateRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetInspectTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceGetInspectTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var request *dlppb.GetInspectTemplateRequest = &dlppb.GetInspectTemplateRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetInspectTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceListInspectTemplates(t *testing.T) { + var nextPageToken string = "" + var inspectTemplatesElement *dlppb.InspectTemplate = &dlppb.InspectTemplate{} + var inspectTemplates = []*dlppb.InspectTemplate{inspectTemplatesElement} + var expectedResponse = &dlppb.ListInspectTemplatesResponse{ + NextPageToken: nextPageToken, + InspectTemplates: inspectTemplates, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.ListInspectTemplatesRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInspectTemplates(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.InspectTemplates[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceListInspectTemplatesError(t *testing.T) { + errCode := 
codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.ListInspectTemplatesRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInspectTemplates(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceDeleteInspectTemplate(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("organizations/%s/inspectTemplates/%s", "[ORGANIZATION]", "[INSPECT_TEMPLATE]") + var request = &dlppb.DeleteInspectTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteInspectTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDlpServiceDeleteInspectTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("organizations/%s/inspectTemplates/%s", "[ORGANIZATION]", "[INSPECT_TEMPLATE]") + var request = &dlppb.DeleteInspectTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteInspectTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDlpServiceCreateDeidentifyTemplate(t *testing.T) { + var name string = "name3373707" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.DeidentifyTemplate{ + Name: name, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.CreateDeidentifyTemplateRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateDeidentifyTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceCreateDeidentifyTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.CreateDeidentifyTemplateRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + 
if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateDeidentifyTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceUpdateDeidentifyTemplate(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.DeidentifyTemplate{ + Name: name2, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]") + var request = &dlppb.UpdateDeidentifyTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateDeidentifyTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceUpdateDeidentifyTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]") + var request = &dlppb.UpdateDeidentifyTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateDeidentifyTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceGetDeidentifyTemplate(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &dlppb.DeidentifyTemplate{ + Name: name2, + DisplayName: displayName, + Description: description, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]") + var request = &dlppb.GetDeidentifyTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDeidentifyTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceGetDeidentifyTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", 
"[DEIDENTIFY_TEMPLATE]") + var request = &dlppb.GetDeidentifyTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDeidentifyTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceListDeidentifyTemplates(t *testing.T) { + var nextPageToken string = "" + var deidentifyTemplatesElement *dlppb.DeidentifyTemplate = &dlppb.DeidentifyTemplate{} + var deidentifyTemplates = []*dlppb.DeidentifyTemplate{deidentifyTemplatesElement} + var expectedResponse = &dlppb.ListDeidentifyTemplatesResponse{ + NextPageToken: nextPageToken, + DeidentifyTemplates: deidentifyTemplates, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.ListDeidentifyTemplatesRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDeidentifyTemplates(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.DeidentifyTemplates[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceListDeidentifyTemplatesError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("organizations/%s", "[ORGANIZATION]") + var request = &dlppb.ListDeidentifyTemplatesRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDeidentifyTemplates(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceDeleteDeidentifyTemplate(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]") + var request = &dlppb.DeleteDeidentifyTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteDeidentifyTemplate(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDlpServiceDeleteDeidentifyTemplateError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = 
fmt.Sprintf("organizations/%s/deidentifyTemplates/%s", "[ORGANIZATION]", "[DEIDENTIFY_TEMPLATE]") + var request = &dlppb.DeleteDeidentifyTemplateRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteDeidentifyTemplate(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDlpServiceListDlpJobs(t *testing.T) { + var nextPageToken string = "" + var jobsElement *dlppb.DlpJob = &dlppb.DlpJob{} + var jobs = []*dlppb.DlpJob{jobsElement} + var expectedResponse = &dlppb.ListDlpJobsResponse{ + NextPageToken: nextPageToken, + Jobs: jobs, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.ListDlpJobsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDlpJobs(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Jobs[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceListDlpJobsError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]") + var request = &dlppb.ListDlpJobsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDlpJobs(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceGetDlpJob(t *testing.T) { + var name2 string = "name2-1052831874" + var expectedResponse = &dlppb.DlpJob{ + Name: name2, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]") + var request = &dlppb.GetDlpJobRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDlpJob(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceGetDlpJobError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]") + var request = &dlppb.GetDlpJobRequest{ + Name: formattedName, + } + + c, 
err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDlpJob(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceDeleteDlpJob(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]") + var request = &dlppb.DeleteDlpJobRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteDlpJob(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDlpServiceDeleteDlpJobError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]") + var request = &dlppb.DeleteDlpJobRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteDlpJob(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDlpServiceCancelDlpJob(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]") + var request = &dlppb.CancelDlpJobRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.CancelDlpJob(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDlpServiceCancelDlpJobError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = fmt.Sprintf("projects/%s/dlpJobs/%s", "[PROJECT]", "[DLP_JOB]") + var request = &dlppb.CancelDlpJobRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.CancelDlpJob(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} diff --git a/vendor/cloud.google.com/go/profiler/busybench/busybench.go b/vendor/cloud.google.com/go/profiler/busybench/busybench.go index 03b9589df..44d25b1e2 100644 --- a/vendor/cloud.google.com/go/profiler/busybench/busybench.go +++ b/vendor/cloud.google.com/go/profiler/busybench/busybench.go @@ -28,14 +28,14 @@ import ( var ( service = flag.String("service", "", "service name") mutexProfiling = flag.Bool("mutex_profiling", false, "enable mutex profiling") + 
duration = flag.Int("duration", 600, "duration of the benchmark in seconds") + apiAddr = flag.String("api_address", "", "API address of the profiler (e.g. 'cloudprofiler.googleapis.com:443')") ) -const duration = time.Minute * 10 - // busywork continuously generates 1MiB of random data and compresses it // throwing away the result. func busywork(mu *sync.Mutex) { - ticker := time.NewTicker(duration) + ticker := time.NewTicker(time.Duration(*duration) * time.Second) defer ticker.Stop() for { select { @@ -79,6 +79,7 @@ func main() { Service: *service, MutexProfiling: *mutexProfiling, DebugLogging: true, + APIAddr: *apiAddr, }); err != nil { log.Printf("Failed to start the profiler: %v", err) } else { diff --git a/vendor/cloud.google.com/go/profiler/integration-test.sh b/vendor/cloud.google.com/go/profiler/integration-test.sh index 928a82530..15772d497 100644 --- a/vendor/cloud.google.com/go/profiler/integration-test.sh +++ b/vendor/cloud.google.com/go/profiler/integration-test.sh @@ -7,19 +7,6 @@ set -eo pipefail set -x cd git/gocloud - -# Run test only if profiler directory is touched. -profiler_test=false -for f in $(git diff-tree --no-commit-id --name-only -r HEAD); do - if [[ "$(dirname $f)" == "profiler" ]]; then - profiler_test=true - fi -done - -if [[ "$profiler_test" = false ]]; then - exit 0 -fi - COMMIT=$(git rev-parse HEAD) # Set $GOPATH diff --git a/vendor/cloud.google.com/go/profiler/integration_test.go b/vendor/cloud.google.com/go/profiler/integration_test.go index d874a0ebf..dd31f4dc4 100644 --- a/vendor/cloud.google.com/go/profiler/integration_test.go +++ b/vendor/cloud.google.com/go/profiler/integration_test.go @@ -17,30 +17,18 @@ package profiler import ( - "archive/zip" "bytes" - "encoding/json" "flag" "fmt" - "io/ioutil" - "log" - "net/http" "os" - "strings" "testing" "text/template" "time" - "cloud.google.com/go/storage" - "golang.org/x/build/kubernetes" - k8sapi "golang.org/x/build/kubernetes/api" - "golang.org/x/build/kubernetes/gke" + "cloud.google.com/go/profiler/proftest" "golang.org/x/net/context" "golang.org/x/oauth2/google" - cloudbuild "google.golang.org/api/cloudbuild/v1" compute "google.golang.org/api/compute/v1" - container "google.golang.org/api/container/v1" - "google.golang.org/api/googleapi" ) var ( @@ -50,15 +38,16 @@ var ( const ( cloudScope = "https://www.googleapis.com/auth/cloud-platform" - monitorWriteScope = "https://www.googleapis.com/auth/monitoring.write" - storageReadScope = "https://www.googleapis.com/auth/devstorage.read_only" - // benchFinishString should keep in sync with the finish string in busybench. benchFinishString = "busybench finished profiling" ) const startupTemplate = ` #! /bin/bash +# Shut down the VM in 5 minutes after this script exits +# to stop accounting the VM for billing and cores quota. +trap "sleep 300 && poweroff" EXIT + # Fail on any error. 
set -eo pipefail @@ -66,8 +55,8 @@ set -eo pipefail set -x # Install git -sudo apt-get update -sudo apt-get -y -q install git-all +apt-get update +apt-get -y -q install git-all # Install desired Go version mkdir -p /tmp/bin @@ -101,113 +90,15 @@ RUN git clone https://code.googlesource.com/gocloud /go/src/cloud.google.com/go CMD ["busybench", "--service", "%s"] ` -type testRunner struct { - client *http.Client - startupTemplate *template.Template - containerService *container.Service - computeService *compute.Service - storageClient *storage.Client +type goGCETestCase struct { + proftest.InstanceConfig + name string + goVersion string + mutexProfiling bool + wantProfileTypes []string } -type profileResponse struct { - Profile profileData `json:"profile"` - NumProfiles int32 `json:"numProfiles"` - Deployments []interface{} `json:"deployments"` -} - -type profileData struct { - Samples []int32 `json:"samples"` - SampleMetrics interface{} `json:"sampleMetrics"` - DefaultMetricType string `json:"defaultMetricType"` - TreeNodes interface{} `json:"treeNodes"` - Functions functionArray `json:"functions"` - SourceFiles interface{} `json:"sourceFiles"` -} - -type functionArray struct { - Name []string `json:"name"` - Sourcefile []int32 `json:"sourceFile"` -} - -func validateProfileData(rawData []byte, wantFunctionName string) error { - var pr profileResponse - if err := json.Unmarshal(rawData, &pr); err != nil { - return err - } - - if pr.NumProfiles == 0 { - return fmt.Errorf("profile response contains zero profiles: %v", pr) - } - - if len(pr.Deployments) == 0 { - return fmt.Errorf("profile response contains zero deployments: %v", pr) - } - - if len(pr.Profile.Functions.Name) == 0 { - return fmt.Errorf("profile does not have function data") - } - - for _, name := range pr.Profile.Functions.Name { - if strings.Contains(name, wantFunctionName) { - return nil - } - } - return fmt.Errorf("wanted function name %s not found in profile", wantFunctionName) -} - -type instanceConfig struct { - name string - service string - goVersion string - mutexProfiling bool -} - -func newInstanceConfigs() []instanceConfig { - return []instanceConfig{ - { - name: fmt.Sprintf("profiler-test-go19-%d", runID), - service: fmt.Sprintf("profiler-test-go19-%d-gce", runID), - goVersion: "1.9", - mutexProfiling: true, - }, - { - name: fmt.Sprintf("profiler-test-go18-%d", runID), - service: fmt.Sprintf("profiler-test-go18-%d-gce", runID), - goVersion: "1.8", - mutexProfiling: true, - }, - { - name: fmt.Sprintf("profiler-test-go17-%d", runID), - service: fmt.Sprintf("profiler-test-go17-%d-gce", runID), - goVersion: "1.7", - }, - { - name: fmt.Sprintf("profiler-test-go16-%d", runID), - service: fmt.Sprintf("profiler-test-go16-%d-gce", runID), - goVersion: "1.6", - }, - } -} - -type clusterConfig struct { - clusterName string - podName string - imageSourceName string - imageName string - service string -} - -func newClusterConfig(projectID string) clusterConfig { - return clusterConfig{ - clusterName: fmt.Sprintf("profiler-test-cluster-%d", runID), - podName: fmt.Sprintf("profiler-test-pod-%d", runID), - imageSourceName: fmt.Sprintf("profiler-test/%d/Dockerfile.zip", runID), - imageName: fmt.Sprintf("%s/profiler-test-%d", projectID, runID), - service: fmt.Sprintf("profiler-test-%d-gke", runID), - } -} - -func renderStartupScript(template *template.Template, inst instanceConfig) (string, error) { +func (tc *goGCETestCase) initializeStartupScript(template *template.Template) error { var buf bytes.Buffer err := template.Execute(&buf, 
struct { @@ -216,419 +107,18 @@ func renderStartupScript(template *template.Template, inst instanceConfig) (stri Commit string MutexProfiling bool }{ - Service: inst.service, - GoVersion: inst.goVersion, + Service: tc.name, + GoVersion: tc.goVersion, Commit: *commit, - MutexProfiling: inst.mutexProfiling, + MutexProfiling: tc.mutexProfiling, }) if err != nil { - return "", fmt.Errorf("failed to render startup script for %s: %v", inst.name, err) + return fmt.Errorf("failed to render startup script for %s: %v", tc.name, err) } - - return buf.String(), nil -} - -func (tr *testRunner) startInstance(ctx context.Context, inst instanceConfig, projectID, zone string) error { - img, err := tr.computeService.Images.GetFromFamily("debian-cloud", "debian-9").Context(ctx).Do() - if err != nil { - return err - } - - startupScript, err := renderStartupScript(tr.startupTemplate, inst) - if err != nil { - return err - } - - _, err = tr.computeService.Instances.Insert(projectID, zone, &compute.Instance{ - MachineType: fmt.Sprintf("zones/%s/machineTypes/n1-standard-1", zone), - Name: inst.name, - Disks: []*compute.AttachedDisk{{ - AutoDelete: true, // delete the disk when the VM is deleted. - Boot: true, - Type: "PERSISTENT", - Mode: "READ_WRITE", - InitializeParams: &compute.AttachedDiskInitializeParams{ - SourceImage: img.SelfLink, - DiskType: fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/diskTypes/pd-standard", projectID, zone), - }, - }}, - NetworkInterfaces: []*compute.NetworkInterface{{ - Network: fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/networks/default", projectID), - AccessConfigs: []*compute.AccessConfig{{ - Name: "External NAT", - }}, - }}, - Metadata: &compute.Metadata{ - Items: []*compute.MetadataItems{{ - Key: "startup-script", - Value: googleapi.String(startupScript), - }}, - }, - ServiceAccounts: []*compute.ServiceAccount{{ - Email: "default", - Scopes: []string{ - monitorWriteScope, - }, - }}, - }).Do() - - return err -} - -func (tr *testRunner) pollForSerialOutput(ctx context.Context, projectID, zone, instanceName string) error { - var output string - defer func() { - log.Printf("Serial port output for %s:\n%s", instanceName, output) - }() - - for { - select { - case <-ctx.Done(): - return fmt.Errorf("timed out waiting for profiling finishing on instance %s", instanceName) - - case <-time.After(20 * time.Second): - resp, err := tr.computeService.Instances.GetSerialPortOutput(projectID, zone, instanceName).Context(ctx).Do() - if err != nil { - // Transient failure. 
- log.Printf("Transient error getting serial port output from instance %s (will retry): %v", instanceName, err) - continue - } - - if output = resp.Contents; strings.Contains(output, benchFinishString) { - return nil - } - } - } -} - -func (tr *testRunner) queryAndCheckProfile(service, startTime, endTime, profileType, projectID string) error { - queryURL := fmt.Sprintf("https://cloudprofiler.googleapis.com/v2/projects/%s/profiles:query", projectID) - const queryJsonFmt = `{"endTime": "%s", "profileType": "%s","startTime": "%s", "target": "%s"}` - - queryRequest := fmt.Sprintf(queryJsonFmt, endTime, profileType, startTime, service) - - resp, err := tr.client.Post(queryURL, "application/json", strings.NewReader(queryRequest)) - if err != nil { - return fmt.Errorf("failed to query API: %v", err) - } - defer resp.Body.Close() - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return fmt.Errorf("failed to read response body: %v", err) - } - - if err := validateProfileData(body, "busywork"); err != nil { - return fmt.Errorf("failed to validate profile %v", err) - } - + tc.StartupScript = buf.String() return nil } -func (tr *testRunner) runTestOnGCE(ctx context.Context, t *testing.T, inst instanceConfig, projectID, zone string) { - if err := tr.startInstance(ctx, inst, projectID, zone); err != nil { - t.Fatalf("startInstance(%s) got error: %v", inst.name, err) - } - defer func() { - if _, err := tr.computeService.Instances.Delete(projectID, zone, inst.name).Context(ctx).Do(); err != nil { - t.Errorf("Instances.Delete(%s) got error: %v", inst.name, err) - } - }() - - timeoutCtx, cancel := context.WithTimeout(ctx, time.Minute*25) - defer cancel() - if err := tr.pollForSerialOutput(timeoutCtx, projectID, zone, inst.name); err != nil { - t.Fatalf("pollForSerialOutput(%s) got error: %v", inst.name, err) - } - - timeNow := time.Now() - endTime := timeNow.Format(time.RFC3339) - startTime := timeNow.Add(-1 * time.Hour).Format(time.RFC3339) - - profileTypes := []string{"CPU", "HEAP", "THREADS"} - if inst.mutexProfiling { - profileTypes = append(profileTypes, "CONTENTION") - } - for _, pType := range profileTypes { - if err := tr.queryAndCheckProfile(inst.service, startTime, endTime, pType, projectID); err != nil { - t.Errorf("queryAndCheckProfile(%s, %s, %s, %s) got error: %v", inst.service, startTime, endTime, pType, err) - } - } -} - -// createAndPublishDockerImage creates a docker image from source code in a GCS -// bucket and pushes the image to Google Container Registry. -func (tr *testRunner) createAndPublishDockerImage(ctx context.Context, projectID, sourceBucket, sourceObject, imageName string) error { - cloudbuildService, err := cloudbuild.New(tr.client) - - build := &cloudbuild.Build{ - Source: &cloudbuild.Source{ - StorageSource: &cloudbuild.StorageSource{ - Bucket: sourceBucket, - Object: sourceObject, - }, - }, - Steps: []*cloudbuild.BuildStep{ - { - Name: "gcr.io/cloud-builders/docker", - Args: []string{"build", "-t", imageName, "."}, - }, - }, - Images: []string{imageName}, - } - - op, err := cloudbuildService.Projects.Builds.Create(projectID, build).Context(ctx).Do() - if err != nil { - return fmt.Errorf("failed to create image: %v", err) - } - opID := op.Name - - // Wait for creating image. 
- for { - select { - case <-ctx.Done(): - return fmt.Errorf("timed out waiting creating image") - - case <-time.After(10 * time.Second): - op, err := cloudbuildService.Operations.Get(opID).Context(ctx).Do() - if err != nil { - log.Printf("Transient error getting operation (will retry): %v", err) - break - } - if op.Done == true { - log.Printf("Published image %s to Google Container Registry.", imageName) - return nil - } - } - } -} - -type imageResponse struct { - Manifest map[string]interface{} `json:"manifest"` - Name string `json:"name"` - Tags []string `json:"tags"` -} - -// deleteDockerImage deletes a docker image from Google Container Registry. -func (tr *testRunner) deleteDockerImage(ctx context.Context, imageName string) []error { - queryImageURL := fmt.Sprintf("https://gcr.io/v2/%s/tags/list", imageName) - resp, err := tr.client.Get(queryImageURL) - if err != nil { - return []error{fmt.Errorf("failed to list tags: %v", err)} - } - defer resp.Body.Close() - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return []error{err} - } - var ir imageResponse - if err := json.Unmarshal(body, &ir); err != nil { - return []error{err} - } - - const deleteImageURLFmt = "https://gcr.io/v2/%s/manifests/%s" - var errs []error - for _, tag := range ir.Tags { - if err := deleteDockerImageResource(tr.client, fmt.Sprintf(deleteImageURLFmt, imageName, tag)); err != nil { - errs = append(errs, fmt.Errorf("failed to delete tag %s: %v", tag, err)) - } - } - - for manifest := range ir.Manifest { - if err := deleteDockerImageResource(tr.client, fmt.Sprintf(deleteImageURLFmt, imageName, manifest)); err != nil { - errs = append(errs, fmt.Errorf("failed to delete manifest %s: %v", manifest, err)) - } - } - return errs -} - -func deleteDockerImageResource(client *http.Client, url string) error { - req, err := http.NewRequest("DELETE", url, nil) - if err != nil { - return fmt.Errorf("failed to get request: %v", err) - } - resp, err := client.Do(req) - if err != nil { - return fmt.Errorf("failed to delete resource: %v", err) - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted { - return fmt.Errorf("failed to delete resource: status code = %d", resp.StatusCode) - } - return nil -} - -func (tr *testRunner) createCluster(ctx context.Context, client *http.Client, projectID, zone, clusterName string) error { - request := &container.CreateClusterRequest{Cluster: &container.Cluster{ - Name: clusterName, - InitialNodeCount: 3, - NodeConfig: &container.NodeConfig{ - OauthScopes: []string{ - storageReadScope, - }, - }, - }} - op, err := tr.containerService.Projects.Zones.Clusters.Create(projectID, zone, request).Context(ctx).Do() - if err != nil { - return fmt.Errorf("failed to create cluster %s: %v", clusterName, err) - } - opID := op.Name - - // Wait for creating cluster. 
- for { - select { - case <-ctx.Done(): - return fmt.Errorf("timed out waiting creating cluster") - - case <-time.After(10 * time.Second): - op, err := tr.containerService.Projects.Zones.Operations.Get(projectID, zone, opID).Context(ctx).Do() - if err != nil { - log.Printf("Transient error getting operation (will retry): %v", err) - break - } - if op.Status == "DONE" { - log.Printf("Created cluster %s.", clusterName) - return nil - } - if op.Status == "ABORTING" { - return fmt.Errorf("create cluster operation is aborted") - } - } - } -} - -func (tr *testRunner) deployContainer(ctx context.Context, kubernetesClient *kubernetes.Client, podName, imageName string) error { - pod := &k8sapi.Pod{ - ObjectMeta: k8sapi.ObjectMeta{ - Name: podName, - }, - Spec: k8sapi.PodSpec{ - Containers: []k8sapi.Container{ - { - Name: "profiler-test", - Image: fmt.Sprintf("gcr.io/%s:latest", imageName), - }, - }, - }, - } - if _, err := kubernetesClient.RunLongLivedPod(ctx, pod); err != nil { - return fmt.Errorf("failed to run pod %s: %v", podName, err) - } - return nil -} - -func (tr *testRunner) pollPodLog(ctx context.Context, kubernetesClient *kubernetes.Client, podName string) error { - var output string - defer func() { - log.Printf("Log for pod %s:\n%s", podName, output) - }() - - for { - select { - case <-ctx.Done(): - return fmt.Errorf("timed out waiting profiling finishing on container") - - case <-time.After(20 * time.Second): - var err error - output, err = kubernetesClient.PodLog(ctx, podName) - if err != nil { - // Transient failure. - log.Printf("Transient error getting log (will retry): %v", err) - continue - } - if strings.Contains(output, benchFinishString) { - return nil - } - } - } -} - -func (tr *testRunner) runTestOnGKE(ctx context.Context, t *testing.T, cfg clusterConfig, projectID, zone, bucket string) { - if err := tr.uploadImageSource(ctx, bucket, cfg.imageSourceName, *commit, cfg.service); err != nil { - t.Fatalf("uploadImageSource() got error: %v", err) - } - defer func() { - if err := tr.storageClient.Bucket(bucket).Object(cfg.imageSourceName).Delete(ctx); err != nil { - t.Errorf("Bucket(%s).Object(%s).Delete() got error: %v", bucket, cfg.imageSourceName, err) - } - }() - - createImageCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) - defer cancel() - if err := tr.createAndPublishDockerImage(createImageCtx, projectID, bucket, cfg.imageSourceName, fmt.Sprintf("gcr.io/%s", cfg.imageName)); err != nil { - t.Fatalf("createAndPublishDockerImage(%s) got error: %v", cfg.imageName, err) - } - defer func() { - for _, err := range tr.deleteDockerImage(ctx, cfg.imageName) { - t.Errorf("deleteDockerImage(%s) got error: %v", cfg.imageName, err) - } - }() - - createClusterCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) - defer cancel() - if err := tr.createCluster(createClusterCtx, tr.client, projectID, zone, cfg.clusterName); err != nil { - t.Fatalf("createCluster(%s) got error: %v", cfg.clusterName, err) - } - defer func() { - if _, err := tr.containerService.Projects.Zones.Clusters.Delete(projectID, zone, cfg.clusterName).Context(ctx).Do(); err != nil { - t.Errorf("Clusters.Delete(%s) got error: %v", cfg.clusterName, err) - } - }() - - kubernetesClient, err := gke.NewClient(ctx, cfg.clusterName, gke.OptZone(zone), gke.OptProject(projectID)) - if err != nil { - t.Fatalf("gke.NewClient() got error: %v", err) - } - - deployContainerCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) - defer cancel() - if err := tr.deployContainer(deployContainerCtx, kubernetesClient, cfg.podName, 
cfg.imageName); err != nil { - t.Fatalf("deployContainer(%s, %s) got error: %v", cfg.podName, cfg.imageName, err) - } - - pollLogCtx, cancel := context.WithTimeout(ctx, 20*time.Minute) - defer cancel() - if err := tr.pollPodLog(pollLogCtx, kubernetesClient, cfg.podName); err != nil { - t.Fatalf("pollPodLog(%s) got error: %v", cfg.podName, err) - } - - timeNow := time.Now() - endTime := timeNow.Format(time.RFC3339) - startTime := timeNow.Add(-1 * time.Hour).Format(time.RFC3339) - for _, pType := range []string{"CPU", "HEAP", "THREADS"} { - if err := tr.queryAndCheckProfile(cfg.service, startTime, endTime, pType, projectID); err != nil { - t.Errorf("queryAndCheckProfile(%s, %s, %s, %s) got error: %v", cfg.service, startTime, endTime, pType, err) - } - } -} - -// uploadImageSource uploads source code for building docker image to GCS. -func (tr *testRunner) uploadImageSource(ctx context.Context, bucket, objectName, commit, service string) error { - zipBuf := new(bytes.Buffer) - z := zip.NewWriter(zipBuf) - f, err := z.Create("Dockerfile") - if err != nil { - return err - } - - dockerfile := fmt.Sprintf(dockerfileFmt, commit, service) - if _, err := f.Write([]byte(dockerfile)); err != nil { - return err - } - - if err := z.Close(); err != nil { - return err - } - wc := tr.storageClient.Bucket(bucket).Object(objectName).NewWriter(ctx) - wc.ContentType = "application/zip" - wc.ACL = []storage.ACLRule{{storage.AllUsers, storage.RoleReader}} - if _, err := wc.Write(zipBuf.Bytes()); err != nil { - return err - } - return wc.Close() -} - func TestAgentIntegration(t *testing.T) { projectID := os.Getenv("GCLOUD_TESTS_GOLANG_PROJECT_ID") if projectID == "" { @@ -640,11 +130,6 @@ func TestAgentIntegration(t *testing.T) { t.Fatalf("Getenv(GCLOUD_TESTS_GOLANG_ZONE) got empty string") } - bucket := os.Getenv("GCLOUD_TESTS_GOLANG_BUCKET") - if bucket == "" { - t.Fatalf("Getenv(GCLOUD_TESTS_GOLANG_BUCKET) got empty string") - } - if *commit == "" { t.Fatal("commit flag is not set") } @@ -656,45 +141,110 @@ func TestAgentIntegration(t *testing.T) { t.Fatalf("failed to get default client: %v", err) } - storageClient, err := storage.NewClient(ctx) - if err != nil { - t.Fatalf("storage.NewClient() error: %v", err) - } - computeService, err := compute.New(client) if err != nil { t.Fatalf("failed to initialize compute service: %v", err) } - containerService, err := container.New(client) - if err != nil { - t.Fatalf("failed to create container client: %v", err) - } - template, err := template.New("startupScript").Parse(startupTemplate) if err != nil { t.Fatalf("failed to parse startup script template: %v", err) } - tr := testRunner{ - computeService: computeService, - client: client, - startupTemplate: template, - containerService: containerService, - storageClient: storageClient, + + tr := proftest.TestRunner{ + Client: client, } - cluster := newClusterConfig(projectID) - t.Run(cluster.service, func(t *testing.T) { - t.Parallel() - tr.runTestOnGKE(ctx, t, cluster, projectID, zone, bucket) - }) + gceTr := proftest.GCETestRunner{ + TestRunner: tr, + ComputeService: computeService, + } - instances := newInstanceConfigs() - for _, instance := range instances { - inst := instance // capture range variable - t.Run(inst.service, func(t *testing.T) { + testcases := []goGCETestCase{ + { + InstanceConfig: proftest.InstanceConfig{ + ProjectID: projectID, + Zone: zone, + Name: fmt.Sprintf("profiler-test-go19-%d", runID), + MachineType: "n1-standard-1", + }, + name: fmt.Sprintf("profiler-test-go19-%d-gce", runID), + 
wantProfileTypes: []string{"CPU", "HEAP", "THREADS", "CONTENTION"}, + goVersion: "1.9", + mutexProfiling: true, + }, + { + InstanceConfig: proftest.InstanceConfig{ + ProjectID: projectID, + Zone: zone, + Name: fmt.Sprintf("profiler-test-go18-%d", runID), + MachineType: "n1-standard-1", + }, + name: fmt.Sprintf("profiler-test-go18-%d-gce", runID), + wantProfileTypes: []string{"CPU", "HEAP", "THREADS", "CONTENTION"}, + goVersion: "1.8", + mutexProfiling: true, + }, + { + InstanceConfig: proftest.InstanceConfig{ + ProjectID: projectID, + Zone: zone, + Name: fmt.Sprintf("profiler-test-go17-%d", runID), + MachineType: "n1-standard-1", + }, + name: fmt.Sprintf("profiler-test-go17-%d-gce", runID), + wantProfileTypes: []string{"CPU", "HEAP", "THREADS"}, + goVersion: "1.7", + }, + { + InstanceConfig: proftest.InstanceConfig{ + ProjectID: projectID, + Zone: zone, + Name: fmt.Sprintf("profiler-test-go16-%d", runID), + MachineType: "n1-standard-1", + }, + name: fmt.Sprintf("profiler-test-go16-%d-gce", runID), + wantProfileTypes: []string{"CPU", "HEAP", "THREADS"}, + goVersion: "1.6", + }, + } + + for _, tc := range testcases { + tc := tc // capture range variable + t.Run(tc.name, func(t *testing.T) { t.Parallel() - tr.runTestOnGCE(ctx, t, inst, projectID, zone) + if err := tc.initializeStartupScript(template); err != nil { + t.Fatalf("failed to initialize startup script") + } + + if err := gceTr.StartInstance(ctx, &tc.InstanceConfig); err != nil { + t.Fatal(err) + } + defer func() { + if gceTr.DeleteInstance(ctx, &tc.InstanceConfig); err != nil { + t.Fatal(err) + } + }() + + timeoutCtx, cancel := context.WithTimeout(ctx, time.Minute*25) + defer cancel() + if err := gceTr.PollForSerialOutput(timeoutCtx, &tc.InstanceConfig, benchFinishString); err != nil { + t.Fatal(err) + } + + timeNow := time.Now() + endTime := timeNow.Format(time.RFC3339) + startTime := timeNow.Add(-1 * time.Hour).Format(time.RFC3339) + for _, pType := range tc.wantProfileTypes { + pr, err := tr.QueryProfiles(tc.ProjectID, tc.name, startTime, endTime, pType) + if err != nil { + t.Errorf("QueryProfiles(%s, %s, %s, %s, %s) got error: %v", tc.ProjectID, tc.name, startTime, endTime, pType, err) + continue + } + if err := pr.HasFunction("busywork"); err != nil { + t.Error(err) + } + } }) } } diff --git a/vendor/cloud.google.com/go/profiler/profiler.go b/vendor/cloud.google.com/go/profiler/profiler.go index 5ea2bff2c..3317f2888 100644 --- a/vendor/cloud.google.com/go/profiler/profiler.go +++ b/vendor/cloud.google.com/go/profiler/profiler.go @@ -123,6 +123,12 @@ type Config struct { // than Go 1.8. MutexProfiling bool + // When true, collecting the heap profiles is disabled. + NoHeapProfiling bool + + // When true, collecting the goroutine profiles is disabled. + NoGoroutineProfiling bool + // ProjectID is the Cloud Console project ID to use instead of // the one read from the VM metadata server. 
// @@ -256,6 +262,9 @@ func (a *agent) createProfile(ctx context.Context) *pb.Profile { gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error p, err = a.client.CreateProfile(ctx, &req, grpc.Trailer(&md)) + if err != nil { + debugLog("failed to create a profile, will retry: %v", err) + } return err }, gax.WithRetry(func() gax.Retryer { return &retryer{ @@ -411,7 +420,13 @@ func initializeAgent(c pb.ProfilerServiceClient) *agent { profileLabels[instanceLabel] = config.instance } - profileTypes := []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_HEAP, pb.ProfileType_THREADS} + profileTypes := []pb.ProfileType{pb.ProfileType_CPU} + if !config.NoHeapProfiling { + profileTypes = append(profileTypes, pb.ProfileType_HEAP) + } + if !config.NoGoroutineProfiling { + profileTypes = append(profileTypes, pb.ProfileType_THREADS) + } if mutexEnabled { profileTypes = append(profileTypes, pb.ProfileType_CONTENTION) } @@ -442,6 +457,15 @@ func initializeConfig(cfg Config) error { config.ServiceVersion = os.Getenv("GAE_VERSION") } + if projectID := os.Getenv("GOOGLE_CLOUD_PROJECT"); config.ProjectID == "" && projectID != "" { + // Cloud Shell and App Engine set this environment variable to the project + // ID, so use it if present. In case of App Engine the project ID is also + // available from the GCE metadata server, but by using the environment + // variable saves one request to the metadata server. The environment + // project ID is only used if no project ID is provided in the + // configuration. + config.ProjectID = projectID + } if onGCE() { var err error if config.ProjectID == "" { @@ -474,6 +498,7 @@ func initializeConfig(cfg Config) error { // server for instructions, and collects and uploads profiles as // requested. 
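The new NoHeapProfiling and NoGoroutineProfiling options, together with the GOOGLE_CLOUD_PROJECT fallback added to initializeConfig, are all surfaced through profiler.Start. A minimal sketch of a caller opting out of heap and goroutine collection; the service name and version are placeholders, and relying on the environment variable rather than an explicit ProjectID is only an assumption about the deployment environment.

package main

import (
	"log"

	"cloud.google.com/go/profiler"
)

func main() {
	// ProjectID is omitted: with this change it can be picked up from the
	// GOOGLE_CLOUD_PROJECT environment variable or the GCE metadata server.
	if err := profiler.Start(profiler.Config{
		Service:              "my-service", // placeholder
		ServiceVersion:       "1.0.0",      // placeholder
		NoHeapProfiling:      true,         // drop HEAP from the collected profile types
		NoGoroutineProfiling: true,         // drop THREADS from the collected profile types
		DebugLogging:         true,
	}); err != nil {
		log.Printf("failed to start the profiler: %v", err)
	}

	// ... application code; profiles are collected in the background.
}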
func pollProfilerService(ctx context.Context, a *agent) { + debugLog("profiler has started") for { p := a.createProfile(ctx) a.profileAndUpload(ctx, p) diff --git a/vendor/cloud.google.com/go/profiler/profiler_test.go b/vendor/cloud.google.com/go/profiler/profiler_test.go index c506b0b05..3d30c3927 100644 --- a/vendor/cloud.google.com/go/profiler/profiler_test.go +++ b/vendor/cloud.google.com/go/profiler/profiler_test.go @@ -345,35 +345,53 @@ func TestInitializeAgent(t *testing.T) { for _, tt := range []struct { config Config enableMutex bool + wantProfileTypes []pb.ProfileType wantDeploymentLabels map[string]string wantProfileLabels map[string]string }{ { config: Config{ServiceVersion: testSvcVersion, zone: testZone}, + wantProfileTypes: []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_HEAP, pb.ProfileType_THREADS}, wantDeploymentLabels: map[string]string{zoneNameLabel: testZone, versionLabel: testSvcVersion}, wantProfileLabels: map[string]string{}, }, { config: Config{zone: testZone}, + wantProfileTypes: []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_HEAP, pb.ProfileType_THREADS}, wantDeploymentLabels: map[string]string{zoneNameLabel: testZone}, wantProfileLabels: map[string]string{}, }, { config: Config{ServiceVersion: testSvcVersion}, + wantProfileTypes: []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_HEAP, pb.ProfileType_THREADS}, wantDeploymentLabels: map[string]string{versionLabel: testSvcVersion}, wantProfileLabels: map[string]string{}, }, { config: Config{instance: testInstance}, + wantProfileTypes: []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_HEAP, pb.ProfileType_THREADS}, wantDeploymentLabels: map[string]string{}, wantProfileLabels: map[string]string{instanceLabel: testInstance}, }, { config: Config{instance: testInstance}, enableMutex: true, + wantProfileTypes: []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_HEAP, pb.ProfileType_THREADS, pb.ProfileType_CONTENTION}, wantDeploymentLabels: map[string]string{}, wantProfileLabels: map[string]string{instanceLabel: testInstance}, }, + { + config: Config{NoHeapProfiling: true}, + wantProfileTypes: []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_THREADS}, + wantDeploymentLabels: map[string]string{}, + wantProfileLabels: map[string]string{}, + }, + { + config: Config{NoHeapProfiling: true, NoGoroutineProfiling: true}, + wantProfileTypes: []pb.ProfileType{pb.ProfileType_CPU}, + wantDeploymentLabels: map[string]string{}, + wantProfileLabels: map[string]string{}, + }, } { config = tt.config @@ -390,24 +408,17 @@ func TestInitializeAgent(t *testing.T) { if !testutil.Equal(a.deployment, wantDeployment) { t.Errorf("initializeAgent() got deployment: %v, want %v", a.deployment, wantDeployment) } - if !testutil.Equal(a.profileLabels, tt.wantProfileLabels) { t.Errorf("initializeAgent() got profile labels: %v, want %v", a.profileLabels, tt.wantProfileLabels) } - - wantProfileTypes := []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_HEAP, pb.ProfileType_THREADS} - if tt.enableMutex { - wantProfileTypes = append(wantProfileTypes, pb.ProfileType_CONTENTION) + if !testutil.Equal(a.profileTypes, tt.wantProfileTypes) { + t.Errorf("initializeAgent() got profile types: %v, want %v", a.profileTypes, tt.wantProfileTypes) } - if !testutil.Equal(a.profileTypes, wantProfileTypes) { - t.Errorf("initializeAgent() got profile types: %v, want %v", a.profileTypes, wantProfileTypes) - } - } } func TestInitializeConfig(t *testing.T) { - oldConfig, oldService, oldVersion, oldGetProjectID, oldGetInstanceName, oldGetZone, oldOnGCE := config, 
os.Getenv("GAE_SERVICE"), os.Getenv("GAE_VERSION"), getProjectID, getInstanceName, getZone, onGCE + oldConfig, oldService, oldVersion, oldEnvProjectID, oldGetProjectID, oldGetInstanceName, oldGetZone, oldOnGCE := config, os.Getenv("GAE_SERVICE"), os.Getenv("GAE_VERSION"), os.Getenv("GOOGLE_CLOUD_PROJECT"), getProjectID, getInstanceName, getZone, onGCE defer func() { config, getProjectID, getInstanceName, getZone, onGCE = oldConfig, oldGetProjectID, oldGetInstanceName, oldGetZone, oldOnGCE if err := os.Setenv("GAE_SERVICE", oldService); err != nil { @@ -416,88 +427,135 @@ func TestInitializeConfig(t *testing.T) { if err := os.Setenv("GAE_VERSION", oldVersion); err != nil { t.Fatal(err) } + if err := os.Setenv("GOOGLE_CLOUD_PROJECT", oldEnvProjectID); err != nil { + t.Fatal(err) + } }() - testGAEService := "test-gae-service" - testGAEVersion := "test-gae-version" - testGCEProjectID := "test-gce-project-id" + const ( + testGAEService = "test-gae-service" + testGAEVersion = "test-gae-version" + testGCEProjectID = "test-gce-project-id" + testEnvProjectID = "test-env-project-id" + ) for _, tt := range []struct { + desc string config Config wantConfig Config wantErrorString string onGAE bool onGCE bool + envProjectID bool }{ { + "accepts service name", Config{Service: testService}, Config{Target: testService, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance}, "", false, true, + false, }, { + "accepts target name", Config{Target: testTarget}, Config{Target: testTarget, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance}, "", false, true, + false, }, { + "env project overrides GCE project", + Config{Service: testService}, + Config{Target: testService, ProjectID: testEnvProjectID, zone: testZone, instance: testInstance}, + "", + false, + true, + true, + }, + { + "requires service name", Config{}, Config{}, "service name must be specified in the configuration", false, true, + false, }, { + "accepts service name from config and service version from GAE", Config{Service: testService}, Config{Target: testService, ServiceVersion: testGAEVersion, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance}, "", true, true, + false, }, { + "accepts target name from config and service version from GAE", Config{Target: testTarget}, Config{Target: testTarget, ServiceVersion: testGAEVersion, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance}, "", true, true, + false, }, { + "reads both service name and version from GAE env vars", Config{}, Config{Target: testGAEService, ServiceVersion: testGAEVersion, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance}, "", true, true, + false, }, { + "accepts service version from config", Config{Service: testService, ServiceVersion: testSvcVersion}, Config{Target: testService, ServiceVersion: testSvcVersion, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance}, "", false, true, + false, }, { + "configured version has priority over GAE-provided version", Config{Service: testService, ServiceVersion: testSvcVersion}, Config{Target: testService, ServiceVersion: testSvcVersion, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance}, "", true, true, + false, }, { + "configured project ID has priority over metadata-provided project ID", Config{Service: testService, ProjectID: testProjectID}, Config{Target: testService, ProjectID: testProjectID, zone: testZone, instance: testInstance}, "", false, true, + false, }, { + "configured project ID has priority over environment 
project ID", + Config{Service: testService, ProjectID: testProjectID}, + Config{Target: testService, ProjectID: testProjectID}, + "", + false, + false, + true, + }, + { + "requires project ID if not on GCE", Config{Service: testService}, Config{Target: testService}, "project ID must be specified in the configuration if running outside of GCP", false, false, + false, }, } { + t.Logf("Running test: %s", tt.desc) envService, envVersion := "", "" if tt.onGAE { envService, envVersion = testGAEService, testGAEVersion @@ -519,6 +577,13 @@ func TestInitializeConfig(t *testing.T) { getZone = func() (string, error) { return "", fmt.Errorf("test get zone error") } getInstanceName = func() (string, error) { return "", fmt.Errorf("test get instance error") } } + envProjectID := "" + if tt.envProjectID { + envProjectID = testEnvProjectID + } + if err := os.Setenv("GOOGLE_CLOUD_PROJECT", envProjectID); err != nil { + t.Fatal(err) + } errorString := "" if err := initializeConfig(tt.config); err != nil { @@ -528,7 +593,6 @@ func TestInitializeConfig(t *testing.T) { if !strings.Contains(errorString, tt.wantErrorString) { t.Errorf("initializeConfig(%v) got error: %v, want contain %v", tt.config, errorString, tt.wantErrorString) } - if tt.wantErrorString == "" { tt.wantConfig.APIAddr = apiAddress } diff --git a/vendor/cloud.google.com/go/profiler/proftest/proftest.go b/vendor/cloud.google.com/go/profiler/proftest/proftest.go new file mode 100644 index 000000000..699ff1b50 --- /dev/null +++ b/vendor/cloud.google.com/go/profiler/proftest/proftest.go @@ -0,0 +1,503 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Package proftest contains test helpers for profiler agent integration tests. +// This package is experimental. + +// golang.org/x/build/kubernetes/dialer.go imports "context" package (rather +// than "golang.org/x/net/context") and that does not exist in Go 1.6 or +// earlier. +// +build go1.7 + +package proftest + +import ( + "archive/zip" + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "strings" + "time" + + "cloud.google.com/go/storage" + "golang.org/x/build/kubernetes" + k8sapi "golang.org/x/build/kubernetes/api" + "golang.org/x/build/kubernetes/gke" + "golang.org/x/net/context" + cloudbuild "google.golang.org/api/cloudbuild/v1" + compute "google.golang.org/api/compute/v1" + container "google.golang.org/api/container/v1" + "google.golang.org/api/googleapi" +) + +const ( + monitorWriteScope = "https://www.googleapis.com/auth/monitoring.write" + storageReadScope = "https://www.googleapis.com/auth/devstorage.read_only" +) + +// TestRunner has common elements used for testing profiling agents on a range +// of environments. +type TestRunner struct { + Client *http.Client +} + +// GCETestRunner supports testing a profiling agent on GCE. +type GCETestRunner struct { + TestRunner + ComputeService *compute.Service +} + +// GKETestRunner supports testing a profiling agent on GKE. 
+type GKETestRunner struct { + TestRunner + ContainerService *container.Service + StorageClient *storage.Client + Dockerfile string +} + +// ProfileResponse contains the response produced when querying profile server. +type ProfileResponse struct { + Profile ProfileData `json:"profile"` + NumProfiles int32 `json:"numProfiles"` + Deployments []interface{} `json:"deployments"` +} + +// ProfileData has data of a single profile. +type ProfileData struct { + Samples []int32 `json:"samples"` + SampleMetrics interface{} `json:"sampleMetrics"` + DefaultMetricType string `json:"defaultMetricType"` + TreeNodes interface{} `json:"treeNodes"` + Functions functionArray `json:"functions"` + SourceFiles interface{} `json:"sourceFiles"` +} + +type functionArray struct { + Name []string `json:"name"` + Sourcefile []int32 `json:"sourceFile"` +} + +// InstanceConfig is configuration for starting single GCE instance for +// profiling agent test case. +type InstanceConfig struct { + ProjectID string + Zone string + Name string + StartupScript string + MachineType string +} + +// ClusterConfig is configuration for starting single GKE cluster for profiling +// agent test case. +type ClusterConfig struct { + ProjectID string + Zone string + ClusterName string + PodName string + ImageSourceName string + ImageName string + Bucket string + Dockerfile string +} + +// HasFunction returns nil if the function is present, or, if the function is +// not present, and error providing more details why the function is not +// present. +func (pr *ProfileResponse) HasFunction(functionName string) error { + if pr.NumProfiles == 0 { + return fmt.Errorf("failed to find function name %s in profile: profile response contains zero profiles: %v", functionName, pr) + } + if len(pr.Deployments) == 0 { + return fmt.Errorf("failed to find function name %s in profile: profile response contains zero deployments: %v", functionName, pr) + } + if len(pr.Profile.Functions.Name) == 0 { + return fmt.Errorf("failed to find function name %s in profile: profile does not have function data", functionName) + } + + for _, name := range pr.Profile.Functions.Name { + if strings.Contains(name, functionName) { + return nil + } + } + return fmt.Errorf("failed to find function name %s in profile", functionName) +} + +// StartInstance starts a GCE Instance with name, zone, and projectId specified +// by the inst, and which runs the startup script specified in inst. +func (tr *GCETestRunner) StartInstance(ctx context.Context, inst *InstanceConfig) error { + img, err := tr.ComputeService.Images.GetFromFamily("debian-cloud", "debian-9").Context(ctx).Do() + if err != nil { + return err + } + + _, err = tr.ComputeService.Instances.Insert(inst.ProjectID, inst.Zone, &compute.Instance{ + MachineType: fmt.Sprintf("zones/%s/machineTypes/%s", inst.Zone, inst.MachineType), + Name: inst.Name, + Disks: []*compute.AttachedDisk{{ + AutoDelete: true, // delete the disk when the VM is deleted. 
+ Boot: true, + Type: "PERSISTENT", + Mode: "READ_WRITE", + InitializeParams: &compute.AttachedDiskInitializeParams{ + SourceImage: img.SelfLink, + DiskType: fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/diskTypes/pd-standard", inst.ProjectID, inst.Zone), + }, + }}, + NetworkInterfaces: []*compute.NetworkInterface{{ + Network: fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/networks/default", inst.ProjectID), + AccessConfigs: []*compute.AccessConfig{{ + Name: "External NAT", + }}, + }}, + Metadata: &compute.Metadata{ + Items: []*compute.MetadataItems{{ + Key: "startup-script", + Value: googleapi.String(inst.StartupScript), + }}, + }, + ServiceAccounts: []*compute.ServiceAccount{{ + Email: "default", + Scopes: []string{ + monitorWriteScope, + }, + }}, + }).Do() + + return err +} + +// DeleteInstance deletes an instance with project id, name, and zone matched +// by inst. +func (tr *GCETestRunner) DeleteInstance(ctx context.Context, inst *InstanceConfig) error { + if _, err := tr.ComputeService.Instances.Delete(inst.ProjectID, inst.Zone, inst.Name).Context(ctx).Do(); err != nil { + return fmt.Errorf("Instances.Delete(%s) got error: %v", inst.Name, err) + } + return nil +} + +// PollForSerialOutput polls the serial output of the GCE instance specified by +// inst and returns when the finishString appears in the serial output +// of the instance, or when the context times out. +func (tr *GCETestRunner) PollForSerialOutput(ctx context.Context, inst *InstanceConfig, finishString string) error { + var output string + defer func() { + log.Printf("Serial port output for %s:\n%s", inst.Name, output) + }() + + for { + select { + case <-ctx.Done(): + return fmt.Errorf("timed out waiting for profiling finishing on instance %s", inst.Name) + + case <-time.After(20 * time.Second): + resp, err := tr.ComputeService.Instances.GetSerialPortOutput(inst.ProjectID, inst.Zone, inst.Name).Context(ctx).Do() + if err != nil { + // Transient failure. + log.Printf("Transient error getting serial port output from instance %s (will retry): %v", inst.Name, err) + continue + } + + if output = resp.Contents; strings.Contains(output, finishString) { + return nil + } + } + } +} + +// QueryProfiles retrieves profiles of a specific type, from a specific time +// range, associated with a particular service and project. +func (tr *TestRunner) QueryProfiles(projectID, service, startTime, endTime, profileType string) (ProfileResponse, error) { + queryURL := fmt.Sprintf("https://cloudprofiler.googleapis.com/v2/projects/%s/profiles:query", projectID) + const queryJSONFmt = `{"endTime": "%s", "profileType": "%s","startTime": "%s", "target": "%s"}` + + queryRequest := fmt.Sprintf(queryJSONFmt, endTime, profileType, startTime, service) + + resp, err := tr.Client.Post(queryURL, "application/json", strings.NewReader(queryRequest)) + if err != nil { + return ProfileResponse{}, fmt.Errorf("failed to query API: %v", err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return ProfileResponse{}, fmt.Errorf("failed to read response body: %v", err) + } + + var pr ProfileResponse + if err := json.Unmarshal(body, &pr); err != nil { + return ProfileResponse{}, err + } + + return pr, nil +} + +// createAndPublishDockerImage creates a docker image from source code in a GCS +// bucket and pushes the image to Google Container Registry. 
+func (tr *GKETestRunner) createAndPublishDockerImage(ctx context.Context, projectID, sourceBucket, sourceObject, ImageName string) error {
+	cloudbuildService, err := cloudbuild.New(tr.Client)
+	if err != nil {
+		return fmt.Errorf("failed to create cloudbuild service: %v", err)
+	}
+
+	build := &cloudbuild.Build{
+		Source: &cloudbuild.Source{
+			StorageSource: &cloudbuild.StorageSource{
+				Bucket: sourceBucket,
+				Object: sourceObject,
+			},
+		},
+		Steps: []*cloudbuild.BuildStep{
+			{
+				Name: "gcr.io/cloud-builders/docker",
+				Args: []string{"build", "-t", ImageName, "."},
+			},
+		},
+		Images: []string{ImageName},
+	}
+
+	op, err := cloudbuildService.Projects.Builds.Create(projectID, build).Context(ctx).Do()
+	if err != nil {
+		return fmt.Errorf("failed to create image: %v", err)
+	}
+	opID := op.Name
+
+	// Wait for the image creation operation to complete.
+	for {
+		select {
+		case <-ctx.Done():
+			return fmt.Errorf("timed out waiting for image creation")
+
+		case <-time.After(10 * time.Second):
+			op, err := cloudbuildService.Operations.Get(opID).Context(ctx).Do()
+			if err != nil {
+				log.Printf("Transient error getting operation (will retry): %v", err)
+				break
+			}
+			if op.Done {
+				log.Printf("Published image %s to Google Container Registry.", ImageName)
+				return nil
+			}
+		}
+	}
+}
+
+type imageResponse struct {
+	Manifest map[string]interface{} `json:"manifest"`
+	Name     string                 `json:"name"`
+	Tags     []string               `json:"tags"`
+}
+
+// deleteDockerImage deletes a docker image from Google Container Registry.
+func (tr *GKETestRunner) deleteDockerImage(ctx context.Context, ImageName string) []error {
+	queryImageURL := fmt.Sprintf("https://gcr.io/v2/%s/tags/list", ImageName)
+	resp, err := tr.Client.Get(queryImageURL)
+	if err != nil {
+		return []error{fmt.Errorf("failed to list tags: %v", err)}
+	}
+	defer resp.Body.Close()
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return []error{err}
+	}
+	var ir imageResponse
+	if err := json.Unmarshal(body, &ir); err != nil {
+		return []error{err}
+	}
+
+	const deleteImageURLFmt = "https://gcr.io/v2/%s/manifests/%s"
+	var errs []error
+	for _, tag := range ir.Tags {
+		if err := deleteDockerImageResource(tr.Client, fmt.Sprintf(deleteImageURLFmt, ImageName, tag)); err != nil {
+			errs = append(errs, fmt.Errorf("failed to delete tag %s: %v", tag, err))
+		}
+	}
+
+	for manifest := range ir.Manifest {
+		if err := deleteDockerImageResource(tr.Client, fmt.Sprintf(deleteImageURLFmt, ImageName, manifest)); err != nil {
+			errs = append(errs, fmt.Errorf("failed to delete manifest %s: %v", manifest, err))
+		}
+	}
+	return errs
+}
+
+func deleteDockerImageResource(client *http.Client, url string) error {
+	req, err := http.NewRequest("DELETE", url, nil)
+	if err != nil {
+		return fmt.Errorf("failed to create request: %v", err)
+	}
+	resp, err := client.Do(req)
+	if err != nil {
+		return fmt.Errorf("failed to delete resource: %v", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted {
+		return fmt.Errorf("failed to delete resource: status code = %d", resp.StatusCode)
+	}
+	return nil
+}
+
+func (tr *GKETestRunner) createCluster(ctx context.Context, client *http.Client, projectID, zone, ClusterName string) error {
+	request := &container.CreateClusterRequest{Cluster: &container.Cluster{
+		Name:             ClusterName,
+		InitialNodeCount: 3,
+		NodeConfig: &container.NodeConfig{
+			OauthScopes: []string{
+				storageReadScope,
+			},
+		},
+	}}
+	op, err := tr.ContainerService.Projects.Zones.Clusters.Create(projectID, zone, request).Context(ctx).Do()
+	if err != nil {
+		return fmt.Errorf("failed to create cluster %s: %v", ClusterName, err)
+	}
+	opID := op.Name
+
+	// Wait for the cluster creation operation to complete.
+	for {
+		select {
+		case <-ctx.Done():
+			return fmt.Errorf("timed out waiting for cluster creation")
+
+		case <-time.After(10 * time.Second):
+			op, err := tr.ContainerService.Projects.Zones.Operations.Get(projectID, zone, opID).Context(ctx).Do()
+			if err != nil {
+				log.Printf("Transient error getting operation (will retry): %v", err)
+				break
+			}
+			if op.Status == "DONE" {
+				log.Printf("Created cluster %s.", ClusterName)
+				return nil
+			}
+			if op.Status == "ABORTING" {
+				return fmt.Errorf("create cluster operation was aborted")
+			}
+		}
+	}
+}
+
+func (tr *GKETestRunner) deployContainer(ctx context.Context, kubernetesClient *kubernetes.Client, podName, ImageName string) error {
+	pod := &k8sapi.Pod{
+		ObjectMeta: k8sapi.ObjectMeta{
+			Name: podName,
+		},
+		Spec: k8sapi.PodSpec{
+			Containers: []k8sapi.Container{
+				{
+					Name:  "profiler-test",
+					Image: fmt.Sprintf("gcr.io/%s:latest", ImageName),
+				},
+			},
+		},
+	}
+	if _, err := kubernetesClient.RunLongLivedPod(ctx, pod); err != nil {
+		return fmt.Errorf("failed to run pod %s: %v", podName, err)
+	}
+	return nil
+}
+
+// PollPodLog polls the log of the pod podName and returns when the
+// finishString appears in the log, or when the context times out.
+func (tr *GKETestRunner) PollPodLog(ctx context.Context, kubernetesClient *kubernetes.Client, podName, finishString string) error {
+	var output string
+	defer func() {
+		log.Printf("Log for pod %s:\n%s", podName, output)
+	}()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return fmt.Errorf("timed out waiting for profiling to finish on the container")
+
+		case <-time.After(20 * time.Second):
+			var err error
+			output, err = kubernetesClient.PodLog(ctx, podName)
+			if err != nil {
+				// Transient failure.
+				log.Printf("Transient error getting log (will retry): %v", err)
+				continue
+			}
+			if strings.Contains(output, finishString) {
+				return nil
+			}
+		}
+	}
+}
+
+// DeleteClusterAndImage deletes the cluster and the images used to create it.
+func (tr *GKETestRunner) DeleteClusterAndImage(ctx context.Context, cfg *ClusterConfig) []error {
+	var errs []error
+	if err := tr.StorageClient.Bucket(cfg.Bucket).Object(cfg.ImageSourceName).Delete(ctx); err != nil {
+		errs = append(errs, fmt.Errorf("failed to delete image source object: %v", err))
+	}
+	for _, err := range tr.deleteDockerImage(ctx, cfg.ImageName) {
+		errs = append(errs, fmt.Errorf("failed to delete docker image: %v", err))
+	}
+	if _, err := tr.ContainerService.Projects.Zones.Clusters.Delete(cfg.ProjectID, cfg.Zone, cfg.ClusterName).Context(ctx).Do(); err != nil {
+		errs = append(errs, fmt.Errorf("failed to delete cluster %s: %v", cfg.ClusterName, err))
+	}
+
+	return errs
+}
+
+// StartAndDeployCluster creates the image needed for the cluster, then starts
+// and deploys to the cluster.
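+// The Dockerfile in cfg is zipped and uploaded to cfg.Bucket, built into a
+// container image via Cloud Build, and then deployed to the pod cfg.PodName on
+// the cluster cfg.ClusterName.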
+func (tr *GKETestRunner) StartAndDeployCluster(ctx context.Context, cfg *ClusterConfig) error {
+	if err := tr.uploadImageSource(ctx, cfg.Bucket, cfg.ImageSourceName, cfg.Dockerfile); err != nil {
+		return fmt.Errorf("failed to upload image source: %v", err)
+	}
+
+	createImageCtx, cancel := context.WithTimeout(ctx, 5*time.Minute)
+	defer cancel()
+	if err := tr.createAndPublishDockerImage(createImageCtx, cfg.ProjectID, cfg.Bucket, cfg.ImageSourceName, fmt.Sprintf("gcr.io/%s", cfg.ImageName)); err != nil {
+		return fmt.Errorf("failed to create and publish docker image %s: %v", cfg.ImageName, err)
+	}
+
+	kubernetesClient, err := gke.NewClient(ctx, cfg.ClusterName, gke.OptZone(cfg.Zone), gke.OptProject(cfg.ProjectID))
+	if err != nil {
+		return fmt.Errorf("failed to create new GKE client: %v", err)
+	}
+
+	deployContainerCtx, cancel := context.WithTimeout(ctx, 5*time.Minute)
+	defer cancel()
+	if err := tr.deployContainer(deployContainerCtx, kubernetesClient, cfg.PodName, cfg.ImageName); err != nil {
+		return fmt.Errorf("failed to deploy image %q to pod %q: %v", cfg.ImageName, cfg.PodName, err)
+	}
+	return nil
+}
+
+// uploadImageSource uploads the source code for building the docker image to GCS.
+func (tr *GKETestRunner) uploadImageSource(ctx context.Context, bucket, objectName, dockerfile string) error {
+	zipBuf := new(bytes.Buffer)
+	z := zip.NewWriter(zipBuf)
+	f, err := z.Create("Dockerfile")
+	if err != nil {
+		return err
+	}
+
+	if _, err := f.Write([]byte(dockerfile)); err != nil {
+		return err
+	}
+
+	if err := z.Close(); err != nil {
+		return err
+	}
+	wc := tr.StorageClient.Bucket(bucket).Object(objectName).NewWriter(ctx)
+	wc.ContentType = "application/zip"
+	wc.ACL = []storage.ACLRule{{Entity: storage.AllUsers, Role: storage.RoleReader}}
+	if _, err := wc.Write(zipBuf.Bytes()); err != nil {
+		return err
+	}
+	return wc.Close()
+}
diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/pubsub_pull_example_test.go b/vendor/cloud.google.com/go/pubsub/apiv1/pubsub_pull_example_test.go
new file mode 100644
index 000000000..df13c3d0e
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/apiv1/pubsub_pull_example_test.go
@@ -0,0 +1,106 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub_test
+
+import (
+	"fmt"
+	"log"
+	"time"
+
+	"cloud.google.com/go/pubsub/apiv1"
+	"golang.org/x/net/context"
+	pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
+)
+
+func ExampleSubscriberClient_Pull_lengthyClientProcessing() {
+	projectID := "some-project"
+	subscriptionID := "some-subscription"
+
+	ctx := context.Background()
+	client, err := pubsub.NewSubscriberClient(ctx)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer client.Close()
+
+	sub := fmt.Sprintf("projects/%s/subscriptions/%s", projectID, subscriptionID)
+	// Be sure to tune the MaxMessages parameter per your project's needs, and accordingly
+	// adjust the ack behavior below to batch acknowledgements.
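+	// MaxMessages of 1 keeps this example simple: each Pull call returns at most
+	// one message, which is processed and acknowledged before pulling again.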
+ req := pubsubpb.PullRequest{ + Subscription: sub, + MaxMessages: 1, + } + + fmt.Println("Listening..") + + for { + res, err := client.Pull(ctx, &req) + if err != nil { + log.Fatal(err) + } + + // client.Pull returns an empty list if there are no messages available in the + // backlog. We should skip processing steps when that happens. + if len(res.ReceivedMessages) == 0 { + continue + } + + var recvdAckIDs []string + for _, m := range res.ReceivedMessages { + recvdAckIDs = append(recvdAckIDs, m.AckId) + } + + var done = make(chan struct{}) + var delay = 0 * time.Second // Tick immediately upon reception + var ackDeadline = 10 * time.Second + + // Continuously notify the server that processing is still happening on this batch. + go func() { + for { + select { + case <-ctx.Done(): + return + case <-done: + return + case <-time.After(delay): + err := client.ModifyAckDeadline(ctx, &pubsubpb.ModifyAckDeadlineRequest{ + Subscription: sub, + AckIds: recvdAckIDs, + AckDeadlineSeconds: int32(ackDeadline.Seconds()), + }) + if err != nil { + log.Fatal(err) + } + delay = ackDeadline - 5*time.Second // 5 seconds grace period. + } + } + }() + + for _, m := range res.ReceivedMessages { + // Process the message here, possibly in a goroutine. + log.Printf("Got message: %s", string(m.Message.Data)) + + err := client.Acknowledge(ctx, &pubsubpb.AcknowledgeRequest{ + Subscription: sub, + AckIds: []string{m.AckId}, + }) + if err != nil { + log.Fatal(err) + } + } + + close(done) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/doc.go b/vendor/cloud.google.com/go/pubsub/doc.go index b00f36416..7995065b4 100644 --- a/vendor/cloud.google.com/go/pubsub/doc.go +++ b/vendor/cloud.google.com/go/pubsub/doc.go @@ -116,5 +116,11 @@ Authentication See examples of authorization and authentication at https://godoc.org/cloud.google.com/go#pkg-examples. + +Slow Message Processing + +For use cases where message processing exceeds 30 minutes, we recommend using +the base client in a pull model, since long-lived streams are periodically killed +by firewalls. See the example at https://godoc.org/cloud.google.com/go/pubsub/apiv1#example-SubscriberClient-Pull-LengthyClientProcessing */ package pubsub // import "cloud.google.com/go/pubsub" diff --git a/vendor/cloud.google.com/go/pubsub/endtoend_test.go b/vendor/cloud.google.com/go/pubsub/endtoend_test.go index a30bdfb5c..6fe0d210d 100644 --- a/vendor/cloud.google.com/go/pubsub/endtoend_test.go +++ b/vendor/cloud.google.com/go/pubsub/endtoend_test.go @@ -30,10 +30,13 @@ import ( "google.golang.org/api/option" ) -const timeout = time.Minute * 10 -const ackDeadline = time.Second * 10 - -const nMessages = 1e4 +const ( + timeout = time.Minute * 10 + ackDeadline = time.Second * 10 + nMessages = 1e4 + acceptableDupPercentage = .05 + numAcceptableDups = int(nMessages * acceptableDupPercentage / 100) +) // Buffer log messages to debug failures. var logBuf bytes.Buffer @@ -81,15 +84,11 @@ func TestEndToEnd(t *testing.T) { defer subs[i].Delete(ctx) } - ids, err := publish(ctx, topic, nMessages) + err = publish(ctx, topic, nMessages) topic.Stop() if err != nil { t.Fatalf("publish: %v", err) } - wantCounts := make(map[string]int) - for _, id := range ids { - wantCounts[id] = 1 - } // recv provides an indication that messages are still arriving. 
recv := make(chan struct{}) @@ -146,8 +145,20 @@ loop: wg.Wait() ok := true for i, con := range consumers { - if got, want := con.counts, wantCounts; !testutil.Equal(got, want) { - t.Errorf("%d: message counts: %v\n", i, diff(got, want)) + var numDups int + var zeroes int + for _, v := range con.counts { + if v == 0 { + zeroes += 1 + } + numDups += v - 1 + } + + if zeroes > 0 { + t.Errorf("Consumer %d: %d messages never arrived", i, zeroes) + ok = false + } else if numDups > numAcceptableDups { + t.Errorf("Consumer %d: Willing to accept %d dups (%f%% duplicated of %d messages), but got %d", i, numAcceptableDups, acceptableDupPercentage, int(nMessages), numDups) ok = false } } @@ -157,7 +168,7 @@ loop: } // publish publishes n messages to topic, and returns the published message IDs. -func publish(ctx context.Context, topic *Topic, n int) ([]string, error) { +func publish(ctx context.Context, topic *Topic, n int) error { var rs []*PublishResult for i := 0; i < n; i++ { m := &Message{Data: []byte(fmt.Sprintf("msg %d", i))} @@ -167,11 +178,11 @@ func publish(ctx context.Context, topic *Topic, n int) ([]string, error) { for _, r := range rs { id, err := r.Get(ctx) if err != nil { - return nil, err + return err } ids = append(ids, id) } - return ids, nil + return nil } // consumer consumes messages according to its configuration. @@ -221,24 +232,3 @@ func (c *consumer) process(_ context.Context, m *Message) { delay := rand.Intn(int(ackDeadline * 3)) time.AfterFunc(time.Duration(delay), m.Ack) } - -// diff returns counts of the differences between got and want. -func diff(got, want map[string]int) map[string]int { - ids := make(map[string]struct{}) - for k := range got { - ids[k] = struct{}{} - } - for k := range want { - ids[k] = struct{}{} - } - - gotWantCount := make(map[string]int) - for k := range ids { - if got[k] == want[k] { - continue - } - desc := fmt.Sprintf("", got[k], want[k]) - gotWantCount[desc] += 1 - } - return gotWantCount -} diff --git a/vendor/cloud.google.com/go/pubsub/fake_test.go b/vendor/cloud.google.com/go/pubsub/fake_test.go index 16e8cb521..330d33438 100644 --- a/vendor/cloud.google.com/go/pubsub/fake_test.go +++ b/vendor/cloud.google.com/go/pubsub/fake_test.go @@ -30,8 +30,8 @@ import ( emptypb "github.com/golang/protobuf/ptypes/empty" "golang.org/x/net/context" pb "google.golang.org/genproto/googleapis/pubsub/v1" - "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) type fakeServer struct { @@ -158,7 +158,7 @@ var defaultMessageRetentionDuration = ptypes.DurationProto(maxMessageRetentionDu func checkMRD(pmrd *durpb.Duration) error { mrd, err := ptypes.Duration(pmrd) if err != nil || mrd < minMessageRetentionDuration || mrd > maxMessageRetentionDuration { - return grpc.Errorf(codes.InvalidArgument, "bad message_retention_duration %+v", pmrd) + return status.Errorf(codes.InvalidArgument, "bad message_retention_duration %+v", pmrd) } return nil } @@ -166,14 +166,14 @@ func checkMRD(pmrd *durpb.Duration) error { func checkAckDeadline(ads int32) error { if ads < 10 || ads > 600 { // PubSub service returns Unknown. 
- return grpc.Errorf(codes.Unknown, "bad ack_deadline_seconds: %d", ads) + return status.Errorf(codes.Unknown, "bad ack_deadline_seconds: %d", ads) } return nil } func (s *fakeServer) CreateSubscription(ctx context.Context, sub *pb.Subscription) (*pb.Subscription, error) { if s.subs[sub.Name] != nil { - return nil, grpc.Errorf(codes.AlreadyExists, "subscription %q", sub.Name) + return nil, status.Errorf(codes.AlreadyExists, "subscription %q", sub.Name) } sub2 := proto.Clone(sub).(*pb.Subscription) if err := checkAckDeadline(sub.AckDeadlineSeconds); err != nil { @@ -196,13 +196,13 @@ func (s *fakeServer) GetSubscription(ctx context.Context, req *pb.GetSubscriptio if sub := s.subs[req.Subscription]; sub != nil { return sub, nil } - return nil, grpc.Errorf(codes.NotFound, "subscription %q", req.Subscription) + return nil, status.Errorf(codes.NotFound, "subscription %q", req.Subscription) } func (s *fakeServer) UpdateSubscription(ctx context.Context, req *pb.UpdateSubscriptionRequest) (*pb.Subscription, error) { sub := s.subs[req.Subscription.Name] if sub == nil { - return nil, grpc.Errorf(codes.NotFound, "subscription %q", req.Subscription.Name) + return nil, status.Errorf(codes.NotFound, "subscription %q", req.Subscription.Name) } for _, path := range req.UpdateMask.Paths { switch path { @@ -227,7 +227,7 @@ func (s *fakeServer) UpdateSubscription(ctx context.Context, req *pb.UpdateSubsc // TODO(jba): labels default: - return nil, grpc.Errorf(codes.InvalidArgument, "unknown field name %q", path) + return nil, status.Errorf(codes.InvalidArgument, "unknown field name %q", path) } } return sub, nil @@ -235,7 +235,7 @@ func (s *fakeServer) UpdateSubscription(ctx context.Context, req *pb.UpdateSubsc func (s *fakeServer) DeleteSubscription(_ context.Context, req *pb.DeleteSubscriptionRequest) (*emptypb.Empty, error) { if s.subs[req.Subscription] == nil { - return nil, grpc.Errorf(codes.NotFound, "subscription %q", req.Subscription) + return nil, status.Errorf(codes.NotFound, "subscription %q", req.Subscription) } delete(s.subs, req.Subscription) return &emptypb.Empty{}, nil @@ -243,7 +243,7 @@ func (s *fakeServer) DeleteSubscription(_ context.Context, req *pb.DeleteSubscri func (s *fakeServer) CreateTopic(_ context.Context, t *pb.Topic) (*pb.Topic, error) { if s.topics[t.Name] != nil { - return nil, grpc.Errorf(codes.AlreadyExists, "topic %q", t.Name) + return nil, status.Errorf(codes.AlreadyExists, "topic %q", t.Name) } t2 := proto.Clone(t).(*pb.Topic) s.topics[t.Name] = t2 @@ -254,12 +254,12 @@ func (s *fakeServer) GetTopic(_ context.Context, req *pb.GetTopicRequest) (*pb.T if t := s.topics[req.Topic]; t != nil { return t, nil } - return nil, grpc.Errorf(codes.NotFound, "topic %q", req.Topic) + return nil, status.Errorf(codes.NotFound, "topic %q", req.Topic) } func (s *fakeServer) DeleteTopic(_ context.Context, req *pb.DeleteTopicRequest) (*emptypb.Empty, error) { if s.topics[req.Topic] == nil { - return nil, grpc.Errorf(codes.NotFound, "topic %q", req.Topic) + return nil, status.Errorf(codes.NotFound, "topic %q", req.Topic) } delete(s.topics, req.Topic) return &emptypb.Empty{}, nil diff --git a/vendor/cloud.google.com/go/pubsub/go18.go b/vendor/cloud.google.com/go/pubsub/go18.go new file mode 100644 index 000000000..25bdc777f --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/go18.go @@ -0,0 +1,168 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package pubsub + +import ( + "log" + "sync" + + "go.opencensus.io/plugin/ocgrpc" + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +func openCensusOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithGRPCDialOption(grpc.WithStatsHandler(&ocgrpc.ClientHandler{})), + } +} + +var subscriptionKey tag.Key + +func init() { + var err error + if subscriptionKey, err = tag.NewKey("subscription"); err != nil { + log.Fatal("cannot create 'subscription' key") + } +} + +var ( + // PullCount is a measure of the number of messages pulled. + // It is EXPERIMENTAL and subject to change or removal without notice. + PullCount *stats.Int64Measure + + // AckCount is a measure of the number of messages acked. + // It is EXPERIMENTAL and subject to change or removal without notice. + AckCount *stats.Int64Measure + + // NackCount is a measure of the number of messages nacked. + // It is EXPERIMENTAL and subject to change or removal without notice. + NackCount *stats.Int64Measure + + // ModAckCount is a measure of the number of messages whose ack-deadline was modified. + // It is EXPERIMENTAL and subject to change or removal without notice. + ModAckCount *stats.Int64Measure + + // StreamOpenCount is a measure of the number of times a streaming-pull stream was opened. + // It is EXPERIMENTAL and subject to change or removal without notice. + StreamOpenCount *stats.Int64Measure + + // StreamRetryCount is a measure of the number of times a streaming-pull operation was retried. + // It is EXPERIMENTAL and subject to change or removal without notice. + StreamRetryCount *stats.Int64Measure + + // StreamRequestCount is a measure of the number of requests sent on a streaming-pull stream. + // It is EXPERIMENTAL and subject to change or removal without notice. + StreamRequestCount *stats.Int64Measure + + // StreamRequestCount is a measure of the number of responses received on a streaming-pull stream. + // It is EXPERIMENTAL and subject to change or removal without notice. + StreamResponseCount *stats.Int64Measure + + // PullCountView is a cumulative sum of PullCount. + // It is EXPERIMENTAL and subject to change or removal without notice. + PullCountView *view.View + + // AckCountView is a cumulative sum of AckCount. + // It is EXPERIMENTAL and subject to change or removal without notice. + AckCountView *view.View + + // NackCountView is a cumulative sum of NackCount. + // It is EXPERIMENTAL and subject to change or removal without notice. + NackCountView *view.View + + // ModAckCountView is a cumulative sum of ModAckCount. + // It is EXPERIMENTAL and subject to change or removal without notice. + ModAckCountView *view.View + + // StreamOpenCountView is a cumulative sum of StreamOpenCount. + // It is EXPERIMENTAL and subject to change or removal without notice. + StreamOpenCountView *view.View + + // StreamRetryCountView is a cumulative sum of StreamRetryCount. + // It is EXPERIMENTAL and subject to change or removal without notice. 
+ StreamRetryCountView *view.View + + // StreamRequestCountView is a cumulative sum of StreamRequestCount. + // It is EXPERIMENTAL and subject to change or removal without notice. + StreamRequestCountView *view.View + + // StreamResponseCountView is a cumulative sum of StreamResponseCount. + // It is EXPERIMENTAL and subject to change or removal without notice. + StreamResponseCountView *view.View +) + +const statsPrefix = "cloud.google.com/go/pubsub/" + +func init() { + PullCount = mustNewMeasure("pull_count", "Number of PubSub messages pulled") + AckCount = mustNewMeasure("ack_count", "Number of PubSub messages acked") + NackCount = mustNewMeasure("nack_count", "Number of PubSub messages nacked") + ModAckCount = mustNewMeasure("mod_ack_count", "Number of ack-deadlines modified") + StreamOpenCount = mustNewMeasure("stream_open_count", "Number of calls opening a new streaming pull") + StreamRetryCount = mustNewMeasure("stream_retry_count", "Number of retries of a stream send or receive") + StreamRequestCount = mustNewMeasure("stream_request_count", "Number gRPC StreamingPull request messages sent") + StreamResponseCount = mustNewMeasure("stream_response_count", "Number of gRPC StreamingPull response messages received") + + PullCountView = mustNewView(PullCount) + AckCountView = mustNewView(AckCount) + NackCountView = mustNewView(NackCount) + ModAckCountView = mustNewView(ModAckCount) + StreamOpenCountView = mustNewView(StreamOpenCount) + StreamRetryCountView = mustNewView(StreamRetryCount) + StreamRequestCountView = mustNewView(StreamRequestCount) + StreamResponseCountView = mustNewView(StreamResponseCount) +} + +func mustNewMeasure(name, desc string) *stats.Int64Measure { + const unitCount = "1" + name = statsPrefix + name + m, err := stats.Int64(name, desc, unitCount) + if err != nil { + log.Fatalf("creating %q: %v", name, err) + } + return m +} + +func mustNewView(m *stats.Int64Measure) *view.View { + v, err := view.New(m.Name(), "cumulative "+m.Description(), + []tag.Key{subscriptionKey}, m, view.Sum()) + if err != nil { + log.Fatalf("creating view for %q: %v", m.Name(), err) + } + return v +} + +var logOnce sync.Once + +func withSubscriptionKey(ctx context.Context, subName string) context.Context { + ctx, err := tag.New(ctx, tag.Upsert(subscriptionKey, subName)) + if err != nil { + logOnce.Do(func() { + log.Printf("pubsub: error creating tag map: %v", err) + }) + } + return ctx +} + +func recordStat(ctx context.Context, m *stats.Int64Measure, n int64) { + stats.Record(ctx, m.M(n)) +} diff --git a/vendor/cloud.google.com/go/pubsub/iterator.go b/vendor/cloud.google.com/go/pubsub/iterator.go index c7d387732..78934e2c8 100644 --- a/vendor/cloud.google.com/go/pubsub/iterator.go +++ b/vendor/cloud.google.com/go/pubsub/iterator.go @@ -19,6 +19,7 @@ import ( "time" vkit "cloud.google.com/go/pubsub/apiv1" + "cloud.google.com/go/pubsub/internal/distribution" "golang.org/x/net/context" pb "google.golang.org/genproto/googleapis/pubsub/v1" ) @@ -45,6 +46,7 @@ type streamingMessageIterator struct { wg sync.WaitGroup mu sync.Mutex + ackTimeDist *distribution.D keepAliveDeadlines map[string]time.Time pendingReq *pb.StreamingPullRequest pendingModAcks map[string]int32 // ack IDs whose ack deadline is to be modified @@ -71,6 +73,7 @@ func newStreamingMessageIterator(ctx context.Context, ps *pullStream, po *pullOp failed: make(chan struct{}), stopped: make(chan struct{}), drained: make(chan struct{}), + ackTimeDist: distribution.New(int(maxAckDeadline/time.Second) + 1), keepAliveDeadlines: 
map[string]time.Time{}, pendingReq: &pb.StreamingPullRequest{}, pendingModAcks: map[string]int32{}, @@ -116,7 +119,8 @@ func (it *streamingMessageIterator) checkDrained() { } // Called when a message is acked/nacked. -func (it *streamingMessageIterator) done(ackID string, ack bool) { +func (it *streamingMessageIterator) done(ackID string, ack bool, receiveTime time.Time) { + it.ackTimeDist.Record(int(time.Since(receiveTime) / time.Second)) it.mu.Lock() defer it.mu.Unlock() delete(it.keepAliveDeadlines, ackID) @@ -173,7 +177,9 @@ func (it *streamingMessageIterator) receive() ([]*Message, error) { maxExt := time.Now().Add(it.po.maxExtension) deadline := trunc32(int64(it.po.ackDeadline.Seconds())) it.mu.Lock() + now := time.Now() for _, m := range msgs { + m.receiveTime = now m.doneFunc = it.done it.keepAliveDeadlines[m.ackID] = maxExt // The receipt mod-ack uses the subscription's configured ack deadline. Don't diff --git a/vendor/cloud.google.com/go/pubsub/message.go b/vendor/cloud.google.com/go/pubsub/message.go index f6bb5e09c..ac2cecca3 100644 --- a/vendor/cloud.google.com/go/pubsub/message.go +++ b/vendor/cloud.google.com/go/pubsub/message.go @@ -43,13 +43,16 @@ type Message struct { // This field is read-only. PublishTime time.Time + // receiveTime is the time the message was received by the client. + receiveTime time.Time + // size is the approximate size of the message's data and attributes. size int calledDone bool // The done method of the iterator that created this Message. - doneFunc func(string, bool) + doneFunc func(string, bool, time.Time) } func toMessage(resp *pb.ReceivedMessage) (*Message, error) { @@ -93,5 +96,5 @@ func (m *Message) done(ack bool) { return } m.calledDone = true - m.doneFunc(m.ackID, ack) + m.doneFunc(m.ackID, ack, m.receiveTime) } diff --git a/vendor/cloud.google.com/go/pubsub/not_go18.go b/vendor/cloud.google.com/go/pubsub/not_go18.go new file mode 100644 index 000000000..09fd4bf58 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/not_go18.go @@ -0,0 +1,54 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.8 + +package pubsub + +import ( + "golang.org/x/net/context" + "google.golang.org/api/option" +) + +// OpenCensus only supports go 1.8 and higher. + +func openCensusOptions() []option.ClientOption { return nil } + +func withSubscriptionKey(ctx context.Context, _ string) context.Context { + return ctx +} + +type dummy struct{} + +var ( + // Not supported below Go 1.8. + PullCount dummy + // Not supported below Go 1.8. + AckCount dummy + // Not supported below Go 1.8. + NackCount dummy + // Not supported below Go 1.8. + ModAckCount dummy + // Not supported below Go 1.8. + StreamOpenCount dummy + // Not supported below Go 1.8. + StreamRetryCount dummy + // Not supported below Go 1.8. + StreamRequestCount dummy + // Not supported below Go 1.8. 
+ StreamResponseCount dummy +) + +func recordStat(context.Context, dummy, int64) { +} diff --git a/vendor/cloud.google.com/go/pubsub/pstest/fake.go b/vendor/cloud.google.com/go/pubsub/pstest/fake.go index 4c967a63a..d20dddcda 100644 --- a/vendor/cloud.google.com/go/pubsub/pstest/fake.go +++ b/vendor/cloud.google.com/go/pubsub/pstest/fake.go @@ -25,6 +25,7 @@ package pstest import ( "fmt" "io" + "path" "sort" "strings" "sync" @@ -37,8 +38,8 @@ import ( emptypb "github.com/golang/protobuf/ptypes/empty" "golang.org/x/net/context" pb "google.golang.org/genproto/googleapis/pubsub/v1" - "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) // For testing. Note that even though changes to the now variable are atomic, a call @@ -100,6 +101,14 @@ func NewServer() *Server { // // Publish panics if there is an error, which is appropriate for testing. func (s *Server) Publish(topic string, data []byte, attrs map[string]string) string { + const topicPattern = "projects/*/topics/*" + ok, err := path.Match(topicPattern, topic) + if err != nil { + panic(err) + } + if !ok { + panic(fmt.Sprintf("topic name must be of the form %q", topicPattern)) + } _, _ = s.gServer.CreateTopic(nil, &pb.Topic{Name: topic}) req := &pb.PublishRequest{ Topic: topic, @@ -174,7 +183,7 @@ func (s *gServer) CreateTopic(_ context.Context, t *pb.Topic) (*pb.Topic, error) defer s.mu.Unlock() if s.topics[t.Name] != nil { - return nil, grpc.Errorf(codes.AlreadyExists, "topic %q", t.Name) + return nil, status.Errorf(codes.AlreadyExists, "topic %q", t.Name) } top := newTopic(t) s.topics[t.Name] = top @@ -188,11 +197,11 @@ func (s *gServer) GetTopic(_ context.Context, req *pb.GetTopicRequest) (*pb.Topi if t := s.topics[req.Topic]; t != nil { return t.proto, nil } - return nil, grpc.Errorf(codes.NotFound, "topic %q", req.Topic) + return nil, status.Errorf(codes.NotFound, "topic %q", req.Topic) } func (s *gServer) UpdateTopic(_ context.Context, req *pb.UpdateTopicRequest) (*pb.Topic, error) { - return nil, grpc.Errorf(codes.Unimplemented, "unimplemented") + return nil, status.Errorf(codes.Unimplemented, "unimplemented") } func (s *gServer) ListTopics(_ context.Context, req *pb.ListTopicsRequest) (*pb.ListTopicsResponse, error) { @@ -244,7 +253,7 @@ func (s *gServer) DeleteTopic(_ context.Context, req *pb.DeleteTopicRequest) (*e t := s.topics[req.Topic] if t == nil { - return nil, grpc.Errorf(codes.NotFound, "topic %q", req.Topic) + return nil, status.Errorf(codes.NotFound, "topic %q", req.Topic) } t.stop() delete(s.topics, req.Topic) @@ -256,17 +265,17 @@ func (s *gServer) CreateSubscription(_ context.Context, ps *pb.Subscription) (*p defer s.mu.Unlock() if ps.Name == "" { - return nil, grpc.Errorf(codes.InvalidArgument, "missing name") + return nil, status.Errorf(codes.InvalidArgument, "missing name") } if s.subs[ps.Name] != nil { - return nil, grpc.Errorf(codes.AlreadyExists, "subscription %q", ps.Name) + return nil, status.Errorf(codes.AlreadyExists, "subscription %q", ps.Name) } if ps.Topic == "" { - return nil, grpc.Errorf(codes.InvalidArgument, "missing topic") + return nil, status.Errorf(codes.InvalidArgument, "missing topic") } top := s.topics[ps.Topic] if top == nil { - return nil, grpc.Errorf(codes.NotFound, "topic %q", ps.Topic) + return nil, status.Errorf(codes.NotFound, "topic %q", ps.Topic) } if err := checkAckDeadline(ps.AckDeadlineSeconds); err != nil { return nil, err @@ -288,10 +297,13 @@ func (s *gServer) CreateSubscription(_ context.Context, ps *pb.Subscription) (*p return ps, nil } 
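+// minAckDeadlineSecs mirrors the service's 10-second minimum ack deadline.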
+// Can be set for testing. +var minAckDeadlineSecs int32 = 10 + func checkAckDeadline(ads int32) error { - if ads < 10 || ads > 600 { + if ads < minAckDeadlineSecs || ads > 600 { // PubSub service returns Unknown. - return grpc.Errorf(codes.Unknown, "bad ack_deadline_seconds: %d", ads) + return status.Errorf(codes.Unknown, "bad ack_deadline_seconds: %d", ads) } return nil } @@ -306,7 +318,7 @@ var defaultMessageRetentionDuration = ptypes.DurationProto(maxMessageRetentionDu func checkMRD(pmrd *durpb.Duration) error { mrd, err := ptypes.Duration(pmrd) if err != nil || mrd < minMessageRetentionDuration || mrd > maxMessageRetentionDuration { - return grpc.Errorf(codes.InvalidArgument, "bad message_retention_duration %+v", pmrd) + return status.Errorf(codes.InvalidArgument, "bad message_retention_duration %+v", pmrd) } return nil } @@ -318,7 +330,7 @@ func (s *gServer) GetSubscription(_ context.Context, req *pb.GetSubscriptionRequ if sub := s.subs[req.Subscription]; sub != nil { return sub.proto, nil } - return nil, grpc.Errorf(codes.NotFound, "subscription %q", req.Subscription) + return nil, status.Errorf(codes.NotFound, "subscription %q", req.Subscription) } func (s *gServer) UpdateSubscription(_ context.Context, req *pb.UpdateSubscriptionRequest) (*pb.Subscription, error) { @@ -327,7 +339,7 @@ func (s *gServer) UpdateSubscription(_ context.Context, req *pb.UpdateSubscripti sub := s.subs[req.Subscription.Name] if sub == nil { - return nil, grpc.Errorf(codes.NotFound, "subscription %q", req.Subscription.Name) + return nil, status.Errorf(codes.NotFound, "subscription %q", req.Subscription.Name) } for _, path := range req.UpdateMask.Paths { @@ -353,7 +365,7 @@ func (s *gServer) UpdateSubscription(_ context.Context, req *pb.UpdateSubscripti // TODO(jba): labels default: - return nil, grpc.Errorf(codes.InvalidArgument, "unknown field name %q", path) + return nil, status.Errorf(codes.InvalidArgument, "unknown field name %q", path) } } return sub.proto, nil @@ -387,7 +399,7 @@ func (s *gServer) DeleteSubscription(_ context.Context, req *pb.DeleteSubscripti sub := s.subs[req.Subscription] if sub == nil { - return nil, grpc.Errorf(codes.NotFound, "subscription %q", req.Subscription) + return nil, status.Errorf(codes.NotFound, "subscription %q", req.Subscription) } sub.stop() delete(s.subs, req.Subscription) @@ -400,11 +412,11 @@ func (s *gServer) Publish(_ context.Context, req *pb.PublishRequest) (*pb.Publis defer s.mu.Unlock() if req.Topic == "" { - return nil, grpc.Errorf(codes.InvalidArgument, "missing topic") + return nil, status.Errorf(codes.InvalidArgument, "missing topic") } top := s.topics[req.Topic] if top == nil { - return nil, grpc.Errorf(codes.NotFound, "topic %q", req.Topic) + return nil, status.Errorf(codes.NotFound, "topic %q", req.Topic) } var ids []string for _, pm := range req.Messages { @@ -414,7 +426,7 @@ func (s *gServer) Publish(_ context.Context, req *pb.PublishRequest) (*pb.Publis pubTime := timeNow() tsPubTime, err := ptypes.TimestampProto(pubTime) if err != nil { - return nil, grpc.Errorf(codes.Internal, err.Error()) + return nil, status.Errorf(codes.Internal, err.Error()) } pm.PublishTime = tsPubTime m := &Message{ @@ -480,11 +492,15 @@ type subscription struct { } func newSubscription(t *topic, mu *sync.Mutex, ps *pb.Subscription) *subscription { + at := time.Duration(ps.AckDeadlineSeconds) * time.Second + if at == 0 { + at = 10 * time.Second + } return &subscription{ topic: t, mu: mu, proto: ps, - ackTimeout: 10 * time.Second, + ackTimeout: at, msgs: 
map[string]*message{}, done: make(chan struct{}), } @@ -498,7 +514,7 @@ func (s *subscription) start(wg *sync.WaitGroup) { select { case <-s.done: return - case <-time.After(1 * time.Second): + case <-time.After(10 * time.Millisecond): s.deliver() } } @@ -516,13 +532,13 @@ func (s *gServer) StreamingPull(sps pb.Subscriber_StreamingPullServer) error { return err } if req.Subscription == "" { - return grpc.Errorf(codes.InvalidArgument, "missing subscription") + return status.Errorf(codes.InvalidArgument, "missing subscription") } s.mu.Lock() sub := s.subs[req.Subscription] s.mu.Unlock() if sub == nil { - return grpc.Errorf(codes.NotFound, "subscription %s", req.Subscription) + return status.Errorf(codes.NotFound, "subscription %s", req.Subscription) } // Create a new stream to handle the pull. st := sub.newStream(sps, s.streamTimeout) @@ -555,6 +571,9 @@ func (s *subscription) deliver() { // Try to deliver each remaining message. curIndex := 0 for _, m := range s.msgs { + if m.outstanding() { + continue + } // If the message was never delivered before, start with the stream at // curIndex. If it was delivered before, start with the stream after the one // that owned it. @@ -640,6 +659,7 @@ type message struct { streamIndex int // index of stream that currently owns msg, for round-robin delivery } +// A message is outstanding if it is owned by some stream. func (m *message) outstanding() bool { return !m.ackDeadline.IsZero() } diff --git a/vendor/cloud.google.com/go/pubsub/pstest/fake_test.go b/vendor/cloud.google.com/go/pubsub/pstest/fake_test.go index d5a82125a..5b4a78ec8 100644 --- a/vendor/cloud.google.com/go/pubsub/pstest/fake_test.go +++ b/vendor/cloud.google.com/go/pubsub/pstest/fake_test.go @@ -131,7 +131,7 @@ func TestPublish(t *testing.T) { s := NewServer() var ids []string for i := 0; i < 3; i++ { - ids = append(ids, s.Publish("t", []byte("hello"), nil)) + ids = append(ids, s.Publish("projects/p/topics/t", []byte("hello"), nil)) } s.Wait() ms := s.Messages() @@ -200,6 +200,98 @@ func TestStreamingPull(t *testing.T) { } } +func TestAck(t *testing.T) { + // Ack each message as it arrives. Make sure we don't see dups. + minAckDeadlineSecs = 1 + pclient, sclient, _ := newFake(t) + top := mustCreateTopic(t, pclient, &pb.Topic{Name: "projects/P/topics/T"}) + sub := mustCreateSubscription(t, sclient, &pb.Subscription{ + Name: "projects/P/subscriptions/S", + Topic: top.Name, + AckDeadlineSeconds: 1, + }) + + _ = publish(t, pclient, top, []*pb.PubsubMessage{ + {Data: []byte("d1")}, + {Data: []byte("d2")}, + {Data: []byte("d3")}, + }) + + got := map[string]bool{} + spc := mustStartPull(t, sclient, sub) + time.AfterFunc(time.Duration(3*minAckDeadlineSecs)*time.Second, func() { + if err := spc.CloseSend(); err != nil { + t.Errorf("CloseSend: %v", err) + } + }) + + for { + res, err := spc.Recv() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + req := &pb.StreamingPullRequest{} + for _, m := range res.ReceivedMessages { + if got[m.Message.MessageId] { + t.Fatal("duplicate message") + } + got[m.Message.MessageId] = true + req.AckIds = append(req.AckIds, m.AckId) + } + if err := spc.Send(req); err != nil { + t.Fatal(err) + } + } +} + +func TestAckDeadline(t *testing.T) { + // Messages should be resent after they expire. 
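+	// The fake's minimum ack deadline is lowered to 2 seconds below so that
+	// redeliveries happen within the 5-second window this test observes.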
+ pclient, sclient, _ := newFake(t) + minAckDeadlineSecs = 2 + top := mustCreateTopic(t, pclient, &pb.Topic{Name: "projects/P/topics/T"}) + sub := mustCreateSubscription(t, sclient, &pb.Subscription{ + Name: "projects/P/subscriptions/S", + Topic: top.Name, + AckDeadlineSeconds: minAckDeadlineSecs, + }) + + _ = publish(t, pclient, top, []*pb.PubsubMessage{ + {Data: []byte("d1")}, + {Data: []byte("d2")}, + {Data: []byte("d3")}, + }) + + got := map[string]int{} + spc := mustStartPull(t, sclient, sub) + // In 5 seconds the ack deadline will expire twice, so we should see each message + // exactly three times. + time.AfterFunc(5*time.Second, func() { + if err := spc.CloseSend(); err != nil { + t.Errorf("CloseSend: %v", err) + } + }) + for { + res, err := spc.Recv() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + for _, m := range res.ReceivedMessages { + got[m.Message.MessageId]++ + } + } + for id, n := range got { + if n != 3 { + t.Errorf("message %s: saw %d times, want 3", id, n) + } + } +} + func TestMultiSubs(t *testing.T) { // Each subscription gets every message. pclient, sclient, _ := newFake(t) @@ -309,9 +401,9 @@ func pullN(t *testing.T, n int, sc pb.SubscriberClient, sub *pb.Subscription) ma if err := spc.CloseSend(); err != nil { t.Fatal(err) } - _, err := spc.Recv() + res, err := spc.Recv() if err != io.EOF { - t.Fatal(err) + t.Fatalf("Recv returned <%v> instead of EOF; res = %v", err, res) } return got } diff --git a/vendor/cloud.google.com/go/pubsub/pstest_test.go b/vendor/cloud.google.com/go/pubsub/pstest_test.go new file mode 100644 index 000000000..5c6193081 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/pstest_test.go @@ -0,0 +1,76 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pubsub_test + +import ( + "strconv" + "sync" + "testing" + + "golang.org/x/net/context" + + "cloud.google.com/go/pubsub" + "cloud.google.com/go/pubsub/pstest" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +func TestPSTest(t *testing.T) { + ctx := context.Background() + srv := pstest.NewServer() + + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + if err != nil { + panic(err) + } + + client, err := pubsub.NewClient(ctx, "some-project", option.WithGRPCConn(conn)) + if err != nil { + panic(err) + } + defer client.Close() + + topic, err := client.CreateTopic(ctx, "test-topic") + if err != nil { + panic(err) + } + + sub, err := client.CreateSubscription(ctx, "sub-name", pubsub.SubscriptionConfig{Topic: topic}) + if err != nil { + panic(err) + } + + go func() { + for i := 0; i < 10; i++ { + srv.Publish("projects/some-project/topics/test-topic", []byte(strconv.Itoa(i)), nil) + } + }() + + ctx, cancel := context.WithCancel(ctx) + var mu sync.Mutex + count := 0 + err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { + mu.Lock() + count++ + if count >= 10 { + cancel() + } + mu.Unlock() + m.Ack() + }) + if err != nil { + panic(err) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/pubsub.go b/vendor/cloud.google.com/go/pubsub/pubsub.go index d8ec92db7..8475186d7 100644 --- a/vendor/cloud.google.com/go/pubsub/pubsub.go +++ b/vendor/cloud.google.com/go/pubsub/pubsub.go @@ -38,7 +38,11 @@ const ( ScopeCloudPlatform = "https://www.googleapis.com/auth/cloud-platform" ) -const prodAddr = "https://pubsub.googleapis.com/" +const ( + prodAddr = "https://pubsub.googleapis.com/" + minAckDeadline = 10 * time.Second + maxAckDeadline = 10 * time.Minute +) // Client is a Google Pub/Sub client scoped to a single project. // @@ -51,7 +55,7 @@ type Client struct { } // NewClient creates a new PubSub client. -func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { +func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (c *Client, err error) { var o []option.ClientOption // Environment variables for gcloud emulator: // https://cloud.google.com/sdk/gcloud/reference/beta/emulators/pubsub/ @@ -69,6 +73,7 @@ func NewClient(ctx context.Context, projectID string, opts ...option.ClientOptio Time: 5 * time.Minute, })), } + o = append(o, openCensusOptions()...) } o = append(o, opts...) pubc, err := vkit.NewPublisherClient(ctx, o...) diff --git a/vendor/cloud.google.com/go/pubsub/pullstream.go b/vendor/cloud.google.com/go/pubsub/pullstream.go index af7934f82..d9318a130 100644 --- a/vendor/cloud.google.com/go/pubsub/pullstream.go +++ b/vendor/cloud.google.com/go/pubsub/pullstream.go @@ -37,11 +37,13 @@ type pullStream struct { } func newPullStream(ctx context.Context, subc *vkit.SubscriberClient, subName string, ackDeadlineSecs int32) *pullStream { + ctx = withSubscriptionKey(ctx, subName) return &pullStream{ ctx: ctx, open: func() (pb.Subscriber_StreamingPullClient, error) { spc, err := subc.StreamingPull(ctx, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes))) if err == nil { + recordStat(ctx, StreamRequestCount, 1) err = spc.Send(&pb.StreamingPullRequest{ Subscription: subName, StreamAckDeadlineSeconds: ackDeadlineSecs, @@ -91,6 +93,7 @@ func (s *pullStream) get(spc *pb.Subscriber_StreamingPullClient) (*pb.Subscriber // The lock is held here for a long time, but it doesn't matter because no callers could get // anything done anyway. 
s.spc = new(pb.Subscriber_StreamingPullClient) + recordStat(s.ctx, StreamOpenCount, 1) *s.spc, s.err = s.open() // Setting s.err means any error from open is permanent. Reconsider. return s.spc, s.err } @@ -111,10 +114,13 @@ func (s *pullStream) call(f func(pb.Subscriber_StreamingPullClient) error) error err = f(*spc) if err != nil { if isRetryable(err) { + recordStat(s.ctx, StreamRetryCount, 1) gax.Sleep(s.ctx, bo.Pause()) continue } + s.mu.Lock() s.err = err + s.mu.Unlock() } return err } @@ -122,6 +128,16 @@ func (s *pullStream) call(f func(pb.Subscriber_StreamingPullClient) error) error func (s *pullStream) Send(req *pb.StreamingPullRequest) error { return s.call(func(spc pb.Subscriber_StreamingPullClient) error { + recordStat(s.ctx, AckCount, int64(len(req.AckIds))) + zeroes := 0 + for _, mds := range req.ModifyDeadlineSeconds { + if mds == 0 { + zeroes++ + } + } + recordStat(s.ctx, NackCount, int64(zeroes)) + recordStat(s.ctx, ModAckCount, int64(len(req.ModifyDeadlineSeconds)-zeroes)) + recordStat(s.ctx, StreamRequestCount, 1) return spc.Send(req) }) } @@ -130,7 +146,11 @@ func (s *pullStream) Recv() (*pb.StreamingPullResponse, error) { var res *pb.StreamingPullResponse err := s.call(func(spc pb.Subscriber_StreamingPullClient) error { var err error + recordStat(s.ctx, StreamResponseCount, 1) res, err = spc.Recv() + if err == nil { + recordStat(s.ctx, PullCount, int64(len(res.ReceivedMessages))) + } return err }) return res, err diff --git a/vendor/cloud.google.com/go/pubsub/service.go b/vendor/cloud.google.com/go/pubsub/service.go index 5bbf34aa8..c63e4d92d 100644 --- a/vendor/cloud.google.com/go/pubsub/service.go +++ b/vendor/cloud.google.com/go/pubsub/service.go @@ -61,104 +61,6 @@ func trunc32(i int64) int32 { return int32(i) } -// func newStreamingPuller(ctx context.Context, subc *vkit.SubscriberClient, subName string, ackDeadlineSecs int32) *streamingPuller { -// p := &streamingPuller{ -// ctx: ctx, -// subName: subName, -// ackDeadlineSecs: ackDeadlineSecs, -// subc: subc, -// } -// p.c = sync.NewCond(&p.mu) -// return p -// } - -// type streamingPuller struct { -// ctx context.Context -// subName string -// ackDeadlineSecs int32 -// subc *vkit.SubscriberClient - -// mu sync.Mutex -// c *sync.Cond -// inFlight bool -// closed bool // set after CloseSend called -// spc pb.Subscriber_StreamingPullClient -// err error -// } - -// // open establishes (or re-establishes) a stream for pulling messages. -// // It takes care that only one RPC is in flight at a time. -// func (p *streamingPuller) open() error { -// p.c.L.Lock() -// defer p.c.L.Unlock() -// p.openLocked() -// return p.err -// } - -// func (p *streamingPuller) openLocked() { -// if p.inFlight { -// // Another goroutine is opening; wait for it. -// for p.inFlight { -// p.c.Wait() -// } -// return -// } -// // No opens in flight; start one. -// // Keep the lock held, to avoid a race where we -// // close the old stream while opening a new one. -// p.inFlight = true -// spc, err := p.subc.StreamingPull(p.ctx, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes))) -// if err == nil { -// err = spc.Send(&pb.StreamingPullRequest{ -// Subscription: p.subName, -// StreamAckDeadlineSeconds: p.ackDeadlineSecs, -// }) -// } -// p.spc = spc -// p.err = err -// p.inFlight = false -// p.c.Broadcast() -// } - -// func (p *streamingPuller) call(f func(pb.Subscriber_StreamingPullClient) error) error { -// p.c.L.Lock() -// defer p.c.L.Unlock() -// // Wait for an open in flight. 
-// for p.inFlight { -// p.c.Wait() -// } -// var err error -// var bo gax.Backoff -// for { -// select { -// case <-p.ctx.Done(): -// p.err = p.ctx.Err() -// default: -// } -// if p.err != nil { -// return p.err -// } -// spc := p.spc -// // Do not call f with the lock held. Only one goroutine calls Send -// // (streamingMessageIterator.sender) and only one calls Recv -// // (streamingMessageIterator.receiver). If we locked, then a -// // blocked Recv would prevent a Send from happening. -// p.c.L.Unlock() -// err = f(spc) -// p.c.L.Lock() -// if !p.closed && err != nil && isRetryable(err) { -// // Sleep with exponential backoff. Normally we wouldn't hold the lock while sleeping, -// // but here it can't do any harm, since the stream is broken anyway. -// gax.Sleep(p.ctx, bo.Pause()) -// p.openLocked() -// continue -// } -// // Not an error, or not a retryable error; stop retrying. -// p.err = err -// return err -// } -// } - // Logic from https://github.com/GoogleCloudPlatform/google-cloud-java/blob/master/google-cloud-pubsub/src/main/java/com/google/cloud/pubsub/v1/StatusUtil.java. func isRetryable(err error) bool { s, ok := status.FromError(err) @@ -175,43 +77,6 @@ func isRetryable(err error) bool { } } -// func (p *streamingPuller) fetchMessages() ([]*Message, error) { -// var res *pb.StreamingPullResponse -// err := p.call(func(spc pb.Subscriber_StreamingPullClient) error { -// var err error -// res, err = spc.Recv() -// return err -// }) -// if err != nil { -// return nil, err -// } -// return convertMessages(res.ReceivedMessages) -// } - -// func (p *streamingPuller) send(req *pb.StreamingPullRequest) error { -// // Note: len(modAckIDs) == len(modSecs) -// var rest *pb.StreamingPullRequest -// for len(req.AckIds) > 0 || len(req.ModifyDeadlineAckIds) > 0 { -// req, rest = splitRequest(req, maxPayload) -// err := p.call(func(spc pb.Subscriber_StreamingPullClient) error { -// x := spc.Send(req) -// return x -// }) -// if err != nil { -// return err -// } -// req = rest -// } -// return nil -// } - -// func (p *streamingPuller) closeSend() { -// p.mu.Lock() -// p.closed = true -// p.spc.CloseSend() -// p.mu.Unlock() -// } - // Split req into a prefix that is smaller than maxSize, and a remainder. func splitRequest(req *pb.StreamingPullRequest, maxSize int) (prefix, remainder *pb.StreamingPullRequest) { const int32Bytes = 4 diff --git a/vendor/cloud.google.com/go/pubsub/streaming_pull_test.go b/vendor/cloud.google.com/go/pubsub/streaming_pull_test.go index d618bcd4f..b7b119c36 100644 --- a/vendor/cloud.google.com/go/pubsub/streaming_pull_test.go +++ b/vendor/cloud.google.com/go/pubsub/streaming_pull_test.go @@ -27,6 +27,7 @@ import ( "time" "cloud.google.com/go/internal/testutil" + "google.golang.org/grpc/status" tspb "github.com/golang/protobuf/ptypes/timestamp" "github.com/google/go-cmp/cmp" @@ -102,7 +103,7 @@ func testStreamingPullIteration(t *testing.T, client *Client, server *fakeServer t.Errorf("%d: no message for ackID %q", i, want.ackID) continue } - if !testutil.Equal(got, want, cmp.AllowUnexported(Message{}), cmpopts.IgnoreTypes(func(string, bool) {})) { + if !testutil.Equal(got, want, cmp.AllowUnexported(Message{}), cmpopts.IgnoreTypes(time.Time{}, func(string, bool, time.Time) {})) { t.Errorf("%d: got\n%#v\nwant\n%#v", i, got, want) } } @@ -127,7 +128,7 @@ func TestStreamingPullError(t *testing.T) { // acked. 
client, server := newFake(t) server.addStreamingPullMessages(testMessages[:1]) - server.addStreamingPullError(grpc.Errorf(codes.Unknown, "")) + server.addStreamingPullError(status.Errorf(codes.Unknown, "")) sub := newTestSubscription(t, client, "s") // Use only one goroutine, since the fake server is configured to // return only one error. @@ -180,8 +181,8 @@ func TestStreamingPullRetry(t *testing.T) { server.addStreamingPullError(io.EOF) server.addStreamingPullError(io.EOF) server.addStreamingPullMessages(testMessages[1:2]) - server.addStreamingPullError(grpc.Errorf(codes.Unavailable, "")) - server.addStreamingPullError(grpc.Errorf(codes.Unavailable, "")) + server.addStreamingPullError(status.Errorf(codes.Unavailable, "")) + server.addStreamingPullError(status.Errorf(codes.Unavailable, "")) server.addStreamingPullMessages(testMessages[2:]) testStreamingPullIteration(t, client, server, testMessages) diff --git a/vendor/cloud.google.com/go/pubsub/subscription.go b/vendor/cloud.google.com/go/pubsub/subscription.go index c1fd65323..93054d536 100644 --- a/vendor/cloud.google.com/go/pubsub/subscription.go +++ b/vendor/cloud.google.com/go/pubsub/subscription.go @@ -50,9 +50,14 @@ type Subscription struct { // Subscription creates a reference to a subscription. func (c *Client) Subscription(id string) *Subscription { + return c.SubscriptionInProject(id, c.projectID) +} + +// SubscriptionInProject creates a reference to a subscription in a given project. +func (c *Client) SubscriptionInProject(id, projectID string) *Subscription { return &Subscription{ c: c, - name: fmt.Sprintf("projects/%s/subscriptions/%s", c.projectID, id), + name: fmt.Sprintf("projects/%s/subscriptions/%s", projectID, id), } } @@ -164,9 +169,13 @@ func (cfg *SubscriptionConfig) toProto(name string) *pb.Subscription { } func protoToSubscriptionConfig(pbSub *pb.Subscription, c *Client) (SubscriptionConfig, error) { - rd, err := ptypes.Duration(pbSub.MessageRetentionDuration) - if err != nil { - return SubscriptionConfig{}, err + rd := time.Hour * 24 * 7 + var err error + if pbSub.MessageRetentionDuration != nil { + rd, err = ptypes.Duration(pbSub.MessageRetentionDuration) + if err != nil { + return SubscriptionConfig{}, err + } } return SubscriptionConfig{ Topic: newTopic(c, pbSub.Topic), @@ -188,7 +197,11 @@ type ReceiveSettings struct { // // The Subscription will automatically extend the ack deadline of all // fetched Messages for the duration specified. Automatic deadline - // extension may be disabled by specifying a duration less than 1. + // extension may be disabled by specifying a duration less than 0. + // + // Connections may be terminated if they last longer than 30m, which + // effectively makes that the ceiling for this value. For longer message + // processing, see the example at https://godoc.org/cloud.google.com/go/pubsub/apiv1#example_SubscriberClient_Pull_lengthyClientProcessing MaxExtension time.Duration // MaxOutstandingMessages is the maximum number of unprocessed messages @@ -484,13 +497,15 @@ func (s *Subscription) receive(ctx context.Context, po *pullOptions, fc *flowCon } return nil } + old := msg.doneFunc + msgLen := len(msg.Data) + msg.doneFunc = func(ackID string, ack bool, receiveTime time.Time) { + defer fc.release(msgLen) + old(ackID, ack, receiveTime) + } wg.Add(1) go func() { - // TODO(jba): call release when the message is available for GC. - // This considers the message to be released when - // f is finished, but f may ack early or not at all. 
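+			// Flow control is released by the wrapped doneFunc above, once the
+			// message is acked or nacked, rather than when f returns.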
defer wg.Done() - defer fc.release(len(msg.Data)) f(ctx2, msg) }() } diff --git a/vendor/cloud.google.com/go/pubsub/topic_test.go b/vendor/cloud.google.com/go/pubsub/topic_test.go index 3cb892c86..0a6bc2f83 100644 --- a/vendor/cloud.google.com/go/pubsub/topic_test.go +++ b/vendor/cloud.google.com/go/pubsub/topic_test.go @@ -21,6 +21,7 @@ import ( "time" "cloud.google.com/go/internal/testutil" + "google.golang.org/grpc/status" "golang.org/x/net/context" "google.golang.org/api/iterator" @@ -135,7 +136,7 @@ type alwaysFailPublish struct { } func (s *alwaysFailPublish) Publish(ctx context.Context, req *pubsubpb.PublishRequest) (*pubsubpb.PublishResponse, error) { - return nil, grpc.Errorf(codes.Unavailable, "try again") + return nil, status.Errorf(codes.Unavailable, "try again") } func mustCreateTopic(t *testing.T, c *Client, id string) *Topic { diff --git a/vendor/cloud.google.com/go/regen-gapic.sh b/vendor/cloud.google.com/go/regen-gapic.sh index 158b1b0db..9b345c972 100755 --- a/vendor/cloud.google.com/go/regen-gapic.sh +++ b/vendor/cloud.google.com/go/regen-gapic.sh @@ -43,7 +43,8 @@ google/firestore/artman_firestore.yaml google/logging/artman_logging.yaml google/longrunning/artman_longrunning.yaml google/monitoring/artman_monitoring.yaml -google/privacy/dlp/artman_dlp.yaml +google/privacy/dlp/artman_dlp_v2beta1.yaml +google/privacy/dlp/artman_dlp_v2.yaml google/pubsub/artman_pubsub.yaml google/spanner/admin/database/artman_spanner_admin_database.yaml google/spanner/admin/instance/artman_spanner_admin_instance.yaml @@ -52,7 +53,7 @@ google/spanner/artman_spanner.yaml for api in "${APIS[@]}"; do rm -rf artman-genfiles/* - artman2 --config "$api" generate go_gapic + artman --config "$api" generate go_gapic cp -r artman-genfiles/gapi-*/cloud.google.com/go/* $GOPATH/src/cloud.google.com/go/ done diff --git a/vendor/cloud.google.com/go/rpcreplay/fake_test.go b/vendor/cloud.google.com/go/rpcreplay/fake_test.go index 86ef0081c..6953e6b67 100644 --- a/vendor/cloud.google.com/go/rpcreplay/fake_test.go +++ b/vendor/cloud.google.com/go/rpcreplay/fake_test.go @@ -22,6 +22,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" pb "cloud.google.com/go/rpcreplay/proto/intstore" ) @@ -74,7 +75,7 @@ func (s *intStoreServer) setItem(item *pb.Item) int32 { func (s *intStoreServer) Get(_ context.Context, req *pb.GetRequest) (*pb.Item, error) { val, ok := s.items[req.Name] if !ok { - return nil, grpc.Errorf(codes.NotFound, "%q", req.Name) + return nil, status.Errorf(codes.NotFound, "%q", req.Name) } return &pb.Item{Name: req.Name, Value: val}, nil } diff --git a/vendor/cloud.google.com/go/spanner/apiv1/mock_test.go b/vendor/cloud.google.com/go/spanner/apiv1/mock_test.go index 1de2c5118..3315384fa 100644 --- a/vendor/cloud.google.com/go/spanner/apiv1/mock_test.go +++ b/vendor/cloud.google.com/go/spanner/apiv1/mock_test.go @@ -203,6 +203,30 @@ func (s *mockSpannerServer) Rollback(ctx context.Context, req *spannerpb.Rollbac return s.resps[0].(*emptypb.Empty), nil } +func (s *mockSpannerServer) PartitionQuery(ctx context.Context, req *spannerpb.PartitionQueryRequest) (*spannerpb.PartitionResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*spannerpb.PartitionResponse), 
nil +} + +func (s *mockSpannerServer) PartitionRead(ctx context.Context, req *spannerpb.PartitionReadRequest) (*spannerpb.PartitionResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*spannerpb.PartitionResponse), nil +} + // clientOpt is the option tests should use to connect to the test server. // It is initialized by TestMain. var clientOpt option.ClientOption @@ -935,3 +959,127 @@ func TestSpannerRollbackError(t *testing.T) { t.Errorf("got error code %q, want %q", c, errCode) } } +func TestSpannerPartitionQuery(t *testing.T) { + var expectedResponse *spannerpb.PartitionResponse = &spannerpb.PartitionResponse{} + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedSession string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var sql string = "sql114126" + var request = &spannerpb.PartitionQueryRequest{ + Session: formattedSession, + Sql: sql, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.PartitionQuery(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpannerPartitionQueryError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedSession string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var sql string = "sql114126" + var request = &spannerpb.PartitionQueryRequest{ + Session: formattedSession, + Sql: sql, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.PartitionQuery(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpannerPartitionRead(t *testing.T) { + var expectedResponse *spannerpb.PartitionResponse = &spannerpb.PartitionResponse{} + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedSession string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var table string = "table110115790" + var keySet *spannerpb.KeySet = &spannerpb.KeySet{} + var request = &spannerpb.PartitionReadRequest{ + Session: formattedSession, + Table: table, + KeySet: keySet, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.PartitionRead(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; 
!proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpannerPartitionReadError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedSession string = fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", "[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var table string = "table110115790" + var keySet *spannerpb.KeySet = &spannerpb.KeySet{} + var request = &spannerpb.PartitionReadRequest{ + Session: formattedSession, + Table: table, + KeySet: keySet, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.PartitionRead(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/spanner/apiv1/spanner_client.go b/vendor/cloud.google.com/go/spanner/apiv1/spanner_client.go index 113069211..0ba50d3c3 100644 --- a/vendor/cloud.google.com/go/spanner/apiv1/spanner_client.go +++ b/vendor/cloud.google.com/go/spanner/apiv1/spanner_client.go @@ -45,6 +45,8 @@ type CallOptions struct { BeginTransaction []gax.CallOption Commit []gax.CallOption Rollback []gax.CallOption + PartitionQuery []gax.CallOption + PartitionRead []gax.CallOption } func defaultClientOptions() []option.ClientOption { @@ -92,6 +94,8 @@ func defaultCallOptions() *CallOptions { BeginTransaction: retry[[2]string{"default", "idempotent"}], Commit: retry[[2]string{"long_running", "long_running"}], Rollback: retry[[2]string{"default", "idempotent"}], + PartitionQuery: retry[[2]string{"default", "idempotent"}], + PartitionRead: retry[[2]string{"default", "idempotent"}], } } @@ -405,6 +409,52 @@ func (c *Client) Rollback(ctx context.Context, req *spannerpb.RollbackRequest, o return err } +// PartitionQuery creates a set of partition tokens that can be used to execute a query +// operation in parallel. Each of the returned partition tokens can be used +// by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to specify a subset +// of the query result to read. The same session and read-only transaction +// must be used by the PartitionQueryRequest used to create the +// partition tokens and the ExecuteSqlRequests that use the partition tokens. +// Partition tokens become invalid when the session used to create them +// is deleted or begins a new transaction. +func (c *Client) PartitionQuery(ctx context.Context, req *spannerpb.PartitionQueryRequest, opts ...gax.CallOption) (*spannerpb.PartitionResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.PartitionQuery[0:len(c.CallOptions.PartitionQuery):len(c.CallOptions.PartitionQuery)], opts...) + var resp *spannerpb.PartitionResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.PartitionQuery(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// PartitionRead creates a set of partition tokens that can be used to execute a read +// operation in parallel. Each of the returned partition tokens can be used +// by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a subset of the read +// result to read. 
The same session and read-only transaction must be used by +// the PartitionReadRequest used to create the partition tokens and the +// ReadRequests that use the partition tokens. +// Partition tokens become invalid when the session used to create them +// is deleted or begins a new transaction. +func (c *Client) PartitionRead(ctx context.Context, req *spannerpb.PartitionReadRequest, opts ...gax.CallOption) (*spannerpb.PartitionResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.PartitionRead[0:len(c.CallOptions.PartitionRead):len(c.CallOptions.PartitionRead)], opts...) + var resp *spannerpb.PartitionResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.PartitionRead(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + // SessionIterator manages a stream of *spannerpb.Session. type SessionIterator struct { items []*spannerpb.Session diff --git a/vendor/cloud.google.com/go/spanner/apiv1/spanner_client_example_test.go b/vendor/cloud.google.com/go/spanner/apiv1/spanner_client_example_test.go index a4f1acc2d..6806af9de 100644 --- a/vendor/cloud.google.com/go/spanner/apiv1/spanner_client_example_test.go +++ b/vendor/cloud.google.com/go/spanner/apiv1/spanner_client_example_test.go @@ -252,3 +252,39 @@ func ExampleClient_Rollback() { // TODO: Handle error. } } + +func ExampleClient_PartitionQuery() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &spannerpb.PartitionQueryRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.PartitionQuery(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_PartitionRead() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &spannerpb.PartitionReadRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.PartitionRead(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/spanner/batch.go b/vendor/cloud.google.com/go/spanner/batch.go new file mode 100644 index 000000000..9c25f8e07 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/batch.go @@ -0,0 +1,345 @@ +/* +Copyright 2018 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "bytes" + "encoding/gob" + "log" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// BatchReadOnlyTransaction is a ReadOnlyTransaction that allows for exporting +// arbitrarily large amounts of data from Cloud Spanner databases. +// BatchReadOnlyTransaction partitions a read/query request. 
Read/query request +// can then be executed independently over each partition while observing the +// same snapshot of the database. BatchReadOnlyTransaction can also be shared +// across multiple clients by passing around the BatchReadOnlyTransactionID and +// then recreating the transaction using Client.BatchReadOnlyTransactionFromID. +// +// Note: if a client is used only to run partitions, you can +// create it using a ClientConfig with both MinOpened and MaxIdle set to +// zero to avoid creating unnecessary sessions. You can also avoid excess +// gRPC channels by setting ClientConfig.NumChannels to the number of +// concurrently active BatchReadOnlyTransactions you expect to have. +type BatchReadOnlyTransaction struct { + ReadOnlyTransaction + ID BatchReadOnlyTransactionID +} + +// BatchReadOnlyTransactionID is a unique identifier for a +// BatchReadOnlyTransaction. It can be used to re-create a +// BatchReadOnlyTransaction on a different machine or process by calling +// Client.BatchReadOnlyTransactionFromID. +type BatchReadOnlyTransactionID struct { + // unique ID for the transaction. + tid transactionID + // sid is the id of the Cloud Spanner session used for this transaction. + sid string + // rts is the read timestamp of this transaction. + rts time.Time +} + +// Partition defines a segment of data to be read in a batch read or query. A +// partition can be serialized and processed across several different machines +// or processes. +type Partition struct { + pt []byte + qreq *sppb.ExecuteSqlRequest + rreq *sppb.ReadRequest +} + +// PartitionOptions specifies options for a PartitionQueryRequest and +// PartitionReadRequest. See +// https://godoc.org/google.golang.org/genproto/googleapis/spanner/v1#PartitionOptions +// for more details. +type PartitionOptions struct { + // The desired data size for each partition generated. + PartitionBytes int64 + // The desired maximum number of partitions to return. + MaxPartitions int64 +} + +// toProto converts a spanner.PartitionOptions into a sppb.PartitionOptions +func (opt PartitionOptions) toProto() *sppb.PartitionOptions { + return &sppb.PartitionOptions{ + PartitionSizeBytes: opt.PartitionBytes, + MaxPartitions: opt.MaxPartitions, + } +} + +// PartitionRead returns a list of Partitions that can be used to read rows from +// the database. These partitions can be executed across multiple processes, +// even across different machines. The partition size and count hints can be +// configured using PartitionOptions. +func (t *BatchReadOnlyTransaction) PartitionRead(ctx context.Context, table string, keys KeySet, columns []string, opt PartitionOptions) ([]*Partition, error) { + return t.PartitionReadUsingIndex(ctx, table, "", keys, columns, opt) +} + +// PartitionReadUsingIndex returns a list of Partitions that can be used to read +// rows from the database using an index. 
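The batch.go comment above suggests that a client used only to run partitions should avoid pre-opened and idle pooled sessions and size NumChannels to the number of concurrently active BatchReadOnlyTransactions. A minimal configuration sketch, not part of the vendored patch; the database path and channel count are placeholders:

package main

import (
	"log"

	"cloud.google.com/go/spanner"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	client, err := spanner.NewClientWithConfig(ctx,
		"projects/P/instances/I/databases/D", // placeholder database path
		spanner.ClientConfig{
			NumChannels: 2, // expected number of concurrent BatchReadOnlyTransactions
			SessionPoolConfig: spanner.SessionPoolConfig{
				MinOpened: 0, // no sessions opened up front
				MaxIdle:   0, // no idle sessions kept around
			},
		})
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	_ = client // hand the client to partition workers
}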
+func (t *BatchReadOnlyTransaction) PartitionReadUsingIndex(ctx context.Context, table, index string, keys KeySet, columns []string, opt PartitionOptions) ([]*Partition, error) { + sh, ts, err := t.acquire(ctx) + if err != nil { + return nil, err + } + sid, client := sh.getID(), sh.getClient() + var ( + kset *sppb.KeySet + resp *sppb.PartitionResponse + partitions []*Partition + ) + kset, err = keys.keySetProto() + // request Partitions + if err != nil { + return nil, err + } + resp, err = client.PartitionRead(ctx, &sppb.PartitionReadRequest{ + Session: sid, + Transaction: ts, + Table: table, + Index: index, + Columns: columns, + KeySet: kset, + PartitionOptions: opt.toProto(), + }) + // prepare ReadRequest + req := &sppb.ReadRequest{ + Session: sid, + Transaction: ts, + Table: table, + Index: index, + Columns: columns, + KeySet: kset, + } + // generate Partitions + for _, p := range resp.GetPartitions() { + partitions = append(partitions, &Partition{ + pt: p.PartitionToken, + rreq: req, + }) + } + return partitions, err +} + +// PartitionQuery returns a list of Partitions that can be used to execute a query against the database. +func (t *BatchReadOnlyTransaction) PartitionQuery(ctx context.Context, statement Statement, opt PartitionOptions) ([]*Partition, error) { + sh, ts, err := t.acquire(ctx) + if err != nil { + return nil, err + } + sid, client := sh.getID(), sh.getClient() + var ( + resp *sppb.PartitionResponse + partitions []*Partition + ) + // request Partitions + req := &sppb.PartitionQueryRequest{ + Session: sid, + Transaction: ts, + Sql: statement.SQL, + PartitionOptions: opt.toProto(), + } + if err := statement.bindParams(req); err != nil { + return nil, err + } + resp, err = client.PartitionQuery(ctx, req) + // prepare ExecuteSqlRequest + r := &sppb.ExecuteSqlRequest{ + Session: sid, + Transaction: ts, + Sql: statement.SQL, + } + if err := statement.bindParams(r); err != nil { + return nil, err + } + // generate Partitions + for _, p := range resp.GetPartitions() { + partitions = append(partitions, &Partition{ + pt: p.PartitionToken, + qreq: r, + }) + } + return partitions, err +} + +// release implements txReadEnv.release, noop. +func (t *BatchReadOnlyTransaction) release(err error) { +} + +// setTimestamp implements txReadEnv.setTimestamp, noop. +// read timestamp is ready on txn initialization, avoid contending writing to it with future partitions. +func (t *BatchReadOnlyTransaction) setTimestamp(ts time.Time) { +} + +// Close marks the txn as closed. +func (t *BatchReadOnlyTransaction) Close() { + t.mu.Lock() + defer t.mu.Unlock() + t.state = txClosed +} + +// Cleanup cleans up all the resources used by this transaction and makes +// it unusable. Once this method is invoked, the transaction is no longer +// usable anywhere, including other clients/processes with which this +// transaction was shared. +// +// Calling Cleanup is optional, but recommended. If Cleanup is not called, the +// transaction's resources will be freed when the session expires on the backend and +// is deleted. For more information about recycled sessions, see +// https://cloud.google.com/spanner/docs/sessions. 
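PartitionQuery usage is exercised by the ExampleClient_BatchReadOnlyTransaction added later in this patch, but PartitionRead is not. A minimal sketch, not part of the vendored patch, of partitioning a read over a hypothetical Singers table and draining each partition in turn; error handling is abbreviated:

package example

import (
	"cloud.google.com/go/spanner"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
)

func readInPartitions(ctx context.Context, client *spanner.Client) error {
	txn, err := client.BatchReadOnlyTransaction(ctx, spanner.StrongRead())
	if err != nil {
		return err
	}
	defer txn.Cleanup(ctx)

	// Hypothetical table and columns; MaxPartitions is only a hint.
	partitions, err := txn.PartitionRead(ctx, "Singers", spanner.AllKeys(),
		[]string{"SingerId", "FirstName"}, spanner.PartitionOptions{MaxPartitions: 4})
	if err != nil {
		return err
	}
	for _, p := range partitions {
		iter := txn.Execute(ctx, p)
		for {
			row, err := iter.Next()
			if err == iterator.Done {
				break
			}
			if err != nil {
				iter.Stop()
				return err
			}
			_ = row // process the row
		}
		iter.Stop()
	}
	return nil
}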
+func (t *BatchReadOnlyTransaction) Cleanup(ctx context.Context) { + t.Close() + t.mu.Lock() + defer t.mu.Unlock() + sh := t.sh + if sh == nil { + return + } + t.sh = nil + sid, client := sh.getID(), sh.getClient() + err := runRetryable(ctx, func(ctx context.Context) error { + _, e := client.DeleteSession(ctx, &sppb.DeleteSessionRequest{Name: sid}) + return e + }) + if err != nil { + log.Printf("Failed to delete session %v. Error: %v", sid, err) + } +} + +// Execute runs a single Partition obtained from PartitionRead or PartitionQuery. +func (t *BatchReadOnlyTransaction) Execute(ctx context.Context, p *Partition) *RowIterator { + var ( + sh *sessionHandle + err error + rpc func(ct context.Context, resumeToken []byte) (streamingReceiver, error) + ) + if sh, _, err = t.acquire(ctx); err != nil { + return &RowIterator{err: err} + } + client := sh.getClient() + if client == nil { + // Might happen if transaction is closed in the middle of a API call. + return &RowIterator{err: errSessionClosed(sh)} + } + // read or query partition + if p.rreq != nil { + p.rreq.PartitionToken = p.pt + rpc = func(ctx context.Context, resumeToken []byte) (streamingReceiver, error) { + p.rreq.ResumeToken = resumeToken + return client.StreamingRead(ctx, p.rreq) + } + } else { + p.qreq.PartitionToken = p.pt + rpc = func(ctx context.Context, resumeToken []byte) (streamingReceiver, error) { + p.qreq.ResumeToken = resumeToken + return client.ExecuteStreamingSql(ctx, p.qreq) + } + } + return stream( + contextWithOutgoingMetadata(ctx, sh.getMetadata()), + rpc, + t.setTimestamp, + t.release) +} + +// MarshalBinary implements BinaryMarshaler. +func (tid BatchReadOnlyTransactionID) MarshalBinary() (data []byte, err error) { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + if err := enc.Encode(tid.tid); err != nil { + return nil, err + } + if err := enc.Encode(tid.sid); err != nil { + return nil, err + } + if err := enc.Encode(tid.rts); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// UnmarshalBinary implements BinaryUnmarshaler. +func (tid *BatchReadOnlyTransactionID) UnmarshalBinary(data []byte) error { + dec := gob.NewDecoder(bytes.NewReader(data)) + if err := dec.Decode(&tid.tid); err != nil { + return err + } + if err := dec.Decode(&tid.sid); err != nil { + return err + } + return dec.Decode(&tid.rts) +} + +// MarshalBinary implements BinaryMarshaler. +func (p Partition) MarshalBinary() (data []byte, err error) { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + if err := enc.Encode(p.pt); err != nil { + return nil, err + } + var isReadPartition bool + var req proto.Message + if p.rreq != nil { + isReadPartition = true + req = p.rreq + } else { + isReadPartition = false + req = p.qreq + } + if err := enc.Encode(isReadPartition); err != nil { + return nil, err + } + if data, err = proto.Marshal(req); err != nil { + return nil, err + } + if err := enc.Encode(data); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// UnmarshalBinary implements BinaryUnmarshaler. 
+func (p *Partition) UnmarshalBinary(data []byte) error { + var ( + isReadPartition bool + d []byte + err error + ) + dec := gob.NewDecoder(bytes.NewReader(data)) + if err := dec.Decode(&p.pt); err != nil { + return err + } + if err := dec.Decode(&isReadPartition); err != nil { + return err + } + if err := dec.Decode(&d); err != nil { + return err + } + if isReadPartition { + p.rreq = &sppb.ReadRequest{} + err = proto.Unmarshal(d, p.rreq) + } else { + p.qreq = &sppb.ExecuteSqlRequest{} + err = proto.Unmarshal(d, p.qreq) + } + return err +} diff --git a/vendor/cloud.google.com/go/spanner/batch_test.go b/vendor/cloud.google.com/go/spanner/batch_test.go new file mode 100644 index 000000000..e30c140df --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/batch_test.go @@ -0,0 +1,73 @@ +/* +Copyright 2018 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "testing" + "time" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +func TestPartitionRoundTrip(t *testing.T) { + t.Parallel() + for i, want := range []Partition{ + {rreq: &sppb.ReadRequest{Table: "t"}}, + {qreq: &sppb.ExecuteSqlRequest{Sql: "sql"}}, + } { + got := serdesPartition(t, i, &want) + if !testEqual(got, want) { + t.Errorf("got: %#v\nwant:%#v", got, want) + } + } +} + +func TestBROTIDRoundTrip(t *testing.T) { + t.Parallel() + tm := time.Now() + want := BatchReadOnlyTransactionID{ + tid: []byte("tid"), + sid: "sid", + rts: tm, + } + data, err := want.MarshalBinary() + if err != nil { + t.Fatal(err) + } + var got BatchReadOnlyTransactionID + if err := got.UnmarshalBinary(data); err != nil { + t.Fatal(err) + } + if !testEqual(got, want) { + t.Errorf("got: %#v\nwant:%#v", got, want) + } +} + +// serdesPartition is a helper that serialize a Partition then deserialize it +func serdesPartition(t *testing.T, i int, p1 *Partition) (p2 Partition) { + var ( + data []byte + err error + ) + if data, err = p1.MarshalBinary(); err != nil { + t.Fatalf("#%d: encoding failed %v", i, err) + } + if err = p2.UnmarshalBinary(data); err != nil { + t.Fatalf("#%d: decoding failed %v", i, err) + } + return p2 +} diff --git a/vendor/cloud.google.com/go/spanner/client.go b/vendor/cloud.google.com/go/spanner/client.go index 2d7191b5c..bb02d9c9d 100644 --- a/vendor/cloud.google.com/go/spanner/client.go +++ b/vendor/cloud.google.com/go/spanner/client.go @@ -18,6 +18,7 @@ package spanner import ( "fmt" + "log" "regexp" "sync/atomic" "time" @@ -33,7 +34,7 @@ import ( ) const ( - prodAddr = "spanner.googleapis.com:443" + endpoint = "spanner.googleapis.com:443" // resourcePrefixHeader is the name of the metadata header used to indicate // the resource being operated on. 
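The MarshalBinary/UnmarshalBinary pair above exists so that a BatchReadOnlyTransactionID and its Partitions can be handed to other processes. A minimal two-sided sketch, not part of the vendored patch; the consumer rebuilds the transaction with Client.BatchReadOnlyTransactionFromID, which this patch adds to client.go further below, and transport of the byte slices is left to the caller:

package example

import (
	"cloud.google.com/go/spanner"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
)

// Producer side: serialize the transaction ID and one partition.
func shipPartition(txn *spanner.BatchReadOnlyTransaction, p *spanner.Partition) (tidBytes, pBytes []byte, err error) {
	if tidBytes, err = txn.ID.MarshalBinary(); err != nil {
		return nil, nil, err
	}
	if pBytes, err = p.MarshalBinary(); err != nil {
		return nil, nil, err
	}
	return tidBytes, pBytes, nil
}

// Consumer side: rebuild the transaction and execute the partition.
func runPartition(ctx context.Context, client *spanner.Client, tidBytes, pBytes []byte) error {
	var tid spanner.BatchReadOnlyTransactionID
	if err := tid.UnmarshalBinary(tidBytes); err != nil {
		return err
	}
	var p spanner.Partition
	if err := p.UnmarshalBinary(pBytes); err != nil {
		return err
	}
	txn := client.BatchReadOnlyTransactionFromID(tid)
	iter := txn.Execute(ctx, &p)
	defer iter.Stop()
	for {
		row, err := iter.Next()
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		_ = row // process the row
	}
}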
@@ -126,7 +127,7 @@ func NewClientWithConfig(ctx context.Context, database string, config ClientConf xGoogHeaderKey, xGoogHeaderVal), } allOpts := []option.ClientOption{ - option.WithEndpoint(prodAddr), + option.WithEndpoint(endpoint), option.WithScopes(Scope), option.WithGRPCDialOption( grpc.WithDefaultCallOptions( @@ -135,7 +136,6 @@ func NewClientWithConfig(ctx context.Context, database string, config ClientConf ), ), } - allOpts = append(allOpts, openCensusOptions()...) allOpts = append(allOpts, opts...) // Prepare gRPC channels. if config.NumChannels == 0 { @@ -219,6 +219,112 @@ func (c *Client) ReadOnlyTransaction() *ReadOnlyTransaction { return t } +// BatchReadOnlyTransaction returns a BatchReadOnlyTransaction that can be used +// for partitioned reads or queries from a snapshot of the database. This is +// useful in batch processing pipelines where one wants to divide the work of +// reading from the database across multiple machines. +// +// Note: This transaction does not use the underlying session pool but creates a +// new session each time, and the session is reused across clients. +// +// You should call Close() after the txn is no longer needed on local +// client, and call Cleanup() when the txn is finished for all clients, to free +// the session. +func (c *Client) BatchReadOnlyTransaction(ctx context.Context, tb TimestampBound) (*BatchReadOnlyTransaction, error) { + var ( + tx transactionID + rts time.Time + s *session + sh *sessionHandle + err error + ) + defer func() { + if err != nil && sh != nil { + e := runRetryable(ctx, func(ctx context.Context) error { + _, e := s.client.DeleteSession(ctx, &sppb.DeleteSessionRequest{Name: s.getID()}) + return e + }) + if e != nil { + log.Printf("Failed to delete session %v. Error: %v", s.getID(), e) + } + } + }() + // create session + sc := c.rrNext() + err = runRetryable(ctx, func(ctx context.Context) error { + sid, e := sc.CreateSession(ctx, &sppb.CreateSessionRequest{Database: c.database}) + if e != nil { + return e + } + // If no error, construct the new session. 
+ s = &session{valid: true, client: sc, id: sid.Name, createTime: time.Now(), md: c.md} + return nil + }) + if err != nil { + return nil, err + } + sh = &sessionHandle{session: s} + // begin transaction + err = runRetryable(contextWithOutgoingMetadata(ctx, sh.getMetadata()), func(ctx context.Context) error { + res, e := sh.getClient().BeginTransaction(ctx, &sppb.BeginTransactionRequest{ + Session: sh.getID(), + Options: &sppb.TransactionOptions{ + Mode: &sppb.TransactionOptions_ReadOnly_{ + ReadOnly: buildTransactionOptionsReadOnly(tb, true), + }, + }, + }) + if e != nil { + return e + } + tx = res.Id + if res.ReadTimestamp != nil { + rts = time.Unix(res.ReadTimestamp.Seconds, int64(res.ReadTimestamp.Nanos)) + } + return nil + }) + if err != nil { + return nil, err + } + + t := &BatchReadOnlyTransaction{ + ReadOnlyTransaction: ReadOnlyTransaction{ + tx: tx, + txReadyOrClosed: make(chan struct{}), + state: txActive, + sh: sh, + rts: rts, + }, + ID: BatchReadOnlyTransactionID{ + tid: tx, + sid: sh.getID(), + rts: rts, + }, + } + t.txReadOnly.txReadEnv = t + return t, nil +} + +// BatchReadOnlyTransactionFromID reconstruct a BatchReadOnlyTransaction from BatchReadOnlyTransactionID +func (c *Client) BatchReadOnlyTransactionFromID(tid BatchReadOnlyTransactionID) *BatchReadOnlyTransaction { + sc := c.rrNext() + s := &session{valid: true, client: sc, id: tid.sid, createTime: time.Now(), md: c.md} + sh := &sessionHandle{session: s} + + t := &BatchReadOnlyTransaction{ + ReadOnlyTransaction: ReadOnlyTransaction{ + tx: tid.tid, + txReadyOrClosed: make(chan struct{}), + state: txActive, + sh: sh, + rts: tid.rts, + }, + ID: tid, + } + t.txReadOnly.txReadEnv = t + return t +} + type transactionInProgressKey struct{} func checkNestedTxn(ctx context.Context) error { diff --git a/vendor/cloud.google.com/go/spanner/doc.go b/vendor/cloud.google.com/go/spanner/doc.go index 2f800e96e..97a3c9aa0 100644 --- a/vendor/cloud.google.com/go/spanner/doc.go +++ b/vendor/cloud.google.com/go/spanner/doc.go @@ -192,7 +192,7 @@ For Cloud Spanner columns that may contain NULL, use one of the NullXXX types, like NullString: var ns spanner.NullString - if err =: row.Column(0, &ns); err != nil { + if err := row.Column(0, &ns); err != nil { // TODO: Handle error. 
} if ns.Valid { diff --git a/vendor/cloud.google.com/go/spanner/errors_test.go b/vendor/cloud.google.com/go/spanner/errors_test.go index 4e48123a4..1385ef865 100644 --- a/vendor/cloud.google.com/go/spanner/errors_test.go +++ b/vendor/cloud.google.com/go/spanner/errors_test.go @@ -21,8 +21,8 @@ import ( "testing" "golang.org/x/net/context" - "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) func TestToSpannerError(t *testing.T) { @@ -33,7 +33,7 @@ func TestToSpannerError(t *testing.T) { {errors.New("wha?"), codes.Unknown}, {context.Canceled, codes.Canceled}, {context.DeadlineExceeded, codes.DeadlineExceeded}, - {grpc.Errorf(codes.ResourceExhausted, "so tired"), codes.ResourceExhausted}, + {status.Errorf(codes.ResourceExhausted, "so tired"), codes.ResourceExhausted}, {spannerErrorf(codes.InvalidArgument, "bad"), codes.InvalidArgument}, } { err := toSpannerError(test.err) diff --git a/vendor/cloud.google.com/go/spanner/examples_test.go b/vendor/cloud.google.com/go/spanner/examples_test.go index 9980b3182..a73f7a3e3 100644 --- a/vendor/cloud.google.com/go/spanner/examples_test.go +++ b/vendor/cloud.google.com/go/spanner/examples_test.go @@ -19,6 +19,7 @@ package spanner_test import ( "errors" "fmt" + "sync" "time" "cloud.google.com/go/spanner" @@ -550,3 +551,57 @@ func ExampleGenericColumnValue_Decode() { // int 42 // string my-text } + +func ExampleClient_BatchReadOnlyTransaction() { + ctx := context.Background() + var ( + client *spanner.Client + txn *spanner.BatchReadOnlyTransaction + err error + ) + if client, err = spanner.NewClient(ctx, myDB); err != nil { + // TODO: Handle error. + } + defer client.Close() + if txn, err = client.BatchReadOnlyTransaction(ctx, spanner.StrongRead()); err != nil { + // TODO: Handle error. + } + defer txn.Close() + + // Singer represents the elements in a row from the Singers table. + type Singer struct { + SingerID int64 + FirstName string + LastName string + SingerInfo []byte + } + stmt := spanner.Statement{SQL: "SELECT * FROM Singers;"} + partitions, err := txn.PartitionQuery(ctx, stmt, spanner.PartitionOptions{}) + if err != nil { + // TODO: Handle error. + } + // Note: here we use multiple goroutines, but you should use separate processes/machines. + wg := sync.WaitGroup{} + for i, p := range partitions { + wg.Add(1) + go func(i int, p *spanner.Partition) { + defer wg.Done() + iter := txn.Execute(ctx, p) + defer iter.Stop() + for { + row, err := iter.Next() + if err == iterator.Done { + break + } else if err != nil { + // TODO: Handle error. + } + var s Singer + if err := row.ToStruct(&s); err != nil { + // TODO: Handle error. + } + _ = s // TODO: Process the row. 
+ } + }(i, p) + } + wg.Wait() +} diff --git a/vendor/cloud.google.com/go/spanner/go18.go b/vendor/cloud.google.com/go/spanner/go18.go index 4e7ab6ced..81176a08a 100644 --- a/vendor/cloud.google.com/go/spanner/go18.go +++ b/vendor/cloud.google.com/go/spanner/go18.go @@ -19,29 +19,22 @@ package spanner import ( "fmt" - ocgrpc "go.opencensus.io/plugin/grpc" "go.opencensus.io/trace" "golang.org/x/net/context" - "google.golang.org/api/option" - "google.golang.org/grpc" ) -func openCensusOptions() []option.ClientOption { - return []option.ClientOption{ - option.WithGRPCDialOption(grpc.WithStatsHandler(ocgrpc.NewClientStatsHandler())), - } -} - func traceStartSpan(ctx context.Context, name string) context.Context { - return trace.StartSpan(ctx, name) + ctx, _ = trace.StartSpan(ctx, name) + return ctx } func traceEndSpan(ctx context.Context, err error) { span := trace.FromContext(ctx) if err != nil { + // TODO(jba): Add error code to the status. span.SetStatus(trace.Status{Message: err.Error()}) } - trace.EndSpan(ctx) + span.End() } func tracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) { @@ -50,15 +43,15 @@ func tracePrintf(ctx context.Context, attrMap map[string]interface{}, format str var a trace.Attribute switch v := v.(type) { case string: - a = trace.StringAttribute{k, v} + a = trace.StringAttribute(k, v) case bool: - a = trace.BoolAttribute{k, v} + a = trace.BoolAttribute(k, v) case int: - a = trace.Int64Attribute{k, int64(v)} + a = trace.Int64Attribute(k, int64(v)) case int64: - a = trace.Int64Attribute{k, v} + a = trace.Int64Attribute(k, v) default: - a = trace.StringAttribute{k, fmt.Sprintf("%#v", v)} + a = trace.StringAttribute(k, fmt.Sprintf("%#v", v)) } attrs = append(attrs, a) } diff --git a/vendor/cloud.google.com/go/spanner/internal/testutil/mockclient.go b/vendor/cloud.google.com/go/spanner/internal/testutil/mockclient.go index 43baee396..f137b48c1 100644 --- a/vendor/cloud.google.com/go/spanner/internal/testutil/mockclient.go +++ b/vendor/cloud.google.com/go/spanner/internal/testutil/mockclient.go @@ -24,6 +24,7 @@ import ( "time" "golang.org/x/net/context" + "google.golang.org/grpc/status" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes/empty" @@ -132,7 +133,7 @@ func (m *MockCloudSpannerClient) CreateSession(c context.Context, r *sppb.Create s := &sppb.Session{} if r.Database != "mockdb" { // Reject other databases - return s, grpc.Errorf(codes.NotFound, fmt.Sprintf("database not found: %v", r.Database)) + return s, status.Errorf(codes.NotFound, fmt.Sprintf("database not found: %v", r.Database)) } // Generate & record session name. s.Name = fmt.Sprintf("mockdb-%v", time.Now().UnixNano()) @@ -150,7 +151,7 @@ func (m *MockCloudSpannerClient) GetSession(c context.Context, r *sppb.GetSessio } m.pings = append(m.pings, r.Name) if _, ok := m.sessions[r.Name]; !ok { - return nil, grpc.Errorf(codes.NotFound, fmt.Sprintf("Session not found: %v", r.Name)) + return nil, status.Errorf(codes.NotFound, fmt.Sprintf("Session not found: %v", r.Name)) } return &sppb.Session{Name: r.Name}, nil } @@ -165,7 +166,7 @@ func (m *MockCloudSpannerClient) DeleteSession(c context.Context, r *sppb.Delete } if _, ok := m.sessions[r.Name]; !ok { // Session not found. - return &empty.Empty{}, grpc.Errorf(codes.NotFound, fmt.Sprintf("Session not found: %v", r.Name)) + return &empty.Empty{}, status.Errorf(codes.NotFound, fmt.Sprintf("Session not found: %v", r.Name)) } // Delete session from in-memory table. 
delete(m.sessions, r.Name) @@ -318,6 +319,18 @@ func (m *MockCloudSpannerClient) Rollback(c context.Context, r *sppb.RollbackReq return nil, nil } +// PartitionQuery is a placeholder for SpannerServer.PartitionQuery. +func (m *MockCloudSpannerClient) PartitionQuery(ctx context.Context, r *sppb.PartitionQueryRequest, opts ...grpc.CallOption) (*sppb.PartitionResponse, error) { + m.ready() + return nil, errors.New("Unimplemented") +} + +// PartitionRead is a placeholder for SpannerServer.PartitionRead. +func (m *MockCloudSpannerClient) PartitionRead(ctx context.Context, r *sppb.PartitionReadRequest, opts ...grpc.CallOption) (*sppb.PartitionResponse, error) { + m.ready() + return nil, errors.New("Unimplemented") +} + func (m *MockCloudSpannerClient) expectAction(methods ...string) (Action, error) { for _, me := range methods { if err := m.injErr[me]; err != nil { diff --git a/vendor/cloud.google.com/go/spanner/internal/testutil/mockserver.go b/vendor/cloud.google.com/go/spanner/internal/testutil/mockserver.go index c47a5d5f8..aace72f3a 100644 --- a/vendor/cloud.google.com/go/spanner/internal/testutil/mockserver.go +++ b/vendor/cloud.google.com/go/spanner/internal/testutil/mockserver.go @@ -18,10 +18,10 @@ package testutil import ( "encoding/binary" - "errors" "fmt" "io" "net" + "sync" "testing" "time" @@ -34,6 +34,7 @@ import ( sppb "google.golang.org/genproto/googleapis/spanner/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) var ( @@ -75,6 +76,10 @@ type MockCloudSpanner struct { msgs chan MockCtlMsg readTs time.Time next int + + mu sync.Mutex + nextSession int + sessions map[string]*sppb.Session } // Addr returns the listening address of mock server. @@ -102,26 +107,31 @@ func (m *MockCloudSpanner) Done() { // CreateSession is a placeholder for SpannerServer.CreateSession. func (m *MockCloudSpanner) CreateSession(c context.Context, r *sppb.CreateSessionRequest) (*sppb.Session, error) { - m.t.Fatalf("CreateSession is unimplemented") - return nil, errors.New("Unimplemented") + m.mu.Lock() + defer m.mu.Unlock() + name := fmt.Sprintf("session-%d", m.nextSession) + m.nextSession++ + s := &sppb.Session{Name: name} + m.sessions[name] = s + return s, nil } // GetSession is a placeholder for SpannerServer.GetSession. func (m *MockCloudSpanner) GetSession(c context.Context, r *sppb.GetSessionRequest) (*sppb.Session, error) { - m.t.Fatalf("GetSession is unimplemented") - return nil, errors.New("Unimplemented") + m.mu.Lock() + defer m.mu.Unlock() + if s, ok := m.sessions[r.Name]; ok { + return s, nil + } + return nil, status.Errorf(codes.NotFound, "not found") } // DeleteSession is a placeholder for SpannerServer.DeleteSession. func (m *MockCloudSpanner) DeleteSession(c context.Context, r *sppb.DeleteSessionRequest) (*empty.Empty, error) { - m.t.Fatalf("DeleteSession is unimplemented") - return nil, errors.New("Unimplemented") -} - -// ExecuteSql is a placeholder for SpannerServer.ExecuteSql. -func (m *MockCloudSpanner) ExecuteSql(c context.Context, r *sppb.ExecuteSqlRequest) (*sppb.ResultSet, error) { - m.t.Fatalf("ExecuteSql is unimplemented") - return nil, errors.New("Unimplemented") + m.mu.Lock() + defer m.mu.Unlock() + delete(m.sessions, r.Name) + return &empty.Empty{}, nil } // EncodeResumeToken return mock resume token encoding for an uint64 integer. 
@@ -144,7 +154,7 @@ func DecodeResumeToken(t []byte) (uint64, error) { func (m *MockCloudSpanner) ExecuteStreamingSql(r *sppb.ExecuteSqlRequest, s sppb.Spanner_ExecuteStreamingSqlServer) error { switch r.Sql { case "SELECT * from t_unavailable": - return grpc.Errorf(codes.Unavailable, "mock table unavailable") + return status.Errorf(codes.Unavailable, "mock table unavailable") case "SELECT t.key key, t.value value FROM t_mock t": if r.ResumeToken != nil { s, err := DecodeResumeToken(r.ResumeToken) @@ -192,34 +202,9 @@ func (m *MockCloudSpanner) ExecuteStreamingSql(r *sppb.ExecuteSqlRequest, s sppb } } -// Read is a placeholder for SpannerServer.Read. -func (m *MockCloudSpanner) Read(c context.Context, r *sppb.ReadRequest) (*sppb.ResultSet, error) { - m.t.Fatalf("Read is unimplemented") - return nil, errors.New("Unimplemented") -} - // StreamingRead is a placeholder for SpannerServer.StreamingRead. func (m *MockCloudSpanner) StreamingRead(r *sppb.ReadRequest, s sppb.Spanner_StreamingReadServer) error { - m.t.Fatalf("StreamingRead is unimplemented") - return errors.New("Unimplemented") -} - -// BeginTransaction is a placeholder for SpannerServer.BeginTransaction. -func (m *MockCloudSpanner) BeginTransaction(c context.Context, r *sppb.BeginTransactionRequest) (*sppb.Transaction, error) { - m.t.Fatalf("BeginTransaction is unimplemented") - return nil, errors.New("Unimplemented") -} - -// Commit is a placeholder for SpannerServer.Commit. -func (m *MockCloudSpanner) Commit(c context.Context, r *sppb.CommitRequest) (*sppb.CommitResponse, error) { - m.t.Fatalf("Commit is unimplemented") - return nil, errors.New("Unimplemented") -} - -// Rollback is a placeholder for SpannerServer.Rollback. -func (m *MockCloudSpanner) Rollback(c context.Context, r *sppb.RollbackRequest) (*empty.Empty, error) { - m.t.Fatalf("Rollback is unimplemented") - return nil, errors.New("Unimplemented") + return s.Send(&sppb.PartialResultSet{}) } // Serve runs a MockCloudSpanner listening on a random localhost address. @@ -249,9 +234,10 @@ func (m *MockCloudSpanner) Stop() { // NewMockCloudSpanner creates a new MockCloudSpanner instance. func NewMockCloudSpanner(t *testing.T, ts time.Time) *MockCloudSpanner { mcs := &MockCloudSpanner{ - t: t, - msgs: make(chan MockCtlMsg, 1000), - readTs: ts, + t: t, + msgs: make(chan MockCtlMsg, 1000), + readTs: ts, + sessions: map[string]*sppb.Session{}, } return mcs } diff --git a/vendor/cloud.google.com/go/spanner/key.go b/vendor/cloud.google.com/go/spanner/key.go index 5b332f4dc..7bb1ef5d3 100644 --- a/vendor/cloud.google.com/go/spanner/key.go +++ b/vendor/cloud.google.com/go/spanner/key.go @@ -30,13 +30,11 @@ import ( // A Key can be either a Cloud Spanner row's primary key or a secondary index key. // It is essentially an interface{} array, which represents a set of Cloud Spanner -// columns. A Key type has the following usages: +// columns. A Key can be used as: // -// - Used as primary key which uniquely identifies a Cloud Spanner row. -// - Used as secondary index key which maps to a set of Cloud Spanner rows -// indexed under it. -// - Used as endpoints of primary key/secondary index ranges, -// see also the KeyRange type. +// - A primary key which uniquely identifies a Cloud Spanner row. +// - A secondary index key which maps to a set of Cloud Spanner rows indexed under it. +// - An endpoint of primary key/secondary index ranges; see the KeyRange type. // // Rows that are identified by the Key type are outputs of read operation or targets of // delete operation in a mutation. 
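The reworked Key documentation above lists three uses of the type. A minimal sketch, not part of the vendored patch, of the first and third (a primary-key lookup and Keys used as KeyRange endpoints) against a hypothetical Accounts table; the secondary-index use goes through the ReadUsingIndex variants and is omitted here:

package example

import (
	"cloud.google.com/go/spanner"
	"golang.org/x/net/context"
)

func keyUses(ctx context.Context, client *spanner.Client) error {
	// Key as primary key: read a single row.
	_, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{int64(1)}, []string{"Balance"})
	if err != nil {
		return err
	}
	// Keys as range endpoints: every row whose key lies in ["a", "m").
	iter := client.Single().Read(ctx, "Accounts",
		spanner.KeyRange{Start: spanner.Key{"a"}, End: spanner.Key{"m"}, Kind: spanner.ClosedOpen},
		[]string{"Balance"})
	return iter.Do(func(row *spanner.Row) error {
		_ = row // process the row
		return nil
	})
}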
Note that for Insert/Update/InsertOrUpdate/Update diff --git a/vendor/cloud.google.com/go/spanner/mutation.go b/vendor/cloud.google.com/go/spanner/mutation.go index 4eac91568..5801c292e 100644 --- a/vendor/cloud.google.com/go/spanner/mutation.go +++ b/vendor/cloud.google.com/go/spanner/mutation.go @@ -53,23 +53,23 @@ const ( // // Many mutations can be applied in a single atomic commit. For purposes of // constraint checking (such as foreign key constraints), the operations can be -// viewed as applying in same order as the mutations are supplied in (so that -// e.g., a row and its logical "child" can be inserted in the same commit). +// viewed as applying in the same order as the mutations are provided (so that, e.g., +// a row and its logical "child" can be inserted in the same commit). // -// - The Apply function applies series of mutations. -// - A ReadWriteTransaction applies a series of mutations as part of an -// atomic read-modify-write operation. -// Example: +// The Apply function applies series of mutations. For example, // -// m := spanner.Insert("User", -// []string{"user_id", "profile"}, -// []interface{}{UserID, profile}) -// _, err := client.Apply(ctx, []*spanner.Mutation{m}) +// m := spanner.Insert("User", +// []string{"user_id", "profile"}, +// []interface{}{UserID, profile}) +// _, err := client.Apply(ctx, []*spanner.Mutation{m}) // -// In this example, we insert a new row into the User table. The primary key +// inserts a new row into the User table. The primary key // for the new row is UserID (presuming that "user_id" has been declared as the // primary key of the "User" table). // +// To apply a series of mutations as part of an atomic read-modify-write operation, +// use ReadWriteTransaction. +// // Updating a row // // Changing the values of columns in an existing row is very similar to diff --git a/vendor/cloud.google.com/go/spanner/not_go18.go b/vendor/cloud.google.com/go/spanner/not_go18.go index 4e077d495..c9e62a128 100644 --- a/vendor/cloud.google.com/go/spanner/not_go18.go +++ b/vendor/cloud.google.com/go/spanner/not_go18.go @@ -16,15 +16,10 @@ package spanner -import ( - "golang.org/x/net/context" - "google.golang.org/api/option" -) +import "golang.org/x/net/context" // OpenCensus only supports go 1.8 and higher. -func openCensusOptions() []option.ClientOption { return nil } - func traceStartSpan(ctx context.Context, _ string) context.Context { return ctx } diff --git a/vendor/cloud.google.com/go/spanner/oc_test.go b/vendor/cloud.google.com/go/spanner/oc_test.go new file mode 100644 index 000000000..df0dd3146 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/oc_test.go @@ -0,0 +1,72 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
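The reworked mutation.go comment above shows Apply and only mentions the atomic read-modify-write path. A minimal sketch, not part of the vendored patch, of buffering the same kind of mutation inside a ReadWriteTransaction instead; the table and columns reuse the doc comment's hypothetical User schema:

package example

import (
	"cloud.google.com/go/spanner"
	"golang.org/x/net/context"
)

func upsertProfile(ctx context.Context, client *spanner.Client, userID, profile string) error {
	_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error {
		// Reads through txn here would be part of the same atomic operation.
		m := spanner.Insert("User",
			[]string{"user_id", "profile"},
			[]interface{}{userID, profile})
		return txn.BufferWrite([]*spanner.Mutation{m})
	})
	return err
}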
+ +// +build go1.8 + +package spanner + +import ( + "testing" + "time" + + "cloud.google.com/go/spanner/internal/testutil" + "go.opencensus.io/plugin/ocgrpc" + statsview "go.opencensus.io/stats/view" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +func TestOCStats(t *testing.T) { + // Check that stats are being exported. + te := &testExporter{c: make(chan *statsview.Data)} + statsview.RegisterExporter(te) + defer statsview.UnregisterExporter(te) + statsview.SetReportingPeriod(time.Millisecond) + if err := ocgrpc.ClientRequestCountView.Subscribe(); err != nil { + t.Fatal(err) + } + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + ctx := context.Background() + c, err := NewClient(ctx, "projects/P/instances/I/databases/D", + option.WithEndpoint(ms.Addr()), + option.WithGRPCDialOption(grpc.WithInsecure()), + option.WithoutAuthentication()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + c.Single().ReadRow(ctx, "Users", Key{"alice"}, []string{"email"}) + // Wait until we see data from the view. + select { + case <-te.c: + case <-time.After(1 * time.Second): + t.Fatal("no stats were exported before timeout") + } +} + +type testExporter struct { + c chan *statsview.Data +} + +func (e *testExporter) ExportView(vd *statsview.Data) { + if len(vd.Rows) > 0 { + select { + case e.c <- vd: + default: + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/read_test.go b/vendor/cloud.google.com/go/spanner/read_test.go index c83aebcb3..1a2e693c4 100644 --- a/vendor/cloud.google.com/go/spanner/read_test.go +++ b/vendor/cloud.google.com/go/spanner/read_test.go @@ -25,8 +25,9 @@ import ( "time" "golang.org/x/net/context" + "google.golang.org/grpc/status" - proto "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/proto" proto3 "github.com/golang/protobuf/ptypes/struct" "cloud.google.com/go/spanner/internal/testutil" @@ -717,7 +718,7 @@ func TestRsdNonblockingStates(t *testing.T) { queueingRetryable, // got foo-02 aborted, // got error }, - wantErr: grpc.Errorf(codes.Unknown, "I quit"), + wantErr: status.Errorf(codes.Unknown, "I quit"), }, { // unConnected->queueingRetryable->queueingUnretryable->queueingUnretryable @@ -785,7 +786,7 @@ func TestRsdNonblockingStates(t *testing.T) { s = append(s, aborted) // Error happens return s }(), - wantErr: grpc.Errorf(codes.Unknown, "Just Abort It"), + wantErr: status.Errorf(codes.Unknown, "Just Abort It"), }, } nextTest: @@ -901,11 +902,11 @@ func TestRsdBlockingStates(t *testing.T) { // unConnected -> unConnected name: "unConnected -> unConnected", rpc: func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { - return nil, grpc.Errorf(codes.Unavailable, "trust me: server is unavailable") + return nil, status.Errorf(codes.Unavailable, "trust me: server is unavailable") }, sql: "SELECT * from t_whatever", stateHistory: []resumableStreamDecoderState{unConnected, unConnected, unConnected}, - wantErr: grpc.Errorf(codes.Unavailable, "trust me: server is unavailable"), + wantErr: status.Errorf(codes.Unavailable, "trust me: server is unavailable"), }, { // unConnected -> queueingRetryable @@ -1344,7 +1345,7 @@ func TestResumeToken(t *testing.T) { } // Inject resumable failure. ms.AddMsg( - grpc.Errorf(codes.Unavailable, "mock server unavailable"), + status.Errorf(codes.Unavailable, "mock server unavailable"), false, ) // Test if client detects the resumable failure and retries. 
@@ -1401,7 +1402,7 @@ func TestResumeToken(t *testing.T) { // Inject resumable error, but since resumableStreamDecoder is already at queueingUnretryable // state, query will just fail. ms.AddMsg( - grpc.Errorf(codes.Unavailable, "mock server wants some sleep"), + status.Errorf(codes.Unavailable, "mock server wants some sleep"), false, ) var gotErr error @@ -1410,7 +1411,7 @@ func TestResumeToken(t *testing.T) { case <-time.After(10 * time.Second): t.Fatalf("timeout in waiting for failed query to return.") } - if wantErr := toSpannerError(grpc.Errorf(codes.Unavailable, "mock server wants some sleep")); !testEqual(gotErr, wantErr) { + if wantErr := toSpannerError(status.Errorf(codes.Unavailable, "mock server wants some sleep")); !testEqual(gotErr, wantErr) { t.Fatalf("stream() returns error: %v, but want error: %v", gotErr, wantErr) } diff --git a/vendor/cloud.google.com/go/spanner/retry_test.go b/vendor/cloud.google.com/go/spanner/retry_test.go index 33cb8488d..b15eb7f26 100644 --- a/vendor/cloud.google.com/go/spanner/retry_test.go +++ b/vendor/cloud.google.com/go/spanner/retry_test.go @@ -26,9 +26,9 @@ import ( "github.com/golang/protobuf/ptypes" "golang.org/x/net/context" edpb "google.golang.org/genproto/googleapis/rpc/errdetails" - "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" ) // Test if runRetryable loop deals with various errors correctly. @@ -37,10 +37,10 @@ func TestRetry(t *testing.T) { t.SkipNow() } responses := []error{ - grpc.Errorf(codes.Internal, "transport is closing"), - grpc.Errorf(codes.Unknown, "unexpected EOF"), - grpc.Errorf(codes.Internal, "stream terminated by RST_STREAM with error code: 2"), - grpc.Errorf(codes.Unavailable, "service is currently unavailable"), + status.Errorf(codes.Internal, "transport is closing"), + status.Errorf(codes.Unknown, "unexpected EOF"), + status.Errorf(codes.Internal, "stream terminated by RST_STREAM with error code: 2"), + status.Errorf(codes.Unavailable, "service is currently unavailable"), errRetry(fmt.Errorf("just retry it")), } err := runRetryable(context.Background(), func(ct context.Context) error { @@ -100,7 +100,7 @@ func TestRetryInfo(t *testing.T) { trailers := map[string]string{ retryInfoKey: string(b), } - gotDelay, ok := extractRetryDelay(errRetry(toSpannerErrorWithMetadata(grpc.Errorf(codes.Aborted, ""), metadata.New(trailers)))) + gotDelay, ok := extractRetryDelay(errRetry(toSpannerErrorWithMetadata(status.Errorf(codes.Aborted, ""), metadata.New(trailers)))) if !ok || !testEqual(time.Second, gotDelay) { t.Errorf(" = <%t, %v>, want ", ok, gotDelay, time.Second) } diff --git a/vendor/cloud.google.com/go/spanner/row.go b/vendor/cloud.google.com/go/spanner/row.go index 200fda55a..e59226f14 100644 --- a/vendor/cloud.google.com/go/spanner/row.go +++ b/vendor/cloud.google.com/go/spanner/row.go @@ -274,11 +274,13 @@ func errToStructArgType(p interface{}) error { // ToStruct fetches the columns in a row into the fields of a struct. // The rules for mapping a row's columns into a struct's exported fields // are as the following: -// 1. If a field has a `spanner: "column_name"` tag, then decode column -// 'column_name' into the field. A special case is the `spanner: "-"` -// tag, which instructs ToStruct to ignore the field during decoding. -// 2. Otherwise, if the name of a field matches the name of a column (ignoring case), -// decode the column into the field. +// +// 1. 
If a field has a `spanner: "column_name"` tag, then decode column +// 'column_name' into the field. A special case is the `spanner: "-"` +// tag, which instructs ToStruct to ignore the field during decoding. +// +// 2. Otherwise, if the name of a field matches the name of a column (ignoring case), +// decode the column into the field. // // The fields of the destination struct can be of any type that is acceptable // to spanner.Row.Column. diff --git a/vendor/cloud.google.com/go/spanner/session_test.go b/vendor/cloud.google.com/go/spanner/session_test.go index 39a2ee526..c5bc8c9a3 100644 --- a/vendor/cloud.google.com/go/spanner/session_test.go +++ b/vendor/cloud.google.com/go/spanner/session_test.go @@ -25,10 +25,10 @@ import ( "time" "golang.org/x/net/context" + "google.golang.org/grpc/status" "cloud.google.com/go/spanner/internal/testutil" sppb "google.golang.org/genproto/googleapis/spanner/v1" - "google.golang.org/grpc" "google.golang.org/grpc/codes" ) @@ -222,7 +222,7 @@ func TestTakeFromIdleListChecked(t *testing.T) { } // Inject session error to mockclient, and take the session from the session pool, the old session should be destroyed and // the session pool will create a new session. - sc.InjectError("GetSession", grpc.Errorf(codes.NotFound, "Session not found:")) + sc.InjectError("GetSession", status.Errorf(codes.NotFound, "Session not found:")) // Delay to trigger sessionPool.Take to ping the session. <-time.After(time.Second) sh, err = sp.take(context.Background()) @@ -279,7 +279,7 @@ func TestTakeFromIdleWriteListChecked(t *testing.T) { } // Inject session error to mockclient, and take the session from the session pool, the old session should be destroyed and // the session pool will create a new session. - sc.InjectError("GetSession", grpc.Errorf(codes.NotFound, "Session not found:")) + sc.InjectError("GetSession", status.Errorf(codes.NotFound, "Session not found:")) // Delay to trigger sessionPool.Take to ping the session. <-time.After(time.Second) sh, err = sp.takeWriteSession(context.Background()) @@ -369,7 +369,7 @@ func TestMaxBurst(t *testing.T) { sp, sc, cancel := setup(t, SessionPoolConfig{MaxBurst: 1}) defer cancel() // Will cause session creation RPC to be retried forever. - sc.InjectError("CreateSession", grpc.Errorf(codes.Unavailable, "try later")) + sc.InjectError("CreateSession", status.Errorf(codes.Unavailable, "try later")) // This session request will never finish until the injected error is cleared. go sp.take(context.Background()) // Poll for the execution of the first session request. @@ -619,7 +619,7 @@ func TestSessionHealthCheck(t *testing.T) { if err != nil { t.Errorf("cannot get session from session pool: %v", err) } - sc.InjectError("GetSession", grpc.Errorf(codes.NotFound, "Session not found:")) + sc.InjectError("GetSession", status.Errorf(codes.NotFound, "Session not found:")) // Wait for healthcheck workers to find the broken session and tear it down. 
<-time.After(1 * time.Second) s := sh.session diff --git a/vendor/cloud.google.com/go/spanner/spanner_test.go b/vendor/cloud.google.com/go/spanner/spanner_test.go index e9feae56a..ab63f4ef5 100644 --- a/vendor/cloud.google.com/go/spanner/spanner_test.go +++ b/vendor/cloud.google.com/go/spanner/spanner_test.go @@ -93,6 +93,19 @@ var ( `CREATE INDEX TestTableByValue ON TestTable(StringValue)`, `CREATE INDEX TestTableByValueDesc ON TestTable(StringValue DESC)`, } + + simpleDBStatements = []string{ + `CREATE TABLE test ( + a STRING(1024), + b STRING(1024), + ) PRIMARY KEY (a)`, + } + simpleDBTableColumns = []string{"a", "b"} +) + +const ( + str1 = "alice" + str2 = "a@example.com" ) type testTableRow struct{ Key, StringValue string } @@ -119,8 +132,7 @@ func initIntegrationTest() { } var err error // Create Admin client and Data client. - // TODO: Remove the EndPoint option once this is the default. - admin, err = database.NewDatabaseAdminClient(ctx, option.WithTokenSource(ts), option.WithEndpoint("spanner.googleapis.com:443")) + admin, err = database.NewDatabaseAdminClient(ctx, option.WithTokenSource(ts), option.WithEndpoint(endpoint)) if err != nil { log.Fatalf("cannot create admin client: %v", err) } @@ -158,16 +170,16 @@ func prepare(ctx context.Context, t *testing.T, statements []string) (client *Cl } client, err = NewClientWithConfig(ctx, dbPath, ClientConfig{ SessionPoolConfig: SessionPoolConfig{WriteSessions: 0.2}, - }, option.WithTokenSource(testutil.TokenSource(ctx, Scope))) + }, option.WithTokenSource(testutil.TokenSource(ctx, Scope)), option.WithEndpoint(endpoint)) if err != nil { t.Fatalf("cannot create data client on DB %v: %v", dbPath, err) } return client, dbPath, func() { + client.Close() if err := admin.DropDatabase(ctx, &adminpb.DropDatabaseRequest{dbPath}); err != nil { t.Logf("failed to drop database %s (error %v), might need a manual removal", dbPath, err) } - client.Close() } } @@ -959,8 +971,11 @@ func TestDbRemovalRecovery(t *testing.T) { ) PRIMARY KEY (SingerId)`, }, }) + if err != nil { + t.Fatalf("cannot recreate testing DB %v: %v", dbPath, err) + } if _, err := op.Wait(ctx); err != nil { - t.Errorf("cannot recreate testing DB %v: %v", dbPath, err) + t.Fatalf("cannot recreate testing DB %v: %v", dbPath, err) } // Now, send the query again. @@ -968,7 +983,7 @@ func TestDbRemovalRecovery(t *testing.T) { defer iter.Stop() _, err = iter.Next() if err != nil && err != iterator.Done { - t.Fatalf("failed to send query to database %v: %v", dbPath, err) + t.Errorf("failed to send query to database %v: %v", dbPath, err) } } @@ -1434,7 +1449,7 @@ func readAllTestTable(iter *RowIterator) ([]testTableRow, error) { // Test TransactionRunner. Test that transactions are aborted and retried as expected. func TestTransactionRunner(t *testing.T) { t.Parallel() - ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) defer cancel() client, _, tearDown := prepare(ctx, t, singerDBStatements) defer tearDown() @@ -1559,3 +1574,231 @@ func TestTransactionRunner(t *testing.T) { } } } + +// createClient creates Cloud Spanner data client. 
+func createClient(ctx context.Context, dbPath string) (client *Client, err error) { + client, err = NewClientWithConfig(ctx, dbPath, ClientConfig{ + SessionPoolConfig: SessionPoolConfig{WriteSessions: 0.2}, + }, option.WithTokenSource(testutil.TokenSource(ctx, Scope)), option.WithEndpoint(endpoint)) + if err != nil { + return nil, fmt.Errorf("cannot create data client on DB %v: %v", dbPath, err) + } + return client, nil +} + +// populate prepares the database with some data. +func populate(ctx context.Context, client *Client) error { + // Populate data + var err error + m := InsertMap("test", map[string]interface{}{ + "a": str1, + "b": str2, + }) + _, err = client.Apply(ctx, []*Mutation{m}) + return err +} + +// Test PartitionQuery of BatchReadOnlyTransaction, create partitions then +// serialize and deserialize both transaction and partition to be used in +// execution on another client, and compare results. +func TestBatchQuery(t *testing.T) { + t.Parallel() + // Set up testing environment. + var ( + client2 *Client + err error + ) + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + client, dbPath, tearDown := prepare(ctx, t, simpleDBStatements) + defer tearDown() + if err = populate(ctx, client); err != nil { + t.Fatal(err) + } + if client2, err = createClient(ctx, dbPath); err != nil { + t.Fatal(err) + } + defer client2.Close() + + // PartitionQuery + var ( + txn *BatchReadOnlyTransaction + partitions []*Partition + stmt = Statement{SQL: "SELECT * FROM test;"} + ) + + if txn, err = client.BatchReadOnlyTransaction(ctx, StrongRead()); err != nil { + t.Fatal(err) + } + defer txn.Cleanup(ctx) + if partitions, err = txn.PartitionQuery(ctx, stmt, PartitionOptions{0, 3}); err != nil { + t.Fatal(err) + } + + // Reconstruct BatchReadOnlyTransactionID and execute partitions + var ( + tid2 BatchReadOnlyTransactionID + data []byte + gotResult bool // if we get matching result from two separate txns + ) + if data, err = txn.ID.MarshalBinary(); err != nil { + t.Fatalf("encoding failed %v", err) + } + if err = tid2.UnmarshalBinary(data); err != nil { + t.Fatalf("decoding failed %v", err) + } + txn2 := client2.BatchReadOnlyTransactionFromID(tid2) + + // Execute Partitions and compare results + for i, p := range partitions { + iter := txn.Execute(ctx, p) + defer iter.Stop() + p2 := serdesPartition(t, i, p) + iter2 := txn2.Execute(ctx, &p2) + defer iter2.Stop() + + row1, err1 := iter.Next() + row2, err2 := iter2.Next() + if err1 != err2 { + t.Fatalf("execution failed for different reasons: %v, %v", err1, err2) + continue + } + if !testEqual(row1, row2) { + t.Fatalf("execution returned different values: %v, %v", row1, row2) + continue + } + if row1 == nil { + continue + } + var a, b string + if err = row1.Columns(&a, &b); err != nil { + t.Fatalf("failed to parse row %v", err) + continue + } + if a == str1 && b == str2 { + gotResult = true + } + } + if !gotResult { + t.Fatalf("execution didn't return expected values") + } +} + +// Test PartitionRead of BatchReadOnlyTransaction, similar to TestBatchQuery +func TestBatchRead(t *testing.T) { + t.Parallel() + // Set up testing environment. 
+ var ( + client2 *Client + err error + ) + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + client, dbPath, tearDown := prepare(ctx, t, simpleDBStatements) + defer tearDown() + if err = populate(ctx, client); err != nil { + t.Fatal(err) + } + if client2, err = createClient(ctx, dbPath); err != nil { + t.Fatal(err) + } + defer client2.Close() + + // PartitionRead + var ( + txn *BatchReadOnlyTransaction + partitions []*Partition + ) + + if txn, err = client.BatchReadOnlyTransaction(ctx, StrongRead()); err != nil { + t.Fatal(err) + } + defer txn.Cleanup(ctx) + if partitions, err = txn.PartitionRead(ctx, "test", AllKeys(), simpleDBTableColumns, PartitionOptions{0, 3}); err != nil { + t.Fatal(err) + } + + // Reconstruct BatchReadOnlyTransactionID and execute partitions + var ( + tid2 BatchReadOnlyTransactionID + data []byte + gotResult bool // if we get matching result from two separate txns + ) + if data, err = txn.ID.MarshalBinary(); err != nil { + t.Fatalf("encoding failed %v", err) + } + if err = tid2.UnmarshalBinary(data); err != nil { + t.Fatalf("decoding failed %v", err) + } + txn2 := client2.BatchReadOnlyTransactionFromID(tid2) + + // Execute Partitions and compare results + for i, p := range partitions { + iter := txn.Execute(ctx, p) + defer iter.Stop() + p2 := serdesPartition(t, i, p) + iter2 := txn2.Execute(ctx, &p2) + defer iter2.Stop() + + row1, err1 := iter.Next() + row2, err2 := iter2.Next() + if err1 != err2 { + t.Fatalf("execution failed for different reasons: %v, %v", err1, err2) + continue + } + if !testEqual(row1, row2) { + t.Fatalf("execution returned different values: %v, %v", row1, row2) + continue + } + if row1 == nil { + continue + } + var a, b string + if err = row1.Columns(&a, &b); err != nil { + t.Fatalf("failed to parse row %v", err) + continue + } + if a == str1 && b == str2 { + gotResult = true + } + } + if !gotResult { + t.Fatalf("execution didn't return expected values") + } +} + +// Test normal txReadEnv method on BatchReadOnlyTransaction. +func TestBROTNormal(t *testing.T) { + t.Parallel() + // Set up testing environment and create txn. + var ( + txn *BatchReadOnlyTransaction + err error + row *Row + i int64 + ) + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + client, _, tearDown := prepare(ctx, t, simpleDBStatements) + defer tearDown() + + if txn, err = client.BatchReadOnlyTransaction(ctx, StrongRead()); err != nil { + t.Fatal(err) + } + defer txn.Cleanup(ctx) + if _, err := txn.PartitionRead(ctx, "test", AllKeys(), simpleDBTableColumns, PartitionOptions{0, 3}); err != nil { + t.Fatal(err) + } + // Normal query should work with BatchReadOnlyTransaction + stmt2 := Statement{SQL: "SELECT 1"} + iter := txn.Query(ctx, stmt2) + defer iter.Stop() + + row, err = iter.Next() + if err != nil { + t.Errorf("query failed with %v", err) + } + if err = row.Columns(&i); err != nil { + t.Errorf("failed to parse row %v", err) + } +} diff --git a/vendor/cloud.google.com/go/spanner/statement.go b/vendor/cloud.google.com/go/spanner/statement.go index d04c2003f..a4313ba59 100644 --- a/vendor/cloud.google.com/go/spanner/statement.go +++ b/vendor/cloud.google.com/go/spanner/statement.go @@ -66,12 +66,12 @@ var ( errNoType = errors.New("no type information") ) -// bindParams binds parameters in a Statement to a sppb.ExecuteSqlRequest. 
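The two tests above drive the new BatchReadOnlyTransaction API end to end. Stripped of the serialization round-trip and the assertions, the client-side flow they exercise amounts to the sketch below; it assumes the usual context and cloud.google.com/go/spanner imports, an existing *spanner.Client named client, and the `test` table created from simpleDBStatements.

	func batchQuery(ctx context.Context, client *spanner.Client) error {
		// Open a batch read-only transaction at a strong timestamp bound.
		txn, err := client.BatchReadOnlyTransaction(ctx, spanner.StrongRead())
		if err != nil {
			return err
		}
		defer txn.Cleanup(ctx)
		// Split the query into at most three partitions.
		partitions, err := txn.PartitionQuery(ctx,
			spanner.Statement{SQL: "SELECT a, b FROM test"},
			spanner.PartitionOptions{MaxPartitions: 3})
		if err != nil {
			return err
		}
		// Each partition is independent; the tests serialize the transaction ID and
		// the partitions and execute them through a second client, but executing
		// them serially on the same client works the same way.
		for _, p := range partitions {
			iter := txn.Execute(ctx, p)
			err := iter.Do(func(r *spanner.Row) error {
				var a, b string
				return r.Columns(&a, &b)
			})
			if err != nil {
				return err
			}
		}
		return nil
	}
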
-func (s *Statement) bindParams(r *sppb.ExecuteSqlRequest) error { - r.Params = &proto3.Struct{ +// bindParams binds parameters in a Statement to a sppb.ExecuteSqlRequest or sppb.PartitionQueryRequest. +func (s *Statement) bindParams(i interface{}) error { + params := &proto3.Struct{ Fields: map[string]*proto3.Value{}, } - r.ParamTypes = map[string]*sppb.Type{} + paramTypes := map[string]*sppb.Type{} for k, v := range s.Params { if v == nil { return errBindParam(k, v, errNilParam) @@ -83,8 +83,19 @@ func (s *Statement) bindParams(r *sppb.ExecuteSqlRequest) error { if t == nil { // should not happen, because of nil check above return errBindParam(k, v, errNoType) } - r.Params.Fields[k] = val - r.ParamTypes[k] = t + params.Fields[k] = val + paramTypes[k] = t + } + + switch r := i.(type) { + default: + return fmt.Errorf("failed to bind query parameter, unexpected request type: %v", r) + case *sppb.ExecuteSqlRequest: + r.Params = params + r.ParamTypes = paramTypes + case *sppb.PartitionQueryRequest: + r.Params = params + r.ParamTypes = paramTypes } return nil } diff --git a/vendor/cloud.google.com/go/spanner/timestampbound.go b/vendor/cloud.google.com/go/spanner/timestampbound.go index 068d96600..e606e6c2d 100644 --- a/vendor/cloud.google.com/go/spanner/timestampbound.go +++ b/vendor/cloud.google.com/go/spanner/timestampbound.go @@ -39,11 +39,8 @@ const ( // TimestampBound defines how Cloud Spanner will choose a timestamp for a single // read/query or read-only transaction. // -// The types of timestamp bound are: -// -// - Strong (the default). -// - Bounded staleness. -// - Exact staleness. +// There are three types of timestamp bound: strong, bounded staleness and exact +// staleness. Strong is the default. // // If the Cloud Spanner database to be read is geographically distributed, stale // read-only transactions can execute more quickly than strong or read-write @@ -57,7 +54,7 @@ const ( // // Strong reads are guaranteed to see the effects of all transactions that have // committed before the start of the read. Furthermore, all rows yielded by a -// single read are consistent with each other - if any part of the read +// single read are consistent with each other: if any part of the read // observes a transaction, all parts of the read see the transaction. // // Strong reads are not repeatable: two consecutive strong read-only @@ -65,18 +62,17 @@ const ( // writes. If consistency across reads is required, the reads should be // executed within a transaction or at an exact read timestamp. // -// Use StrongRead() to create a bound of this type. +// Use StrongRead to create a bound of this type. // // Exact staleness // -// These timestamp bounds execute reads at a user-specified timestamp. Reads at -// a timestamp are guaranteed to see a consistent prefix of the global -// transaction history: they observe modifications done by all transactions -// with a commit timestamp less than or equal to the read timestamp, and -// observe none of the modifications done by transactions with a larger commit -// timestamp. They will block until all conflicting transactions that may be -// assigned commit timestamps less than or equal to the read timestamp have -// finished. +// An exact staleness timestamp bound executes reads at a user-specified timestamp. 
+// Reads at a timestamp are guaranteed to see a consistent prefix of the global +// transaction history: they observe modifications done by all transactions with a +// commit timestamp less than or equal to the read timestamp, and observe none of the +// modifications done by transactions with a larger commit timestamp. They will block +// until all conflicting transactions that may be assigned commit timestamps less +// than or equal to the read timestamp have finished. // // The timestamp can either be expressed as an absolute Cloud Spanner commit // timestamp or a staleness relative to the current time. @@ -86,7 +82,7 @@ const ( // concurrency modes. On the other hand, boundedly stale reads usually return // fresher results. // -// Use ReadTimestamp() and ExactStaleness() to create a bound of this type. +// Use ReadTimestamp and ExactStaleness to create a bound of this type. // // Bounded staleness // @@ -95,17 +91,17 @@ const ( // the staleness bound that allows execution of the reads at the closest // available replica without blocking. // -// All rows yielded are consistent with each other -- if any part of the read +// All rows yielded are consistent with each other: if any part of the read // observes a transaction, all parts of the read see the transaction. Boundedly // stale reads are not repeatable: two stale reads, even if they use the same // staleness bound, can execute at different timestamps and thus return // inconsistent results. // -// Boundedly stale reads execute in two phases: the first phase negotiates a +// Boundedly stale reads execute in two phases. The first phase negotiates a // timestamp among all replicas needed to serve the read. In the second phase, // reads are executed at the negotiated timestamp. // -// As a result of the two phase execution, bounded staleness reads are usually +// As a result of this two-phase execution, bounded staleness reads are usually // a little slower than comparable exact staleness reads. However, they are // typically able to return fresher results, and are more likely to execute at // the closest replica. @@ -114,7 +110,7 @@ const ( // will be read, it can only be used with single-use reads and single-use // read-only transactions. // -// Use MinReadTimestamp() and MaxStaleness() to create a bound of this type. +// Use MinReadTimestamp and MaxStaleness to create a bound of this type. // // Old read timestamps and garbage collection // @@ -123,7 +119,7 @@ const ( // GC". By default, version GC reclaims versions after they are four hours // old. Because of this, Cloud Spanner cannot perform reads at read timestamps more // than four hours in the past. This restriction also applies to in-progress -// reads and/or SQL queries whose timestamp become too old while +// reads and/or SQL queries whose timestamps become too old while // executing. Reads and SQL queries with too-old read timestamps fail with the // error ErrorCode.FAILED_PRECONDITION. type TimestampBound struct { @@ -174,7 +170,6 @@ func ReadTimestamp(t time.Time) TimestampBound { } } -// String implements fmt.Stringer. 
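In code, the three bound types correspond to the constructors named above. A minimal sketch, assuming an existing *spanner.Client named client and a hypothetical Accounts table:

	func readAtBounds(ctx context.Context, client *spanner.Client) error {
		// Strong (the default): observes every transaction committed before the read.
		if _, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"Balance"}); err != nil {
			return err
		}
		// Exact staleness: read at a timestamp exactly ten seconds in the past.
		ro := client.Single().WithTimestampBound(spanner.ExactStaleness(10 * time.Second))
		if _, err := ro.ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"Balance"}); err != nil {
			return err
		}
		// Bounded staleness: the read executes at some timestamp no older than
		// fifteen seconds, usually at the closest replica without blocking.
		ro = client.Single().WithTimestampBound(spanner.MaxStaleness(15 * time.Second))
		_, err := ro.ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"Balance"})
		return err
	}
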
func (tb TimestampBound) String() string { switch tb.mode { case strong: diff --git a/vendor/cloud.google.com/go/spanner/transaction.go b/vendor/cloud.google.com/go/spanner/transaction.go index 8d7ac10ce..c35ba4ef5 100644 --- a/vendor/cloud.google.com/go/spanner/transaction.go +++ b/vendor/cloud.google.com/go/spanner/transaction.go @@ -268,7 +268,7 @@ func errUnexpectedTxState(ts txState) error { // applications do not need to worry about this in practice. See the // documentation of TimestampBound for more details. // -// A ReadOnlyTransaction consumes resources on the server until Close() is +// A ReadOnlyTransaction consumes resources on the server until Close is // called. type ReadOnlyTransaction struct { // txReadOnly contains methods for performing transactional reads. diff --git a/vendor/cloud.google.com/go/spanner/util_test.go b/vendor/cloud.google.com/go/spanner/util_test.go index 0cad6fac5..6a0f9a862 100644 --- a/vendor/cloud.google.com/go/spanner/util_test.go +++ b/vendor/cloud.google.com/go/spanner/util_test.go @@ -23,5 +23,6 @@ import ( func testEqual(a, b interface{}) bool { return testutil.Equal(a, b, - cmp.AllowUnexported(TimestampBound{}, Error{}, Mutation{}, Row{})) + cmp.AllowUnexported(TimestampBound{}, Error{}, Mutation{}, Row{}, + Partition{}, BatchReadOnlyTransactionID{})) } diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go index 24f90c924..676f3ea04 100644 --- a/vendor/cloud.google.com/go/storage/acl.go +++ b/vendor/cloud.google.com/go/storage/acl.go @@ -18,6 +18,7 @@ import ( "net/http" "reflect" + "cloud.google.com/go/internal/trace" "golang.org/x/net/context" "google.golang.org/api/googleapi" raw "google.golang.org/api/storage/v1" @@ -63,7 +64,10 @@ type ACLHandle struct { } // Delete permanently deletes the ACL entry for the given entity. -func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error { +func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Delete") + defer func() { trace.EndSpan(ctx, err) }() + if a.object != "" { return a.objectDelete(ctx, entity) } @@ -74,7 +78,10 @@ func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error { } // Set sets the permission level for the given entity. -func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) error { +func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set") + defer func() { trace.EndSpan(ctx, err) }() + if a.object != "" { return a.objectSet(ctx, entity, role, false) } @@ -85,7 +92,10 @@ func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) err } // List retrieves ACL entries. 
-func (a *ACLHandle) List(ctx context.Context) ([]ACLRule, error) { +func (a *ACLHandle) List(ctx context.Context) (_ []ACLRule, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.List") + defer func() { trace.EndSpan(ctx, err) }() + if a.object != "" { return a.objectList(ctx) } diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go index fcaa59db0..5125274f6 100644 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -21,6 +21,7 @@ import ( "time" "cloud.google.com/go/internal/optional" + "cloud.google.com/go/internal/trace" "golang.org/x/net/context" "google.golang.org/api/googleapi" "google.golang.org/api/iterator" @@ -63,7 +64,10 @@ func (c *Client) Bucket(name string) *BucketHandle { // Create creates the Bucket in the project. // If attrs is nil the API defaults will be used. -func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) error { +func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create") + defer func() { trace.EndSpan(ctx, err) }() + var bkt *raw.Bucket if attrs != nil { bkt = attrs.toRawBucket() @@ -82,7 +86,10 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck } // Delete deletes the Bucket. -func (b *BucketHandle) Delete(ctx context.Context) error { +func (b *BucketHandle) Delete(ctx context.Context) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Delete") + defer func() { trace.EndSpan(ctx, err) }() + req, err := b.newDeleteCall() if err != nil { return err @@ -139,7 +146,10 @@ func (b *BucketHandle) Object(name string) *ObjectHandle { } // Attrs returns the metadata for the bucket. -func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) { +func (b *BucketHandle) Attrs(ctx context.Context) (_ *BucketAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Attrs") + defer func() { trace.EndSpan(ctx, err) }() + req, err := b.newGetCall() if err != nil { return nil, err @@ -155,7 +165,7 @@ func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) { if err != nil { return nil, err } - return newBucket(resp), nil + return newBucket(resp) } func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) { @@ -170,7 +180,10 @@ func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) { return req, nil } -func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (*BucketAttrs, error) { +func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (_ *BucketAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create") + defer func() { trace.EndSpan(ctx, err) }() + req, err := b.newPatchCall(&uattrs) if err != nil { return nil, err @@ -180,7 +193,7 @@ func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) ( if err != nil { return nil, err } - return newBucket(rb), nil + return newBucket(rb) } func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) { @@ -241,8 +254,21 @@ type BucketAttrs struct { // a user project (see BucketHandle.UserProject), which will be billed // for the operations. RequesterPays bool + // Lifecycle is the lifecycle configuration for objects in the bucket. 
Lifecycle Lifecycle + + // Retention policy enforces a minimum retention time for all objects + // contained in the bucket. A RetentionPolicy of nil implies the bucket + // has no minimum data retention. + // + // This feature is in private alpha release. It is not currently available to + // most customers. It might be changed in backwards-incompatible ways and is not + // subject to any SLA or deprecation policy. + RetentionPolicy *RetentionPolicy + + // The bucket's Cross-Origin Resource Sharing (CORS) configuration. + CORS []CORS } // Lifecycle is the lifecycle configuration for objects in the bucket. @@ -250,6 +276,31 @@ type Lifecycle struct { Rules []LifecycleRule } +// Retention policy enforces a minimum retention time for all objects +// contained in the bucket. +// +// Any attempt to overwrite or delete objects younger than the retention +// period will result in an error. An unlocked retention policy can be +// modified or removed from the bucket via the Update method. A +// locked retention policy cannot be removed or shortened in duration +// for the lifetime of the bucket. +// +// This feature is in private alpha release. It is not currently available to +// most customers. It might be changed in backwards-incompatible ways and is not +// subject to any SLA or deprecation policy. +type RetentionPolicy struct { + // RetentionPeriod specifies the duration that objects need to be + // retained. Retention duration must be greater than zero and less than + // 100 years. Note that enforcement of retention periods less than a day + // is not guaranteed. Such periods should only be used for testing + // purposes. + RetentionPeriod time.Duration + + // EffectiveTime is the time from which the policy was enforced and + // effective. This field is read-only. + EffectiveTime time.Time +} + const ( // RFC3339 date with only the date segment, used for CreatedBefore in LifecycleRule. rfc3339Date = "2006-01-02" @@ -335,9 +386,13 @@ type LifecycleCondition struct { NumNewerVersions int64 } -func newBucket(b *raw.Bucket) *BucketAttrs { +func newBucket(b *raw.Bucket) (*BucketAttrs, error) { if b == nil { - return nil + return nil, nil + } + rp, err := toRetentionPolicy(b.RetentionPolicy) + if err != nil { + return nil, err } bucket := &BucketAttrs{ Name: b.Name, @@ -349,6 +404,8 @@ func newBucket(b *raw.Bucket) *BucketAttrs { Labels: b.Labels, RequesterPays: b.Billing != nil && b.Billing.RequesterPays, Lifecycle: toLifecycle(b.Lifecycle), + RetentionPolicy: rp, + CORS: toCORS(b.Cors), } acl := make([]ACLRule, len(b.Acl)) for i, rule := range b.Acl { @@ -366,7 +423,7 @@ func newBucket(b *raw.Bucket) *BucketAttrs { } } bucket.DefaultObjectACL = objACL - return bucket + return bucket, nil } // toRawBucket copies the editable attribute from b to the raw library's Bucket type. @@ -411,9 +468,33 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket { Labels: labels, Billing: bb, Lifecycle: toRawLifecycle(b.Lifecycle), + RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(), + Cors: toRawCORS(b.CORS), } } +// The bucket's Cross-Origin Resource Sharing (CORS) configuration. +type CORS struct { + // MaxAge is the value to return in the Access-Control-Max-Age + // header used in preflight responses. + MaxAge time.Duration + + // Methods is the list of HTTP methods on which to include CORS response + // headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list + // of methods, and means "any method". 
+ Methods []string + + // Origins is the list of Origins eligible to receive CORS response + // headers. Note: "*" is permitted in the list of origins, and means + // "any Origin". + Origins []string + + // ResponseHeaders is the list of HTTP headers other than the simple + // response headers to give permission for the user-agent to share + // across domains. + ResponseHeaders []string +} + type BucketAttrsToUpdate struct { // VersioningEnabled, if set, updates whether the bucket uses versioning. VersioningEnabled optional.Bool @@ -421,6 +502,19 @@ type BucketAttrsToUpdate struct { // RequesterPays, if set, updates whether the bucket is a Requester Pays bucket. RequesterPays optional.Bool + // RetentionPolicy, if set, updates the retention policy of the bucket. Using + // RetentionPolicy.RetentionPeriod = 0 will delete the existing policy. + // + // This feature is in private alpha release. It is not currently available to + // most customers. It might be changed in backwards-incompatible ways and is not + // subject to any SLA or deprecation policy. + RetentionPolicy *RetentionPolicy + + // CORS, if set, replaces the CORS configuration with a new configuration. + // When an empty slice is provided, all CORS policies are removed; when nil + // is provided, the value is ignored in the update. + CORS []CORS + setLabels map[string]string deleteLabels map[string]bool } @@ -445,6 +539,18 @@ func (ua *BucketAttrsToUpdate) DeleteLabel(name string) { func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { rb := &raw.Bucket{} + if ua.CORS != nil { + rb.Cors = toRawCORS(ua.CORS) + rb.ForceSendFields = append(rb.ForceSendFields, "Cors") + } + if ua.RetentionPolicy != nil { + if ua.RetentionPolicy.RetentionPeriod == 0 { + rb.NullFields = append(rb.NullFields, "RetentionPolicy") + rb.RetentionPolicy = nil + } else { + rb.RetentionPolicy = ua.RetentionPolicy.toRawRetentionPolicy() + } + } if ua.VersioningEnabled != nil { rb.Versioning = &raw.BucketVersioning{ Enabled: optional.ToBool(ua.VersioningEnabled), @@ -521,6 +627,25 @@ func (b *BucketHandle) UserProject(projectID string) *BucketHandle { return &b2 } +// LockRetentionPolicy locks a bucket's retention policy until a previously-configured +// RetentionPeriod past the EffectiveTime. Note that if RetentionPeriod is set to less +// than a day, the retention policy is treated as a development configuration and locking +// will have no effect. The BucketHandle must have a metageneration condition that +// matches the bucket's metageneration. See BucketHandle.If. +// +// This feature is in private alpha release. It is not currently available to +// most customers. It might be changed in backwards-incompatible ways and is not +// subject to any SLA or deprecation policy. +func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error { + var metageneration int64 + if b.conds != nil { + metageneration = b.conds.MetagenerationMatch + } + req := b.c.raw.Buckets.LockRetentionPolicy(b.name, metageneration) + _, err := req.Context(ctx).Do() + return err +} + // applyBucketConds modifies the provided call using the conditions in conds. // call is something that quacks like a *raw.WhateverCall. 
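Taken together, the RetentionPolicy attribute, the new BucketAttrsToUpdate fields, and LockRetentionPolicy support the flow sketched below; this is a sketch only, assuming an existing *storage.Client named client and a hypothetical bucket name.

	func enforceRetention(ctx context.Context, client *storage.Client) error {
		b := client.Bucket("my-bucket")
		// Attach (or replace) an unlocked retention policy of 25 hours; periods of
		// less than a day are treated as development configurations when locking.
		// Setting RetentionPeriod to 0 here would delete the policy instead, and a
		// nil CORS field leaves the bucket's CORS configuration untouched.
		if _, err := b.Update(ctx, storage.BucketAttrsToUpdate{
			RetentionPolicy: &storage.RetentionPolicy{RetentionPeriod: 25 * time.Hour},
		}); err != nil {
			return err
		}
		// Locking needs a metageneration precondition, so read the bucket's current
		// attributes first. Once locked, the policy cannot be removed or shortened.
		attrs, err := b.Attrs(ctx)
		if err != nil {
			return err
		}
		return b.If(storage.BucketConditions{MetagenerationMatch: attrs.MetaGeneration}).
			LockRetentionPolicy(ctx)
	}
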
func applyBucketConds(method string, conds *BucketConditions, call interface{}) error { @@ -544,6 +669,55 @@ func applyBucketConds(method string, conds *BucketConditions, call interface{}) return nil } +func (rp *RetentionPolicy) toRawRetentionPolicy() *raw.BucketRetentionPolicy { + if rp == nil { + return nil + } + return &raw.BucketRetentionPolicy{ + RetentionPeriod: int64(rp.RetentionPeriod / time.Second), + } +} + +func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error) { + if rp == nil { + return nil, nil + } + t, err := time.Parse(time.RFC3339, rp.EffectiveTime) + if err != nil { + return nil, err + } + return &RetentionPolicy{ + RetentionPeriod: time.Duration(rp.RetentionPeriod) * time.Second, + EffectiveTime: t, + }, nil +} + +func toRawCORS(c []CORS) []*raw.BucketCors { + var out []*raw.BucketCors + for _, v := range c { + out = append(out, &raw.BucketCors{ + MaxAgeSeconds: int64(v.MaxAge / time.Second), + Method: v.Methods, + Origin: v.Origins, + ResponseHeader: v.ResponseHeaders, + }) + } + return out +} + +func toCORS(rc []*raw.BucketCors) []CORS { + var out []CORS + for _, v := range rc { + out = append(out, CORS{ + MaxAge: time.Duration(v.MaxAgeSeconds) * time.Second, + Methods: v.Method, + Origins: v.Origin, + ResponseHeaders: v.ResponseHeader, + }) + } + return out +} + func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle { var rl raw.BucketLifecycle if len(l.Rules) == 0 { @@ -742,7 +916,7 @@ func (it *BucketIterator) Next() (*BucketAttrs, error) { // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } -func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error) { +func (it *BucketIterator) fetch(pageSize int, pageToken string) (_ string, err error) { req := it.client.raw.Buckets.List(it.projectID) setClientHeader(req.Header()) req.Projection("full") @@ -752,7 +926,6 @@ func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error) req.MaxResults(int64(pageSize)) } var resp *raw.Buckets - var err error err = runWithRetry(it.ctx, func() error { resp, err = req.Context(it.ctx).Do() return err @@ -761,7 +934,11 @@ func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error) return "", err } for _, item := range resp.Items { - it.buckets = append(it.buckets, newBucket(item)) + b, err := newBucket(item) + if err != nil { + return "", err + } + it.buckets = append(it.buckets, b) } return resp.NextPageToken, nil } diff --git a/vendor/cloud.google.com/go/storage/bucket_test.go b/vendor/cloud.google.com/go/storage/bucket_test.go index 97900d04a..b52d59b3a 100644 --- a/vendor/cloud.google.com/go/storage/bucket_test.go +++ b/vendor/cloud.google.com/go/storage/bucket_test.go @@ -20,9 +20,9 @@ import ( "testing" "time" - "github.com/google/go-cmp/cmp" - "cloud.google.com/go/internal/testutil" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/api/googleapi" raw "google.golang.org/api/storage/v1" ) @@ -30,16 +30,27 @@ import ( func TestBucketAttrsToRawBucket(t *testing.T) { t.Parallel() attrs := &BucketAttrs{ - Name: "name", - ACL: []ACLRule{{Entity: "bob@example.com", Role: RoleOwner}}, - DefaultObjectACL: []ACLRule{{Entity: AllUsers, Role: RoleReader}}, - Location: "loc", - StorageClass: "class", + Name: "name", + ACL: []ACLRule{{Entity: "bob@example.com", Role: RoleOwner}}, + DefaultObjectACL: []ACLRule{{Entity: AllUsers, Role: RoleReader}}, + 
Location: "loc", + StorageClass: "class", + RetentionPolicy: &RetentionPolicy{ + RetentionPeriod: 3 * time.Second, + }, VersioningEnabled: false, // should be ignored: MetaGeneration: 39, Created: time.Now(), Labels: map[string]string{"label": "value"}, + CORS: []CORS{ + { + MaxAge: time.Hour, + Methods: []string{"GET", "POST"}, + Origins: []string{"*"}, + ResponseHeaders: []string{"FOO"}, + }, + }, } got := attrs.toRawBucket() want := &raw.Bucket{ @@ -52,8 +63,19 @@ func TestBucketAttrsToRawBucket(t *testing.T) { }, Location: "loc", StorageClass: "class", - Versioning: nil, // ignore VersioningEnabled if false - Labels: map[string]string{"label": "value"}, + RetentionPolicy: &raw.BucketRetentionPolicy{ + RetentionPeriod: 3, + }, + Versioning: nil, // ignore VersioningEnabled if false + Labels: map[string]string{"label": "value"}, + Cors: []*raw.BucketCors{ + { + MaxAgeSeconds: 3600, + Method: []string{"GET", "POST"}, + Origin: []string{"*"}, + ResponseHeader: []string{"FOO"}, + }, + }, } if msg := testutil.Diff(got, want); msg != "" { t.Error(msg) @@ -231,6 +253,18 @@ func TestNewBucket(t *testing.T) { }, }}, }, + RetentionPolicy: &raw.BucketRetentionPolicy{ + RetentionPeriod: 3, + EffectiveTime: time.Now().Format(time.RFC3339), + }, + Cors: []*raw.BucketCors{ + { + MaxAgeSeconds: 3600, + Method: []string{"GET", "POST"}, + Origin: []string{"*"}, + ResponseHeader: []string{"FOO"}, + }, + }, Acl: []*raw.BucketAccessControl{ {Bucket: "name", Role: "READER", Email: "joe@example.com", Entity: "allUsers"}, }, @@ -261,11 +295,25 @@ func TestNewBucket(t *testing.T) { }, }, }, + RetentionPolicy: &RetentionPolicy{ + RetentionPeriod: 3 * time.Second, + }, + CORS: []CORS{ + { + MaxAge: time.Hour, + Methods: []string{"GET", "POST"}, + Origins: []string{"*"}, + ResponseHeaders: []string{"FOO"}, + }, + }, ACL: []ACLRule{{Entity: "allUsers", Role: RoleReader}}, DefaultObjectACL: []ACLRule{}, } - got := newBucket(rb) - if diff := testutil.Diff(got, want); diff != "" { + got, err := newBucket(rb) + if err != nil { + t.Fatal(err) + } + if diff := testutil.Diff(got, want, cmpopts.IgnoreTypes(time.Time{})); diff != "" { t.Errorf("got=-, want=+:\n%s", diff) } } diff --git a/vendor/cloud.google.com/go/storage/copy.go b/vendor/cloud.google.com/go/storage/copy.go index d0a999c1b..cf7476555 100644 --- a/vendor/cloud.google.com/go/storage/copy.go +++ b/vendor/cloud.google.com/go/storage/copy.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" + "cloud.google.com/go/internal/trace" "golang.org/x/net/context" raw "google.golang.org/api/storage/v1" ) @@ -63,7 +64,10 @@ type Copier struct { } // Run performs the copy. -func (c *Copier) Run(ctx context.Context) (*ObjectAttrs, error) { +func (c *Copier) Run(ctx context.Context) (_ *ObjectAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Copier.Run") + defer func() { trace.EndSpan(ctx, err) }() + if err := c.src.validate(); err != nil { return nil, err } @@ -149,7 +153,10 @@ type Composer struct { } // Run performs the compose operation. 
-func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) { +func (c *Composer) Run(ctx context.Context) (_ *ObjectAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Composer.Run") + defer func() { trace.EndSpan(ctx, err) }() + if err := c.dst.validate(); err != nil { return nil, err } @@ -191,7 +198,6 @@ func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) { return nil, err } var obj *raw.Object - var err error setClientHeader(call.Header()) err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) if err != nil { diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go index 1f32fde95..9040ac2af 100644 --- a/vendor/cloud.google.com/go/storage/doc.go +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -74,15 +74,16 @@ Attrs: Objects An object holds arbitrary data as a sequence of bytes, like a file. You -refer to objects using a handle, just as with buckets. You can use the -standard Go io.Reader and io.Writer interfaces to read and write -object data: +refer to objects using a handle, just as with buckets, but unlike buckets +you don't explicitly create an object. Instead, the first time you write +to an object it will be created. You can use the standard Go io.Reader +and io.Writer interfaces to read and write object data: obj := bkt.Object("data") // Write something to obj. // w implements io.Writer. w := obj.NewWriter(ctx) - // Write some text to obj. This will overwrite whatever is there. + // Write some text to obj. This will either create the object or overwrite whatever is there already. if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil { // TODO: Handle error. } diff --git a/vendor/cloud.google.com/go/storage/example_test.go b/vendor/cloud.google.com/go/storage/example_test.go index c5afe31bf..d4aec8b58 100644 --- a/vendor/cloud.google.com/go/storage/example_test.go +++ b/vendor/cloud.google.com/go/storage/example_test.go @@ -196,6 +196,25 @@ func ExampleBucketHandle_AddNotification() { fmt.Println(n.ID) } +func ExampleBucketHandle_LockRetentionPolicy() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + b := client.Bucket("my-bucket") + attrs, err := b.Attrs(ctx) + if err != nil { + // TODO: handle error. 
+ } + // Note that locking the bucket without first attaching a RetentionPolicy + // that's at least 1 day is a no-op + err = b.If(storage.BucketConditions{MetagenerationMatch: attrs.MetaGeneration}).LockRetentionPolicy(ctx) + if err != nil { + // TODO: handle err + } +} + func ExampleBucketHandle_Notifications() { ctx := context.Background() client, err := storage.NewClient(ctx) diff --git a/vendor/cloud.google.com/go/storage/iam.go b/vendor/cloud.google.com/go/storage/iam.go index 9365509ed..2e1dc5ce6 100644 --- a/vendor/cloud.google.com/go/storage/iam.go +++ b/vendor/cloud.google.com/go/storage/iam.go @@ -16,6 +16,7 @@ package storage import ( "cloud.google.com/go/iam" + "cloud.google.com/go/internal/trace" "golang.org/x/net/context" raw "google.golang.org/api/storage/v1" iampb "google.golang.org/genproto/googleapis/iam/v1" @@ -35,14 +36,16 @@ type iamClient struct { userProject string } -func (c *iamClient) Get(ctx context.Context, resource string) (*iampb.Policy, error) { +func (c *iamClient) Get(ctx context.Context, resource string) (_ *iampb.Policy, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get") + defer func() { trace.EndSpan(ctx, err) }() + call := c.raw.Buckets.GetIamPolicy(resource) setClientHeader(call.Header()) if c.userProject != "" { call.UserProject(c.userProject) } var rp *raw.Policy - var err error err = runWithRetry(ctx, func() error { rp, err = call.Context(ctx).Do() return err @@ -53,7 +56,10 @@ func (c *iamClient) Get(ctx context.Context, resource string) (*iampb.Policy, er return iamFromStoragePolicy(rp), nil } -func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) error { +func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set") + defer func() { trace.EndSpan(ctx, err) }() + rp := iamToStoragePolicy(p) call := c.raw.Buckets.SetIamPolicy(resource, rp) setClientHeader(call.Header()) @@ -66,14 +72,16 @@ func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) e }) } -func (c *iamClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) { +func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (_ []string, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test") + defer func() { trace.EndSpan(ctx, err) }() + call := c.raw.Buckets.TestIamPermissions(resource, perms) setClientHeader(call.Header()) if c.userProject != "" { call.UserProject(c.userProject) } var res *raw.TestIamPermissionsResponse - var err error err = runWithRetry(ctx, func() error { res, err = call.Context(ctx).Do() return err diff --git a/vendor/cloud.google.com/go/storage/integration_test.go b/vendor/cloud.google.com/go/storage/integration_test.go index f73f52530..0b64142cf 100644 --- a/vendor/cloud.google.com/go/storage/integration_test.go +++ b/vendor/cloud.google.com/go/storage/integration_test.go @@ -36,6 +36,7 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp/cmpopts" "golang.org/x/net/context" "cloud.google.com/go/iam" @@ -46,12 +47,12 @@ import ( "google.golang.org/api/option" ) -const testPrefix = "-go-test" +const testPrefix = "go-integration-test" -// suffix is a timestamp-based suffix which is added to all buckets created by -// tests. This reduces flakiness when the tests are run in parallel and allows -// automatic cleaning up of artifacts left when tests fail. 
-var suffix = fmt.Sprintf("%s-%d", testPrefix, time.Now().UnixNano()) +var ( + uidSpace = testutil.NewUIDSpace(testPrefix) + bucketName = uidSpace.New() +) func TestMain(m *testing.M) { integrationTest := initIntegrationTest() @@ -73,36 +74,36 @@ func initIntegrationTest() bool { if testing.Short() { return false } - client, bucket := config(ctx) + client := config(ctx) if client == nil { return false } defer client.Close() - if err := client.Bucket(bucket).Create(ctx, testutil.ProjID(), nil); err != nil { - log.Fatalf("creating bucket %q: %v", bucket, err) + if err := client.Bucket(bucketName).Create(ctx, testutil.ProjID(), nil); err != nil { + log.Fatalf("creating bucket %q: %v", bucketName, err) } return true } -// testConfig returns the Client used to access GCS and the default bucket -// name to use. testConfig skips the current test if credentials are not -// available or when being run in Short mode. -func testConfig(ctx context.Context, t *testing.T) (*Client, string) { +// testConfig returns the Client used to access GCS. testConfig skips +// the current test if credentials are not available or when being run +// in Short mode. +func testConfig(ctx context.Context, t *testing.T) *Client { if testing.Short() { t.Skip("Integration tests skipped in short mode") } - client, bucket := config(ctx) + client := config(ctx) if client == nil { t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") } - return client, bucket + return client } // config is like testConfig, but it doesn't need a *testing.T. -func config(ctx context.Context) (*Client, string) { +func config(ctx context.Context) *Client { ts := testutil.TokenSource(ctx, ScopeFullControl) if ts == nil { - return nil, "" + return nil } p := testutil.ProjID() if p == "" { @@ -112,20 +113,20 @@ func config(ctx context.Context) (*Client, string) { if err != nil { log.Fatalf("NewClient: %v", err) } - return client, p + suffix + return client } func TestIntegration_BucketMethods(t *testing.T) { ctx := context.Background() - client, bucket := testConfig(ctx, t) + client := testConfig(ctx, t) defer client.Close() projectID := testutil.ProjID() - newBucket := bucket + "-new" - b := client.Bucket(newBucket) + newBucketName := uidSpace.New() + b := client.Bucket(newBucketName) // Test Create and Delete. if err := b.Create(ctx, projectID, nil); err != nil { - t.Fatalf("Bucket(%v).Create(%v, %v) failed: %v", newBucket, projectID, nil, err) + t.Fatalf("Bucket(%v).Create(%v, %v) failed: %v", newBucketName, projectID, nil, err) } attrs, err := b.Attrs(ctx) if err != nil { @@ -141,8 +142,8 @@ func TestIntegration_BucketMethods(t *testing.T) { t.Error("got versioning enabled, wanted it disabled") } } - if err := client.Bucket(newBucket).Delete(ctx); err != nil { - t.Errorf("Bucket(%v).Delete failed: %v", newBucket, err) + if err := client.Bucket(newBucketName).Delete(ctx); err != nil { + t.Errorf("Bucket(%v).Delete failed: %v", newBucketName, err) } // Test Create and Delete with attributes. 
@@ -181,8 +182,8 @@ func TestIntegration_BucketMethods(t *testing.T) { }}, }, } - if err := client.Bucket(newBucket).Create(ctx, projectID, attrs); err != nil { - t.Fatalf("Bucket(%v).Create(%v, %+v) failed: %v", newBucket, projectID, attrs, err) + if err := client.Bucket(newBucketName).Create(ctx, projectID, attrs); err != nil { + t.Fatalf("Bucket(%v).Create(%v, %+v) failed: %v", newBucketName, projectID, attrs, err) } attrs, err = b.Attrs(ctx) if err != nil { @@ -201,17 +202,17 @@ func TestIntegration_BucketMethods(t *testing.T) { t.Errorf("labels: got %v, want %v", got, want) } } - if err := client.Bucket(newBucket).Delete(ctx); err != nil { - t.Errorf("Bucket(%v).Delete failed: %v", newBucket, err) + if err := client.Bucket(newBucketName).Delete(ctx); err != nil { + t.Errorf("Bucket(%v).Delete failed: %v", newBucketName, err) } } func TestIntegration_BucketUpdate(t *testing.T) { ctx := context.Background() - client, bucket := testConfig(ctx, t) + client := testConfig(ctx, t) defer client.Close() - b := client.Bucket(bucket) + b := client.Bucket(bucketName) attrs, err := b.Attrs(ctx) if err != nil { t.Fatal(err) @@ -278,10 +279,10 @@ func TestIntegration_BucketUpdate(t *testing.T) { func TestIntegration_ConditionalDelete(t *testing.T) { ctx := context.Background() - client, bucket := testConfig(ctx, t) + client := testConfig(ctx, t) defer client.Close() - o := client.Bucket(bucket).Object("conddel") + o := client.Bucket(bucketName).Object("conddel") wc := o.NewWriter(ctx) wc.ContentType = "text/plain" @@ -312,10 +313,10 @@ func TestIntegration_ConditionalDelete(t *testing.T) { func TestIntegration_Objects(t *testing.T) { // TODO(jba): Use subtests (Go 1.7). ctx := context.Background() - client, bucket := testConfig(ctx, t) + client := testConfig(ctx, t) defer client.Close() - bkt := client.Bucket(bucket) + bkt := client.Bucket(bucketName) const defaultType = "text/plain" @@ -502,9 +503,9 @@ func TestIntegration_Objects(t *testing.T) { copyObj, err := bkt.Object(copyName).CopierFrom(bkt.Object(objName)).Run(ctx) if err != nil { t.Errorf("Copier.Run failed with %v", err) - } else if !namesEqual(copyObj, bucket, copyName) { + } else if !namesEqual(copyObj, bucketName, copyName) { t.Errorf("Copy object bucket, name: got %q.%q, want %q.%q", - copyObj.Bucket, copyObj.Name, bucket, copyName) + copyObj.Bucket, copyObj.Name, bucketName, copyName) } // Copying with attributes. @@ -515,9 +516,9 @@ func TestIntegration_Objects(t *testing.T) { if err != nil { t.Errorf("Copier.Run failed with %v", err) } else { - if !namesEqual(copyObj, bucket, copyName) { + if !namesEqual(copyObj, bucketName, copyName) { t.Errorf("Copy object bucket, name: got %q.%q, want %q.%q", - copyObj.Bucket, copyObj.Name, bucket, copyName) + copyObj.Bucket, copyObj.Name, bucketName, copyName) } if copyObj.ContentEncoding != contentEncoding { t.Errorf("Copy ContentEncoding: got %q, want %q", copyObj.ContentEncoding, contentEncoding) @@ -632,7 +633,7 @@ func TestIntegration_Objects(t *testing.T) { t.Fatal(err) } - slurp, err := readObject(ctx, publicClient.Bucket(bucket).Object(publicObj)) + slurp, err := readObject(ctx, publicClient.Bucket(bucketName).Object(publicObj)) if err != nil { t.Errorf("readObject failed with %v", err) } else if !bytes.Equal(slurp, contents[publicObj]) { @@ -640,7 +641,7 @@ func TestIntegration_Objects(t *testing.T) { } // Test writer error handling. 
- wc := publicClient.Bucket(bucket).Object(publicObj).NewWriter(ctx) + wc := publicClient.Bucket(bucketName).Object(publicObj).NewWriter(ctx) if _, err := wc.Write([]byte("hello")); err != nil { t.Errorf("Write unexpectedly failed with %v", err) } @@ -744,10 +745,10 @@ func TestIntegration_SignedURL(t *testing.T) { } ctx := context.Background() - client, bucket := testConfig(ctx, t) + client := testConfig(ctx, t) defer client.Close() - bkt := client.Bucket(bucket) + bkt := client.Bucket(bucketName) obj := "signedURL" contents := []byte("This is a test of SignedURL.\n") md5 := "Jyxvgwm9n2MsrGTMPbMeYA==" // base64-encoded MD5 of contents @@ -805,7 +806,7 @@ func TestIntegration_SignedURL(t *testing.T) { opts.PrivateKey = jwtConf.PrivateKey opts.Method = "GET" opts.Expires = time.Now().Add(time.Hour) - u, err := SignedURL(bucket, obj, &opts) + u, err := SignedURL(bucketName, obj, &opts) if err != nil { t.Errorf("%s: SignedURL: %v", test.desc, err) continue @@ -843,10 +844,10 @@ func getURL(url string, headers map[string][]string) ([]byte, error) { func TestIntegration_ACL(t *testing.T) { ctx := context.Background() - client, bucket := testConfig(ctx, t) + client := testConfig(ctx, t) defer client.Close() - bkt := client.Bucket(bucket) + bkt := client.Bucket(bucketName) entity := ACLEntity("domain-google.com") rule := ACLRule{Entity: entity, Role: RoleReader} @@ -855,7 +856,7 @@ func TestIntegration_ACL(t *testing.T) { } acl, err := bkt.DefaultObjectACL().List(ctx) if err != nil { - t.Errorf("DefaultObjectACL.List for bucket %q: %v", bucket, err) + t.Errorf("DefaultObjectACL.List for bucket %q: %v", bucketName, err) } else if !hasRule(acl, rule) { t.Errorf("default ACL missing %#v", rule) } @@ -912,10 +913,10 @@ func hasRule(acl []ACLRule, rule ACLRule) bool { func TestIntegration_ValidObjectNames(t *testing.T) { ctx := context.Background() - client, bucket := testConfig(ctx, t) + client := testConfig(ctx, t) defer client.Close() - bkt := client.Bucket(bucket) + bkt := client.Bucket(bucketName) validNames := []string{ "gopher", @@ -949,10 +950,10 @@ func TestIntegration_ValidObjectNames(t *testing.T) { func TestIntegration_WriterContentType(t *testing.T) { ctx := context.Background() - client, bucket := testConfig(ctx, t) + client := testConfig(ctx, t) defer client.Close() - obj := client.Bucket(bucket).Object("content") + obj := client.Bucket(bucketName).Object("content") testCases := []struct { content string setType, wantType string @@ -994,10 +995,10 @@ func TestIntegration_WriterContentType(t *testing.T) { func TestIntegration_ZeroSizedObject(t *testing.T) { t.Parallel() ctx := context.Background() - client, bucket := testConfig(ctx, t) + client := testConfig(ctx, t) defer client.Close() - obj := client.Bucket(bucket).Object("zero") + obj := client.Bucket(bucketName).Object("zero") // Check writing it works as expected. w := obj.NewWriter(ctx) @@ -1021,10 +1022,10 @@ func TestIntegration_Encryption(t *testing.T) { // involving objects. Bucket and ACL operations aren't tested because they // aren't affected by customer encryption. Neither is deletion. 
ctx := context.Background() - client, bucket := testConfig(ctx, t) + client := testConfig(ctx, t) defer client.Close() - obj := client.Bucket(bucket).Object("customer-encryption") + obj := client.Bucket(bucketName).Object("customer-encryption") key := []byte("my-secret-AES-256-encryption-key") keyHash := sha256.Sum256(key) keyHashB64 := base64.StdEncoding.EncodeToString(keyHash[:]) @@ -1112,7 +1113,7 @@ func TestIntegration_Encryption(t *testing.T) { checkRead("first object", obj, key, contents) - obj2 := client.Bucket(bucket).Object("customer-encryption-2") + obj2 := client.Bucket(bucketName).Object("customer-encryption-2") // Copying an object without the key should fail. if _, err := obj2.CopierFrom(obj).Run(ctx); err == nil { t.Fatal("want error, got nil") @@ -1144,7 +1145,7 @@ func TestIntegration_Encryption(t *testing.T) { if _, err := obj2.Key(key).CopierFrom(obj2.Key(key2)).Run(ctx); err != nil { t.Fatal(err) } - obj3 := client.Bucket(bucket).Object("customer-encryption-3") + obj3 := client.Bucket(bucketName).Object("customer-encryption-3") // Composing without keys should fail. if _, err := obj3.ComposerFrom(obj, obj2).Run(ctx); err == nil { t.Fatal("want error, got nil") @@ -1175,10 +1176,10 @@ func TestIntegration_Encryption(t *testing.T) { func TestIntegration_NonexistentBucket(t *testing.T) { t.Parallel() ctx := context.Background() - client, bucket := testConfig(ctx, t) + client := testConfig(ctx, t) defer client.Close() - bkt := client.Bucket(bucket + "-nonexistent") + bkt := client.Bucket(uidSpace.New()) if _, err := bkt.Attrs(ctx); err != ErrBucketNotExist { t.Errorf("Attrs: got %v, want ErrBucketNotExist", err) } @@ -1194,10 +1195,10 @@ func TestIntegration_PerObjectStorageClass(t *testing.T) { newStorageClass = "MULTI_REGIONAL" ) ctx := context.Background() - client, bucket := testConfig(ctx, t) + client := testConfig(ctx, t) defer client.Close() - bkt := client.Bucket(bucket) + bkt := client.Bucket(bucketName) // The bucket should have the default storage class. battrs, err := bkt.Attrs(ctx) @@ -1254,16 +1255,16 @@ func TestIntegration_BucketInCopyAttrs(t *testing.T) { // call, but object name and content-type aren't, then we get an error. See // the comment in Copier.Run. ctx := context.Background() - client, bucket := testConfig(ctx, t) + client := testConfig(ctx, t) defer client.Close() - bkt := client.Bucket(bucket) + bkt := client.Bucket(bucketName) obj := bkt.Object("bucketInCopyAttrs") if err := writeObject(ctx, obj, "", []byte("foo")); err != nil { t.Fatal(err) } copier := obj.CopierFrom(obj) - rawObject := copier.ObjectAttrs.toRawObject(bucket) + rawObject := copier.ObjectAttrs.toRawObject(bucketName) _, err := copier.callRewrite(ctx, rawObject) if err == nil { t.Errorf("got nil, want error") @@ -1273,7 +1274,7 @@ func TestIntegration_BucketInCopyAttrs(t *testing.T) { func TestIntegration_NoUnicodeNormalization(t *testing.T) { t.Parallel() ctx := context.Background() - client, _ := testConfig(ctx, t) + client := testConfig(ctx, t) defer client.Close() bkt := client.Bucket("storage-library-test-bucket") @@ -1303,12 +1304,12 @@ func TestIntegration_HashesOnUpload(t *testing.T) { t.Skip("Integration tests skipped in short mode") } ctx := context.Background() - client, bucket := testConfig(ctx, t) + client := testConfig(ctx, t) if client == nil { t.Skip("Integration tests skipped. 
See CONTRIBUTING.md for details") } defer client.Close() - obj := client.Bucket(bucket).Object("hashesOnUpload-1") + obj := client.Bucket(bucketName).Object("hashesOnUpload-1") data := []byte("I can't wait to be verified") write := func(w *Writer) error { @@ -1363,10 +1364,10 @@ func TestIntegration_HashesOnUpload(t *testing.T) { func TestIntegration_BucketIAM(t *testing.T) { ctx := context.Background() - client, bucket := testConfig(ctx, t) + client := testConfig(ctx, t) defer client.Close() - bkt := client.Bucket(bucket) + bkt := client.Bucket(bucketName) // This bucket is unique to this test run. So we don't have // to worry about other runs interfering with our IAM policy @@ -1439,10 +1440,10 @@ func TestIntegration_RequesterPays(t *testing.T) { const wantErrorCode = 400 ctx := context.Background() - client, bucketName := testConfig(ctx, t) + client := testConfig(ctx, t) defer client.Close() - bucketName += "-rp" - b := client.Bucket(bucketName) + bucketName2 := uidSpace.New() + b := client.Bucket(bucketName2) projID := testutil.ProjID() // Use Firestore project as a project that does not contain the bucket. otherProjID := os.Getenv(envFirestoreProjID) @@ -1458,7 +1459,7 @@ func TestIntegration_RequesterPays(t *testing.T) { t.Fatal(err) } defer otherClient.Close() - ob := otherClient.Bucket(bucketName) + ob := otherClient.Bucket(bucketName2) user, err := keyFileEmail(os.Getenv("GCLOUD_TESTS_GOLANG_KEY")) if err != nil { t.Fatal(err) @@ -1663,9 +1664,9 @@ func keyFileEmail(filename string) (string, error) { func TestNotifications(t *testing.T) { ctx := context.Background() - client, bucket := testConfig(ctx, t) + client := testConfig(ctx, t) defer client.Close() - bkt := client.Bucket(bucket) + bkt := client.Bucket(bucketName) checkNotifications := func(msg string, want map[string]*Notification) { got, err := bkt.Notifications(ctx) @@ -1752,7 +1753,7 @@ func TestIntegration_Public(t *testing.T) { } // Reading from or writing to a non-public bucket fails. - c, bucketName := testConfig(ctx, t) + c := testConfig(ctx, t) defer c.Close() nonPublicObj := client.Bucket(bucketName).Object("noauth") // Oddly, reading returns 403 but writing returns 401. 
@@ -1908,9 +1909,9 @@ func TestIntegration_CancelWrite(t *testing.T) { t.Skip("Integration tests skipped in short mode") } ctx := context.Background() - client, bucket := testConfig(ctx, t) + client := testConfig(ctx, t) defer client.Close() - bkt := client.Bucket(bucket) + bkt := client.Bucket(bucketName) cctx, cancel := context.WithCancel(ctx) defer cancel() @@ -1939,6 +1940,251 @@ func TestIntegration_CancelWrite(t *testing.T) { } } +func TestIntegration_UpdateCORS(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + + ctx := context.Background() + client := testConfig(ctx, t) + defer client.Close() + + initialSettings := []CORS{ + { + MaxAge: time.Hour, + Methods: []string{"POST"}, + Origins: []string{"some-origin.com"}, + ResponseHeaders: []string{"foo-bar"}, + }, + } + + for _, test := range []struct { + input []CORS + want []CORS + }{ + { + input: []CORS{ + { + MaxAge: time.Hour, + Methods: []string{"GET"}, + Origins: []string{"*"}, + ResponseHeaders: []string{"some-header"}, + }, + }, + want: []CORS{ + { + MaxAge: time.Hour, + Methods: []string{"GET"}, + Origins: []string{"*"}, + ResponseHeaders: []string{"some-header"}, + }, + }, + }, + { + input: []CORS{}, + want: nil, + }, + { + input: nil, + want: []CORS{ + { + MaxAge: time.Hour, + Methods: []string{"POST"}, + Origins: []string{"some-origin.com"}, + ResponseHeaders: []string{"foo-bar"}, + }, + }, + }, + } { + bkt := client.Bucket(uidSpace.New()) + defer func(b *BucketHandle) { + err := b.Delete(ctx) + if err != nil { + t.Fatal(err) + } + }(bkt) + err := bkt.Create(ctx, testutil.ProjID(), &BucketAttrs{CORS: initialSettings}) + if err != nil { + t.Fatal(err) + } + + _, err = bkt.Update(ctx, BucketAttrsToUpdate{CORS: test.input}) + if err != nil { + t.Fatal(err) + } + + attrs, err := bkt.Attrs(ctx) + if err != nil { + t.Fatal(err) + } + + if diff := testutil.Diff(attrs.CORS, test.want); diff != "" { + t.Errorf("input: %v\ngot=-, want=+:\n%s", test.input, diff) + } + } +} + +func TestIntegration_UpdateRetentionPolicy(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + + ctx := context.Background() + client := testConfig(ctx, t) + defer client.Close() + + initial := &RetentionPolicy{RetentionPeriod: time.Minute} + + for _, test := range []struct { + input *RetentionPolicy + want *RetentionPolicy + }{ + { // Update + input: &RetentionPolicy{RetentionPeriod: time.Hour}, + want: &RetentionPolicy{RetentionPeriod: time.Hour}, + }, + { // Update even with timestamp (EffectiveTime should be ignored) + input: &RetentionPolicy{RetentionPeriod: time.Hour, EffectiveTime: time.Now()}, + want: &RetentionPolicy{RetentionPeriod: time.Hour}, + }, + { // Remove + input: &RetentionPolicy{}, + want: nil, + }, + { // Remove even with timestamp (EffectiveTime should be ignored) + input: &RetentionPolicy{EffectiveTime: time.Now()}, + want: nil, + }, + { // Ignore + input: nil, + want: initial, + }, + } { + bkt := client.Bucket(uidSpace.New()) + err := bkt.Create(ctx, testutil.ProjID(), &BucketAttrs{RetentionPolicy: initial}) + if err != nil { + t.Fatal(err) + } + + defer func() { + if err := bkt.Delete(ctx); err != nil { + t.Fatal(err) + } + }() + + _, err = bkt.Update(ctx, BucketAttrsToUpdate{RetentionPolicy: test.input}) + if err != nil { + t.Fatal(err) + } + + attrs, err := bkt.Attrs(ctx) + if err != nil { + t.Fatal(err) + } + + if attrs.RetentionPolicy != nil && attrs.RetentionPolicy.EffectiveTime.Unix() == 0 { + // Should be set by the server and parsed by the 
client + t.Fatal("EffectiveTime should be set, but it was not") + } + if diff := testutil.Diff(attrs.RetentionPolicy, test.want, cmpopts.IgnoreTypes(time.Time{})); diff != "" { + t.Errorf("input: %v\ngot=-, want=+:\n%s", test.input, diff) + } + } +} + +func TestIntegration_DeleteObjectInBucketWithRetentionPolicy(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + + ctx := context.Background() + client := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(uidSpace.New()) + err := bkt.Create(ctx, testutil.ProjID(), &BucketAttrs{RetentionPolicy: &RetentionPolicy{RetentionPeriod: 25 * time.Hour}}) + if err != nil { + t.Fatal(err) + } + + oh := bkt.Object("some-object") + if err = writeObject(ctx, oh, "text/plain", []byte("hello world")); err != nil { + t.Fatal(err) + } + + err = oh.Delete(ctx) + if err == nil { + t.Fatal("expected to err deleting an object in a bucket with retention period, but got nil") + } + + // Remove the retention period + _, err = bkt.Update(ctx, BucketAttrsToUpdate{RetentionPolicy: &RetentionPolicy{RetentionPeriod: 0}}) + if err != nil { + t.Fatal(err) + } + + err = oh.Delete(ctx) + if err != nil { + t.Fatal(err) + } + + if err := bkt.Delete(ctx); err != nil { + t.Fatal(err) + } +} + +func TestIntegration_LockBucket(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + + ctx := context.Background() + client := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(uidSpace.New()) + err := bkt.Create(ctx, testutil.ProjID(), &BucketAttrs{RetentionPolicy: &RetentionPolicy{RetentionPeriod: time.Hour * 25}}) + if err != nil { + t.Fatal(err) + } + + attrs, err := bkt.Attrs(ctx) + if err != nil { + t.Fatal(err) + } + + err = bkt.If(BucketConditions{MetagenerationMatch: attrs.MetaGeneration}).LockRetentionPolicy(ctx) + if err != nil { + t.Fatal("could not lock", err) + } + + _, err = bkt.Update(ctx, BucketAttrsToUpdate{RetentionPolicy: &RetentionPolicy{RetentionPeriod: time.Hour}}) + if err == nil { + t.Fatal("Expected error updating locked bucket, got nil") + } +} + +func TestIntegration_LockBucket_MetagenerationRequired(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + + ctx := context.Background() + client := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(uidSpace.New()) + err := bkt.Create(ctx, testutil.ProjID(), &BucketAttrs{RetentionPolicy: &RetentionPolicy{RetentionPeriod: time.Hour * 25}}) + if err != nil { + t.Fatal(err) + } + + err = bkt.LockRetentionPolicy(ctx) + if err == nil { + t.Fatal("expected error locking bucket without metageneration condition, got nil") + } +} + func writeObject(ctx context.Context, obj *ObjectHandle, contentType string, contents []byte) error { w := obj.NewWriter(ctx) w.ContentType = contentType @@ -1968,12 +2214,12 @@ func cleanup() error { return nil // Don't clean up in short mode. } ctx := context.Background() - client, bucket := config(ctx) + client := config(ctx) if client == nil { return nil // Don't cleanup if we're not configured correctly. 
} defer client.Close() - if err := killBucket(ctx, client, bucket); err != nil { + if err := killBucket(ctx, client, bucketName); err != nil { return err } @@ -1983,7 +2229,7 @@ func cleanup() error { const expireAge = 24 * time.Hour projectID := testutil.ProjID() it := client.Buckets(ctx, projectID) - it.Prefix = projectID + testPrefix + it.Prefix = testPrefix for { bktAttrs, err := it.Next() if err == iterator.Done { diff --git a/vendor/cloud.google.com/go/storage/notifications.go b/vendor/cloud.google.com/go/storage/notifications.go index b95dd453a..861bcab33 100644 --- a/vendor/cloud.google.com/go/storage/notifications.go +++ b/vendor/cloud.google.com/go/storage/notifications.go @@ -19,6 +19,7 @@ import ( "fmt" "regexp" + "cloud.google.com/go/internal/trace" "golang.org/x/net/context" raw "google.golang.org/api/storage/v1" ) @@ -118,7 +119,10 @@ func toRawNotification(n *Notification) *raw.Notification { // AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID // and PayloadFormat, and must not set its ID. The other fields are all optional. The // returned Notification's ID can be used to refer to it. -func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (*Notification, error) { +func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (_ *Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification") + defer func() { trace.EndSpan(ctx, err) }() + if n.ID != "" { return nil, errors.New("storage: AddNotification: ID must not be set") } @@ -142,14 +146,16 @@ func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (*N // Notifications returns all the Notifications configured for this bucket, as a map // indexed by notification ID. -func (b *BucketHandle) Notifications(ctx context.Context) (map[string]*Notification, error) { +func (b *BucketHandle) Notifications(ctx context.Context) (_ map[string]*Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications") + defer func() { trace.EndSpan(ctx, err) }() + call := b.c.raw.Notifications.List(b.name) setClientHeader(call.Header()) if b.userProject != "" { call.UserProject(b.userProject) } var res *raw.Notifications - var err error err = runWithRetry(ctx, func() error { res, err = call.Context(ctx).Do() return err @@ -169,7 +175,10 @@ func notificationsToMap(rns []*raw.Notification) map[string]*Notification { } // DeleteNotification deletes the notification with the given ID. 
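The AddNotification doc comment above lists the fields a caller must provide (TopicProjectID, TopicID, PayloadFormat) and notes that the returned notification's ID can be used later. A minimal sketch along those lines (not part of the vendored patch; the bucket, project, and topic names are hypothetical, and the JSONPayload constant is assumed to map to the JSON_API_V1 payload format) might look like:

package main

import (
	"log"

	"cloud.google.com/go/storage"
	"golang.org/x/net/context"
)

// configureNotification wires a bucket to a Cloud Pub/Sub topic and then removes
// the notification again using the ID returned by AddNotification.
func configureNotification(ctx context.Context, client *storage.Client) error {
	bkt := client.Bucket("my-bucket")
	n, err := bkt.AddNotification(ctx, &storage.Notification{
		TopicProjectID: "my-project", // project owning the Pub/Sub topic
		TopicID:        "my-topic",
		PayloadFormat:  storage.JSONPayload, // assumed constant for "JSON_API_V1"
	})
	if err != nil {
		return err
	}
	// The returned ID identifies the notification, e.g. for later deletion.
	return bkt.DeleteNotification(ctx, n.ID)
}

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	if err := configureNotification(ctx, client); err != nil {
		log.Fatal(err)
	}
}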
-func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) error { +func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification") + defer func() { trace.EndSpan(ctx, err) }() + call := b.c.raw.Notifications.Delete(b.name, id) setClientHeader(call.Header()) if b.userProject != "" { diff --git a/vendor/cloud.google.com/go/storage/notifications_test.go b/vendor/cloud.google.com/go/storage/notifications_test.go index d4dcf6bc9..3d40cea27 100644 --- a/vendor/cloud.google.com/go/storage/notifications_test.go +++ b/vendor/cloud.google.com/go/storage/notifications_test.go @@ -18,6 +18,7 @@ import ( "testing" "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" raw "google.golang.org/api/storage/v1" ) @@ -89,7 +90,7 @@ func TestAddNotificationsErrors(t *testing.T) { {TopicProjectID: "p"}, // missing TopicID {TopicID: "t"}, // missing TopicProjectID } { - _, err := b.AddNotification(nil, n) + _, err := b.AddNotification(context.Background(), n) if err == nil { t.Errorf("%+v: got nil, want error", n) } diff --git a/vendor/cloud.google.com/go/storage/oc_test.go b/vendor/cloud.google.com/go/storage/oc_test.go new file mode 100644 index 000000000..144137fed --- /dev/null +++ b/vendor/cloud.google.com/go/storage/oc_test.go @@ -0,0 +1,54 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package storage + +import ( + "testing" + + "go.opencensus.io/trace" + "golang.org/x/net/context" +) + +func TestIntegration_OCTracing(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + + te := &testExporter{} + trace.RegisterExporter(te) + defer trace.UnregisterExporter(te) + trace.SetDefaultSampler(trace.AlwaysSample()) + + ctx := context.Background() + client := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(bucketName) + bkt.Attrs(ctx) + + if len(te.spans) != 1 { + t.Fatalf("Expected 1 span to be created, but got %d", len(te.spans)) + } +} + +type testExporter struct { + spans []*trace.SpanData +} + +func (te *testExporter) ExportSpan(s *trace.SpanData) { + te.spans = append(te.spans, s) +} diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go index 68b3c3bf4..fba456e29 100644 --- a/vendor/cloud.google.com/go/storage/reader.go +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -15,13 +15,164 @@ package storage import ( + "errors" "fmt" "hash/crc32" "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + + "cloud.google.com/go/internal/trace" + "golang.org/x/net/context" + "google.golang.org/api/googleapi" ) var crc32cTable = crc32.MakeTable(crc32.Castagnoli) +// NewReader creates a new Reader to read the contents of the +// object. +// ErrObjectNotExist will be returned if the object is not found. 
+// +// The caller must call Close on the returned Reader when done reading. +func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) { + return o.NewRangeReader(ctx, 0, -1) +} + +// NewRangeReader reads part of an object, reading at most length bytes +// starting at the given offset. If length is negative, the object is read +// until the end. +func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (_ *Reader, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader") + defer func() { trace.EndSpan(ctx, err) }() + + if err := o.validate(); err != nil { + return nil, err + } + if offset < 0 { + return nil, fmt.Errorf("storage: invalid offset %d < 0", offset) + } + if o.conds != nil { + if err := o.conds.validate("NewRangeReader"); err != nil { + return nil, err + } + } + u := &url.URL{ + Scheme: "https", + Host: "storage.googleapis.com", + Path: fmt.Sprintf("/%s/%s", o.bucket, o.object), + RawQuery: conditionsQuery(o.gen, o.conds), + } + verb := "GET" + if length == 0 { + verb = "HEAD" + } + req, err := http.NewRequest(verb, u.String(), nil) + if err != nil { + return nil, err + } + req = withContext(req, ctx) + if length < 0 && offset > 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + } else if length > 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) + } + if o.userProject != "" { + req.Header.Set("X-Goog-User-Project", o.userProject) + } + if o.readCompressed { + req.Header.Set("Accept-Encoding", "gzip") + } + if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil { + return nil, err + } + var res *http.Response + err = runWithRetry(ctx, func() error { + res, err = o.c.hc.Do(req) + if err != nil { + return err + } + if res.StatusCode == http.StatusNotFound { + res.Body.Close() + return ErrObjectNotExist + } + if res.StatusCode < 200 || res.StatusCode > 299 { + body, _ := ioutil.ReadAll(res.Body) + res.Body.Close() + return &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + Body: string(body), + } + } + if offset > 0 && length != 0 && res.StatusCode != http.StatusPartialContent { + res.Body.Close() + return errors.New("storage: partial request not satisfied") + } + return nil + }) + if err != nil { + return nil, err + } + + var size int64 // total size of object, even if a range was requested. + if res.StatusCode == http.StatusPartialContent { + cr := strings.TrimSpace(res.Header.Get("Content-Range")) + if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") { + return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) + } + size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64) + if err != nil { + return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) + } + } else { + size = res.ContentLength + } + + remain := res.ContentLength + body := res.Body + if length == 0 { + remain = 0 + body.Close() + body = emptyBody + } + var ( + checkCRC bool + crc uint32 + ) + // Even if there is a CRC header, we can't compute the hash on partial data. 
+ if remain == size { + crc, checkCRC = parseCRC32c(res) + } + return &Reader{ + body: body, + size: size, + remain: remain, + contentType: res.Header.Get("Content-Type"), + contentEncoding: res.Header.Get("Content-Encoding"), + cacheControl: res.Header.Get("Cache-Control"), + wantCRC: crc, + checkCRC: checkCRC, + }, nil +} + +func parseCRC32c(res *http.Response) (uint32, bool) { + const prefix = "crc32c=" + for _, spec := range res.Header["X-Goog-Hash"] { + if strings.HasPrefix(spec, prefix) { + c, err := decodeUint32(spec[len(prefix):]) + if err == nil { + return c, true + } + } + } + return 0, false +} + +var emptyBody = ioutil.NopCloser(strings.NewReader("")) + // Reader reads a Cloud Storage object. // It implements io.Reader. // diff --git a/vendor/cloud.google.com/go/storage/reader_test.go b/vendor/cloud.google.com/go/storage/reader_test.go new file mode 100644 index 000000000..de734c636 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/reader_test.go @@ -0,0 +1,112 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "fmt" + "io/ioutil" + "net/http" + "strconv" + "strings" + "testing" + + "golang.org/x/net/context" + "google.golang.org/api/option" +) + +const readData = "0123456789" + +func TestRangeReader(t *testing.T) { + hc, close := newTestServer(handleRangeRead) + defer close() + ctx := context.Background() + c, err := NewClient(ctx, option.WithHTTPClient(hc)) + if err != nil { + t.Fatal(err) + } + obj := c.Bucket("b").Object("o") + for _, test := range []struct { + offset, length int64 + want string + }{ + {0, -1, readData}, + {0, 10, readData}, + {0, 5, readData[:5]}, + {1, 3, readData[1:4]}, + {6, -1, readData[6:]}, + {4, 20, readData[4:]}, + } { + r, err := obj.NewRangeReader(ctx, test.offset, test.length) + if err != nil { + t.Errorf("%d/%d: %v", test.offset, test.length, err) + continue + } + gotb, err := ioutil.ReadAll(r) + if err != nil { + t.Errorf("%d/%d: %v", test.offset, test.length, err) + continue + } + if got := string(gotb); got != test.want { + t.Errorf("%d/%d: got %q, want %q", test.offset, test.length, got, test.want) + } + } +} + +func handleRangeRead(w http.ResponseWriter, r *http.Request) { + rh := strings.TrimSpace(r.Header.Get("Range")) + data := readData + var from, to int + if rh == "" { + from = 0 + to = len(data) + } else { + // assume "bytes=N-" or "bytes=N-M" + var err error + i := strings.IndexRune(rh, '=') + j := strings.IndexRune(rh, '-') + from, err = strconv.Atoi(rh[i+1 : j]) + if err != nil { + w.WriteHeader(500) + return + } + to = len(data) + if j+1 < len(rh) { + to, err = strconv.Atoi(rh[j+1:]) + if err != nil { + w.WriteHeader(500) + return + } + to++ // Range header is inclusive, Go slice is exclusive + } + if from >= len(data) && to != from { + w.WriteHeader(416) + return + } + if from > len(data) { + from = len(data) + } + if to > len(data) { + to = len(data) + } + } + data = data[from:to] + if data != readData { + 
w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", from, to-1, len(readData))) + w.WriteHeader(http.StatusPartialContent) + } + if _, err := w.Write([]byte(data)); err != nil { + panic(err) + } +} diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index 0723f07ef..9f324f449 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -26,7 +26,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net/http" "net/url" "reflect" @@ -37,6 +36,7 @@ import ( "time" "unicode/utf8" + "cloud.google.com/go/internal/trace" "google.golang.org/api/option" htransport "google.golang.org/api/transport/http" @@ -368,7 +368,10 @@ func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle { // Attrs returns meta information about the object. // ErrObjectNotExist will be returned if the object is not found. -func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) { +func (o *ObjectHandle) Attrs(ctx context.Context) (_ *ObjectAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Attrs") + defer func() { trace.EndSpan(ctx, err) }() + if err := o.validate(); err != nil { return nil, err } @@ -383,7 +386,6 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) { return nil, err } var obj *raw.Object - var err error setClientHeader(call.Header()) err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { @@ -398,7 +400,10 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) { // Update updates an object with the provided attributes. // All zero-value attributes are ignored. // ErrObjectNotExist will be returned if the object is not found. -func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (*ObjectAttrs, error) { +func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (_ *ObjectAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Update") + defer func() { trace.EndSpan(ctx, err) }() + if err := o.validate(); err != nil { return nil, err } @@ -466,7 +471,6 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) ( return nil, err } var obj *raw.Object - var err error setClientHeader(call.Header()) err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { @@ -532,144 +536,6 @@ func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle { return &o2 } -// NewReader creates a new Reader to read the contents of the -// object. -// ErrObjectNotExist will be returned if the object is not found. -// -// The caller must call Close on the returned Reader when done reading. -func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) { - return o.NewRangeReader(ctx, 0, -1) -} - -// NewRangeReader reads part of an object, reading at most length bytes -// starting at the given offset. If length is negative, the object is read -// until the end. 
-func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (*Reader, error) { - if err := o.validate(); err != nil { - return nil, err - } - if offset < 0 { - return nil, fmt.Errorf("storage: invalid offset %d < 0", offset) - } - if o.conds != nil { - if err := o.conds.validate("NewRangeReader"); err != nil { - return nil, err - } - } - u := &url.URL{ - Scheme: "https", - Host: "storage.googleapis.com", - Path: fmt.Sprintf("/%s/%s", o.bucket, o.object), - RawQuery: conditionsQuery(o.gen, o.conds), - } - verb := "GET" - if length == 0 { - verb = "HEAD" - } - req, err := http.NewRequest(verb, u.String(), nil) - if err != nil { - return nil, err - } - req = withContext(req, ctx) - if length < 0 && offset > 0 { - req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) - } else if length > 0 { - req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) - } - if o.userProject != "" { - req.Header.Set("X-Goog-User-Project", o.userProject) - } - if o.readCompressed { - req.Header.Set("Accept-Encoding", "gzip") - } - if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil { - return nil, err - } - var res *http.Response - err = runWithRetry(ctx, func() error { - res, err = o.c.hc.Do(req) - if err != nil { - return err - } - if res.StatusCode == http.StatusNotFound { - res.Body.Close() - return ErrObjectNotExist - } - if res.StatusCode < 200 || res.StatusCode > 299 { - body, _ := ioutil.ReadAll(res.Body) - res.Body.Close() - return &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - Body: string(body), - } - } - if offset > 0 && length != 0 && res.StatusCode != http.StatusPartialContent { - res.Body.Close() - return errors.New("storage: partial request not satisfied") - } - return nil - }) - if err != nil { - return nil, err - } - - var size int64 // total size of object, even if a range was requested. - if res.StatusCode == http.StatusPartialContent { - cr := strings.TrimSpace(res.Header.Get("Content-Range")) - if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") { - return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) - } - size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64) - if err != nil { - return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) - } - } else { - size = res.ContentLength - } - - remain := res.ContentLength - body := res.Body - if length == 0 { - remain = 0 - body.Close() - body = emptyBody - } - var ( - checkCRC bool - crc uint32 - ) - // Even if there is a CRC header, we can't compute the hash on partial data. - if remain == size { - crc, checkCRC = parseCRC32c(res) - } - return &Reader{ - body: body, - size: size, - remain: remain, - contentType: res.Header.Get("Content-Type"), - contentEncoding: res.Header.Get("Content-Encoding"), - cacheControl: res.Header.Get("Cache-Control"), - wantCRC: crc, - checkCRC: checkCRC, - }, nil -} - -func parseCRC32c(res *http.Response) (uint32, bool) { - const prefix = "crc32c=" - for _, spec := range res.Header["X-Goog-Hash"] { - if strings.HasPrefix(spec, prefix) { - c, err := decodeUint32(spec[len(prefix):]) - if err == nil { - return c, true - } - } - } - return 0, false -} - -var emptyBody = ioutil.NopCloser(strings.NewReader("")) - // NewWriter returns a storage Writer that writes to the GCS object // associated with this ObjectHandle. 
// diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go index 534ba567b..21d146f0f 100644 --- a/vendor/cloud.google.com/go/storage/writer.go +++ b/vendor/cloud.google.com/go/storage/writer.go @@ -19,6 +19,7 @@ import ( "errors" "fmt" "io" + "sync" "unicode/utf8" "golang.org/x/net/context" @@ -47,8 +48,11 @@ type Writer struct { // to the nearest multiple of 256K. If zero, chunking will be disabled and // the object will be uploaded in a single request. // - // ChunkSize will default to a reasonable value. Any custom configuration - // must be done before the first Write call. + // ChunkSize will default to a reasonable value. If you perform many concurrent + // writes of small objects, you may wish to set ChunkSize to a value that matches + // your objects' sizes to avoid consuming large amounts of memory. + // + // ChunkSize must be set before the first Write call. ChunkSize int // ProgressFunc can be used to monitor the progress of a large write. @@ -68,8 +72,10 @@ type Writer struct { pw *io.PipeWriter donec chan struct{} // closed after err and obj are set. - err error obj *ObjectAttrs + + mu sync.Mutex + err error } func (w *Writer) open() error { @@ -114,8 +120,10 @@ func (w *Writer) open() error { call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) }) } if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil { + w.mu.Lock() w.err = err - pr.CloseWithError(w.err) + w.mu.Unlock() + pr.CloseWithError(err) return } var resp *raw.Object @@ -142,8 +150,10 @@ func (w *Writer) open() error { } } if err != nil { + w.mu.Lock() w.err = err - pr.CloseWithError(w.err) + w.mu.Unlock() + pr.CloseWithError(err) return } w.obj = newObject(resp) @@ -158,8 +168,11 @@ func (w *Writer) open() error { // use the error returned from Writer.Close to determine if // the upload was successful. func (w *Writer) Write(p []byte) (n int, err error) { - if w.err != nil { - return 0, w.err + w.mu.Lock() + werr := w.err + w.mu.Unlock() + if werr != nil { + return 0, werr } if !w.opened { if err := w.open(); err != nil { @@ -182,6 +195,8 @@ func (w *Writer) Close() error { return err } <-w.donec + w.mu.Lock() + defer w.mu.Unlock() return w.err } diff --git a/vendor/cloud.google.com/go/storage/writer_test.go b/vendor/cloud.google.com/go/storage/writer_test.go index c04b4a5b3..c1f73809a 100644 --- a/vendor/cloud.google.com/go/storage/writer_test.go +++ b/vendor/cloud.google.com/go/storage/writer_test.go @@ -28,6 +28,7 @@ import ( "golang.org/x/net/context" + "google.golang.org/api/googleapi" "google.golang.org/api/option" ) @@ -144,3 +145,30 @@ func TestEncryption(t *testing.T) { t.Errorf("hash: got\n%v, want\n%v", gotHash, wantHash) } } + +// This test demonstrates the data race on Writer.err that can happen when the +// Writer's context is cancelled. To see the race, comment out the w.mu.Lock/Unlock +// lines in writer.go and run this test with -race. +func TestRaceOnCancel(t *testing.T) { + ctx := context.Background() + ft := &fakeTransport{} + hc := &http.Client{Transport: ft} + client, err := NewClient(ctx, option.WithHTTPClient(hc)) + if err != nil { + t.Fatalf("error when creating client: %v", err) + } + + cctx, cancel := context.WithCancel(ctx) + w := client.Bucket("b").Object("o").NewWriter(cctx) + w.ChunkSize = googleapi.MinUploadChunkSize + buf := make([]byte, w.ChunkSize) + // This Write starts the goroutine in Writer.open.
That reads the first chunk in its entirety + // before sending the request (see google.golang.org/api/gensupport.PrepareUpload), + // so to exhibit the race we must provide ChunkSize bytes. The goroutine then makes the RPC (L137). + w.Write(buf) + // Canceling the context causes the call to return context.Canceled, which makes the open goroutine + // write to w.err (L151). + cancel() + // This call to Write concurrently reads w.err (L169). + w.Write([]byte(nil)) +} diff --git a/vendor/cloud.google.com/go/trace/trace.go b/vendor/cloud.google.com/go/trace/trace.go index 2e50d2d30..df7020256 100644 --- a/vendor/cloud.google.com/go/trace/trace.go +++ b/vendor/cloud.google.com/go/trace/trace.go @@ -12,7 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -// This package is OBSOLETE. See https://godoc.org/go.opencensus.io/trace. +// This package is OBSOLETE. See https://godoc.org/go.opencensus.io/trace; and use +// OpenCensus Stackdriver exporter, https://godoc.org/go.opencensus.io/exporter/stackdriver. // // Package trace is a Google Stackdriver Trace library. // @@ -176,11 +177,11 @@ const ( spanKindServer = `RPC_SERVER` spanKindUnspecified = `SPAN_KIND_UNSPECIFIED` maxStackFrames = 20 + labelAgent = `trace.cloud.google.com/agent` ) // Stackdriver Trace API predefined labels. const ( - LabelAgent = `trace.cloud.google.com/agent` LabelComponent = `trace.cloud.google.com/component` LabelErrorMessage = `trace.cloud.google.com/error/message` LabelErrorName = `trace.cloud.google.com/error/name` @@ -574,6 +575,7 @@ func (t *trace) constructTrace(spans []*Span) *api.Trace { if sp.statusCode != 0 { sp.SetLabel(LabelHTTPStatusCode, strconv.Itoa(sp.statusCode)) } + sp.SetLabel(labelAgent, userAgent) apiSpans[i] = &sp.span } diff --git a/vendor/cloud.google.com/go/vision/apiv1/client.go b/vendor/cloud.google.com/go/vision/apiv1/client.go index cc91032e1..1bfdbfafb 100644 --- a/vendor/cloud.google.com/go/vision/apiv1/client.go +++ b/vendor/cloud.google.com/go/vision/apiv1/client.go @@ -15,11 +15,11 @@ package vision import ( - gax "github.com/googleapis/gax-go" + "github.com/googleapis/gax-go" "golang.org/x/net/context" pb "google.golang.org/genproto/googleapis/cloud/vision/v1" - "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) // AnnotateImage runs image detection and annotation for a single image. @@ -50,7 +50,7 @@ func (c *ImageAnnotatorClient) annotateOne(ctx context.Context, img *pb.Image, i // error because it preserves the code as a separate field. // TODO(jba): preserve the details field. 
if res.Error != nil { - return nil, grpc.Errorf(codes.Code(res.Error.Code), "%s", res.Error.Message) + return nil, status.Errorf(codes.Code(res.Error.Code), "%s", res.Error.Message) } return res, nil } diff --git a/vendor/cloud.google.com/go/vision/apiv1/client_test.go b/vendor/cloud.google.com/go/vision/apiv1/client_test.go index c6f0ba0bf..d656afe9a 100644 --- a/vendor/cloud.google.com/go/vision/apiv1/client_test.go +++ b/vendor/cloud.google.com/go/vision/apiv1/client_test.go @@ -75,52 +75,52 @@ func TestClientMethods(t *testing.T) { }, { func() (interface{}, error) { return c.DetectFaces(ctx, img, ictx, 2) }, - []*pb.Feature{{pb.Feature_FACE_DETECTION, 2}}, + []*pb.Feature{{Type: pb.Feature_FACE_DETECTION, MaxResults: 2}}, batchResponse.Responses[0].FaceAnnotations, }, { func() (interface{}, error) { return c.DetectLandmarks(ctx, img, ictx, 2) }, - []*pb.Feature{{pb.Feature_LANDMARK_DETECTION, 2}}, + []*pb.Feature{{Type: pb.Feature_LANDMARK_DETECTION, MaxResults: 2}}, batchResponse.Responses[0].LandmarkAnnotations, }, { func() (interface{}, error) { return c.DetectLogos(ctx, img, ictx, 2) }, - []*pb.Feature{{pb.Feature_LOGO_DETECTION, 2}}, + []*pb.Feature{{Type: pb.Feature_LOGO_DETECTION, MaxResults: 2}}, batchResponse.Responses[0].LogoAnnotations, }, { func() (interface{}, error) { return c.DetectLabels(ctx, img, ictx, 2) }, - []*pb.Feature{{pb.Feature_LABEL_DETECTION, 2}}, + []*pb.Feature{{Type: pb.Feature_LABEL_DETECTION, MaxResults: 2}}, batchResponse.Responses[0].LabelAnnotations, }, { func() (interface{}, error) { return c.DetectTexts(ctx, img, ictx, 2) }, - []*pb.Feature{{pb.Feature_TEXT_DETECTION, 2}}, + []*pb.Feature{{Type: pb.Feature_TEXT_DETECTION, MaxResults: 2}}, batchResponse.Responses[0].TextAnnotations, }, { func() (interface{}, error) { return c.DetectDocumentText(ctx, img, ictx) }, - []*pb.Feature{{pb.Feature_DOCUMENT_TEXT_DETECTION, 0}}, + []*pb.Feature{{Type: pb.Feature_DOCUMENT_TEXT_DETECTION, MaxResults: 0}}, batchResponse.Responses[0].FullTextAnnotation, }, { func() (interface{}, error) { return c.DetectSafeSearch(ctx, img, ictx) }, - []*pb.Feature{{pb.Feature_SAFE_SEARCH_DETECTION, 0}}, + []*pb.Feature{{Type: pb.Feature_SAFE_SEARCH_DETECTION, MaxResults: 0}}, batchResponse.Responses[0].SafeSearchAnnotation, }, { func() (interface{}, error) { return c.DetectImageProperties(ctx, img, ictx) }, - []*pb.Feature{{pb.Feature_IMAGE_PROPERTIES, 0}}, + []*pb.Feature{{Type: pb.Feature_IMAGE_PROPERTIES, MaxResults: 0}}, batchResponse.Responses[0].ImagePropertiesAnnotation, }, { func() (interface{}, error) { return c.DetectWeb(ctx, img, ictx) }, - []*pb.Feature{{pb.Feature_WEB_DETECTION, 0}}, + []*pb.Feature{{Type: pb.Feature_WEB_DETECTION, MaxResults: 0}}, batchResponse.Responses[0].WebDetection, }, { func() (interface{}, error) { return c.CropHints(ctx, img, ictx) }, - []*pb.Feature{{pb.Feature_CROP_HINTS, 0}}, + []*pb.Feature{{Type: pb.Feature_CROP_HINTS, MaxResults: 0}}, batchResponse.Responses[0].CropHintsAnnotation, }, } { diff --git a/vendor/cloud.google.com/go/vision/apiv1p1beta1/doc.go b/vendor/cloud.google.com/go/vision/apiv1p1beta1/doc.go index 6fdf83799..f25ee7a3c 100644 --- a/vendor/cloud.google.com/go/vision/apiv1p1beta1/doc.go +++ b/vendor/cloud.google.com/go/vision/apiv1p1beta1/doc.go @@ -16,7 +16,8 @@ // Package vision is an auto-generated package for the // Google Cloud Vision API. - +// +// NOTE: This package is in beta. It is not stable, and may be subject to changes. // // Integrates Google Vision features, including image labeling, face, logo, // and