From 0e7e3cb71439ae6e56cf6efd2c9cc3575e00aa15 Mon Sep 17 00:00:00 2001
From: Alexander Neumann
Date: Fri, 8 Dec 2017 20:45:59 +0100
Subject: [PATCH] Update minio-go

---
 Gopkg.lock | 6 +-
 Gopkg.toml | 4 -
 vendor/github.com/minio/minio-go/.gitignore | 3 +-
 vendor/github.com/minio/minio-go/.travis.yml | 26 +-
 .../github.com/minio/minio-go/MAINTAINERS.md | 27 +-
 vendor/github.com/minio/minio-go/Makefile | 17 +
 vendor/github.com/minio/minio-go/NOTICE | 2 +
 vendor/github.com/minio/minio-go/README.md | 28 +-
 .../minio/minio-go/api-compose-object.go | 89 +-
 .../minio/minio-go/api-compose-object_test.go | 3 +-
 .../minio/minio-go/api-datatypes.go | 3 +-
 .../minio/minio-go/api-error-response.go | 71 +-
 .../minio/minio-go/api-error-response_test.go | 108 +-
 ...sport_1_5.go => api-get-object-context.go} | 25 +-
 .../minio/minio-go/api-get-object-file.go | 35 +-
 .../minio/minio-go/api-get-object.go | 101 +-
 ...{request-headers.go => api-get-options.go} | 69 +-
 .../minio/minio-go/api-get-policy.go | 12 +-
 vendor/github.com/minio/minio-go/api-list.go | 40 +-
 .../minio/minio-go/api-notification.go | 27 +-
 .../minio/minio-go/api-presigned.go | 56 +-
 .../minio/minio-go/api-put-bucket.go | 45 +-
 .../minio/minio-go/api-put-object-common.go | 34 +-
 .../minio/minio-go/api-put-object-context.go | 39 +
 .../minio/minio-go/api-put-object-copy.go | 3 +-
 .../minio-go/api-put-object-encrypted.go | 22 +-
 .../minio-go/api-put-object-file-context.go | 64 +
 .../minio/minio-go/api-put-object-file.go | 51 +-
 .../minio-go/api-put-object-multipart.go | 97 +-
 .../minio-go/api-put-object-streaming.go | 111 +-
 .../minio/minio-go/api-put-object.go | 236 +-
 .../minio/minio-go/api-put-object_test.go | 53 +
 .../github.com/minio/minio-go/api-remove.go | 46 +-
 .../minio/minio-go/api-s3-datatypes.go | 5 +-
 vendor/github.com/minio/minio-go/api-stat.go | 32 +-
 vendor/github.com/minio/minio-go/api.go | 53 +-
 .../minio/minio-go/api_unit_test.go | 132 +-
 .../github.com/minio/minio-go/bucket-cache.go | 7 +-
 .../minio/minio-go/bucket-cache_test.go | 9 +-
 .../minio/minio-go/bucket-notification.go | 3 +-
 vendor/github.com/minio/minio-go/constants.go | 5 +-
 vendor/github.com/minio/minio-go/core.go | 60 +-
 vendor/github.com/minio/minio-go/core_test.go | 189 +-
 vendor/github.com/minio/minio-go/docs/API.md | 804 ++--
 .../minio/minio-go/docs/checker.go.template | 21 +
 .../minio/minio-go/docs/validator.go | 227 ++
 .../minio/listenbucketnotification.go | 3 +-
 .../minio-go/examples/s3/bucketexists.go | 3 +-
 .../minio-go/examples/s3/composeobject.go | 3 +-
 .../minio/minio-go/examples/s3/copyobject.go | 3 +-
 .../examples/s3/fgetobject-context.go | 54 +
 .../minio/minio-go/examples/s3/fgetobject.go | 5 +-
 .../examples/s3/fputencrypted-object.go | 80 +
 .../examples/s3/fputobject-context.go | 53 +
 .../minio/minio-go/examples/s3/fputobject.go | 7 +-
 .../examples/s3/get-encrypted-object.go | 3 +-
 .../examples/s3/getbucketnotification.go | 3 +-
 .../minio-go/examples/s3/getbucketpolicy.go | 3 +-
 .../minio-go/examples/s3/getobject-context.go | 73 +
 .../minio/minio-go/examples/s3/getobject.go | 5 +-
 .../examples/s3/listbucketpolicies.go | 3 +-
 .../minio/minio-go/examples/s3/listbuckets.go | 3 +-
 .../examples/s3/listincompleteuploads.go | 3 +-
 .../minio-go/examples/s3/listobjects-N.go | 3 +-
 .../minio/minio-go/examples/s3/listobjects.go | 3 +-
 .../minio-go/examples/s3/listobjectsV2.go | 3 +-
 .../minio/minio-go/examples/s3/makebucket.go | 3 +-
 .../examples/s3/presignedgetobject.go | 3 +-
 .../examples/s3/presignedheadobject.go | 3 +-
 .../examples/s3/presignedpostpolicy.go | 3 +-
 .../examples/s3/presignedputobject.go | 3 +-
 .../examples/s3/put-encrypted-object.go | 5 +-
 .../minio-go/examples/s3/putobject-context.go | 68 +
 .../examples/s3/putobject-getobject-sse.go | 20 +-
 .../examples/s3/putobject-progress.go | 9 +-
 .../examples/s3/putobject-s3-accelerate.go | 10 +-
 .../examples/s3/putobject-streaming.go | 5 +-
 .../minio/minio-go/examples/s3/putobject.go | 9 +-
 .../s3/removeallbucketnotification.go | 3 +-
 .../minio-go/examples/s3/removebucket.go | 3 +-
 .../examples/s3/removeincompleteupload.go | 3 +-
 .../minio-go/examples/s3/removeobject.go | 3 +-
 .../minio-go/examples/s3/removeobjects.go | 12 +-
 .../examples/s3/setbucketnotification.go | 3 +-
 .../minio-go/examples/s3/setbucketpolicy.go | 3 +-
 .../minio/minio-go/examples/s3/statobject.go | 5 +-
 .../minio/minio-go/functional_tests.go | 3579 ++++++++++++-----
 ...st-headers_test.go => get-options_test.go} | 11 +-
 .../github.com/minio/minio-go/hook-reader.go | 3 +-
 .../minio/minio-go/pkg/credentials/chain.go | 2 +-
 .../minio-go/pkg/credentials/chain_test.go | 2 +-
 .../minio-go/pkg/credentials/credentials.go | 2 +-
 .../pkg/credentials/credentials_test.go | 2 +-
 .../minio/minio-go/pkg/credentials/doc.go | 17 +
 .../minio/minio-go/pkg/credentials/env_aws.go | 2 +-
 .../minio-go/pkg/credentials/env_minio.go | 2 +-
 .../minio-go/pkg/credentials/env_test.go | 2 +-
 .../pkg/credentials/file_aws_credentials.go | 2 +-
 .../pkg/credentials/file_minio_client.go | 2 +-
 .../minio-go/pkg/credentials/file_test.go | 2 +-
 .../minio/minio-go/pkg/credentials/iam_aws.go | 17 +-
 .../minio-go/pkg/credentials/iam_aws_test.go | 17 +
 .../pkg/credentials/signature-type.go | 3 +-
 .../minio/minio-go/pkg/credentials/static.go | 2 +-
 .../minio-go/pkg/credentials/static_test.go | 2 +-
 .../minio/minio-go/pkg/encrypt/cbc.go | 3 +-
 .../minio/minio-go/pkg/encrypt/interface.go | 3 +-
 .../minio/minio-go/pkg/encrypt/keys.go | 3 +-
 .../pkg/policy/bucket-policy-condition.go | 3 +-
 .../policy/bucket-policy-condition_test.go | 3 +-
 .../minio-go/pkg/policy/bucket-policy.go | 3 +-
 .../minio-go/pkg/policy/bucket-policy_test.go | 3 +-
 .../s3signer/request-signature-streaming.go | 7 +-
 .../request-signature-streaming_test.go | 7 +-
 .../pkg/s3signer/request-signature-v2.go | 46 +-
 .../pkg/s3signer/request-signature-v2_test.go | 3 +-
 .../pkg/s3signer/request-signature-v4.go | 3 +-
 .../pkg/s3signer/request-signature_test.go | 3 +-
 .../minio-go/pkg/s3signer/test-utils_test.go | 3 +-
 .../minio/minio-go/pkg/s3signer/utils.go | 3 +-
 .../minio/minio-go/pkg/s3signer/utils_test.go | 3 +-
 .../minio/minio-go/pkg/s3utils/utils.go | 3 +-
 .../minio/minio-go/pkg/s3utils/utils_test.go | 3 +-
 .../minio/minio-go/pkg/set/stringset.go | 3 +-
 .../minio/minio-go/pkg/set/stringset_test.go | 3 +-
 .../github.com/minio/minio-go/post-policy.go | 39 +
 .../minio/minio-go/retry-continous.go | 17 +
 vendor/github.com/minio/minio-go/retry.go | 3 +-
 .../github.com/minio/minio-go/s3-endpoints.go | 3 +-
 vendor/github.com/minio/minio-go/s3-error.go | 3 +-
 .../minio/minio-go/test-utils_test.go | 3 +-
 vendor/github.com/minio/minio-go/transport.go | 2 +-
 .../minio/minio-go/transport_1_6.go | 40 -
 vendor/github.com/minio/minio-go/utils.go | 93 +-
 .../github.com/minio/minio-go/utils_test.go | 106 +-
 135 files changed, 5327 insertions(+), 2356 deletions(-)
 create mode 100644 vendor/github.com/minio/minio-go/Makefile
 create mode 100644 vendor/github.com/minio/minio-go/NOTICE
 rename vendor/github.com/minio/minio-go/{transport_1_5.go => api-get-object-context.go} (51%)
rename vendor/github.com/minio/minio-go/{request-headers.go => api-get-options.go} (53%) create mode 100644 vendor/github.com/minio/minio-go/api-put-object-context.go create mode 100644 vendor/github.com/minio/minio-go/api-put-object-file-context.go create mode 100644 vendor/github.com/minio/minio-go/api-put-object_test.go create mode 100644 vendor/github.com/minio/minio-go/docs/checker.go.template create mode 100644 vendor/github.com/minio/minio-go/docs/validator.go create mode 100644 vendor/github.com/minio/minio-go/examples/s3/fgetobject-context.go create mode 100644 vendor/github.com/minio/minio-go/examples/s3/fputencrypted-object.go create mode 100644 vendor/github.com/minio/minio-go/examples/s3/fputobject-context.go create mode 100644 vendor/github.com/minio/minio-go/examples/s3/getobject-context.go create mode 100644 vendor/github.com/minio/minio-go/examples/s3/putobject-context.go rename vendor/github.com/minio/minio-go/{request-headers_test.go => get-options_test.go} (84%) delete mode 100644 vendor/github.com/minio/minio-go/transport_1_6.go diff --git a/Gopkg.lock b/Gopkg.lock index 7a4740f97..02c706dfe 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -100,8 +100,8 @@ [[projects]] name = "github.com/minio/minio-go" packages = [".","pkg/credentials","pkg/encrypt","pkg/policy","pkg/s3signer","pkg/s3utils","pkg/set"] - revision = "4e0f567303d4cc90ceb055a451959fb9fc391fb9" - version = "3.0.3" + revision = "57a8ae886b49af6eb0d2c27c2d007ed2f71e1da5" + version = "4.0.3" [[projects]] branch = "master" @@ -214,6 +214,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "c6e2522d1b0c6832101ba15fc062074ad790648e26f481e3419a171d3579bfc4" + inputs-digest = "f0a207197cb502238ac87ca8e07b2640c02ec380a50b036e09ef87e40e31ca2d" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 838f65283..323002ca8 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -19,7 +19,3 @@ # [[override]] # name = "github.com/x/y" # version = "2.4.0" - -[[constraint]] - name = "github.com/minio/minio-go" - version = "3.0.0" diff --git a/vendor/github.com/minio/minio-go/.gitignore b/vendor/github.com/minio/minio-go/.gitignore index acf19db3a..fa967abd7 100644 --- a/vendor/github.com/minio/minio-go/.gitignore +++ b/vendor/github.com/minio/minio-go/.gitignore @@ -1,2 +1,3 @@ *~ -*.test \ No newline at end of file +*.test +validator diff --git a/vendor/github.com/minio/minio-go/.travis.yml b/vendor/github.com/minio/minio-go/.travis.yml index 3d260fa61..4ae1eadf0 100644 --- a/vendor/github.com/minio/minio-go/.travis.yml +++ b/vendor/github.com/minio/minio-go/.travis.yml @@ -9,18 +9,22 @@ env: - ARCH=i686 go: -- 1.5.3 -- 1.6 - 1.7.4 -- 1.8 +- 1.8.x +- 1.9.x +- tip + +matrix: + fast_finish: true + allow_failures: + - go: tip + +addons: + apt: + packages: + - devscripts script: - diff -au <(gofmt -d .) <(printf "") -- go get -u github.com/cheggaaa/pb/... -- go get -u github.com/sirupsen/logrus/... -- go get -u github.com/dustin/go-humanize/... -- go vet ./... -- SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 go test -race -v ./... 
-- SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 go run functional_tests.go -- mkdir /tmp/examples \ - && for i in $(echo examples/s3/*); do go build -o /tmp/examples/$(basename ${i:0:-3}) ${i}; done +- diff -au <(licensecheck --check '.go$' --recursive --lines 0 * | grep -v -w 'Apache (v2.0)') <(printf "") +- make diff --git a/vendor/github.com/minio/minio-go/MAINTAINERS.md b/vendor/github.com/minio/minio-go/MAINTAINERS.md index e2a957137..17973078e 100644 --- a/vendor/github.com/minio/minio-go/MAINTAINERS.md +++ b/vendor/github.com/minio/minio-go/MAINTAINERS.md @@ -5,24 +5,25 @@ Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522) ### Making new releases -Edit `libraryVersion` constant in `api.go`. - +Tag and sign your release commit, additionally this step requires you to have access to Minio's trusted private key. +```sh +$ export GNUPGHOME=/media/${USER}/minio/trusted +$ git tag -s 4.0.0 +$ git push +$ git push --tags ``` + +### Update version +Once release has been made update `libraryVersion` constant in `api.go` to next to be released version. + +```sh $ grep libraryVersion api.go - libraryVersion = "0.3.0" + libraryVersion = "4.0.1" ``` Commit your changes ``` -$ git commit -a -m "Bump to new release 0.3.0" --author "Minio Trusted " -``` - -Tag and sign your release commit, additionally this step requires you to have access to Minio's trusted private key. -``` -$ export GNUPGHOME=/path/to/trusted/key -$ git tag -s 0.3.0 -$ git push -$ git push --tags +$ git commit -a -m "Update version for next release" --author "Minio Trusted " ``` ### Announce @@ -30,5 +31,5 @@ Announce new release by adding release notes at https://github.com/minio/minio-g To generate `changelog` ```sh -git log --no-color --pretty=format:'-%d %s (%cr) <%an>' .. +$ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' .. ``` diff --git a/vendor/github.com/minio/minio-go/Makefile b/vendor/github.com/minio/minio-go/Makefile new file mode 100644 index 000000000..e7e5577dc --- /dev/null +++ b/vendor/github.com/minio/minio-go/Makefile @@ -0,0 +1,17 @@ +all: checks + +checks: + @go get -u github.com/go-ini/ini/... + @go get -u github.com/minio/go-homedir/... + @go get -u github.com/cheggaaa/pb/... + @go get -u github.com/sirupsen/logrus/... + @go get -u github.com/dustin/go-humanize/... + @go vet ./... + @SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 go test -race -v ./... + @SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 go run functional_tests.go + @mkdir -p /tmp/examples && for i in $(echo examples/s3/*); do go build -o /tmp/examples/$(basename ${i:0:-3}) ${i}; done + @go get -u github.com/a8m/mark/... + @go get -u github.com/minio/cli/... + @go get -u golang.org/x/tools/cmd/goimports + @go get -u github.com/gernest/wow/... + @go build docs/validator.go && ./validator -m docs/API.md -t docs/checker.go.tpl diff --git a/vendor/github.com/minio/minio-go/NOTICE b/vendor/github.com/minio/minio-go/NOTICE new file mode 100644 index 000000000..c521791c5 --- /dev/null +++ b/vendor/github.com/minio/minio-go/NOTICE @@ -0,0 +1,2 @@ +minio-go +Copyright 2015-2017 Minio, Inc. 
\ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/README.md b/vendor/github.com/minio/minio-go/README.md index 5eb6656d5..2dedc1a28 100644 --- a/vendor/github.com/minio/minio-go/README.md +++ b/vendor/github.com/minio/minio-go/README.md @@ -1,19 +1,7 @@ -# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) +# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE) The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage. -**Supported cloud storage providers:** - -- AWS Signature Version 4 - - Amazon S3 - - Minio - -- AWS Signature Version 2 - - Google Cloud Storage (Compatibility Mode) - - Openstack Swift + Swift3 middleware - - Ceph Object Gateway - - Riak CS - This quickstart guide will show you how to install the Minio client SDK, connect to Minio, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference). This document assumes that you have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang). @@ -55,6 +43,7 @@ func main() { } log.Printf("%#v\n", minioClient) // minioClient is now setup +} ``` ## Quick Start Example - File Uploader @@ -105,7 +94,7 @@ func main() { contentType := "application/zip" // Upload the zip file with FPutObject - n, err := minioClient.FPutObject(bucketName, objectName, filePath, contentType) + n, err := minioClient.FPutObject(bucketName, objectName, filePath, minio.PutObjectOptions{ContentType:contentType}) if err != nil { log.Fatalln(err) } @@ -152,10 +141,14 @@ The full API Reference is available here. ### API Reference : File Object Operations * [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject) * [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject) +* [`FPutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FPutObjectWithContext) +* [`FGetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FGetObjectWithContext) ### API Reference : Object Operations * [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject) * [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject) +* [`GetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#GetObjectWithContext) +* [`PutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectWithContext) * [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming) * [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject) * [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject) @@ -204,10 +197,14 @@ The full API Reference is available here. 
### Full Examples : File Object Operations * [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go) * [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go) +* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go) +* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go) ### Full Examples : Object Operations * [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go) * [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go) +* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go) +* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go) * [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go) * [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go) * [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go) @@ -217,6 +214,7 @@ The full API Reference is available here. ### Full Examples : Encrypted Object Operations * [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go) * [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go) +* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go) ### Full Examples : Presigned Operations * [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go) @@ -235,3 +233,5 @@ The full API Reference is available here. [![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go) [![Build status](https://ci.appveyor.com/api/projects/status/1d05e6nvxcelmrak?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go) +## License +This SDK is distributed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](./LICENSE) and [NOTICE](./NOTICE) for more information. diff --git a/vendor/github.com/minio/minio-go/api-compose-object.go b/vendor/github.com/minio/minio-go/api-compose-object.go index 4fa88b818..4b763f873 100644 --- a/vendor/github.com/minio/minio-go/api-compose-object.go +++ b/vendor/github.com/minio/minio-go/api-compose-object.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -17,6 +18,7 @@ package minio import ( + "context" "encoding/base64" "fmt" "net/http" @@ -58,7 +60,7 @@ func (s *SSEInfo) getSSEHeaders(isCopySource bool) map[string]string { return map[string]string{ "x-amz-" + cs + "server-side-encryption-customer-algorithm": s.algo, "x-amz-" + cs + "server-side-encryption-customer-key": base64.StdEncoding.EncodeToString(s.key), - "x-amz-" + cs + "server-side-encryption-customer-key-MD5": base64.StdEncoding.EncodeToString(sumMD5(s.key)), + "x-amz-" + cs + "server-side-encryption-customer-key-MD5": sumMD5Base64(s.key), } } @@ -115,7 +117,7 @@ func NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo, k = k[len("x-amz-meta-"):] } if _, ok := m[k]; ok { - return d, fmt.Errorf("Cannot add both %s and x-amz-meta-%s keys as custom metadata", k, k) + return d, ErrInvalidArgument(fmt.Sprintf("Cannot add both %s and x-amz-meta-%s keys as custom metadata", k, k)) } m[k] = v } @@ -243,13 +245,13 @@ func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[s // Get object info - need size and etag here. Also, decryption // headers are added to the stat request if given. var objInfo ObjectInfo - rh := NewGetReqHeaders() + opts := StatObjectOptions{} for k, v := range s.decryptKey.getSSEHeaders(false) { - rh.Set(k, v) + opts.Set(k, v) } - objInfo, err = c.statObject(s.bucket, s.object, rh) + objInfo, err = c.statObject(s.bucket, s.object, opts) if err != nil { - err = fmt.Errorf("Could not stat object - %s/%s: %v", s.bucket, s.object, err) + err = ErrInvalidArgument(fmt.Sprintf("Could not stat object - %s/%s: %v", s.bucket, s.object, err)) } else { size = objInfo.Size etag = objInfo.ETag @@ -265,10 +267,55 @@ func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[s return } +// Low level implementation of CopyObject API, supports only upto 5GiB worth of copy. +func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, + metadata map[string]string) (ObjectInfo, error) { + + // Build headers. + headers := make(http.Header) + + // Set all the metadata headers. + for k, v := range metadata { + headers.Set(k, v) + } + + // Set the source header + headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)) + + // Send upload-part-copy request + resp, err := c.executeMethod(ctx, "PUT", requestMetadata{ + bucketName: destBucket, + objectName: destObject, + customHeader: headers, + }) + defer closeResponse(resp) + if err != nil { + return ObjectInfo{}, err + } + + // Check if we got an error response. 
+ if resp.StatusCode != http.StatusOK { + return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject) + } + + cpObjRes := copyObjectResult{} + err = xmlDecoder(resp.Body, &cpObjRes) + if err != nil { + return ObjectInfo{}, err + } + + objInfo := ObjectInfo{ + Key: destObject, + ETag: strings.Trim(cpObjRes.ETag, "\""), + LastModified: cpObjRes.LastModified, + } + return objInfo, nil +} + // uploadPartCopy - helper function to create a part in a multipart // upload via an upload-part-copy request // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html -func (c Client) uploadPartCopy(bucket, object, uploadID string, partNumber int, +func (c Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int, headers http.Header) (p CompletePart, err error) { // Build query parameters @@ -277,7 +324,7 @@ func (c Client) uploadPartCopy(bucket, object, uploadID string, partNumber int, urlValues.Set("uploadId", uploadID) // Send upload-part-copy request - resp, err := c.executeMethod("PUT", requestMetadata{ + resp, err := c.executeMethod(ctx, "PUT", requestMetadata{ bucketName: bucket, objectName: object, customHeader: headers, @@ -311,7 +358,7 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error { if len(srcs) < 1 || len(srcs) > maxPartsCount { return ErrInvalidArgument("There must be as least one and up to 10000 source objects.") } - + ctx := context.Background() srcSizes := make([]int64, len(srcs)) var totalSize, size, totalParts int64 var srcUserMeta map[string]string @@ -320,7 +367,7 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error { for i, src := range srcs { size, etag, srcUserMeta, err = src.getProps(c) if err != nil { - return fmt.Errorf("Could not get source props for %s/%s: %v", src.bucket, src.object, err) + return err } // Error out if client side encryption is used in this source object when @@ -396,7 +443,7 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error { } // Send copy request - resp, err := c.executeMethod("PUT", requestMetadata{ + resp, err := c.executeMethod(ctx, "PUT", requestMetadata{ bucketName: dst.bucket, objectName: dst.object, customHeader: h, @@ -426,13 +473,13 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error { if len(userMeta) == 0 && len(srcs) == 1 { metaMap = srcUserMeta } - metaHeaders := make(map[string][]string) + metaHeaders := make(map[string]string) for k, v := range metaMap { - metaHeaders[k] = append(metaHeaders[k], v) + metaHeaders[k] = v } - uploadID, err := c.newUploadID(dst.bucket, dst.object, metaHeaders) + uploadID, err := c.newUploadID(ctx, dst.bucket, dst.object, PutObjectOptions{UserMetadata: metaHeaders}) if err != nil { - return fmt.Errorf("Error creating new upload: %v", err) + return err } // 2. Perform copy part uploads @@ -457,10 +504,10 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error { fmt.Sprintf("bytes=%d-%d", start, end)) // make upload-part-copy request - complPart, err := c.uploadPartCopy(dst.bucket, + complPart, err := c.uploadPartCopy(ctx, dst.bucket, dst.object, uploadID, partIndex, h) if err != nil { - return fmt.Errorf("Error in upload-part-copy - %v", err) + return err } objParts = append(objParts, complPart) partIndex++ @@ -468,12 +515,12 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error { } // 3. Make final complete-multipart request. 
- _, err = c.completeMultipartUpload(dst.bucket, dst.object, uploadID, + _, err = c.completeMultipartUpload(ctx, dst.bucket, dst.object, uploadID, completeMultipartUpload{Parts: objParts}) if err != nil { - err = fmt.Errorf("Error in complete-multipart request - %v", err) + return err } - return err + return nil } // partsRequired is ceiling(size / copyPartSize) diff --git a/vendor/github.com/minio/minio-go/api-compose-object_test.go b/vendor/github.com/minio/minio-go/api-compose-object_test.go index 5339d2027..0f22a960b 100644 --- a/vendor/github.com/minio/minio-go/api-compose-object_test.go +++ b/vendor/github.com/minio/minio-go/api-compose-object_test.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/api-datatypes.go b/vendor/github.com/minio/minio-go/api-datatypes.go index ab2aa4af2..763b91311 100644 --- a/vendor/github.com/minio/minio-go/api-datatypes.go +++ b/vendor/github.com/minio/minio-go/api-datatypes.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/api-error-response.go b/vendor/github.com/minio/minio-go/api-error-response.go index e0019a334..655991cff 100644 --- a/vendor/github.com/minio/minio-go/api-error-response.go +++ b/vendor/github.com/minio/minio-go/api-error-response.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016, 2017 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,7 +21,6 @@ import ( "encoding/xml" "fmt" "net/http" - "strconv" ) /* **** SAMPLE ERROR RESPONSE **** @@ -49,6 +49,9 @@ type ErrorResponse struct { // only in HEAD bucket and ListObjects response. Region string + // Underlying HTTP status code for the returned error + StatusCode int `xml:"-" json:"-"` + // Headers of the returned S3 XML error Headers http.Header `xml:"-" json:"-"` } @@ -100,7 +103,10 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) msg := "Response is empty. " + reportIssue return ErrInvalidArgument(msg) } - var errResp ErrorResponse + + errResp := ErrorResponse{ + StatusCode: resp.StatusCode, + } err := xmlDecoder(resp.Body, &errResp) // Xml decoding failed with no body, fall back to HTTP headers. 
@@ -109,12 +115,14 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) case http.StatusNotFound: if objectName == "" { errResp = ErrorResponse{ + StatusCode: resp.StatusCode, Code: "NoSuchBucket", Message: "The specified bucket does not exist.", BucketName: bucketName, } } else { errResp = ErrorResponse{ + StatusCode: resp.StatusCode, Code: "NoSuchKey", Message: "The specified key does not exist.", BucketName: bucketName, @@ -123,6 +131,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) } case http.StatusForbidden: errResp = ErrorResponse{ + StatusCode: resp.StatusCode, Code: "AccessDenied", Message: "Access Denied.", BucketName: bucketName, @@ -130,12 +139,14 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) } case http.StatusConflict: errResp = ErrorResponse{ + StatusCode: resp.StatusCode, Code: "Conflict", Message: "Bucket not empty.", BucketName: bucketName, } case http.StatusPreconditionFailed: errResp = ErrorResponse{ + StatusCode: resp.StatusCode, Code: "PreconditionFailed", Message: s3ErrorResponseMap["PreconditionFailed"], BucketName: bucketName, @@ -143,6 +154,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) } default: errResp = ErrorResponse{ + StatusCode: resp.StatusCode, Code: resp.Status, Message: resp.Status, BucketName: bucketName, @@ -150,7 +162,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) } } - // Save hodID, requestID and region information + // Save hostID, requestID and region information // from headers if not available through error XML. if errResp.RequestID == "" { errResp.RequestID = resp.Header.Get("x-amz-request-id") @@ -162,7 +174,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) errResp.Region = resp.Header.Get("x-amz-bucket-region") } if errResp.Code == "InvalidRegion" && errResp.Region != "" { - errResp.Message = fmt.Sprintf("Region does not match, expecting region '%s'.", errResp.Region) + errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region) } // Save headers returned in the API XML error @@ -173,10 +185,10 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) // ErrTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration. func ErrTransferAccelerationBucket(bucketName string) error { - msg := fmt.Sprintf("The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").") return ErrorResponse{ + StatusCode: http.StatusBadRequest, Code: "InvalidArgument", - Message: msg, + Message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.", BucketName: bucketName, } } @@ -185,6 +197,7 @@ func ErrTransferAccelerationBucket(bucketName string) error { func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error { msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize) return ErrorResponse{ + StatusCode: http.StatusBadRequest, Code: "EntityTooLarge", Message: msg, BucketName: bucketName, @@ -194,9 +207,10 @@ func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName st // ErrEntityTooSmall - Input size is smaller than supported minimum. 
func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error { - msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size '0B' for single PUT operation.", totalSize) + msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize) return ErrorResponse{ - Code: "EntityTooLarge", + StatusCode: http.StatusBadRequest, + Code: "EntityTooSmall", Message: msg, BucketName: bucketName, Key: objectName, @@ -205,9 +219,9 @@ func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error { // ErrUnexpectedEOF - Unexpected end of file reached. func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error { - msg := fmt.Sprintf("Data read ‘%s’ is not equal to the size ‘%s’ of the input Reader.", - strconv.FormatInt(totalRead, 10), strconv.FormatInt(totalSize, 10)) + msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize) return ErrorResponse{ + StatusCode: http.StatusBadRequest, Code: "UnexpectedEOF", Message: msg, BucketName: bucketName, @@ -218,18 +232,20 @@ func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) // ErrInvalidBucketName - Invalid bucket name response. func ErrInvalidBucketName(message string) error { return ErrorResponse{ - Code: "InvalidBucketName", - Message: message, - RequestID: "minio", + StatusCode: http.StatusBadRequest, + Code: "InvalidBucketName", + Message: message, + RequestID: "minio", } } // ErrInvalidObjectName - Invalid object name response. func ErrInvalidObjectName(message string) error { return ErrorResponse{ - Code: "NoSuchKey", - Message: message, - RequestID: "minio", + StatusCode: http.StatusNotFound, + Code: "NoSuchKey", + Message: message, + RequestID: "minio", } } @@ -240,9 +256,10 @@ var ErrInvalidObjectPrefix = ErrInvalidObjectName // ErrInvalidArgument - Invalid argument response. func ErrInvalidArgument(message string) error { return ErrorResponse{ - Code: "InvalidArgument", - Message: message, - RequestID: "minio", + StatusCode: http.StatusBadRequest, + Code: "InvalidArgument", + Message: message, + RequestID: "minio", } } @@ -250,9 +267,10 @@ func ErrInvalidArgument(message string) error { // The specified bucket does not have a bucket policy. func ErrNoSuchBucketPolicy(message string) error { return ErrorResponse{ - Code: "NoSuchBucketPolicy", - Message: message, - RequestID: "minio", + StatusCode: http.StatusNotFound, + Code: "NoSuchBucketPolicy", + Message: message, + RequestID: "minio", } } @@ -260,8 +278,9 @@ func ErrNoSuchBucketPolicy(message string) error { // The specified API call is not supported func ErrAPINotSupported(message string) error { return ErrorResponse{ - Code: "APINotSupported", - Message: message, - RequestID: "minio", + StatusCode: http.StatusNotImplemented, + Code: "APINotSupported", + Message: message, + RequestID: "minio", } } diff --git a/vendor/github.com/minio/minio-go/api-error-response_test.go b/vendor/github.com/minio/minio-go/api-error-response_test.go index 595cb50bd..bf10941b4 100644 --- a/vendor/github.com/minio/minio-go/api-error-response_test.go +++ b/vendor/github.com/minio/minio-go/api-error-response_test.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -7,7 +8,7 @@ * * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required bZy applicable law or agreed to in writing, software + * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and @@ -32,20 +33,23 @@ func TestHttpRespToErrorResponse(t *testing.T) { // 'genAPIErrorResponse' generates ErrorResponse for given APIError. // provides a encodable populated response values. genAPIErrorResponse := func(err APIError, bucketName string) ErrorResponse { - var errResp = ErrorResponse{} - errResp.Code = err.Code - errResp.Message = err.Description - errResp.BucketName = bucketName - return errResp + return ErrorResponse{ + Code: err.Code, + Message: err.Description, + BucketName: bucketName, + } } // Encodes the response headers into XML format. - encodeErr := func(response interface{}) []byte { - var bytesBuffer bytes.Buffer - bytesBuffer.WriteString(xml.Header) - encode := xml.NewEncoder(&bytesBuffer) - encode.Encode(response) - return bytesBuffer.Bytes() + encodeErr := func(response ErrorResponse) []byte { + buf := &bytes.Buffer{} + buf.WriteString(xml.Header) + encoder := xml.NewEncoder(buf) + err := encoder.Encode(response) + if err != nil { + t.Fatalf("error encoding response: %v", err) + } + return buf.Bytes() } // `createAPIErrorResponse` Mocks XML error response from the server. @@ -65,6 +69,7 @@ func TestHttpRespToErrorResponse(t *testing.T) { // 'genErrResponse' contructs error response based http Status Code genErrResponse := func(resp *http.Response, code, message, bucketName, objectName string) ErrorResponse { errResp := ErrorResponse{ + StatusCode: resp.StatusCode, Code: code, Message: message, BucketName: bucketName, @@ -80,9 +85,10 @@ func TestHttpRespToErrorResponse(t *testing.T) { // Generate invalid argument error. genInvalidError := func(message string) error { errResp := ErrorResponse{ - Code: "InvalidArgument", - Message: message, - RequestID: "minio", + StatusCode: http.StatusBadRequest, + Code: "InvalidArgument", + Message: message, + RequestID: "minio", } return errResp } @@ -101,22 +107,22 @@ func TestHttpRespToErrorResponse(t *testing.T) { // Set the StatusCode to the argument supplied. // Sets common headers. genEmptyBodyResponse := func(statusCode int) *http.Response { - resp := &http.Response{} - // set empty response body. - resp.Body = ioutil.NopCloser(bytes.NewBuffer([]byte(""))) - // set headers. + resp := &http.Response{ + StatusCode: statusCode, + Body: ioutil.NopCloser(bytes.NewReader(nil)), + } setCommonHeaders(resp) - // set status code. - resp.StatusCode = statusCode return resp } // Decode XML error message from the http response body. - decodeXMLError := func(resp *http.Response, t *testing.T) error { - var errResp ErrorResponse + decodeXMLError := func(resp *http.Response) error { + errResp := ErrorResponse{ + StatusCode: resp.StatusCode, + } err := xmlDecoder(resp.Body, &errResp) if err != nil { - t.Fatal("XML decoding of response body failed") + t.Fatalf("XML decoding of response body failed: %v", err) } return errResp } @@ -134,12 +140,12 @@ func TestHttpRespToErrorResponse(t *testing.T) { // Used for asserting the actual response. 
expectedErrResponse := []error{ genInvalidError("Response is empty. " + "Please report this issue at https://github.com/minio/minio-go/issues."), - decodeXMLError(createAPIErrorResponse(APIErrors[0], "minio-bucket"), t), - genErrResponse(setCommonHeaders(&http.Response{}), "NoSuchBucket", "The specified bucket does not exist.", "minio-bucket", ""), - genErrResponse(setCommonHeaders(&http.Response{}), "NoSuchKey", "The specified key does not exist.", "minio-bucket", "Asia/"), - genErrResponse(setCommonHeaders(&http.Response{}), "AccessDenied", "Access Denied.", "minio-bucket", ""), - genErrResponse(setCommonHeaders(&http.Response{}), "Conflict", "Bucket not empty.", "minio-bucket", ""), - genErrResponse(setCommonHeaders(&http.Response{}), "Bad Request", "Bad Request", "minio-bucket", ""), + decodeXMLError(createAPIErrorResponse(APIErrors[0], "minio-bucket")), + genErrResponse(setCommonHeaders(&http.Response{StatusCode: http.StatusNotFound}), "NoSuchBucket", "The specified bucket does not exist.", "minio-bucket", ""), + genErrResponse(setCommonHeaders(&http.Response{StatusCode: http.StatusNotFound}), "NoSuchKey", "The specified key does not exist.", "minio-bucket", "Asia/"), + genErrResponse(setCommonHeaders(&http.Response{StatusCode: http.StatusForbidden}), "AccessDenied", "Access Denied.", "minio-bucket", ""), + genErrResponse(setCommonHeaders(&http.Response{StatusCode: http.StatusConflict}), "Conflict", "Bucket not empty.", "minio-bucket", ""), + genErrResponse(setCommonHeaders(&http.Response{StatusCode: http.StatusBadRequest}), "Bad Request", "Bad Request", "minio-bucket", ""), } // List of http response to be used as input. @@ -182,6 +188,7 @@ func TestHttpRespToErrorResponse(t *testing.T) { func TestErrEntityTooLarge(t *testing.T) { msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", 1000000, 99999) expectedResult := ErrorResponse{ + StatusCode: http.StatusBadRequest, Code: "EntityTooLarge", Message: msg, BucketName: "minio-bucket", @@ -189,22 +196,23 @@ func TestErrEntityTooLarge(t *testing.T) { } actualResult := ErrEntityTooLarge(1000000, 99999, "minio-bucket", "Asia/") if !reflect.DeepEqual(expectedResult, actualResult) { - t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult) + t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult) } } // Test validates 'ErrEntityTooSmall' error response. 
func TestErrEntityTooSmall(t *testing.T) { - msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size '0B' for single PUT operation.", -1) + msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", -1) expectedResult := ErrorResponse{ - Code: "EntityTooLarge", + StatusCode: http.StatusBadRequest, + Code: "EntityTooSmall", Message: msg, BucketName: "minio-bucket", Key: "Asia/", } actualResult := ErrEntityTooSmall(-1, "minio-bucket", "Asia/") if !reflect.DeepEqual(expectedResult, actualResult) { - t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult) + t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult) } } @@ -213,6 +221,7 @@ func TestErrUnexpectedEOF(t *testing.T) { msg := fmt.Sprintf("Data read ‘%s’ is not equal to the size ‘%s’ of the input Reader.", strconv.FormatInt(100, 10), strconv.FormatInt(101, 10)) expectedResult := ErrorResponse{ + StatusCode: http.StatusBadRequest, Code: "UnexpectedEOF", Message: msg, BucketName: "minio-bucket", @@ -220,46 +229,49 @@ func TestErrUnexpectedEOF(t *testing.T) { } actualResult := ErrUnexpectedEOF(100, 101, "minio-bucket", "Asia/") if !reflect.DeepEqual(expectedResult, actualResult) { - t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult) + t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult) } } // Test validates 'ErrInvalidBucketName' error response. func TestErrInvalidBucketName(t *testing.T) { expectedResult := ErrorResponse{ - Code: "InvalidBucketName", - Message: "Invalid Bucket name", - RequestID: "minio", + StatusCode: http.StatusBadRequest, + Code: "InvalidBucketName", + Message: "Invalid Bucket name", + RequestID: "minio", } actualResult := ErrInvalidBucketName("Invalid Bucket name") if !reflect.DeepEqual(expectedResult, actualResult) { - t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult) + t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult) } } // Test validates 'ErrInvalidObjectName' error response. func TestErrInvalidObjectName(t *testing.T) { expectedResult := ErrorResponse{ - Code: "NoSuchKey", - Message: "Invalid Object Key", - RequestID: "minio", + StatusCode: http.StatusNotFound, + Code: "NoSuchKey", + Message: "Invalid Object Key", + RequestID: "minio", } actualResult := ErrInvalidObjectName("Invalid Object Key") if !reflect.DeepEqual(expectedResult, actualResult) { - t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult) + t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult) } } // Test validates 'ErrInvalidArgument' response. 
func TestErrInvalidArgument(t *testing.T) { expectedResult := ErrorResponse{ - Code: "InvalidArgument", - Message: "Invalid Argument", - RequestID: "minio", + StatusCode: http.StatusBadRequest, + Code: "InvalidArgument", + Message: "Invalid Argument", + RequestID: "minio", } actualResult := ErrInvalidArgument("Invalid Argument") if !reflect.DeepEqual(expectedResult, actualResult) { - t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult) + t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult) } } diff --git a/vendor/github.com/minio/minio-go/transport_1_5.go b/vendor/github.com/minio/minio-go/api-get-object-context.go similarity index 51% rename from vendor/github.com/minio/minio-go/transport_1_5.go rename to vendor/github.com/minio/minio-go/api-get-object-context.go index 468daafd3..f8dfac7d6 100644 --- a/vendor/github.com/minio/minio-go/transport_1_5.go +++ b/vendor/github.com/minio/minio-go/api-get-object-context.go @@ -1,8 +1,6 @@ -// +build go1.5,!go1.6,!go1.7,!go1.8 - /* * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2017 Minio, Inc. + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,21 +17,10 @@ package minio -import ( - "net/http" - "time" -) +import "context" -// This default transport is similar to http.DefaultTransport -// but with additional DisableCompression: -var defaultMinioTransport http.RoundTripper = &http.Transport{ - Proxy: http.ProxyFromEnvironment, - TLSHandshakeTimeout: 10 * time.Second, - // Set this value so that the underlying transport round-tripper - // doesn't try to auto decode the body of objects with - // content-encoding set to `gzip`. - // - // Refer: - // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843 - DisableCompression: true, +// GetObjectWithContext - returns an seekable, readable object. +// The options can be used to specify the GET request further. +func (c Client) GetObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { + return c.getObjectWithContext(ctx, bucketName, objectName, opts) } diff --git a/vendor/github.com/minio/minio-go/api-get-object-file.go b/vendor/github.com/minio/minio-go/api-get-object-file.go index c4193e934..2b58220a6 100644 --- a/vendor/github.com/minio/minio-go/api-get-object-file.go +++ b/vendor/github.com/minio/minio-go/api-get-object-file.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -21,11 +22,34 @@ import ( "os" "path/filepath" + "github.com/minio/minio-go/pkg/encrypt" + + "context" + "github.com/minio/minio-go/pkg/s3utils" ) +// FGetObjectWithContext - download contents of an object to a local file. +// The options can be used to specify the GET request further. +func (c Client) FGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { + return c.fGetObjectWithContext(ctx, bucketName, objectName, filePath, opts) +} + // FGetObject - download contents of an object to a local file. 
-func (c Client) FGetObject(bucketName, objectName, filePath string) error { +func (c Client) FGetObject(bucketName, objectName, filePath string, opts GetObjectOptions) error { + return c.fGetObjectWithContext(context.Background(), bucketName, objectName, filePath, opts) +} + +// FGetEncryptedObject - Decrypt and store an object at filePath. +func (c Client) FGetEncryptedObject(bucketName, objectName, filePath string, materials encrypt.Materials) error { + if materials == nil { + return ErrInvalidArgument("Unable to recognize empty encryption properties") + } + return c.FGetObject(bucketName, objectName, filePath, GetObjectOptions{Materials: materials}) +} + +// fGetObjectWithContext - fgetObject wrapper function with context +func (c Client) fGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -60,7 +84,7 @@ func (c Client) FGetObject(bucketName, objectName, filePath string) error { } // Gather md5sum. - objectStat, err := c.StatObject(bucketName, objectName) + objectStat, err := c.StatObject(bucketName, objectName, StatObjectOptions{opts}) if err != nil { return err } @@ -82,13 +106,12 @@ func (c Client) FGetObject(bucketName, objectName, filePath string) error { // Initialize get object request headers to set the // appropriate range offsets to read from. - reqHeaders := NewGetReqHeaders() if st.Size() > 0 { - reqHeaders.SetRange(st.Size(), 0) + opts.SetRange(st.Size(), 0) } // Seek to current position for incoming reader. - objectReader, objectStat, err := c.getObject(bucketName, objectName, reqHeaders) + objectReader, objectStat, err := c.getObject(ctx, bucketName, objectName, opts) if err != nil { return err } diff --git a/vendor/github.com/minio/minio-go/api-get-object.go b/vendor/github.com/minio/minio-go/api-get-object.go index 9bd784ffa..eb9e9a4ca 100644 --- a/vendor/github.com/minio/minio-go/api-get-object.go +++ b/vendor/github.com/minio/minio-go/api-get-object.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016, 2017 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,6 +18,7 @@ package minio import ( + "context" "errors" "fmt" "io" @@ -36,27 +38,16 @@ func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMateria return nil, ErrInvalidArgument("Unable to recognize empty encryption properties") } - // Fetch encrypted object - encReader, err := c.GetObject(bucketName, objectName) - if err != nil { - return nil, err - } - // Stat object to get its encryption metadata - st, err := encReader.Stat() - if err != nil { - return nil, err - } - - // Setup object for decrytion, object is transparently - // decrypted as the consumer starts reading. - encryptMaterials.SetupDecryptMode(encReader, st.Metadata.Get(amzHeaderIV), st.Metadata.Get(amzHeaderKey)) - - // Success. - return encryptMaterials, nil + return c.GetObject(bucketName, objectName, GetObjectOptions{Materials: encryptMaterials}) } // GetObject - returns an seekable, readable object. 
-func (c Client) GetObject(bucketName, objectName string) (*Object, error) { +func (c Client) GetObject(bucketName, objectName string, opts GetObjectOptions) (*Object, error) { + return c.getObjectWithContext(context.Background(), bucketName, objectName, opts) +} + +// GetObject wrapper function that accepts a request context +func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, err @@ -102,34 +93,26 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) { if req.isFirstReq { // First request is a Read/ReadAt. if req.isReadOp { - reqHeaders := NewGetReqHeaders() // Differentiate between wanting the whole object and just a range. if req.isReadAt { // If this is a ReadAt request only get the specified range. // Range is set with respect to the offset and length of the buffer requested. // Do not set objectInfo from the first readAt request because it will not get // the whole object. - reqHeaders.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) - httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders) - } else { - if req.Offset > 0 { - reqHeaders.SetRange(req.Offset, 0) - } - - // First request is a Read request. - httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders) + opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) + } else if req.Offset > 0 { + opts.SetRange(req.Offset, 0) } + httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, opts) if err != nil { - resCh <- getResponse{ - Error: err, - } + resCh <- getResponse{Error: err} return } etag = objectInfo.ETag // Read at least firstReq.Buffer bytes, if not we have // reached our EOF. size, err := io.ReadFull(httpReader, req.Buffer) - if err == io.ErrUnexpectedEOF { + if size > 0 && err == io.ErrUnexpectedEOF { // If an EOF happens after reading some but not // all the bytes ReadFull returns ErrUnexpectedEOF err = io.EOF @@ -144,7 +127,7 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) { } else { // First request is a Stat or Seek call. // Only need to run a StatObject until an actual Read or ReadAt request comes through. - objectInfo, err = c.StatObject(bucketName, objectName) + objectInfo, err = c.statObject(bucketName, objectName, StatObjectOptions{opts}) if err != nil { resCh <- getResponse{ Error: err, @@ -159,11 +142,10 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) { } } } else if req.settingObjectInfo { // Request is just to get objectInfo. - reqHeaders := NewGetReqHeaders() if etag != "" { - reqHeaders.SetMatchETag(etag) + opts.SetMatchETag(etag) } - objectInfo, err := c.statObject(bucketName, objectName, reqHeaders) + objectInfo, err := c.statObject(bucketName, objectName, StatObjectOptions{opts}) if err != nil { resCh <- getResponse{ Error: err, @@ -183,9 +165,8 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) { // new ones when they haven't been already. // All readAt requests are new requests. if req.DidOffsetChange || !req.beenRead { - reqHeaders := NewGetReqHeaders() if etag != "" { - reqHeaders.SetMatchETag(etag) + opts.SetMatchETag(etag) } if httpReader != nil { // Close previously opened http reader. @@ -194,16 +175,11 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) { // If this request is a readAt only get the specified range. 
if req.isReadAt { // Range is set with respect to the offset and length of the buffer requested. - reqHeaders.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) - httpReader, _, err = c.getObject(bucketName, objectName, reqHeaders) - } else { - // Range is set with respect to the offset. - if req.Offset > 0 { - reqHeaders.SetRange(req.Offset, 0) - } - - httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders) + opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) + } else if req.Offset > 0 { // Range is set with respect to the offset. + opts.SetRange(req.Offset, 0) } + httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, opts) if err != nil { resCh <- getResponse{ Error: err, @@ -626,7 +602,7 @@ func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- // // For more information about the HTTP Range header. // go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. -func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeaders) (io.ReadCloser, ObjectInfo, error) { +func (c Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, error) { // Validate input arguments. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return nil, ObjectInfo{}, err @@ -635,18 +611,12 @@ func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeade return nil, ObjectInfo{}, err } - // Set all the necessary reqHeaders. - customHeader := make(http.Header) - for key, value := range reqHeaders.Header { - customHeader[key] = value - } - // Execute GET on objectName. - resp, err := c.executeMethod("GET", requestMetadata{ - bucketName: bucketName, - objectName: objectName, - customHeader: customHeader, - contentSHA256Bytes: emptySHA256, + resp, err := c.executeMethod(ctx, "GET", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + customHeader: opts.Header(), + contentSHA256Hex: emptySHA256Hex, }) if err != nil { return nil, ObjectInfo{}, err @@ -692,6 +662,15 @@ func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeade Metadata: extractObjMetadata(resp.Header), } + reader := resp.Body + if opts.Materials != nil { + err = opts.Materials.SetupDecryptMode(reader, objectStat.Metadata.Get(amzHeaderIV), objectStat.Metadata.Get(amzHeaderKey)) + if err != nil { + return nil, ObjectInfo{}, err + } + reader = opts.Materials + } + // do not close body here, caller will close - return resp.Body, objectStat, nil + return reader, objectStat, nil } diff --git a/vendor/github.com/minio/minio-go/request-headers.go b/vendor/github.com/minio/minio-go/api-get-options.go similarity index 53% rename from vendor/github.com/minio/minio-go/request-headers.go rename to vendor/github.com/minio/minio-go/api-get-options.go index 76c87202d..dd70415cd 100644 --- a/vendor/github.com/minio/minio-go/request-headers.go +++ b/vendor/github.com/minio/minio-go/api-get-options.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016-17 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -20,80 +21,94 @@ import ( "fmt" "net/http" "time" + + "github.com/minio/minio-go/pkg/encrypt" ) -// RequestHeaders - implement methods for setting special -// request headers for GET, HEAD object operations. -// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html -type RequestHeaders struct { - http.Header +// GetObjectOptions are used to specify additional headers or options +// during GET requests. +type GetObjectOptions struct { + headers map[string]string + + Materials encrypt.Materials } -// NewGetReqHeaders - initializes a new request headers for GET request. -func NewGetReqHeaders() RequestHeaders { - return RequestHeaders{ - Header: make(http.Header), - } +// StatObjectOptions are used to specify additional headers or options +// during GET info/stat requests. +type StatObjectOptions struct { + GetObjectOptions } -// NewHeadReqHeaders - initializes a new request headers for HEAD request. -func NewHeadReqHeaders() RequestHeaders { - return RequestHeaders{ - Header: make(http.Header), +// Header returns the http.Header representation of the GET options. +func (o GetObjectOptions) Header() http.Header { + headers := make(http.Header, len(o.headers)) + for k, v := range o.headers { + headers.Set(k, v) } + return headers +} + +// Set adds a key value pair to the options. The +// key-value pair will be part of the HTTP GET request +// headers. +func (o *GetObjectOptions) Set(key, value string) { + if o.headers == nil { + o.headers = make(map[string]string) + } + o.headers[http.CanonicalHeaderKey(key)] = value } // SetMatchETag - set match etag. -func (c RequestHeaders) SetMatchETag(etag string) error { +func (o *GetObjectOptions) SetMatchETag(etag string) error { if etag == "" { return ErrInvalidArgument("ETag cannot be empty.") } - c.Set("If-Match", "\""+etag+"\"") + o.Set("If-Match", "\""+etag+"\"") return nil } // SetMatchETagExcept - set match etag except. -func (c RequestHeaders) SetMatchETagExcept(etag string) error { +func (o *GetObjectOptions) SetMatchETagExcept(etag string) error { if etag == "" { return ErrInvalidArgument("ETag cannot be empty.") } - c.Set("If-None-Match", "\""+etag+"\"") + o.Set("If-None-Match", "\""+etag+"\"") return nil } // SetUnmodified - set unmodified time since. -func (c RequestHeaders) SetUnmodified(modTime time.Time) error { +func (o *GetObjectOptions) SetUnmodified(modTime time.Time) error { if modTime.IsZero() { return ErrInvalidArgument("Modified since cannot be empty.") } - c.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat)) + o.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat)) return nil } // SetModified - set modified time since. -func (c RequestHeaders) SetModified(modTime time.Time) error { +func (o *GetObjectOptions) SetModified(modTime time.Time) error { if modTime.IsZero() { return ErrInvalidArgument("Modified since cannot be empty.") } - c.Set("If-Modified-Since", modTime.Format(http.TimeFormat)) + o.Set("If-Modified-Since", modTime.Format(http.TimeFormat)) return nil } // SetRange - set the start and end offset of the object to be read. // See https://tools.ietf.org/html/rfc7233#section-3.1 for reference. -func (c RequestHeaders) SetRange(start, end int64) error { +func (o *GetObjectOptions) SetRange(start, end int64) error { switch { case start == 0 && end < 0: // Read last '-end' bytes. `bytes=-N`. - c.Set("Range", fmt.Sprintf("bytes=%d", end)) + o.Set("Range", fmt.Sprintf("bytes=%d", end)) case 0 < start && end == 0: // Read everything starting from offset // 'start'. `bytes=N-`. 
- c.Set("Range", fmt.Sprintf("bytes=%d-", start)) + o.Set("Range", fmt.Sprintf("bytes=%d-", start)) case 0 <= start && start <= end: // Read everything starting at 'start' till the // 'end'. `bytes=N-M` - c.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end)) + o.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end)) default: // All other cases such as // bytes=-3- diff --git a/vendor/github.com/minio/minio-go/api-get-policy.go b/vendor/github.com/minio/minio-go/api-get-policy.go index 10ccdc66b..a4259c9d7 100644 --- a/vendor/github.com/minio/minio-go/api-get-policy.go +++ b/vendor/github.com/minio/minio-go/api-get-policy.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,6 +18,7 @@ package minio import ( + "context" "encoding/json" "io/ioutil" "net/http" @@ -79,10 +81,10 @@ func (c Client) getBucketPolicy(bucketName string) (policy.BucketAccessPolicy, e urlValues.Set("policy", "") // Execute GET on bucket to list objects. - resp, err := c.executeMethod("GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Bytes: emptySHA256, + resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, }) defer closeResponse(resp) diff --git a/vendor/github.com/minio/minio-go/api-list.go b/vendor/github.com/minio/minio-go/api-list.go index 6de1fe9b3..3cfb47d37 100644 --- a/vendor/github.com/minio/minio-go/api-list.go +++ b/vendor/github.com/minio/minio-go/api-list.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,6 +18,7 @@ package minio import ( + "context" "errors" "fmt" "net/http" @@ -38,7 +40,7 @@ import ( // func (c Client) ListBuckets() ([]BucketInfo, error) { // Execute GET on service. - resp, err := c.executeMethod("GET", requestMetadata{contentSHA256Bytes: emptySHA256}) + resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{contentSHA256Hex: emptySHA256Hex}) defer closeResponse(resp) if err != nil { return nil, err @@ -215,10 +217,10 @@ func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken s urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) // Execute GET on bucket to list objects. - resp, err := c.executeMethod("GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Bytes: emptySHA256, + resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, }) defer closeResponse(resp) if err != nil { @@ -393,10 +395,10 @@ func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimit urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) // Execute GET on bucket to list objects. 
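The hunks above replace the old RequestHeaders type with GetObjectOptions and thread a context through the GET path. A minimal caller-side sketch of the new GetObject signature follows; it is not part of the patch itself. The endpoint, credentials, bucket and object names are placeholders, and only GetObject, GetObjectOptions and its Set/SetRange helpers are taken from the diff above.

package main

import (
	"io"
	"log"
	"os"

	minio "github.com/minio/minio-go"
)

func main() {
	// Hypothetical endpoint and credentials; replace with real values.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// GetObjectOptions replaces the old RequestHeaders type.
	var opts minio.GetObjectOptions
	// Ask only for the first KiB of the object (Range: bytes=0-1023).
	if err := opts.SetRange(0, 1023); err != nil {
		log.Fatalln(err)
	}
	// Arbitrary extra request headers can still be attached via Set.
	opts.Set("X-Amz-Request-Payer", "requester")

	obj, err := client.GetObject("my-bucket", "my-object", opts)
	if err != nil {
		log.Fatalln(err)
	}
	defer obj.Close()

	if _, err := io.Copy(os.Stdout, obj); err != nil {
		log.Fatalln(err)
	}
}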
- resp, err := c.executeMethod("GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Bytes: emptySHA256, + resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, }) defer closeResponse(resp) if err != nil { @@ -572,10 +574,10 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads)) // Execute GET on bucketName to list multipart uploads. - resp, err := c.executeMethod("GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Bytes: emptySHA256, + resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, }) defer closeResponse(resp) if err != nil { @@ -690,11 +692,11 @@ func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, pa urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts)) // Execute GET on objectName to get list of parts. - resp, err := c.executeMethod("GET", requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentSHA256Bytes: emptySHA256, + resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, }) defer closeResponse(resp) if err != nil { diff --git a/vendor/github.com/minio/minio-go/api-notification.go b/vendor/github.com/minio/minio-go/api-notification.go index 25a283af5..3f5b30a3b 100644 --- a/vendor/github.com/minio/minio-go/api-notification.go +++ b/vendor/github.com/minio/minio-go/api-notification.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,6 +19,7 @@ package minio import ( "bufio" + "context" "encoding/json" "io" "net/http" @@ -46,10 +48,10 @@ func (c Client) getBucketNotification(bucketName string) (BucketNotification, er urlValues.Set("notification", "") // Execute GET on bucket to list objects. - resp, err := c.executeMethod("GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Bytes: emptySHA256, + resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, }) defer closeResponse(resp) @@ -150,7 +152,7 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even // Check ARN partition to verify if listening bucket is supported if s3utils.IsAmazonEndpoint(c.endpointURL) || s3utils.IsGoogleEndpoint(c.endpointURL) { notificationInfoCh <- NotificationInfo{ - Err: ErrAPINotSupported("Listening bucket notification is specific only to `minio` partitions"), + Err: ErrAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"), } return } @@ -170,13 +172,16 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even urlValues["events"] = events // Execute GET on bucket to list objects. 
- resp, err := c.executeMethod("GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Bytes: emptySHA256, + resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, }) if err != nil { - continue + notificationInfoCh <- NotificationInfo{ + Err: err, + } + return } // Validate http response, upon error return quickly. diff --git a/vendor/github.com/minio/minio-go/api-presigned.go b/vendor/github.com/minio/minio-go/api-presigned.go index c645828df..123ad4453 100644 --- a/vendor/github.com/minio/minio-go/api-presigned.go +++ b/vendor/github.com/minio/minio-go/api-presigned.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,6 +19,7 @@ package minio import ( "errors" + "net/http" "net/url" "time" @@ -25,16 +27,6 @@ import ( "github.com/minio/minio-go/pkg/s3utils" ) -// supportedGetReqParams - supported request parameters for GET presigned request. -var supportedGetReqParams = map[string]struct{}{ - "response-expires": {}, - "response-content-type": {}, - "response-cache-control": {}, - "response-content-language": {}, - "response-content-encoding": {}, - "response-content-disposition": {}, -} - // presignURL - Returns a presigned URL for an input 'method'. // Expires maximum is 7days - ie. 604800 and minimum is 1. func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { @@ -42,42 +34,27 @@ func (c Client) presignURL(method string, bucketName string, objectName string, if method == "" { return nil, ErrInvalidArgument("method cannot be empty.") } - if err := s3utils.CheckValidBucketName(bucketName); err != nil { + if err = s3utils.CheckValidBucketName(bucketName); err != nil { return nil, err } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return nil, err - } - if err := isValidExpiry(expires); err != nil { + if err = isValidExpiry(expires); err != nil { return nil, err } // Convert expires into seconds. expireSeconds := int64(expires / time.Second) reqMetadata := requestMetadata{ - presignURL: true, - bucketName: bucketName, - objectName: objectName, - expires: expireSeconds, - } - - // For "GET" we are handling additional request parameters to - // override its response headers. - if method == "GET" { - // Verify if input map has unsupported params, if yes exit. - for k := range reqParams { - if _, ok := supportedGetReqParams[k]; !ok { - return nil, ErrInvalidArgument(k + " unsupported request parameter for presigned GET.") - } - } - // Save the request parameters to be used in presigning for GET request. - reqMetadata.queryValues = reqParams + presignURL: true, + bucketName: bucketName, + objectName: objectName, + expires: expireSeconds, + queryValues: reqParams, } // Instantiate a new request. // Since expires is set newRequest will presign the request. 
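With the ListenBucketNotification change above, a failed listen request is now reported on the notification channel and ends the stream instead of being silently retried. A usage sketch, not part of the patch; the doneCh parameter and the event ARN strings follow the upstream minio-go API and are assumptions here, as are the placeholder names.

package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	doneCh := make(chan struct{})
	defer close(doneCh)

	// Errors, including the request failure handled in the hunk above,
	// arrive as NotificationInfo values with a non-nil Err field.
	for info := range client.ListenBucketNotification("my-bucket", "photos/", ".jpg",
		[]string{"s3:ObjectCreated:*", "s3:ObjectRemoved:*"}, doneCh) {
		if info.Err != nil {
			log.Fatalln(info.Err)
		}
		log.Println(info)
	}
}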
- req, err := c.newRequest(method, reqMetadata) - if err != nil { + var req *http.Request + if req, err = c.newRequest(method, reqMetadata); err != nil { return nil, err } return req.URL, nil @@ -88,6 +65,9 @@ func (c Client) presignURL(method string, bucketName string, objectName string, // upto 7days or a minimum of 1sec. Additionally you can override // a set of response headers using the query parameters. func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } return c.presignURL("GET", bucketName, objectName, expires, reqParams) } @@ -96,6 +76,9 @@ func (c Client) PresignedGetObject(bucketName string, objectName string, expires // upto 7days or a minimum of 1sec. Additionally you can override // a set of response headers using the query parameters. func (c Client) PresignedHeadObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } return c.presignURL("HEAD", bucketName, objectName, expires, reqParams) } @@ -103,6 +86,9 @@ func (c Client) PresignedHeadObject(bucketName string, objectName string, expire // without credentials. URL can have a maximum expiry of upto 7days // or a minimum of 1sec. func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) { + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } return c.presignURL("PUT", bucketName, objectName, expires, nil) } diff --git a/vendor/github.com/minio/minio-go/api-put-bucket.go b/vendor/github.com/minio/minio-go/api-put-bucket.go index fd37dc192..bb583a78f 100644 --- a/vendor/github.com/minio/minio-go/api-put-bucket.go +++ b/vendor/github.com/minio/minio-go/api-put-bucket.go @@ -1,6 +1,6 @@ /* * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2015, 2016, 2017 Minio, Inc. + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,6 +19,7 @@ package minio import ( "bytes" + "context" "encoding/json" "encoding/xml" "fmt" @@ -75,14 +76,14 @@ func (c Client) MakeBucket(bucketName string, location string) (err error) { if err != nil { return err } - reqMetadata.contentMD5Bytes = sumMD5(createBucketConfigBytes) - reqMetadata.contentSHA256Bytes = sum256(createBucketConfigBytes) + reqMetadata.contentMD5Base64 = sumMD5Base64(createBucketConfigBytes) + reqMetadata.contentSHA256Hex = sum256Hex(createBucketConfigBytes) reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes) reqMetadata.contentLength = int64(len(createBucketConfigBytes)) } // Execute PUT to create a new bucket. 
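The presign hunks drop the whitelist of response-* query parameters (any reqParams are now passed through unchanged) and move object-name validation into the exported Presigned* wrappers. A caller-side sketch with placeholder names, not part of the patch:

package main

import (
	"log"
	"net/url"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Response header overrides are no longer checked against a fixed list.
	reqParams := make(url.Values)
	reqParams.Set("response-content-disposition", `attachment; filename="report.pdf"`)

	getURL, err := client.PresignedGetObject("my-bucket", "report.pdf", 15*time.Minute, reqParams)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("GET:", getURL)

	putURL, err := client.PresignedPutObject("my-bucket", "upload.bin", time.Hour)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("PUT:", putURL)
}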
- resp, err := c.executeMethod("PUT", reqMetadata) + resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) defer closeResponse(resp) if err != nil { return err @@ -161,16 +162,16 @@ func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAcces policyBuffer := bytes.NewReader(policyBytes) reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: policyBuffer, - contentLength: int64(len(policyBytes)), - contentMD5Bytes: sumMD5(policyBytes), - contentSHA256Bytes: sum256(policyBytes), + bucketName: bucketName, + queryValues: urlValues, + contentBody: policyBuffer, + contentLength: int64(len(policyBytes)), + contentMD5Base64: sumMD5Base64(policyBytes), + contentSHA256Hex: sum256Hex(policyBytes), } // Execute PUT to upload a new bucket policy. - resp, err := c.executeMethod("PUT", reqMetadata) + resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) defer closeResponse(resp) if err != nil { return err @@ -195,10 +196,10 @@ func (c Client) removeBucketPolicy(bucketName string) error { urlValues.Set("policy", "") // Execute DELETE on objectName. - resp, err := c.executeMethod("DELETE", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Bytes: emptySHA256, + resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, }) defer closeResponse(resp) if err != nil { @@ -226,16 +227,16 @@ func (c Client) SetBucketNotification(bucketName string, bucketNotification Buck notifBuffer := bytes.NewReader(notifBytes) reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: notifBuffer, - contentLength: int64(len(notifBytes)), - contentMD5Bytes: sumMD5(notifBytes), - contentSHA256Bytes: sum256(notifBytes), + bucketName: bucketName, + queryValues: urlValues, + contentBody: notifBuffer, + contentLength: int64(len(notifBytes)), + contentMD5Base64: sumMD5Base64(notifBytes), + contentSHA256Hex: sum256Hex(notifBytes), } // Execute PUT to upload a new bucket notification. - resp, err := c.executeMethod("PUT", reqMetadata) + resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) defer closeResponse(resp) if err != nil { return err diff --git a/vendor/github.com/minio/minio-go/api-put-object-common.go b/vendor/github.com/minio/minio-go/api-put-object-common.go index 0158bc1d8..c16c3c69a 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-common.go +++ b/vendor/github.com/minio/minio-go/api-put-object-common.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -17,6 +18,7 @@ package minio import ( + "context" "io" "math" "os" @@ -24,12 +26,6 @@ import ( "github.com/minio/minio-go/pkg/s3utils" ) -// Verify if reader is *os.File -func isFile(reader io.Reader) (ok bool) { - _, ok = reader.(*os.File) - return -} - // Verify if reader is *minio.Object func isObject(reader io.Reader) (ok bool) { _, ok = reader.(*Object) @@ -39,6 +35,26 @@ func isObject(reader io.Reader) (ok bool) { // Verify if reader is a generic ReaderAt func isReadAt(reader io.Reader) (ok bool) { _, ok = reader.(io.ReaderAt) + if ok { + var v *os.File + v, ok = reader.(*os.File) + if ok { + // Stdin, Stdout and Stderr all have *os.File type + // which happen to also be io.ReaderAt compatible + // we need to add special conditions for them to + // be ignored by this function. + for _, f := range []string{ + "/dev/stdin", + "/dev/stdout", + "/dev/stderr", + } { + if f == v.Name() { + ok = false + break + } + } + } + } return } @@ -77,7 +93,7 @@ func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, las // getUploadID - fetch upload id if already present for an object name // or initiate a new request to fetch a new upload id. -func (c Client) newUploadID(bucketName, objectName string, metaData map[string][]string) (uploadID string, err error) { +func (c Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return "", err @@ -87,7 +103,7 @@ func (c Client) newUploadID(bucketName, objectName string, metaData map[string][ } // Initiate multipart upload for an object. - initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, metaData) + initMultipartUploadResult, err := c.initiateMultipartUpload(ctx, bucketName, objectName, opts) if err != nil { return "", err } diff --git a/vendor/github.com/minio/minio-go/api-put-object-context.go b/vendor/github.com/minio/minio-go/api-put-object-context.go new file mode 100644 index 000000000..a6f23dcaa --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-put-object-context.go @@ -0,0 +1,39 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io" +) + +// PutObjectWithContext - Identical to PutObject call, but accepts context to facilitate request cancellation. 
+func (c Client) PutObjectWithContext(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, + opts PutObjectOptions) (n int64, err error) { + err = opts.validate() + if err != nil { + return 0, err + } + if opts.EncryptMaterials != nil { + if err = opts.EncryptMaterials.SetupEncryptMode(reader); err != nil { + return 0, err + } + return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, opts.EncryptMaterials, opts) + } + return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts) +} diff --git a/vendor/github.com/minio/minio-go/api-put-object-copy.go b/vendor/github.com/minio/minio-go/api-put-object-copy.go index 32fa873d8..8032009dc 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-copy.go +++ b/vendor/github.com/minio/minio-go/api-put-object-copy.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/api-put-object-encrypted.go b/vendor/github.com/minio/minio-go/api-put-object-encrypted.go index 534a21ecf..87dd1ab1a 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-encrypted.go +++ b/vendor/github.com/minio/minio-go/api-put-object-encrypted.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,13 +18,14 @@ package minio import ( + "context" "io" "github.com/minio/minio-go/pkg/encrypt" ) // PutEncryptedObject - Encrypt and store object. -func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials, metadata map[string][]string, progress io.Reader) (n int64, err error) { +func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials) (n int64, err error) { if encryptMaterials == nil { return 0, ErrInvalidArgument("Unable to recognize empty encryption properties") @@ -33,14 +35,10 @@ func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Read return 0, err } - if metadata == nil { - metadata = make(map[string][]string) - } - - // Set the necessary encryption headers, for future decryption. - metadata[amzHeaderIV] = []string{encryptMaterials.GetIV()} - metadata[amzHeaderKey] = []string{encryptMaterials.GetKey()} - metadata[amzHeaderMatDesc] = []string{encryptMaterials.GetDesc()} - - return c.putObjectMultipartStreamNoLength(bucketName, objectName, encryptMaterials, metadata, progress) + return c.PutObjectWithContext(context.Background(), bucketName, objectName, reader, -1, PutObjectOptions{EncryptMaterials: encryptMaterials}) +} + +// FPutEncryptedObject - Encrypt and store an object with contents from file at filePath. 
+func (c Client) FPutEncryptedObject(bucketName, objectName, filePath string, encryptMaterials encrypt.Materials) (n int64, err error) { + return c.FPutObjectWithContext(context.Background(), bucketName, objectName, filePath, PutObjectOptions{EncryptMaterials: encryptMaterials}) } diff --git a/vendor/github.com/minio/minio-go/api-put-object-file-context.go b/vendor/github.com/minio/minio-go/api-put-object-file-context.go new file mode 100644 index 000000000..140a9c069 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-put-object-file-context.go @@ -0,0 +1,64 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "mime" + "os" + "path/filepath" + + "github.com/minio/minio-go/pkg/s3utils" +) + +// FPutObjectWithContext - Create an object in a bucket, with contents from file at filePath. Allows request cancellation. +func (c Client) FPutObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return 0, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return 0, err + } + + // Open the referenced file. + fileReader, err := os.Open(filePath) + // If any error fail quickly here. + if err != nil { + return 0, err + } + defer fileReader.Close() + + // Save the file stat. + fileStat, err := fileReader.Stat() + if err != nil { + return 0, err + } + + // Save the file size. + fileSize := fileStat.Size() + + // Set contentType based on filepath extension if not given or default + // value of "application/octet-stream" if the extension has no associated type. + if opts.ContentType == "" { + if opts.ContentType = mime.TypeByExtension(filepath.Ext(filePath)); opts.ContentType == "" { + opts.ContentType = "application/octet-stream" + } + } + return c.PutObjectWithContext(ctx, bucketName, objectName, fileReader, fileSize, opts) +} diff --git a/vendor/github.com/minio/minio-go/api-put-object-file.go b/vendor/github.com/minio/minio-go/api-put-object-file.go index 81cdf5c2c..7c8e05117 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-file.go +++ b/vendor/github.com/minio/minio-go/api-put-object-file.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,50 +18,10 @@ package minio import ( - "mime" - "os" - "path/filepath" - - "github.com/minio/minio-go/pkg/s3utils" + "context" ) -// FPutObject - Create an object in a bucket, with contents from file at filePath. 
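PutEncryptedObject loses its metadata and progress parameters and, together with the new FPutEncryptedObject, simply routes through PutObjectWithContext with EncryptMaterials set. A sketch of the simplified calls, not part of the patch; the key construction via pkg/encrypt (NewSymmetricKey, NewCBCSecureMaterials) follows the upstream package and is an assumption here, as are the placeholder names and paths.

package main

import (
	"log"
	"strings"

	minio "github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Client-side encryption materials (assumed pkg/encrypt constructors).
	key := encrypt.NewSymmetricKey([]byte("32byteslongsecretkeymustbegiven1"))
	materials, err := encrypt.NewCBCSecureMaterials(key)
	if err != nil {
		log.Fatalln(err)
	}

	// Encrypt and store from a stream; the size is handled internally.
	n, err := client.PutEncryptedObject("my-bucket", "secret.txt",
		strings.NewReader("confidential payload"), materials)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("uploaded encrypted bytes:", n)

	// Or encrypt and store directly from a file on disk.
	if _, err := client.FPutEncryptedObject("my-bucket", "secret.bin", "/tmp/secret.bin", materials); err != nil {
		log.Fatalln(err)
	}
}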
-func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err - } - - // Open the referenced file. - fileReader, err := os.Open(filePath) - // If any error fail quickly here. - if err != nil { - return 0, err - } - defer fileReader.Close() - - // Save the file stat. - fileStat, err := fileReader.Stat() - if err != nil { - return 0, err - } - - // Save the file size. - fileSize := fileStat.Size() - - objMetadata := make(map[string][]string) - - // Set contentType based on filepath extension if not given or default - // value of "binary/octet-stream" if the extension has no associated type. - if contentType == "" { - if contentType = mime.TypeByExtension(filepath.Ext(filePath)); contentType == "" { - contentType = "application/octet-stream" - } - } - - objMetadata["Content-Type"] = []string{contentType} - return c.putObjectCommon(bucketName, objectName, fileReader, fileSize, objMetadata, nil) +// FPutObject - Create an object in a bucket, with contents from file at filePath +func (c Client) FPutObject(bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) { + return c.FPutObjectWithContext(context.Background(), bucketName, objectName, filePath, opts) } diff --git a/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/api-put-object-multipart.go index aefeb5f26..f5b8893e6 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-multipart.go +++ b/vendor/github.com/minio/minio-go/api-put-object-multipart.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,6 +19,9 @@ package minio import ( "bytes" + "context" + "encoding/base64" + "encoding/hex" "encoding/xml" "fmt" "io" @@ -32,9 +36,9 @@ import ( "github.com/minio/minio-go/pkg/s3utils" ) -func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, - metadata map[string][]string, progress io.Reader) (n int64, err error) { - n, err = c.putObjectMultipartNoStream(bucketName, objectName, reader, metadata, progress) +func (c Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, + opts PutObjectOptions) (n int64, err error) { + n, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts) if err != nil { errResp := ToErrorResponse(err) // Verify if multipart functionality is not available, if not @@ -45,13 +49,13 @@ func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Read return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) } // Fall back to uploading as single PutObject operation. 
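FPutObject keeps its name but now takes PutObjectOptions and delegates to the context-aware FPutObjectWithContext shown earlier; when ContentType is left empty it is inferred from the file extension. A usage sketch with placeholder paths, not part of the patch:

package main

import (
	"context"
	"log"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Plain upload; Content-Type is detected from the ".jpg" extension.
	if _, err := client.FPutObject("my-bucket", "photo.jpg", "/tmp/photo.jpg",
		minio.PutObjectOptions{}); err != nil {
		log.Fatalln(err)
	}

	// Same upload, but cancellable via a context deadline.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()
	if _, err := client.FPutObjectWithContext(ctx, "my-bucket", "backup.tar.gz", "/tmp/backup.tar.gz",
		minio.PutObjectOptions{ContentType: "application/gzip"}); err != nil {
		log.Fatalln(err)
	}
}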
- return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) + return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) } } return n, err } -func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) { +func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err @@ -74,14 +78,14 @@ func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader } // Initiate a new multipart upload. - uploadID, err := c.newUploadID(bucketName, objectName, metadata) + uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { return 0, err } defer func() { if err != nil { - c.abortMultipartUpload(bucketName, objectName, uploadID) + c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) } }() @@ -117,12 +121,24 @@ func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader // Update progress reader appropriately to the latest offset // as we read from the source. - rd := newHook(bytes.NewReader(buf[:length]), progress) + rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) + + // Checksums.. + var ( + md5Base64 string + sha256Hex string + ) + if hashSums["md5"] != nil { + md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"]) + } + if hashSums["sha256"] != nil { + sha256Hex = hex.EncodeToString(hashSums["sha256"]) + } // Proceed to upload the part. var objPart ObjectPart - objPart, err = c.uploadPart(bucketName, objectName, uploadID, rd, partNumber, - hashSums["md5"], hashSums["sha256"], int64(length), metadata) + objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, + md5Base64, sha256Hex, int64(length), opts.UserMetadata) if err != nil { return totalUploadedSize, err } @@ -158,7 +174,7 @@ func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) - if _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload); err != nil { + if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil { return totalUploadedSize, err } @@ -167,7 +183,7 @@ func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader } // initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. -func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata map[string][]string) (initiateMultipartUploadResult, error) { +func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return initiateMultipartUploadResult{}, err @@ -181,17 +197,7 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata urlValues.Set("uploads", "") // Set ContentType header. 
- customHeader := make(http.Header) - for k, v := range metadata { - if len(v) > 0 { - customHeader.Set(k, v[0]) - } - } - - // Set a default content-type header if the latter is not provided - if v, ok := metadata["Content-Type"]; !ok || len(v) == 0 { - customHeader.Set("Content-Type", "application/octet-stream") - } + customHeader := opts.Header() reqMetadata := requestMetadata{ bucketName: bucketName, @@ -201,7 +207,7 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata } // Execute POST on an objectName to initiate multipart upload. - resp, err := c.executeMethod("POST", reqMetadata) + resp, err := c.executeMethod(ctx, "POST", reqMetadata) defer closeResponse(resp) if err != nil { return initiateMultipartUploadResult{}, err @@ -223,8 +229,8 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata const serverEncryptionKeyPrefix = "x-amz-server-side-encryption" // uploadPart - Uploads a part in a multipart upload. -func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, - partNumber int, md5Sum, sha256Sum []byte, size int64, metadata map[string][]string) (ObjectPart, error) { +func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader, + partNumber int, md5Base64, sha256Hex string, size int64, metadata map[string]string) (ObjectPart, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ObjectPart{}, err @@ -257,24 +263,24 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re for k, v := range metadata { if len(v) > 0 { if strings.HasPrefix(strings.ToLower(k), serverEncryptionKeyPrefix) { - customHeader.Set(k, v[0]) + customHeader.Set(k, v) } } } reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - customHeader: customHeader, - contentBody: reader, - contentLength: size, - contentMD5Bytes: md5Sum, - contentSHA256Bytes: sha256Sum, + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + customHeader: customHeader, + contentBody: reader, + contentLength: size, + contentMD5Base64: md5Base64, + contentSHA256Hex: sha256Hex, } // Execute PUT on each part. - resp, err := c.executeMethod("PUT", reqMetadata) + resp, err := c.executeMethod(ctx, "PUT", reqMetadata) defer closeResponse(resp) if err != nil { return ObjectPart{}, err @@ -295,7 +301,7 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re } // completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. -func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, +func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { @@ -308,7 +314,6 @@ func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, // Initialize url queries. urlValues := make(url.Values) urlValues.Set("uploadId", uploadID) - // Marshal complete multipart body. completeMultipartUploadBytes, err := xml.Marshal(complete) if err != nil { @@ -318,16 +323,16 @@ func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, // Instantiate all the complete multipart buffer. 
completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes) reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentBody: completeMultipartUploadBuffer, - contentLength: int64(len(completeMultipartUploadBytes)), - contentSHA256Bytes: sum256(completeMultipartUploadBytes), + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentBody: completeMultipartUploadBuffer, + contentLength: int64(len(completeMultipartUploadBytes)), + contentSHA256Hex: sum256Hex(completeMultipartUploadBytes), } // Execute POST to complete multipart upload for an objectName. - resp, err := c.executeMethod("POST", reqMetadata) + resp, err := c.executeMethod(ctx, "POST", reqMetadata) defer closeResponse(resp) if err != nil { return completeMultipartUploadResult{}, err diff --git a/vendor/github.com/minio/minio-go/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/api-put-object-streaming.go index 40cd5c252..579cb5482 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-streaming.go +++ b/vendor/github.com/minio/minio-go/api-put-object-streaming.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,6 +18,7 @@ package minio import ( + "context" "fmt" "io" "net/http" @@ -26,33 +28,23 @@ import ( "github.com/minio/minio-go/pkg/s3utils" ) -// PutObjectStreaming using AWS streaming signature V4 -func (c Client) PutObjectStreaming(bucketName, objectName string, reader io.Reader) (n int64, err error) { - return c.PutObjectWithProgress(bucketName, objectName, reader, nil, nil) -} - // putObjectMultipartStream - upload a large object using // multipart upload and streaming signature for signing payload. // Comprehensive put object operation involving multipart uploads. // // Following code handles these types of readers. // -// - *os.File // - *minio.Object // - Any reader which has a method 'ReadAt()' // -func (c Client) putObjectMultipartStream(bucketName, objectName string, - reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) { +func (c Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string, + reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { - // Verify if reader is *minio.Object, *os.File or io.ReaderAt. - // NOTE: Verification of object is kept for a specific purpose - // while it is going to be duck typed similar to io.ReaderAt. - // It is to indicate that *minio.Object implements io.ReaderAt. - // and such a functionality is used in the subsequent code path. - if isFile(reader) || !isObject(reader) && isReadAt(reader) { - n, err = c.putObjectMultipartStreamFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, metadata, progress) + if !isObject(reader) && isReadAt(reader) { + // Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader. 
+ n, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts) } else { - n, err = c.putObjectMultipartStreamNoChecksum(bucketName, objectName, reader, size, metadata, progress) + n, err = c.putObjectMultipartStreamNoChecksum(ctx, bucketName, objectName, reader, size, opts) } if err != nil { errResp := ToErrorResponse(err) @@ -64,7 +56,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) } // Fall back to uploading as single PutObject operation. - return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) + return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) } } return n, err @@ -94,8 +86,8 @@ type uploadPartReq struct { // temporary files for staging all the data, these temporary files are // cleaned automatically when the caller i.e http client closes the // stream after uploading all the contents successfully. -func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string, - reader io.ReaderAt, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) { +func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string, + reader io.ReaderAt, size int64, opts PutObjectOptions) (n int64, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err @@ -111,7 +103,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string } // Initiate a new multipart upload. - uploadID, err := c.newUploadID(bucketName, objectName, metadata) + uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { return 0, err } @@ -122,7 +114,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string // to relinquish storage space. defer func() { if err != nil { - c.abortMultipartUpload(bucketName, objectName, uploadID) + c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) } }() @@ -150,9 +142,8 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil} } close(uploadPartsCh) - // Receive each part number from the channel allowing three parallel uploads. - for w := 1; w <= totalWorkers; w++ { + for w := 1; w <= opts.getNumThreads(); w++ { go func(partSize int64) { // Each worker will draw from the part channel and upload in parallel. for uploadReq := range uploadPartsCh { @@ -170,13 +161,13 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string } // Get a section reader on a particular offset. - sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), progress) + sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress) // Proceed to upload the part. var objPart ObjectPart - objPart, err = c.uploadPart(bucketName, objectName, uploadID, + objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum, - nil, nil, partSize, metadata) + "", "", partSize, opts.UserMetadata) if err != nil { uploadedPartsCh <- uploadedPartRes{ Size: 0, @@ -229,7 +220,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string // Sort all completed parts. 
sort.Sort(completedParts(complMultipartUpload.Parts)) - _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload) + _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) if err != nil { return totalUploadedSize, err } @@ -238,8 +229,8 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string return totalUploadedSize, nil } -func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string, - reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) { +func (c Client) putObjectMultipartStreamNoChecksum(ctx context.Context, bucketName, objectName string, + reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err @@ -253,9 +244,8 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string if err != nil { return 0, err } - // Initiates a new multipart request - uploadID, err := c.newUploadID(bucketName, objectName, metadata) + uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { return 0, err } @@ -266,7 +256,7 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string // storage space. defer func() { if err != nil { - c.abortMultipartUpload(bucketName, objectName, uploadID) + c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) } }() @@ -281,17 +271,16 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string for partNumber = 1; partNumber <= totalPartsCount; partNumber++ { // Update progress reader appropriately to the latest offset // as we read from the source. - hookReader := newHook(reader, progress) + hookReader := newHook(reader, opts.Progress) // Proceed to upload the part. if partNumber == totalPartsCount { partSize = lastPartSize } - var objPart ObjectPart - objPart, err = c.uploadPart(bucketName, objectName, uploadID, + objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, io.LimitReader(hookReader, partSize), - partNumber, nil, nil, partSize, metadata) + partNumber, "", "", partSize, opts.UserMetadata) if err != nil { return totalUploadedSize, err } @@ -328,7 +317,7 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) - _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload) + _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) if err != nil { return totalUploadedSize, err } @@ -339,7 +328,7 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string // putObjectNoChecksum special function used Google Cloud Storage. This special function // is used for Google Cloud Storage since Google's multipart API is not S3 compatible. -func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { +func (c Client) putObjectNoChecksum(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { // Input validation. 
if err := s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err @@ -355,17 +344,22 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea } if size > 0 { if isReadAt(reader) && !isObject(reader) { - reader = io.NewSectionReader(reader.(io.ReaderAt), 0, size) + seeker, _ := reader.(io.Seeker) + offset, err := seeker.Seek(0, io.SeekCurrent) + if err != nil { + return 0, ErrInvalidArgument(err.Error()) + } + reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size) } } // Update progress reader appropriately to the latest offset as we // read from the source. - readSeeker := newHook(reader, progress) + readSeeker := newHook(reader, opts.Progress) // This function does not calculate sha256 and md5sum for payload. // Execute put object. - st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, metaData) + st, err := c.putObjectDo(ctx, bucketName, objectName, readSeeker, "", "", size, opts) if err != nil { return 0, err } @@ -377,7 +371,7 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea // putObjectDo - executes the put object http operation. // NOTE: You must have WRITE permissions on a bucket to add an object to it. -func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) { +func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (ObjectInfo, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ObjectInfo{}, err @@ -385,35 +379,22 @@ func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5 if err := s3utils.CheckValidObjectName(objectName); err != nil { return ObjectInfo{}, err } - // Set headers. - customHeader := make(http.Header) - - // Set metadata to headers - for k, v := range metaData { - if len(v) > 0 { - customHeader.Set(k, v[0]) - } - } - - // If Content-Type is not provided, set the default application/octet-stream one - if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 { - customHeader.Set("Content-Type", "application/octet-stream") - } + customHeader := opts.Header() // Populate request metadata. reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - customHeader: customHeader, - contentBody: reader, - contentLength: size, - contentMD5Bytes: md5Sum, - contentSHA256Bytes: sha256Sum, + bucketName: bucketName, + objectName: objectName, + customHeader: customHeader, + contentBody: reader, + contentLength: size, + contentMD5Base64: md5Base64, + contentSHA256Hex: sha256Hex, } // Execute PUT an objectName. - resp, err := c.executeMethod("PUT", reqMetadata) + resp, err := c.executeMethod(ctx, "PUT", reqMetadata) defer closeResponse(resp) if err != nil { return ObjectInfo{}, err diff --git a/vendor/github.com/minio/minio-go/api-put-object.go b/vendor/github.com/minio/minio-go/api-put-object.go index 94db82593..1fda1bcd2 100644 --- a/vendor/github.com/minio/minio-go/api-put-object.go +++ b/vendor/github.com/minio/minio-go/api-put-object.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,119 +19,83 @@ package minio import ( "bytes" + "context" "fmt" "io" - "os" - "reflect" - "runtime" + "net/http" "runtime/debug" "sort" - "strings" + "github.com/minio/minio-go/pkg/encrypt" "github.com/minio/minio-go/pkg/s3utils" ) -// toInt - converts go value to its integer representation based -// on the value kind if it is an integer. -func toInt(value reflect.Value) (size int64) { - size = -1 - if value.IsValid() { - switch value.Kind() { - case reflect.Int: - fallthrough - case reflect.Int8: - fallthrough - case reflect.Int16: - fallthrough - case reflect.Int32: - fallthrough - case reflect.Int64: - size = value.Int() - } - } - return size +// PutObjectOptions represents options specified by user for PutObject call +type PutObjectOptions struct { + UserMetadata map[string]string + Progress io.Reader + ContentType string + ContentEncoding string + ContentDisposition string + CacheControl string + EncryptMaterials encrypt.Materials + NumThreads uint } -// getReaderSize - Determine the size of Reader if available. -func getReaderSize(reader io.Reader) (size int64, err error) { - size = -1 - if reader == nil { - return -1, nil - } - // Verify if there is a method by name 'Size'. - sizeFn := reflect.ValueOf(reader).MethodByName("Size") - // Verify if there is a method by name 'Len'. - lenFn := reflect.ValueOf(reader).MethodByName("Len") - if sizeFn.IsValid() { - if sizeFn.Kind() == reflect.Func { - // Call the 'Size' function and save its return value. - result := sizeFn.Call([]reflect.Value{}) - if len(result) == 1 { - size = toInt(result[0]) - } - } - } else if lenFn.IsValid() { - if lenFn.Kind() == reflect.Func { - // Call the 'Len' function and save its return value. - result := lenFn.Call([]reflect.Value{}) - if len(result) == 1 { - size = toInt(result[0]) - } - } +// getNumThreads - gets the number of threads to be used in the multipart +// put object operation +func (opts PutObjectOptions) getNumThreads() (numThreads int) { + if opts.NumThreads > 0 { + numThreads = int(opts.NumThreads) } else { - // Fallback to Stat() method, two possible Stat() structs exist. - switch v := reader.(type) { - case *os.File: - var st os.FileInfo - st, err = v.Stat() - if err != nil { - // Handle this case specially for "windows", - // certain files for example 'Stdin', 'Stdout' and - // 'Stderr' it is not allowed to fetch file information. - if runtime.GOOS == "windows" { - if strings.Contains(err.Error(), "GetFileInformationByHandle") { - return -1, nil - } - } - return - } - // Ignore if input is a directory, throw an error. - if st.Mode().IsDir() { - return -1, ErrInvalidArgument("Input file cannot be a directory.") - } - // Ignore 'Stdin', 'Stdout' and 'Stderr', since they - // represent *os.File type but internally do not - // implement Seekable calls. Ignore them and treat - // them like a stream with unknown length. - switch st.Name() { - case "stdin", "stdout", "stderr": - return - // Ignore read/write stream of os.Pipe() which have unknown length too. - case "|0", "|1": - return - } - var pos int64 - pos, err = v.Seek(0, 1) // SeekCurrent. - if err != nil { - return -1, err - } - size = st.Size() - pos - case *Object: - var st ObjectInfo - st, err = v.Stat() - if err != nil { - return - } - var pos int64 - pos, err = v.Seek(0, 1) // SeekCurrent. 
- if err != nil { - return -1, err - } - size = st.Size - pos + numThreads = totalWorkers + } + return +} + +// Header - constructs the headers from metadata entered by user in +// PutObjectOptions struct +func (opts PutObjectOptions) Header() (header http.Header) { + header = make(http.Header) + + if opts.ContentType != "" { + header["Content-Type"] = []string{opts.ContentType} + } else { + header["Content-Type"] = []string{"application/octet-stream"} + } + if opts.ContentEncoding != "" { + header["Content-Encoding"] = []string{opts.ContentEncoding} + } + if opts.ContentDisposition != "" { + header["Content-Disposition"] = []string{opts.ContentDisposition} + } + if opts.CacheControl != "" { + header["Cache-Control"] = []string{opts.CacheControl} + } + if opts.EncryptMaterials != nil { + header[amzHeaderIV] = []string{opts.EncryptMaterials.GetIV()} + header[amzHeaderKey] = []string{opts.EncryptMaterials.GetKey()} + header[amzHeaderMatDesc] = []string{opts.EncryptMaterials.GetDesc()} + } + for k, v := range opts.UserMetadata { + if !isAmzHeader(k) && !isStandardHeader(k) && !isSSEHeader(k) { + header["X-Amz-Meta-"+k] = []string{v} + } else { + header[k] = []string{v} } } - // Returns the size here. - return size, err + return +} + +// validate() checks if the UserMetadata map has standard headers or client side +// encryption headers and raises an error if so. +func (opts PutObjectOptions) validate() (err error) { + for k := range opts.UserMetadata { + if isStandardHeader(k) || isCSEHeader(k) { + return ErrInvalidArgument(k + " unsupported request parameter for user defined metadata") + } + } + return nil } // completedParts is a collection of parts sortable by their part numbers. @@ -152,40 +117,12 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part // - For size input as -1 PutObject does a multipart Put operation // until input stream reaches EOF. Maximum object size that can // be uploaded through this operation will be 5TiB. -func (c Client) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) { - return c.PutObjectWithMetadata(bucketName, objectName, reader, map[string][]string{ - "Content-Type": []string{contentType}, - }, nil) +func (c Client) PutObject(bucketName, objectName string, reader io.Reader, objectSize int64, + opts PutObjectOptions) (n int64, err error) { + return c.PutObjectWithContext(context.Background(), bucketName, objectName, reader, objectSize, opts) } -// PutObjectWithSize - is a helper PutObject similar in behavior to PutObject() -// but takes the size argument explicitly, this function avoids doing reflection -// internally to figure out the size of input stream. Also if the input size is -// lesser than 0 this function returns an error. 
-func (c Client) PutObjectWithSize(bucketName, objectName string, reader io.Reader, readerSize int64, metadata map[string][]string, progress io.Reader) (n int64, err error) { - return c.putObjectCommon(bucketName, objectName, reader, readerSize, metadata, progress) -} - -// PutObjectWithMetadata using AWS streaming signature V4 -func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) { - return c.PutObjectWithProgress(bucketName, objectName, reader, metadata, progress) -} - -// PutObjectWithProgress using AWS streaming signature V4 -func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) { - // Size of the object. - var size int64 - - // Get reader size. - size, err = getReaderSize(reader) - if err != nil { - return 0, err - } - - return c.putObjectCommon(bucketName, objectName, reader, size, metadata, progress) -} - -func (c Client) putObjectCommon(bucketName, objectName string, reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) { +func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { // Check for largest object size allowed. if size > int64(maxMultipartPutObjectSize) { return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) @@ -194,30 +131,27 @@ func (c Client) putObjectCommon(bucketName, objectName string, reader io.Reader, // NOTE: Streaming signature is not supported by GCS. if s3utils.IsGoogleEndpoint(c.endpointURL) { // Do not compute MD5 for Google Cloud Storage. - return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) + return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) } if c.overrideSignerType.IsV2() { if size >= 0 && size < minPartSize { - return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) + return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) } - return c.putObjectMultipart(bucketName, objectName, reader, size, metadata, progress) + return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts) } - if size < 0 { - return c.putObjectMultipartStreamNoLength(bucketName, objectName, reader, metadata, progress) + return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts) } if size < minPartSize { - return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) + return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) } - // For all sizes greater than 64MiB do multipart. - return c.putObjectMultipartStream(bucketName, objectName, reader, size, metadata, progress) + return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts) } -func (c Client) putObjectMultipartStreamNoLength(bucketName, objectName string, reader io.Reader, metadata map[string][]string, - progress io.Reader) (n int64, err error) { +func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) { // Input validation. 
if err = s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err @@ -238,16 +172,15 @@ func (c Client) putObjectMultipartStreamNoLength(bucketName, objectName string, if err != nil { return 0, err } - // Initiate a new multipart upload. - uploadID, err := c.newUploadID(bucketName, objectName, metadata) + uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { return 0, err } defer func() { if err != nil { - c.abortMultipartUpload(bucketName, objectName, uploadID) + c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) } }() @@ -263,21 +196,20 @@ func (c Client) putObjectMultipartStreamNoLength(bucketName, objectName string, for partNumber <= totalPartsCount { length, rErr := io.ReadFull(reader, buf) - if rErr == io.EOF { + if rErr == io.EOF && partNumber > 1 { break } if rErr != nil && rErr != io.ErrUnexpectedEOF { return 0, rErr } - // Update progress reader appropriately to the latest offset // as we read from the source. - rd := newHook(bytes.NewReader(buf[:length]), progress) + rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) // Proceed to upload the part. var objPart ObjectPart - objPart, err = c.uploadPart(bucketName, objectName, uploadID, rd, partNumber, - nil, nil, int64(length), metadata) + objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, + "", "", int64(length), opts.UserMetadata) if err != nil { return totalUploadedSize, err } @@ -313,7 +245,7 @@ func (c Client) putObjectMultipartStreamNoLength(bucketName, objectName string, // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) - if _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload); err != nil { + if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil { return totalUploadedSize, err } diff --git a/vendor/github.com/minio/minio-go/api-put-object_test.go b/vendor/github.com/minio/minio-go/api-put-object_test.go new file mode 100644 index 000000000..e0557e293 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-put-object_test.go @@ -0,0 +1,53 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package minio + +import ( + "testing" +) + +func TestPutObjectOptionsValidate(t *testing.T) { + testCases := []struct { + metadata map[string]string + shouldPass bool + }{ + {map[string]string{"Content-Type": "custom/content-type"}, false}, + {map[string]string{"content-type": "custom/content-type"}, false}, + {map[string]string{"Content-Encoding": "gzip"}, false}, + {map[string]string{"Cache-Control": "blah"}, false}, + {map[string]string{"Content-Disposition": "something"}, false}, + {map[string]string{"my-custom-header": "blah"}, true}, + {map[string]string{"X-Amz-Iv": "blah"}, false}, + {map[string]string{"X-Amz-Key": "blah"}, false}, + {map[string]string{"X-Amz-Key-prefixed-header": "blah"}, false}, + {map[string]string{"custom-X-Amz-Key-middle": "blah"}, true}, + {map[string]string{"my-custom-header-X-Amz-Key": "blah"}, true}, + {map[string]string{"X-Amz-Matdesc": "blah"}, false}, + {map[string]string{"blah-X-Amz-Matdesc": "blah"}, true}, + {map[string]string{"X-Amz-MatDesc-suffix": "blah"}, true}, + {map[string]string{"x-amz-meta-X-Amz-Iv": "blah"}, false}, + {map[string]string{"x-amz-meta-X-Amz-Key": "blah"}, false}, + {map[string]string{"x-amz-meta-X-Amz-Matdesc": "blah"}, false}, + } + for i, testCase := range testCases { + err := PutObjectOptions{UserMetadata: testCase.metadata}.validate() + + if testCase.shouldPass && err != nil { + t.Errorf("Test %d - output did not match with reference results", i+1) + } + } +} diff --git a/vendor/github.com/minio/minio-go/api-remove.go b/vendor/github.com/minio/minio-go/api-remove.go index 3574cbc1a..f14b2eb7f 100644 --- a/vendor/github.com/minio/minio-go/api-remove.go +++ b/vendor/github.com/minio/minio-go/api-remove.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,6 +19,7 @@ package minio import ( "bytes" + "context" "encoding/xml" "io" "net/http" @@ -36,9 +38,9 @@ func (c Client) RemoveBucket(bucketName string) error { return err } // Execute DELETE on bucket. - resp, err := c.executeMethod("DELETE", requestMetadata{ - bucketName: bucketName, - contentSHA256Bytes: emptySHA256, + resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{ + bucketName: bucketName, + contentSHA256Hex: emptySHA256Hex, }) defer closeResponse(resp) if err != nil { @@ -66,10 +68,10 @@ func (c Client) RemoveObject(bucketName, objectName string) error { return err } // Execute DELETE on objectName. - resp, err := c.executeMethod("DELETE", requestMetadata{ - bucketName: bucketName, - objectName: objectName, - contentSHA256Bytes: emptySHA256, + resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + contentSHA256Hex: emptySHA256Hex, }) defer closeResponse(resp) if err != nil { @@ -187,13 +189,13 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan // Generate remove multi objects XML request removeBytes := generateRemoveMultiObjectsRequest(batch) // Execute GET on bucket to list objects. 
- resp, err := c.executeMethod("POST", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: bytes.NewReader(removeBytes), - contentLength: int64(len(removeBytes)), - contentMD5Bytes: sumMD5(removeBytes), - contentSHA256Bytes: sum256(removeBytes), + resp, err := c.executeMethod(context.Background(), "POST", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(removeBytes), + contentLength: int64(len(removeBytes)), + contentMD5Base64: sumMD5Base64(removeBytes), + contentSHA256Hex: sum256Hex(removeBytes), }) if err != nil { for _, b := range batch { @@ -227,7 +229,7 @@ func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error { } if uploadID != "" { // Upload id found, abort the incomplete multipart upload. - err := c.abortMultipartUpload(bucketName, objectName, uploadID) + err := c.abortMultipartUpload(context.Background(), bucketName, objectName, uploadID) if err != nil { return err } @@ -237,7 +239,7 @@ func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error { // abortMultipartUpload aborts a multipart upload for the given // uploadID, all previously uploaded parts are deleted. -func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) error { +func (c Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return err @@ -251,11 +253,11 @@ func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) er urlValues.Set("uploadId", uploadID) // Execute DELETE on multipart upload. - resp, err := c.executeMethod("DELETE", requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentSHA256Bytes: emptySHA256, + resp, err := c.executeMethod(ctx, "DELETE", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, }) defer closeResponse(resp) if err != nil { diff --git a/vendor/github.com/minio/minio-go/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/api-s3-datatypes.go index 4b297407b..8d8880c05 100644 --- a/vendor/github.com/minio/minio-go/api-s3-datatypes.go +++ b/vendor/github.com/minio/minio-go/api-s3-datatypes.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -128,7 +129,7 @@ type initiator struct { // copyObjectResult container for copy object response. type copyObjectResult struct { ETag string - LastModified string // time string format "2006-01-02T15:04:05.000Z" + LastModified time.Time // time string format "2006-01-02T15:04:05.000Z" } // ObjectPart container for particular part of an object. diff --git a/vendor/github.com/minio/minio-go/api-stat.go b/vendor/github.com/minio/minio-go/api-stat.go index 5f06bfc9e..fea112e60 100644 --- a/vendor/github.com/minio/minio-go/api-stat.go +++ b/vendor/github.com/minio/minio-go/api-stat.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,6 +18,7 @@ package minio import ( + "context" "net/http" "strconv" "strings" @@ -33,9 +35,9 @@ func (c Client) BucketExists(bucketName string) (bool, error) { } // Execute HEAD on bucketName. - resp, err := c.executeMethod("HEAD", requestMetadata{ - bucketName: bucketName, - contentSHA256Bytes: emptySHA256, + resp, err := c.executeMethod(context.Background(), "HEAD", requestMetadata{ + bucketName: bucketName, + contentSHA256Hex: emptySHA256Hex, }) defer closeResponse(resp) if err != nil { @@ -80,7 +82,7 @@ func extractObjMetadata(header http.Header) http.Header { } // StatObject verifies if object exists and you have permission to access. -func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) { +func (c Client) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ObjectInfo{}, err @@ -88,12 +90,11 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) { if err := s3utils.CheckValidObjectName(objectName); err != nil { return ObjectInfo{}, err } - reqHeaders := NewHeadReqHeaders() - return c.statObject(bucketName, objectName, reqHeaders) + return c.statObject(bucketName, objectName, opts) } // Lower level API for statObject supporting pre-conditions and range headers. -func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHeaders) (ObjectInfo, error) { +func (c Client) statObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ObjectInfo{}, err @@ -102,17 +103,12 @@ func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHead return ObjectInfo{}, err } - customHeader := make(http.Header) - for k, v := range reqHeaders.Header { - customHeader[k] = v - } - // Execute HEAD on objectName. - resp, err := c.executeMethod("HEAD", requestMetadata{ - bucketName: bucketName, - objectName: objectName, - contentSHA256Bytes: emptySHA256, - customHeader: customHeader, + resp, err := c.executeMethod(context.Background(), "HEAD", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + contentSHA256Hex: emptySHA256Hex, + customHeader: opts.Header(), }) defer closeResponse(resp) if err != nil { diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go index 946a58869..2d83af063 100644 --- a/vendor/github.com/minio/minio-go/api.go +++ b/vendor/github.com/minio/minio-go/api.go @@ -1,6 +1,6 @@ /* * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2015, 2016, 2017 Minio, Inc. + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,10 +19,9 @@ package minio import ( "bytes" + "context" "crypto/md5" "crypto/sha256" - "encoding/base64" - "encoding/hex" "errors" "fmt" "hash" @@ -87,7 +86,7 @@ type Client struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "3.0.3" + libraryVersion = "4.0.3" ) // User Agent should always following the below style. @@ -178,18 +177,6 @@ func (r *lockedRandSource) Seed(seed int64) { r.lk.Unlock() } -// redirectHeaders copies all headers when following a redirect URL. 
-// This won't be needed anymore from go 1.8 (https://github.com/golang/go/issues/4800) -func redirectHeaders(req *http.Request, via []*http.Request) error { - if len(via) == 0 { - return nil - } - for key, val := range via[0].Header { - req.Header[key] = val - } - return nil -} - // getRegionFromURL - parse region from URL if present. func getRegionFromURL(u url.URL) (region string) { region = "" @@ -236,8 +223,7 @@ func privateNew(endpoint string, creds *credentials.Credentials, secure bool, re // Instantiate http client and bucket location cache. clnt.httpClient = &http.Client{ - Transport: defaultMinioTransport, - CheckRedirect: redirectHeaders, + Transport: defaultMinioTransport, } // Sets custom region, if region is empty bucket location cache is used automatically. @@ -356,11 +342,11 @@ type requestMetadata struct { expires int64 // Generated by our internal code. - bucketLocation string - contentBody io.Reader - contentLength int64 - contentSHA256Bytes []byte - contentMD5Bytes []byte + bucketLocation string + contentBody io.Reader + contentLength int64 + contentMD5Base64 string // carries base64 encoded md5sum + contentSHA256Hex string // carries hex encoded sha256sum } // dumpHTTP - dump HTTP request and response. @@ -494,9 +480,11 @@ var successStatus = []int{ // executeMethod - instantiates a given method, and retries the // request upon any error up to maxRetries attempts in a binomially // delayed manner using a standard back off algorithm. -func (c Client) executeMethod(method string, metadata requestMetadata) (res *http.Response, err error) { +func (c Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) { var isRetryable bool // Indicates if request can be retried. var bodySeeker io.Seeker // Extracted seeker from io.Reader. + var reqRetry = MaxRetry // Indicates how many times we can retry the request + if metadata.contentBody != nil { // Check if body is seekable then it is retryable. bodySeeker, isRetryable = metadata.contentBody.(io.Seeker) @@ -504,6 +492,11 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt case os.Stdin, os.Stdout, os.Stderr: isRetryable = false } + // Retry only when reader is seekable + if !isRetryable { + reqRetry = 1 + } + // Figure out if the body can be closed - if yes // we will definitely close it upon the function // return. @@ -522,7 +515,7 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt // Blank indentifier is kept here on purpose since 'range' without // blank identifiers is only supported since go1.4 // https://golang.org/doc/go1.4#forrange. - for range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) { + for range c.newRetryTimer(reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) { // Retry executes the following function body if request has an // error until maxRetries have been exhausted, retry attempts are // performed after waiting for a given period of time in a @@ -545,6 +538,8 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt } return nil, err } + // Add context to request + req = req.WithContext(ctx) // Initiate the request. res, err = c.do(req) @@ -720,8 +715,8 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R } // set md5Sum for content protection. 
- if metadata.contentMD5Bytes != nil { - req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes)) + if len(metadata.contentMD5Base64) > 0 { + req.Header.Set("Content-Md5", metadata.contentMD5Base64) } // For anonymous requests just return. @@ -742,8 +737,8 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R default: // Set sha256 sum for signature calculation only with signature version '4'. shaHeader := unsignedPayload - if len(metadata.contentSHA256Bytes) > 0 { - shaHeader = hex.EncodeToString(metadata.contentSHA256Bytes) + if metadata.contentSHA256Hex != "" { + shaHeader = metadata.contentSHA256Hex } req.Header.Set("X-Amz-Content-Sha256", shaHeader) diff --git a/vendor/github.com/minio/minio-go/api_unit_test.go b/vendor/github.com/minio/minio-go/api_unit_test.go index f15a6eed3..96fd8dd92 100644 --- a/vendor/github.com/minio/minio-go/api_unit_test.go +++ b/vendor/github.com/minio/minio-go/api_unit_test.go @@ -1,6 +1,6 @@ /* * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2015, 2016, 2017 Minio, Inc. + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,13 +18,8 @@ package minio import ( - "bytes" - "io" - "io/ioutil" "net/http" "net/url" - "os" - "strings" "testing" "github.com/minio/minio-go/pkg/credentials" @@ -41,129 +36,6 @@ func (c *customReader) Size() (n int64) { return 10 } -// Tests getReaderSize() for various Reader types. -func TestGetReaderSize(t *testing.T) { - var reader io.Reader - size, err := getReaderSize(reader) - if err != nil { - t.Fatal("Error:", err) - } - if size != -1 { - t.Fatal("Reader shouldn't have any length.") - } - - bytesReader := bytes.NewReader([]byte("Hello World")) - size, err = getReaderSize(bytesReader) - if err != nil { - t.Fatal("Error:", err) - } - if size != int64(len("Hello World")) { - t.Fatalf("Reader length doesn't match got: %v, want: %v", size, len("Hello World")) - } - - size, err = getReaderSize(new(customReader)) - if err != nil { - t.Fatal("Error:", err) - } - if size != int64(10) { - t.Fatalf("Reader length doesn't match got: %v, want: %v", size, 10) - } - - stringsReader := strings.NewReader("Hello World") - size, err = getReaderSize(stringsReader) - if err != nil { - t.Fatal("Error:", err) - } - if size != int64(len("Hello World")) { - t.Fatalf("Reader length doesn't match got: %v, want: %v", size, len("Hello World")) - } - - // Create request channel. - reqCh := make(chan getRequest, 1) - // Create response channel. - resCh := make(chan getResponse, 1) - // Create done channel. - doneCh := make(chan struct{}) - - objectInfo := ObjectInfo{Size: 10} - // Create the first request. - firstReq := getRequest{ - isReadOp: false, // Perform only a HEAD object to get objectInfo. - isFirstReq: true, - } - // Create the expected response. - firstRes := getResponse{ - objectInfo: objectInfo, - } - // Send the expected response. - resCh <- firstRes - - // Test setting size on the first request. - objectReaderFirstReq := newObject(reqCh, resCh, doneCh) - defer objectReaderFirstReq.Close() - // Not checking the response here...just that the reader size is correct. - _, err = objectReaderFirstReq.doGetRequest(firstReq) - if err != nil { - t.Fatal("Error:", err) - } - - // Validate that the reader size is the objectInfo size. 
- size, err = getReaderSize(objectReaderFirstReq) - if err != nil { - t.Fatal("Error:", err) - } - if size != int64(10) { - t.Fatalf("Reader length doesn't match got: %d, wanted %d", size, objectInfo.Size) - } - - fileReader, err := ioutil.TempFile(os.TempDir(), "prefix") - if err != nil { - t.Fatal("Error:", err) - } - defer fileReader.Close() - defer os.RemoveAll(fileReader.Name()) - - size, err = getReaderSize(fileReader) - if err != nil { - t.Fatal("Error:", err) - } - if size == -1 { - t.Fatal("Reader length for file cannot be -1.") - } - - // Verify for standard input, output and error file descriptors. - size, err = getReaderSize(os.Stdin) - if err != nil { - t.Fatal("Error:", err) - } - if size != -1 { - t.Fatal("Stdin should have length of -1.") - } - size, err = getReaderSize(os.Stdout) - if err != nil { - t.Fatal("Error:", err) - } - if size != -1 { - t.Fatal("Stdout should have length of -1.") - } - size, err = getReaderSize(os.Stderr) - if err != nil { - t.Fatal("Error:", err) - } - if size != -1 { - t.Fatal("Stderr should have length of -1.") - } - file, err := os.Open(os.TempDir()) - if err != nil { - t.Fatal("Error:", err) - } - defer file.Close() - _, err = getReaderSize(file) - if err == nil { - t.Fatal("Input file as directory should throw an error.") - } -} - // Tests get region from host URL. func TestGetRegionFromURL(t *testing.T) { testCases := []struct { @@ -352,7 +224,7 @@ func TestMakeTargetURL(t *testing.T) { // Test 6 {"localhost:9000", false, "mybucket", "myobject", "", nil, url.URL{Host: "localhost:9000", Scheme: "http", Path: "/mybucket/myobject"}, nil}, // Test 7, testing with query - {"localhost:9000", false, "mybucket", "myobject", "", map[string][]string{"param": []string{"val"}}, url.URL{Host: "localhost:9000", Scheme: "http", Path: "/mybucket/myobject", RawQuery: "param=val"}, nil}, + {"localhost:9000", false, "mybucket", "myobject", "", map[string][]string{"param": {"val"}}, url.URL{Host: "localhost:9000", Scheme: "http", Path: "/mybucket/myobject", RawQuery: "param=val"}, nil}, // Test 8, testing with port 80 {"localhost:80", false, "mybucket", "myobject", "", nil, url.URL{Host: "localhost", Scheme: "http", Path: "/mybucket/myobject"}, nil}, // Test 9, testing with port 443 diff --git a/vendor/github.com/minio/minio-go/bucket-cache.go b/vendor/github.com/minio/minio-go/bucket-cache.go index 3ad06da3a..5d56cdf42 100644 --- a/vendor/github.com/minio/minio-go/bucket-cache.go +++ b/vendor/github.com/minio/minio-go/bucket-cache.go @@ -1,6 +1,6 @@ /* * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2015, 2016, 2017 Minio, Inc. + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,7 +18,6 @@ package minio import ( - "encoding/hex" "net/http" "net/url" "path" @@ -209,11 +208,9 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro } // Set sha256 sum for signature calculation only with signature version '4'. 
- var contentSha256 string + contentSha256 := emptySHA256Hex if c.secure { contentSha256 = unsignedPayload - } else { - contentSha256 = hex.EncodeToString(sum256([]byte{})) } req.Header.Set("X-Amz-Content-Sha256", contentSha256) diff --git a/vendor/github.com/minio/minio-go/bucket-cache_test.go b/vendor/github.com/minio/minio-go/bucket-cache_test.go index 6ae4e7be4..fd7e7f344 100644 --- a/vendor/github.com/minio/minio-go/bucket-cache_test.go +++ b/vendor/github.com/minio/minio-go/bucket-cache_test.go @@ -1,6 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2015, 2016, 2017 Minio, Inc. + * Copyright + * 2015, 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,7 +19,6 @@ package minio import ( "bytes" - "encoding/hex" "encoding/xml" "io/ioutil" "net/http" @@ -116,11 +115,9 @@ func TestGetBucketLocationRequest(t *testing.T) { // with signature version '4'. switch { case signerType.IsV4(): - var contentSha256 string + contentSha256 := emptySHA256Hex if c.secure { contentSha256 = unsignedPayload - } else { - contentSha256 = hex.EncodeToString(sum256([]byte{})) } req.Header.Set("X-Amz-Content-Sha256", contentSha256) req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1") diff --git a/vendor/github.com/minio/minio-go/bucket-notification.go b/vendor/github.com/minio/minio-go/bucket-notification.go index 5ac52e5f7..1b9d6a0c7 100644 --- a/vendor/github.com/minio/minio-go/bucket-notification.go +++ b/vendor/github.com/minio/minio-go/bucket-notification.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/constants.go b/vendor/github.com/minio/minio-go/constants.go index 9771d2f92..b5945e7fa 100644 --- a/vendor/github.com/minio/minio-go/constants.go +++ b/vendor/github.com/minio/minio-go/constants.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -50,7 +51,7 @@ const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 const unsignedPayload = "UNSIGNED-PAYLOAD" // Total number of parallel workers used for multipart operation. -var totalWorkers = 3 +const totalWorkers = 4 // Signature related constants. const ( diff --git a/vendor/github.com/minio/minio-go/core.go b/vendor/github.com/minio/minio-go/core.go index 4b1054a69..809653601 100644 --- a/vendor/github.com/minio/minio-go/core.go +++ b/vendor/github.com/minio/minio-go/core.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -17,7 +18,9 @@ package minio import ( + "context" "io" + "strings" "github.com/minio/minio-go/pkg/policy" ) @@ -52,14 +55,35 @@ func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, return c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, maxkeys) } -// PutObject - Upload object. Uploads using single PUT call. -func (c Core) PutObject(bucket, object string, size int64, data io.Reader, md5Sum, sha256Sum []byte, metadata map[string][]string) (ObjectInfo, error) { - return c.putObjectDo(bucket, object, data, md5Sum, sha256Sum, size, metadata) +// CopyObject - copies an object from source object to destination object on server side. +func (c Core) CopyObject(sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string) (ObjectInfo, error) { + return c.copyObjectDo(context.Background(), sourceBucket, sourceObject, destBucket, destObject, metadata) } -// NewMultipartUpload - Initiates new multipart upload and returns the new uploaID. -func (c Core) NewMultipartUpload(bucket, object string, metadata map[string][]string) (uploadID string, err error) { - result, err := c.initiateMultipartUpload(bucket, object, metadata) +// PutObject - Upload object. Uploads using single PUT call. +func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, metadata map[string]string) (ObjectInfo, error) { + opts := PutObjectOptions{} + m := make(map[string]string) + for k, v := range metadata { + if strings.ToLower(k) == "content-encoding" { + opts.ContentEncoding = v + } else if strings.ToLower(k) == "content-disposition" { + opts.ContentDisposition = v + } else if strings.ToLower(k) == "content-type" { + opts.ContentType = v + } else if strings.ToLower(k) == "cache-control" { + opts.CacheControl = v + } else { + m[k] = metadata[k] + } + } + opts.UserMetadata = m + return c.putObjectDo(context.Background(), bucket, object, data, md5Base64, sha256Hex, size, opts) +} + +// NewMultipartUpload - Initiates new multipart upload and returns the new uploadID. +func (c Core) NewMultipartUpload(bucket, object string, opts PutObjectOptions) (uploadID string, err error) { + result, err := c.initiateMultipartUpload(context.Background(), bucket, object, opts) return result.UploadID, err } @@ -69,14 +93,14 @@ func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, de } // PutObjectPart - Upload an object part. -func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Sum, sha256Sum []byte) (ObjectPart, error) { - return c.PutObjectPartWithMetadata(bucket, object, uploadID, partID, size, data, md5Sum, sha256Sum, nil) +func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string) (ObjectPart, error) { + return c.PutObjectPartWithMetadata(bucket, object, uploadID, partID, data, size, md5Base64, sha256Hex, nil) } // PutObjectPartWithMetadata - upload an object part with additional request metadata. 
-func (c Core) PutObjectPartWithMetadata(bucket, object, uploadID string, partID int, - size int64, data io.Reader, md5Sum, sha256Sum []byte, metadata map[string][]string) (ObjectPart, error) { - return c.uploadPart(bucket, object, uploadID, data, partID, md5Sum, sha256Sum, size, metadata) +func (c Core) PutObjectPartWithMetadata(bucket, object, uploadID string, partID int, data io.Reader, + size int64, md5Base64, sha256Hex string, metadata map[string]string) (ObjectPart, error) { + return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, metadata) } // ListObjectParts - List uploaded parts of an incomplete upload.x @@ -86,7 +110,7 @@ func (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker // CompleteMultipartUpload - Concatenate uploaded parts and commit to an object. func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) error { - _, err := c.completeMultipartUpload(bucket, object, uploadID, completeMultipartUpload{ + _, err := c.completeMultipartUpload(context.Background(), bucket, object, uploadID, completeMultipartUpload{ Parts: parts, }) return err @@ -94,7 +118,7 @@ func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []C // AbortMultipartUpload - Abort an incomplete upload. func (c Core) AbortMultipartUpload(bucket, object, uploadID string) error { - return c.abortMultipartUpload(bucket, object, uploadID) + return c.abortMultipartUpload(context.Background(), bucket, object, uploadID) } // GetBucketPolicy - fetches bucket access policy for a given bucket. @@ -110,12 +134,12 @@ func (c Core) PutBucketPolicy(bucket string, bucketPolicy policy.BucketAccessPol // GetObject is a lower level API implemented to support reading // partial objects and also downloading objects with special conditions // matching etag, modtime etc. -func (c Core) GetObject(bucketName, objectName string, reqHeaders RequestHeaders) (io.ReadCloser, ObjectInfo, error) { - return c.getObject(bucketName, objectName, reqHeaders) +func (c Core) GetObject(bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, error) { + return c.getObject(context.Background(), bucketName, objectName, opts) } // StatObject is a lower level API implemented to support special // conditions matching etag, modtime on a request. -func (c Core) StatObject(bucketName, objectName string, reqHeaders RequestHeaders) (ObjectInfo, error) { - return c.statObject(bucketName, objectName, reqHeaders) +func (c Core) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { + return c.statObject(bucketName, objectName, opts) } diff --git a/vendor/github.com/minio/minio-go/core_test.go b/vendor/github.com/minio/minio-go/core_test.go index 8cadc251b..253a25892 100644 --- a/vendor/github.com/minio/minio-go/core_test.go +++ b/vendor/github.com/minio/minio-go/core_test.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -25,7 +26,6 @@ import ( "testing" "time" - "crypto/md5" "math/rand" ) @@ -103,7 +103,9 @@ func TestGetObjectCore(t *testing.T) { // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - n, err := c.Client.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream") + n, err := c.Client.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), PutObjectOptions{ + ContentType: "binary/octet-stream", + }) if err != nil { t.Fatal("Error:", err, bucketName, objectName) } @@ -112,8 +114,6 @@ func TestGetObjectCore(t *testing.T) { t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) } - reqHeaders := NewGetReqHeaders() - offset := int64(2048) // read directly @@ -122,8 +122,9 @@ func TestGetObjectCore(t *testing.T) { buf3 := make([]byte, n) buf4 := make([]byte, 1) - reqHeaders.SetRange(offset, offset+int64(len(buf1))-1) - reader, objectInfo, err := c.GetObject(bucketName, objectName, reqHeaders) + opts := GetObjectOptions{} + opts.SetRange(offset, offset+int64(len(buf1))-1) + reader, objectInfo, err := c.GetObject(bucketName, objectName, opts) if err != nil { t.Fatal(err) } @@ -141,8 +142,8 @@ func TestGetObjectCore(t *testing.T) { } offset += 512 - reqHeaders.SetRange(offset, offset+int64(len(buf2))-1) - reader, objectInfo, err = c.GetObject(bucketName, objectName, reqHeaders) + opts.SetRange(offset, offset+int64(len(buf2))-1) + reader, objectInfo, err = c.GetObject(bucketName, objectName, opts) if err != nil { t.Fatal(err) } @@ -160,8 +161,8 @@ func TestGetObjectCore(t *testing.T) { t.Fatal("Error: Incorrect read between two GetObject from same offset.") } - reqHeaders.SetRange(0, int64(len(buf3))) - reader, objectInfo, err = c.GetObject(bucketName, objectName, reqHeaders) + opts.SetRange(0, int64(len(buf3))) + reader, objectInfo, err = c.GetObject(bucketName, objectName, opts) if err != nil { t.Fatal(err) } @@ -180,9 +181,9 @@ func TestGetObjectCore(t *testing.T) { t.Fatal("Error: Incorrect data read in GetObject, than what was previously upoaded.") } - reqHeaders = NewGetReqHeaders() - reqHeaders.SetMatchETag("etag") - _, _, err = c.GetObject(bucketName, objectName, reqHeaders) + opts = GetObjectOptions{} + opts.SetMatchETag("etag") + _, _, err = c.GetObject(bucketName, objectName, opts) if err == nil { t.Fatal("Unexpected GetObject should fail with mismatching etags") } @@ -190,9 +191,9 @@ func TestGetObjectCore(t *testing.T) { t.Fatalf("Expected \"PreconditionFailed\" as code, got %s instead", errResp.Code) } - reqHeaders = NewGetReqHeaders() - reqHeaders.SetMatchETagExcept("etag") - reader, objectInfo, err = c.GetObject(bucketName, objectName, reqHeaders) + opts = GetObjectOptions{} + opts.SetMatchETagExcept("etag") + reader, objectInfo, err = c.GetObject(bucketName, objectName, opts) if err != nil { t.Fatal(err) } @@ -210,9 +211,9 @@ func TestGetObjectCore(t *testing.T) { t.Fatal("Error: Incorrect data read in GetObject, than what was previously upoaded.") } - reqHeaders = NewGetReqHeaders() - reqHeaders.SetRange(0, 0) - reader, objectInfo, err = c.GetObject(bucketName, objectName, reqHeaders) + opts = GetObjectOptions{} + opts.SetRange(0, 0) + reader, objectInfo, err = c.GetObject(bucketName, objectName, opts) if err != nil { t.Fatal(err) } @@ -275,12 +276,12 @@ func TestGetObjectContentEncoding(t *testing.T) { // Generate data more than 32K buf := bytes.Repeat([]byte("3"), rand.Intn(1<<20)+32*1024) - m := make(map[string][]string) - m["Content-Encoding"] = []string{"gzip"} // Save the data 
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - n, err := c.Client.PutObjectWithMetadata(bucketName, objectName, bytes.NewReader(buf), m, nil) + n, err := c.Client.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), PutObjectOptions{ + ContentEncoding: "gzip", + }) if err != nil { t.Fatal("Error:", err, bucketName, objectName) } @@ -289,8 +290,7 @@ func TestGetObjectContentEncoding(t *testing.T) { t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) } - reqHeaders := NewGetReqHeaders() - rwc, objInfo, err := c.GetObject(bucketName, objectName, reqHeaders) + rwc, objInfo, err := c.GetObject(bucketName, objectName, GetObjectOptions{}) if err != nil { t.Fatalf("Error: %v", err) } @@ -370,6 +370,120 @@ func TestGetBucketPolicy(t *testing.T) { } } +// Tests Core CopyObject API implementation. +func TestCoreCopyObject(t *testing.T) { + if testing.Short() { + t.Skip("skipping functional tests for short runs") + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := NewCore( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableSecurity)), + ) + if err != nil { + t.Fatal("Error:", err) + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + t.Fatal("Error:", err, bucketName) + } + + buf := bytes.Repeat([]byte("a"), 32*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ + "Content-Type": "binary/octet-stream", + }) + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + + if objInfo.Size != int64(len(buf)) { + t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size) + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + + cobjInfo, err := c.CopyObject(bucketName, objectName, destBucketName, destObjectName, map[string]string{ + "X-Amz-Metadata-Directive": "REPLACE", + "Content-Type": "application/javascript", + }) + if err != nil { + t.Fatal("Error:", err, bucketName, objectName, destBucketName, destObjectName) + } + if cobjInfo.ETag != objInfo.ETag { + t.Fatalf("Error: expected etag to be same as source object %s, but found different etag :%s", objInfo.ETag, cobjInfo.ETag) + } + + // Attempt to read from destBucketName and object name. 
+ r, err := c.Client.GetObject(destBucketName, destObjectName, GetObjectOptions{}) + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + + st, err := r.Stat() + if err != nil { + t.Fatal("Error:", err, bucketName, objectName) + } + + if st.Size != int64(len(buf)) { + t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n", + len(buf), st.Size) + } + + if st.ContentType != "application/javascript" { + t.Fatalf("Error: Content types don't match, expected: application/javascript, found: %+v\n", st.ContentType) + } + + if st.ETag != objInfo.ETag { + t.Fatalf("Error: expected etag to be same as source object %s, but found different etag :%s", objInfo.ETag, st.ETag) + } + + if err := r.Close(); err != nil { + t.Fatal("Error:", err) + } + + if err := r.Close(); err == nil { + t.Fatal("Error: object is already closed, should return error") + } + + err = c.RemoveObject(bucketName, objectName) + if err != nil { + t.Fatal("Error: ", err) + } + + err = c.RemoveObject(destBucketName, destObjectName) + if err != nil { + t.Fatal("Error: ", err) + } + + err = c.RemoveBucket(bucketName) + if err != nil { + t.Fatal("Error:", err) + } + + // Do not need to remove destBucketName its same as bucketName. +} + // Test Core PutObject. func TestCorePutObject(t *testing.T) { if testing.Short() { @@ -405,21 +519,21 @@ func TestCorePutObject(t *testing.T) { t.Fatal("Error:", err, bucketName) } - buf := bytes.Repeat([]byte("a"), minPartSize) + buf := bytes.Repeat([]byte("a"), 32*1024) // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") // Object content type objectContentType := "binary/octet-stream" - metadata := make(map[string][]string) - metadata["Content-Type"] = []string{objectContentType} + metadata := make(map[string]string) + metadata["Content-Type"] = objectContentType - objInfo, err := c.PutObject(bucketName, objectName, int64(len(buf)), bytes.NewReader(buf), md5.New().Sum(nil), nil, metadata) + objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "1B2M2Y8AsgTpgAmY7PhCfg==", "", metadata) if err == nil { - t.Fatal("Error expected: nil, got: ", err) + t.Fatal("Error expected: error, got: nil(success)") } - objInfo, err = c.PutObject(bucketName, objectName, int64(len(buf)), bytes.NewReader(buf), nil, nil, metadata) + objInfo, err = c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", metadata) if err != nil { t.Fatal("Error:", err, bucketName, objectName) } @@ -429,7 +543,7 @@ func TestCorePutObject(t *testing.T) { } // Read the data back - r, err := c.Client.GetObject(bucketName, objectName) + r, err := c.Client.GetObject(bucketName, objectName, GetObjectOptions{}) if err != nil { t.Fatal("Error:", err, bucketName, objectName) } @@ -490,18 +604,17 @@ func TestCoreGetObjectMetadata(t *testing.T) { t.Fatal("Error:", err, bucketName) } - metadata := map[string][]string{ - "X-Amz-Meta-Key-1": {"Val-1"}, + metadata := map[string]string{ + "X-Amz-Meta-Key-1": "Val-1", } - _, err = core.PutObject(bucketName, "my-objectname", 5, - bytes.NewReader([]byte("hello")), nil, nil, metadata) + _, err = core.PutObject(bucketName, "my-objectname", + bytes.NewReader([]byte("hello")), 5, "", "", metadata) if err != nil { log.Fatalln(err) } - reader, objInfo, err := core.GetObject(bucketName, "my-objectname", - RequestHeaders{}) + reader, objInfo, err := core.GetObject(bucketName, "my-objectname", GetObjectOptions{}) if err != nil { log.Fatalln(err) } diff --git 
a/vendor/github.com/minio/minio-go/docs/API.md b/vendor/github.com/minio/minio-go/docs/API.md index bfdd42db6..d1026ee22 100644 --- a/vendor/github.com/minio/minio-go/docs/API.md +++ b/vendor/github.com/minio/minio-go/docs/API.md @@ -54,19 +54,20 @@ func main() { | :--- | :--- | :--- | :--- | :--- | :--- | | [`MakeBucket`](#MakeBucket) | [`GetObject`](#GetObject) | [`NewSymmetricKey`](#NewSymmetricKey) | [`PresignedGetObject`](#PresignedGetObject) | [`SetBucketPolicy`](#SetBucketPolicy) | [`SetAppInfo`](#SetAppInfo) | | [`ListBuckets`](#ListBuckets) | [`PutObject`](#PutObject) | [`NewAsymmetricKey`](#NewAsymmetricKey) | [`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) | [`SetCustomTransport`](#SetCustomTransport) | -| [`BucketExists`](#BucketExists) | [`PutObjectStreaming`](#PutObjectStreaming) | [`GetEncryptedObject`](#GetEncryptedObject) | [`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) | [`TraceOn`](#TraceOn) | -| [`RemoveBucket`](#RemoveBucket) | [`CopyObject`](#CopyObject) | [`PutEncryptedObject`](#PutEncryptedObject) | | [`SetBucketNotification`](#SetBucketNotification) | [`TraceOff`](#TraceOff) | -| [`ListObjects`](#ListObjects) | [`StatObject`](#StatObject) | [`NewSSEInfo`](#NewSSEInfo) | | [`GetBucketNotification`](#GetBucketNotification) | [`SetS3TransferAccelerate`](#SetS3TransferAccelerate) | -| [`ListObjectsV2`](#ListObjectsV2) | [`RemoveObject`](#RemoveObject) | | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) | | -| [`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveObjects`](#RemoveObjects) | | | [`ListenBucketNotification`](#ListenBucketNotification) | | -| | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | | | | +| [`BucketExists`](#BucketExists) | [`CopyObject`](#CopyObject) | [`GetEncryptedObject`](#GetEncryptedObject) | [`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) | [`TraceOn`](#TraceOn) | +| [`RemoveBucket`](#RemoveBucket) | [`StatObject`](#StatObject) | [`PutEncryptedObject`](#PutEncryptedObject) | | [`SetBucketNotification`](#SetBucketNotification) | [`TraceOff`](#TraceOff) | +| [`ListObjects`](#ListObjects) | [`RemoveObject`](#RemoveObject) | [`NewSSEInfo`](#NewSSEInfo) | | [`GetBucketNotification`](#GetBucketNotification) | [`SetS3TransferAccelerate`](#SetS3TransferAccelerate) | +| [`ListObjectsV2`](#ListObjectsV2) | [`RemoveObjects`](#RemoveObjects) | [`FPutEncryptedObject`](#FPutEncryptedObject) | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) | | +| [`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | | [`ListenBucketNotification`](#ListenBucketNotification) | | | | [`FPutObject`](#FPutObject) | | | | | | | [`FGetObject`](#FGetObject) | | | | | | | [`ComposeObject`](#ComposeObject) | | | | | | | [`NewSourceInfo`](#NewSourceInfo) | | | | | | | [`NewDestinationInfo`](#NewDestinationInfo) | | | | | - - +| | [`PutObjectWithContext`](#PutObjectWithContext) | | | | +| | [`GetObjectWithContext`](#GetObjectWithContext) | | | | +| | [`FPutObjectWithContext`](#FPutObjectWithContext) | | | | +| | [`FGetObjectWithContext`](#FGetObjectWithContext) | | | | ## 1. 
Constructor @@ -83,7 +84,7 @@ __Parameters__ |`ssl` | _bool_ | If 'true' API requests will be secure (HTTPS), and insecure (HTTP) otherwise | ### NewWithRegion(endpoint, accessKeyID, secretAccessKey string, ssl bool, region string) (*Client, error) -Initializes minio client, with region configured. Unlike New(), NewWithRegion avoids bucket-location lookup operations and it is slightly faster. Use this function when if your application deals with single region. +Initializes minio client, with region configured. Unlike New(), NewWithRegion avoids bucket-location lookup operations and it is slightly faster. Use this function when your application deals with a single region. __Parameters__ @@ -122,7 +123,7 @@ __Example__ ```go -err := minioClient.MakeBucket("mybucket", "us-east-1") +err = minioClient.MakeBucket("mybucket", "us-east-1") if err != nil { fmt.Println(err) return @@ -132,15 +133,16 @@ fmt.Println("Successfully created mybucket.") ### ListBuckets() ([]BucketInfo, error) - Lists all buckets. | Param | Type | Description | |---|---|---| -|`bucketList` | _[]BucketInfo_ | Lists of all buckets | +|`bucketList` | _[]minio.BucketInfo_ | Lists of all buckets | -| Param | Type | Description | +__minio.BucketInfo__ + +| Field | Type | Description | |---|---|---| |`bucket.Name` | _string_ | Name of the bucket | |`bucket.CreationDate` | _time.Time_ | Date of bucket creation | @@ -151,7 +153,7 @@ __Example__ ```go buckets, err := minioClient.ListBuckets() - if err != nil { +if err != nil { fmt.Println(err) return } @@ -162,7 +164,6 @@ for _, bucket := range buckets { ### BucketExists(bucketName string) (found bool, err error) - Checks if a bucket exists. __Parameters__ @@ -197,8 +198,7 @@ if found { ### RemoveBucket(bucketName string) error - -Removes a bucket. +Removes a bucket, bucket should be empty to be successfully removed. __Parameters__ @@ -211,7 +211,7 @@ __Example__ ```go -err := minioClient.RemoveBucket("mybucket") +err = minioClient.RemoveBucket("mybucket") if err != nil { fmt.Println(err) return @@ -220,7 +220,6 @@ if err != nil { ### ListObjects(bucketName, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo - Lists objects in a bucket. 
__Parameters__ @@ -238,9 +237,11 @@ __Return Value__ |Param |Type |Description | |:---|:---| :---| -|`chan ObjectInfo` | _chan ObjectInfo_ |Read channel for all objects in the bucket, the object is of the format listed below: | +|`objectInfo` | _chan minio.ObjectInfo_ |Read channel for all objects in the bucket, the object is of the format listed below: | -|Param |Type |Description | +__minio.ObjectInfo__ + +|Field |Type |Description | |:---|:---| :---| |`objectInfo.Key` | _string_ |Name of the object | |`objectInfo.Size` | _int64_ |Size of the object | @@ -269,7 +270,6 @@ for object := range objectCh { ### ListObjectsV2(bucketName, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo - Lists objects in a bucket using the recommended listing API v2 __Parameters__ @@ -287,14 +287,7 @@ __Return Value__ |Param |Type |Description | |:---|:---| :---| -|`chan ObjectInfo` | _chan ObjectInfo_ |Read channel for all the objects in the bucket, the object is of the format listed below: | - -|Param |Type |Description | -|:---|:---| :---| -|`objectInfo.Key` | _string_ |Name of the object | -|`objectInfo.Size` | _int64_ |Size of the object | -|`objectInfo.ETag` | _string_ |MD5 checksum of the object | -|`objectInfo.LastModified` | _time.Time_ |Time when object was last modified | +|`objectInfo` | _chan minio.ObjectInfo_ |Read channel for all the objects in the bucket, the object is of the format listed below: | ```go @@ -317,7 +310,6 @@ for object := range objectCh { ### ListIncompleteUploads(bucketName, prefix string, recursive bool, doneCh chan struct{}) <- chan ObjectMultipartInfo - Lists partially uploaded objects in a bucket. @@ -336,11 +328,11 @@ __Return Value__ |Param |Type |Description | |:---|:---| :---| -|`chan ObjectMultipartInfo` | _chan ObjectMultipartInfo_ |Emits multipart objects of the format listed below: | +|`multiPartInfo` | _chan minio.ObjectMultipartInfo_ |Emits multipart objects of the format listed below: | -__Return Value__ +__minio.ObjectMultipartInfo__ -|Param |Type |Description | +|Field |Type |Description | |:---|:---| :---| |`multiPartObjInfo.Key` | _string_ |Name of incompletely uploaded object | |`multiPartObjInfo.UploadID` | _string_ |Upload ID of incompletely uploaded object | @@ -370,8 +362,7 @@ for multiPartObject := range multiPartObjectCh { ## 3. Object operations -### GetObject(bucketName, objectName string) (*Object, error) - +### GetObject(bucketName, objectName string, opts GetObjectOptions) (*Object, error) Returns a stream of the object data. Most of the common errors occur when reading the stream. 
@@ -382,8 +373,15 @@ __Parameters__ |:---|:---| :---| |`bucketName` | _string_ |Name of the bucket | |`objectName` | _string_ |Name of the object | +|`opts` | _minio.GetObjectOptions_ | Options for GET requests specifying additional options like encryption, If-Match | +__minio.GetObjectOptions__ + +|Field | Type | Description | +|:---|:---|:---| +| `opts.Materials` | _encrypt.Materials_ | Interface provided by `encrypt` package to encrypt a stream of data (For more information see https://godoc.org/github.com/minio/minio-go) | + __Return Value__ @@ -396,7 +394,7 @@ __Example__ ```go -object, err := minioClient.GetObject("mybucket", "photo.jpg") +object, err := minioClient.GetObject("mybucket", "myobject", minio.GetObjectOptions{}) if err != nil { fmt.Println(err) return @@ -413,9 +411,8 @@ if _, err = io.Copy(localFile, object); err != nil { ``` -### FGetObject(bucketName, objectName, filePath string) error - Downloads and saves the object as a file in the local filesystem. - +### FGetObject(bucketName, objectName, filePath string, opts GetObjectOptions) error +Downloads and saves the object as a file in the local filesystem. __Parameters__ @@ -425,13 +422,127 @@ __Parameters__ |`bucketName` | _string_ |Name of the bucket | |`objectName` | _string_ |Name of the object | |`filePath` | _string_ |Path to download object to | +|`opts` | _minio.GetObjectOptions_ | Options for GET requests specifying additional options like encryption, If-Match | __Example__ ```go -err := minioClient.FGetObject("mybucket", "photo.jpg", "/tmp/photo.jpg") +err = minioClient.FGetObject("mybucket", "myobject", "/tmp/myobject", minio.GetObjectOptions{}) +if err != nil { + fmt.Println(err) + return +} +``` + +### GetObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) +Identical to GetObject operation, but accepts a context for request cancellation. + +__Parameters__ + + +|Param |Type |Description | +|:---|:---| :---| +|`ctx` | _context.Context_ |Request context | +|`bucketName` | _string_ |Name of the bucket | +|`objectName` | _string_ |Name of the object | +|`opts` | _minio.GetObjectOptions_ | Options for GET requests specifying additional options like encryption, If-Match | + + +__Return Value__ + + +|Param |Type |Description | +|:---|:---| :---| +|`object` | _*minio.Object_ |_minio.Object_ represents object reader. It implements io.Reader, io.Seeker, io.ReaderAt and io.Closer interfaces. | + + +__Example__ + + +```go +ctx, cancel := context.WithTimeout(context.Background(), 100 * time.Second) +defer cancel() + +object, err := minioClient.GetObjectWithContext(ctx, "mybucket", "myobject", minio.GetObjectOptions{}) +if err != nil { + fmt.Println(err) + return +} + +localFile, err := os.Create("/tmp/local-file.jpg") +if err != nil { + fmt.Println(err) + return +} + +if _, err = io.Copy(localFile, object); err != nil { + fmt.Println(err) + return +} +``` + + +### FGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error +Identical to FGetObject operation, but allows request cancellation. 
+ +__Parameters__ + + +|Param |Type |Description | +|:---|:---| :---| +|`ctx` | _context.Context_ |Request context | +|`bucketName` | _string_ |Name of the bucket | +|`objectName` | _string_ |Name of the object | +|`filePath` | _string_ |Path to download object to | +|`opts` | _minio.GetObjectOptions_ | Options for GET requests specifying additional options like encryption, If-Match | + + +__Example__ + + +```go +ctx, cancel := context.WithTimeout(context.Background(), 100 * time.Second) +defer cancel() + +err = minioClient.FGetObjectWithContext(ctx, "mybucket", "myobject", "/tmp/myobject", minio.GetObjectOptions{}) +if err != nil { + fmt.Println(err) + return +} +``` + + +### FGetEncryptedObject(bucketName, objectName, filePath string, materials encrypt.Materials) error +Identical to FGetObject operation, but decrypts an encrypted request + +__Parameters__ + + +|Param |Type |Description | +|:---|:---| :---| +|`bucketName` | _string_ |Name of the bucket | +|`objectName` | _string_ |Name of the object | +|`filePath` | _string_ |Path to download object to | +|`materials` | _encrypt.Materials_ | Interface provided by `encrypt` package to encrypt a stream of data (For more information see https://godoc.org/github.com/minio/minio-go) | + + +__Example__ + + +```go +// Generate a master symmetric key +key := encrypt.NewSymmetricKey([]byte("my-secret-key-00")) + +// Build the CBC encryption material +cbcMaterials, err := encrypt.NewCBCSecureMaterials(key) +if err != nil { + fmt.Println(err) + return +} + +err = minioClient.FGetEncryptedObject("mybucket", "myobject", "/tmp/myobject", cbcMaterials) if err != nil { fmt.Println(err) return @@ -439,8 +550,7 @@ if err != nil { ``` -### PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int, err error) - +### PutObject(bucketName, objectName string, reader io.Reader, objectSize int64,opts PutObjectOptions) (n int, err error) Uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than 64MiB in size, PutObject seamlessly uploads the object as parts of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB. __Parameters__ @@ -451,7 +561,20 @@ __Parameters__ |`bucketName` | _string_ |Name of the bucket | |`objectName` | _string_ |Name of the object | |`reader` | _io.Reader_ |Any Go type that implements io.Reader | -|`contentType` | _string_ |Content type of the object | +|`objectSize`| _int64_ |Size of the object being uploaded. Pass -1 if stream size is unknown | +|`opts` | _minio.PutObjectOptions_ | Allows user to set optional custom metadata, content headers, encryption keys and number of threads for multipart upload operation. 
| + +__minio.PutObjectOptions__ + +|Field | Type | Description | +|:--- |:--- | :--- | +| `opts.UserMetadata` | _map[string]string_ | Map of user metadata| +| `opts.Progress` | _io.Reader_ | Reader to fetch progress of an upload | +| `opts.ContentType` | _string_ | Content type of object, e.g "application/text" | +| `opts.ContentEncoding` | _string_ | Content encoding of object, e.g "gzip" | +| `opts.ContentDisposition` | _string_ | Content disposition of object, "inline" | +| `opts.CacheControl` | _string_ | Used to specify directives for caching mechanisms in both requests and responses e.g "max-age=600"| +| `opts.EncryptMaterials` | _encrypt.Materials_ | Interface provided by `encrypt` package to encrypt a stream of data (For more information see https://godoc.org/github.com/minio/minio-go) | __Example__ @@ -465,69 +588,89 @@ if err != nil { } defer file.Close() -n, err := minioClient.PutObject("mybucket", "myobject", file, "application/octet-stream") +fileStat, err := file.Stat() if err != nil { fmt.Println(err) return } + +n, err := minioClient.PutObject("mybucket", "myobject", file, fileStat.Size(), minio.PutObjectOptions{ContentType:"application/octet-stream"}) +if err != nil { + fmt.Println(err) + return +} +fmt.Println("Successfully uploaded bytes: ", n) ``` - -### PutObjectStreaming(bucketName, objectName string, reader io.Reader) (n int, err error) - -Uploads an object as multiple chunks keeping memory consumption constant. It is similar to PutObject in how objects are broken into multiple parts. Each part in turn is transferred as multiple chunks with constant memory usage. However resuming previously failed uploads from where it was left is not supported. - - -__Parameters__ - - -|Param |Type |Description | -|:---|:---|:---| -|`bucketName` | _string_ |Name of the bucket | -|`objectName` | _string_ |Name of the object | -|`reader` | _io.Reader_ |Any Go type that implements io.Reader | - -__Example__ - - -```go -file, err := os.Open("my-testfile") -if err != nil { - fmt.Println(err) - return -} -defer file.Close() - -n, err := minioClient.PutObjectStreaming("mybucket", "myobject", file) -if err != nil { - fmt.Println(err) - return -} -``` - - - -### CopyObject(dst DestinationInfo, src SourceInfo) error - -Create or replace an object through server-side copying of an existing object. It supports conditional copying, copying a part of an object and server-side encryption of destination and decryption of source. See the `SourceInfo` and `DestinationInfo` types for further details. - -To copy multiple source objects into a single destination object see the `ComposeObject` API. +API methods PutObjectWithSize, PutObjectWithMetadata, PutObjectStreaming, and PutObjectWithProgress available in minio-go SDK release v3.0.3 are replaced by the new PutObject call variant that accepts a pointer to PutObjectOptions struct. + +### PutObjectWithContext(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, opts PutObjectOptions) (n int, err error) +Identical to PutObject operation, but allows request cancellation. 
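
As the parameter table above notes, both PutObject and PutObjectWithContext accept an `objectSize` of `-1` when the length of the stream is not known in advance. A minimal sketch of uploading from a stream of unknown size, assuming an initialized `minioClient` as in the surrounding examples:

```go
// Stream data of unknown length, e.g. produced by another goroutine.
reader, writer := io.Pipe()
go func() {
    defer writer.Close()
    // Hypothetical producer writing into the pipe.
    writer.Write([]byte("hello, world\n"))
}()

// Pass -1 as objectSize since the total length is not known up front.
n, err := minioClient.PutObject("mybucket", "myobject", reader, -1,
    minio.PutObjectOptions{ContentType: "application/octet-stream"})
if err != nil {
    fmt.Println(err)
    return
}
fmt.Println("Successfully uploaded bytes: ", n)
```
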
__Parameters__ |Param |Type |Description | |:---|:---| :---| -|`dst` | _DestinationInfo_ |Argument describing the destination object | -|`src` | _SourceInfo_ |Argument describing the source object | +|`ctx` | _context.Context_ |Request context | +|`bucketName` | _string_ |Name of the bucket | +|`objectName` | _string_ |Name of the object | +|`reader` | _io.Reader_ |Any Go type that implements io.Reader | +|`objectSize`| _int64_ | size of the object being uploaded. Pass -1 if stream size is unknown | +|`opts` | _minio.PutObjectOptions_ |Pointer to struct that allows user to set optional custom metadata, content-type, content-encoding,content-disposition and cache-control headers, pass encryption module for encrypting objects, and optionally configure number of threads for multipart put operation. | __Example__ ```go -// Use-case 1: Simple copy object with no conditions, etc +ctx, cancel := context.WithTimeout(context.Background(), 10 * time.Second) +defer cancel() + +file, err := os.Open("my-testfile") +if err != nil { + fmt.Println(err) + return +} +defer file.Close() + +fileStat, err := file.Stat() +if err != nil { + fmt.Println(err) + return +} + +n, err := minioClient.PutObjectWithContext(ctx, "my-bucketname", "my-objectname", file, fileStat.Size(), minio.PutObjectOptions{ + ContentType: "application/octet-stream", +}) +if err != nil { + fmt.Println(err) + return +} +fmt.Println("Successfully uploaded bytes: ", n) +``` + + +### CopyObject(dst DestinationInfo, src SourceInfo) error +Create or replace an object through server-side copying of an existing object. It supports conditional copying, copying a part of an object and server-side encryption of destination and decryption of source. See the `SourceInfo` and `DestinationInfo` types for further details. + +To copy multiple source objects into a single destination object see the `ComposeObject` API. + +__Parameters__ + + +|Param |Type |Description | +|:---|:---| :---| +|`dst` | _minio.DestinationInfo_ |Argument describing the destination object | +|`src` | _minio.SourceInfo_ |Argument describing the source object | + + +__Example__ + + +```go +// Use-case 1: Simple copy object with no conditions. // Source object src := minio.NewSourceInfo("my-sourcebucketname", "my-sourceobjectname", nil) @@ -539,13 +682,16 @@ if err != nil { } // Copy object call -err = s3Client.CopyObject(dst, src) +err = minioClient.CopyObject(dst, src) if err != nil { fmt.Println(err) return } +``` -// Use-case 2: Copy object with copy-conditions, and copying only part of the source object. +```go +// Use-case 2: +// Copy object with copy-conditions, and copying only part of the source object. // 1. that matches a given ETag // 2. and modified after 1st April 2014 // 3. but unmodified since 23rd April 2014 @@ -574,7 +720,7 @@ if err != nil { } // Copy object call -err = s3Client.CopyObject(dst, src) +err = minioClient.CopyObject(dst, src) if err != nil { fmt.Println(err) return @@ -582,10 +728,8 @@ if err != nil { ``` -### ComposeObject(dst DestinationInfo, srcs []SourceInfo) error - -Create an object by concatenating a list of source objects using -server-side copying. +### ComposeObject(dst minio.DestinationInfo, srcs []minio.SourceInfo) error +Create an object by concatenating a list of source objects using server-side copying. __Parameters__ @@ -606,14 +750,14 @@ decKey := minio.NewSSEInfo([]byte{1, 2, 3}, "") // Source objects to concatenate. 
We also specify decryption // key for each -src1 := minio.NewSourceInfo("bucket1", "object1", decKey) -src1.SetMatchETag("31624deb84149d2f8ef9c385918b653a") +src1 := minio.NewSourceInfo("bucket1", "object1", &decKey) +src1.SetMatchETagCond("31624deb84149d2f8ef9c385918b653a") -src2 := minio.NewSourceInfo("bucket2", "object2", decKey) -src2.SetMatchETag("f8ef9c385918b653a31624deb84149d2") +src2 := minio.NewSourceInfo("bucket2", "object2", &decKey) +src2.SetMatchETagCond("f8ef9c385918b653a31624deb84149d2") -src3 := minio.NewSourceInfo("bucket3", "object3", decKey) -src3.SetMatchETag("5918b653a31624deb84149d2f8ef9c38") +src3 := minio.NewSourceInfo("bucket3", "object3", &decKey) +src3.SetMatchETagCond("5918b653a31624deb84149d2f8ef9c38") // Create slice of sources. srcs := []minio.SourceInfo{src1, src2, src3} @@ -622,19 +766,24 @@ srcs := []minio.SourceInfo{src1, src2, src3} encKey := minio.NewSSEInfo([]byte{8, 9, 0}, "") // Create destination info -dst := minio.NewDestinationInfo("bucket", "object", encKey, nil) -err = s3Client.ComposeObject(dst, srcs) +dst, err := minio.NewDestinationInfo("bucket", "object", &encKey, nil) if err != nil { - log.Println(err) - return + fmt.Println(err) + return } -log.Println("Composed object successfully.") +// Compose object call by concatenating multiple source files. +err = minioClient.ComposeObject(dst, srcs) +if err != nil { + fmt.Println(err) + return +} + +fmt.Println("Composed object successfully.") ``` ### NewSourceInfo(bucket, object string, decryptSSEC *SSEInfo) SourceInfo - Construct a `SourceInfo` object that can be used as the source for server-side copying operations like `CopyObject` and `ComposeObject`. This object can be used to set copy-conditions on the source. __Parameters__ @@ -647,18 +796,47 @@ __Parameters__ __Example__ -``` go +```go // No decryption parameter. -src := NewSourceInfo("bucket", "object", nil) +src := minio.NewSourceInfo("bucket", "object", nil) +// Destination object +dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil) +if err != nil { + fmt.Println(err) + return +} + +// Copy object call +err = minioClient.CopyObject(dst, src) +if err != nil { + fmt.Println(err) + return +} +``` + +```go // With decryption parameter. -decKey := NewSSEKey([]byte{1,2,3}, "") -src := NewSourceInfo("bucket", "object", decKey) +decKey := minio.NewSSEInfo([]byte{1,2,3}, "") +src := minio.NewSourceInfo("bucket", "object", &decKey) + +// Destination object +dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil) +if err != nil { + fmt.Println(err) + return +} + +// Copy object call +err = minioClient.CopyObject(dst, src) +if err != nil { + fmt.Println(err) + return +} ``` ### NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo, userMeta map[string]string) (DestinationInfo, error) - Construct a `DestinationInfo` object that can be used as the destination object for server-side copying operations like `CopyObject` and `ComposeObject`. __Parameters__ @@ -672,24 +850,48 @@ __Parameters__ __Example__ -``` go +```go // No encryption parameter. -dst, err := NewDestinationInfo("bucket", "object", nil, nil) +src := minio.NewSourceInfo("bucket", "object", nil) +dst, err := minio.NewDestinationInfo("bucket", "object", nil, nil) +if err != nil { + fmt.Println(err) + return +} -// With encryption parameter. 
-encKey := NewSSEKey([]byte{1,2,3}, "") -dst, err := NewDecryptionInfo("bucket", "object", encKey, nil) +// Copy object call +err = minioClient.CopyObject(dst, src) +if err != nil { + fmt.Println(err) + return +} ``` +```go +src := minio.NewSourceInfo("bucket", "object", nil) + +// With encryption parameter. +encKey := minio.NewSSEInfo([]byte{1,2,3}, "") +dst, err := minio.NewDestinationInfo("bucket", "object", &encKey, nil) +if err != nil { + fmt.Println(err) + return +} + +// Copy object call +err = minioClient.CopyObject(dst, src) +if err != nil { + fmt.Println(err) + return +} +``` -### FPutObject(bucketName, objectName, filePath, contentType string) (length int64, err error) - +### FPutObject(bucketName, objectName, filePath, opts PutObjectOptions) (length int64, err error) Uploads contents from a file to objectName. FPutObject uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than the 64MiB in size, FPutObject seamlessly uploads the object in chunks of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB. - __Parameters__ @@ -698,25 +900,56 @@ __Parameters__ |`bucketName` | _string_ |Name of the bucket | |`objectName` | _string_ |Name of the object | |`filePath` | _string_ |Path to file to be uploaded | -|`contentType` | _string_ |Content type of the object | +|`opts` | _minio.PutObjectOptions_ |Pointer to struct that allows user to set optional custom metadata, content-type, content-encoding,content-disposition and cache-control headers, pass encryption module for encrypting objects, and optionally configure number of threads for multipart put operation. | __Example__ ```go -n, err := minioClient.FPutObject("mybucket", "myobject.csv", "/tmp/otherobject.csv", "application/csv") +n, err := minioClient.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", minio.PutObjectOptions{ + ContentType: "application/csv", +}); if err != nil { fmt.Println(err) return } +fmt.Println("Successfully uploaded bytes: ", n) +``` + + +### FPutObjectWithContext(ctx context.Context, bucketName, objectName, filePath, opts PutObjectOptions) (length int64, err error) +Identical to FPutObject operation, but allows request cancellation. + +__Parameters__ + + +|Param |Type |Description | +|:---|:---| :---| +|`ctx` | _context.Context_ |Request context | +|`bucketName` | _string_ |Name of the bucket | +|`objectName` | _string_ |Name of the object | +|`filePath` | _string_ |Path to file to be uploaded | +|`opts` | _minio.PutObjectOptions_ |Pointer to struct that allows user to set optional custom metadata, content-type, content-encoding,content-disposition and cache-control headers, pass encryption module for encrypting objects, and optionally configure number of threads for multipart put operation. | + +__Example__ + + +```go +ctx, cancel := context.WithTimeout(context.Background(), 100 * time.Second) +defer cancel() + +n, err := minioClient.FPutObjectWithContext(ctx, "mybucket", "myobject.csv", "/tmp/otherobject.csv", minio.PutObjectOptions{ContentType:"application/csv"}) +if err != nil { + fmt.Println(err) + return +} +fmt.Println("Successfully uploaded bytes: ", n) ``` -### StatObject(bucketName, objectName string) (ObjectInfo, error) - -Gets metadata of an object. - +### StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) +Fetch metadata of an object. 
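
A common use of StatObject is to check whether an object exists before acting on it. A minimal sketch, assuming an initialized `minioClient`; the `minio.ToErrorResponse` helper used to inspect the S3 error code is an assumption of this sketch (it comes from the client's error-response handling) and is not shown in the surrounding examples:

```go
objInfo, err := minioClient.StatObject("mybucket", "myobject", minio.StatObjectOptions{})
if err != nil {
    // Inspect the S3 error code to tell "not found" apart from other failures.
    errResp := minio.ToErrorResponse(err)
    if errResp.Code == "NoSuchKey" {
        fmt.Println("object does not exist")
        return
    }
    fmt.Println(err)
    return
}
fmt.Println("size:", objInfo.Size, "etag:", objInfo.ETag, "modified:", objInfo.LastModified)
```
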
__Parameters__ @@ -725,16 +958,19 @@ __Parameters__ |:---|:---| :---| |`bucketName` | _string_ |Name of the bucket | |`objectName` | _string_ |Name of the object | +|`opts` | _minio.StatObjectOptions_ | Options for GET info/stat requests specifying additional options like encryption, If-Match | __Return Value__ |Param |Type |Description | |:---|:---| :---| -|`objInfo` | _ObjectInfo_ |Object stat information | +|`objInfo` | _minio.ObjectInfo_ |Object stat information | -|Param |Type |Description | +__minio.ObjectInfo__ + +|Field |Type |Description | |:---|:---| :---| |`objInfo.LastModified` | _time.Time_ |Time when object was last modified | |`objInfo.ETag` | _string_ |MD5 checksum of the object| @@ -742,11 +978,11 @@ __Return Value__ |`objInfo.Size` | _int64_ |Size of the object| - __Example__ +__Example__ ```go -objInfo, err := minioClient.StatObject("mybucket", "photo.jpg") +objInfo, err := minioClient.StatObject("mybucket", "myobject", minio.StatObjectOptions{}) if err != nil { fmt.Println(err) return @@ -756,10 +992,8 @@ fmt.Println(objInfo) ### RemoveObject(bucketName, objectName string) error - Removes an object. - __Parameters__ @@ -770,46 +1004,54 @@ __Parameters__ ```go -err := minioClient.RemoveObject("mybucket", "photo.jpg") +err = minioClient.RemoveObject("mybucket", "myobject") if err != nil { fmt.Println(err) return } ``` - -### RemoveObjects(bucketName string, objectsCh chan string) errorCh chan minio.RemoveObjectError -Removes a list of objects obtained from an input channel. The call sends a delete request to the server up to 1000 objects at a time. -The errors observed are sent over the error channel. + +### RemoveObjects(bucketName string, objectsCh chan string) (errorCh <-chan RemoveObjectError) +Removes a list of objects obtained from an input channel. The call sends a delete request to the server up to 1000 objects at a time. The errors observed are sent over the error channel. __Parameters__ |Param |Type |Description | |:---|:---| :---| |`bucketName` | _string_ |Name of the bucket | -|`objectsCh` | _chan string_ | Prefix of objects to be removed | +|`objectsCh` | _chan string_ | Channel of objects to be removed | __Return Values__ |Param |Type |Description | |:---|:---| :---| -|`errorCh` | _chan minio.RemoveObjectError | Channel of errors observed during deletion. | - +|`errorCh` | _<-chan minio.RemoveObjectError_ | Receive-only channel of errors observed during deletion. | ```go -errorCh := minioClient.RemoveObjects("mybucket", objectsCh) -for e := range errorCh { - fmt.Println("Error detected during deletion: " + e.Err.Error()) +objectsCh := make(chan string) + +// Send object names that are needed to be removed to objectsCh +go func() { + defer close(objectsCh) + // List all objects from a bucket-name with a matching prefix. + for object := range minioClient.ListObjects("my-bucketname", "my-prefixname", true, nil) { + if object.Err != nil { + log.Fatalln(object.Err) + } + objectsCh <- object.Key + } +}() + +for rErr := range minioClient.RemoveObjects("mybucket", objectsCh) { + fmt.Println("Error detected during deletion: ", rErr) } ``` - - ### RemoveIncompleteUpload(bucketName, objectName string) error - Removes a partially uploaded object. __Parameters__ @@ -824,7 +1066,7 @@ __Example__ ```go -err := minioClient.RemoveIncompleteUpload("mybucket", "photo.jpg") +err = minioClient.RemoveIncompleteUpload("mybucket", "myobject") if err != nil { fmt.Println(err) return @@ -834,7 +1076,7 @@ if err != nil { ## 4. 
Encrypted object operations -### NewSymmetricKey(key []byte) *minio.SymmetricKey +### NewSymmetricKey(key []byte) *encrypt.SymmetricKey __Parameters__ @@ -847,15 +1089,29 @@ __Return Value__ |Param |Type |Description | |:---|:---| :---| -|`symmetricKey` | _*minio.SymmetricKey_ |_minio.SymmetricKey_ represents a symmetric key structure which can be used to encrypt and decrypt data. | +|`symmetricKey` | _*encrypt.SymmetricKey_ | represents a symmetric key structure which can be used to encrypt and decrypt data | ```go -symKey := minio.NewSymmetricKey([]byte("my-secret-key-00")) +symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00")) + +// Build the CBC encryption material with symmetric key. +cbcMaterials, err := encrypt.NewCBCSecureMaterials(symKey) +if err != nil { + fmt.Println(err) + return +} +fmt.Println("Successfully initialized Symmetric key CBC materials", cbcMaterials) + +object, err := minioClient.GetEncryptedObject("mybucket", "myobject", cbcMaterials) +if err != nil { + fmt.Println(err) + return +} +defer object.Close() ``` - -### NewAsymmetricKey(privateKey []byte, publicKey[]byte) (*minio.AsymmetricKey, error) +### NewAsymmetricKey(privateKey []byte, publicKey[]byte) (*encrypt.AsymmetricKey, error) __Parameters__ @@ -869,32 +1125,50 @@ __Return Value__ |Param |Type |Description | |:---|:---| :---| -|`asymmetricKey` | _*minio.AsymmetricKey_ | represents an asymmetric key structure which can be used to encrypt and decrypt data. | -|`err` | _error_ | encountered errors. | +|`asymmetricKey` | _*encrypt.AsymmetricKey_ | represents an asymmetric key structure which can be used to encrypt and decrypt data | +|`err` | _error_ | Standard Error | ```go privateKey, err := ioutil.ReadFile("private.key") if err != nil { - log.Fatal(err) + fmt.Println(err) + return } publicKey, err := ioutil.ReadFile("public.key") if err != nil { - log.Fatal(err) + fmt.Println(err) + return } // Initialize the asymmetric key -asymmetricKey, err := minio.NewAsymmetricKey(privateKey, publicKey) +asymmetricKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey) if err != nil { - log.Fatal(err) + fmt.Println(err) + return } + +// Build the CBC encryption material for asymmetric key. +cbcMaterials, err := encrypt.NewCBCSecureMaterials(asymmetricKey) +if err != nil { + fmt.Println(err) + return +} +fmt.Println("Successfully initialized Asymmetric key CBC materials", cbcMaterials) + +object, err := minioClient.GetEncryptedObject("mybucket", "myobject", cbcMaterials) +if err != nil { + fmt.Println(err) + return +} +defer object.Close() ``` -### GetEncryptedObject(bucketName, objectName string, encryptMaterials minio.EncryptionMaterials) (io.ReadCloser, error) +### GetEncryptedObject(bucketName, objectName string, encryptMaterials encrypt.Materials) (io.ReadCloser, error) -Returns the decrypted stream of the object data based of the given encryption materiels. Most of the common errors occur when reading the stream. +Returns the decrypted stream of the object data based of the given encryption materials. Most of the common errors occur when reading the stream. 
__Parameters__ @@ -902,7 +1176,7 @@ __Parameters__ |:---|:---| :---| |`bucketName` | _string_ | Name of the bucket | |`objectName` | _string_ | Name of the object | -|`encryptMaterials` | _minio.EncryptionMaterials_ | The module to decrypt the object data | +|`encryptMaterials` | _encrypt.Materials_ | Interface provided by `encrypt` package to encrypt a stream of data (For more information see https://godoc.org/github.com/minio/minio-go) | __Return Value__ @@ -918,15 +1192,16 @@ __Example__ ```go // Generate a master symmetric key -key := minio.NewSymmetricKey("my-secret-key-00") +key := encrypt.NewSymmetricKey([]byte("my-secret-key-00")) // Build the CBC encryption material -cbcMaterials, err := NewCBCSecureMaterials(key) +cbcMaterials, err := encrypt.NewCBCSecureMaterials(key) if err != nil { - t.Fatal(err) + fmt.Println(err) + return } -object, err := minioClient.GetEncryptedObject("mybucket", "photo.jpg", cbcMaterials) +object, err := minioClient.GetEncryptedObject("mybucket", "myobject", cbcMaterials) if err != nil { fmt.Println(err) return @@ -938,6 +1213,7 @@ if err != nil { fmt.Println(err) return } +defer localFile.Close() if _, err = io.Copy(localFile, object); err != nil { fmt.Println(err) @@ -947,11 +1223,9 @@ if _, err = io.Copy(localFile, object); err != nil { -### PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials minio.EncryptionMaterials, metadata map[string][]string, progress io.Reader) (n int, err error) - +### PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials) (n int, err error) Encrypt and upload an object. - __Parameters__ |Param |Type |Description | @@ -959,10 +1233,7 @@ __Parameters__ |`bucketName` | _string_ |Name of the bucket | |`objectName` | _string_ |Name of the object | |`reader` | _io.Reader_ |Any Go type that implements io.Reader | -|`encryptMaterials` | _minio.EncryptionMaterials_ | The module that encrypts data | -|`metadata` | _map[string][]string_ | Object metadata to be stored | -|`progress` | io.Reader | A reader to update the upload progress | - +|`encryptMaterials` | _encrypt.Materials_ | Interface provided by `encrypt` package to encrypt a stream of data (For more information see https://godoc.org/github.com/minio/minio-go) | __Example__ @@ -970,25 +1241,29 @@ __Example__ // Load a private key privateKey, err := ioutil.ReadFile("private.key") if err != nil { - log.Fatal(err) + fmt.Println(err) + return } // Load a public key publicKey, err := ioutil.ReadFile("public.key") if err != nil { - log.Fatal(err) + fmt.Println(err) + return } // Build an asymmetric key -key, err := NewAssymetricKey(privateKey, publicKey) +key, err := encrypt.NewAsymmetricKey(privateKey, publicKey) if err != nil { - log.Fatal(err) + fmt.Println(err) + return } // Build the CBC encryption module -cbcMaterials, err := NewCBCSecureMaterials(key) +cbcMaterials, err := encrypt.NewCBCSecureMaterials(key) if err != nil { - t.Fatal(err) + fmt.Println(err) + return } // Open a file to upload @@ -1000,17 +1275,71 @@ if err != nil { defer file.Close() // Upload the encrypted form of the file -n, err := minioClient.PutEncryptedObject("mybucket", "myobject", file, encryptMaterials, nil, nil) +n, err := minioClient.PutEncryptedObject("mybucket", "myobject", file, cbcMaterials) if err != nil { fmt.Println(err) return } +fmt.Println("Successfully uploaded encrypted bytes: ", n) +``` + + +### FPutEncryptedObject(bucketName, objectName, filePath, encryptMaterials encrypt.Materials) (n int, err error) 
+Encrypt and upload an object from a file. + +__Parameters__ + + +|Param |Type |Description | +|:---|:---| :---| +|`bucketName` | _string_ |Name of the bucket | +|`objectName` | _string_ |Name of the object | +|`filePath` | _string_ |Path to file to be uploaded | +|`encryptMaterials` | _encrypt.Materials_ | Interface provided by `encrypt` package to encrypt a stream of data (For more information see https://godoc.org/github.com/minio/minio-go)The module that encrypts data | + +__Example__ + + +```go +// Load a private key +privateKey, err := ioutil.ReadFile("private.key") +if err != nil { + fmt.Println(err) + return +} + +// Load a public key +publicKey, err := ioutil.ReadFile("public.key") +if err != nil { + fmt.Println(err) + return +} + +// Build an asymmetric key +key, err := encrypt.NewAsymmetricKey(privateKey, publicKey) +if err != nil { + fmt.Println(err) + return +} + +// Build the CBC encryption module +cbcMaterials, err := encrypt.NewCBCSecureMaterials(key) +if err != nil { + fmt.Println(err) + return +} + +n, err := minioClient.FPutEncryptedObject("mybucket", "myobject.csv", "/tmp/otherobject.csv", cbcMaterials) +if err != nil { + fmt.Println(err) + return +} +fmt.Println("Successfully uploaded encrypted bytes: ", n) ``` ### NewSSEInfo(key []byte, algo string) SSEInfo - Create a key object for use as encryption or decryption parameter in operations involving server-side-encryption with customer provided key (SSE-C). __Parameters__ @@ -1020,18 +1349,11 @@ __Parameters__ | `key` | _[]byte_ | Byte-slice of the raw, un-encoded binary key | | `algo` | _string_ | Algorithm to use in encryption or decryption with the given key. Can be empty (defaults to `AES256`) | -__Example__ - -``` go -// Key for use in encryption/decryption -keyInfo := NewSSEInfo([]byte{1,2,3}, "") -``` ## 5. Presigned operations ### PresignedGetObject(bucketName, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error) - Generates a presigned URL for HTTP GET operations. Browsers/Mobile clients may point to this URL to directly download objects even if the bucket is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days. __Parameters__ @@ -1059,11 +1381,11 @@ if err != nil { fmt.Println(err) return } +fmt.Println("Successfully generated presigned URL", presignedURL) ``` ### PresignedPutObject(bucketName, objectName string, expiry time.Duration) (*url.URL, error) - Generates a presigned URL for HTTP PUT operations. Browsers/Mobile clients may point to this URL to upload objects directly to a bucket even if it is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days. NOTE: you can upload to S3 only with specified object name. @@ -1089,12 +1411,11 @@ if err != nil { fmt.Println(err) return } -fmt.Println(presignedURL) +fmt.Println("Successfully generated presigned URL", presignedURL) ``` ### PresignedHeadObject(bucketName, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error) - Generates a presigned URL for HTTP HEAD operations. Browsers/Mobile clients may point to this URL to directly get metadata from objects even if the bucket is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days. 
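
The URL returned by the presigned calls is a plain HTTPS URL, so any HTTP client can use it without credentials. A minimal sketch that generates a presigned HEAD URL and issues the request with the standard `net/http` package, assuming an initialized `minioClient` as in the surrounding examples:

```go
presignedURL, err := minioClient.PresignedHeadObject("mybucket", "myobject", time.Second*24*60*60, nil)
if err != nil {
    fmt.Println(err)
    return
}

// Any plain HTTP client can now issue the HEAD request; no credentials are needed.
resp, err := http.Head(presignedURL.String())
if err != nil {
    fmt.Println(err)
    return
}
defer resp.Body.Close()
fmt.Println("status:", resp.Status, "Content-Length:", resp.ContentLength, "ETag:", resp.Header.Get("ETag"))
```
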
__Parameters__ @@ -1121,23 +1442,18 @@ if err != nil { fmt.Println(err) return } +fmt.Println("Successfully generated presigned URL", presignedURL) ``` ### PresignedPostPolicy(PostPolicy) (*url.URL, map[string]string, error) - Allows setting policy conditions to a presigned URL for POST operations. Policies such as bucket name to receive object uploads, key name prefixes, expiry policy may be set. -Create policy : - ```go +// Initialize policy condition config. policy := minio.NewPostPolicy() -``` -Apply upload policy restrictions: - - -```go +// Apply upload policy restrictions: policy.SetBucket("mybucket") policy.SetKey("myobject") policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days @@ -1148,18 +1464,17 @@ policy.SetContentType("image/png") // Only allow content size in range 1KB to 1MB. policy.SetContentLengthRange(1024, 1024*1024) -// Get the POST form key/value object: +// Add a user metadata using the key "custom" and value "user" +policy.SetUserMetadata("custom", "user") +// Get the POST form key/value object: url, formData, err := minioClient.PresignedPostPolicy(policy) if err != nil { fmt.Println(err) return } -``` -POST your content from the command line using `curl`: - -```go +// POST your content from the command line using `curl` fmt.Printf("curl ") for k, v := range formData { fmt.Printf("-F %s=%s ", k, v) @@ -1172,7 +1487,6 @@ fmt.Printf("%s\n", url) ### SetBucketPolicy(bucketname, objectPrefix string, policy policy.BucketPolicy) error - Set access permissions on bucket or an object prefix. Importing `github.com/minio/minio-go/pkg/policy` package is needed. @@ -1203,7 +1517,9 @@ __Example__ ```go -err := minioClient.SetBucketPolicy("mybucket", "myprefix", policy.BucketPolicyReadWrite) +// Sets 'mybucket' with a sub-directory 'myprefix' to be anonymously accessible for +// both read and write operations. +err = minioClient.SetBucketPolicy("mybucket", "myprefix", policy.BucketPolicyReadWrite) if err != nil { fmt.Println(err) return @@ -1212,7 +1528,6 @@ if err != nil { ### GetBucketPolicy(bucketName, objectPrefix string) (policy.BucketPolicy, error) - Get access permissions on a bucket or a prefix. Importing `github.com/minio/minio-go/pkg/policy` package is needed. @@ -1247,7 +1562,6 @@ fmt.Println("Access permissions for mybucket is", bucketPolicy) ### ListBucketPolicies(bucketName, objectPrefix string) (map[string]BucketPolicy, error) - Get access permissions rules associated to the specified bucket and prefix. __Parameters__ @@ -1263,7 +1577,7 @@ __Return Values__ |Param |Type |Description | |:---|:---| :---| -|`bucketPolicies` | _map[string]BucketPolicy_ |Map of object resource paths and their permissions | +|`bucketPolicies` | _map[string]minio.BucketPolicy_ |Map of object resource paths and their permissions | |`err` | _error_ |Standard Error | __Example__ @@ -1282,8 +1596,7 @@ for resource, permission := range bucketPolicies { ### GetBucketNotification(bucketName string) (BucketNotification, error) - -Get all notification configurations related to the specified bucket. +Get notification configuration on a bucket. 
__Parameters__ @@ -1297,7 +1610,7 @@ __Return Values__ |Param |Type |Description | |:---|:---| :---| -|`bucketNotification` | _BucketNotification_ |structure which holds all notification configurations| +|`bucketNotification` | _minio.BucketNotification_ |structure which holds all notification configurations| |`err` | _error_ |Standard Error | __Example__ @@ -1306,10 +1619,12 @@ __Example__ ```go bucketNotification, err := minioClient.GetBucketNotification("mybucket") if err != nil { - log.Fatalf("Failed to get bucket notification configurations for mybucket - %v", err) + fmt.Println("Failed to get bucket notification configurations for mybucket", err) + return } -for _, topicConfig := range bucketNotification.TopicConfigs { - for _, e := range topicConfig.Events { + +for _, queueConfig := range bucketNotification.QueueConfigs { + for _, e := range queueConfig.Events { fmt.Println(e + " event is enabled") } } @@ -1317,7 +1632,6 @@ for _, topicConfig := range bucketNotification.TopicConfigs { ### SetBucketNotification(bucketName string, bucketNotification BucketNotification) error - Set a new bucket notification on a bucket. __Parameters__ @@ -1326,7 +1640,7 @@ __Parameters__ |Param |Type |Description | |:---|:---| :---| |`bucketName` | _string_ |Name of the bucket | -|`bucketNotification` | _BucketNotification_ |Represents the XML to be sent to the configured web service | +|`bucketNotification` | _minio.BucketNotification_ |Represents the XML to be sent to the configured web service | __Return Values__ @@ -1339,24 +1653,25 @@ __Example__ ```go -topicArn := NewArn("aws", "sns", "us-east-1", "804605494417", "PhotoUpdate") +queueArn := minio.NewArn("aws", "sqs", "us-east-1", "804605494417", "PhotoUpdate") -topicConfig := NewNotificationConfig(topicArn) -topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll) -lambdaConfig.AddFilterPrefix("photos/") -lambdaConfig.AddFilterSuffix(".jpg") +queueConfig := minio.NewNotificationConfig(queueArn) +queueConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll) +queueConfig.AddFilterPrefix("photos/") +queueConfig.AddFilterSuffix(".jpg") -bucketNotification := BucketNotification{} -bucketNotification.AddTopic(topicConfig) -err := c.SetBucketNotification(bucketName, bucketNotification) +bucketNotification := minio.BucketNotification{} +bucketNotification.AddQueue(queueConfig) + +err = minioClient.SetBucketNotification("mybucket", bucketNotification) if err != nil { - fmt.Println("Unable to set the bucket notification: " + err) + fmt.Println("Unable to set the bucket notification: ", err) + return } ``` ### RemoveAllBucketNotification(bucketName string) error - Remove all configured bucket notifications on a bucket. __Parameters__ @@ -1377,18 +1692,16 @@ __Example__ ```go -err := c.RemoveAllBucketNotification(bucketName) +err = minioClient.RemoveAllBucketNotification("mybucket") if err != nil { fmt.Println("Unable to remove bucket notifications.", err) + return } ``` ### ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo - -ListenBucketNotification API receives bucket notification events through the -notification channel. The returned notification channel has two fields -'Records' and 'Err'. +ListenBucketNotification API receives bucket notification events through the notification channel. The returned notification channel has two fields 'Records' and 'Err'. - 'Records' holds the notifications received from the server. 
- 'Err' indicates any error while processing the received notifications. @@ -1403,17 +1716,20 @@ __Parameters__ |`bucketName` | _string_ | Bucket to listen notifications on | |`prefix` | _string_ | Object key prefix to filter notifications for | |`suffix` | _string_ | Object key suffix to filter notifications for | -|`events` | _[]string_| Enables notifications for specific event types | +|`events` | _[]string_ | Enables notifications for specific event types | |`doneCh` | _chan struct{}_ | A message on this channel ends the ListenBucketNotification iterator | __Return Values__ |Param |Type |Description | |:---|:---| :---| -|`chan NotificationInfo` | _chan_ | Read channel for all notifications on bucket | -|`NotificationInfo` | _object_ | Notification object represents events info | -|`notificationInfo.Records` | _[]NotificationEvent_ | Collection of notification events | -|`notificationInfo.Err` | _error_ | Carries any error occurred during the operation | +|`notificationInfo` | _chan minio.NotificationInfo_ | Channel of bucket notifications | + +__minio.NotificationInfo__ + +|Field |Type |Description | +|`notificationInfo.Records` | _[]minio.NotificationEvent_ | Collection of notification events | +|`notificationInfo.Err` | _error_ | Carries any error occurred during the operation (Standard Error) | __Example__ @@ -1427,15 +1743,15 @@ doneCh := make(chan struct{}) defer close(doneCh) // Listen for bucket notifications on "mybucket" filtered by prefix, suffix and events. -for notificationInfo := range minioClient.ListenBucketNotification("YOUR-BUCKET", "PREFIX", "SUFFIX", []string{ +for notificationInfo := range minioClient.ListenBucketNotification("mybucket", "myprefix/", ".mysuffix", []string{ "s3:ObjectCreated:*", "s3:ObjectAccessed:*", "s3:ObjectRemoved:*", }, doneCh) { if notificationInfo.Err != nil { - log.Fatalln(notificationInfo.Err) + fmt.Println(notificationInfo.Err) } - log.Println(notificationInfo) + fmt.Println(notificationInfo) } ``` @@ -1443,7 +1759,7 @@ for notificationInfo := range minioClient.ListenBucketNotification("YOUR-BUCKET" ### SetAppInfo(appName, appVersion string) -Adds application details to User-Agent. +Add custom application details to User-Agent. __Parameters__ @@ -1463,8 +1779,7 @@ minioClient.SetAppInfo("myCloudApp", "1.0.0") ### SetCustomTransport(customHTTPTransport http.RoundTripper) -Overrides default HTTP transport. This is usually needed for debugging -or for adding custom TLS certificates. +Overrides default HTTP transport. This is usually needed for debugging or for adding custom TLS certificates. __Parameters__ @@ -1475,8 +1790,7 @@ __Parameters__ ### TraceOn(outputStream io.Writer) -Enables HTTP tracing. The trace is written to the io.Writer -provided. If outputStream is nil, trace is written to os.Stdout. +Enables HTTP tracing. The trace is written to the io.Writer provided. If outputStream is nil, trace is written to os.Stdout. __Parameters__ @@ -1492,7 +1806,7 @@ Disables HTTP tracing. ### SetS3TransferAccelerate(acceleratedEndpoint string) Set AWS S3 transfer acceleration endpoint for all API requests hereafter. -NOTE: This API applies only to AWS S3 and ignored with other S3 compatible object storage services. +NOTE: This API applies only to AWS S3 and is a no operation for S3 compatible object storage services. 
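
A minimal sketch, assuming an initialized `minioClient` pointed at AWS S3; the accelerate endpoint name `s3-accelerate.amazonaws.com` is an assumption of this sketch (the standard AWS endpoint), not taken from the surrounding text:

```go
// Enable S3 Transfer Acceleration for subsequent requests. Per the note above,
// this only applies to AWS S3 and is a no-op for other S3 compatible services.
minioClient.SetS3TransferAccelerate("s3-accelerate.amazonaws.com")

// Uploads after this point are routed through the accelerate endpoint.
n, err := minioClient.FPutObject("mybucket", "myobject", "/tmp/myobject",
    minio.PutObjectOptions{ContentType: "application/octet-stream"})
if err != nil {
    fmt.Println(err)
    return
}
fmt.Println("Successfully uploaded bytes: ", n)
```
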
__Parameters__ diff --git a/vendor/github.com/minio/minio-go/docs/checker.go.template b/vendor/github.com/minio/minio-go/docs/checker.go.template new file mode 100644 index 000000000..2e0f13a53 --- /dev/null +++ b/vendor/github.com/minio/minio-go/docs/checker.go.template @@ -0,0 +1,21 @@ +package main + +import ( + "fmt" + + "github.com/minio/minio-go" +) + +func main() { + // Use a secure connection. + ssl := true + + // Initialize minio client object. + minioClient, err := minio.New("play.minio.io:9000", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ssl) + if err != nil { + fmt.Println(err) + return + } + + {{.Text}} +} diff --git a/vendor/github.com/minio/minio-go/docs/validator.go b/vendor/github.com/minio/minio-go/docs/validator.go new file mode 100644 index 000000000..7d5cbaaab --- /dev/null +++ b/vendor/github.com/minio/minio-go/docs/validator.go @@ -0,0 +1,227 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + "text/template" + + "github.com/a8m/mark" + "github.com/gernest/wow" + "github.com/gernest/wow/spin" + "github.com/minio/cli" +) + +func init() { + // Validate go binary. + if _, err := exec.LookPath("go"); err != nil { + panic(err) + } +} + +var globalFlags = []cli.Flag{ + cli.StringFlag{ + Name: "m", + Value: "API.md", + Usage: "Path to markdown api documentation.", + }, + cli.StringFlag{ + Name: "t", + Value: "checker.go.template", + Usage: "Template used for generating the programs.", + }, + cli.IntFlag{ + Name: "skip", + Value: 2, + Usage: "Skip entries before validating the code.", + }, +} + +func runGofmt(path string) (msg string, err error) { + cmdArgs := []string{"-s", "-w", "-l", path} + cmd := exec.Command("gofmt", cmdArgs...) + stdoutStderr, err := cmd.CombinedOutput() + if err != nil { + return "", err + } + return string(stdoutStderr), nil +} + +func runGoImports(path string) (msg string, err error) { + cmdArgs := []string{"-w", path} + cmd := exec.Command("goimports", cmdArgs...) + stdoutStderr, err := cmd.CombinedOutput() + if err != nil { + return string(stdoutStderr), err + } + return string(stdoutStderr), nil +} + +func runGoBuild(path string) (msg string, err error) { + // Go build the path. + cmdArgs := []string{"build", "-o", "/dev/null", path} + cmd := exec.Command("go", cmdArgs...) 
+ stdoutStderr, err := cmd.CombinedOutput() + if err != nil { + return string(stdoutStderr), err + } + return string(stdoutStderr), nil +} + +func validatorAction(ctx *cli.Context) error { + if !ctx.IsSet("m") || !ctx.IsSet("t") { + return nil + } + docPath := ctx.String("m") + var err error + docPath, err = filepath.Abs(docPath) + if err != nil { + return err + } + data, err := ioutil.ReadFile(docPath) + if err != nil { + return err + } + + templatePath := ctx.String("t") + templatePath, err = filepath.Abs(templatePath) + if err != nil { + return err + } + + skipEntries := ctx.Int("skip") + m := mark.New(string(data), &mark.Options{ + Gfm: true, // Github markdown support is enabled by default. + }) + + t, err := template.ParseFiles(templatePath) + if err != nil { + return err + } + + tmpDir, err := ioutil.TempDir("", "md-verifier") + if err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + entryN := 1 + for i := mark.NodeText; i < mark.NodeCheckbox; i++ { + if mark.NodeCode != mark.NodeType(i) { + m.AddRenderFn(mark.NodeType(i), func(node mark.Node) (s string) { + return "" + }) + continue + } + m.AddRenderFn(mark.NodeCode, func(node mark.Node) (s string) { + p, ok := node.(*mark.CodeNode) + if !ok { + return + } + p.Text = strings.NewReplacer("<", "<", ">", ">", """, `"`, "&", "&").Replace(p.Text) + if skipEntries > 0 { + skipEntries-- + return + } + + testFilePath := filepath.Join(tmpDir, "example.go") + w, werr := os.Create(testFilePath) + if werr != nil { + panic(werr) + } + t.Execute(w, p) + w.Sync() + w.Close() + entryN++ + + msg, err := runGofmt(testFilePath) + if err != nil { + fmt.Printf("Failed running gofmt on %s, with (%s):(%s)\n", testFilePath, msg, err) + os.Exit(-1) + } + + msg, err = runGoImports(testFilePath) + if err != nil { + fmt.Printf("Failed running gofmt on %s, with (%s):(%s)\n", testFilePath, msg, err) + os.Exit(-1) + } + + msg, err = runGoBuild(testFilePath) + if err != nil { + fmt.Printf("Failed running gobuild on %s, with (%s):(%s)\n", testFilePath, msg, err) + fmt.Printf("Code with possible issue in %s:\n%s", docPath, p.Text) + fmt.Printf("To test `go build %s`\n", testFilePath) + os.Exit(-1) + } + + // Once successfully built remove the test file + os.Remove(testFilePath) + return + }) + } + + w := wow.New(os.Stdout, spin.Get(spin.Moon), fmt.Sprintf(" Running validation tests in %s", tmpDir)) + + w.Start() + // Render markdown executes our checker on each code blocks. + _ = m.Render() + w.PersistWith(spin.Get(spin.Runner), " Successfully finished tests") + w.Stop() + + return nil +} + +func main() { + app := cli.NewApp() + app.Action = validatorAction + app.HideVersion = true + app.HideHelpCommand = true + app.Usage = "Validates code block sections inside API.md" + app.Author = "Minio.io" + app.Flags = globalFlags + // Help template for validator + app.CustomAppHelpTemplate = `NAME: + {{.Name}} - {{.Usage}} + +USAGE: + {{.Name}} {{if .VisibleFlags}}[FLAGS] {{end}}COMMAND{{if .VisibleFlags}} [COMMAND FLAGS | -h]{{end}} [ARGUMENTS...] 
+ +COMMANDS: + {{range .VisibleCommands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}} + {{end}}{{if .VisibleFlags}} +FLAGS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}} +TEMPLATE: + Validator uses Go's 'text/template' formatting so you need to ensure + your template is formatted correctly, check 'docs/checker.go.template' + +USAGE: + go run docs/validator.go -m docs/API.md -t /tmp/mycode.go.template + +` + app.Run(os.Args) + +} diff --git a/vendor/github.com/minio/minio-go/examples/minio/listenbucketnotification.go b/vendor/github.com/minio/minio-go/examples/minio/listenbucketnotification.go index 037e2251c..4c48510da 100644 --- a/vendor/github.com/minio/minio-go/examples/minio/listenbucketnotification.go +++ b/vendor/github.com/minio/minio-go/examples/minio/listenbucketnotification.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/examples/s3/bucketexists.go b/vendor/github.com/minio/minio-go/examples/s3/bucketexists.go index 945510db8..20dea30a3 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/bucketexists.go +++ b/vendor/github.com/minio/minio-go/examples/s3/bucketexists.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/examples/s3/composeobject.go b/vendor/github.com/minio/minio-go/examples/s3/composeobject.go index 8aec6c158..2f76ff053 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/composeobject.go +++ b/vendor/github.com/minio/minio-go/examples/s3/composeobject.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/examples/s3/copyobject.go b/vendor/github.com/minio/minio-go/examples/s3/copyobject.go index c1d92d73a..a7c3eca45 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/copyobject.go +++ b/vendor/github.com/minio/minio-go/examples/s3/copyobject.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/examples/s3/fgetobject-context.go b/vendor/github.com/minio/minio-go/examples/s3/fgetobject-context.go new file mode 100644 index 000000000..6004baa14 --- /dev/null +++ b/vendor/github.com/minio/minio-go/examples/s3/fgetobject-context.go @@ -0,0 +1,54 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "log" + "time" + + "context" + + "github.com/minio/minio-go" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname + // and my-filename.csv are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + if err != nil { + log.Fatalln(err) + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() + + if err := s3Client.FGetObjectWithContext(ctx, "my-bucketname", "my-objectname", "my-filename.csv", minio.GetObjectOptions{}); err != nil { + log.Fatalln(err) + } + log.Println("Successfully saved my-filename.csv") + +} diff --git a/vendor/github.com/minio/minio-go/examples/s3/fgetobject.go b/vendor/github.com/minio/minio-go/examples/s3/fgetobject.go index bef756dd6..819a34f91 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/fgetobject.go +++ b/vendor/github.com/minio/minio-go/examples/s3/fgetobject.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -38,7 +39,7 @@ func main() { log.Fatalln(err) } - if err := s3Client.FGetObject("my-bucketname", "my-objectname", "my-filename.csv"); err != nil { + if err := s3Client.FGetObject("my-bucketname", "my-objectname", "my-filename.csv", minio.GetObjectOptions{}); err != nil { log.Fatalln(err) } log.Println("Successfully saved my-filename.csv") diff --git a/vendor/github.com/minio/minio-go/examples/s3/fputencrypted-object.go b/vendor/github.com/minio/minio-go/examples/s3/fputencrypted-object.go new file mode 100644 index 000000000..96eec7e8f --- /dev/null +++ b/vendor/github.com/minio/minio-go/examples/s3/fputencrypted-object.go @@ -0,0 +1,80 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" + "github.com/minio/minio-go/pkg/encrypt" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and + // my-objectname are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + if err != nil { + log.Fatalln(err) + } + + // Specify a local file that we will upload + filePath := "my-testfile" + + //// Build an asymmetric key from private and public files + // + // privateKey, err := ioutil.ReadFile("private.key") + // if err != nil { + // t.Fatal(err) + // } + // + // publicKey, err := ioutil.ReadFile("public.key") + // if err != nil { + // t.Fatal(err) + // } + // + // asymmetricKey, err := NewAsymmetricKey(privateKey, publicKey) + // if err != nil { + // t.Fatal(err) + // } + //// + + // Build a symmetric key + symmetricKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00")) + + // Build encryption materials which will encrypt uploaded data + cbcMaterials, err := encrypt.NewCBCSecureMaterials(symmetricKey) + if err != nil { + log.Fatalln(err) + } + + // Encrypt file content and upload to the server + n, err := s3Client.FPutEncryptedObject("my-bucketname", "my-objectname", filePath, cbcMaterials) + if err != nil { + log.Fatalln(err) + } + + log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.") +} diff --git a/vendor/github.com/minio/minio-go/examples/s3/fputobject-context.go b/vendor/github.com/minio/minio-go/examples/s3/fputobject-context.go new file mode 100644 index 000000000..d7c941c2b --- /dev/null +++ b/vendor/github.com/minio/minio-go/examples/s3/fputobject-context.go @@ -0,0 +1,53 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "log" + "time" + + "context" + + "github.com/minio/minio-go" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname + // and my-filename.csv are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. 
+ + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + if err != nil { + log.Fatalln(err) + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() + + if _, err := s3Client.FPutObjectWithContext(ctx, "my-bucketname", "my-objectname", "my-filename.csv", minio.PutObjectOptions{ContentType: "application/csv"}); err != nil { + log.Fatalln(err) + } + log.Println("Successfully uploaded my-filename.csv") +} diff --git a/vendor/github.com/minio/minio-go/examples/s3/fputobject.go b/vendor/github.com/minio/minio-go/examples/s3/fputobject.go index f4e60acff..34d876804 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/fputobject.go +++ b/vendor/github.com/minio/minio-go/examples/s3/fputobject.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -38,7 +39,9 @@ func main() { log.Fatalln(err) } - if _, err := s3Client.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", "application/csv"); err != nil { + if _, err := s3Client.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", minio.PutObjectOptions{ + ContentType: "application/csv", + }); err != nil { log.Fatalln(err) } log.Println("Successfully uploaded my-filename.csv") diff --git a/vendor/github.com/minio/minio-go/examples/s3/get-encrypted-object.go b/vendor/github.com/minio/minio-go/examples/s3/get-encrypted-object.go index 8f51f26ae..9783bebe8 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/get-encrypted-object.go +++ b/vendor/github.com/minio/minio-go/examples/s3/get-encrypted-object.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/examples/s3/getbucketnotification.go b/vendor/github.com/minio/minio-go/examples/s3/getbucketnotification.go index 67f010ef3..19349baaf 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/getbucketnotification.go +++ b/vendor/github.com/minio/minio-go/examples/s3/getbucketnotification.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/examples/s3/getbucketpolicy.go b/vendor/github.com/minio/minio-go/examples/s3/getbucketpolicy.go index e5f960403..f9ac89b61 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/getbucketpolicy.go +++ b/vendor/github.com/minio/minio-go/examples/s3/getbucketpolicy.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/examples/s3/getobject-context.go b/vendor/github.com/minio/minio-go/examples/s3/getobject-context.go new file mode 100644 index 000000000..c7d41707a --- /dev/null +++ b/vendor/github.com/minio/minio-go/examples/s3/getobject-context.go @@ -0,0 +1,73 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "io" + "log" + "os" + "time" + + "context" + + "github.com/minio/minio-go" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and + // my-testfile are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", true) + if err != nil { + log.Fatalln(err) + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() + + opts := minio.GetObjectOptions{} + opts.SetModified(time.Now().Round(10 * time.Minute)) // get object if was modified within the last 10 minutes + reader, err := s3Client.GetObjectWithContext(ctx, "my-bucketname", "my-objectname", opts) + if err != nil { + log.Fatalln(err) + } + defer reader.Close() + + localFile, err := os.Create("my-testfile") + if err != nil { + log.Fatalln(err) + } + defer localFile.Close() + + stat, err := reader.Stat() + if err != nil { + log.Fatalln(err) + } + + if _, err := io.CopyN(localFile, reader, stat.Size); err != nil { + log.Fatalln(err) + } +} diff --git a/vendor/github.com/minio/minio-go/examples/s3/getobject.go b/vendor/github.com/minio/minio-go/examples/s3/getobject.go index 96bb85505..e17ef8172 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/getobject.go +++ b/vendor/github.com/minio/minio-go/examples/s3/getobject.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -40,7 +41,7 @@ func main() { log.Fatalln(err) } - reader, err := s3Client.GetObject("my-bucketname", "my-objectname") + reader, err := s3Client.GetObject("my-bucketname", "my-objectname", minio.GetObjectOptions{}) if err != nil { log.Fatalln(err) } diff --git a/vendor/github.com/minio/minio-go/examples/s3/listbucketpolicies.go b/vendor/github.com/minio/minio-go/examples/s3/listbucketpolicies.go index 19a2d1b2b..43edd0c3d 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/listbucketpolicies.go +++ b/vendor/github.com/minio/minio-go/examples/s3/listbucketpolicies.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/examples/s3/listbuckets.go b/vendor/github.com/minio/minio-go/examples/s3/listbuckets.go index 81a99e627..5eae587b4 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/listbuckets.go +++ b/vendor/github.com/minio/minio-go/examples/s3/listbuckets.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/examples/s3/listincompleteuploads.go b/vendor/github.com/minio/minio-go/examples/s3/listincompleteuploads.go index 34771e44b..a5a79b603 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/listincompleteuploads.go +++ b/vendor/github.com/minio/minio-go/examples/s3/listincompleteuploads.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/examples/s3/listobjects-N.go b/vendor/github.com/minio/minio-go/examples/s3/listobjects-N.go index 5dde36746..55bceb470 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/listobjects-N.go +++ b/vendor/github.com/minio/minio-go/examples/s3/listobjects-N.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/examples/s3/listobjects.go b/vendor/github.com/minio/minio-go/examples/s3/listobjects.go index 4fd5c069a..1da2e3faa 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/listobjects.go +++ b/vendor/github.com/minio/minio-go/examples/s3/listobjects.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/examples/s3/listobjectsV2.go b/vendor/github.com/minio/minio-go/examples/s3/listobjectsV2.go index b52b4dab8..190aec36b 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/listobjectsV2.go +++ b/vendor/github.com/minio/minio-go/examples/s3/listobjectsV2.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/examples/s3/makebucket.go b/vendor/github.com/minio/minio-go/examples/s3/makebucket.go index ae222a8af..419c96cf2 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/makebucket.go +++ b/vendor/github.com/minio/minio-go/examples/s3/makebucket.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/examples/s3/presignedgetobject.go b/vendor/github.com/minio/minio-go/examples/s3/presignedgetobject.go index 11be0c0a4..fd7fb9e8d 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/presignedgetobject.go +++ b/vendor/github.com/minio/minio-go/examples/s3/presignedgetobject.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/examples/s3/presignedheadobject.go b/vendor/github.com/minio/minio-go/examples/s3/presignedheadobject.go index 0332049e5..8dbc0a4b7 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/presignedheadobject.go +++ b/vendor/github.com/minio/minio-go/examples/s3/presignedheadobject.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go b/vendor/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go index 3f37cef38..205ac95a3 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go +++ b/vendor/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/vendor/github.com/minio/minio-go/examples/s3/presignedputobject.go b/vendor/github.com/minio/minio-go/examples/s3/presignedputobject.go index 3db6f6e7b..b2f8b4f82 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/presignedputobject.go +++ b/vendor/github.com/minio/minio-go/examples/s3/presignedputobject.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/examples/s3/put-encrypted-object.go b/vendor/github.com/minio/minio-go/examples/s3/put-encrypted-object.go index b8f7e12f2..cdf09ac53 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/put-encrypted-object.go +++ b/vendor/github.com/minio/minio-go/examples/s3/put-encrypted-object.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -75,7 +76,7 @@ func main() { } // Encrypt file content and upload to the server - n, err := s3Client.PutEncryptedObject("my-bucketname", "my-objectname", file, cbcMaterials, nil, nil) + n, err := s3Client.PutEncryptedObject("my-bucketname", "my-objectname", file, cbcMaterials) if err != nil { log.Fatalln(err) } diff --git a/vendor/github.com/minio/minio-go/examples/s3/putobject-context.go b/vendor/github.com/minio/minio-go/examples/s3/putobject-context.go new file mode 100644 index 000000000..acc923f7e --- /dev/null +++ b/vendor/github.com/minio/minio-go/examples/s3/putobject-context.go @@ -0,0 +1,68 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "log" + "os" + "time" + + "context" + + "github.com/minio/minio-go" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and + // my-objectname are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. 
+ + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + if err != nil { + log.Fatalln(err) + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() + + object, err := os.Open("my-testfile") + if err != nil { + log.Fatalln(err) + } + defer object.Close() + + objectStat, err := object.Stat() + if err != nil { + log.Fatalln(err) + } + + n, err := s3Client.PutObjectWithContext(ctx, "my-bucketname", "my-objectname", object, objectStat.Size(), minio.PutObjectOptions{ + ContentType: "application/octet-stream", + }) + if err != nil { + log.Fatalln(err) + } + log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.") +} diff --git a/vendor/github.com/minio/minio-go/examples/s3/putobject-getobject-sse.go b/vendor/github.com/minio/minio-go/examples/s3/putobject-getobject-sse.go index 92e6a4840..3d3b2fd2d 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/putobject-getobject-sse.go +++ b/vendor/github.com/minio/minio-go/examples/s3/putobject-getobject-sse.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -24,7 +25,6 @@ import ( "encoding/base64" "io/ioutil" "log" - "net/http" minio "github.com/minio/minio-go" ) @@ -54,24 +54,24 @@ func main() { // of the encryption key or to decrypt the contents of the // encrypted object. That means, if you lose the encryption // key, you lose the object. - var metadata = map[string][]string{ - "x-amz-server-side-encryption-customer-algorithm": []string{"AES256"}, - "x-amz-server-side-encryption-customer-key": []string{encryptionKey}, - "x-amz-server-side-encryption-customer-key-MD5": []string{encryptionKeyMD5}, + var metadata = map[string]string{ + "x-amz-server-side-encryption-customer-algorithm": "AES256", + "x-amz-server-side-encryption-customer-key": encryptionKey, + "x-amz-server-side-encryption-customer-key-MD5": encryptionKeyMD5, } // minioClient.TraceOn(os.Stderr) // Enable to debug. - _, err = minioClient.PutObjectWithMetadata("mybucket", "my-encrypted-object.txt", content, metadata, nil) + _, err = minioClient.PutObject("mybucket", "my-encrypted-object.txt", content, 11, minio.PutObjectOptions{UserMetadata: metadata}) if err != nil { log.Fatalln(err) } - var reqHeaders = minio.RequestHeaders{Header: http.Header{}} + opts := minio.GetObjectOptions{} for k, v := range metadata { - reqHeaders.Set(k, v[0]) + opts.Set(k, v) } coreClient := minio.Core{minioClient} - reader, _, err := coreClient.GetObject("mybucket", "my-encrypted-object.txt", reqHeaders) + reader, _, err := coreClient.GetObject("mybucket", "my-encrypted-object.txt", opts) if err != nil { log.Fatalln(err) } diff --git a/vendor/github.com/minio/minio-go/examples/s3/putobject-progress.go b/vendor/github.com/minio/minio-go/examples/s3/putobject-progress.go index 26e77b9e6..0e92dd65e 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/putobject-progress.go +++ b/vendor/github.com/minio/minio-go/examples/s3/putobject-progress.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -39,7 +40,7 @@ func main() { log.Fatalln(err) } - reader, err := s3Client.GetObject("my-bucketname", "my-objectname") + reader, err := s3Client.GetObject("my-bucketname", "my-objectname", minio.GetObjectOptions{}) if err != nil { log.Fatalln(err) } @@ -54,10 +55,8 @@ func main() { // the Reads inside. progress := pb.New64(objectInfo.Size) progress.Start() + n, err := s3Client.PutObject("my-bucketname", "my-objectname-progress", reader, objectInfo.Size, minio.PutObjectOptions{ContentType: "application/octet-stream", Progress: progress}) - n, err := s3Client.PutObjectWithProgress("my-bucketname", "my-objectname-progress", reader, map[string][]string{ - "Content-Type": []string{"application/octet-stream"}, - }, progress) if err != nil { log.Fatalln(err) } diff --git a/vendor/github.com/minio/minio-go/examples/s3/putobject-s3-accelerate.go b/vendor/github.com/minio/minio-go/examples/s3/putobject-s3-accelerate.go index a26415c7a..06345cd87 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/putobject-s3-accelerate.go +++ b/vendor/github.com/minio/minio-go/examples/s3/putobject-s3-accelerate.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -48,7 +49,12 @@ func main() { } defer object.Close() - n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, "application/octet-stream") + objectStat, err := object.Stat() + if err != nil { + log.Fatalln(err) + } + + n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, objectStat.Size(), minio.PutObjectOptions{ContentType: "application/octet-stream"}) if err != nil { log.Fatalln(err) } diff --git a/vendor/github.com/minio/minio-go/examples/s3/putobject-streaming.go b/vendor/github.com/minio/minio-go/examples/s3/putobject-streaming.go index d10407dbd..85b78dd45 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/putobject-streaming.go +++ b/vendor/github.com/minio/minio-go/examples/s3/putobject-streaming.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -45,7 +46,7 @@ func main() { } defer object.Close() - n, err := s3Client.PutObjectStreaming("my-bucketname", "my-objectname", object) + n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, -1, minio.PutObjectOptions{}) if err != nil { log.Fatalln(err) } diff --git a/vendor/github.com/minio/minio-go/examples/s3/putobject.go b/vendor/github.com/minio/minio-go/examples/s3/putobject.go index caa731302..b9e4ff16c 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/putobject.go +++ b/vendor/github.com/minio/minio-go/examples/s3/putobject.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -44,8 +45,12 @@ func main() { log.Fatalln(err) } defer object.Close() + objectStat, err := object.Stat() + if err != nil { + log.Fatalln(err) + } - n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, "application/octet-stream") + n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, objectStat.Size(), minio.PutObjectOptions{ContentType: "application/octet-stream"}) if err != nil { log.Fatalln(err) } diff --git a/vendor/github.com/minio/minio-go/examples/s3/removeallbucketnotification.go b/vendor/github.com/minio/minio-go/examples/s3/removeallbucketnotification.go index 0f5f3a74d..1186afad8 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/removeallbucketnotification.go +++ b/vendor/github.com/minio/minio-go/examples/s3/removeallbucketnotification.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/examples/s3/removebucket.go b/vendor/github.com/minio/minio-go/examples/s3/removebucket.go index fb013ca24..7a7737ee0 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/removebucket.go +++ b/vendor/github.com/minio/minio-go/examples/s3/removebucket.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/examples/s3/removeincompleteupload.go b/vendor/github.com/minio/minio-go/examples/s3/removeincompleteupload.go index d486182af..31cc8790b 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/removeincompleteupload.go +++ b/vendor/github.com/minio/minio-go/examples/s3/removeincompleteupload.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/examples/s3/removeobject.go b/vendor/github.com/minio/minio-go/examples/s3/removeobject.go index 13b00b41e..7e5848576 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/removeobject.go +++ b/vendor/github.com/minio/minio-go/examples/s3/removeobject.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/vendor/github.com/minio/minio-go/examples/s3/removeobjects.go b/vendor/github.com/minio/minio-go/examples/s3/removeobjects.go index 594606929..b912bc85d 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/removeobjects.go +++ b/vendor/github.com/minio/minio-go/examples/s3/removeobjects.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,7 +21,6 @@ package main import ( "log" - "strconv" "github.com/minio/minio-go" ) @@ -44,8 +44,12 @@ func main() { // Send object names that are needed to be removed to objectsCh go func() { defer close(objectsCh) - for i := 0; i < 10; i++ { - objectsCh <- "/path/to/my-objectname" + strconv.Itoa(i) + // List all objects from a bucket-name with a matching prefix. + for object := range s3Client.ListObjects("my-bucketname", "my-prefixname", true, doneCh) { + if object.Err != nil { + log.Fatalln(object.Err) + } + objectsCh <- object.Key } }() diff --git a/vendor/github.com/minio/minio-go/examples/s3/setbucketnotification.go b/vendor/github.com/minio/minio-go/examples/s3/setbucketnotification.go index 5fe1e318e..b5af30f06 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/setbucketnotification.go +++ b/vendor/github.com/minio/minio-go/examples/s3/setbucketnotification.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/examples/s3/setbucketpolicy.go b/vendor/github.com/minio/minio-go/examples/s3/setbucketpolicy.go index 40906ee92..c81fb5050 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/setbucketpolicy.go +++ b/vendor/github.com/minio/minio-go/examples/s3/setbucketpolicy.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/examples/s3/statobject.go b/vendor/github.com/minio/minio-go/examples/s3/statobject.go index 4c5453a07..0b27a83b3 100644 --- a/vendor/github.com/minio/minio-go/examples/s3/statobject.go +++ b/vendor/github.com/minio/minio-go/examples/s3/statobject.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -37,7 +38,7 @@ func main() { if err != nil { log.Fatalln(err) } - stat, err := s3Client.StatObject("my-bucketname", "my-objectname") + stat, err := s3Client.StatObject("my-bucketname", "my-objectname", minio.StatObjectOptions{}) if err != nil { log.Fatalln(err) } diff --git a/vendor/github.com/minio/minio-go/functional_tests.go b/vendor/github.com/minio/minio-go/functional_tests.go index ec554e4fe..2e05ebda4 100644 --- a/vendor/github.com/minio/minio-go/functional_tests.go +++ b/vendor/github.com/minio/minio-go/functional_tests.go @@ -1,7 +1,8 @@ // +build ignore /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,6 +21,7 @@ package main import ( "bytes" + "context" "encoding/hex" "encoding/json" "errors" @@ -27,27 +29,24 @@ import ( "io" "io/ioutil" "math/rand" + "mime/multipart" "net/http" "net/url" "os" + "path/filepath" "reflect" "strconv" "strings" "time" + humanize "github.com/dustin/go-humanize" minio "github.com/minio/minio-go" log "github.com/sirupsen/logrus" - "github.com/dustin/go-humanize" "github.com/minio/minio-go/pkg/encrypt" "github.com/minio/minio-go/pkg/policy" ) -const ( - sixtyFiveMiB = 65 * humanize.MiByte // 65MiB - thirtyThreeKiB = 33 * humanize.KiByte // 33KiB -) - const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569" const ( letterIdxBits = 6 // 6 bits to represent a letter index @@ -93,6 +92,19 @@ func successLogger(function string, args map[string]interface{}, startTime time. return log.WithFields(fields) } +// As few of the features are not available in Gateway(s) currently, Check if err value is NotImplemented, +// and log as NA in that case and continue execution. Otherwise log as failure and return +func logError(function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) { + // If server returns NotImplemented we assume it is gateway mode and hence log it as info and move on to next tests + // Special case for ComposeObject API as it is implemented on client side and adds specific error details like `Error in upload-part-copy` in + // addition to NotImplemented error returned from server + if isErrNotImplemented(err) { + ignoredLog(function, args, startTime, message).Info() + } else { + failureLog(function, args, startTime, alert, message, err).Fatal() + } +} + // log failed test runs func failureLog(function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) *log.Entry { // calculate the test case duration @@ -119,6 +131,47 @@ func ignoredLog(function string, args map[string]interface{}, startTime time.Tim return log.WithFields(fields) } +// Delete objects in given bucket, recursively +func cleanupBucket(bucketName string, c *minio.Client) error { + // Create a done channel to control 'ListObjectsV2' go routine. + doneCh := make(chan struct{}) + // Exit cleanly upon return. 
+ defer close(doneCh) + // Iterate over all objects in the bucket via listObjectsV2 and delete + for objCh := range c.ListObjectsV2(bucketName, "", true, doneCh) { + if objCh.Err != nil { + return objCh.Err + } + if objCh.Key != "" { + err := c.RemoveObject(bucketName, objCh.Key) + if err != nil { + return err + } + } + } + for objPartInfo := range c.ListIncompleteUploads(bucketName, "", true, doneCh) { + if objPartInfo.Err != nil { + return objPartInfo.Err + } + if objPartInfo.Key != "" { + err := c.RemoveIncompleteUpload(bucketName, objPartInfo.Key) + if err != nil { + return err + } + } + } + // objects are already deleted, clear the buckets now + err := c.RemoveBucket(bucketName) + if err != nil { + return err + } + return err +} + +func isErrNotImplemented(err error) bool { + return minio.ToErrorResponse(err).Code == "NotImplemented" +} + func init() { // If server endpoint is not set, all tests default to // using https://play.minio.io:9000 @@ -130,19 +183,13 @@ func init() { } } -func getDataDir() (dir string) { - dir = os.Getenv("MINT_DATA_DIR") - if dir == "" { - dir = "/mint/data" - } - return -} +var mintDataDir = os.Getenv("MINT_DATA_DIR") -func getFilePath(filename string) (filepath string) { - if getDataDir() != "" { - filepath = getDataDir() + "/" + filename +func getMintDataDirFilePath(filename string) (fp string) { + if mintDataDir == "" { + return } - return + return filepath.Join(mintDataDir, filename) } type sizedReader struct { @@ -165,14 +212,17 @@ func (r *randomReader) Read(b []byte) (int, error) { } // read data from file if it exists or optionally create a buffer of particular size -func getDataReader(fileName string, size int) io.ReadCloser { - if _, err := os.Stat(getFilePath(fileName)); os.IsNotExist(err) { +func getDataReader(fileName string) io.ReadCloser { + if mintDataDir == "" { + size := dataFileMap[fileName] return &sizedReader{ - Reader: io.LimitReader(&randomReader{seed: []byte("a")}, int64(size)), - size: size, + Reader: io.LimitReader(&randomReader{ + seed: []byte("a"), + }, int64(size)), + size: size, } } - reader, _ := os.Open(getFilePath(fileName)) + reader, _ := os.Open(getMintDataDirFilePath(fileName)) return reader } @@ -194,6 +244,19 @@ func randString(n int, src rand.Source, prefix string) string { return prefix + string(b[0:30-len(prefix)]) } +var dataFileMap = map[string]int{ + "datafile-1-b": 1, + "datafile-10-kB": 10 * humanize.KiByte, + "datafile-33-kB": 33 * humanize.KiByte, + "datafile-100-kB": 100 * humanize.KiByte, + "datafile-1.03-MB": 1056 * humanize.KiByte, + "datafile-1-MB": 1 * humanize.MiByte, + "datafile-5-MB": 5 * humanize.MiByte, + "datafile-6-MB": 6 * humanize.MiByte, + "datafile-11-MB": 11 * humanize.MiByte, + "datafile-65-MB": 65 * humanize.MiByte, +} + func isQuickMode() bool { return os.Getenv("MODE") == "quick" } @@ -228,7 +291,8 @@ func testMakeBucketError() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client creation failed", err) + return } // Enable tracing, write to stderr. @@ -243,18 +307,90 @@ func testMakeBucketError() { // Make a new bucket in 'eu-central-1'. 
if err = c.MakeBucket(bucketName, region); err != nil { - failureLog(function, args, startTime, "", "MakeBucket Failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket Failed", err) + return } if err = c.MakeBucket(bucketName, region); err == nil { - failureLog(function, args, startTime, "", "Bucket already exists", err).Fatal() + logError(function, args, startTime, "", "Bucket already exists", err) + return } // Verify valid error response from server. if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { - failureLog(function, args, startTime, "", "Invalid error returned by server", err).Fatal() + logError(function, args, startTime, "", "Invalid error returned by server", err) + return } - if err = c.RemoveBucket(bucketName); err != nil { - failureLog(function, args, startTime, "", "Remove bucket failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return + } + successLogger(function, args, startTime).Info() +} + +func testMetadataSizeLimit() { + startTime := time.Now() + function := "PutObject(bucketName, objectName, reader, objectSize, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts.UserMetadata": "", + } + rand.Seed(startTime.Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(function, args, startTime, "", "Minio client creation failed", err) + return + } + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(function, args, startTime, "", "Make bucket failed", err) + return + } + + const HeaderSizeLimit = 8 * 1024 + const UserMetadataLimit = 2 * 1024 + + // Meta-data greater than the 2 KB limit of AWS - PUT calls with this meta-data should fail + metadata := make(map[string]string) + metadata["X-Amz-Meta-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+UserMetadataLimit-len("X-Amz-Meta-Mint-Test"))) + args["metadata"] = fmt.Sprint(metadata) + + _, err = c.PutObject(bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) + if err == nil { + logError(function, args, startTime, "", "Created object with user-defined metadata exceeding metadata size limits", nil) + return + } + + // Meta-data (headers) greater than the 8 KB limit of AWS - PUT calls with this meta-data should fail + metadata = make(map[string]string) + metadata["X-Amz-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+HeaderSizeLimit-len("X-Amz-Mint-Test"))) + args["metadata"] = fmt.Sprint(metadata) + _, err = c.PutObject(bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) + if err == nil { + logError(function, args, startTime, "", "Created object with headers exceeding header size limits", nil) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } successLogger(function, args, 
startTime).Info() @@ -289,7 +425,8 @@ func testMakeBucketRegions() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client creation failed", err) + return } // Enable tracing, write to stderr. @@ -304,11 +441,14 @@ func testMakeBucketRegions() { // Make a new bucket in 'eu-central-1'. if err = c.MakeBucket(bucketName, region); err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } - if err = c.RemoveBucket(bucketName); err != nil { - failureLog(function, args, startTime, "", "Remove bucket failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } // Make a new bucket with '.' in its name, in 'us-west-2'. This @@ -317,14 +457,15 @@ func testMakeBucketRegions() { region = "us-west-2" args["region"] = region if err = c.MakeBucket(bucketName+".withperiod", region); err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } - // Remove the newly created bucket. - if err = c.RemoveBucket(bucketName + ".withperiod"); err != nil { - failureLog(function, args, startTime, "", "Remove bucket failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName+".withperiod", c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } - successLogger(function, args, startTime).Info() } @@ -332,11 +473,11 @@ func testMakeBucketRegions() { func testPutObjectReadAt() { // initialize logging params startTime := time.Now() - function := "PutObject(bucketName, objectName, reader, objectContentType)" + function := "PutObject(bucketName, objectName, reader, opts)" args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "objectContentType": "", + "bucketName": "", + "objectName": "", + "opts": "objectContentType", } // Seed random based on current time. @@ -350,7 +491,8 @@ func testPutObjectReadAt() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client object creation failed", err) + return } // Enable tracing, write to stderr. @@ -366,12 +508,12 @@ func testPutObjectReadAt() { // Make a new bucket. err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "Make bucket failed", err).Fatal() + logError(function, args, startTime, "", "Make bucket failed", err) + return } - // Generate data using 4 parts so that all 3 'workers' are utilized and a part is leftover. - // Use different data for each part for multipart tests to ensure part order at the end. 
- var reader = getDataReader("datafile-65-MB", sixtyFiveMiB) + bufSize := dataFileMap["datafile-65-MB"] + var reader = getDataReader("datafile-65-MB") defer reader.Close() // Save the data @@ -382,47 +524,50 @@ func testPutObjectReadAt() { objectContentType := "binary/octet-stream" args["objectContentType"] = objectContentType - n, err := c.PutObject(bucketName, objectName, reader, objectContentType) - + n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: objectContentType}) if err != nil { - failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + logError(function, args, startTime, "", "PutObject failed", err) + return } - if n != int64(sixtyFiveMiB) { - failureLog(function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(sixtyFiveMiB)+" got "+string(n), err).Fatal() + if n != int64(bufSize) { + logError(function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(bufSize)+" got "+string(n), err) + return } // Read the data back - r, err := c.GetObject(bucketName, objectName) + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "Get Object failed", err).Fatal() + logError(function, args, startTime, "", "Get Object failed", err) + return } st, err := r.Stat() if err != nil { - failureLog(function, args, startTime, "", "Stat Object failed", err).Fatal() + logError(function, args, startTime, "", "Stat Object failed", err) + return } - if st.Size != int64(sixtyFiveMiB) { - failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(sixtyFiveMiB)+" got "+string(st.Size), err).Fatal() + if st.Size != int64(bufSize) { + logError(function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err) + return } if st.ContentType != objectContentType { - failureLog(function, args, startTime, "", "Content types don't match", err).Fatal() + logError(function, args, startTime, "", "Content types don't match", err) + return } if err := r.Close(); err != nil { - failureLog(function, args, startTime, "", "Object Close failed", err).Fatal() + logError(function, args, startTime, "", "Object Close failed", err) + return } if err := r.Close(); err == nil { - failureLog(function, args, startTime, "", "Object is already closed, didn't return error on Close", err).Fatal() + logError(function, args, startTime, "", "Object is already closed, didn't return error on Close", err) + return } - err = c.RemoveObject(bucketName, objectName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() - } - err = c.RemoveBucket(bucketName) - - if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } successLogger(function, args, startTime).Info() @@ -432,11 +577,11 @@ func testPutObjectReadAt() { func testPutObjectWithMetadata() { // initialize logging params startTime := time.Now() - function := "PutObjectWithMetadata(bucketName, objectName, reader, metadata, progress)" + function := "PutObject(bucketName, objectName, reader,size, opts)" args := map[string]interface{}{ "bucketName": "", "objectName": "", - "metadata": "", + "opts": 
"minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", } if isQuickMode() { @@ -455,7 +600,8 @@ func testPutObjectWithMetadata() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client object creation failed", err) + return } // Enable tracing, write to stderr. @@ -471,12 +617,12 @@ func testPutObjectWithMetadata() { // Make a new bucket. err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "Make bucket failed", err).Fatal() + logError(function, args, startTime, "", "Make bucket failed", err) + return } - // Generate data using 2 parts - // Use different data in each part for multipart tests to ensure part order at the end. - var reader = getDataReader("datafile-65-MB", sixtyFiveMiB) + bufSize := dataFileMap["datafile-65-MB"] + var reader = getDataReader("datafile-65-MB") defer reader.Close() // Save the data @@ -486,50 +632,55 @@ func testPutObjectWithMetadata() { // Object custom metadata customContentType := "custom/contenttype" - n, err := c.PutObjectWithMetadata(bucketName, objectName, reader, map[string][]string{ - "Content-Type": {customContentType}, - }, nil) args["metadata"] = map[string][]string{ "Content-Type": {customContentType}, } + n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ + ContentType: customContentType}) if err != nil { - failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + logError(function, args, startTime, "", "PutObject failed", err) + return } - if n != int64(sixtyFiveMiB) { - failureLog(function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(sixtyFiveMiB)+" got "+string(n), err).Fatal() + if n != int64(bufSize) { + logError(function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(bufSize)+" got "+string(n), err) + return } // Read the data back - r, err := c.GetObject(bucketName, objectName) + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + logError(function, args, startTime, "", "GetObject failed", err) + return } st, err := r.Stat() if err != nil { - failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + logError(function, args, startTime, "", "Stat failed", err) + return } - if st.Size != int64(sixtyFiveMiB) { - failureLog(function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(sixtyFiveMiB)+" got "+string(st.Size), err).Fatal() + if st.Size != int64(bufSize) { + logError(function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) + return } if st.ContentType != customContentType { - failureLog(function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err).Fatal() + logError(function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err) + return } if err := r.Close(); err != nil { - failureLog(function, args, startTime, "", "Object Close failed", err).Fatal() + logError(function, args, startTime, "", "Object Close failed", err) + return } if err := r.Close(); err == nil { - 
failureLog(function, args, startTime, "", "Object already closed, should respond with error", err).Fatal() + logError(function, args, startTime, "", "Object already closed, should respond with error", err) + return } - if err = c.RemoveObject(bucketName, objectName); err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() - } - - if err = c.RemoveBucket(bucketName); err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } successLogger(function, args, startTime).Info() @@ -540,10 +691,12 @@ func testPutObjectStreaming() { // initialize logging params objectName := "test-object" startTime := time.Now() - function := "PutObjectStreaming(bucketName, objectName, reader)" + function := "PutObject(bucketName, objectName, reader,size,opts)" args := map[string]interface{}{ "bucketName": "", "objectName": objectName, + "size": -1, + "opts": "", } // Seed random based on current time. @@ -557,7 +710,8 @@ func testPutObjectStreaming() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client object creation failed", err) + return } // Enable tracing, write to stderr. @@ -573,7 +727,8 @@ func testPutObjectStreaming() { // Make a new bucket. err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } // Upload an object. @@ -581,27 +736,24 @@ func testPutObjectStreaming() { for _, size := range sizes { data := bytes.Repeat([]byte("a"), int(size)) - n, err := c.PutObjectStreaming(bucketName, objectName, bytes.NewReader(data)) + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(data), int64(size), minio.PutObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "PutObjectStreaming failed", err).Fatal() + logError(function, args, startTime, "", "PutObjectStreaming failed", err) + return } if n != size { - failureLog(function, args, startTime, "", "Expected upload object size doesn't match with PutObjectStreaming return value", err).Fatal() + logError(function, args, startTime, "", "Expected upload object size doesn't match with PutObjectStreaming return value", err) + return } } - // Remove the object. - err = c.RemoveObject(bucketName, objectName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } - // Remove the bucket. - err = c.RemoveBucket(bucketName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() - } successLogger(function, args, startTime).Info() } @@ -627,7 +779,8 @@ func testListPartiallyUploaded() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client object creation failed", err) + return } // Set user agent. @@ -643,18 +796,21 @@ func testListPartiallyUploaded() { // Make a new bucket. 
err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } - r := bytes.NewReader(bytes.Repeat([]byte("0"), sixtyFiveMiB*2)) + bufSize := dataFileMap["datafile-65-MB"] + r := bytes.NewReader(bytes.Repeat([]byte("0"), bufSize*2)) reader, writer := io.Pipe() go func() { i := 0 for i < 25 { - _, cerr := io.CopyN(writer, r, (sixtyFiveMiB*2)/25) + _, cerr := io.CopyN(writer, r, (int64(bufSize)*2)/25) if cerr != nil { - failureLog(function, args, startTime, "", "Copy failed", err).Fatal() + logError(function, args, startTime, "", "Copy failed", err) + return } i++ r.Seek(0, 0) @@ -665,12 +821,14 @@ func testListPartiallyUploaded() { objectName := bucketName + "-resumable" args["objectName"] = objectName - _, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream") + _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize*2), minio.PutObjectOptions{ContentType: "application/octet-stream"}) if err == nil { - failureLog(function, args, startTime, "", "PutObject should fail", err).Fatal() + logError(function, args, startTime, "", "PutObject should fail", err) + return } if !strings.Contains(err.Error(), "proactively closed to be verified later") { - failureLog(function, args, startTime, "", "String not found in PutObject output", err).Fatal() + logError(function, args, startTime, "", "String not found in PutObject output", err) + return } doneCh := make(chan struct{}) @@ -681,14 +839,17 @@ func testListPartiallyUploaded() { multiPartObjectCh := c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) for multiPartObject := range multiPartObjectCh { if multiPartObject.Err != nil { - failureLog(function, args, startTime, "", "Multipart object error", multiPartObject.Err).Fatal() + logError(function, args, startTime, "", "Multipart object error", multiPartObject.Err) + return } } - err = c.RemoveBucket(bucketName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } + successLogger(function, args, startTime).Info() } @@ -713,7 +874,8 @@ func testGetObjectSeekEnd() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client object creation failed", err) + return } // Enable tracing, write to stderr. @@ -729,11 +891,13 @@ func testGetObjectSeekEnd() { // Make a new bucket. err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } // Generate 33K of data. 
- var reader = getDataReader("datafile-33-kB", thirtyThreeKiB) + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") defer reader.Close() // Save the data @@ -742,63 +906,84 @@ func testGetObjectSeekEnd() { buf, err := ioutil.ReadAll(reader) if err != nil { - failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal() + logError(function, args, startTime, "", "ReadAll failed", err) + return } - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream") + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { - failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + logError(function, args, startTime, "", "PutObject failed", err) + return } - if n != int64(thirtyThreeKiB) { - failureLog(function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal() + if n != int64(bufSize) { + logError(function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(bufSize))+" got "+string(n), err) + return } // Read the data back - r, err := c.GetObject(bucketName, objectName) + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + logError(function, args, startTime, "", "GetObject failed", err) + return } st, err := r.Stat() if err != nil { - failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + logError(function, args, startTime, "", "Stat failed", err) + return } - if st.Size != int64(thirtyThreeKiB) { - failureLog(function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(st.Size), err).Fatal() + if st.Size != int64(bufSize) { + logError(function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err) + return } pos, err := r.Seek(-100, 2) if err != nil { - failureLog(function, args, startTime, "", "Object Seek failed", err).Fatal() + logError(function, args, startTime, "", "Object Seek failed", err) + return } if pos != st.Size-100 { - failureLog(function, args, startTime, "", "Incorrect position", err).Fatal() + logError(function, args, startTime, "", "Incorrect position", err) + return } buf2 := make([]byte, 100) m, err := io.ReadFull(r, buf2) if err != nil { - failureLog(function, args, startTime, "", "Error reading through io.ReadFull", err).Fatal() + logError(function, args, startTime, "", "Error reading through io.ReadFull", err) + return } if m != len(buf2) { - failureLog(function, args, startTime, "", "Number of bytes dont match, expected "+string(len(buf2))+" got "+string(m), err).Fatal() + logError(function, args, startTime, "", "Number of bytes dont match, expected "+string(len(buf2))+" got "+string(m), err) + return } hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:]) hexBuf2 := fmt.Sprintf("%02x", buf2[:m]) if hexBuf1 != hexBuf2 { - failureLog(function, args, startTime, "", "Values at same index dont match", err).Fatal() + logError(function, args, startTime, "", "Values at same index dont match", err) + return } pos, err = r.Seek(-100, 2) if err != nil { - failureLog(function, args, startTime, "", "Object Seek failed", err).Fatal() + logError(function, args, startTime, "", "Object Seek failed", err) + return } if pos 
!= st.Size-100 { - failureLog(function, args, startTime, "", "Incorrect position", err).Fatal() + logError(function, args, startTime, "", "Incorrect position", err) + return } if err = r.Close(); err != nil { - failureLog(function, args, startTime, "", "ObjectClose failed", err).Fatal() + logError(function, args, startTime, "", "ObjectClose failed", err) + return } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return + } + successLogger(function, args, startTime).Info() } @@ -823,7 +1008,8 @@ func testGetObjectClosedTwice() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client object creation failed", err) + return } // Enable tracing, write to stderr. @@ -839,54 +1025,61 @@ func testGetObjectClosedTwice() { // Make a new bucket. err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } // Generate 33K of data. - var reader = getDataReader("datafile-33-kB", thirtyThreeKiB) + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") defer reader.Close() // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - n, err := c.PutObject(bucketName, objectName, reader, "binary/octet-stream") + n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { - failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + logError(function, args, startTime, "", "PutObject failed", err) + return } - if n != int64(thirtyThreeKiB) { - failureLog(function, args, startTime, "", "PutObject response doesn't match sent bytes, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal() + if n != int64(bufSize) { + logError(function, args, startTime, "", "PutObject response doesn't match sent bytes, expected "+string(int64(bufSize))+" got "+string(n), err) + return } // Read the data back - r, err := c.GetObject(bucketName, objectName) + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + logError(function, args, startTime, "", "GetObject failed", err) + return } st, err := r.Stat() if err != nil { - failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + logError(function, args, startTime, "", "Stat failed", err) + return } - if st.Size != int64(thirtyThreeKiB) { - failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(st.Size), err).Fatal() + if st.Size != int64(bufSize) { + logError(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err) + return } if err := r.Close(); err != nil { - failureLog(function, args, startTime, "", "Object Close failed", err).Fatal() + logError(function, args, startTime, "", "Object Close failed", err) + return } if err := r.Close(); err == nil { - failureLog(function, args, startTime, "", "Already closed object. 
No error returned", err).Fatal() + logError(function, args, startTime, "", "Already closed object. No error returned", err) + return } - err = c.RemoveObject(bucketName, objectName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() - } - err = c.RemoveBucket(bucketName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } + successLogger(function, args, startTime).Info() } @@ -911,7 +1104,8 @@ func testRemoveMultipleObjects() { ) if err != nil { - failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client object creation failed", err) + return } // Set user agent. @@ -927,7 +1121,8 @@ func testRemoveMultipleObjects() { // Make a new bucket. err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) @@ -942,9 +1137,9 @@ func testRemoveMultipleObjects() { // Upload objects and send them to objectsCh for i := 0; i < nrObjects; i++ { objectName := "sample" + strconv.Itoa(i) + ".txt" - _, err = c.PutObject(bucketName, objectName, r, "application/octet-stream") + _, err = c.PutObject(bucketName, objectName, r, 8, minio.PutObjectOptions{ContentType: "application/octet-stream"}) if err != nil { - failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + logError(function, args, startTime, "", "PutObject failed", err) continue } objectsCh <- objectName @@ -958,15 +1153,17 @@ func testRemoveMultipleObjects() { select { case r, more := <-errorCh: if more { - failureLog(function, args, startTime, "", "Unexpected error", r.Err).Fatal() + logError(function, args, startTime, "", "Unexpected error", r.Err) + return } } - // Clean the bucket created by the test - err = c.RemoveBucket(bucketName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } + successLogger(function, args, startTime).Info() } @@ -991,7 +1188,8 @@ func testRemovePartiallyUploaded() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client object creation failed", err) + return } // Set user agent. @@ -1007,7 +1205,8 @@ func testRemovePartiallyUploaded() { // Make a new bucket. 
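// testRemoveMultipleObjects above pushes object names into a channel and then drains the
// error channel returned by RemoveObjects. A minimal sketch of that pattern, assuming a
// configured *minio.Client `c`; the helper and bucket names are illustrative:
func removeAll(c *minio.Client, bucket string, names []string) error {
	objectsCh := make(chan string)
	go func() {
		defer close(objectsCh)
		for _, name := range names {
			objectsCh <- name
		}
	}()
	// Each failed deletion is reported on the returned channel; draining it to
	// completion is what signals that the batch delete has finished.
	for rErr := range c.RemoveObjects(bucket, objectsCh) {
		if rErr.Err != nil {
			return rErr.Err
		}
	}
	return nil
}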
err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024)) @@ -1018,7 +1217,8 @@ func testRemovePartiallyUploaded() { for i < 25 { _, cerr := io.CopyN(writer, r, 128*1024) if cerr != nil { - failureLog(function, args, startTime, "", "Copy failed", err).Fatal() + logError(function, args, startTime, "", "Copy failed", err) + return } i++ r.Seek(0, 0) @@ -1029,21 +1229,26 @@ func testRemovePartiallyUploaded() { objectName := bucketName + "-resumable" args["objectName"] = objectName - _, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream") + _, err = c.PutObject(bucketName, objectName, reader, 128*1024, minio.PutObjectOptions{ContentType: "application/octet-stream"}) if err == nil { - failureLog(function, args, startTime, "", "PutObject should fail", err).Fatal() + logError(function, args, startTime, "", "PutObject should fail", err) + return } if !strings.Contains(err.Error(), "proactively closed to be verified later") { - failureLog(function, args, startTime, "", "String not found", err).Fatal() + logError(function, args, startTime, "", "String not found", err) + return } err = c.RemoveIncompleteUpload(bucketName, objectName) if err != nil { - failureLog(function, args, startTime, "", "RemoveIncompleteUpload failed", err).Fatal() + logError(function, args, startTime, "", "RemoveIncompleteUpload failed", err) + return } - err = c.RemoveBucket(bucketName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } + successLogger(function, args, startTime).Info() } @@ -1051,12 +1256,12 @@ func testRemovePartiallyUploaded() { func testFPutObjectMultipart() { // initialize logging params startTime := time.Now() - function := "FPutObject(bucketName, objectName, fileName, objectContentType)" + function := "FPutObject(bucketName, objectName, fileName, opts)" args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "fileName": "", - "objectContentType": "", + "bucketName": "", + "objectName": "", + "fileName": "", + "opts": "", } // Seed random based on current time. @@ -1070,7 +1275,8 @@ func testFPutObjectMultipart() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client object creation failed", err) + return } // Enable tracing, write to stderr. @@ -1086,30 +1292,32 @@ func testFPutObjectMultipart() { // Make a new bucket. err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload. - var fileName = getFilePath("datafile-65-MB") - if os.Getenv("MINT_DATA_DIR") == "" { + var fileName = getMintDataDirFilePath("datafile-65-MB") + if fileName == "" { // Make a temp file with minPartSize bytes of data. 
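// FPutObject follows the same migration: the trailing content-type string becomes a
// minio.PutObjectOptions value, and large files are split into parts by the client.
// A minimal sketch, assuming a configured *minio.Client `c`; names are illustrative:
func putFromFile(c *minio.Client, bucket, object, path string) (int64, error) {
	// ContentType is set explicitly here; leaving it empty lets the client guess
	// from the file extension, which the "-GTar" case later in these tests exercises.
	return c.FPutObject(bucket, object, path, minio.PutObjectOptions{
		ContentType: "application/octet-stream",
	})
}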
file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") if err != nil { - failureLog(function, args, startTime, "", "TempFile creation failed", err).Fatal() + logError(function, args, startTime, "", "TempFile creation failed", err) + return } - // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload. - _, err = io.Copy(file, getDataReader("non-existent", sixtyFiveMiB)) - if err != nil { - failureLog(function, args, startTime, "", "Copy failed", err).Fatal() + // Upload 2 parts to utilize all 3 'workers' in multipart and still have a part to upload. + if _, err = io.Copy(file, getDataReader("datafile-65-MB")); err != nil { + logError(function, args, startTime, "", "Copy failed", err) + return } - err = file.Close() - if err != nil { - failureLog(function, args, startTime, "", "File Close failed", err).Fatal() + if err = file.Close(); err != nil { + logError(function, args, startTime, "", "File Close failed", err) + return } fileName = file.Name() args["fileName"] = fileName } - totalSize := sixtyFiveMiB * 1 + totalSize := dataFileMap["datafile-65-MB"] // Set base object name objectName := bucketName + "FPutObject" + "-standard" args["objectName"] = objectName @@ -1118,39 +1326,41 @@ func testFPutObjectMultipart() { args["objectContentType"] = objectContentType // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) - n, err := c.FPutObject(bucketName, objectName, fileName, objectContentType) + n, err := c.FPutObject(bucketName, objectName, fileName, minio.PutObjectOptions{ContentType: objectContentType}) if err != nil { - failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal() + logError(function, args, startTime, "", "FPutObject failed", err) + return } if n != int64(totalSize) { - failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal() + logError(function, args, startTime, "", "FPutObject failed", err) + return } - r, err := c.GetObject(bucketName, objectName) + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + logError(function, args, startTime, "", "GetObject failed", err) + return } objInfo, err := r.Stat() if err != nil { - failureLog(function, args, startTime, "", "Unexpected error", err).Fatal() + logError(function, args, startTime, "", "Unexpected error", err) + return } if objInfo.Size != int64(totalSize) { - failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(totalSize))+" got "+string(objInfo.Size), err).Fatal() + logError(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(totalSize))+" got "+string(objInfo.Size), err) + return } if objInfo.ContentType != objectContentType { - failureLog(function, args, startTime, "", "ContentType doesn't match", err).Fatal() + logError(function, args, startTime, "", "ContentType doesn't match", err) + return } - // Remove all objects and bucket and temp file - err = c.RemoveObject(bucketName, objectName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } - err = c.RemoveBucket(bucketName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() - } successLogger(function, args, 
startTime).Info() } @@ -1158,10 +1368,12 @@ func testFPutObjectMultipart() { func testFPutObject() { // initialize logging params startTime := time.Now() - function := "FPutObject(bucketName, objectName, fileName, objectContentType)" + function := "FPutObject(bucketName, objectName, fileName, opts)" args := map[string]interface{}{ "bucketName": "", "objectName": "", + "fileName": "", + "opts": "", } // Seed random based on current time. @@ -1175,7 +1387,8 @@ func testFPutObject() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client object creation failed", err) + return } // Enable tracing, write to stderr. @@ -1186,136 +1399,443 @@ func testFPutObject() { // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName // Make a new bucket. err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } // Upload 3 parts worth of data to use all 3 of multiparts 'workers' and have an extra part. // Use different data in part for multipart tests to check parts are uploaded in correct order. - var fName = getFilePath("datafile-65-MB") - if os.Getenv("MINT_DATA_DIR") == "" { + var fName = getMintDataDirFilePath("datafile-65-MB") + if fName == "" { // Make a temp file with minPartSize bytes of data. file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") if err != nil { - failureLog(function, args, startTime, "", "TempFile creation failed", err).Fatal() + logError(function, args, startTime, "", "TempFile creation failed", err) + return } - // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload. - var buffer = bytes.Repeat([]byte(string('a')), sixtyFiveMiB) - if _, err = file.Write(buffer); err != nil { - failureLog(function, args, startTime, "", "File write failed", err).Fatal() + // Upload 3 parts to utilize all 3 'workers' in multipart and still have a part to upload. + if _, err = io.Copy(file, getDataReader("datafile-65-MB")); err != nil { + logError(function, args, startTime, "", "File copy failed", err) + return } // Close the file pro-actively for windows. 
- err = file.Close() - if err != nil { - failureLog(function, args, startTime, "", "File close failed", err).Fatal() + if err = file.Close(); err != nil { + logError(function, args, startTime, "", "File close failed", err) + return } + defer os.Remove(file.Name()) fName = file.Name() } - var totalSize = sixtyFiveMiB * 1 + totalSize := dataFileMap["datafile-65-MB"] // Set base object name objectName := bucketName + "FPutObject" args["objectName"] = objectName + args["opts"] = minio.PutObjectOptions{ContentType: "application/octet-stream"} // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) - n, err := c.FPutObject(bucketName, objectName+"-standard", fName, "application/octet-stream") + n, err := c.FPutObject(bucketName, objectName+"-standard", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err != nil { - failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal() + logError(function, args, startTime, "", "FPutObject failed", err) + return } if n != int64(totalSize) { - failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err).Fatal() + logError(function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err) + return } // Perform FPutObject with no contentType provided (Expecting application/octet-stream) - n, err = c.FPutObject(bucketName, objectName+"-Octet", fName, "") + n, err = c.FPutObject(bucketName, objectName+"-Octet", fName, minio.PutObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "File close failed", err).Fatal() + logError(function, args, startTime, "", "File close failed", err) + return } if n != int64(totalSize) { - failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err).Fatal() + logError(function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err) + return } srcFile, err := os.Open(fName) if err != nil { - failureLog(function, args, startTime, "", "File open failed", err).Fatal() + logError(function, args, startTime, "", "File open failed", err) + return } defer srcFile.Close() // Add extension to temp file name tmpFile, err := os.Create(fName + ".gtar") if err != nil { - failureLog(function, args, startTime, "", "File create failed", err).Fatal() + logError(function, args, startTime, "", "File create failed", err) + return } defer tmpFile.Close() _, err = io.Copy(tmpFile, srcFile) if err != nil { - failureLog(function, args, startTime, "", "File copy failed", err).Fatal() + logError(function, args, startTime, "", "File copy failed", err) + return } // Perform FPutObject with no contentType provided (Expecting application/x-gtar) - n, err = c.FPutObject(bucketName, objectName+"-GTar", fName+".gtar", "") + n, err = c.FPutObject(bucketName, objectName+"-GTar", fName+".gtar", minio.PutObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal() + logError(function, args, startTime, "", "FPutObject failed", err) + return } if n != int64(totalSize) { - failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err).Fatal() + logError(function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err) + return } // Check headers - rStandard, err := 
c.StatObject(bucketName, objectName+"-standard") + rStandard, err := c.StatObject(bucketName, objectName+"-standard", minio.StatObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "StatObject failed", err).Fatal() + logError(function, args, startTime, "", "StatObject failed", err) + return } if rStandard.ContentType != "application/octet-stream" { - failureLog(function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err).Fatal() + logError(function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err) + return } - rOctet, err := c.StatObject(bucketName, objectName+"-Octet") + rOctet, err := c.StatObject(bucketName, objectName+"-Octet", minio.StatObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "StatObject failed", err).Fatal() + logError(function, args, startTime, "", "StatObject failed", err) + return } if rOctet.ContentType != "application/octet-stream" { - failureLog(function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err).Fatal() + logError(function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err) + return } - rGTar, err := c.StatObject(bucketName, objectName+"-GTar") + rGTar, err := c.StatObject(bucketName, objectName+"-GTar", minio.StatObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "StatObject failed", err).Fatal() + logError(function, args, startTime, "", "StatObject failed", err) + return } if rGTar.ContentType != "application/x-gtar" { - failureLog(function, args, startTime, "", "ContentType does not match, expected application/x-gtar, got "+rStandard.ContentType, err).Fatal() + logError(function, args, startTime, "", "ContentType does not match, expected application/x-gtar, got "+rStandard.ContentType, err) + return } - // Remove all objects and bucket and temp file - err = c.RemoveObject(bucketName, objectName+"-standard") - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } - err = c.RemoveObject(bucketName, objectName+"-Octet") - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + if err = os.Remove(fName + ".gtar"); err != nil { + logError(function, args, startTime, "", "File remove failed", err) + return } - err = c.RemoveObject(bucketName, objectName+"-GTar") - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() - } - - err = c.RemoveBucket(bucketName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() - } - - err = os.Remove(fName + ".gtar") - if err != nil { - failureLog(function, args, startTime, "", "File remove failed", err).Fatal() - } successLogger(function, args, startTime).Info() } +// Tests FPutObjectWithContext request context cancels after timeout +func testFPutObjectWithContext() { + // initialize logging params + startTime := time.Now() + function := "FPutObject(bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileName": "", + "opts": "", + } + // Seed random based on current time. 
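// StatObject also gains an options struct in this release; the header checks above use
// it to confirm the stored ContentType. A minimal sketch, assuming a configured
// *minio.Client `c` and that "fmt" is imported; names are illustrative:
func verifyContentType(c *minio.Client, bucket, object, want string) error {
	info, err := c.StatObject(bucket, object, minio.StatObjectOptions{})
	if err != nil {
		return err
	}
	// ObjectInfo carries Size, ETag, ContentType and LastModified for the object.
	if info.ContentType != want {
		return fmt.Errorf("content type mismatch: got %q, want %q", info.ContentType, want)
	}
	return nil
}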
+ rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(function, args, startTime, "", "Minio client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Upload 1 parts worth of data to use multipart upload. + // Use different data in part for multipart tests to check parts are uploaded in correct order. + var fName = getMintDataDirFilePath("datafile-1-MB") + if fName == "" { + // Make a temp file with 1 MiB bytes of data. + file, err := ioutil.TempFile(os.TempDir(), "FPutObjectWithContextTest") + if err != nil { + logError(function, args, startTime, "", "TempFile creation failed", err) + return + } + + // Upload 1 parts to trigger multipart upload + if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil { + logError(function, args, startTime, "", "File copy failed", err) + return + } + // Close the file pro-actively for windows. + if err = file.Close(); err != nil { + logError(function, args, startTime, "", "File close failed", err) + return + } + defer os.Remove(file.Name()) + fName = file.Name() + } + totalSize := dataFileMap["datafile-1-MB"] + + // Set base object name + objectName := bucketName + "FPutObjectWithContext" + args["objectName"] = objectName + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + args["ctx"] = ctx + defer cancel() + + // Perform standard FPutObjectWithContext with contentType provided (Expecting application/octet-stream) + _, err = c.FPutObjectWithContext(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err == nil { + logError(function, args, startTime, "", "Request context cancellation failed", err) + return + } + ctx, cancel = context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() + // Perform FPutObjectWithContext with a long timeout. 
Expect the put object to succeed + n, err := c.FPutObjectWithContext(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) + if err != nil { + logError(function, args, startTime, "", "FPutObjectWithContext failed", err) + return + } + if n != int64(totalSize) { + logError(function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err) + return + } + + _, err = c.StatObject(bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{}) + if err != nil { + logError(function, args, startTime, "", "StatObject failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(function, args, startTime).Info() + +} + +// Tests FPutObjectWithContext request context cancels after timeout +func testFPutObjectWithContextV2() { + // initialize logging params + startTime := time.Now() + function := "FPutObjectWithContext(ctx, bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{ContentType:objectContentType}", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(function, args, startTime, "", "Minio client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Upload 1 parts worth of data to use multipart upload. + // Use different data in part for multipart tests to check parts are uploaded in correct order. + var fName = getMintDataDirFilePath("datafile-1-MB") + if fName == "" { + // Make a temp file with 1 MiB bytes of data. + file, err := ioutil.TempFile(os.TempDir(), "FPutObjectWithContextTest") + if err != nil { + logError(function, args, startTime, "", "Temp file creation failed", err) + return + } + + // Upload 1 parts to trigger multipart upload + if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil { + logError(function, args, startTime, "", "File copy failed", err) + return + } + + // Close the file pro-actively for windows. 
+ if err = file.Close(); err != nil { + logError(function, args, startTime, "", "File close failed", err) + return + } + defer os.Remove(file.Name()) + fName = file.Name() + } + totalSize := dataFileMap["datafile-1-MB"] + + // Set base object name + objectName := bucketName + "FPutObjectWithContext" + args["objectName"] = objectName + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Millisecond) + args["ctx"] = ctx + defer cancel() + + // Perform standard FPutObjectWithContext with contentType provided (Expecting application/octet-stream) + _, err = c.FPutObjectWithContext(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err == nil { + logError(function, args, startTime, "", "FPutObjectWithContext with short timeout failed", err) + return + } + ctx, cancel = context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() + // Perform FPutObjectWithContext with a long timeout. Expect the put object to succeed + n, err := c.FPutObjectWithContext(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) + if err != nil { + logError(function, args, startTime, "", "FPutObjectWithContext with long timeout failed", err) + return + } + if n != int64(totalSize) { + logError(function, args, startTime, "", "Number of bytes does not match:wanted"+string(totalSize)+" got "+string(n), err) + return + } + + _, err = c.StatObject(bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{}) + if err != nil { + logError(function, args, startTime, "", "StatObject failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(function, args, startTime).Info() + +} + +// Test validates putObject with context to see if request cancellation is honored. +func testPutObjectWithContext() { + // initialize logging params + startTime := time.Now() + function := "PutObjectWithContext(ctx, bucketName, objectName, fileName, opts)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + "opts": "", + } + // Instantiate new minio client object. + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(function, args, startTime, "", "Minio client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Make a new bucket. 
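// The context-aware variants added here thread a context.Context through the upload, so
// a deadline or cancellation aborts the request; that is what the very short timeouts in
// the tests above assert. A minimal sketch, assuming a configured *minio.Client `c` and
// that "context" and "time" are imported; names are illustrative:
func putFileWithDeadline(c *minio.Client, bucket, object, path string) (int64, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	// If the deadline expires mid-transfer, the call returns the context error
	// instead of completing the upload.
	return c.FPutObjectWithContext(ctx, bucket, object, path,
		minio.PutObjectOptions{ContentType: "application/octet-stream"})
}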
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(function, args, startTime, "", "MakeBucket call failed", err) + return + } + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) + args["objectName"] = objectName + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + args["ctx"] = ctx + args["opts"] = minio.PutObjectOptions{ContentType: "binary/octet-stream"} + defer cancel() + + _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(function, args, startTime, "", "PutObjectWithContext with short timeout failed", err) + return + } + + ctx, cancel = context.WithTimeout(context.Background(), 3*time.Minute) + args["ctx"] = ctx + + defer cancel() + reader = getDataReader("datafile-33-kB") + defer reader.Close() + _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(function, args, startTime, "", "PutObjectWithContext with long timeout failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(function, args, startTime).Info() + +} + // Tests get object ReaderSeeker interface methods. func testGetObjectReadSeekFunctional() { // initialize logging params @@ -1337,7 +1857,8 @@ func testGetObjectReadSeekFunctional() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client object creation failed", err) + return } // Enable tracing, write to stderr. @@ -1353,11 +1874,13 @@ func testGetObjectReadSeekFunctional() { // Make a new bucket. err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } // Generate 33K of data. 
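// GetObject likewise takes a minio.GetObjectOptions value now. Beyond request headers,
// the options can scope the download itself; the sketch below assumes a SetRange helper
// on GetObjectOptions and uses it to fetch only the first kilobyte. Assumes a configured
// *minio.Client `c` and that "io/ioutil" is imported; names are illustrative:
func readFirstKB(c *minio.Client, bucket, object string) ([]byte, error) {
	opts := minio.GetObjectOptions{}
	// Request bytes 0-1023 only (assumed helper); the server answers with a partial response.
	if err := opts.SetRange(0, 1023); err != nil {
		return nil, err
	}
	r, err := c.GetObject(bucket, object, opts)
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return ioutil.ReadAll(r)
}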
- var reader = getDataReader("datafile-33-kB", thirtyThreeKiB) + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") defer reader.Close() objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") @@ -1365,43 +1888,46 @@ func testGetObjectReadSeekFunctional() { buf, err := ioutil.ReadAll(reader) if err != nil { - failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal() + logError(function, args, startTime, "", "ReadAll failed", err) + return } // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream") + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { - failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + logError(function, args, startTime, "", "PutObject failed", err) + return } - if n != int64(thirtyThreeKiB) { - failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(n), err).Fatal() + if n != int64(bufSize) { + logError(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) + return } defer func() { - err = c.RemoveObject(bucketName, objectName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() - } - err = c.RemoveBucket(bucketName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } }() // Read the data back - r, err := c.GetObject(bucketName, objectName) + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + logError(function, args, startTime, "", "GetObject failed", err) + return } st, err := r.Stat() if err != nil { - failureLog(function, args, startTime, "", "Stat object failed", err).Fatal() + logError(function, args, startTime, "", "Stat object failed", err) + return } - if st.Size != int64(thirtyThreeKiB) { - failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(st.Size), err).Fatal() + if st.Size != int64(bufSize) { + logError(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return } // This following function helps us to compare data from the reader after seek @@ -1411,13 +1937,15 @@ func testGetObjectReadSeekFunctional() { return } buffer := bytes.NewBuffer([]byte{}) - if _, err := io.CopyN(buffer, r, int64(thirtyThreeKiB)); err != nil { + if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { if err != io.EOF { - failureLog(function, args, startTime, "", "CopyN failed", err).Fatal() + logError(function, args, startTime, "", "CopyN failed", err) + return } } if !bytes.Equal(buf[start:end], buffer.Bytes()) { - failureLog(function, args, startTime, "", "Incorrect read bytes v/s original buffer", err).Fatal() + logError(function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) + return } } @@ -1436,23 +1964,23 @@ func testGetObjectReadSeekFunctional() { // Start from offset 0, fetch data and compare {0, 0, 0, nil, true, 0, 0}, // Start 
from offset 2048, fetch data and compare - {2048, 0, 2048, nil, true, 2048, thirtyThreeKiB}, + {2048, 0, 2048, nil, true, 2048, bufSize}, // Start from offset larger than possible - {int64(thirtyThreeKiB) + 1024, 0, 0, seekErr, false, 0, 0}, + {int64(bufSize) + 1024, 0, 0, seekErr, false, 0, 0}, // Move to offset 0 without comparing {0, 0, 0, nil, false, 0, 0}, // Move one step forward and compare - {1, 1, 1, nil, true, 1, thirtyThreeKiB}, + {1, 1, 1, nil, true, 1, bufSize}, // Move larger than possible - {int64(thirtyThreeKiB), 1, 0, seekErr, false, 0, 0}, + {int64(bufSize), 1, 0, seekErr, false, 0, 0}, // Provide negative offset with CUR_SEEK {int64(-1), 1, 0, seekErr, false, 0, 0}, // Test with whence SEEK_END and with positive offset - {1024, 2, int64(thirtyThreeKiB) - 1024, io.EOF, true, 0, 0}, + {1024, 2, int64(bufSize) - 1024, io.EOF, true, 0, 0}, // Test with whence SEEK_END and with negative offset - {-1024, 2, int64(thirtyThreeKiB) - 1024, nil, true, thirtyThreeKiB - 1024, thirtyThreeKiB}, + {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, // Test with whence SEEK_END and with large negative offset - {-int64(thirtyThreeKiB) * 2, 2, 0, seekErr, true, 0, 0}, + {-int64(bufSize) * 2, 2, 0, seekErr, true, 0, 0}, } for i, testCase := range testCases { @@ -1460,11 +1988,13 @@ func testGetObjectReadSeekFunctional() { n, err := r.Seek(testCase.offset, testCase.whence) // We expect an error if testCase.err == seekErr && err == nil { - failureLog(function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err).Fatal() + logError(function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err) + return } // We expect a specific error if testCase.err != seekErr && testCase.err != err { - failureLog(function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err).Fatal() + logError(function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err) + return } // If we expect an error go to the next loop if testCase.err != nil { @@ -1472,7 +2002,8 @@ func testGetObjectReadSeekFunctional() { } // Check the returned seek pos if n != testCase.pos { - failureLog(function, args, startTime, "", "Test "+string(i+1)+", number of bytes seeked does not match, expected "+string(testCase.pos)+", got "+string(n), err).Fatal() + logError(function, args, startTime, "", "Test "+string(i+1)+", number of bytes seeked does not match, expected "+string(testCase.pos)+", got "+string(n), err) + return } // Compare only if shouldCmp is activated if testCase.shouldCmp { @@ -1503,7 +2034,8 @@ func testGetObjectReadAtFunctional() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client object creation failed", err) + return } // Enable tracing, write to stderr. @@ -1519,11 +2051,13 @@ func testGetObjectReadAtFunctional() { // Make a new bucket. err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } // Generate 33K of data. 
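// The table above drives Seek through all three whence values on the object reader, which
// implements io.Seeker. A minimal sketch of the same semantics, assuming a configured
// *minio.Client `c` and that "io" is imported; names are illustrative:
func readLast100Bytes(c *minio.Client, bucket, object string) ([]byte, error) {
	r, err := c.GetObject(bucket, object, minio.GetObjectOptions{})
	if err != nil {
		return nil, err
	}
	defer r.Close()
	// whence 2 (io.SeekEnd) with a negative offset positions relative to the end of
	// the object, exactly as the {-1024, 2, ...} cases in the table expect.
	if _, err := r.Seek(-100, io.SeekEnd); err != nil {
		return nil, err
	}
	buf := make([]byte, 100)
	if _, err := io.ReadFull(r, buf); err != nil {
		return nil, err
	}
	return buf, nil
}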
- var reader = getDataReader("datafile-33-kB", thirtyThreeKiB) + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") defer reader.Close() objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") @@ -1531,23 +2065,27 @@ func testGetObjectReadAtFunctional() { buf, err := ioutil.ReadAll(reader) if err != nil { - failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal() + logError(function, args, startTime, "", "ReadAll failed", err) + return } // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream") + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { - failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + logError(function, args, startTime, "", "PutObject failed", err) + return } - if n != int64(thirtyThreeKiB) { - failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(n), err).Fatal() + if n != int64(bufSize) { + logError(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) + return } // read the data back - r, err := c.GetObject(bucketName, objectName) + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + logError(function, args, startTime, "", "PutObject failed", err) + return } offset := int64(2048) @@ -1560,56 +2098,70 @@ func testGetObjectReadAtFunctional() { // Test readAt before stat is called. m, err := r.ReadAt(buf1, offset) if err != nil { - failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal() + logError(function, args, startTime, "", "ReadAt failed", err) + return } if m != len(buf1) { - failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err).Fatal() + logError(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) + return } if !bytes.Equal(buf1, buf[offset:offset+512]) { - failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal() + logError(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return } offset += 512 st, err := r.Stat() if err != nil { - failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + logError(function, args, startTime, "", "Stat failed", err) + return } - if st.Size != int64(thirtyThreeKiB) { - failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(st.Size), err).Fatal() + if st.Size != int64(bufSize) { + logError(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return } m, err = r.ReadAt(buf2, offset) if err != nil { - failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal() + logError(function, args, startTime, "", "ReadAt failed", err) + return } if m != len(buf2) { - failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err).Fatal() + logError(function, args, startTime, "", "ReadAt read 
shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) + return } if !bytes.Equal(buf2, buf[offset:offset+512]) { - failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal() + logError(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return } offset += 512 m, err = r.ReadAt(buf3, offset) if err != nil { - failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal() + logError(function, args, startTime, "", "ReadAt failed", err) + return } if m != len(buf3) { - failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err).Fatal() + logError(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err) + return } if !bytes.Equal(buf3, buf[offset:offset+512]) { - failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal() + logError(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return } offset += 512 m, err = r.ReadAt(buf4, offset) if err != nil { - failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal() + logError(function, args, startTime, "", "ReadAt failed", err) + return } if m != len(buf4) { - failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err).Fatal() + logError(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err) + return } if !bytes.Equal(buf4, buf[offset:offset+512]) { - failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal() + logError(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return } buf5 := make([]byte, n) @@ -1617,14 +2169,17 @@ func testGetObjectReadAtFunctional() { m, err = r.ReadAt(buf5, 0) if err != nil { if err != io.EOF { - failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal() + logError(function, args, startTime, "", "ReadAt failed", err) + return } } if m != len(buf5) { - failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err).Fatal() + logError(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err) + return } if !bytes.Equal(buf, buf5) { - failureLog(function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err).Fatal() + logError(function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) + return } buf6 := make([]byte, n+1) @@ -1632,16 +2187,14 @@ func testGetObjectReadAtFunctional() { _, err = r.ReadAt(buf6, 0) if err != nil { if err != io.EOF { - failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal() + logError(function, args, startTime, "", "ReadAt failed", err) + return } } - err = c.RemoveObject(bucketName, objectName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() - } - err = c.RemoveBucket(bucketName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + // Delete all objects and buckets + if err = 
cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } successLogger(function, args, startTime).Info() } @@ -1666,7 +2219,8 @@ func testPresignedPostPolicy() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client object creation failed", err) + return } // Enable tracing, write to stderr. @@ -1681,75 +2235,166 @@ func testPresignedPostPolicy() { // Make a new bucket in 'us-east-1' (source bucket). err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } // Generate 33K of data. - var reader = getDataReader("datafile-33-kB", thirtyThreeKiB) + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") defer reader.Close() objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "") + metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "") buf, err := ioutil.ReadAll(reader) if err != nil { - failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal() + logError(function, args, startTime, "", "ReadAll failed", err) + return } // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream") + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { - failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + logError(function, args, startTime, "", "PutObject failed", err) + return } - if n != int64(thirtyThreeKiB) { - failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal() + if n != int64(bufSize) { + logError(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+" got "+string(n), err) + return } policy := minio.NewPostPolicy() if err := policy.SetBucket(""); err == nil { - failureLog(function, args, startTime, "", "SetBucket did not fail for invalid conditions", err).Fatal() + logError(function, args, startTime, "", "SetBucket did not fail for invalid conditions", err) + return } if err := policy.SetKey(""); err == nil { - failureLog(function, args, startTime, "", "SetKey did not fail for invalid conditions", err).Fatal() + logError(function, args, startTime, "", "SetKey did not fail for invalid conditions", err) + return } if err := policy.SetKeyStartsWith(""); err == nil { - failureLog(function, args, startTime, "", "SetKeyStartsWith did not fail for invalid conditions", err).Fatal() + logError(function, args, startTime, "", "SetKeyStartsWith did not fail for invalid conditions", err) + return } if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil { - failureLog(function, args, startTime, "", "SetExpires did not fail for invalid conditions", err).Fatal() + logError(function, args, startTime, "", "SetExpires did not fail for invalid conditions", err) + return } if err := policy.SetContentType(""); err == nil { - failureLog(function, args, startTime, "", "SetContentType did not fail for invalid conditions", err).Fatal() + 
logError(function, args, startTime, "", "SetContentType did not fail for invalid conditions", err) + return } if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil { - failureLog(function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err).Fatal() + logError(function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err) + return + } + if err := policy.SetUserMetadata("", ""); err == nil { + logError(function, args, startTime, "", "SetUserMetadata did not fail for invalid conditions", err) + return } policy.SetBucket(bucketName) policy.SetKey(objectName) policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days - policy.SetContentType("image/png") - policy.SetContentLengthRange(1024, 1024*1024) - args["policy"] = policy + policy.SetContentType("binary/octet-stream") + policy.SetContentLengthRange(10, 1024*1024) + policy.SetUserMetadata(metadataKey, metadataValue) + args["policy"] = policy.String() - _, _, err = c.PresignedPostPolicy(policy) + presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(policy) if err != nil { - failureLog(function, args, startTime, "", "PresignedPostPolicy failed", err).Fatal() + logError(function, args, startTime, "", "PresignedPostPolicy failed", err) + return } - policy = minio.NewPostPolicy() - - // Remove all objects and buckets - err = c.RemoveObject(bucketName, objectName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + var formBuf bytes.Buffer + writer := multipart.NewWriter(&formBuf) + for k, v := range formData { + writer.WriteField(k, v) } - err = c.RemoveBucket(bucketName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + // Get a 33KB file to upload and test if set post policy works + var filePath = getMintDataDirFilePath("datafile-33-kB") + if filePath == "" { + // Make a temp file with 33 KB data. 
+ file, err := ioutil.TempFile(os.TempDir(), "PresignedPostPolicyTest") + if err != nil { + logError(function, args, startTime, "", "TempFile creation failed", err) + return + } + if _, err = io.Copy(file, getDataReader("datafile-33-kB")); err != nil { + logError(function, args, startTime, "", "Copy failed", err) + return + } + if err = file.Close(); err != nil { + logError(function, args, startTime, "", "File Close failed", err) + return + } + filePath = file.Name() } + + // add file to post request + f, err := os.Open(filePath) + defer f.Close() + if err != nil { + logError(function, args, startTime, "", "File open failed", err) + return + } + w, err := writer.CreateFormFile("file", filePath) + if err != nil { + logError(function, args, startTime, "", "CreateFormFile failed", err) + return + } + + _, err = io.Copy(w, f) + if err != nil { + logError(function, args, startTime, "", "Copy failed", err) + return + } + writer.Close() + + // make post request with correct form data + res, err := http.Post(presignedPostPolicyURL.String(), writer.FormDataContentType(), bytes.NewReader(formBuf.Bytes())) + if err != nil { + logError(function, args, startTime, "", "Http request failed", err) + return + } + defer res.Body.Close() + if res.StatusCode != http.StatusNoContent { + logError(function, args, startTime, "", "Http request failed", errors.New(res.Status)) + return + } + + // expected path should be absolute path of the object + var scheme string + if mustParseBool(os.Getenv(enableHTTPS)) { + scheme = "https://" + } else { + scheme = "http://" + } + + expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName + + if val, ok := res.Header["Location"]; ok { + if val[0] != expectedLocation { + logError(function, args, startTime, "", "Location in header response is incorrect", err) + return + } + } else { + logError(function, args, startTime, "", "Location not found in header response", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return + } + successLogger(function, args, startTime).Info() } @@ -1773,7 +2418,8 @@ func testCopyObject() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client object creation failed", err) + return } // Enable tracing, write to stderr. @@ -1788,37 +2434,44 @@ func testCopyObject() { // Make a new bucket in 'us-east-1' (source bucket). err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } // Make a new bucket in 'us-east-1' (destination bucket). err = c.MakeBucket(bucketName+"-copy", "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } // Generate 33K of data. 
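// The copy test that begins above exercises server-side copy: a SourceInfo can carry copy
// conditions (modified-since, ETag match) and CopyObject performs the copy without pulling
// the data through the client. A minimal sketch, assuming a configured *minio.Client `c`;
// the helper and bucket/object names are illustrative:
func copyIfETagMatches(c *minio.Client, srcBucket, srcObject, dstBucket, dstObject, etag string) error {
	src := minio.NewSourceInfo(srcBucket, srcObject, nil)
	// Only copy while the source still has the expected ETag.
	if err := src.SetMatchETagCond(etag); err != nil {
		return err
	}
	dst, err := minio.NewDestinationInfo(dstBucket, dstObject, nil, nil)
	if err != nil {
		return err
	}
	return c.CopyObject(dst, src)
}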
- var reader = getDataReader("datafile-33-kB", thirtyThreeKiB) + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - n, err := c.PutObject(bucketName, objectName, reader, "binary/octet-stream") + n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { - failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + logError(function, args, startTime, "", "PutObject failed", err) + return } - if n != int64(thirtyThreeKiB) { - failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(n), err).Fatal() + if n != int64(bufSize) { + logError(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err) + return } - r, err := c.GetObject(bucketName, objectName) + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + logError(function, args, startTime, "", "GetObject failed", err) + return } // Check the various fields of source object against destination object. objInfo, err := r.Stat() if err != nil { - failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + logError(function, args, startTime, "", "Stat failed", err) + return } // Copy Source @@ -1829,103 +2482,108 @@ func testCopyObject() { // All invalid conditions first. err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)) if err == nil { - failureLog(function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err).Fatal() + logError(function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err) + return } err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)) if err == nil { - failureLog(function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err).Fatal() + logError(function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err) + return } err = src.SetMatchETagCond("") if err == nil { - failureLog(function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err).Fatal() + logError(function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err) + return } err = src.SetMatchETagExceptCond("") if err == nil { - failureLog(function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err).Fatal() + logError(function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err) + return } err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) if err != nil { - failureLog(function, args, startTime, "", "SetModifiedSinceCond failed", err).Fatal() + logError(function, args, startTime, "", "SetModifiedSinceCond failed", err) + return } err = src.SetMatchETagCond(objInfo.ETag) if err != nil { - failureLog(function, args, startTime, "", "SetMatchETagCond failed", err).Fatal() + logError(function, args, startTime, "", "SetMatchETagCond failed", err) + return } args["src"] = src dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil) args["dst"] = dst if err != nil { - failureLog(function, args, 
startTime, "", "NewDestinationInfo failed", err).Fatal() + logError(function, args, startTime, "", "NewDestinationInfo failed", err) + return } // Perform the Copy err = c.CopyObject(dst, src) if err != nil { - failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal() + logError(function, args, startTime, "", "CopyObject failed", err) + return } // Source object - r, err = c.GetObject(bucketName, objectName) + r, err = c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + logError(function, args, startTime, "", "GetObject failed", err) + return } // Destination object - readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy") + readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + logError(function, args, startTime, "", "GetObject failed", err) + return } // Check the various fields of source object against destination object. objInfo, err = r.Stat() if err != nil { - failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + logError(function, args, startTime, "", "Stat failed", err) + return } objInfoCopy, err := readerCopy.Stat() if err != nil { - failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + logError(function, args, startTime, "", "Stat failed", err) + return } if objInfo.Size != objInfoCopy.Size { - failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+", got "+string(objInfo.Size), err).Fatal() + logError(function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+", got "+string(objInfo.Size), err) + return } // CopyObject again but with wrong conditions src = minio.NewSourceInfo(bucketName, objectName, nil) err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) if err != nil { - failureLog(function, args, startTime, "", "SetUnmodifiedSinceCond failed", err).Fatal() + logError(function, args, startTime, "", "SetUnmodifiedSinceCond failed", err) + return } err = src.SetMatchETagExceptCond(objInfo.ETag) if err != nil { - failureLog(function, args, startTime, "", "SetMatchETagExceptCond failed", err).Fatal() + logError(function, args, startTime, "", "SetMatchETagExceptCond failed", err) + return } // Perform the Copy which should fail err = c.CopyObject(dst, src) if err == nil { - failureLog(function, args, startTime, "", "CopyObject did not fail for invalid conditions", err).Fatal() + logError(function, args, startTime, "", "CopyObject did not fail for invalid conditions", err) + return } - // Remove all objects and buckets - err = c.RemoveObject(bucketName, objectName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } - - err = c.RemoveObject(bucketName+"-copy", objectName+"-copy") - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() - } - - err = c.RemoveBucket(bucketName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() - } - - err = c.RemoveBucket(bucketName + "-copy") - if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket 
failed", err).Fatal() + if err = cleanupBucket(bucketName+"-copy", c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } successLogger(function, args, startTime).Info() } @@ -1952,7 +2610,8 @@ func testEncryptionPutGet() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client object creation failed", err) + return } // Enable tracing, write to stderr. @@ -1968,7 +2627,8 @@ func testEncryptionPutGet() { // Make a new bucket. err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } // Generate a symmetric key @@ -2000,7 +2660,8 @@ func testEncryptionPutGet() { "9945cb5c7d") if err != nil { - failureLog(function, args, startTime, "", "DecodeString for symmetric Key generation failed", err).Fatal() + logError(function, args, startTime, "", "DecodeString for symmetric Key generation failed", err) + return } publicKey, err := hex.DecodeString("30819f300d06092a864886f70d010101050003818d003081890281810087" + @@ -2010,13 +2671,191 @@ func testEncryptionPutGet() { "c0a07020a78eed7eaa471eca4b92071394e061346c0615ccce2f465dee20" + "80a89e43f29b570203010001") if err != nil { - failureLog(function, args, startTime, "", "DecodeString for symmetric Key generation failed", err).Fatal() + logError(function, args, startTime, "", "DecodeString for symmetric Key generation failed", err) + return } // Generate an asymmetric key asymKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey) if err != nil { - failureLog(function, args, startTime, "", "NewAsymmetricKey for symmetric Key generation failed", err).Fatal() + logError(function, args, startTime, "", "NewAsymmetricKey for symmetric Key generation failed", err) + return + } + + testCases := []struct { + buf []byte + encKey encrypt.Key + }{ + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 0)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 15)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 16)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 17)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 31)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 32)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 33)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*2)}, + {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*1024)}, + + {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 0)}, + {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1)}, + {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 16)}, + {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 32)}, + {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024)}, + {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024*1024)}, + } + + for i, testCase := range testCases { + // Generate a random object name + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Secured object + cbcMaterials, err := encrypt.NewCBCSecureMaterials(testCase.encKey) + args["cbcMaterials"] = cbcMaterials + + if err != nil { + logError(function, args, startTime, "", "NewCBCSecureMaterials failed", err) + return + } + + // Put encrypted data + _, err = c.PutEncryptedObject(bucketName, objectName, 
bytes.NewReader(testCase.buf), cbcMaterials) + if err != nil { + logError(function, args, startTime, "", "PutEncryptedObject failed", err) + return + } + + // Read the data back + r, err := c.GetEncryptedObject(bucketName, objectName, cbcMaterials) + if err != nil { + logError(function, args, startTime, "", "GetEncryptedObject failed", err) + return + } + defer r.Close() + + // Compare the sent object with the received one + recvBuffer := bytes.NewBuffer([]byte{}) + if _, err = io.Copy(recvBuffer, r); err != nil { + logError(function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) + return + } + if recvBuffer.Len() != len(testCase.buf) { + logError(function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) + return + } + if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { + logError(function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) + return + } + + successLogger(function, args, startTime).Info() + + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(function, args, startTime).Info() +} + +// TestEncryptionFPut tests client side encryption +func testEncryptionFPut() { + // initialize logging params + startTime := time.Now() + function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, cbcMaterials)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "filePath": "", + "contentType": "", + "cbcMaterials": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(function, args, startTime, "", "Minio client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Generate a symmetric key + symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00")) + + // Generate an assymmetric key from predefine public and private certificates + privateKey, err := hex.DecodeString( + "30820277020100300d06092a864886f70d0101010500048202613082025d" + + "0201000281810087b42ea73243a3576dc4c0b6fa245d339582dfdbddc20c" + + "bb8ab666385034d997210c54ba79275c51162a1221c3fb1a4c7c61131ca6" + + "5563b319d83474ef5e803fbfa7e52b889e1893b02586b724250de7ac6351" + + "cc0b7c638c980acec0a07020a78eed7eaa471eca4b92071394e061346c06" + + "15ccce2f465dee2080a89e43f29b5702030100010281801dd5770c3af8b3" + + "c85cd18cacad81a11bde1acfac3eac92b00866e142301fee565365aa9af4" + + "57baebf8bb7711054d071319a51dd6869aef3848ce477a0dc5f0dbc0c336" + + "5814b24c820491ae2bb3c707229a654427e03307fec683e6b27856688f08" + + "bdaa88054c5eeeb773793ff7543ee0fb0e2ad716856f2777f809ef7e6fa4" + + "41024100ca6b1edf89e8a8f93cce4b98c76c6990a09eb0d32ad9d3d04fbf" + + "0b026fa935c44f0a1c05dd96df192143b7bda8b110ec8ace28927181fd8c" + + "d2f17330b9b63535024100aba0260afb41489451baaeba423bee39bcbd1e" + + "f63dd44ee2d466d2453e683bf46d019a8baead3a2c7fca987988eb4d565e" + + "27d6be34605953f5034e4faeec9bdb0241009db2cb00b8be8c36710aff96" + + "6d77a6dec86419baca9d9e09a2b761ea69f7d82db2ae5b9aae4246599bb2" + + "d849684d5ab40e8802cfe4a2b358ad56f2b939561d2902404e0ead9ecafd" + + "bb33f22414fa13cbcc22a86bdf9c212ce1a01af894e3f76952f36d6c904c" + + "bd6a7e0de52550c9ddf31f1e8bfe5495f79e66a25fca5c20b3af5b870241" + + "0083456232aa58a8c45e5b110494599bda8dbe6a094683a0539ddd24e19d" + + "47684263bbe285ad953d725942d670b8f290d50c0bca3d1dc9688569f1d5" + + "9945cb5c7d") + + if err != nil { + logError(function, args, startTime, "", "DecodeString for symmetric Key generation failed", err) + return + } + + publicKey, err := hex.DecodeString("30819f300d06092a864886f70d010101050003818d003081890281810087" + + "b42ea73243a3576dc4c0b6fa245d339582dfdbddc20cbb8ab666385034d9" + + "97210c54ba79275c51162a1221c3fb1a4c7c61131ca65563b319d83474ef" + + "5e803fbfa7e52b889e1893b02586b724250de7ac6351cc0b7c638c980ace" + + "c0a07020a78eed7eaa471eca4b92071394e061346c0615ccce2f465dee20" + + "80a89e43f29b570203010001") + if err != nil { + logError(function, args, startTime, "", "DecodeString for symmetric Key generation failed", err) + return + } + + // Generate an asymmetric key + asymKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey) + if err != nil { + logError(function, args, startTime, "", "NewAsymmetricKey for symmetric Key generation failed", err) + return } // Object custom metadata @@ -2057,48 +2896,63 @@ func testEncryptionPutGet() { args["cbcMaterials"] = cbcMaterials if err != nil { - failureLog(function, args, startTime, "", "NewCBCSecureMaterials failed", err).Fatal() + logError(function, args, startTime, "", "NewCBCSecureMaterials failed", err) + return } - - // Put encrypted data - _, err = c.PutEncryptedObject(bucketName, objectName, bytes.NewReader(testCase.buf), cbcMaterials, map[string][]string{"Content-Type": {customContentType}}, nil) + // Generate a random file name. 
+ fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + file, err := os.Create(fileName) if err != nil { - failureLog(function, args, startTime, "", "PutEncryptedObject failed", err).Fatal() + logError(function, args, startTime, "", "file create failed", err) + return + } + _, err = file.Write(testCase.buf) + if err != nil { + logError(function, args, startTime, "", "file write failed", err) + return + } + file.Close() + // Put encrypted data + if _, err = c.FPutEncryptedObject(bucketName, objectName, fileName, cbcMaterials); err != nil { + logError(function, args, startTime, "", "FPutEncryptedObject failed", err) + return } // Read the data back r, err := c.GetEncryptedObject(bucketName, objectName, cbcMaterials) if err != nil { - failureLog(function, args, startTime, "", "GetEncryptedObject failed", err).Fatal() + logError(function, args, startTime, "", "GetEncryptedObject failed", err) + return } defer r.Close() // Compare the sent object with the received one recvBuffer := bytes.NewBuffer([]byte{}) if _, err = io.Copy(recvBuffer, r); err != nil { - failureLog(function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err).Fatal() + logError(function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) + return } if recvBuffer.Len() != len(testCase.buf) { - failureLog(function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err).Fatal() + logError(function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) + return } if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { - failureLog(function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err).Fatal() + logError(function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) + return } - // Remove test object - err = c.RemoveObject(bucketName, objectName) - if err != nil { - failureLog(function, args, startTime, "", "Test "+string(i+1)+", RemoveObject failed with: "+err.Error(), err).Fatal() + if err = os.Remove(fileName); err != nil { + logError(function, args, startTime, "", "File remove failed", err) + return } - } - // Remove test bucket - err = c.RemoveBucket(bucketName) - if err != nil { - err = c.RemoveBucket(bucketName) - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } + successLogger(function, args, startTime).Info() } @@ -2129,7 +2983,8 @@ func testBucketNotification() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client object creation failed", err) + return } // Enable to debug @@ -2160,7 +3015,8 @@ func testBucketNotification() { // because it is duplicated bNotification.AddTopic(topicConfig) if len(bNotification.TopicConfigs) != 1 { - failureLog(function, args, startTime, "", "Duplicate entry added", err).Fatal() + logError(function, args, startTime, "", "Duplicate entry 
added", err) + return } // Add and remove a queue config @@ -2169,26 +3025,38 @@ func testBucketNotification() { err = c.SetBucketNotification(bucketName, bNotification) if err != nil { - failureLog(function, args, startTime, "", "SetBucketNotification failed", err).Fatal() + logError(function, args, startTime, "", "SetBucketNotification failed", err) + return } bNotification, err = c.GetBucketNotification(bucketName) if err != nil { - failureLog(function, args, startTime, "", "GetBucketNotification failed", err).Fatal() + logError(function, args, startTime, "", "GetBucketNotification failed", err) + return } if len(bNotification.TopicConfigs) != 1 { - failureLog(function, args, startTime, "", "Topic config is empty", err).Fatal() + logError(function, args, startTime, "", "Topic config is empty", err) + return } if bNotification.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" { - failureLog(function, args, startTime, "", "Couldn't get the suffix", err).Fatal() + logError(function, args, startTime, "", "Couldn't get the suffix", err) + return } err = c.RemoveAllBucketNotification(bucketName) if err != nil { - failureLog(function, args, startTime, "", "RemoveAllBucketNotification failed", err).Fatal() + logError(function, args, startTime, "", "RemoveAllBucketNotification failed", err) + return } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return + } + successLogger(function, args, startTime).Info() } @@ -2208,7 +3076,8 @@ func testFunctional() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, nil, startTime, "", "Minio client object creation failed", err).Fatal() + logError(function, nil, startTime, "", "Minio client object creation failed", err) + return } // Enable to debug @@ -2228,20 +3097,23 @@ func testFunctional() { } if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } // Generate a random file name. fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") file, err := os.Create(fileName) if err != nil { - failureLog(function, args, startTime, "", "File creation failed", err).Fatal() + logError(function, args, startTime, "", "File creation failed", err) + return } for i := 0; i < 3; i++ { buf := make([]byte, rand.Intn(1<<19)) _, err = file.Write(buf) if err != nil { - failureLog(function, args, startTime, "", "File write failed", err).Fatal() + logError(function, args, startTime, "", "File write failed", err) + return } } file.Close() @@ -2255,10 +3127,12 @@ func testFunctional() { } if err != nil { - failureLog(function, args, startTime, "", "BucketExists failed", err).Fatal() + logError(function, args, startTime, "", "BucketExists failed", err) + return } if !exists { - failureLog(function, args, startTime, "", "Could not find the bucket", err).Fatal() + logError(function, args, startTime, "", "Could not find the bucket", err) + return } // Asserting the default bucket policy. 
@@ -2270,10 +3144,12 @@ func testFunctional() { } if err != nil { - failureLog(function, args, startTime, "", "GetBucketPolicy failed", err).Fatal() + logError(function, args, startTime, "", "GetBucketPolicy failed", err) + return } if policyAccess != "none" { - failureLog(function, args, startTime, "", "policy should be set to none", err).Fatal() + logError(function, args, startTime, "", "policy should be set to none", err) + return } // Set the bucket policy to 'public readonly'. err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadOnly) @@ -2285,7 +3161,8 @@ func testFunctional() { } if err != nil { - failureLog(function, args, startTime, "", "SetBucketPolicy failed", err).Fatal() + logError(function, args, startTime, "", "SetBucketPolicy failed", err) + return } // should return policy `readonly`. policyAccess, err = c.GetBucketPolicy(bucketName, "") @@ -2296,10 +3173,12 @@ func testFunctional() { } if err != nil { - failureLog(function, args, startTime, "", "GetBucketPolicy failed", err).Fatal() + logError(function, args, startTime, "", "GetBucketPolicy failed", err) + return } if policyAccess != "readonly" { - failureLog(function, args, startTime, "", "policy should be set to readonly", err).Fatal() + logError(function, args, startTime, "", "policy should be set to readonly", err) + return } // Make the bucket 'public writeonly'. @@ -2312,7 +3191,8 @@ func testFunctional() { } if err != nil { - failureLog(function, args, startTime, "", "SetBucketPolicy failed", err).Fatal() + logError(function, args, startTime, "", "SetBucketPolicy failed", err) + return } // should return policy `writeonly`. policyAccess, err = c.GetBucketPolicy(bucketName, "") @@ -2323,10 +3203,12 @@ func testFunctional() { } if err != nil { - failureLog(function, args, startTime, "", "GetBucketPolicy failed", err).Fatal() + logError(function, args, startTime, "", "GetBucketPolicy failed", err) + return } if policyAccess != "writeonly" { - failureLog(function, args, startTime, "", "policy should be set to writeonly", err).Fatal() + logError(function, args, startTime, "", "policy should be set to writeonly", err) + return } // Make the bucket 'public read/write'. err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite) @@ -2338,7 +3220,8 @@ func testFunctional() { } if err != nil { - failureLog(function, args, startTime, "", "SetBucketPolicy failed", err).Fatal() + logError(function, args, startTime, "", "SetBucketPolicy failed", err) + return } // should return policy `readwrite`. policyAccess, err = c.GetBucketPolicy(bucketName, "") @@ -2349,10 +3232,12 @@ func testFunctional() { } if err != nil { - failureLog(function, args, startTime, "", "GetBucketPolicy failed", err).Fatal() + logError(function, args, startTime, "", "GetBucketPolicy failed", err) + return } if policyAccess != "readwrite" { - failureLog(function, args, startTime, "", "policy should be set to readwrite", err).Fatal() + logError(function, args, startTime, "", "policy should be set to readwrite", err) + return } // List all buckets. 
buckets, err := c.ListBuckets() @@ -2360,10 +3245,12 @@ func testFunctional() { args = nil if len(buckets) == 0 { - failureLog(function, args, startTime, "", "Found bucket list to be empty", err).Fatal() + logError(function, args, startTime, "", "Found bucket list to be empty", err) + return } if err != nil { - failureLog(function, args, startTime, "", "ListBuckets failed", err).Fatal() + logError(function, args, startTime, "", "ListBuckets failed", err) + return } // Verify if previously created bucket is listed in list buckets. @@ -2376,7 +3263,8 @@ func testFunctional() { // If bucket not found error out. if !bucketFound { - failureLog(function, args, startTime, "", "Bucket: "+bucketName+" not found", err).Fatal() + logError(function, args, startTime, "", "Bucket: "+bucketName+" not found", err) + return } objectName := bucketName + "unique" @@ -2384,7 +3272,6 @@ func testFunctional() { // Generate data buf := bytes.Repeat([]byte("f"), 1<<19) - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "") function = "PutObject(bucketName, objectName, reader, contentType)" args = map[string]interface{}{ "bucketName": bucketName, @@ -2392,27 +3279,32 @@ func testFunctional() { "contentType": "", } + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + logError(function, args, startTime, "", "PutObject failed", err) + return } if n != int64(len(buf)) { - failureLog(function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err).Fatal() + logError(function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err) + return } - n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), "binary/octet-stream") args = map[string]interface{}{ "bucketName": bucketName, "objectName": objectName + "-nolength", "contentType": "binary/octet-stream", } + n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { - failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + logError(function, args, startTime, "", "PutObject failed", err) + return } if n != int64(len(buf)) { - failureLog(function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err).Fatal() + logError(function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err) + return } // Instantiate a done channel to close all listing. 
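
For reference, the hunks above migrate callers to the consolidated upload API of the updated minio-go: PutObject now takes the object size and a minio.PutObjectOptions struct in place of the old bare content-type string. Below is a minimal sketch of the new call shape, not part of the patch; the endpoint, credentials, bucket and object names are placeholders.

    package main

    import (
    	"bytes"
    	"log"

    	minio "github.com/minio/minio-go"
    )

    func main() {
    	// Placeholder endpoint and credentials.
    	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
    	if err != nil {
    		log.Fatalln(err)
    	}

    	buf := bytes.Repeat([]byte("f"), 1<<19)

    	// Size is passed explicitly; the content type moves into PutObjectOptions.
    	n, err := c.PutObject("my-bucket", "my-object", bytes.NewReader(buf),
    		int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
    	if err != nil {
    		log.Fatalln(err)
    	}
    	log.Println("uploaded", n, "bytes")
    }
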
@@ -2436,7 +3328,8 @@ func testFunctional() { } } if !objFound { - failureLog(function, args, startTime, "", "Object "+objectName+" not found", err).Fatal() + logError(function, args, startTime, "", "Object "+objectName+" not found", err) + return } objFound = false @@ -2455,7 +3348,8 @@ func testFunctional() { } } if !objFound { - failureLog(function, args, startTime, "", "Object "+objectName+" not found", err).Fatal() + logError(function, args, startTime, "", "Object "+objectName+" not found", err) + return } incompObjNotFound := true @@ -2474,10 +3368,11 @@ func testFunctional() { } } if !incompObjNotFound { - failureLog(function, args, startTime, "", "Unexpected dangling incomplete upload found", err).Fatal() + logError(function, args, startTime, "", "Unexpected dangling incomplete upload found", err) + return } - newReader, err := c.GetObject(bucketName, objectName) + newReader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) function = "GetObject(bucketName, objectName)" args = map[string]interface{}{ "bucketName": bucketName, @@ -2485,19 +3380,22 @@ func testFunctional() { } if err != nil { - failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + logError(function, args, startTime, "", "GetObject failed", err) + return } newReadBytes, err := ioutil.ReadAll(newReader) if err != nil { - failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal() + logError(function, args, startTime, "", "ReadAll failed", err) + return } if !bytes.Equal(newReadBytes, buf) { - failureLog(function, args, startTime, "", "GetObject bytes mismatch", err).Fatal() + logError(function, args, startTime, "", "GetObject bytes mismatch", err) + return } - err = c.FGetObject(bucketName, objectName, fileName+"-f") + err = c.FGetObject(bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) function = "FGetObject(bucketName, objectName, fileName)" args = map[string]interface{}{ "bucketName": bucketName, @@ -2506,7 +3404,19 @@ func testFunctional() { } if err != nil { - failureLog(function, args, startTime, "", "FGetObject failed", err).Fatal() + logError(function, args, startTime, "", "FGetObject failed", err) + return + } + + function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": "", + "expires": 3600 * time.Second, + } + if _, err = c.PresignedHeadObject(bucketName, "", 3600*time.Second, nil); err == nil { + logError(function, args, startTime, "", "PresignedHeadObject success", err) + return } // Generate presigned HEAD object url. @@ -2519,21 +3429,37 @@ func testFunctional() { } if err != nil { - failureLog(function, args, startTime, "", "PresignedHeadObject failed", err).Fatal() + logError(function, args, startTime, "", "PresignedHeadObject failed", err) + return } // Verify if presigned url works. 
resp, err := http.Head(presignedHeadURL.String()) if err != nil { - failureLog(function, args, startTime, "", "PresignedHeadObject response incorrect", err).Fatal() + logError(function, args, startTime, "", "PresignedHeadObject response incorrect", err) + return } if resp.StatusCode != http.StatusOK { - failureLog(function, args, startTime, "", "PresignedHeadObject response incorrect, status "+string(resp.StatusCode), err).Fatal() + logError(function, args, startTime, "", "PresignedHeadObject response incorrect, status "+string(resp.StatusCode), err) + return } if resp.Header.Get("ETag") == "" { - failureLog(function, args, startTime, "", "PresignedHeadObject response incorrect", err).Fatal() + logError(function, args, startTime, "", "PresignedHeadObject response incorrect", err) + return } resp.Body.Close() + _, err = c.PresignedGetObject(bucketName, "", 3600*time.Second, nil) + function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": "", + "expires": 3600 * time.Second, + } + if err == nil { + logError(function, args, startTime, "", "PresignedGetObject success", err) + return + } + // Generate presigned GET object url. presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil) function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" @@ -2544,24 +3470,29 @@ func testFunctional() { } if err != nil { - failureLog(function, args, startTime, "", "PresignedGetObject failed", err).Fatal() + logError(function, args, startTime, "", "PresignedGetObject failed", err) + return } // Verify if presigned url works. resp, err = http.Get(presignedGetURL.String()) if err != nil { - failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal() + logError(function, args, startTime, "", "PresignedGetObject response incorrect", err) + return } if resp.StatusCode != http.StatusOK { - failureLog(function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err).Fatal() + logError(function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err) + return } newPresignedBytes, err := ioutil.ReadAll(resp.Body) if err != nil { - failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal() + logError(function, args, startTime, "", "PresignedGetObject response incorrect", err) + return } resp.Body.Close() if !bytes.Equal(newPresignedBytes, buf) { - failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal() + logError(function, args, startTime, "", "PresignedGetObject response incorrect", err) + return } // Set request parameters. @@ -2576,29 +3507,46 @@ func testFunctional() { } if err != nil { - failureLog(function, args, startTime, "", "PresignedGetObject failed", err).Fatal() + logError(function, args, startTime, "", "PresignedGetObject failed", err) + return } // Verify if presigned url works. 
resp, err = http.Get(presignedGetURL.String()) if err != nil { - failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal() + logError(function, args, startTime, "", "PresignedGetObject response incorrect", err) + return } if resp.StatusCode != http.StatusOK { - failureLog(function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err).Fatal() + logError(function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err) + return } newPresignedBytes, err = ioutil.ReadAll(resp.Body) if err != nil { - failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal() + logError(function, args, startTime, "", "PresignedGetObject response incorrect", err) + return } if !bytes.Equal(newPresignedBytes, buf) { - failureLog(function, args, startTime, "", "Bytes mismatch for presigned GET URL", err).Fatal() + logError(function, args, startTime, "", "Bytes mismatch for presigned GET URL", err) + return } if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" { - failureLog(function, args, startTime, "", "wrong Content-Disposition received "+string(resp.Header.Get("Content-Disposition")), err).Fatal() + logError(function, args, startTime, "", "wrong Content-Disposition received "+string(resp.Header.Get("Content-Disposition")), err) + return + } + + _, err = c.PresignedPutObject(bucketName, "", 3600*time.Second) + function = "PresignedPutObject(bucketName, objectName, expires)" + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": "", + "expires": 3600 * time.Second, + } + if err == nil { + logError(function, args, startTime, "", "PresignedPutObject success", err) + return } presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second) - function = "PresignedPutObject(bucketName, objectName, expires)" args = map[string]interface{}{ "bucketName": bucketName, @@ -2607,14 +3555,16 @@ func testFunctional() { } if err != nil { - failureLog(function, args, startTime, "", "PresignedPutObject failed", err).Fatal() + logError(function, args, startTime, "", "PresignedPutObject failed", err) + return } buf = bytes.Repeat([]byte("g"), 1<<19) req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf)) if err != nil { - failureLog(function, args, startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err).Fatal() + logError(function, args, startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err) + return } httpClient := &http.Client{ // Setting a sensible time out of 30secs to wait for response @@ -2625,21 +3575,25 @@ func testFunctional() { } resp, err = httpClient.Do(req) if err != nil { - failureLog(function, args, startTime, "", "PresignedPutObject failed", err).Fatal() + logError(function, args, startTime, "", "PresignedPutObject failed", err) + return } - newReader, err = c.GetObject(bucketName, objectName+"-presigned") + newReader, err = c.GetObject(bucketName, objectName+"-presigned", minio.GetObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "GetObject after PresignedPutObject failed", err).Fatal() + logError(function, args, startTime, "", "GetObject after PresignedPutObject failed", err) + return } newReadBytes, err = ioutil.ReadAll(newReader) if err != nil { - failureLog(function, args, startTime, "", "ReadAll after GetObject failed", err).Fatal() + logError(function, args, 
startTime, "", "ReadAll after GetObject failed", err) + return } if !bytes.Equal(newReadBytes, buf) { - failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal() + logError(function, args, startTime, "", "Bytes mismatch", err) + return } err = c.RemoveObject(bucketName, objectName) @@ -2650,27 +3604,31 @@ func testFunctional() { } if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + logError(function, args, startTime, "", "RemoveObject failed", err) + return } err = c.RemoveObject(bucketName, objectName+"-f") args["objectName"] = objectName + "-f" if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + logError(function, args, startTime, "", "RemoveObject failed", err) + return } err = c.RemoveObject(bucketName, objectName+"-nolength") args["objectName"] = objectName + "-nolength" if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + logError(function, args, startTime, "", "RemoveObject failed", err) + return } err = c.RemoveObject(bucketName, objectName+"-presigned") args["objectName"] = objectName + "-presigned" if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + logError(function, args, startTime, "", "RemoveObject failed", err) + return } err = c.RemoveBucket(bucketName) @@ -2680,20 +3638,26 @@ func testFunctional() { } if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + logError(function, args, startTime, "", "RemoveBucket failed", err) + return } err = c.RemoveBucket(bucketName) if err == nil { - failureLog(function, args, startTime, "", "RemoveBucket did not fail for invalid bucket name", err).Fatal() + logError(function, args, startTime, "", "RemoveBucket did not fail for invalid bucket name", err) + return } if err.Error() != "The specified bucket does not exist" { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + logError(function, args, startTime, "", "RemoveBucket failed", err) + return } + if err = os.Remove(fileName); err != nil { - failureLog(function, args, startTime, "", "File Remove failed", err).Fatal() + logError(function, args, startTime, "", "File Remove failed", err) + return } if err = os.Remove(fileName + "-f"); err != nil { - failureLog(function, args, startTime, "", "File Remove failed", err).Fatal() + logError(function, args, startTime, "", "File Remove failed", err) + return } function = "testFunctional()" successLogger(function, args, startTime).Info() @@ -2718,7 +3682,8 @@ func testGetObjectObjectModified() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client object creation failed", err) + return } // Enable tracing, write to stderr. @@ -2733,23 +3698,26 @@ func testGetObjectObjectModified() { err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } defer c.RemoveBucket(bucketName) // Upload an object. 
objectName := "myobject" content := "helloworld" - _, err = c.PutObject(bucketName, objectName, strings.NewReader(content), "application/text") + _, err = c.PutObject(bucketName, objectName, strings.NewReader(content), int64(len(content)), minio.PutObjectOptions{ContentType: "application/text"}) if err != nil { - failureLog(function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err).Fatal() + logError(function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) + return } defer c.RemoveObject(bucketName, objectName) - reader, err := c.GetObject(bucketName, objectName) + reader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err).Fatal() + logError(function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err) + return } defer reader.Close() @@ -2757,28 +3725,38 @@ func testGetObjectObjectModified() { b := make([]byte, 5) n, err := reader.ReadAt(b, 0) if err != nil { - failureLog(function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err).Fatal() + logError(function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err) + return } // Upload different contents to the same object while object is being read. newContent := "goodbyeworld" - _, err = c.PutObject(bucketName, objectName, strings.NewReader(newContent), "application/text") + _, err = c.PutObject(bucketName, objectName, strings.NewReader(newContent), int64(len(newContent)), minio.PutObjectOptions{ContentType: "application/text"}) if err != nil { - failureLog(function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err).Fatal() + logError(function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) + return } // Confirm that a Stat() call in between doesn't change the Object's cached etag. _, err = reader.Stat() expectedError := "At least one of the pre-conditions you specified did not hold" if err.Error() != expectedError { - failureLog(function, args, startTime, "", "Expected Stat to fail with error "+expectedError+", but received "+err.Error(), err).Fatal() + logError(function, args, startTime, "", "Expected Stat to fail with error "+expectedError+", but received "+err.Error(), err) + return } // Read again only to find object contents have been modified since last read. _, err = reader.ReadAt(b, int64(n)) if err.Error() != expectedError { - failureLog(function, args, startTime, "", "Expected ReadAt to fail with error "+expectedError+", but received "+err.Error(), err).Fatal() + logError(function, args, startTime, "", "Expected ReadAt to fail with error "+expectedError+", but received "+err.Error(), err) + return } + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return + } + successLogger(function, args, startTime).Info() } @@ -2802,7 +3780,8 @@ func testPutObjectUploadSeekedObject() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client object creation failed", err) + return } // Enable tracing, write to stderr. 
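
The read paths migrated in the preceding hunks pass a minio.GetObjectOptions value to GetObject and FGetObject. A small sketch under the same placeholder assumptions follows (a *minio.Client named c created as in the sketch above, made-up bucket, object and file names); it is illustrative only.

    package example

    import (
    	"log"

    	minio "github.com/minio/minio-go"
    )

    // readBack assumes c was created with minio.New.
    func readBack(c *minio.Client) {
    	// GetObject now takes an options struct.
    	r, err := c.GetObject("my-bucket", "my-object", minio.GetObjectOptions{})
    	if err != nil {
    		log.Fatalln(err)
    	}
    	defer r.Close()

    	st, err := r.Stat()
    	if err != nil {
    		log.Fatalln(err)
    	}
    	log.Println("object size:", st.Size)

    	// FGetObject takes the same options struct and writes to a local file.
    	if err := c.FGetObject("my-bucket", "my-object", "/tmp/my-object", minio.GetObjectOptions{}); err != nil {
    		log.Fatalln(err)
    	}
    }
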
@@ -2817,83 +3796,92 @@ func testPutObjectUploadSeekedObject() { err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } defer c.RemoveBucket(bucketName) - tempfile, err := ioutil.TempFile("", "minio-go-upload-test-") - args["fileToUpload"] = tempfile + var tempfile *os.File - if err != nil { - failureLog(function, args, startTime, "", "TempFile create failed", err).Fatal() - } - - var data []byte - if fileName := getFilePath("datafile-100-kB"); fileName != "" { - data, _ = ioutil.ReadFile(fileName) + if fileName := getMintDataDirFilePath("datafile-100-kB"); fileName != "" { + tempfile, err = os.Open(fileName) + if err != nil { + logError(function, args, startTime, "", "File open failed", err) + return + } + args["fileToUpload"] = fileName } else { - // Generate data more than 32K - data = bytes.Repeat([]byte("1"), 120000) - } - var length = len(data) - if _, err = tempfile.Write(data); err != nil { - failureLog(function, args, startTime, "", "TempFile write failed", err).Fatal() - } + tempfile, err = ioutil.TempFile("", "minio-go-upload-test-") + if err != nil { + logError(function, args, startTime, "", "TempFile create failed", err) + return + } + args["fileToUpload"] = tempfile.Name() + // Generate 100kB data + if _, err = io.Copy(tempfile, getDataReader("datafile-100-kB")); err != nil { + logError(function, args, startTime, "", "File copy failed", err) + return + } + + defer os.Remove(tempfile.Name()) + + // Seek back to the beginning of the file. + tempfile.Seek(0, 0) + } + var length = 100 * humanize.KiByte objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) args["objectName"] = objectName offset := length / 2 - if _, err := tempfile.Seek(int64(offset), 0); err != nil { - failureLog(function, args, startTime, "", "TempFile seek failed", err).Fatal() + if _, err = tempfile.Seek(int64(offset), 0); err != nil { + logError(function, args, startTime, "", "TempFile seek failed", err) + return } - n, err := c.PutObject(bucketName, objectName, tempfile, "binary/octet-stream") + n, err := c.PutObject(bucketName, objectName, tempfile, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { - failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + logError(function, args, startTime, "", "PutObject failed", err) + return } if n != int64(length-offset) { - failureLog(function, args, startTime, "", "Invalid length returned, expected "+string(int64(length-offset))+" got "+string(n), err).Fatal() + logError(function, args, startTime, "", fmt.Sprintf("Invalid length returned, expected %d got %d", int64(length-offset), n), err) + return } tempfile.Close() - if err = os.Remove(tempfile.Name()); err != nil { - failureLog(function, args, startTime, "", "File remove failed", err).Fatal() - } - length = int(n) - - obj, err := c.GetObject(bucketName, objectName) + obj, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + logError(function, args, startTime, "", "GetObject failed", err) + return } n, err = obj.Seek(int64(offset), 0) if err != nil { - failureLog(function, args, startTime, "", "Seek failed", err).Fatal() + logError(function, args, startTime, "", "Seek failed", err) + return } if n != int64(offset) { - failureLog(function, args, startTime, "", "Invalid offset 
returned, expected "+string(int64(offset))+" got "+string(n), err).Fatal() + logError(function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(offset), n), err) + return } - n, err = c.PutObject(bucketName, objectName+"getobject", obj, "binary/octet-stream") + n, err = c.PutObject(bucketName, objectName+"getobject", obj, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { - failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + logError(function, args, startTime, "", "PutObject failed", err) + return } if n != int64(length-offset) { - failureLog(function, args, startTime, "", "Invalid offset returned, expected "+string(int64(length-offset))+" got "+string(n), err).Fatal() + logError(function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(length-offset), n), err) + return } - if err = c.RemoveObject(bucketName, objectName); err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } - if err = c.RemoveObject(bucketName, objectName+"getobject"); err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() - } - - if err = c.RemoveBucket(bucketName); err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() - } successLogger(function, args, startTime).Info() } @@ -2923,7 +3911,8 @@ func testMakeBucketErrorV2() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio v2 client object creation failed", err) + return } // Enable tracing, write to stderr. @@ -2938,19 +3927,24 @@ func testMakeBucketErrorV2() { // Make a new bucket in 'eu-west-1'. if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } if err = c.MakeBucket(bucketName, "eu-west-1"); err == nil { - failureLog(function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err).Fatal() + logError(function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err) + return } // Verify valid error response from server. 
if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { - failureLog(function, args, startTime, "", "Invalid error returned by server", err).Fatal() + logError(function, args, startTime, "", "Invalid error returned by server", err) } - if err = c.RemoveBucket(bucketName); err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } + successLogger(function, args, startTime).Info() } @@ -2975,7 +3969,8 @@ func testGetObjectClosedTwiceV2() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio v2 client object creation failed", err) + return } // Enable tracing, write to stderr. @@ -2991,55 +3986,62 @@ func testGetObjectClosedTwiceV2() { // Make a new bucket. err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } // Generate 33K of data. - var reader = getDataReader("datafile-33-kB", thirtyThreeKiB) + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") defer reader.Close() // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName - n, err := c.PutObject(bucketName, objectName, reader, "binary/octet-stream") + n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { - failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + logError(function, args, startTime, "", "PutObject failed", err) + return } - if n != int64(thirtyThreeKiB) { - failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(thirtyThreeKiB)+" got "+string(n), err).Fatal() + if n != int64(bufSize) { + logError(function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(n), err) + return } // Read the data back - r, err := c.GetObject(bucketName, objectName) + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + logError(function, args, startTime, "", "GetObject failed", err) + return } st, err := r.Stat() if err != nil { - failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + logError(function, args, startTime, "", "Stat failed", err) + return } - if st.Size != int64(thirtyThreeKiB) { - failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(thirtyThreeKiB)+" got "+string(st.Size), err).Fatal() + if st.Size != int64(bufSize) { + logError(function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(st.Size), err) + return } if err := r.Close(); err != nil { - failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + logError(function, args, startTime, "", "Stat failed", err) + return } if err := r.Close(); err == nil { - failureLog(function, args, startTime, "", "Object is already closed, should return error", err).Fatal() + logError(function, args, 
startTime, "", "Object is already closed, should return error", err) + return } - err = c.RemoveObject(bucketName, objectName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() - } - err = c.RemoveBucket(bucketName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } + successLogger(function, args, startTime).Info() } @@ -3064,7 +4066,8 @@ func testRemovePartiallyUploadedV2() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio v2 client object creation failed", err) + return } // Set user agent. @@ -3080,7 +4083,8 @@ func testRemovePartiallyUploadedV2() { // make a new bucket. err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024)) @@ -3091,7 +4095,8 @@ func testRemovePartiallyUploadedV2() { for i < 25 { _, cerr := io.CopyN(writer, r, 128*1024) if cerr != nil { - failureLog(function, args, startTime, "", "Copy failed", cerr).Fatal() + logError(function, args, startTime, "", "Copy failed", cerr) + return } i++ r.Seek(0, 0) @@ -3102,21 +4107,26 @@ func testRemovePartiallyUploadedV2() { objectName := bucketName + "-resumable" args["objectName"] = objectName - _, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream") + _, err = c.PutObject(bucketName, objectName, reader, -1, minio.PutObjectOptions{ContentType: "application/octet-stream"}) if err == nil { - failureLog(function, args, startTime, "", "PutObject should fail", err).Fatal() + logError(function, args, startTime, "", "PutObject should fail", err) + return } if err.Error() != "proactively closed to be verified later" { - failureLog(function, args, startTime, "", "Unexpected error, expected : proactively closed to be verified later", err).Fatal() + logError(function, args, startTime, "", "Unexpected error, expected : proactively closed to be verified later", err) + return } err = c.RemoveIncompleteUpload(bucketName, objectName) if err != nil { - failureLog(function, args, startTime, "", "RemoveIncompleteUpload failed", err).Fatal() + logError(function, args, startTime, "", "RemoveIncompleteUpload failed", err) + return } - err = c.RemoveBucket(bucketName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } + successLogger(function, args, startTime).Info() } @@ -3124,12 +4134,12 @@ func testRemovePartiallyUploadedV2() { func testFPutObjectV2() { // initialize logging params startTime := time.Now() - function := "FPutObject(bucketName, objectName, fileName, contentType)" + function := "FPutObject(bucketName, objectName, fileName, opts)" args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "fileName": "", - "contentType": "application/octet-stream", + "bucketName": "", + "objectName": "", + "fileName": "", + "opts": "", } // Seed random based on current time. 
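
The testFPutObjectV2 hunks that follow switch FPutObject and StatObject to option structs as well. A minimal sketch under the same assumptions (placeholder names, a *minio.Client named c, and a local file that is presumed to exist):

    package example

    import (
    	"log"

    	minio "github.com/minio/minio-go"
    )

    // uploadFile assumes c was created with minio.New and that
    // /tmp/upload.gtar is an existing local file.
    func uploadFile(c *minio.Client) {
    	// Content type is supplied via PutObjectOptions; left empty, it is
    	// inferred from the file extension, as the test below relies on.
    	n, err := c.FPutObject("my-bucket", "my-object.gtar", "/tmp/upload.gtar",
    		minio.PutObjectOptions{ContentType: "application/x-gtar"})
    	if err != nil {
    		log.Fatalln(err)
    	}
    	log.Println("uploaded", n, "bytes")

    	// StatObject likewise takes a StatObjectOptions value.
    	info, err := c.StatObject("my-bucket", "my-object.gtar", minio.StatObjectOptions{})
    	if err != nil {
    		log.Fatalln(err)
    	}
    	log.Println("content type:", info.ContentType)
    }
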
@@ -3143,7 +4153,8 @@ func testFPutObjectV2() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio v2 client object creation failed", err) + return } // Enable tracing, write to stderr. @@ -3159,28 +4170,33 @@ func testFPutObjectV2() { // Make a new bucket. err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } // Make a temp file with 11*1024*1024 bytes of data. file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") if err != nil { - failureLog(function, args, startTime, "", "TempFile creation failed", err).Fatal() + logError(function, args, startTime, "", "TempFile creation failed", err) + return } r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024)) n, err := io.CopyN(file, r, 11*1024*1024) if err != nil { - failureLog(function, args, startTime, "", "Copy failed", err).Fatal() + logError(function, args, startTime, "", "Copy failed", err) + return } if n != int64(11*1024*1024) { - failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err).Fatal() + logError(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err) + return } // Close the file pro-actively for windows. err = file.Close() if err != nil { - failureLog(function, args, startTime, "", "File close failed", err).Fatal() + logError(function, args, startTime, "", "File close failed", err) + return } // Set base object name @@ -3189,95 +4205,94 @@ func testFPutObjectV2() { args["fileName"] = file.Name() // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) - n, err = c.FPutObject(bucketName, objectName+"-standard", file.Name(), "application/octet-stream") + n, err = c.FPutObject(bucketName, objectName+"-standard", file.Name(), minio.PutObjectOptions{ContentType: "application/octet-stream"}) if err != nil { - failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal() + logError(function, args, startTime, "", "FPutObject failed", err) + return } if n != int64(11*1024*1024) { - failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err).Fatal() + logError(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err) + return } // Perform FPutObject with no contentType provided (Expecting application/octet-stream) - n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), "") args["objectName"] = objectName + "-Octet" args["contentType"] = "" + n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), minio.PutObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal() + logError(function, args, startTime, "", "FPutObject failed", err) + return } if n != int64(11*1024*1024) { - failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err).Fatal() + logError(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err) + return } // Add extension to temp file 
name fileName := file.Name() err = os.Rename(file.Name(), fileName+".gtar") if err != nil { - failureLog(function, args, startTime, "", "Rename failed", err).Fatal() + logError(function, args, startTime, "", "Rename failed", err) + return } // Perform FPutObject with no contentType provided (Expecting application/x-gtar) - n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", "") args["objectName"] = objectName + "-Octet" args["contentType"] = "" args["fileName"] = fileName + ".gtar" + n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", minio.PutObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal() + logError(function, args, startTime, "", "FPutObject failed", err) + return } if n != int64(11*1024*1024) { - failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err).Fatal() + logError(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err) + return } // Check headers - rStandard, err := c.StatObject(bucketName, objectName+"-standard") + rStandard, err := c.StatObject(bucketName, objectName+"-standard", minio.StatObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "StatObject failed", err).Fatal() + logError(function, args, startTime, "", "StatObject failed", err) + return } if rStandard.ContentType != "application/octet-stream" { - failureLog(function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err).Fatal() + logError(function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err) + return } - rOctet, err := c.StatObject(bucketName, objectName+"-Octet") + rOctet, err := c.StatObject(bucketName, objectName+"-Octet", minio.StatObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "StatObject failed", err).Fatal() + logError(function, args, startTime, "", "StatObject failed", err) + return } if rOctet.ContentType != "application/octet-stream" { - failureLog(function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err).Fatal() + logError(function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err) + return } - rGTar, err := c.StatObject(bucketName, objectName+"-GTar") + rGTar, err := c.StatObject(bucketName, objectName+"-GTar", minio.StatObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "StatObject failed", err).Fatal() + logError(function, args, startTime, "", "StatObject failed", err) + return } if rGTar.ContentType != "application/x-gtar" { - failureLog(function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-gtar , got "+rGTar.ContentType, err).Fatal() + logError(function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-gtar , got "+rGTar.ContentType, err) + return } - // Remove all objects and bucket and temp file - err = c.RemoveObject(bucketName, objectName+"-standard") - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() - } - - err = c.RemoveObject(bucketName, objectName+"-Octet") - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() - 
} - - err = c.RemoveObject(bucketName, objectName+"-GTar") - if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() - } - - err = c.RemoveBucket(bucketName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } err = os.Remove(fileName + ".gtar") if err != nil { - failureLog(function, args, startTime, "", "File remove failed", err).Fatal() + logError(function, args, startTime, "", "File remove failed", err) + return } successLogger(function, args, startTime).Info() } @@ -3308,7 +4323,8 @@ func testMakeBucketRegionsV2() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio v2 client object creation failed", err) + return } // Enable tracing, write to stderr. @@ -3323,11 +4339,13 @@ func testMakeBucketRegionsV2() { // Make a new bucket in 'eu-central-1'. if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } - if err = c.RemoveBucket(bucketName); err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } // Make a new bucket with '.' in its name, in 'us-west-2'. This @@ -3336,13 +4354,16 @@ func testMakeBucketRegionsV2() { if err = c.MakeBucket(bucketName+".withperiod", "us-west-2"); err != nil { args["bucketName"] = bucketName + ".withperiod" args["region"] = "us-west-2" - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } - // Remove the newly created bucket. - if err = c.RemoveBucket(bucketName + ".withperiod"); err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName+".withperiod", c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } + successLogger(function, args, startTime).Info() } @@ -3367,7 +4388,8 @@ func testGetObjectReadSeekFunctionalV2() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio v2 client object creation failed", err) + return } // Enable tracing, write to stderr. @@ -3383,11 +4405,13 @@ func testGetObjectReadSeekFunctionalV2() { // Make a new bucket. err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } // Generate 33K of data. 
- var reader = getDataReader("datafile-33-kB", thirtyThreeKiB) + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") defer reader.Close() objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") @@ -3395,99 +4419,116 @@ func testGetObjectReadSeekFunctionalV2() { buf, err := ioutil.ReadAll(reader) if err != nil { - failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal() + logError(function, args, startTime, "", "ReadAll failed", err) + return } // Save the data. - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream") + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { - failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + logError(function, args, startTime, "", "PutObject failed", err) + return } - if n != int64(thirtyThreeKiB) { - failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal() + if n != int64(bufSize) { + logError(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+" got "+string(n), err) + return } // Read the data back - r, err := c.GetObject(bucketName, objectName) + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + logError(function, args, startTime, "", "GetObject failed", err) + return } st, err := r.Stat() if err != nil { - failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + logError(function, args, startTime, "", "Stat failed", err) + return } - if st.Size != int64(thirtyThreeKiB) { - failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(st.Size), err).Fatal() + if st.Size != int64(bufSize) { + logError(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err) + return } offset := int64(2048) n, err = r.Seek(offset, 0) if err != nil { - failureLog(function, args, startTime, "", "Seek failed", err).Fatal() + logError(function, args, startTime, "", "Seek failed", err) + return } if n != offset { - failureLog(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err).Fatal() + logError(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err) + return } n, err = r.Seek(0, 1) if err != nil { - failureLog(function, args, startTime, "", "Seek failed", err).Fatal() + logError(function, args, startTime, "", "Seek failed", err) + return } if n != offset { - failureLog(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err).Fatal() + logError(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err) + return } _, err = r.Seek(offset, 2) if err == nil { - failureLog(function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err).Fatal() + logError(function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err) + return } n, err = r.Seek(-offset, 2) if err != nil { - failureLog(function, args, startTime, "", 
"Seek failed", err).Fatal() + logError(function, args, startTime, "", "Seek failed", err) + return } if n != st.Size-offset { - failureLog(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(st.Size-offset)+" got "+string(n), err).Fatal() + logError(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(st.Size-offset)+" got "+string(n), err) + return } var buffer1 bytes.Buffer if _, err = io.CopyN(&buffer1, r, st.Size); err != nil { if err != io.EOF { - failureLog(function, args, startTime, "", "Copy failed", err).Fatal() + logError(function, args, startTime, "", "Copy failed", err) + return } } if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) { - failureLog(function, args, startTime, "", "Incorrect read bytes v/s original buffer", err).Fatal() + logError(function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) + return } // Seek again and read again. n, err = r.Seek(offset-1, 0) if err != nil { - failureLog(function, args, startTime, "", "Seek failed", err).Fatal() + logError(function, args, startTime, "", "Seek failed", err) + return } if n != (offset - 1) { - failureLog(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset-1)+" got "+string(n), err).Fatal() + logError(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset-1)+" got "+string(n), err) + return } var buffer2 bytes.Buffer if _, err = io.CopyN(&buffer2, r, st.Size); err != nil { if err != io.EOF { - failureLog(function, args, startTime, "", "Copy failed", err).Fatal() + logError(function, args, startTime, "", "Copy failed", err) + return } } // Verify now lesser bytes. if !bytes.Equal(buf[2047:], buffer2.Bytes()) { - failureLog(function, args, startTime, "", "Incorrect read bytes v/s original buffer", err).Fatal() + logError(function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) + return } - err = c.RemoveObject(bucketName, objectName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() - } - err = c.RemoveBucket(bucketName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } + successLogger(function, args, startTime).Info() } @@ -3512,7 +4553,8 @@ func testGetObjectReadAtFunctionalV2() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio v2 client object creation failed", err) + return } // Enable tracing, write to stderr. @@ -3528,11 +4570,13 @@ func testGetObjectReadAtFunctionalV2() { // Make a new bucket. err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } // Generate 33K of data. 
- var reader = getDataReader("datafile-33-kB", thirtyThreeKiB) + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") defer reader.Close() objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") @@ -3540,32 +4584,38 @@ func testGetObjectReadAtFunctionalV2() { buf, err := ioutil.ReadAll(reader) if err != nil { - failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal() + logError(function, args, startTime, "", "ReadAll failed", err) + return } // Save the data - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream") + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { - failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + logError(function, args, startTime, "", "PutObject failed", err) + return } - if n != int64(thirtyThreeKiB) { - failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(thirtyThreeKiB)+" got "+string(n), err).Fatal() + if n != int64(bufSize) { + logError(function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(n), err) + return } // Read the data back - r, err := c.GetObject(bucketName, objectName) + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + logError(function, args, startTime, "", "GetObject failed", err) + return } st, err := r.Stat() if err != nil { - failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + logError(function, args, startTime, "", "Stat failed", err) + return } - if st.Size != int64(thirtyThreeKiB) { - failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(thirtyThreeKiB)+" got "+string(st.Size), err).Fatal() + if st.Size != int64(bufSize) { + logError(function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(st.Size), err) + return } offset := int64(2048) @@ -3577,35 +4627,44 @@ func testGetObjectReadAtFunctionalV2() { m, err := r.ReadAt(buf2, offset) if err != nil { - failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal() + logError(function, args, startTime, "", "ReadAt failed", err) + return } if m != len(buf2) { - failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+" got "+string(m), err).Fatal() + logError(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+" got "+string(m), err) + return } if !bytes.Equal(buf2, buf[offset:offset+512]) { - failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal() + logError(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return } offset += 512 m, err = r.ReadAt(buf3, offset) if err != nil { - failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal() + logError(function, args, startTime, "", "ReadAt failed", err) + return } if m != len(buf3) { - failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+" got "+string(m), err).Fatal() + logError(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+" got "+string(m), err) + 
return } if !bytes.Equal(buf3, buf[offset:offset+512]) { - failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal() + logError(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return } offset += 512 m, err = r.ReadAt(buf4, offset) if err != nil { - failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal() + logError(function, args, startTime, "", "ReadAt failed", err) + return } if m != len(buf4) { - failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+" got "+string(m), err).Fatal() + logError(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+" got "+string(m), err) + return } if !bytes.Equal(buf4, buf[offset:offset+512]) { - failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal() + logError(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return } buf5 := make([]byte, n) @@ -3613,14 +4672,17 @@ func testGetObjectReadAtFunctionalV2() { m, err = r.ReadAt(buf5, 0) if err != nil { if err != io.EOF { - failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal() + logError(function, args, startTime, "", "ReadAt failed", err) + return } } if m != len(buf5) { - failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+" got "+string(m), err).Fatal() + logError(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+" got "+string(m), err) + return } if !bytes.Equal(buf, buf5) { - failureLog(function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err).Fatal() + logError(function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) + return } buf6 := make([]byte, n+1) @@ -3628,17 +4690,16 @@ func testGetObjectReadAtFunctionalV2() { _, err = r.ReadAt(buf6, 0) if err != nil { if err != io.EOF { - failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal() + logError(function, args, startTime, "", "ReadAt failed", err) + return } } - err = c.RemoveObject(bucketName, objectName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() - } - err = c.RemoveBucket(bucketName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } + successLogger(function, args, startTime).Info() } @@ -3663,7 +4724,8 @@ func testCopyObjectV2() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio v2 client object creation failed", err) + return } // Enable tracing, write to stderr. @@ -3678,38 +4740,45 @@ func testCopyObjectV2() { // Make a new bucket in 'us-east-1' (source bucket). err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } // Make a new bucket in 'us-east-1' (destination bucket). 
err = c.MakeBucket(bucketName+"-copy", "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } // Generate 33K of data. - var reader = getDataReader("datafile-33-kB", thirtyThreeKiB) + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") defer reader.Close() // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - n, err := c.PutObject(bucketName, objectName, reader, "binary/octet-stream") + n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { - failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + logError(function, args, startTime, "", "PutObject failed", err) + return } - if n != int64(thirtyThreeKiB) { - failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal() + if n != int64(bufSize) { + logError(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+" got "+string(n), err) + return } - r, err := c.GetObject(bucketName, objectName) + r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + logError(function, args, startTime, "", "GetObject failed", err) + return } // Check the various fields of source object against destination object. objInfo, err := r.Stat() if err != nil { - failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + logError(function, args, startTime, "", "Stat failed", err) + return } // Copy Source @@ -3720,102 +4789,107 @@ func testCopyObjectV2() { // All invalid conditions first. 
err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)) if err == nil { - failureLog(function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err).Fatal() + logError(function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err) + return } err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)) if err == nil { - failureLog(function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err).Fatal() + logError(function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err) + return } err = src.SetMatchETagCond("") if err == nil { - failureLog(function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err).Fatal() + logError(function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err) + return } err = src.SetMatchETagExceptCond("") if err == nil { - failureLog(function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err).Fatal() + logError(function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err) + return } err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) if err != nil { - failureLog(function, args, startTime, "", "SetModifiedSinceCond failed", err).Fatal() + logError(function, args, startTime, "", "SetModifiedSinceCond failed", err) + return } err = src.SetMatchETagCond(objInfo.ETag) if err != nil { - failureLog(function, args, startTime, "", "SetMatchETagCond failed", err).Fatal() + logError(function, args, startTime, "", "SetMatchETagCond failed", err) + return } args["source"] = src dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil) if err != nil { - failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal() + logError(function, args, startTime, "", "NewDestinationInfo failed", err) + return } args["destination"] = dst // Perform the Copy err = c.CopyObject(dst, src) if err != nil { - failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal() + logError(function, args, startTime, "", "CopyObject failed", err) + return } // Source object - r, err = c.GetObject(bucketName, objectName) + r, err = c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + logError(function, args, startTime, "", "GetObject failed", err) + return } // Destination object - readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy") + readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + logError(function, args, startTime, "", "GetObject failed", err) + return } // Check the various fields of source object against destination object. 
objInfo, err = r.Stat() if err != nil { - failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + logError(function, args, startTime, "", "Stat failed", err) + return } objInfoCopy, err := readerCopy.Stat() if err != nil { - failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + logError(function, args, startTime, "", "Stat failed", err) + return } if objInfo.Size != objInfoCopy.Size { - failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+" got "+string(objInfo.Size), err).Fatal() + logError(function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+" got "+string(objInfo.Size), err) + return } // CopyObject again but with wrong conditions src = minio.NewSourceInfo(bucketName, objectName, nil) err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC)) if err != nil { - failureLog(function, args, startTime, "", "SetUnmodifiedSinceCond failed", err).Fatal() + logError(function, args, startTime, "", "SetUnmodifiedSinceCond failed", err) + return } err = src.SetMatchETagExceptCond(objInfo.ETag) if err != nil { - failureLog(function, args, startTime, "", "SetMatchETagExceptCond failed", err).Fatal() + logError(function, args, startTime, "", "SetMatchETagExceptCond failed", err) + return } // Perform the Copy which should fail err = c.CopyObject(dst, src) if err == nil { - failureLog(function, args, startTime, "", "CopyObject did not fail for invalid conditions", err).Fatal() + logError(function, args, startTime, "", "CopyObject did not fail for invalid conditions", err) + return } - // Remove all objects and buckets - err = c.RemoveObject(bucketName, objectName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } - - err = c.RemoveObject(bucketName+"-copy", objectName+"-copy") - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() - } - - err = c.RemoveBucket(bucketName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() - } - - err = c.RemoveBucket(bucketName + "-copy") - if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() + if err = cleanupBucket(bucketName+"-copy", c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } successLogger(function, args, startTime).Info() } @@ -3833,7 +4907,8 @@ func testComposeObjectErrorCasesWrapper(c *minio.Client) { err := c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } // Test that more than 10K source objects cannot be @@ -3842,13 +4917,16 @@ func testComposeObjectErrorCasesWrapper(c *minio.Client) { srcSlice := srcArr[:] dst, err := minio.NewDestinationInfo(bucketName, "object", nil, nil) if err != nil { - failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal() + logError(function, args, startTime, "", "NewDestinationInfo failed", err) + return } if err := c.ComposeObject(dst, srcSlice); err == nil { - failureLog(function, args, startTime, "", "Expected error in ComposeObject", err).Fatal() + logError(function, args, startTime, "", 
"Expected error in ComposeObject", err) + return } else if err.Error() != "There must be as least one and up to 10000 source objects." { - failureLog(function, args, startTime, "", "Got unexpected error", err).Fatal() + logError(function, args, startTime, "", "Got unexpected error", err) + return } // Create a source with invalid offset spec and check that @@ -3856,23 +4934,34 @@ func testComposeObjectErrorCasesWrapper(c *minio.Client) { // 1. Create the source object. const badSrcSize = 5 * 1024 * 1024 buf := bytes.Repeat([]byte("1"), badSrcSize) - _, err = c.PutObject(bucketName, "badObject", bytes.NewReader(buf), "") + _, err = c.PutObject(bucketName, "badObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + logError(function, args, startTime, "", "PutObject failed", err) + return } // 2. Set invalid range spec on the object (going beyond // object size) badSrc := minio.NewSourceInfo(bucketName, "badObject", nil) err = badSrc.SetRange(1, badSrcSize) if err != nil { - failureLog(function, args, startTime, "", "Setting NewSourceInfo failed", err).Fatal() + logError(function, args, startTime, "", "Setting NewSourceInfo failed", err) + return } // 3. ComposeObject call should fail. if err := c.ComposeObject(dst, []minio.SourceInfo{badSrc}); err == nil { - failureLog(function, args, startTime, "", "ComposeObject expected to fail", err).Fatal() + logError(function, args, startTime, "", "ComposeObject expected to fail", err) + return } else if !strings.Contains(err.Error(), "has invalid segment-to-copy") { - failureLog(function, args, startTime, "", "Got invalid error", err).Fatal() + logError(function, args, startTime, "", "Got invalid error", err) + return } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return + } + successLogger(function, args, startTime).Info() } @@ -3891,7 +4980,8 @@ func testComposeObjectErrorCasesV2() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio v2 client object creation failed", err) + return } testComposeObjectErrorCasesWrapper(c) @@ -3911,15 +5001,17 @@ func testComposeMultipleSources(c *minio.Client) { // Make a new bucket in 'us-east-1' (source bucket). err := c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } // Upload a small source object const srcSize = 1024 * 1024 * 5 buf := bytes.Repeat([]byte("1"), srcSize) - _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), "binary/octet-stream") + _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(srcSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { - failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + logError(function, args, startTime, "", "PutObject failed", err) + return } // We will append 10 copies of the object. 
@@ -3930,7 +5022,8 @@ func testComposeMultipleSources(c *minio.Client) { // make the last part very small err = srcs[9].SetRange(0, 0) if err != nil { - failureLog(function, args, startTime, "", "SetRange failed", err).Fatal() + logError(function, args, startTime, "", "SetRange failed", err) + return } args["sources"] = srcs @@ -3938,20 +5031,29 @@ func testComposeMultipleSources(c *minio.Client) { args["destination"] = dst if err != nil { - failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal() + logError(function, args, startTime, "", "NewDestinationInfo failed", err) + return } err = c.ComposeObject(dst, srcs) if err != nil { - failureLog(function, args, startTime, "", "ComposeObject failed", err).Fatal() + logError(function, args, startTime, "", "ComposeObject failed", err) + return } - objProps, err := c.StatObject(bucketName, "dstObject") + objProps, err := c.StatObject(bucketName, "dstObject", minio.StatObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "StatObject failed", err).Fatal() + logError(function, args, startTime, "", "StatObject failed", err) + return } if objProps.Size != 9*srcSize+1 { - failureLog(function, args, startTime, "", "Size mismatched! Expected "+string(10000*srcSize)+" got "+string(objProps.Size), err).Fatal() + logError(function, args, startTime, "", "Size mismatched! Expected "+string(10000*srcSize)+" got "+string(objProps.Size), err) + return + } + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } successLogger(function, args, startTime).Info() } @@ -3971,7 +5073,8 @@ func testCompose10KSourcesV2() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio v2 client object creation failed", err) + return } testComposeMultipleSources(c) @@ -3988,7 +5091,8 @@ func testEncryptedCopyObjectWrapper(c *minio.Client) { // Make a new bucket in 'us-east-1' (source bucket). err := c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } key1 := minio.NewSSEInfo([]byte("32byteslongsecretkeymustbegiven1"), "AES256") @@ -3997,46 +5101,58 @@ func testEncryptedCopyObjectWrapper(c *minio.Client) { // 1. create an sse-c encrypted object to copy by uploading const srcSize = 1024 * 1024 buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB - metadata := make(map[string][]string) + metadata := make(map[string]string) for k, v := range key1.GetSSEHeaders() { - metadata[k] = append(metadata[k], v) + metadata[k] = v } - _, err = c.PutObjectWithSize(bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), metadata, nil) + _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: metadata, Progress: nil}) if err != nil { - failureLog(function, args, startTime, "", "PutObjectWithSize failed", err).Fatal() + logError(function, args, startTime, "", "PutObject call failed", err) + return } // 2. 
copy object and change encryption key src := minio.NewSourceInfo(bucketName, "srcObject", &key1) dst, err := minio.NewDestinationInfo(bucketName, "dstObject", &key2, nil) if err != nil { - failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal() + logError(function, args, startTime, "", "NewDestinationInfo failed", err) + return } err = c.CopyObject(dst, src) if err != nil { - failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal() + logError(function, args, startTime, "", "CopyObject failed", err) + return } // 3. get copied object and check if content is equal - reqH := minio.NewGetReqHeaders() + opts := minio.GetObjectOptions{} for k, v := range key2.GetSSEHeaders() { - reqH.Set(k, v) + opts.Set(k, v) } coreClient := minio.Core{c} - reader, _, err := coreClient.GetObject(bucketName, "dstObject", reqH) + reader, _, err := coreClient.GetObject(bucketName, "dstObject", opts) if err != nil { - failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + logError(function, args, startTime, "", "GetObject failed", err) + return } defer reader.Close() decBytes, err := ioutil.ReadAll(reader) if err != nil { - failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal() + logError(function, args, startTime, "", "ReadAll failed", err) + return } if !bytes.Equal(decBytes, buf) { - failureLog(function, args, startTime, "", "Downloaded object mismatched for encrypted object", err).Fatal() + logError(function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) + return } + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return + } + successLogger(function, args, startTime).Info() } @@ -4055,7 +5171,8 @@ func testEncryptedCopyObject() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio v2 client object creation failed", err) + return } // c.TraceOn(os.Stderr) @@ -4077,7 +5194,8 @@ func testEncryptedCopyObjectV2() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio v2 client object creation failed", err) + return } testEncryptedCopyObjectWrapper(c) @@ -4097,7 +5215,8 @@ func testUserMetadataCopying() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client object creation failed", err) + return } // c.TraceOn(os.Stderr) @@ -4118,13 +5237,15 @@ func testUserMetadataCopyingWrapper(c *minio.Client) { // Make a new bucket in 'us-east-1' (source bucket). 
err := c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } fetchMeta := func(object string) (h http.Header) { - objInfo, err := c.StatObject(bucketName, object) + objInfo, err := c.StatObject(bucketName, object, minio.StatObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "Stat failed", err).Fatal() + logError(function, args, startTime, "", "Stat failed", err) + return } h = make(http.Header) for k, vs := range objInfo.Metadata { @@ -4142,13 +5263,17 @@ func testUserMetadataCopyingWrapper(c *minio.Client) { buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB metadata := make(http.Header) metadata.Set("x-amz-meta-myheader", "myvalue") - _, err = c.PutObjectWithMetadata(bucketName, "srcObject", - bytes.NewReader(buf), metadata, nil) + m := make(map[string]string) + m["x-amz-meta-myheader"] = "myvalue" + _, err = c.PutObject(bucketName, "srcObject", + bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: m}) if err != nil { - failureLog(function, args, startTime, "", "PutObjectWithMetadata failed", err).Fatal() + logError(function, args, startTime, "", "PutObjectWithMetadata failed", err) + return } if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) { - failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal() + logError(function, args, startTime, "", "Metadata match failed", err) + return } // 2. create source @@ -4156,7 +5281,8 @@ func testUserMetadataCopyingWrapper(c *minio.Client) { // 2.1 create destination with metadata set dst1, err := minio.NewDestinationInfo(bucketName, "dstObject-1", nil, map[string]string{"notmyheader": "notmyvalue"}) if err != nil { - failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal() + logError(function, args, startTime, "", "NewDestinationInfo failed", err) + return } // 3. Check that copying to an object with metadata set resets @@ -4166,20 +5292,22 @@ func testUserMetadataCopyingWrapper(c *minio.Client) { args["source"] = src if err != nil { - failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal() + logError(function, args, startTime, "", "CopyObject failed", err) + return } expectedHeaders := make(http.Header) expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue") if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) { - failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal() + logError(function, args, startTime, "", "Metadata match failed", err) + return } // 4. 
create destination with no metadata set and same source dst2, err := minio.NewDestinationInfo(bucketName, "dstObject-2", nil, nil) if err != nil { - failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal() - + logError(function, args, startTime, "", "NewDestinationInfo failed", err) + return } src = minio.NewSourceInfo(bucketName, "srcObject", nil) @@ -4190,12 +5318,14 @@ func testUserMetadataCopyingWrapper(c *minio.Client) { args["source"] = src if err != nil { - failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal() + logError(function, args, startTime, "", "CopyObject failed", err) + return } expectedHeaders = metadata if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) { - failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal() + logError(function, args, startTime, "", "Metadata match failed", err) + return } // 6. Compose a pair of sources. @@ -4205,7 +5335,8 @@ func testUserMetadataCopyingWrapper(c *minio.Client) { } dst3, err := minio.NewDestinationInfo(bucketName, "dstObject-3", nil, nil) if err != nil { - failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal() + logError(function, args, startTime, "", "NewDestinationInfo failed", err) + return } err = c.ComposeObject(dst3, srcs) @@ -4214,12 +5345,14 @@ func testUserMetadataCopyingWrapper(c *minio.Client) { args["source"] = srcs if err != nil { - failureLog(function, args, startTime, "", "ComposeObject failed", err).Fatal() + logError(function, args, startTime, "", "ComposeObject failed", err) + return } // Check that no headers are copied in this case if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) { - failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal() + logError(function, args, startTime, "", "Metadata match failed", err) + return } // 7. Compose a pair of sources with dest user metadata set. 
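The surrounding testUserMetadataCopyingWrapper hunks switch from PutObjectWithMetadata (which took an http.Header) to PutObject with PutObjectOptions.UserMetadata. A hedged sketch of that upload-and-stat round trip, with placeholder endpoint, credentials and names:

package main

import (
	"bytes"
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("minio.example.com:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	buf := bytes.Repeat([]byte("abcde"), 1024)

	// UserMetadata replaces the old http.Header argument; entries travel as
	// x-amz-meta-* headers on the upload request.
	_, err = c.PutObject("my-bucket", "srcObject", bytes.NewReader(buf), int64(len(buf)),
		minio.PutObjectOptions{UserMetadata: map[string]string{"x-amz-meta-myheader": "myvalue"}})
	if err != nil {
		log.Fatalln(err)
	}

	// StatObject exposes the stored user metadata as an http.Header.
	info, err := c.StatObject("my-bucket", "srcObject", minio.StatObjectOptions{})
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("x-amz-meta-myheader:", info.Metadata.Get("X-Amz-Meta-Myheader"))
}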
@@ -4229,7 +5362,8 @@ func testUserMetadataCopyingWrapper(c *minio.Client) { } dst4, err := minio.NewDestinationInfo(bucketName, "dstObject-4", nil, map[string]string{"notmyheader": "notmyvalue"}) if err != nil { - failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal() + logError(function, args, startTime, "", "NewDestinationInfo failed", err) + return } err = c.ComposeObject(dst4, srcs) @@ -4238,15 +5372,24 @@ func testUserMetadataCopyingWrapper(c *minio.Client) { args["source"] = srcs if err != nil { - failureLog(function, args, startTime, "", "ComposeObject failed", err).Fatal() + logError(function, args, startTime, "", "ComposeObject failed", err) + return } // Check that no headers are copied in this case expectedHeaders = make(http.Header) expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue") if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) { - failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal() + logError(function, args, startTime, "", "Metadata match failed", err) + return } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return + } + successLogger(function, args, startTime).Info() } @@ -4264,7 +5407,8 @@ func testUserMetadataCopyingV2() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client v2 object creation failed", err) + return } // c.TraceOn(os.Stderr) @@ -4275,12 +5419,12 @@ func testUserMetadataCopyingV2() { func testPutObjectNoLengthV2() { // initialize logging params startTime := time.Now() - function := "PutObjectWithSize(bucketName, objectName, reader, size, metadata, progress)" + function := "PutObject(bucketName, objectName, reader, size, opts)" args := map[string]interface{}{ "bucketName": "", "objectName": "", "size": -1, - "metadata": nil, + "opts": "", } // Seed random based on current time. @@ -4294,7 +5438,8 @@ func testPutObjectNoLengthV2() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client v2 object creation failed", err) + return } // Enable tracing, write to stderr. @@ -4311,37 +5456,35 @@ func testPutObjectNoLengthV2() { // Make a new bucket. err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } objectName := bucketName + "unique" args["objectName"] = objectName - // Generate data using 4 parts so that all 3 'workers' are utilized and a part is leftover. - // Use different data for each part for multipart tests to ensure part order at the end. - var reader = getDataReader("datafile-65-MB", sixtyFiveMiB) + bufSize := dataFileMap["datafile-65-MB"] + var reader = getDataReader("datafile-65-MB") defer reader.Close() // Upload an object. 
- n, err := c.PutObjectWithSize(bucketName, objectName, reader, -1, nil, nil) + n, err := c.PutObject(bucketName, objectName, reader, -1, minio.PutObjectOptions{}) + if err != nil { - failureLog(function, args, startTime, "", "PutObjectWithSize failed", err).Fatal() + logError(function, args, startTime, "", "PutObjectWithSize failed", err) + return } - if n != int64(sixtyFiveMiB) { - failureLog(function, args, startTime, "", "Expected upload object size "+string(sixtyFiveMiB)+" got "+string(n), err).Fatal() + if n != int64(bufSize) { + logError(function, args, startTime, "", "Expected upload object size "+string(bufSize)+" got "+string(n), err) + return } - // Remove the object. - err = c.RemoveObject(bucketName, objectName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } - // Remove the bucket. - err = c.RemoveBucket(bucketName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() - } successLogger(function, args, startTime).Info() } @@ -4349,10 +5492,12 @@ func testPutObjectNoLengthV2() { func testPutObjectsUnknownV2() { // initialize logging params startTime := time.Now() - function := "PutObjectStreaming(bucketName, objectName, reader)" + function := "PutObject(bucketName, objectName, reader,size,opts)" args := map[string]interface{}{ "bucketName": "", "objectName": "", + "size": "", + "opts": "", } // Seed random based on current time. @@ -4366,7 +5511,8 @@ func testPutObjectsUnknownV2() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client v2 object creation failed", err) + return } // Enable tracing, write to stderr. @@ -4383,7 +5529,8 @@ func testPutObjectsUnknownV2() { // Make a new bucket. err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } // Issues are revealed by trying to upload multiple files of unknown size @@ -4403,26 +5550,24 @@ func testPutObjectsUnknownV2() { objectName := fmt.Sprintf("%sunique%d", bucketName, i) args["objectName"] = objectName - n, err := c.PutObjectStreaming(bucketName, objectName, rpipe) + n, err := c.PutObject(bucketName, objectName, rpipe, -1, minio.PutObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "PutObjectStreaming failed", err).Fatal() + logError(function, args, startTime, "", "PutObjectStreaming failed", err) + return } if n != int64(4) { - failureLog(function, args, startTime, "", "Expected upload object size "+string(4)+" got "+string(n), err).Fatal() + logError(function, args, startTime, "", "Expected upload object size "+string(4)+" got "+string(n), err) + return } - // Remove the object. - err = c.RemoveObject(bucketName, objectName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() - } } - // Remove the bucket. 
- err = c.RemoveBucket(bucketName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } + successLogger(function, args, startTime).Info() } @@ -4430,12 +5575,12 @@ func testPutObjectsUnknownV2() { func testPutObject0ByteV2() { // initialize logging params startTime := time.Now() - function := "PutObjectWithSize(bucketName, objectName, reader, size, metadata, progress)" + function := "PutObject(bucketName, objectName, reader, size, opts)" args := map[string]interface{}{ "bucketName": "", "objectName": "", "size": 0, - "metadata": nil, + "opts": "", } // Seed random based on current time. @@ -4449,7 +5594,8 @@ func testPutObject0ByteV2() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client v2 object creation failed", err) + return } // Enable tracing, write to stderr. @@ -4461,35 +5607,37 @@ func testPutObject0ByteV2() { // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName // Make a new bucket. err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } objectName := bucketName + "unique" + args["objectName"] = objectName + args["opts"] = minio.PutObjectOptions{} // Upload an object. - n, err := c.PutObjectWithSize(bucketName, objectName, bytes.NewReader([]byte("")), 0, nil, nil) + n, err := c.PutObject(bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{}) + if err != nil { - failureLog(function, args, startTime, "", "PutObjectWithSize failed", err).Fatal() + logError(function, args, startTime, "", "PutObjectWithSize failed", err) + return } if n != 0 { - failureLog(function, args, startTime, "", "Expected upload object size 0 but got "+string(n), err).Fatal() + logError(function, args, startTime, "", "Expected upload object size 0 but got "+string(n), err) + return } - // Remove the object. - err = c.RemoveObject(bucketName, objectName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } - // Remove the bucket. 
- err = c.RemoveBucket(bucketName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() - } successLogger(function, args, startTime).Info() } @@ -4508,7 +5656,8 @@ func testComposeObjectErrorCases() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client object creation failed", err) + return } testComposeObjectErrorCasesWrapper(c) @@ -4529,7 +5678,8 @@ func testCompose10KSources() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client object creation failed", err) + return } testComposeMultipleSources(c) @@ -4552,7 +5702,8 @@ func testFunctionalV2() { mustParseBool(os.Getenv(enableHTTPS)), ) if err != nil { - failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal() + logError(function, args, startTime, "", "Minio client v2 object creation failed", err) + return } // Enable to debug @@ -4567,20 +5718,23 @@ func testFunctionalV2() { // Make a new bucket. err = c.MakeBucket(bucketName, "us-east-1") if err != nil { - failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal() + logError(function, args, startTime, "", "MakeBucket failed", err) + return } // Generate a random file name. fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") file, err := os.Create(fileName) if err != nil { - failureLog(function, args, startTime, "", "file create failed", err).Fatal() + logError(function, args, startTime, "", "file create failed", err) + return } for i := 0; i < 3; i++ { buf := make([]byte, rand.Intn(1<<19)) _, err = file.Write(buf) if err != nil { - failureLog(function, args, startTime, "", "file write failed", err).Fatal() + logError(function, args, startTime, "", "file write failed", err) + return } } file.Close() @@ -4589,25 +5743,30 @@ func testFunctionalV2() { var exists bool exists, err = c.BucketExists(bucketName) if err != nil { - failureLog(function, args, startTime, "", "BucketExists failed", err).Fatal() + logError(function, args, startTime, "", "BucketExists failed", err) + return } if !exists { - failureLog(function, args, startTime, "", "Could not find existing bucket "+bucketName, err).Fatal() + logError(function, args, startTime, "", "Could not find existing bucket "+bucketName, err) + return } // Make the bucket 'public read/write'. err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite) if err != nil { - failureLog(function, args, startTime, "", "SetBucketPolicy failed", err).Fatal() + logError(function, args, startTime, "", "SetBucketPolicy failed", err) + return } // List all buckets. buckets, err := c.ListBuckets() if len(buckets) == 0 { - failureLog(function, args, startTime, "", "List buckets cannot be empty", err).Fatal() + logError(function, args, startTime, "", "List buckets cannot be empty", err) + return } if err != nil { - failureLog(function, args, startTime, "", "ListBuckets failed", err).Fatal() + logError(function, args, startTime, "", "ListBuckets failed", err) + return } // Verify if previously created bucket is listed in list buckets. @@ -4620,7 +5779,8 @@ func testFunctionalV2() { // If bucket not found error out. 
if !bucketFound { - failureLog(function, args, startTime, "", "Bucket "+bucketName+"not found", err).Fatal() + logError(function, args, startTime, "", "Bucket "+bucketName+"not found", err) + return } objectName := bucketName + "unique" @@ -4628,21 +5788,25 @@ func testFunctionalV2() { // Generate data buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19)) - n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "") + n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + logError(function, args, startTime, "", "PutObject failed", err) + return } if n != int64(len(buf)) { - failureLog(function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err).Fatal() + logError(function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err) + return } - n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), "binary/octet-stream") + n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { - failureLog(function, args, startTime, "", "PutObject failed", err).Fatal() + logError(function, args, startTime, "", "PutObject failed", err) + return } if n != int64(len(buf)) { - failureLog(function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err).Fatal() + logError(function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err) + return } // Instantiate a done channel to close all listing. @@ -4658,7 +5822,8 @@ func testFunctionalV2() { } } if !objFound { - failureLog(function, args, startTime, "", "Could not find existing object "+objectName, err).Fatal() + logError(function, args, startTime, "", "Could not find existing object "+objectName, err) + return } objFound = false @@ -4670,7 +5835,8 @@ func testFunctionalV2() { } } if !objFound { - failureLog(function, args, startTime, "", "Could not find existing object "+objectName, err).Fatal() + logError(function, args, startTime, "", "Could not find existing object "+objectName, err) + return } incompObjNotFound := true @@ -4681,66 +5847,80 @@ func testFunctionalV2() { } } if !incompObjNotFound { - failureLog(function, args, startTime, "", "Unexpected dangling incomplete upload found", err).Fatal() + logError(function, args, startTime, "", "Unexpected dangling incomplete upload found", err) + return } - newReader, err := c.GetObject(bucketName, objectName) + newReader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + logError(function, args, startTime, "", "GetObject failed", err) + return } newReadBytes, err := ioutil.ReadAll(newReader) if err != nil { - failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal() + logError(function, args, startTime, "", "ReadAll failed", err) + return } if !bytes.Equal(newReadBytes, buf) { - failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal() + logError(function, args, startTime, "", "Bytes mismatch", err) + return } - err = c.FGetObject(bucketName, objectName, fileName+"-f") + err = c.FGetObject(bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) if err != nil { - failureLog(function, args, startTime, 
"", "FgetObject failed", err).Fatal() + logError(function, args, startTime, "", "FgetObject failed", err) + return } // Generate presigned HEAD object url. presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil) if err != nil { - failureLog(function, args, startTime, "", "PresignedHeadObject failed", err).Fatal() + logError(function, args, startTime, "", "PresignedHeadObject failed", err) + return } // Verify if presigned url works. resp, err := http.Head(presignedHeadURL.String()) if err != nil { - failureLog(function, args, startTime, "", "PresignedHeadObject URL head request failed", err).Fatal() + logError(function, args, startTime, "", "PresignedHeadObject URL head request failed", err) + return } if resp.StatusCode != http.StatusOK { - failureLog(function, args, startTime, "", "PresignedHeadObject URL returns status "+string(resp.StatusCode), err).Fatal() + logError(function, args, startTime, "", "PresignedHeadObject URL returns status "+string(resp.StatusCode), err) + return } if resp.Header.Get("ETag") == "" { - failureLog(function, args, startTime, "", "Got empty ETag", err).Fatal() + logError(function, args, startTime, "", "Got empty ETag", err) + return } resp.Body.Close() // Generate presigned GET object url. presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil) if err != nil { - failureLog(function, args, startTime, "", "PresignedGetObject failed", err).Fatal() + logError(function, args, startTime, "", "PresignedGetObject failed", err) + return } // Verify if presigned url works. resp, err = http.Get(presignedGetURL.String()) if err != nil { - failureLog(function, args, startTime, "", "PresignedGetObject URL GET request failed", err).Fatal() + logError(function, args, startTime, "", "PresignedGetObject URL GET request failed", err) + return } if resp.StatusCode != http.StatusOK { - failureLog(function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err).Fatal() + logError(function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err) + return } newPresignedBytes, err := ioutil.ReadAll(resp.Body) if err != nil { - failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal() + logError(function, args, startTime, "", "ReadAll failed", err) + return } resp.Body.Close() if !bytes.Equal(newPresignedBytes, buf) { - failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal() + logError(function, args, startTime, "", "Bytes mismatch", err) + return } // Set request parameters. @@ -4749,38 +5929,47 @@ func testFunctionalV2() { // Generate presigned GET object url. presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams) if err != nil { - failureLog(function, args, startTime, "", "PresignedGetObject failed", err).Fatal() + logError(function, args, startTime, "", "PresignedGetObject failed", err) + return } // Verify if presigned url works. 
resp, err = http.Get(presignedGetURL.String()) if err != nil { - failureLog(function, args, startTime, "", "PresignedGetObject URL GET request failed", err).Fatal() + logError(function, args, startTime, "", "PresignedGetObject URL GET request failed", err) + return } if resp.StatusCode != http.StatusOK { - failureLog(function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err).Fatal() + logError(function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err) + return } newPresignedBytes, err = ioutil.ReadAll(resp.Body) if err != nil { - failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal() + logError(function, args, startTime, "", "ReadAll failed", err) + return } if !bytes.Equal(newPresignedBytes, buf) { - failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal() + logError(function, args, startTime, "", "Bytes mismatch", err) + return } // Verify content disposition. if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" { - failureLog(function, args, startTime, "", "wrong Content-Disposition received ", err).Fatal() + logError(function, args, startTime, "", "wrong Content-Disposition received ", err) + return } presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second) if err != nil { - failureLog(function, args, startTime, "", "PresignedPutObject failed", err).Fatal() + logError(function, args, startTime, "", "PresignedPutObject failed", err) + return } + // Generate data more than 32K buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024) req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf)) if err != nil { - failureLog(function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err).Fatal() + logError(function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) + return } httpClient := &http.Client{ // Setting a sensible time out of 30secs to wait for response @@ -4791,59 +5980,504 @@ func testFunctionalV2() { } resp, err = httpClient.Do(req) if err != nil { - failureLog(function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err).Fatal() + logError(function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) + return } - newReader, err = c.GetObject(bucketName, objectName+"-presigned") + newReader, err = c.GetObject(bucketName, objectName+"-presigned", minio.GetObjectOptions{}) if err != nil { - failureLog(function, args, startTime, "", "GetObject failed", err).Fatal() + logError(function, args, startTime, "", "GetObject failed", err) + return } newReadBytes, err = ioutil.ReadAll(newReader) if err != nil { - failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal() + logError(function, args, startTime, "", "ReadAll failed", err) + return } if !bytes.Equal(newReadBytes, buf) { - failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal() + logError(function, args, startTime, "", "Bytes mismatch", err) + return } - err = c.RemoveObject(bucketName, objectName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() - } - err = c.RemoveObject(bucketName, objectName+"-f") - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() - } - err = c.RemoveObject(bucketName, objectName+"-nolength") - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", 
err).Fatal() - } - err = c.RemoveObject(bucketName, objectName+"-presigned") - if err != nil { - failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal() - } - err = c.RemoveBucket(bucketName) - if err != nil { - failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal() - } - err = c.RemoveBucket(bucketName) - if err == nil { - failureLog(function, args, startTime, "", "RemoveBucket should fail as bucket does not exist", err).Fatal() - } - if err.Error() != "The specified bucket does not exist" { - failureLog(function, args, startTime, "", "RemoveBucket failed with wrong error message", err).Fatal() + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return } + if err = os.Remove(fileName); err != nil { - failureLog(function, args, startTime, "", "File remove failed", err).Fatal() + logError(function, args, startTime, "", "File remove failed", err) + return } if err = os.Remove(fileName + "-f"); err != nil { - failureLog(function, args, startTime, "", "File removes failed", err).Fatal() + logError(function, args, startTime, "", "File removes failed", err) + return } successLogger(function, args, startTime).Info() } +// Test get object with GetObjectWithContext +func testGetObjectWithContext() { + // initialize logging params + startTime := time.Now() + function := "GetObjectWithContext(ctx, bucketName, objectName)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(function, args, startTime, "", "Minio client v4 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(function, args, startTime, "", "MakeBucket failed", err) + return + } + + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(function, args, startTime, "", "PutObject failed", err) + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + args["ctx"] = ctx + defer cancel() + + // Read the data back + r, err := c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(function, args, startTime, "", "GetObjectWithContext failed - request timeout not honored", err) + return + } + ctx, cancel = context.WithTimeout(context.Background(), 3*time.Minute) + args["ctx"] = ctx + defer cancel() + + // Read the data back + r, err = c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(function, args, startTime, "", "GetObjectWithContext failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(function, args, startTime, "", "object Stat call failed", err) + return + } + if st.Size != int64(bufSize) { + logError(function, args, startTime, "", "Number of bytes in stat does not match: want "+string(bufSize)+", got"+string(st.Size), err) + return + } + if err := r.Close(); err != nil { + logError(function, args, startTime, "", "object Close() call failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(function, args, startTime).Info() + +} + +// Test get object with FGetObjectWithContext +func testFGetObjectWithContext() { + // initialize logging params + startTime := time.Now() + function := "FGetObjectWithContext(ctx, bucketName, objectName, fileName)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + "fileName": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(function, args, startTime, "", "Minio client v4 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(function, args, startTime, "", "MakeBucket failed", err) + return + } + + bufSize := dataFileMap["datafile-1-MB"] + var reader = getDataReader("datafile-1-MB") + defer reader.Close() + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(function, args, startTime, "", "PutObject failed", err) + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Millisecond) + args["ctx"] = ctx + defer cancel() + + fileName := "tempfile-context" + args["fileName"] = fileName + // Read the data back + err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) + if err == nil { + logError(function, args, startTime, "", "FGetObjectWithContext with short timeout failed", err) + return + } + ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + // Read the data back + err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{}) + if err != nil { + logError(function, args, startTime, "", "FGetObjectWithContext with long timeout failed", err) + return + } + if err = os.Remove(fileName + "-fcontext"); err != nil { + logError(function, args, startTime, "", "Remove file failed", err) + return + } + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(function, args, startTime).Info() + +} + +// Test validates putObject with context to see if request cancellation is honored for V2. +func testPutObjectWithContextV2() { + // initialize logging params + startTime := time.Now() + function := "PutObjectWithContext(ctx, bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + "opts": "", + } + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(function, args, startTime, "", "Minio client v2 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Make a new bucket. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(function, args, startTime, "", "MakeBucket failed", err) + return + } + defer c.RemoveBucket(bucketName) + bufSize := dataFileMap["datatfile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + + objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) + args["objectName"] = objectName + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + args["ctx"] = ctx + defer cancel() + + _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(function, args, startTime, "", "PutObjectWithContext with short timeout failed", err) + return + } + + ctx, cancel = context.WithTimeout(context.Background(), 3*time.Minute) + args["ctx"] = ctx + + defer cancel() + reader = getDataReader("datafile-33-kB") + defer reader.Close() + _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(function, args, startTime, "", "PutObjectWithContext with long timeout failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(function, args, startTime).Info() + +} + +// Test get object with GetObjectWithContext +func testGetObjectWithContextV2() { + // initialize logging params + startTime := time.Now() + function := "GetObjectWithContext(ctx, bucketName, objectName)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(function, args, startTime, "", "Minio client v2 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(function, args, startTime, "", "MakeBucket failed", err) + return + } + + bufSize := dataFileMap["datafile-33-kB"] + var reader = getDataReader("datafile-33-kB") + defer reader.Close() + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(function, args, startTime, "", "PutObject call failed", err) + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + args["ctx"] = ctx + defer cancel() + + // Read the data back + r, err := c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(function, args, startTime, "", "GetObjectWithContext failed due to non-cancellation upon short timeout", err) + return + } + ctx, cancel = context.WithTimeout(context.Background(), 3*time.Minute) + defer cancel() + + // Read the data back + r, err = c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(function, args, startTime, "", "GetObjectWithContext failed due to non-cancellation upon long timeout", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(function, args, startTime, "", "object Stat call failed", err) + return + } + if st.Size != int64(bufSize) { + logError(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(bufSize)+" got "+string(st.Size), err) + return + } + if err := r.Close(); err != nil { + logError(function, args, startTime, "", " object Close() call failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(function, args, startTime).Info() + +} + +// Test get object with FGetObjectWithContext +func testFGetObjectWithContextV2() { + // initialize logging params + startTime := time.Now() + function := "FGetObjectWithContext(ctx, bucketName, objectName,fileName)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileName": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.NewV2( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(function, args, startTime, "", "Minio client v2 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(function, args, startTime, "", "MakeBucket call failed", err) + return + } + + bufSize := dataFileMap["datatfile-1-MB"] + var reader = getDataReader("datafile-1-MB") + defer reader.Close() + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(function, args, startTime, "", "PutObject call failed", err) + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond) + args["ctx"] = ctx + defer cancel() + + fileName := "tempfile-context" + args["fileName"] = fileName + + // Read the data back + err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) + if err == nil { + logError(function, args, startTime, "", "FGetObjectWithContext call with short request timeout failed", err) + return + } + ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + // Read the data back + err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{}) + if err != nil { + logError(function, args, startTime, "", "FGetObjectWithContext call with long request timeout failed", err) + return + } + + if err = os.Remove(fileName + "-fcontext"); err != nil { + logError(function, args, startTime, "", "Remove file failed", err) + return + } + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(function, args, startTime).Info() + +} + // Convert string to bool and always return false if any error func mustParseBool(str string) bool { b, err := strconv.ParseBool(str) @@ -4862,6 +6496,8 @@ func main() { log.SetFormatter(&mintFormatter) // log Info or above -- success cases are Info level, failures are Fatal level log.SetLevel(log.InfoLevel) + + tls := mustParseBool(os.Getenv(enableHTTPS)) // execute tests if !isQuickMode() { testMakeBucketErrorV2() @@ -4875,11 +6511,14 @@ func main() { testFunctionalV2() testComposeObjectErrorCasesV2() testCompose10KSourcesV2() - testEncryptedCopyObjectV2() testUserMetadataCopyingV2() testPutObject0ByteV2() testPutObjectNoLengthV2() testPutObjectsUnknownV2() + testGetObjectWithContextV2() + testFPutObjectWithContextV2() + testFGetObjectWithContextV2() + testPutObjectWithContextV2() testMakeBucketError() testMakeBucketRegions() testPutObjectWithMetadata() @@ -4897,14 +6536,24 @@ func main() { testPresignedPostPolicy() testCopyObject() testEncryptionPutGet() + testEncryptionFPut() testComposeObjectErrorCases() testCompose10KSources() testUserMetadataCopying() - testEncryptedCopyObject() testBucketNotification() testFunctional() testGetObjectObjectModified() testPutObjectUploadSeekedObject() + testGetObjectWithContext() + testFPutObjectWithContext() + testFGetObjectWithContext() + testPutObjectWithContext() + + // SSE-C tests will only work over TLS connection. 
+ if tls { + testEncryptedCopyObjectV2() + testEncryptedCopyObject() + } } else { testFunctional() testFunctionalV2() diff --git a/vendor/github.com/minio/minio-go/request-headers_test.go b/vendor/github.com/minio/minio-go/get-options_test.go similarity index 84% rename from vendor/github.com/minio/minio-go/request-headers_test.go rename to vendor/github.com/minio/minio-go/get-options_test.go index f026cd0a2..c5344a0c6 100644 --- a/vendor/github.com/minio/minio-go/request-headers_test.go +++ b/vendor/github.com/minio/minio-go/get-options_test.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -40,17 +41,17 @@ func TestSetHeader(t *testing.T) { {1, -5, fmt.Errorf("Invalid range specified: start=1 end=-5"), ""}, } for i, testCase := range testCases { - rh := NewGetReqHeaders() - err := rh.SetRange(testCase.start, testCase.end) + opts := GetObjectOptions{} + err := opts.SetRange(testCase.start, testCase.end) if err == nil && testCase.errVal != nil { t.Errorf("Test %d: Expected to fail with '%v' but it passed", i+1, testCase.errVal) } else if err != nil && testCase.errVal.Error() != err.Error() { t.Errorf("Test %d: Expected error '%v' but got error '%v'", i+1, testCase.errVal, err) - } else if err == nil && rh.Get("Range") != testCase.expected { + } else if err == nil && opts.headers["Range"] != testCase.expected { t.Errorf("Test %d: Expected range header '%s', but got '%s'", - i+1, testCase.expected, rh.Get("Range")) + i+1, testCase.expected, opts.headers["Range"]) } } } diff --git a/vendor/github.com/minio/minio-go/hook-reader.go b/vendor/github.com/minio/minio-go/hook-reader.go index bc9ece049..8f32291d4 100644 --- a/vendor/github.com/minio/minio-go/hook-reader.go +++ b/vendor/github.com/minio/minio-go/hook-reader.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/pkg/credentials/chain.go index 6b0e57440..88d5d9c5e 100644 --- a/vendor/github.com/minio/minio-go/pkg/credentials/chain.go +++ b/vendor/github.com/minio/minio-go/pkg/credentials/chain.go @@ -1,6 +1,6 @@ /* * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2017 Minio, Inc. + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/chain_test.go b/vendor/github.com/minio/minio-go/pkg/credentials/chain_test.go index cb5a6dda5..8a800b6b5 100644 --- a/vendor/github.com/minio/minio-go/pkg/credentials/chain_test.go +++ b/vendor/github.com/minio/minio-go/pkg/credentials/chain_test.go @@ -1,6 +1,6 @@ /* * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2017 Minio, Inc. + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
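The functional-test hunks above migrate every call to the options-based API introduced by this minio-go update: PutObject now takes the object size plus a PutObjectOptions value, GetObject and FGetObject take a GetObjectOptions value (the renamed get-options_test.go exercises its SetRange helper), and the new *WithContext variants accept a context for cancellation. The standalone Go sketch below only illustrates those call shapes as they appear in the diff; it is not part of the patch, and the endpoint, credentials, bucket and object names are placeholders.

package main

import (
	"context"
	"log"
	"strings"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and keys, for illustration only.
	c, err := minio.New("play.minio.io", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	data := strings.NewReader("hello world")
	// New signature: explicit object size plus PutObjectOptions.
	if _, err = c.PutObject("mybucket", "myobject", data, int64(data.Len()),
		minio.PutObjectOptions{ContentType: "binary/octet-stream"}); err != nil {
		log.Fatal(err)
	}

	// GetObject now takes GetObjectOptions; SetRange limits the read,
	// mirroring the renamed get-options_test.go above.
	opts := minio.GetObjectOptions{}
	if err = opts.SetRange(0, 4); err != nil {
		log.Fatal(err)
	}
	obj, err := c.GetObject("mybucket", "myobject", opts)
	if err != nil {
		log.Fatal(err)
	}
	defer obj.Close()

	// The *WithContext variants honor request timeouts and cancellation.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	if err = c.FGetObjectWithContext(ctx, "mybucket", "myobject",
		"/tmp/myobject", minio.GetObjectOptions{}); err != nil {
		log.Fatal(err)
	}
}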
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go index cc3000532..4bfdad413 100644 --- a/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go +++ b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go @@ -1,6 +1,6 @@ /* * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2017 Minio, Inc. + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/credentials_test.go b/vendor/github.com/minio/minio-go/pkg/credentials/credentials_test.go index cbfb673b7..92c77c4cb 100644 --- a/vendor/github.com/minio/minio-go/pkg/credentials/credentials_test.go +++ b/vendor/github.com/minio/minio-go/pkg/credentials/credentials_test.go @@ -1,6 +1,6 @@ /* * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2017 Minio, Inc. + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/pkg/credentials/doc.go index fa1908aeb..c48784ba8 100644 --- a/vendor/github.com/minio/minio-go/pkg/credentials/doc.go +++ b/vendor/github.com/minio/minio-go/pkg/credentials/doc.go @@ -1,3 +1,20 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + // Package credentials provides credential retrieval and management // for S3 compatible object storage. // diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go b/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go index 11934433c..f9b2cc33a 100644 --- a/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go +++ b/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go @@ -1,6 +1,6 @@ /* * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2017 Minio, Inc. + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go b/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go index 791087ef5..d72e77185 100644 --- a/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go +++ b/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go @@ -1,6 +1,6 @@ /* * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2017 Minio, Inc. + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/env_test.go b/vendor/github.com/minio/minio-go/pkg/credentials/env_test.go index 2f72bea40..09cd77f7a 100644 --- a/vendor/github.com/minio/minio-go/pkg/credentials/env_test.go +++ b/vendor/github.com/minio/minio-go/pkg/credentials/env_test.go @@ -1,6 +1,6 @@ /* * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2017 Minio, Inc. + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go index 1be621385..247b6a11a 100644 --- a/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go +++ b/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go @@ -1,6 +1,6 @@ /* * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2017 Minio, Inc. + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go index 9e26dd302..2f79809e9 100644 --- a/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go +++ b/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go @@ -1,6 +1,6 @@ /* * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2017 Minio, Inc. + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/file_test.go b/vendor/github.com/minio/minio-go/pkg/credentials/file_test.go index c62c53365..c85c10494 100644 --- a/vendor/github.com/minio/minio-go/pkg/credentials/file_test.go +++ b/vendor/github.com/minio/minio-go/pkg/credentials/file_test.go @@ -1,6 +1,6 @@ /* * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2017 Minio, Inc. + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go index b862cf538..637df7466 100644 --- a/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go +++ b/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go @@ -1,6 +1,6 @@ /* * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2017 Minio, Inc. + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -46,18 +46,6 @@ type IAM struct { endpoint string } -// redirectHeaders copies all headers when following a redirect URL. 
-// This won't be needed anymore from go 1.8 (https://github.com/golang/go/issues/4800) -func redirectHeaders(req *http.Request, via []*http.Request) error { - if len(via) == 0 { - return nil - } - for key, val := range via[0].Header { - req.Header[key] = val - } - return nil -} - // IAM Roles for Amazon EC2 // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html const ( @@ -74,8 +62,7 @@ func NewIAM(endpoint string) *Credentials { } p := &IAM{ Client: &http.Client{ - Transport: http.DefaultTransport, - CheckRedirect: redirectHeaders, + Transport: http.DefaultTransport, }, endpoint: endpoint, } diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws_test.go b/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws_test.go index 3e5ad3ec0..86ea66bf6 100644 --- a/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws_test.go +++ b/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws_test.go @@ -1,3 +1,20 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package credentials import ( diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go b/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go index c64ad6c23..1b768e8c3 100644 --- a/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go +++ b/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/static.go b/vendor/github.com/minio/minio-go/pkg/credentials/static.go index 25aff5696..8b0ba711c 100644 --- a/vendor/github.com/minio/minio-go/pkg/credentials/static.go +++ b/vendor/github.com/minio/minio-go/pkg/credentials/static.go @@ -1,6 +1,6 @@ /* * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2017 Minio, Inc. + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/static_test.go b/vendor/github.com/minio/minio-go/pkg/credentials/static_test.go index 491b1554b..f1d2d856c 100644 --- a/vendor/github.com/minio/minio-go/pkg/credentials/static_test.go +++ b/vendor/github.com/minio/minio-go/pkg/credentials/static_test.go @@ -1,6 +1,6 @@ /* * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2017 Minio, Inc. + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
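The iam_aws.go hunk above drops the pre-go1.8 redirectHeaders workaround and leaves NewIAM with a plain http.DefaultTransport client. A minimal sketch of fetching instance-role credentials with this provider follows; it is not part of the patch, and the assumption that an empty endpoint falls back to the default EC2 metadata endpoint is not shown in the hunk itself.

package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	// Assumption: "" selects the default instance metadata endpoint.
	creds := credentials.NewIAM("")
	v, err := creds.Get() // retrieves and caches temporary role credentials
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("access key:", v.AccessKeyID)
}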
diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go b/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go index be45e52f4..b0f2d6e08 100644 --- a/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go +++ b/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go b/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go index 8b8554336..482922ab7 100644 --- a/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go +++ b/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go b/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go index 8814845e3..0ed95f5ff 100644 --- a/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go +++ b/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go index 078bcd1db..737b810ac 100644 --- a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go +++ b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition_test.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition_test.go index 419868f38..9e4aa8fb6 100644 --- a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition_test.go +++ b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition_test.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go index b2d46e178..9dda99efc 100644 --- a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go +++ b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy_test.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy_test.go index b1862c639..1e5196f7c 100644 --- a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy_test.go +++ b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy_test.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go index d831436cd..156a6d63a 100644 --- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -32,7 +33,6 @@ import ( // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming const ( streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" - streamingEncoding = "aws-chunked" streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" payloadChunkSize = 64 * 1024 @@ -99,9 +99,8 @@ func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int if sessionToken != "" { req.Header.Set("X-Amz-Security-Token", sessionToken) } - req.Header.Add("Content-Encoding", streamingEncoding) - req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) + req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) // Set content length with streaming signature for each chunk included. req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize)) req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10)) diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming_test.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming_test.go index 1f49f2234..535adb39d 100644 --- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming_test.go +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming_test.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. 
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -42,7 +43,7 @@ func TestGetSeedSignature(t *testing.T) { req = StreamingSignV4(req, accessKeyID, secretAccessKeyID, "", "us-east-1", int64(dataLen), reqTime) actualSeedSignature := req.Body.(*StreamingReader).seedSignature - expectedSeedSignature := "007480502de61457e955731b0f5d191f7e6f54a8a0f6cc7974a5ebd887965686" + expectedSeedSignature := "38cab3af09aa15ddf29e26e36236f60fb6bfb6243a20797ae9a8183674526079" if actualSeedSignature != expectedSeedSignature { t.Errorf("Expected %s but received %s", expectedSeedSignature, actualSeedSignature) } @@ -74,7 +75,7 @@ func TestSetStreamingAuthorization(t *testing.T) { reqTime, _ := time.Parse(iso8601DateFormat, "20130524T000000Z") req = StreamingSignV4(req, accessKeyID, secretAccessKeyID, "", location, dataLen, reqTime) - expectedAuthorization := "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,SignedHeaders=content-encoding;host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length;x-amz-storage-class,Signature=007480502de61457e955731b0f5d191f7e6f54a8a0f6cc7974a5ebd887965686" + expectedAuthorization := "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length;x-amz-storage-class,Signature=38cab3af09aa15ddf29e26e36236f60fb6bfb6243a20797ae9a8183674526079" actualAuthorization := req.Header.Get("Authorization") if actualAuthorization != expectedAuthorization { diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go index 39c4e0187..620af1c59 100644 --- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -76,7 +77,7 @@ func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in } // Get presigned string to sign. - stringToSign := preStringifyHTTPReq(req) + stringToSign := preStringToSignV2(req) hm := hmac.New(sha1.New, []byte(secretAccessKey)) hm.Write([]byte(stringToSign)) @@ -145,7 +146,7 @@ func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request } // Calculate HMAC for secretAccessKey. - stringToSign := stringifyHTTPReq(req) + stringToSign := stringToSignV2(req) hm := hmac.New(sha1.New, []byte(secretAccessKey)) hm.Write([]byte(stringToSign)) @@ -170,15 +171,14 @@ func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request // Expires + "\n" + // CanonicalizedProtocolHeaders + // CanonicalizedResource; -func preStringifyHTTPReq(req http.Request) string { +func preStringToSignV2(req http.Request) string { buf := new(bytes.Buffer) // Write standard headers. writePreSignV2Headers(buf, req) // Write canonicalized protocol headers if any. writeCanonicalizedHeaders(buf, req) // Write canonicalized Query resources if any. 
- isPreSign := true - writeCanonicalizedResource(buf, req, isPreSign) + writeCanonicalizedResource(buf, req) return buf.String() } @@ -198,15 +198,14 @@ func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) { // Date + "\n" + // CanonicalizedProtocolHeaders + // CanonicalizedResource; -func stringifyHTTPReq(req http.Request) string { +func stringToSignV2(req http.Request) string { buf := new(bytes.Buffer) // Write standard headers. writeSignV2Headers(buf, req) // Write canonicalized protocol headers if any. writeCanonicalizedHeaders(buf, req) // Write canonicalized Query resources if any. - isPreSign := false - writeCanonicalizedResource(buf, req, isPreSign) + writeCanonicalizedResource(buf, req) return buf.String() } @@ -253,17 +252,27 @@ func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) { } } -// The following list is already sorted and should always be, otherwise we could -// have signature-related issues +// AWS S3 Signature V2 calculation rule is give here: +// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign + +// Whitelist resource list that will be used in query string for signature-V2 calculation. +// The list should be alphabetically sorted var resourceList = []string{ "acl", "delete", + "lifecycle", "location", "logging", "notification", "partNumber", "policy", "requestPayment", + "response-cache-control", + "response-content-disposition", + "response-content-encoding", + "response-content-language", + "response-content-type", + "response-expires", "torrent", "uploadId", "uploads", @@ -278,22 +287,11 @@ var resourceList = []string{ // CanonicalizedResource = [ "/" + Bucket ] + // + // [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; -func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, isPreSign bool) { +func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) { // Save request URL. requestURL := req.URL // Get encoded URL path. - path := encodeURL2Path(requestURL) - if isPreSign { - // Get encoded URL path. - if len(requestURL.Query()) > 0 { - // Keep the usual queries unescaped for string to sign. - query, _ := url.QueryUnescape(s3utils.QueryEncode(requestURL.Query())) - path = path + "?" + query - } - buf.WriteString(path) - return - } - buf.WriteString(path) + buf.WriteString(encodeURL2Path(requestURL)) if requestURL.RawQuery != "" { var n int vals, _ := url.ParseQuery(requestURL.RawQuery) diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2_test.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2_test.go index 3c0e0ecea..042b6e65c 100644 --- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2_test.go +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2_test.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
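The request-signature-v2.go hunks above collapse the presign/sign split into a single writeCanonicalizedResource and extend the alphabetically sorted resourceList whitelist with the response-* override parameters, so only whitelisted sub-resources enter the V2 string-to-sign. The following self-contained sketch illustrates that filtering idea only; it is not the library's implementation, and the helper name canonicalSubResources is invented for the example.

package main

import (
	"fmt"
	"net/url"
	"sort"
	"strings"
)

// Mirrors the whitelist in the hunk above (alphabetically sorted).
var resourceList = []string{
	"acl", "delete", "lifecycle", "location", "logging", "notification",
	"partNumber", "policy", "requestPayment",
	"response-cache-control", "response-content-disposition",
	"response-content-encoding", "response-content-language",
	"response-content-type", "response-expires",
	"torrent", "uploadId", "uploads",
}

// canonicalSubResources keeps only whitelisted query parameters, sorted,
// roughly as they would appear in a V2 CanonicalizedResource.
func canonicalSubResources(rawQuery string) string {
	vals, _ := url.ParseQuery(rawQuery)
	var parts []string
	for _, resource := range resourceList {
		if vs, ok := vals[resource]; ok {
			if len(vs) > 0 && vs[0] != "" {
				parts = append(parts, resource+"="+vs[0])
			} else {
				parts = append(parts, resource)
			}
		}
	}
	sort.Strings(parts)
	if len(parts) == 0 {
		return ""
	}
	return "?" + strings.Join(parts, "&")
}

func main() {
	// Prints "?acl&uploads"; the non-whitelisted "prefix" is dropped.
	fmt.Println(canonicalSubResources("uploads=&prefix=photos&acl="))
}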
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go index 0d75dc162..d5721ac4b 100644 --- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go index 85ff063df..d53483e4e 100644 --- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/test-utils_test.go b/vendor/github.com/minio/minio-go/pkg/s3signer/test-utils_test.go index 049e5813d..cf96d66c8 100644 --- a/vendor/github.com/minio/minio-go/pkg/s3signer/test-utils_test.go +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/test-utils_test.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go b/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go index 0619b3082..29243635a 100644 --- a/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/utils_test.go b/vendor/github.com/minio/minio-go/pkg/s3signer/utils_test.go index 26f609013..22a2d651b 100644 --- a/vendor/github.com/minio/minio-go/pkg/s3signer/utils_test.go +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/utils_test.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go index bdc8d4e91..258390f63 100644 --- a/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go +++ b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/pkg/s3utils/utils_test.go b/vendor/github.com/minio/minio-go/pkg/s3utils/utils_test.go index d3b4d4331..f19e688bf 100644 --- a/vendor/github.com/minio/minio-go/pkg/s3utils/utils_test.go +++ b/vendor/github.com/minio/minio-go/pkg/s3utils/utils_test.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/pkg/set/stringset.go index 9f33488e0..efd02629b 100644 --- a/vendor/github.com/minio/minio-go/pkg/set/stringset.go +++ b/vendor/github.com/minio/minio-go/pkg/set/stringset.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/pkg/set/stringset_test.go b/vendor/github.com/minio/minio-go/pkg/set/stringset_test.go index e276fec5a..d7e6aa799 100644 --- a/vendor/github.com/minio/minio-go/pkg/set/stringset_test.go +++ b/vendor/github.com/minio/minio-go/pkg/set/stringset_test.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/post-policy.go b/vendor/github.com/minio/minio-go/post-policy.go index 5e716124a..b3ae7050a 100644 --- a/vendor/github.com/minio/minio-go/post-policy.go +++ b/vendor/github.com/minio/minio-go/post-policy.go @@ -1,3 +1,20 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package minio import ( @@ -167,6 +184,28 @@ func (p *PostPolicy) SetSuccessStatusAction(status string) error { return nil } +// SetUserMetadata - Set user metadata as a key/value couple. +// Can be retrieved through a HEAD request or an event. +func (p *PostPolicy) SetUserMetadata(key string, value string) error { + if strings.TrimSpace(key) == "" || key == "" { + return ErrInvalidArgument("Key is empty") + } + if strings.TrimSpace(value) == "" || value == "" { + return ErrInvalidArgument("Value is empty") + } + headerName := fmt.Sprintf("x-amz-meta-%s", key) + policyCond := policyCondition{ + matchType: "eq", + condition: fmt.Sprintf("$%s", headerName), + value: value, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData[headerName] = value + return nil +} + // addNewPolicy - internal helper to validate adding new policies. func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error { if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" { diff --git a/vendor/github.com/minio/minio-go/retry-continous.go b/vendor/github.com/minio/minio-go/retry-continous.go index e300af69c..f31dfa6f2 100644 --- a/vendor/github.com/minio/minio-go/retry-continous.go +++ b/vendor/github.com/minio/minio-go/retry-continous.go @@ -1,3 +1,20 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package minio import "time" diff --git a/vendor/github.com/minio/minio-go/retry.go b/vendor/github.com/minio/minio-go/retry.go index 1de5107e4..2c8ceda6b 100644 --- a/vendor/github.com/minio/minio-go/retry.go +++ b/vendor/github.com/minio/minio-go/retry.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/s3-endpoints.go b/vendor/github.com/minio/minio-go/s3-endpoints.go index c02f3f1fa..2a86eaab0 100644 --- a/vendor/github.com/minio/minio-go/s3-endpoints.go +++ b/vendor/github.com/minio/minio-go/s3-endpoints.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/s3-error.go b/vendor/github.com/minio/minio-go/s3-error.go index c5aff9bbc..f9e82334a 100644 --- a/vendor/github.com/minio/minio-go/s3-error.go +++ b/vendor/github.com/minio/minio-go/s3-error.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. 
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/test-utils_test.go b/vendor/github.com/minio/minio-go/test-utils_test.go index b109dfaf7..6f6443ccf 100644 --- a/vendor/github.com/minio/minio-go/test-utils_test.go +++ b/vendor/github.com/minio/minio-go/test-utils_test.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/transport.go b/vendor/github.com/minio/minio-go/transport.go index d286bd7ae..e2dafe172 100644 --- a/vendor/github.com/minio/minio-go/transport.go +++ b/vendor/github.com/minio/minio-go/transport.go @@ -2,7 +2,7 @@ /* * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2017 Minio, Inc. + * Copyright 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-go/transport_1_6.go b/vendor/github.com/minio/minio-go/transport_1_6.go deleted file mode 100644 index 77e7d76fc..000000000 --- a/vendor/github.com/minio/minio-go/transport_1_6.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build go1.6,!go1.7,!go1.8 - -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "net/http" - "time" -) - -// This default transport is similar to http.DefaultTransport -// but with additional DisableCompression: -var defaultMinioTransport http.RoundTripper = &http.Transport{ - Proxy: http.ProxyFromEnvironment, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - // Set this value so that the underlying transport round-tripper - // doesn't try to auto decode the body of objects with - // content-encoding set to `gzip`. - // - // Refer: - // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843 - DisableCompression: true, -} diff --git a/vendor/github.com/minio/minio-go/utils.go b/vendor/github.com/minio/minio-go/utils.go index 6f54639e0..a8ff8cfb9 100644 --- a/vendor/github.com/minio/minio-go/utils.go +++ b/vendor/github.com/minio/minio-go/utils.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -19,6 +20,8 @@ package minio import ( "crypto/md5" "crypto/sha256" + "encoding/base64" + "encoding/hex" "encoding/xml" "io" "io/ioutil" @@ -38,18 +41,18 @@ func xmlDecoder(body io.Reader, v interface{}) error { return d.Decode(v) } -// sum256 calculate sha256 sum for an input byte array. -func sum256(data []byte) []byte { +// sum256 calculate sha256sum for an input byte array, returns hex encoded. +func sum256Hex(data []byte) string { hash := sha256.New() hash.Write(data) - return hash.Sum(nil) + return hex.EncodeToString(hash.Sum(nil)) } -// sumMD5 calculate sumMD5 sum for an input byte array. -func sumMD5(data []byte) []byte { +// sumMD5Base64 calculate md5sum for an input byte array, returns base64 encoded. +func sumMD5Base64(data []byte) string { hash := md5.New() hash.Write(data) - return hash.Sum(nil) + return base64.StdEncoding.EncodeToString(hash.Sum(nil)) } // getEndpointURL - construct a new endpoint. @@ -109,10 +112,13 @@ func closeResponse(resp *http.Response) { } } -var emptySHA256 = sum256(nil) +var ( + // Hex encoded string of nil sha256sum bytes. + emptySHA256Hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" -// Sentinel URL is the default url value which is invalid. -var sentinelURL = url.URL{} + // Sentinel URL is the default url value which is invalid. + sentinelURL = url.URL{} +) // Verify if input endpoint URL is valid. func isValidEndpointURL(endpointURL url.URL) error { @@ -212,3 +218,70 @@ func getDefaultLocation(u url.URL, regionOverride string) (location string) { // Default to location to 'us-east-1'. return "us-east-1" } + +var supportedHeaders = []string{ + "content-type", + "cache-control", + "content-encoding", + "content-disposition", + // Add more supported headers here. +} + +// cseHeaders is list of client side encryption headers +var cseHeaders = []string{ + "X-Amz-Iv", + "X-Amz-Key", + "X-Amz-Matdesc", +} + +// isStandardHeader returns true if header is a supported header and not a custom header +func isStandardHeader(headerKey string) bool { + key := strings.ToLower(headerKey) + for _, header := range supportedHeaders { + if strings.ToLower(header) == key { + return true + } + } + return false +} + +// isCSEHeader returns true if header is a client side encryption header. +func isCSEHeader(headerKey string) bool { + key := strings.ToLower(headerKey) + for _, h := range cseHeaders { + header := strings.ToLower(h) + if (header == key) || + (("x-amz-meta-" + header) == key) { + return true + } + } + return false +} + +// sseHeaders is list of server side encryption headers +var sseHeaders = []string{ + "x-amz-server-side-encryption", + "x-amz-server-side-encryption-aws-kms-key-id", + "x-amz-server-side-encryption-context", + "x-amz-server-side-encryption-customer-algorithm", + "x-amz-server-side-encryption-customer-key", + "x-amz-server-side-encryption-customer-key-MD5", +} + +// isSSEHeader returns true if header is a server side encryption header. +func isSSEHeader(headerKey string) bool { + key := strings.ToLower(headerKey) + for _, h := range sseHeaders { + if strings.ToLower(h) == key { + return true + } + } + return false +} + +// isAmzHeader returns true if header is a x-amz-meta-* or x-amz-acl header. 
+func isAmzHeader(headerKey string) bool { + key := strings.ToLower(headerKey) + + return strings.HasPrefix(key, "x-amz-meta-") || key == "x-amz-acl" +} diff --git a/vendor/github.com/minio/minio-go/utils_test.go b/vendor/github.com/minio/minio-go/utils_test.go index ba297112e..719ee4b0f 100644 --- a/vendor/github.com/minio/minio-go/utils_test.go +++ b/vendor/github.com/minio/minio-go/utils_test.go @@ -1,5 +1,6 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,6 +14,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + package minio import ( @@ -289,3 +291,105 @@ func TestIsValidBucketName(t *testing.T) { } } + +// Tests if header is standard supported header +func TestIsStandardHeader(t *testing.T) { + testCases := []struct { + // Input. + header string + // Expected result. + expectedValue bool + }{ + {"content-encoding", true}, + {"content-type", true}, + {"cache-control", true}, + {"content-disposition", true}, + {"random-header", false}, + } + + for i, testCase := range testCases { + actual := isStandardHeader(testCase.header) + if actual != testCase.expectedValue { + t.Errorf("Test %d: Expected to pass, but failed", i+1) + } + } + +} + +// Tests if header is server encryption header +func TestIsSSEHeader(t *testing.T) { + testCases := []struct { + // Input. + header string + // Expected result. + expectedValue bool + }{ + {"x-amz-server-side-encryption", true}, + {"x-amz-server-side-encryption-aws-kms-key-id", true}, + {"x-amz-server-side-encryption-context", true}, + {"x-amz-server-side-encryption-customer-algorithm", true}, + {"x-amz-server-side-encryption-customer-key", true}, + {"x-amz-server-side-encryption-customer-key-MD5", true}, + {"random-header", false}, + } + + for i, testCase := range testCases { + actual := isSSEHeader(testCase.header) + if actual != testCase.expectedValue { + t.Errorf("Test %d: Expected to pass, but failed", i+1) + } + } +} + +// Tests if header is client encryption header +func TestIsCSEHeader(t *testing.T) { + testCases := []struct { + // Input. + header string + // Expected result. + expectedValue bool + }{ + {"x-amz-iv", true}, + {"x-amz-key", true}, + {"x-amz-matdesc", true}, + {"x-amz-meta-x-amz-iv", true}, + {"x-amz-meta-x-amz-key", true}, + {"x-amz-meta-x-amz-matdesc", true}, + {"random-header", false}, + } + + for i, testCase := range testCases { + actual := isCSEHeader(testCase.header) + if actual != testCase.expectedValue { + t.Errorf("Test %d: Expected to pass, but failed", i+1) + } + } + +} + +// Tests if header is x-amz-meta or x-amz-acl +func TestIsAmzHeader(t *testing.T) { + testCases := []struct { + // Input. + header string + // Expected result. + expectedValue bool + }{ + {"x-amz-iv", false}, + {"x-amz-key", false}, + {"x-amz-matdesc", false}, + {"x-amz-meta-x-amz-iv", true}, + {"x-amz-meta-x-amz-key", true}, + {"x-amz-meta-x-amz-matdesc", true}, + {"x-amz-acl", true}, + {"random-header", false}, + } + + for i, testCase := range testCases { + actual := isAmzHeader(testCase.header) + if actual != testCase.expectedValue { + t.Errorf("Test %d: Expected to pass, but failed", i+1) + } + } + +}
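As a rough usage sketch of the new PostPolicy.SetUserMetadata helper introduced above: it adds an "x-amz-meta-*" condition and form field to a browser-upload POST policy. The endpoint, credentials, bucket and object name below are placeholders, and the PresignedPostPolicy call shown is the non-context variant from this vendored API version.

package main

import (
	"fmt"
	"log"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials; replace with real values.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Build a POST policy for a browser upload.
	policy := minio.NewPostPolicy()
	policy.SetBucket("my-bucket") // placeholder bucket
	policy.SetKey("my-object")    // placeholder object name
	policy.SetExpires(time.Now().UTC().Add(10 * time.Minute))

	// New in this update: attach user metadata; this becomes the
	// "x-amz-meta-source" form field and policy condition.
	if err := policy.SetUserMetadata("source", "browser-upload"); err != nil {
		log.Fatalln(err)
	}

	// The returned form data now includes the x-amz-meta-* entry.
	url, formData, err := client.PresignedPostPolicy(policy)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println("POST to:", url)
	for k, v := range formData {
		fmt.Printf("form field %s=%s\n", k, v)
	}
}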
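The renamed checksum helpers in utils.go (sum256Hex, sumMD5Base64) are unexported, but the encoding convention they fix, hex for SHA-256 and base64 for MD5, can be illustrated with the standard library alone. This standalone sketch also reproduces the emptySHA256Hex constant used above.

package main

import (
	"crypto/md5"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"fmt"
)

func main() {
	// SHA-256 sums are carried as hex strings (cf. sum256Hex).
	sum := sha256.Sum256(nil)
	fmt.Println(hex.EncodeToString(sum[:]))
	// Prints e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,
	// matching the emptySHA256Hex constant introduced in utils.go.

	// MD5 sums are carried base64-encoded (cf. sumMD5Base64), as expected
	// by the Content-MD5 request header.
	md := md5.Sum([]byte("hello"))
	fmt.Println(base64.StdEncoding.EncodeToString(md[:]))
}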