diff --git a/vendor/manifest b/vendor/manifest
index ce00ac7fd..bca4ae098 100644
--- a/vendor/manifest
+++ b/vendor/manifest
@@ -28,8 +28,8 @@
{
"importpath": "github.com/minio/minio-go",
"repository": "https://github.com/minio/minio-go",
- "revision": "a4cd3caabd5f9c35ac100110eb60c2b80798f1af",
- "branch": "HEAD"
+ "revision": "17b4ebd52505bde655e3b14df732e31850641bb7",
+ "branch": "master"
},
{
"importpath": "github.com/pkg/sftp",
diff --git a/vendor/src/github.com/minio/minio-go/API.md b/vendor/src/github.com/minio/minio-go/API.md
new file mode 100644
index 000000000..24429d835
--- /dev/null
+++ b/vendor/src/github.com/minio/minio-go/API.md
@@ -0,0 +1,536 @@
+## API Documentation
+
+### Minio client object creation
+Minio client object is created using minio-go:
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+}
+```
+
+s3Client can be used to perform operations on S3 storage. APIs are described below.
+
+### Bucket operations
+
+* [`MakeBucket`](#MakeBucket)
+* [`ListBuckets`](#ListBuckets)
+* [`BucketExists`](#BucketExists)
+* [`RemoveBucket`](#RemoveBucket)
+* [`ListObjects`](#ListObjects)
+* [`ListIncompleteUploads`](#ListIncompleteUploads)
+
+### Object operations
+
+* [`GetObject`](#GetObject)
+* [`PutObject`](#PutObject)
+* [`CopyObject`](#CopyObject)
+* [`StatObject`](#StatObject)
+* [`RemoveObject`](#RemoveObject)
+* [`RemoveIncompleteUpload`](#RemoveIncompleteUpload)
+
+### File operations.
+
+* [`FPutObject`](#FPutObject)
+* [`FGetObject`](#FGetObject)
+
+### Bucket policy operations.
+
+* [`SetBucketPolicy`](#SetBucketPolicy)
+* [`GetBucketPolicy`](#GetBucketPolicy)
+* [`RemoveBucketPolicy`](#RemoveBucketPolicy)
+
+### Presigned operations
+
+* [`PresignedGetObject`](#PresignedGetObject)
+* [`PresignedPutObject`](#PresignedPutObject)
+* [`PresignedPostPolicy`](#PresignedPostPolicy)
+
+### Bucket operations
+---------------------------------------
+
+#### MakeBucket(bucketName, location)
+Create a new bucket.
+
+__Arguments__
+* `bucketName` _string_ - Name of the bucket.
+* `location` _string_ - region valid values are _us-west-1_, _us-west-2_, _eu-west-1_, _eu-central-1_, _ap-southeast-1_, _ap-northeast-1_, _ap-southeast-2_, _sa-east-1_
+
+__Example__
+```go
+err := s3Client.MakeBucket("mybucket", "us-west-1")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully created mybucket.")
+```
+---------------------------------------
+
+#### ListBuckets()
+List all buckets.
+
+`bucketList` emits bucket with the format:
+* `bucket.Name` _string_: bucket name
+* `bucket.CreationDate` time.Time : date when bucket was created
+
+__Example__
+```go
+buckets, err := s3Client.ListBuckets()
+if err != nil {
+ fmt.Println(err)
+ return
+}
+for _, bucket := range buckets {
+ fmt.Println(bucket)
+}
+```
+---------------------------------------
+
+#### BucketExists(bucketName)
+Check if bucket exists.
+
+__Arguments__
+* `bucketName` _string_ : name of the bucket
+
+__Example__
+```go
+err := s3Client.BucketExists("mybucket")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+---------------------------------------
+
+#### RemoveBucket(bucketName)
+Remove a bucket.
+
+__Arguments__
+* `bucketName` _string_ : name of the bucket
+
+__Example__
+```go
+err := s3Client.RemoveBucket("mybucket")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+---------------------------------------
+
+#### GetBucketPolicy(bucketName, objectPrefix)
+Get access permissions on a bucket or a prefix.
+
+__Arguments__
+* `bucketName` _string_ : name of the bucket
+* `objectPrefix` _string_ : name of the object prefix
+
+__Example__
+```go
+bucketPolicy, err := s3Client.GetBucketPolicy("mybucket", "")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Access permissions for mybucket is", bucketPolicy)
+```
+---------------------------------------
+
+#### SetBucketPolicy(bucketName, objectPrefix, policy)
+Set access permissions on bucket or an object prefix.
+
+__Arguments__
+* `bucketName` _string_: name of the bucket
+* `objectPrefix` _string_ : name of the object prefix
+* `policy` _BucketPolicy_: policy can be _none_, _readonly_, _readwrite_, _writeonly_
+
+__Example__
+```go
+err := s3Client.SetBucketPolicy("mybucket", "myprefix", BucketPolicyReadWrite)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+---------------------------------------
+
+#### RemoveBucketPolicy(bucketName, objectPrefix)
+Remove existing permissions on bucket or an object prefix.
+
+__Arguments__
+* `bucketName` _string_: name of the bucket
+* `objectPrefix` _string_ : name of the object prefix
+
+__Example__
+```go
+err := s3Client.RemoveBucketPolicy("mybucket", "myprefix")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+---------------------------------------
+
+#### ListObjects(bucketName, prefix, recursive, doneCh)
+List objects in a bucket.
+
+__Arguments__
+* `bucketName` _string_: name of the bucket
+* `objectPrefix` _string_: the prefix of the objects that should be listed
+* `recursive` _bool_: `true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'
+* `doneCh` chan struct{} : channel for pro-actively closing the internal go routine
+
+__Return Value__
+* `<-chan ObjectInfo` _chan ObjectInfo_: Read channel for all the objects in the bucket, the object is of the format:
+ * `objectInfo.Key` _string_: name of the object
+ * `objectInfo.Size` _int64_: size of the object
+ * `objectInfo.ETag` _string_: etag of the object
+ * `objectInfo.LastModified` _time.Time_: modified time stamp
+
+__Example__
+```go
+// Create a done channel to control 'ListObjects' go routine.
+doneCh := make(chan struct{})
+
+// Indicate to our routine to exit cleanly upon return.
+defer close(doneCh)
+
+isRecursive := true
+objectCh := s3Client.ListObjects("mybucket", "myprefix", isRecursive, doneCh)
+for object := range objectCh {
+ if object.Err != nil {
+ fmt.Println(object.Err)
+ return
+ }
+ fmt.Println(object)
+}
+
+```
+
+---------------------------------------
+
+#### ListIncompleteUploads(bucketName, prefix, recursive)
+List partially uploaded objects in a bucket.
+
+__Arguments__
+* `bucketName` _string_: name of the bucket
+* `prefix` _string_: prefix of the object names that are partially uploaded
+* `recursive` bool: directory style listing when false, recursive listing when true
+* `doneCh` chan struct{} : channel for pro-actively closing the internal go routine
+
+__Return Value__
+* `<-chan ObjectMultipartInfo` _chan ObjectMultipartInfo_ : emits multipart objects of the format:
+ * `multiPartObjInfo.Key` _string_: name of the incomplete object
+ * `multiPartObjInfo.UploadID` _string_: upload ID of the incomplete object
+ * `multiPartObjInfo.Size` _int64_: size of the incompletely uploaded object
+
+__Example__
+```go
+// Create a done channel to control 'ListObjects' go routine.
+doneCh := make(chan struct{})
+
+// Indicate to our routine to exit cleanly upon return.
+defer close(doneCh)
+
+isRecursive := true
+multiPartObjectCh := s3Client.ListIncompleteUploads("mybucket", "myprefix", isRecursive, doneCh)
+for multiPartObject := range multiPartObjectCh {
+ if multiPartObject.Err != nil {
+ fmt.Println(multiPartObject.Err)
+ return
+ }
+ fmt.Println(multiPartObject)
+}
+```
+
+---------------------------------------
+### Object operations
+
+#### GetObject(bucketName, objectName)
+Download an object.
+
+__Arguments__
+* `bucketName` _string_: name of the bucket
+* `objectName` _string_: name of the object
+
+__Return Value__
+* `object` _*minio.Object_ : _minio.Object_ represents object reader.
+
+__Example__
+```go
+object, err := s3Client.GetObject("mybucket", "photo.jpg")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+localFile, err := os.Open("/tmp/local-file")
+if _, err := io.Copy(localFile, object); err != nil {
+ fmt.Println(err)
+ return
+}
+```
+---------------------------------------
+---------------------------------------
+
+#### FGetObject(bucketName, objectName, filePath)
+Downloads and saves the object as a file in the local filesystem.
+
+__Arguments__
+* `bucketName` _string_: name of the bucket
+* `objectName` _string_: name of the object
+* `filePath` _string_: path to which the object data will be written to
+
+__Example__
+```go
+err := s3Client.FGetObject("mybucket", "photo.jpg", "/tmp/photo.jpg")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+---------------------------------------
+
+#### PutObject(bucketName, objectName, reader, contentType)
+Upload an object.
+
+Uploading a stream
+__Arguments__
+* `bucketName` _string_: name of the bucket
+* `objectName` _string_: name of the object
+* `reader` _io.Reader_: Any golang object implementing io.Reader
+* `contentType` _string_: content type of the object.
+
+__Example__
+```go
+file, err := os.Open("my-testfile")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+defer file.Close()
+
+n, err := s3Client.PutObject("my-bucketname", "my-objectname", file, "application/octet-stream")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+---------------------------------------
+
+#### CopyObject(bucketName, objectName, objectSource, conditions)
+Copy a source object into a new object with the provided name in the provided bucket.
+
+__Arguments__
+* `bucketName` _string_: name of the bucket
+* `objectName` _string_: name of the object
+* `objectSource` _string_: name of the object source.
+* `conditions` _CopyConditions_: Collection of supported CopyObject conditions. ['x-amz-copy-source', 'x-amz-copy-source-if-match', 'x-amz-copy-source-if-none-match', 'x-amz-copy-source-if-unmodified-since', 'x-amz-copy-source-if-modified-since']
+
+__Example__
+```go
+// All following conditions are allowed and can be combined together.
+
+// Set copy conditions.
+var copyConds = minio.NewCopyConditions()
+// Set modified condition, copy object modified since 2014 April.
+copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+
+// Set unmodified condition, copy object unmodified since 2014 April.
+// copyConds.SetUnmodified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+
+// Set matching ETag condition, copy object which matches the following ETag.
+// copyConds.SetMatchETag("31624deb84149d2f8ef9c385918b653a")
+
+// Set matching ETag except condition, copy object which does not match the following ETag.
+// copyConds.SetMatchETagExcept("31624deb84149d2f8ef9c385918b653a")
+
+err := s3Client.CopyObject("my-bucketname", "my-objectname", "/my-sourcebucketname/my-sourceobjectname", copyConds)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+---------------------------------------
+
+#### FPutObject(bucketName, objectName, filePath, contentType)
+Uploads the object using contents from a file
+
+__Arguments__
+* `bucketName` _string_: name of the bucket
+* `objectName` _string_: name of the object
+* `filePath` _string_: file path of the file to be uploaded
+* `contentType` _string_: content type of the object
+
+__Example__
+```go
+n, err := s3Client.FPutObject("my-bucketname", "my-objectname", "/tmp/my-filename.csv", "application/csv")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+---------------------------------------
+
+#### StatObject(bucketName, objectName)
+Get metadata of an object.
+
+__Arguments__
+* `bucketName` _string_: name of the bucket
+* `objectName` _string_: name of the object
+
+__Return Value__
+* `objInfo` _ObjectInfo_ : object stat info for following format:
+ * `objInfo.Size` _int64_: size of the object
+ * `objInfo.ETag` _string_: etag of the object
+ * `objInfo.ContentType` _string_: Content-Type of the object
+ * `objInfo.LastModified` _string_: modified time stamp
+
+__Example__
+```go
+objInfo, err := s3Client.StatObject("mybucket", "photo.jpg")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println(objInfo)
+```
+---------------------------------------
+
+#### RemoveObject(bucketName, objectName)
+Remove an object.
+
+__Arguments__
+* `bucketName` _string_: name of the bucket
+* `objectName` _string_: name of the object
+
+__Example__
+```go
+err := s3Client.RemoveObject("mybucket", "photo.jpg")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+---------------------------------------
+
+#### RemoveIncompleteUpload(bucketName, objectName)
+Remove a partially uploaded object.
+
+__Arguments__
+* `bucketName` _string_: name of the bucket
+* `objectName` _string_: name of the object
+
+__Example__
+```go
+err := s3Client.RemoveIncompleteUpload("mybucket", "photo.jpg")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+### Presigned operations
+---------------------------------------
+
+#### PresignedGetObject(bucketName, objectName, expiry)
+Generate a presigned URL for GET.
+
+__Arguments__
+* `bucketName` _string_: name of the bucket.
+* `objectName` _string_: name of the object.
+* `expiry` _time.Duration_: expiry in seconds.
+* `reqParams` _url.Values_ : additional response header overrides supports _response-expires_, _response-content-type_, _response-cache-control_, _response-content-disposition_
+
+__Example__
+```go
+// Set request parameters for content-disposition.
+reqParams := make(url.Values)
+reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"")
+
+// Generates a presigned url which expires in a day.
+presignedURL, err := s3Client.PresignedGetObject("mybucket", "photo.jpg", time.Second * 24 * 60 * 60, reqParams)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+---------------------------------------
+
+#### PresignedPutObject(bucketName, objectName, expiry)
+Generate a presigned URL for PUT.
+
+NOTE: you can upload to S3 only with specified object name.
+
+
+__Arguments__
+* `bucketName` _string_: name of the bucket
+* `objectName` _string_: name of the object
+* `expiry` _time.Duration_: expiry in seconds
+
+__Example__
+```go
+// Generates a url which expires in a day.
+presignedURL, err := s3Client.PresignedPutObject("mybucket", "photo.jpg", time.Second * 24 * 60 * 60)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+---------------------------------------
+
+#### PresignedPostPolicy
+PresignedPostPolicy we can provide policies specifying conditions restricting
+what you want to allow in a POST request, such as bucket name where objects can be
+uploaded, key name prefixes that you want to allow for the object being created and more.
+
+We need to create our policy first:
+```go
+policy := minio.NewPostPolicy()
+```
+Apply upload policy restrictions:
+```go
+policy.SetBucket("my-bucketname")
+policy.SetKey("my-objectname")
+policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
+
+// Only allow 'png' images.
+policy.SetContentType("image/png")
+
+// Only allow content size in range 1KB to 1MB.
+policy.SetContentLengthRange(1024, 1024*1024)
+```
+Get the POST form key/value object:
+```go
+formData, err := s3Client.PresignedPostPolicy(policy)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+POST your content from the command line using `curl`:
+```go
+fmt.Printf("curl ")
+for k, v := range formData {
+ fmt.Printf("-F %s=%s ", k, v)
+}
+fmt.Printf("-F file=@/etc/bash.bashrc ")
+fmt.Printf("https://my-bucketname.s3.amazonaws.com\n")
+```
diff --git a/vendor/src/github.com/minio/minio-go/CONTRIBUTING.md b/vendor/src/github.com/minio/minio-go/CONTRIBUTING.md
index b4b224eef..8b1ee86c6 100644
--- a/vendor/src/github.com/minio/minio-go/CONTRIBUTING.md
+++ b/vendor/src/github.com/minio/minio-go/CONTRIBUTING.md
@@ -15,6 +15,8 @@
- Run `go fmt`
- Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request.
- Make sure `go test -race ./...` and `go build` completes.
+ NOTE: go test runs functional tests and requires you to have an AWS S3 account. Set them as environment variables
+ ``ACCESS_KEY`` and ``SECRET_KEY``. To run shorter version of the tests please use ``go test -short -race ./...``
* Read [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project
- `minio-go` project is strictly conformant with Golang style
diff --git a/vendor/src/github.com/minio/minio-go/INSTALLGO.md b/vendor/src/github.com/minio/minio-go/INSTALLGO.md
index c3762bbfc..81c3d53f5 100644
--- a/vendor/src/github.com/minio/minio-go/INSTALLGO.md
+++ b/vendor/src/github.com/minio/minio-go/INSTALLGO.md
@@ -71,7 +71,7 @@ export GOROOT=$(brew --prefix)/Cellar/go/${GOVERSION}/libexec
export PATH=$PATH:${GOPATH}/bin
```
-##### Source the new enviornment
+##### Source the new environment
```sh
$ source ~/.bash_profile
diff --git a/vendor/src/github.com/minio/minio-go/README.md b/vendor/src/github.com/minio/minio-go/README.md
index 8e4da4317..3db4f7b8f 100644
--- a/vendor/src/github.com/minio/minio-go/README.md
+++ b/vendor/src/github.com/minio/minio-go/README.md
@@ -61,12 +61,14 @@ func main() {
## Documentation
+[API documentation](./API.md)
+
+## Examples
+
### Bucket Operations.
-* [MakeBucket(bucketName, BucketACL, location) error](examples/s3/makebucket.go)
+* [MakeBucket(bucketName, location) error](examples/s3/makebucket.go)
* [BucketExists(bucketName) error](examples/s3/bucketexists.go)
* [RemoveBucket(bucketName) error](examples/s3/removebucket.go)
-* [GetBucketACL(bucketName) (BucketACL, error)](examples/s3/getbucketacl.go)
-* [SetBucketACL(bucketName, BucketACL) error)](examples/s3/setbucketacl.go)
* [ListBuckets() []BucketInfo](examples/s3/listbuckets.go)
* [ListObjects(bucketName, objectPrefix, recursive, chan<- struct{}) <-chan ObjectInfo](examples/s3/listobjects.go)
* [ListIncompleteUploads(bucketName, prefix, recursive, chan<- struct{}) <-chan ObjectMultipartInfo](examples/s3/listincompleteuploads.go)
@@ -83,10 +85,15 @@ func main() {
* [FGetObject(bucketName, objectName, filePath) error](examples/s3/fgetobject.go)
### Presigned Operations.
-* [PresignedGetObject(bucketName, objectName, time.Duration) (string, error)](examples/s3/presignedgetobject.go)
+* [PresignedGetObject(bucketName, objectName, time.Duration, url.Values) (string, error)](examples/s3/presignedgetobject.go)
* [PresignedPutObject(bucketName, objectName, time.Duration) (string, error)](examples/s3/presignedputobject.go)
* [PresignedPostPolicy(NewPostPolicy()) (map[string]string, error)](examples/s3/presignedpostpolicy.go)
+### Bucket Policy Operations.
+* [SetBucketPolicy(bucketName, objectPrefix, BucketPolicy) error](examples/s3/setbucketpolicy.go)
+* [GetBucketPolicy(bucketName, objectPrefix) (BucketPolicy, error)](examples/s3/getbucketpolicy.go)
+* [RemoveBucketPolicy(bucketName, objectPrefix) error](examples/s3/removebucketpolicy.go)
+
### API Reference
[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/minio/minio-go)
diff --git a/vendor/src/github.com/minio/minio-go/api-definitions.go b/vendor/src/github.com/minio/minio-go/api-datatypes.go
similarity index 100%
rename from vendor/src/github.com/minio/minio-go/api-definitions.go
rename to vendor/src/github.com/minio/minio-go/api-datatypes.go
diff --git a/vendor/src/github.com/minio/minio-go/api-error-response.go b/vendor/src/github.com/minio/minio-go/api-error-response.go
index 647165112..e6789aff5 100644
--- a/vendor/src/github.com/minio/minio-go/api-error-response.go
+++ b/vendor/src/github.com/minio/minio-go/api-error-response.go
@@ -47,7 +47,7 @@ type ErrorResponse struct {
// Region where the bucket is located. This header is returned
// only in HEAD bucket and ListObjects response.
- AmzBucketRegion string
+ Region string
}
// ToErrorResponse - Returns parsed ErrorResponse struct from body and
@@ -98,65 +98,54 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
case http.StatusNotFound:
if objectName == "" {
errResp = ErrorResponse{
- Code: "NoSuchBucket",
- Message: "The specified bucket does not exist.",
- BucketName: bucketName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+ Code: "NoSuchBucket",
+ Message: "The specified bucket does not exist.",
+ BucketName: bucketName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
}
} else {
errResp = ErrorResponse{
- Code: "NoSuchKey",
- Message: "The specified key does not exist.",
- BucketName: bucketName,
- Key: objectName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+ Code: "NoSuchKey",
+ Message: "The specified key does not exist.",
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
}
}
case http.StatusForbidden:
errResp = ErrorResponse{
- Code: "AccessDenied",
- Message: "Access Denied.",
- BucketName: bucketName,
- Key: objectName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+ Code: "AccessDenied",
+ Message: "Access Denied.",
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
}
case http.StatusConflict:
errResp = ErrorResponse{
- Code: "Conflict",
- Message: "Bucket not empty.",
- BucketName: bucketName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+ Code: "Conflict",
+ Message: "Bucket not empty.",
+ BucketName: bucketName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
}
default:
errResp = ErrorResponse{
- Code: resp.Status,
- Message: resp.Status,
- BucketName: bucketName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+ Code: resp.Status,
+ Message: resp.Status,
+ BucketName: bucketName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
}
}
}
-
- // AccessDenied without a signature mismatch code, usually means
- // that the bucket policy has certain restrictions where some API
- // operations are not allowed. Handle this case so that top level
- // callers can interpret this easily and fall back if needed to a
- // lower functionality call. Read each individual API specific
- // code for such fallbacks.
- if errResp.Code == "AccessDenied" && errResp.Message == "Access Denied" {
- errResp.Code = "NotImplemented"
- errResp.Message = "Operation is not allowed according to your bucket policy."
- }
return errResp
}
diff --git a/vendor/src/github.com/minio/minio-go/api-get.go b/vendor/src/github.com/minio/minio-go/api-get-object.go
similarity index 69%
rename from vendor/src/github.com/minio/minio-go/api-get.go
rename to vendor/src/github.com/minio/minio-go/api-get-object.go
index 56d44c9f5..b5b74ff72 100644
--- a/vendor/src/github.com/minio/minio-go/api-get.go
+++ b/vendor/src/github.com/minio/minio-go/api-get-object.go
@@ -1,5 +1,5 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -20,124 +20,12 @@ import (
"errors"
"fmt"
"io"
- "math"
"net/http"
- "net/url"
"strings"
"sync"
"time"
)
-// GetBucketACL - Get the permissions on an existing bucket.
-//
-// Returned values are:
-//
-// private - Owner gets full access.
-// public-read - Owner gets full access, others get read access.
-// public-read-write - Owner gets full access, others get full access
-// too.
-// authenticated-read - Owner gets full access, authenticated users
-// get read access.
-func (c Client) GetBucketACL(bucketName string) (BucketACL, error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return "", err
- }
-
- // Set acl query.
- urlValues := make(url.Values)
- urlValues.Set("acl", "")
-
- // Instantiate a new request.
- req, err := c.newRequest("GET", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- })
- if err != nil {
- return "", err
- }
-
- // Initiate the request.
- resp, err := c.do(req)
- defer closeResponse(resp)
- if err != nil {
- return "", err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return "", httpRespToErrorResponse(resp, bucketName, "")
- }
- }
-
- // Decode access control policy.
- policy := accessControlPolicy{}
- err = xmlDecoder(resp.Body, &policy)
- if err != nil {
- return "", err
- }
-
- // We need to avoid following de-serialization check for Google
- // Cloud Storage. On Google Cloud Storage "private" canned ACL's
- // policy do not have grant list. Treat it as a valid case, check
- // for all other vendors.
- if !isGoogleEndpoint(c.endpointURL) {
- if policy.AccessControlList.Grant == nil {
- errorResponse := ErrorResponse{
- Code: "InternalError",
- Message: "Access control Grant list is empty. " + reportIssue,
- BucketName: bucketName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
- }
- return "", errorResponse
- }
- }
-
- // Boolean cues to indentify right canned acls.
- var publicRead, publicWrite, authenticatedRead bool
-
- // Handle grants.
- grants := policy.AccessControlList.Grant
- for _, g := range grants {
- if g.Grantee.URI == "" && g.Permission == "FULL_CONTROL" {
- continue
- }
- if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" {
- authenticatedRead = true
- break
- } else if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" {
- publicWrite = true
- } else if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" {
- publicRead = true
- }
- }
-
- // Verify if acl is authenticated read.
- if authenticatedRead {
- return BucketACL("authenticated-read"), nil
- }
- // Verify if acl is private.
- if !publicWrite && !publicRead {
- return BucketACL("private"), nil
- }
- // Verify if acl is public-read.
- if !publicWrite && publicRead {
- return BucketACL("public-read"), nil
- }
- // Verify if acl is public-read-write.
- if publicRead && publicWrite {
- return BucketACL("public-read-write"), nil
- }
-
- return "", ErrorResponse{
- Code: "NoSuchBucketPolicy",
- Message: "The specified bucket does not have a bucket policy.",
- BucketName: bucketName,
- RequestID: "minio",
- }
-}
-
// GetObject - returns an seekable, readable object.
func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
// Input validation.
@@ -147,8 +35,9 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
if err := isValidObjectName(objectName); err != nil {
return nil, err
}
- // Send an explicit info to get the actual object size.
- objectInfo, err := c.StatObject(bucketName, objectName)
+
+ // Start the request as soon Get is initiated.
+ httpReader, objectInfo, err := c.getObject(bucketName, objectName, 0, 0)
if err != nil {
return nil, err
}
@@ -160,8 +49,7 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
// Create done channel.
doneCh := make(chan struct{})
- // This routine feeds partial object data as and when the caller
- // reads.
+ // This routine feeds partial object data as and when the caller reads.
go func() {
defer close(reqCh)
defer close(resCh)
@@ -174,23 +62,27 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
return
// Request message.
case req := <-reqCh:
- // Get shortest length.
- // NOTE: Last remaining bytes are usually smaller than
- // req.Buffer size. Use that as the final length.
- length := math.Min(float64(len(req.Buffer)), float64(objectInfo.Size-req.Offset))
- httpReader, _, err := c.getObject(bucketName, objectName, req.Offset, int64(length))
- if err != nil {
- resCh <- readResponse{
- Error: err,
+ // Offset changes fetch the new object at an Offset.
+ if req.DidOffsetChange {
+ // Read from offset.
+ httpReader, _, err = c.getObject(bucketName, objectName, req.Offset, 0)
+ if err != nil {
+ resCh <- readResponse{
+ Error: err,
+ }
+ return
}
- return
}
+
+ // Read at least req.Buffer bytes, if not we have
+ // reached our EOF.
size, err := io.ReadFull(httpReader, req.Buffer)
if err == io.ErrUnexpectedEOF {
// If an EOF happens after reading some but not
// all the bytes ReadFull returns ErrUnexpectedEOF
err = io.EOF
}
+ // Reply back how much was read.
resCh <- readResponse{
Size: int(size),
Error: err,
@@ -211,8 +103,9 @@ type readResponse struct {
// Read request message container to communicate with internal
// go-routine.
type readRequest struct {
- Buffer []byte
- Offset int64 // readAt offset.
+ Buffer []byte
+ Offset int64 // readAt offset.
+ DidOffsetChange bool
}
// Object represents an open object. It implements Read, ReadAt,
@@ -225,6 +118,7 @@ type Object struct {
reqCh chan<- readRequest
resCh <-chan readResponse
doneCh chan<- struct{}
+ prevOffset int64
currOffset int64
objectInfo ObjectInfo
@@ -247,7 +141,7 @@ func (o *Object) Read(b []byte) (n int, err error) {
o.mutex.Lock()
defer o.mutex.Unlock()
- // Previous prevErr is which was saved in previous operation.
+ // prevErr is previous error saved from previous operation.
if o.prevErr != nil || o.isClosed {
return 0, o.prevErr
}
@@ -257,13 +151,27 @@ func (o *Object) Read(b []byte) (n int, err error) {
return 0, io.EOF
}
- // Send current information over control channel to indicate we
- // are ready.
+ // Send current information over control channel to indicate we are ready.
reqMsg := readRequest{}
-
- // Send the offset and pointer to the buffer over the channel.
+ // Send the pointer to the buffer over the channel.
reqMsg.Buffer = b
- reqMsg.Offset = o.currOffset
+
+ // Verify if offset has changed and currOffset is greater than
+ // previous offset. Perhaps due to Seek().
+ offsetChange := o.prevOffset - o.currOffset
+ if offsetChange < 0 {
+ offsetChange = -offsetChange
+ }
+ if offsetChange > 0 {
+ // Fetch the new reader at the current offset again.
+ reqMsg.Offset = o.currOffset
+ reqMsg.DidOffsetChange = true
+ } else {
+ // No offset changes no need to fetch new reader, continue
+ // reading.
+ reqMsg.DidOffsetChange = false
+ reqMsg.Offset = 0
+ }
// Send read request over the control channel.
o.reqCh <- reqMsg
@@ -277,6 +185,9 @@ func (o *Object) Read(b []byte) (n int, err error) {
// Update current offset.
o.currOffset += bytesRead
+ // Save the current offset as previous offset.
+ o.prevOffset = o.currOffset
+
if dataMsg.Error == nil {
// If currOffset read is equal to objectSize
// We have reached end of file, we return io.EOF.
@@ -320,7 +231,7 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
o.mutex.Lock()
defer o.mutex.Unlock()
- // prevErr is which was saved in previous operation.
+ // prevErr is error which was saved in previous operation.
if o.prevErr != nil || o.isClosed {
return 0, o.prevErr
}
@@ -337,7 +248,16 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
// Send the offset and pointer to the buffer over the channel.
reqMsg.Buffer = b
- reqMsg.Offset = offset
+
+ // For ReadAt offset always changes, minor optimization where
+ // offset same as currOffset we don't change the offset.
+ reqMsg.DidOffsetChange = offset != o.currOffset
+ if reqMsg.DidOffsetChange {
+ // Set new offset.
+ reqMsg.Offset = offset
+ // Save new offset as current offset.
+ o.currOffset = offset
+ }
// Send read request over the control channel.
o.reqCh <- reqMsg
@@ -348,10 +268,16 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
// Bytes read.
bytesRead := int64(dataMsg.Size)
+ // Update current offset.
+ o.currOffset += bytesRead
+
+ // Save current offset as previous offset before returning.
+ o.prevOffset = o.currOffset
+
if dataMsg.Error == nil {
- // If offset+bytes read is equal to objectSize
+ // If currentOffset is equal to objectSize
// we have reached end of file, we return io.EOF.
- if offset+bytesRead == o.objectInfo.Size {
+ if o.currOffset >= o.objectInfo.Size {
return dataMsg.Size, io.EOF
}
return dataMsg.Size, nil
@@ -381,7 +307,7 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
defer o.mutex.Unlock()
if o.prevErr != nil {
- // At EOF seeking is legal, for any other errors we return.
+ // Seeking at EOF is legal, so allow only io.EOF; for any other errors we return.
if o.prevErr != io.EOF {
return 0, o.prevErr
}
@@ -391,6 +317,11 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
if offset < 0 && whence != 2 {
return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d.", whence))
}
+
+ // Save current offset as previous offset.
+ o.prevOffset = o.currOffset
+
+ // Switch through whence.
switch whence {
default:
return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence))
@@ -484,8 +415,8 @@ func (c Client) getObject(bucketName, objectName string, offset, length int64) (
customHeader.Set("Range", fmt.Sprintf("bytes=%d", length))
}
- // Instantiate a new request.
- req, err := c.newRequest("GET", requestMetadata{
+ // Execute GET on objectName.
+ resp, err := c.executeMethod("GET", requestMetadata{
bucketName: bucketName,
objectName: objectName,
customHeader: customHeader,
@@ -493,11 +424,6 @@ func (c Client) getObject(bucketName, objectName string, offset, length int64) (
if err != nil {
return nil, ObjectInfo{}, err
}
- // Execute the request.
- resp, err := c.do(req)
- if err != nil {
- return nil, ObjectInfo{}, err
- }
if resp != nil {
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
return nil, ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
@@ -513,11 +439,11 @@ func (c Client) getObject(bucketName, objectName string, offset, length int64) (
if err != nil {
msg := "Last-Modified time format not recognized. " + reportIssue
return nil, ObjectInfo{}, ErrorResponse{
- Code: "InternalError",
- Message: msg,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+ Code: "InternalError",
+ Message: msg,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
}
}
// Get content-type.
diff --git a/vendor/src/github.com/minio/minio-go/api-get-policy.go b/vendor/src/github.com/minio/minio-go/api-get-policy.go
new file mode 100644
index 000000000..bc86ebd04
--- /dev/null
+++ b/vendor/src/github.com/minio/minio-go/api-get-policy.go
@@ -0,0 +1,92 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "sort"
+)
+
+// GetBucketPolicy - get bucket policy at a given path.
+func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy BucketPolicy, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return BucketPolicyNone, err
+ }
+ if err := isValidObjectPrefix(objectPrefix); err != nil {
+ return BucketPolicyNone, err
+ }
+ policy, err := c.getBucketPolicy(bucketName, objectPrefix)
+ if err != nil {
+ return BucketPolicyNone, err
+ }
+ return identifyPolicyType(policy, bucketName, objectPrefix), nil
+}
+
+// Request server for policy.
+func (c Client) getBucketPolicy(bucketName string, objectPrefix string) (BucketAccessPolicy, error) {
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("policy", "")
+
+ // Execute GET on bucket to list objects.
+ resp, err := c.executeMethod("GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return BucketAccessPolicy{}, err
+ }
+ return processBucketPolicyResponse(bucketName, resp)
+
+}
+
+// Processes the GetPolicy http response from the server.
+func processBucketPolicyResponse(bucketName string, resp *http.Response) (BucketAccessPolicy, error) {
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ errResponse := httpRespToErrorResponse(resp, bucketName, "")
+ if ToErrorResponse(errResponse).Code == "NoSuchBucketPolicy" {
+ return BucketAccessPolicy{Version: "2012-10-17"}, nil
+ }
+ return BucketAccessPolicy{}, errResponse
+ }
+ }
+ // Read access policy up to maxAccessPolicySize.
+ // http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
+ // bucket policies are limited to 20KB in size, using a limit reader.
+ bucketPolicyBuf, err := ioutil.ReadAll(io.LimitReader(resp.Body, maxAccessPolicySize))
+ if err != nil {
+ return BucketAccessPolicy{}, err
+ }
+ policy, err := unMarshalBucketPolicy(bucketPolicyBuf)
+ if err != nil {
+ return BucketAccessPolicy{}, err
+ }
+ // Sort the policy actions and resources for convenience.
+ for _, statement := range policy.Statements {
+ sort.Strings(statement.Actions)
+ sort.Strings(statement.Resources)
+ }
+ return policy, nil
+}
diff --git a/vendor/src/github.com/minio/minio-go/api-get-policy_test.go b/vendor/src/github.com/minio/minio-go/api-get-policy_test.go
new file mode 100644
index 000000000..83ae81e03
--- /dev/null
+++ b/vendor/src/github.com/minio/minio-go/api-get-policy_test.go
@@ -0,0 +1,144 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "encoding/json"
+ "encoding/xml"
+ "io/ioutil"
+ "net/http"
+ "reflect"
+ "testing"
+)
+
+type APIError struct {
+ Code string
+ Description string
+ HTTPStatusCode int
+}
+
+// Mocks XML error response from the server.
+func generateErrorResponse(resp *http.Response, APIErr APIError, bucketName string) *http.Response {
+ // generate error response.
+ errorResponse := getAPIErrorResponse(APIErr, bucketName)
+ encodedErrorResponse := encodeResponse(errorResponse)
+ // write Header.
+ resp.StatusCode = APIErr.HTTPStatusCode
+ resp.Body = ioutil.NopCloser(bytes.NewBuffer(encodedErrorResponse))
+
+ return resp
+}
+
+// getErrorResponse gets in standard error and resource value and
+// provides an encodable, populated response value.
+func getAPIErrorResponse(err APIError, bucketName string) ErrorResponse {
+ var data = ErrorResponse{}
+ data.Code = err.Code
+ data.Message = err.Description
+
+ data.BucketName = bucketName
+ // TODO implement this in future
+ return data
+}
+
+// Encodes the response headers into XML format.
+func encodeResponse(response interface{}) []byte {
+ var bytesBuffer bytes.Buffer
+ bytesBuffer.WriteString(xml.Header)
+ encode := xml.NewEncoder(&bytesBuffer)
+ encode.Encode(response)
+ return bytesBuffer.Bytes()
+}
+
+// Mocks valid http response containing bucket policy from server.
+func generatePolicyResponse(resp *http.Response, policy BucketAccessPolicy) (*http.Response, error) {
+ policyBytes, e := json.Marshal(policy)
+ if e != nil {
+ return nil, e
+ }
+ resp.StatusCode = http.StatusOK
+ resp.Body = ioutil.NopCloser(bytes.NewBuffer(policyBytes))
+ return resp, nil
+
+}
+
+// Tests the processing of GetPolicy response from server.
+func TestProcessBucketPolicyResponse(t *testing.T) {
+ bucketAccesPolicies := []BucketAccessPolicy{
+ {Version: "1.0"},
+ {Version: "1.0", Statements: setReadOnlyStatement("minio-bucket", "")},
+ {Version: "1.0", Statements: setReadWriteStatement("minio-bucket", "Asia/")},
+ {Version: "1.0", Statements: setWriteOnlyStatement("minio-bucket", "Asia/India/")},
+ }
+
+ APIErrors := []APIError{
+ {
+ Code: "NoSuchBucketPolicy",
+ Description: "The specified bucket does not have a bucket policy.",
+ HTTPStatusCode: http.StatusNotFound,
+ },
+ }
+ testCases := []struct {
+ bucketName string
+ isAPIError bool
+ apiErr APIError
+ // expected results.
+ expectedResult BucketAccessPolicy
+ err error
+ // flag indicating whether tests should pass.
+ shouldPass bool
+ }{
+ {"my-bucket", true, APIErrors[0], BucketAccessPolicy{Version: "2012-10-17"}, nil, true},
+ {"my-bucket", false, APIError{}, bucketAccesPolicies[0], nil, true},
+ {"my-bucket", false, APIError{}, bucketAccesPolicies[1], nil, true},
+ {"my-bucket", false, APIError{}, bucketAccesPolicies[2], nil, true},
+ {"my-bucket", false, APIError{}, bucketAccesPolicies[3], nil, true},
+ }
+
+ for i, testCase := range testCases {
+ inputResponse := &http.Response{}
+ var err error
+ if testCase.isAPIError {
+ inputResponse = generateErrorResponse(inputResponse, testCase.apiErr, testCase.bucketName)
+ } else {
+ inputResponse, err = generatePolicyResponse(inputResponse, testCase.expectedResult)
+ if err != nil {
+ t.Fatalf("Test %d: Creation of valid response failed", i+1)
+ }
+ }
+ actualResult, err := processBucketPolicyResponse("my-bucket", inputResponse)
+ if err != nil && testCase.shouldPass {
+ t.Errorf("Test %d: Expected to pass, but failed with: %s", i+1, err.Error())
+ }
+ if err == nil && !testCase.shouldPass {
+ t.Errorf("Test %d: Expected to fail with \"%s\", but passed instead", i+1, testCase.err.Error())
+ }
+ // Failed as expected, but does it fail for the expected reason.
+ if err != nil && !testCase.shouldPass {
+ if err.Error() != testCase.err.Error() {
+ t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\"", i+1, testCase.err.Error(), err.Error())
+ }
+ }
+ if err == nil && testCase.shouldPass {
+ if !reflect.DeepEqual(testCase.expectedResult, actualResult) {
+ t.Errorf("Test %d: The expected BucketPolicy doesn't match the actual BucketPolicy", i+1)
+ }
+ }
+
+ }
+}
diff --git a/vendor/src/github.com/minio/minio-go/api-list.go b/vendor/src/github.com/minio/minio-go/api-list.go
index 534ac4eb4..b17a51acc 100644
--- a/vendor/src/github.com/minio/minio-go/api-list.go
+++ b/vendor/src/github.com/minio/minio-go/api-list.go
@@ -1,5 +1,5 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -34,13 +34,8 @@ import (
// }
//
func (c Client) ListBuckets() ([]BucketInfo, error) {
- // Instantiate a new request.
- req, err := c.newRequest("GET", requestMetadata{})
- if err != nil {
- return nil, err
- }
- // Initiate the request.
- resp, err := c.do(req)
+ // Execute GET on service.
+ resp, err := c.executeMethod("GET", requestMetadata{})
defer closeResponse(resp)
if err != nil {
return nil, err
@@ -188,11 +183,11 @@ func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimit
urlValues := make(url.Values)
// Set object prefix.
if objectPrefix != "" {
- urlValues.Set("prefix", urlEncodePath(objectPrefix))
+ urlValues.Set("prefix", objectPrefix)
}
// Set object marker.
if objectMarker != "" {
- urlValues.Set("marker", urlEncodePath(objectMarker))
+ urlValues.Set("marker", objectMarker)
}
// Set delimiter.
if delimiter != "" {
@@ -206,16 +201,11 @@ func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimit
// Set max keys.
urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
- // Initialize a new request.
- req, err := c.newRequest("GET", requestMetadata{
+ // Execute GET on bucket to list objects.
+ resp, err := c.executeMethod("GET", requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
})
- if err != nil {
- return listBucketResult{}, err
- }
- // Execute list buckets.
- resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return listBucketResult{}, err
@@ -366,7 +356,7 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker,
urlValues.Set("uploads", "")
// Set object key marker.
if keyMarker != "" {
- urlValues.Set("key-marker", urlEncodePath(keyMarker))
+ urlValues.Set("key-marker", keyMarker)
}
// Set upload id marker.
if uploadIDMarker != "" {
@@ -374,7 +364,7 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker,
}
// Set prefix marker.
if prefix != "" {
- urlValues.Set("prefix", urlEncodePath(prefix))
+ urlValues.Set("prefix", prefix)
}
// Set delimiter.
if delimiter != "" {
@@ -388,16 +378,11 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker,
// Set max-uploads.
urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))
- // Instantiate a new request.
- req, err := c.newRequest("GET", requestMetadata{
+ // Execute GET on bucketName to list multipart uploads.
+ resp, err := c.executeMethod("GET", requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
})
- if err != nil {
- return listMultipartUploadsResult{}, err
- }
- // Execute list multipart uploads request.
- resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return listMultipartUploadsResult{}, err
@@ -510,16 +495,12 @@ func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, pa
// Set max parts.
urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
- req, err := c.newRequest("GET", requestMetadata{
+ // Execute GET on objectName to get list of parts.
+ resp, err := c.executeMethod("GET", requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
})
- if err != nil {
- return listObjectPartsResult{}, err
- }
- // Exectue list object parts.
- resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return listObjectPartsResult{}, err
diff --git a/vendor/src/github.com/minio/minio-go/api-presigned.go b/vendor/src/github.com/minio/minio-go/api-presigned.go
index 0f350d22e..5d1ab7bb0 100644
--- a/vendor/src/github.com/minio/minio-go/api-presigned.go
+++ b/vendor/src/github.com/minio/minio-go/api-presigned.go
@@ -1,5 +1,5 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,13 +18,26 @@ package minio
import (
"errors"
+ "net/url"
"time"
)
+// supportedGetReqParams - supported request parameters for GET
+// presigned request.
+var supportedGetReqParams = map[string]struct{}{
+ "response-expires": {},
+ "response-content-type": {},
+ "response-cache-control": {},
+ "response-content-disposition": {},
+}
+
// presignURL - Returns a presigned URL for an input 'method'.
// Expires maximum is 7days - ie. 604800 and minimum is 1.
-func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration) (url string, err error) {
+func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (urlStr string, err error) {
// Input validation.
+ if method == "" {
+ return "", ErrInvalidArgument("method cannot be empty.")
+ }
if err := isValidBucketName(bucketName); err != nil {
return "", err
}
@@ -35,35 +48,50 @@ func (c Client) presignURL(method string, bucketName string, objectName string,
return "", err
}
- if method == "" {
- return "", ErrInvalidArgument("method cannot be empty.")
- }
-
+ // Convert expires into seconds.
expireSeconds := int64(expires / time.Second)
- // Instantiate a new request.
- // Since expires is set newRequest will presign the request.
- req, err := c.newRequest(method, requestMetadata{
+ reqMetadata := requestMetadata{
presignURL: true,
bucketName: bucketName,
objectName: objectName,
expires: expireSeconds,
- })
+ }
+
+ // For "GET" we are handling additional request parameters to
+ // override its response headers.
+ if method == "GET" {
+ // Verify if input map has unsupported params, if yes exit.
+ for k := range reqParams {
+ if _, ok := supportedGetReqParams[k]; !ok {
+ return "", ErrInvalidArgument(k + " unsupported request parameter for presigned GET.")
+ }
+ }
+ // Save the request parameters to be used in presigning for
+ // GET request.
+ reqMetadata.queryValues = reqParams
+ }
+
+ // Instantiate a new request.
+ // Since expires is set newRequest will presign the request.
+ req, err := c.newRequest(method, reqMetadata)
if err != nil {
return "", err
}
return req.URL.String(), nil
}
-// PresignedGetObject - Returns a presigned URL to access an object without credentials.
-// Expires maximum is 7days - ie. 604800 and minimum is 1.
-func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration) (url string, err error) {
- return c.presignURL("GET", bucketName, objectName, expires)
+// PresignedGetObject - Returns a presigned URL to access an object
+// without credentials. Expires maximum is 7days - ie. 604800 and
+// minimum is 1. Additionally you can override a set of response
+// headers using the query parameters.
+func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (url string, err error) {
+ return c.presignURL("GET", bucketName, objectName, expires, reqParams)
}
// PresignedPutObject - Returns a presigned URL to upload an object without credentials.
// Expires maximum is 7days - ie. 604800 and minimum is 1.
func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (url string, err error) {
- return c.presignURL("PUT", bucketName, objectName, expires)
+ return c.presignURL("PUT", bucketName, objectName, expires, nil)
}
// PresignedPostPolicy - Returns POST form data to upload an object at a location.
diff --git a/vendor/src/github.com/minio/minio-go/api-put-bucket.go b/vendor/src/github.com/minio/minio-go/api-put-bucket.go
index 986099e34..357f8b992 100644
--- a/vendor/src/github.com/minio/minio-go/api-put-bucket.go
+++ b/vendor/src/github.com/minio/minio-go/api-put-bucket.go
@@ -1,5 +1,5 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,8 +18,11 @@ package minio
import (
"bytes"
+ "encoding/base64"
"encoding/hex"
+ "encoding/json"
"encoding/xml"
+ "fmt"
"io/ioutil"
"net/http"
"net/url"
@@ -27,28 +30,18 @@ import (
/// Bucket operations
-// MakeBucket makes a new bucket.
+// MakeBucket creates a new bucket with bucketName.
//
-// Optional arguments are acl and location - by default all buckets are created
-// with ``private`` acl and in US Standard region.
-//
-// ACL valid values - http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
-//
-// private - owner gets full access [default].
-// public-read - owner gets full access, all others get read access.
-// public-read-write - owner gets full access, all others get full access too.
-// authenticated-read - owner gets full access, authenticated users get read access.
+// Location is an optional argument, by default all buckets are
+// created in US Standard Region.
//
// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html
// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations
-func (c Client) MakeBucket(bucketName string, acl BucketACL, location string) error {
+func (c Client) MakeBucket(bucketName string, location string) error {
// Validate the input arguments.
if err := isValidBucketName(bucketName); err != nil {
return err
}
- if !acl.isValidBucketACL() {
- return ErrInvalidArgument("Unrecognized ACL " + acl.String())
- }
// If location is empty, treat is a default region 'us-east-1'.
if location == "" {
@@ -56,7 +49,7 @@ func (c Client) MakeBucket(bucketName string, acl BucketACL, location string) er
}
// Instantiate the request.
- req, err := c.makeBucketRequest(bucketName, acl, location)
+ req, err := c.makeBucketRequest(bucketName, location)
if err != nil {
return err
}
@@ -74,7 +67,7 @@ func (c Client) MakeBucket(bucketName string, acl BucketACL, location string) er
}
}
- // Save the location into cache on a successfull makeBucket response.
+ // Save the location into cache on a successful makeBucket response.
c.bucketLocCache.Set(bucketName, location)
// Return.
@@ -82,14 +75,11 @@ func (c Client) MakeBucket(bucketName string, acl BucketACL, location string) er
}
// makeBucketRequest constructs request for makeBucket.
-func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location string) (*http.Request, error) {
+func (c Client) makeBucketRequest(bucketName string, location string) (*http.Request, error) {
// Validate input arguments.
if err := isValidBucketName(bucketName); err != nil {
return nil, err
}
- if !acl.isValidBucketACL() {
- return nil, ErrInvalidArgument("Unrecognized ACL " + acl.String())
- }
// In case of Amazon S3. The make bucket issued on already
// existing bucket would fail with 'AuthorizationMalformed' error
@@ -106,12 +96,6 @@ func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location str
return nil, err
}
- // by default bucket acl is set to private.
- req.Header.Set("x-amz-acl", "private")
- if acl != "" {
- req.Header.Set("x-amz-acl", string(acl))
- }
-
// set UserAgent for the request.
c.setUserAgent(req)
@@ -131,9 +115,12 @@ func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location str
}
createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes)
req.Body = ioutil.NopCloser(createBucketConfigBuffer)
- req.ContentLength = int64(createBucketConfigBuffer.Len())
+ req.ContentLength = int64(len(createBucketConfigBytes))
+ // Set content-md5.
+ req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sumMD5(createBucketConfigBytes)))
if c.signature.isV4() {
- req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBuffer.Bytes())))
+ // Set sha256.
+ req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBytes)))
}
}
@@ -150,60 +137,89 @@ func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location str
return req, nil
}
-// SetBucketACL set the permissions on an existing bucket using access control lists (ACL).
+// SetBucketPolicy set the access permissions on an existing bucket.
//
// For example
//
-// private - owner gets full access [default].
-// public-read - owner gets full access, all others get read access.
-// public-read-write - owner gets full access, all others get full access too.
-// authenticated-read - owner gets full access, authenticated users get read access.
-func (c Client) SetBucketACL(bucketName string, acl BucketACL) error {
+// none - owner gets full access [default].
+// readonly - anonymous get access for everyone at a given object prefix.
+// readwrite - anonymous list/put/delete access to a given object prefix.
+// writeonly - anonymous put/delete access to a given object prefix.
+func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPolicy BucketPolicy) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return err
}
- if !acl.isValidBucketACL() {
- return ErrInvalidArgument("Unrecognized ACL " + acl.String())
+ if err := isValidObjectPrefix(objectPrefix); err != nil {
+ return err
+ }
+ if !bucketPolicy.isValidBucketPolicy() {
+ return ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", bucketPolicy))
+ }
+ policy, err := c.getBucketPolicy(bucketName, objectPrefix)
+ if err != nil {
+ return err
+ }
+ // For bucket policy set to 'none' we need to remove the policy.
+ if bucketPolicy == BucketPolicyNone && policy.Statements == nil {
+ // No policies to set, return success.
+ return nil
+ }
+ // Remove any previous policies at this path.
+ policy.Statements = removeBucketPolicyStatement(policy.Statements, bucketName, objectPrefix)
+
+ // generating []Statement for the given bucketPolicy.
+ statements, err := generatePolicyStatement(bucketPolicy, bucketName, objectPrefix)
+ if err != nil {
+ return err
+ }
+ policy.Statements = append(policy.Statements, statements...)
+ // Save the updated policies.
+ return c.putBucketPolicy(bucketName, policy)
+}
+
+// Saves a new bucket policy.
+func (c Client) putBucketPolicy(bucketName string, policy BucketAccessPolicy) error {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
}
- // Set acl query.
+ // If there are no policy statements, we should remove entire policy.
+ if len(policy.Statements) == 0 {
+ return c.removeBucketPolicy(bucketName)
+ }
+
+ // Get resources properly escaped and lined up before
+ // using them in http request.
urlValues := make(url.Values)
- urlValues.Set("acl", "")
+ urlValues.Set("policy", "")
- // Add misc headers.
- customHeader := make(http.Header)
-
- if acl != "" {
- customHeader.Set("x-amz-acl", acl.String())
- } else {
- customHeader.Set("x-amz-acl", "private")
- }
-
- // Instantiate a new request.
- req, err := c.newRequest("PUT", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- customHeader: customHeader,
- })
+ policyBytes, err := json.Marshal(&policy)
if err != nil {
return err
}
- // Initiate the request.
- resp, err := c.do(req)
+ policyBuffer := bytes.NewReader(policyBytes)
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: policyBuffer,
+ contentLength: int64(len(policyBytes)),
+ contentMD5Bytes: sumMD5(policyBytes),
+ contentSHA256Bytes: sum256(policyBytes),
+ }
+
+ // Execute PUT to upload a new bucket policy.
+ resp, err := c.executeMethod("PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return err
}
-
if resp != nil {
- // if error return.
- if resp.StatusCode != http.StatusOK {
+ if resp.StatusCode != http.StatusNoContent {
return httpRespToErrorResponse(resp, bucketName, "")
}
}
-
- // return
return nil
}
diff --git a/vendor/src/github.com/minio/minio-go/api-put-bucket_test.go b/vendor/src/github.com/minio/minio-go/api-put-bucket_test.go
new file mode 100644
index 000000000..ce8b27dd0
--- /dev/null
+++ b/vendor/src/github.com/minio/minio-go/api-put-bucket_test.go
@@ -0,0 +1,270 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/xml"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "testing"
+)
+
+// Generates expected http request for bucket creation.
+// Used for asserting with the actual request generated.
+func createExpectedRequest(c *Client, bucketName string, location string, req *http.Request) (*http.Request, error) {
+
+ targetURL := *c.endpointURL
+ targetURL.Path = "/" + bucketName + "/"
+
+ // get a new HTTP request for the method.
+ req, err := http.NewRequest("PUT", targetURL.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // set UserAgent for the request.
+ c.setUserAgent(req)
+
+ // set sha256 sum for signature calculation only with signature version '4'.
+ if c.signature.isV4() {
+ req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
+ }
+
+ // If location is not 'us-east-1' create bucket location config.
+ if location != "us-east-1" && location != "" {
+ createBucketConfig := createBucketConfiguration{}
+ createBucketConfig.Location = location
+ var createBucketConfigBytes []byte
+ createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
+ if err != nil {
+ return nil, err
+ }
+ createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes)
+ req.Body = ioutil.NopCloser(createBucketConfigBuffer)
+ req.ContentLength = int64(len(createBucketConfigBytes))
+ // Set content-md5.
+ req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sumMD5(createBucketConfigBytes)))
+ if c.signature.isV4() {
+ // Set sha256.
+ req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBytes)))
+ }
+ }
+
+ // Sign the request.
+ if c.signature.isV4() {
+ // Signature calculated for MakeBucket request should be for 'us-east-1',
+ // regardless of the bucket's location constraint.
+ req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
+ } else if c.signature.isV2() {
+ req = signV2(*req, c.accessKeyID, c.secretAccessKey)
+ }
+
+ // Return signed request.
+ return req, nil
+}
+
+// Get Request body.
+func getReqBody(reqBody io.ReadCloser) (string, error) {
+ contents, err := ioutil.ReadAll(reqBody)
+ if err != nil {
+ return "", err
+ }
+ return string(contents), nil
+}
+
+// Tests validate http request formulated for creation of bucket.
+func TestMakeBucketRequest(t *testing.T) {
+ // Info for 'Client' creation.
+ // Will be used as arguments for 'NewClient'.
+ type infoForClient struct {
+ endPoint string
+ accessKey string
+ secretKey string
+ enableInsecure bool
+ }
+ // dataset for 'NewClient' call.
+ info := []infoForClient{
+ // endpoint localhost.
+ // both access-key and secret-key are empty.
+ {"localhost:9000", "", "", false},
+ // both access-key and secret-key exist.
+ {"localhost:9000", "my-access-key", "my-secret-key", false},
+ // one of access-key and secret-key is empty.
+ {"localhost:9000", "", "my-secret-key", false},
+
+ // endpoint amazon s3.
+ {"s3.amazonaws.com", "", "", false},
+ {"s3.amazonaws.com", "my-access-key", "my-secret-key", false},
+ {"s3.amazonaws.com", "my-acess-key", "", false},
+
+ // endpoint google cloud storage.
+ {"storage.googleapis.com", "", "", false},
+ {"storage.googleapis.com", "my-access-key", "my-secret-key", false},
+ {"storage.googleapis.com", "", "my-secret-key", false},
+
+ // endpoint custom domain running Minio server.
+ {"play.minio.io", "", "", false},
+ {"play.minio.io", "my-access-key", "my-secret-key", false},
+ {"play.minio.io", "my-acess-key", "", false},
+ }
+
+ testCases := []struct {
+ bucketName string
+ location string
+ // data for new client creation.
+ info infoForClient
+ // error in the output.
+ err error
+ // flag indicating whether tests should pass.
+ shouldPass bool
+ }{
+ // Test cases with Invalid bucket name.
+ {".mybucket", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot."), false},
+ {"mybucket.", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot."), false},
+ {"mybucket-", "", infoForClient{}, ErrInvalidBucketName("Bucket name contains invalid characters."), false},
+ {"my", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters."), false},
+ {"", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot be empty."), false},
+ {"my..bucket", "", infoForClient{}, ErrInvalidBucketName("Bucket name cannot have successive periods."), false},
+
+ // Test case with all valid values for S3 bucket location.
+ // Client is constructed using the info struct.
+ // case with empty location.
+ {"my-bucket", "", info[0], nil, true},
+ // case with location set to standard 'us-east-1'.
+ {"my-bucket", "us-east-1", info[0], nil, true},
+ // case with location set to a value different from 'us-east-1'.
+ {"my-bucket", "eu-central-1", info[0], nil, true},
+
+ {"my-bucket", "", info[1], nil, true},
+ {"my-bucket", "us-east-1", info[1], nil, true},
+ {"my-bucket", "eu-central-1", info[1], nil, true},
+
+ {"my-bucket", "", info[2], nil, true},
+ {"my-bucket", "us-east-1", info[2], nil, true},
+ {"my-bucket", "eu-central-1", info[2], nil, true},
+
+ {"my-bucket", "", info[3], nil, true},
+ {"my-bucket", "us-east-1", info[3], nil, true},
+ {"my-bucket", "eu-central-1", info[3], nil, true},
+
+ {"my-bucket", "", info[4], nil, true},
+ {"my-bucket", "us-east-1", info[4], nil, true},
+ {"my-bucket", "eu-central-1", info[4], nil, true},
+
+ {"my-bucket", "", info[5], nil, true},
+ {"my-bucket", "us-east-1", info[5], nil, true},
+ {"my-bucket", "eu-central-1", info[5], nil, true},
+
+ {"my-bucket", "", info[6], nil, true},
+ {"my-bucket", "us-east-1", info[6], nil, true},
+ {"my-bucket", "eu-central-1", info[6], nil, true},
+
+ {"my-bucket", "", info[7], nil, true},
+ {"my-bucket", "us-east-1", info[7], nil, true},
+ {"my-bucket", "eu-central-1", info[7], nil, true},
+
+ {"my-bucket", "", info[8], nil, true},
+ {"my-bucket", "us-east-1", info[8], nil, true},
+ {"my-bucket", "eu-central-1", info[8], nil, true},
+
+ {"my-bucket", "", info[9], nil, true},
+ {"my-bucket", "us-east-1", info[9], nil, true},
+ {"my-bucket", "eu-central-1", info[9], nil, true},
+
+ {"my-bucket", "", info[10], nil, true},
+ {"my-bucket", "us-east-1", info[10], nil, true},
+ {"my-bucket", "eu-central-1", info[10], nil, true},
+
+ {"my-bucket", "", info[11], nil, true},
+ {"my-bucket", "us-east-1", info[11], nil, true},
+ {"my-bucket", "eu-central-1", info[11], nil, true},
+ }
+
+ for i, testCase := range testCases {
+ // cannot create a newclient with empty endPoint value.
+ // validates and creates a new client only if the endPoint value is not empty.
+ client := &Client{}
+ var err error
+ if testCase.info.endPoint != "" {
+
+ client, err = New(testCase.info.endPoint, testCase.info.accessKey, testCase.info.secretKey, testCase.info.enableInsecure)
+ if err != nil {
+ t.Fatalf("Test %d: Failed to create new Client: %s", i+1, err.Error())
+ }
+ }
+
+ actualReq, err := client.makeBucketRequest(testCase.bucketName, testCase.location)
+ if err != nil && testCase.shouldPass {
+ t.Errorf("Test %d: Expected to pass, but failed with: %s", i+1, err.Error())
+ }
+ if err == nil && !testCase.shouldPass {
+ t.Errorf("Test %d: Expected to fail with \"%s\", but passed instead", i+1, testCase.err.Error())
+ }
+ // Failed as expected, but does it fail for the expected reason.
+ if err != nil && !testCase.shouldPass {
+ if err.Error() != testCase.err.Error() {
+ t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\"", i+1, testCase.err.Error(), err.Error())
+ }
+ }
+
+ // Test passes as expected, but the output values are verified for correctness here.
+ if err == nil && testCase.shouldPass {
+ expectedReq := &http.Request{}
+ expectedReq, err = createExpectedRequest(client, testCase.bucketName, testCase.location, expectedReq)
+ if err != nil {
+ t.Fatalf("Test %d: Expected request Creation failed", i+1)
+ }
+ if expectedReq.Method != actualReq.Method {
+ t.Errorf("Test %d: The expected Request method doesn't match with the actual one", i+1)
+ }
+ if expectedReq.URL.String() != actualReq.URL.String() {
+ t.Errorf("Test %d: Expected the request URL to be '%s', but instead found '%s'", i+1, expectedReq.URL.String(), actualReq.URL.String())
+ }
+ if expectedReq.ContentLength != actualReq.ContentLength {
+ t.Errorf("Test %d: Expected the request body Content-Length to be '%d', but found '%d' instead", i+1, expectedReq.ContentLength, actualReq.ContentLength)
+ }
+
+ if expectedReq.Header.Get("X-Amz-Content-Sha256") != actualReq.Header.Get("X-Amz-Content-Sha256") {
+ t.Errorf("Test %d: 'X-Amz-Content-Sha256' header of the expected request doesn't match with that of the actual request", i+1)
+ }
+ if expectedReq.Header.Get("User-Agent") != actualReq.Header.Get("User-Agent") {
+ t.Errorf("Test %d: Expected 'User-Agent' header to be \"%s\", but found \"%s\" instead", i+1, expectedReq.Header.Get("User-Agent"), actualReq.Header.Get("User-Agent"))
+ }
+
+ if testCase.location != "us-east-1" && testCase.location != "" {
+ expectedContent, err := getReqBody(expectedReq.Body)
+ if err != nil {
+ t.Fatalf("Test %d: Couldn't parse request body", i+1)
+ }
+ actualContent, err := getReqBody(actualReq.Body)
+ if err != nil {
+ t.Fatalf("Test %d: Couldn't parse request body", i+1)
+ }
+ if expectedContent != actualContent {
+ t.Errorf("Test %d: Expected request body doesn't match actual content body", i+1)
+ }
+ if expectedReq.Header.Get("Content-Md5") != actualReq.Header.Get("Content-Md5") {
+ t.Errorf("Test %d: Request body Md5 differs from the expected result", i+1)
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/src/github.com/minio/minio-go/api-put-object-common.go b/vendor/src/github.com/minio/minio-go/api-put-object-common.go
index 1584497bb..937c74d46 100644
--- a/vendor/src/github.com/minio/minio-go/api-put-object-common.go
+++ b/vendor/src/github.com/minio/minio-go/api-put-object-common.go
@@ -19,6 +19,7 @@ package minio
import (
"crypto/md5"
"crypto/sha256"
+ "fmt"
"hash"
"io"
"math"
@@ -55,7 +56,7 @@ func shouldUploadPart(objPart objectPart, objectParts map[int]objectPart) bool {
return true
}
// if md5sum mismatches should upload the part.
- if objPart.ETag == uploadedPart.ETag {
+ if objPart.ETag != uploadedPart.ETag {
return true
}
return false
@@ -94,62 +95,13 @@ func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, las
return totalPartsCount, partSize, lastPartSize, nil
}
-// Compatibility code for Golang < 1.5.x.
-// copyBuffer is identical to io.CopyBuffer, since such a function is
-// not available/implemented in Golang version < 1.5.x, we use a
-// custom call exactly implementng io.CopyBuffer from Golang > 1.5.x
-// version does.
+// hashCopyBuffer is identical to hashCopyN except that it doesn't take
+// any size argument but takes a buffer argument and reader should be
+// of io.ReaderAt interface.
//
-// copyBuffer stages through the provided buffer (if one is required)
-// rather than allocating a temporary one. If buf is nil, one is
-// allocated; otherwise if it has zero length, copyBuffer panics.
-//
-// FIXME: Remove this code when distributions move to newer Golang versions.
-func copyBuffer(writer io.Writer, reader io.Reader, buf []byte) (written int64, err error) {
- // If the reader has a WriteTo method, use it to do the copy.
- // Avoids an allocation and a copy.
- if wt, ok := reader.(io.WriterTo); ok {
- return wt.WriteTo(writer)
- }
- // Similarly, if the writer has a ReadFrom method, use it to do
- // the copy.
- if rt, ok := writer.(io.ReaderFrom); ok {
- return rt.ReadFrom(reader)
- }
- if buf == nil {
- buf = make([]byte, 32*1024)
- }
- for {
- nr, er := reader.Read(buf)
- if nr > 0 {
- nw, ew := writer.Write(buf[0:nr])
- if nw > 0 {
- written += int64(nw)
- }
- if ew != nil {
- err = ew
- break
- }
- if nr != nw {
- err = io.ErrShortWrite
- break
- }
- }
- if er == io.EOF {
- break
- }
- if er != nil {
- err = er
- break
- }
- }
- return written, err
-}
-
-// hashCopyBuffer is identical to hashCopyN except that it stages
-// through the provided buffer (if one is required) rather than
-// allocating a temporary one. If buf is nil, one is allocated for 5MiB.
-func (c Client) hashCopyBuffer(writer io.Writer, reader io.Reader, buf []byte) (md5Sum, sha256Sum []byte, size int64, err error) {
+// Stages reads from offsets into the buffer, if buffer is nil it is
+// initialized to optimalBufferSize.
+func (c Client) hashCopyBuffer(writer io.Writer, reader io.ReaderAt, buf []byte) (md5Sum, sha256Sum []byte, size int64, err error) {
// MD5 and SHA256 hasher.
var hashMD5, hashSHA256 hash.Hash
// MD5 and SHA256 hasher.
@@ -160,14 +112,61 @@ func (c Client) hashCopyBuffer(writer io.Writer, reader io.Reader, buf []byte) (
hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256)
}
- // Allocate buf if not initialized.
+ // Buffer is nil, initialize.
if buf == nil {
buf = make([]byte, optimalReadBufferSize)
}
+ // Offset to start reading from.
+ var readAtOffset int64
+
+ // Following block reads data at an offset from the input
+ // reader and copies data into a local temporary file.
+ for {
+ readAtSize, rerr := reader.ReadAt(buf, readAtOffset)
+ if rerr != nil {
+ if rerr != io.EOF {
+ return nil, nil, 0, rerr
+ }
+ }
+ writeSize, werr := hashWriter.Write(buf[:readAtSize])
+ if werr != nil {
+ return nil, nil, 0, werr
+ }
+ if readAtSize != writeSize {
+ return nil, nil, 0, fmt.Errorf("Read size was not completely written to writer. wanted %d, got %d - %s", readAtSize, writeSize, reportIssue)
+ }
+ readAtOffset += int64(writeSize)
+ size += int64(writeSize)
+ if rerr == io.EOF {
+ break
+ }
+ }
+
+ // Finalize md5 sum and sha256 sum.
+ md5Sum = hashMD5.Sum(nil)
+ if c.signature.isV4() {
+ sha256Sum = hashSHA256.Sum(nil)
+ }
+ return md5Sum, sha256Sum, size, err
+}
+
+// hashCopy is identical to hashCopyN except that it doesn't take
+// any size argument.
+func (c Client) hashCopy(writer io.Writer, reader io.Reader) (md5Sum, sha256Sum []byte, size int64, err error) {
+ // MD5 and SHA256 hasher.
+ var hashMD5, hashSHA256 hash.Hash
+ // MD5 and SHA256 hasher.
+ hashMD5 = md5.New()
+ hashWriter := io.MultiWriter(writer, hashMD5)
+ if c.signature.isV4() {
+ hashSHA256 = sha256.New()
+ hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256)
+ }
+
// Using copyBuffer to copy in large buffers, default buffer
// for io.Copy of 32KiB is too small.
- size, err = copyBuffer(hashWriter, reader, buf)
+ size, err = io.Copy(hashWriter, reader)
if err != nil {
return nil, nil, 0, err
}
@@ -244,12 +243,8 @@ func (c Client) getUploadID(bucketName, objectName, contentType string) (uploadI
return uploadID, isNew, nil
}
-// computeHashBuffer - Calculates MD5 and SHA256 for an input read
-// Seeker is identical to computeHash except that it stages
-// through the provided buffer (if one is required) rather than
-// allocating a temporary one. If buf is nil, it uses a temporary
-// buffer.
-func (c Client) computeHashBuffer(reader io.ReadSeeker, buf []byte) (md5Sum, sha256Sum []byte, size int64, err error) {
+// computeHash - Calculates MD5 and SHA256 for an input read Seeker.
+func (c Client) computeHash(reader io.ReadSeeker) (md5Sum, sha256Sum []byte, size int64, err error) {
// MD5 and SHA256 hasher.
var hashMD5, hashSHA256 hash.Hash
// MD5 and SHA256 hasher.
@@ -261,16 +256,9 @@ func (c Client) computeHashBuffer(reader io.ReadSeeker, buf []byte) (md5Sum, sha
}
// If no buffer is provided, no need to allocate just use io.Copy.
- if buf == nil {
- size, err = io.Copy(hashWriter, reader)
- if err != nil {
- return nil, nil, 0, err
- }
- } else {
- size, err = copyBuffer(hashWriter, reader, buf)
- if err != nil {
- return nil, nil, 0, err
- }
+ size, err = io.Copy(hashWriter, reader)
+ if err != nil {
+ return nil, nil, 0, err
}
// Seek back reader to the beginning location.
@@ -285,8 +273,3 @@ func (c Client) computeHashBuffer(reader io.ReadSeeker, buf []byte) (md5Sum, sha
}
return md5Sum, sha256Sum, size, nil
}
-
-// computeHash - Calculates MD5 and SHA256 for an input read Seeker.
-func (c Client) computeHash(reader io.ReadSeeker) (md5Sum, sha256Sum []byte, size int64, err error) {
- return c.computeHashBuffer(reader, nil)
-}
diff --git a/vendor/src/github.com/minio/minio-go/api-put-object-copy.go b/vendor/src/github.com/minio/minio-go/api-put-object-copy.go
new file mode 100644
index 000000000..45d5693fc
--- /dev/null
+++ b/vendor/src/github.com/minio/minio-go/api-put-object-copy.go
@@ -0,0 +1,68 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import "net/http"
+
+// CopyObject - copy a source object into a new object with the provided name in the provided bucket
+func (c Client) CopyObject(bucketName string, objectName string, objectSource string, cpCond CopyConditions) error {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := isValidObjectName(objectName); err != nil {
+ return err
+ }
+ if objectSource == "" {
+ return ErrInvalidArgument("Object source cannot be empty.")
+ }
+
+ // customHeaders apply headers.
+ customHeaders := make(http.Header)
+ for _, cond := range cpCond.conditions {
+ customHeaders.Set(cond.key, cond.value)
+ }
+
+ // Set copy source.
+ customHeaders.Set("x-amz-copy-source", objectSource)
+
+ // Execute PUT on objectName.
+ resp, err := c.executeMethod("PUT", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ customHeader: customHeaders,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+
+ // Decode copy response on success.
+ cpObjRes := copyObjectResult{}
+ err = xmlDecoder(resp.Body, &cpObjRes)
+ if err != nil {
+ return err
+ }
+
+ // Return nil on success.
+ return nil
+}
diff --git a/vendor/src/github.com/minio/minio-go/api-put-object-file.go b/vendor/src/github.com/minio/minio-go/api-put-object-file.go
index d212dfb87..e7a1a9685 100644
--- a/vendor/src/github.com/minio/minio-go/api-put-object-file.go
+++ b/vendor/src/github.com/minio/minio-go/api-put-object-file.go
@@ -1,5 +1,5 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,7 +21,9 @@ import (
"fmt"
"io"
"io/ioutil"
+ "mime"
"os"
+ "path/filepath"
"sort"
)
@@ -57,6 +59,14 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
return 0, ErrEntityTooLarge(fileSize, maxMultipartPutObjectSize, bucketName, objectName)
}
+ // Set contentType based on filepath extension if not given or default
+ // value of "binary/octet-stream" if the extension has no associated type.
+ if contentType == "" {
+ if contentType = mime.TypeByExtension(filepath.Ext(filePath)); contentType == "" {
+ contentType = "application/octet-stream"
+ }
+ }
+
// NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
// Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers.
if isGoogleEndpoint(c.endpointURL) {
@@ -187,7 +197,7 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
}, partsInfo) {
// Proceed to upload the part.
var objPart objectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID, ioutil.NopCloser(reader), partNumber,
+ objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber,
md5Sum, sha256Sum, prtSize)
if err != nil {
return totalUploadedSize, err
diff --git a/vendor/src/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/src/github.com/minio/minio-go/api-put-object-multipart.go
index ee0019165..be96ad232 100644
--- a/vendor/src/github.com/minio/minio-go/api-put-object-multipart.go
+++ b/vendor/src/github.com/minio/minio-go/api-put-object-multipart.go
@@ -1,5 +1,5 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -134,8 +134,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
}, partsInfo) {
// Proceed to upload the part.
var objPart objectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID, ioutil.NopCloser(reader), partNumber,
- md5Sum, sha256Sum, prtSize)
+ objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, md5Sum, sha256Sum, prtSize)
if err != nil {
// Reset the temporary buffer upon any error.
tmpBuffer.Reset()
@@ -230,14 +229,8 @@ func (c Client) initiateMultipartUpload(bucketName, objectName, contentType stri
customHeader: customHeader,
}
- // Instantiate the request.
- req, err := c.newRequest("POST", reqMetadata)
- if err != nil {
- return initiateMultipartUploadResult{}, err
- }
-
- // Execute the request.
- resp, err := c.do(req)
+ // Execute POST on an objectName to initiate multipart upload.
+ resp, err := c.executeMethod("POST", reqMetadata)
defer closeResponse(resp)
if err != nil {
return initiateMultipartUploadResult{}, err
@@ -257,7 +250,7 @@ func (c Client) initiateMultipartUpload(bucketName, objectName, contentType stri
}
// uploadPart - Uploads a part in a multipart upload.
-func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.ReadCloser, partNumber int, md5Sum, sha256Sum []byte, size int64) (objectPart, error) {
+func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, partNumber int, md5Sum, sha256Sum []byte, size int64) (objectPart, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return objectPart{}, err
@@ -295,13 +288,8 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re
contentSHA256Bytes: sha256Sum,
}
- // Instantiate a request.
- req, err := c.newRequest("PUT", reqMetadata)
- if err != nil {
- return objectPart{}, err
- }
- // Execute the request.
- resp, err := c.do(req)
+ // Execute PUT on each part.
+ resp, err := c.executeMethod("PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return objectPart{}, err
@@ -342,24 +330,18 @@ func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string,
}
// Instantiate all the complete multipart buffer.
- completeMultipartUploadBuffer := bytes.NewBuffer(completeMultipartUploadBytes)
+ completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
- contentBody: ioutil.NopCloser(completeMultipartUploadBuffer),
- contentLength: int64(completeMultipartUploadBuffer.Len()),
- contentSHA256Bytes: sum256(completeMultipartUploadBuffer.Bytes()),
+ contentBody: completeMultipartUploadBuffer,
+ contentLength: int64(len(completeMultipartUploadBytes)),
+ contentSHA256Bytes: sum256(completeMultipartUploadBytes),
}
- // Instantiate the request.
- req, err := c.newRequest("POST", reqMetadata)
- if err != nil {
- return completeMultipartUploadResult{}, err
- }
-
- // Execute the request.
- resp, err := c.do(req)
+ // Execute POST to complete multipart upload for an objectName.
+ resp, err := c.executeMethod("POST", reqMetadata)
defer closeResponse(resp)
if err != nil {
return completeMultipartUploadResult{}, err
diff --git a/vendor/src/github.com/minio/minio-go/api-put-object-progress.go b/vendor/src/github.com/minio/minio-go/api-put-object-progress.go
index ae4425d49..ebbc380c3 100644
--- a/vendor/src/github.com/minio/minio-go/api-put-object-progress.go
+++ b/vendor/src/github.com/minio/minio-go/api-put-object-progress.go
@@ -91,7 +91,7 @@ func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.R
errResp := ToErrorResponse(err)
// Verify if multipart functionality is not available, if not
// fall back to single PutObject operation.
- if errResp.Code == "NotImplemented" {
+ if errResp.Code == "AccessDenied" && errResp.Message == "Access Denied." {
// Verify if size of reader is greater than '5GiB'.
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
diff --git a/vendor/src/github.com/minio/minio-go/api-put-object-readat.go b/vendor/src/github.com/minio/minio-go/api-put-object-readat.go
index ddb1ab3dc..957e3380e 100644
--- a/vendor/src/github.com/minio/minio-go/api-put-object-readat.go
+++ b/vendor/src/github.com/minio/minio-go/api-put-object-readat.go
@@ -1,5 +1,5 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -97,7 +97,7 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
tmpBuffer := new(bytes.Buffer)
// Read defaults to reading at 5MiB buffer.
- readBuffer := make([]byte, optimalReadBufferSize)
+ readAtBuffer := make([]byte, optimalReadBufferSize)
// Upload all the missing parts.
for partNumber <= lastPartNumber {
@@ -147,7 +147,7 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
// Calculates MD5 and SHA256 sum for a section reader.
var md5Sum, sha256Sum []byte
var prtSize int64
- md5Sum, sha256Sum, prtSize, err = c.hashCopyBuffer(tmpBuffer, sectionReader, readBuffer)
+ md5Sum, sha256Sum, prtSize, err = c.hashCopyBuffer(tmpBuffer, sectionReader, readAtBuffer)
if err != nil {
return 0, err
}
@@ -159,8 +159,7 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
// Proceed to upload the part.
var objPart objectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID, ioutil.NopCloser(reader),
- partNumber, md5Sum, sha256Sum, prtSize)
+ objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, md5Sum, sha256Sum, prtSize)
if err != nil {
// Reset the buffer upon any error.
tmpBuffer.Reset()
diff --git a/vendor/src/github.com/minio/minio-go/api-put-object.go b/vendor/src/github.com/minio/minio-go/api-put-object.go
index a09e658f4..10390c6c5 100644
--- a/vendor/src/github.com/minio/minio-go/api-put-object.go
+++ b/vendor/src/github.com/minio/minio-go/api-put-object.go
@@ -1,5 +1,5 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -38,7 +38,7 @@ func getReaderSize(reader io.Reader) (size int64, err error) {
if lenFn.Kind() == reflect.Func {
// Call the 'Size' function and save its return value.
result = lenFn.Call([]reflect.Value{})
- if result != nil && len(result) == 1 {
+ if len(result) == 1 {
lenValue := result[0]
if lenValue.IsValid() {
switch lenValue.Kind() {
@@ -146,11 +146,11 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea
// Update progress reader appropriately to the latest offset as we
// read from the source.
- reader = newHook(reader, progress)
+ readSeeker := newHook(reader, progress)
// This function does not calculate sha256 and md5sum for payload.
// Execute put object.
- st, err := c.putObjectDo(bucketName, objectName, ioutil.NopCloser(reader), nil, nil, size, contentType)
+ st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, contentType)
if err != nil {
return 0, err
}
@@ -178,12 +178,12 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
size = maxSinglePutObjectSize
}
var md5Sum, sha256Sum []byte
- var readCloser io.ReadCloser
if size <= minPartSize {
// Initialize a new temporary buffer.
tmpBuffer := new(bytes.Buffer)
md5Sum, sha256Sum, size, err = c.hashCopyN(tmpBuffer, reader, size)
- readCloser = ioutil.NopCloser(tmpBuffer)
+ reader = bytes.NewReader(tmpBuffer.Bytes())
+ tmpBuffer.Reset()
} else {
// Initialize a new temporary file.
var tmpFile *tempFile
@@ -191,12 +191,13 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
if err != nil {
return 0, err
}
+ defer tmpFile.Close()
md5Sum, sha256Sum, size, err = c.hashCopyN(tmpFile, reader, size)
// Seek back to beginning of the temporary file.
if _, err = tmpFile.Seek(0, 0); err != nil {
return 0, err
}
- readCloser = tmpFile
+ reader = tmpFile
}
// Return error if its not io.EOF.
if err != nil {
@@ -204,26 +205,26 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
return 0, err
}
}
- // Progress the reader to the size.
- if progress != nil {
- if _, err = io.CopyN(ioutil.Discard, progress, size); err != nil {
- return size, err
- }
- }
// Execute put object.
- st, err := c.putObjectDo(bucketName, objectName, readCloser, md5Sum, sha256Sum, size, contentType)
+ st, err := c.putObjectDo(bucketName, objectName, reader, md5Sum, sha256Sum, size, contentType)
if err != nil {
return 0, err
}
if st.Size != size {
return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
}
+ // Progress the reader to the size if putObjectDo is successful.
+ if progress != nil {
+ if _, err = io.CopyN(ioutil.Discard, progress, size); err != nil {
+ return size, err
+ }
+ }
return size, nil
}
// putObjectDo - executes the put object http operation.
// NOTE: You must have WRITE permissions on a bucket to add an object to it.
-func (c Client) putObjectDo(bucketName, objectName string, reader io.ReadCloser, md5Sum []byte, sha256Sum []byte, size int64, contentType string) (ObjectInfo, error) {
+func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, contentType string) (ObjectInfo, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
@@ -258,13 +259,9 @@ func (c Client) putObjectDo(bucketName, objectName string, reader io.ReadCloser,
contentMD5Bytes: md5Sum,
contentSHA256Bytes: sha256Sum,
}
- // Initiate new request.
- req, err := c.newRequest("PUT", reqMetadata)
- if err != nil {
- return ObjectInfo{}, err
- }
- // Execute the request.
- resp, err := c.do(req)
+
+ // Execute PUT on an objectName.
+ resp, err := c.executeMethod("PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return ObjectInfo{}, err
diff --git a/vendor/src/github.com/minio/minio-go/api-remove.go b/vendor/src/github.com/minio/minio-go/api-remove.go
index 8f59c15e6..bd5842828 100644
--- a/vendor/src/github.com/minio/minio-go/api-remove.go
+++ b/vendor/src/github.com/minio/minio-go/api-remove.go
@@ -1,5 +1,5 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -30,15 +30,10 @@ func (c Client) RemoveBucket(bucketName string) error {
if err := isValidBucketName(bucketName); err != nil {
return err
}
- // Instantiate a new request.
- req, err := c.newRequest("DELETE", requestMetadata{
+ // Execute DELETE on bucket.
+ resp, err := c.executeMethod("DELETE", requestMetadata{
bucketName: bucketName,
})
- if err != nil {
- return err
- }
- // Initiate the request.
- resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return err
@@ -55,6 +50,54 @@ func (c Client) RemoveBucket(bucketName string) error {
return nil
}
+// RemoveBucketPolicy remove a bucket policy on given path.
+func (c Client) RemoveBucketPolicy(bucketName, objectPrefix string) error {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := isValidObjectPrefix(objectPrefix); err != nil {
+ return err
+ }
+ policy, err := c.getBucketPolicy(bucketName, objectPrefix)
+ if err != nil {
+ return err
+ }
+ // No bucket policy found, nothing to remove; return success.
+ if policy.Statements == nil {
+ return nil
+ }
+
+ // Save new statements after removing requested bucket policy.
+ policy.Statements = removeBucketPolicyStatement(policy.Statements, bucketName, objectPrefix)
+
+ // Commit the update policy.
+ return c.putBucketPolicy(bucketName, policy)
+}
+
+// Removes all policies on a bucket.
+func (c Client) removeBucketPolicy(bucketName string) error {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("policy", "")
+
+ // Execute DELETE on bucketName to remove its policy.
+ resp, err := c.executeMethod("DELETE", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
// RemoveObject remove an object from a bucket.
func (c Client) RemoveObject(bucketName, objectName string) error {
// Input validation.
@@ -64,16 +107,11 @@ func (c Client) RemoveObject(bucketName, objectName string) error {
if err := isValidObjectName(objectName); err != nil {
return err
}
- // Instantiate the request.
- req, err := c.newRequest("DELETE", requestMetadata{
+ // Execute DELETE on objectName.
+ resp, err := c.executeMethod("DELETE", requestMetadata{
bucketName: bucketName,
objectName: objectName,
})
- if err != nil {
- return err
- }
- // Initiate the request.
- resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return err
@@ -124,18 +162,12 @@ func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) er
urlValues := make(url.Values)
urlValues.Set("uploadId", uploadID)
- // Instantiate a new DELETE request.
- req, err := c.newRequest("DELETE", requestMetadata{
+ // Execute DELETE on multipart upload.
+ resp, err := c.executeMethod("DELETE", requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
})
- if err != nil {
- return err
- }
-
- // Initiate the request.
- resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return err
@@ -149,13 +181,13 @@ func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) er
// This is needed specifically for abort and it cannot
// be converged into default case.
errorResponse = ErrorResponse{
- Code: "NoSuchUpload",
- Message: "The specified multipart upload does not exist.",
- BucketName: bucketName,
- Key: objectName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+ Code: "NoSuchUpload",
+ Message: "The specified multipart upload does not exist.",
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
}
default:
return httpRespToErrorResponse(resp, bucketName, objectName)
diff --git a/vendor/src/github.com/minio/minio-go/api-s3-definitions.go b/vendor/src/github.com/minio/minio-go/api-s3-datatypes.go
similarity index 89%
rename from vendor/src/github.com/minio/minio-go/api-s3-definitions.go
rename to vendor/src/github.com/minio/minio-go/api-s3-datatypes.go
index de562e475..ca81e302d 100644
--- a/vendor/src/github.com/minio/minio-go/api-s3-definitions.go
+++ b/vendor/src/github.com/minio/minio-go/api-s3-datatypes.go
@@ -96,6 +96,12 @@ type initiator struct {
DisplayName string
}
+// copyObjectResult container for copy object response.
+type copyObjectResult struct {
+ ETag string
+ LastModified string // time string format "2006-01-02T15:04:05.000Z"
+}
+
// objectPart container for particular part of an object.
type objectPart struct {
// Part number identifies the part.
@@ -171,27 +177,3 @@ type createBucketConfiguration struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
Location string `xml:"LocationConstraint"`
}
-
-// grant container for the grantee and his or her permissions.
-type grant struct {
- // grantee container for DisplayName and ID of the person being
- // granted permissions.
- Grantee struct {
- ID string
- DisplayName string
- EmailAddress string
- Type string
- URI string
- }
- Permission string
-}
-
-// accessControlPolicy contains the elements providing ACL permissions
-// for a bucket.
-type accessControlPolicy struct {
- // accessControlList container for ACL information.
- AccessControlList struct {
- Grant []grant
- }
- Owner owner
-}
diff --git a/vendor/src/github.com/minio/minio-go/api-stat.go b/vendor/src/github.com/minio/minio-go/api-stat.go
index 20f66e8fc..b5db7fedc 100644
--- a/vendor/src/github.com/minio/minio-go/api-stat.go
+++ b/vendor/src/github.com/minio/minio-go/api-stat.go
@@ -1,5 +1,5 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -29,15 +29,11 @@ func (c Client) BucketExists(bucketName string) error {
if err := isValidBucketName(bucketName); err != nil {
return err
}
- // Instantiate a new request.
- req, err := c.newRequest("HEAD", requestMetadata{
+
+ // Execute HEAD on bucketName.
+ resp, err := c.executeMethod("HEAD", requestMetadata{
bucketName: bucketName,
})
- if err != nil {
- return err
- }
- // Initiate the request.
- resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return err
@@ -59,16 +55,12 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
if err := isValidObjectName(objectName); err != nil {
return ObjectInfo{}, err
}
- // Instantiate a new request.
- req, err := c.newRequest("HEAD", requestMetadata{
+
+ // Execute HEAD on objectName.
+ resp, err := c.executeMethod("HEAD", requestMetadata{
bucketName: bucketName,
objectName: objectName,
})
- if err != nil {
- return ObjectInfo{}, err
- }
- // Initiate the request.
- resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return ObjectInfo{}, err
@@ -87,26 +79,26 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
if err != nil {
return ObjectInfo{}, ErrorResponse{
- Code: "InternalError",
- Message: "Content-Length is invalid. " + reportIssue,
- BucketName: bucketName,
- Key: objectName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+ Code: "InternalError",
+ Message: "Content-Length is invalid. " + reportIssue,
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
}
}
// Parse Last-Modified has http time format.
date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
if err != nil {
return ObjectInfo{}, ErrorResponse{
- Code: "InternalError",
- Message: "Last-Modified time format is invalid. " + reportIssue,
- BucketName: bucketName,
- Key: objectName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
+ Code: "InternalError",
+ Message: "Last-Modified time format is invalid. " + reportIssue,
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
}
}
// Fetch content type if any present.
diff --git a/vendor/src/github.com/minio/minio-go/api.go b/vendor/src/github.com/minio/minio-go/api.go
index 96cab8c02..a47cadd1e 100644
--- a/vendor/src/github.com/minio/minio-go/api.go
+++ b/vendor/src/github.com/minio/minio-go/api.go
@@ -1,5 +1,5 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -22,6 +22,8 @@ import (
"encoding/hex"
"fmt"
"io"
+ "io/ioutil"
+ "math/rand"
"net/http"
"net/http/httputil"
"net/url"
@@ -29,6 +31,7 @@ import (
"regexp"
"runtime"
"strings"
+ "sync"
"time"
)
@@ -56,15 +59,18 @@ type Client struct {
httpClient *http.Client
bucketLocCache *bucketLocationCache
- // Advanced functionality
+ // Advanced functionality.
isTraceEnabled bool
traceOutput io.Writer
+
+ // Random seed.
+ random *rand.Rand
}
// Global constants.
const (
libraryName = "minio-go"
- libraryVersion = "0.2.5"
+ libraryVersion = "1.0.1"
)
// User Agent should always following the below style.
@@ -78,7 +84,7 @@ const (
// NewV2 - instantiate minio client with Amazon S3 signature version
// '2' compatibility.
-func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
+func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
if err != nil {
return nil, err
@@ -90,7 +96,7 @@ func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool)
// NewV4 - instantiate minio client with Amazon S3 signature version
// '4' compatibility.
-func NewV4(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
+func NewV4(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
if err != nil {
return nil, err
@@ -102,7 +108,7 @@ func NewV4(endpoint string, accessKeyID, secretAccessKey string, insecure bool)
// New - instantiate minio client Client, adds automatic verification
// of signature.
-func New(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
+func New(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
if err != nil {
return nil, err
@@ -112,13 +118,36 @@ func New(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (C
if isGoogleEndpoint(clnt.endpointURL) {
clnt.signature = SignatureV2
}
- // If Amazon S3 set to signature v2.
+ // If Amazon S3 set to signature v4.
if isAmazonEndpoint(clnt.endpointURL) {
clnt.signature = SignatureV4
}
return clnt, nil
}
+// lockedRandSource provides protected rand source, implements rand.Source interface.
+type lockedRandSource struct {
+ lk sync.Mutex
+ src rand.Source
+}
+
+// Int63 returns a non-negative pseudo-random 63-bit integer as an
+// int64.
+func (r *lockedRandSource) Int63() (n int64) {
+ r.lk.Lock()
+ n = r.src.Int63()
+ r.lk.Unlock()
+ return
+}
+
+// Seed uses the provided seed value to initialize the generator to a
+// deterministic state.
+func (r *lockedRandSource) Seed(seed int64) {
+ r.lk.Lock()
+ r.src.Seed(seed)
+ r.lk.Unlock()
+}
+
func privateNew(endpoint, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
// construct endpoint.
endpointURL, err := getEndpointURL(endpoint, insecure)
@@ -138,9 +167,20 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, insecure bool) (*
clnt.endpointURL = endpointURL
// Instantiate http client and bucket location cache.
- clnt.httpClient = &http.Client{}
+ clnt.httpClient = &http.Client{
+ // Setting a sensible timeout of 2 minutes to wait for response
+ // headers. Request is pro-actively cancelled after 2 minutes
+ // if no response was received from server.
+ Timeout: 2 * time.Minute,
+ Transport: http.DefaultTransport,
+ }
+
+ // Instantiate bucket location cache.
clnt.bucketLocCache = newBucketLocationCache()
+ // Introduce a new locked random seed.
+ clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())})
+
// Return.
return clnt, nil
}
@@ -180,6 +220,13 @@ func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) {
}
}
+// SetClientTimeout - set http client timeout.
+func (c *Client) SetClientTimeout(timeout time.Duration) {
+ if c.httpClient != nil {
+ c.httpClient.Timeout = timeout
+ }
+}
+
// TraceOn - enable HTTP tracing.
func (c *Client) TraceOn(outputStream io.Writer) {
// if outputStream is nil then default to os.Stdout.
@@ -214,7 +261,7 @@ type requestMetadata struct {
// Generated by our internal code.
bucketLocation string
- contentBody io.ReadCloser
+ contentBody io.Reader
contentLength int64
contentSHA256Bytes []byte
contentMD5Bytes []byte
@@ -292,7 +339,7 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
// to zero. Keep this workaround until the above bug is fixed.
if resp.ContentLength == 0 {
var buffer bytes.Buffer
- if err := resp.Header.Write(&buffer); err != nil {
+ if err = resp.Header.Write(&buffer); err != nil {
return err
}
respTrace = buffer.Bytes()
@@ -322,11 +369,28 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
// do - execute http request.
func (c Client) do(req *http.Request) (*http.Response, error) {
- // execute the request.
+ // do the request.
resp, err := c.httpClient.Do(req)
if err != nil {
- return resp, err
+ // Handle this specifically for now until future Golang
+ // versions fix this issue properly.
+ urlErr, ok := err.(*url.Error)
+ if ok && strings.Contains(urlErr.Err.Error(), "EOF") {
+ return nil, &url.Error{
+ Op: urlErr.Op,
+ URL: urlErr.URL,
+ Err: fmt.Errorf("Connection closed by foreign host %s. Retry again.", urlErr.URL),
+ }
+ }
+ return nil, err
}
+
+ // Response cannot be nil, report if that is the case.
+ if resp == nil {
+ msg := "Response is empty. " + reportIssue
+ return nil, ErrInvalidArgument(msg)
+ }
+
// If trace is enabled, dump http request and response.
if c.isTraceEnabled {
err = c.dumpHTTP(req, resp)
@@ -337,13 +401,110 @@ func (c Client) do(req *http.Request) (*http.Response, error) {
return resp, nil
}
+// List of success status.
+var successStatus = []int{
+ http.StatusOK,
+ http.StatusNoContent,
+ http.StatusPartialContent,
+}
+
+// executeMethod - instantiates a given method, and retries the
+// request upon any error up to maxRetries attempts in a binomially
+// delayed manner using a standard back off algorithm.
+func (c Client) executeMethod(method string, metadata requestMetadata) (res *http.Response, err error) {
+ var isRetryable bool // Indicates if request can be retried.
+ var bodySeeker io.Seeker // Extracted seeker from io.Reader.
+ if metadata.contentBody != nil {
+ // Check if body is seekable then it is retryable.
+ bodySeeker, isRetryable = metadata.contentBody.(io.Seeker)
+ }
+
+ // Retry executes the following function body if request has an
+ // error until maxRetries have been exhausted, retry attempts are
+ // performed after waiting for a given period of time in a
+ // binomial fashion.
+ for range c.newRetryTimer(MaxRetry, time.Second, time.Second*30, MaxJitter) {
+ if isRetryable {
+ // Seek back to beginning for each attempt.
+ if _, err = bodySeeker.Seek(0, 0); err != nil {
+ // If seek failed, no need to retry.
+ return nil, err
+ }
+ }
+
+ // Instantiate a new request.
+ var req *http.Request
+ req, err = c.newRequest(method, metadata)
+ if err != nil {
+ errResponse := ToErrorResponse(err)
+ if isS3CodeRetryable(errResponse.Code) {
+ continue // Retry.
+ }
+ return nil, err
+ }
+
+ // Initiate the request.
+ res, err = c.do(req)
+ if err != nil {
+ // For supported network errors verify.
+ if isNetErrorRetryable(err) {
+ continue // Retry.
+ }
+ // For other errors, return here no need to retry.
+ return nil, err
+ }
+
+ // For any known successful http status, return quickly.
+ for _, httpStatus := range successStatus {
+ if httpStatus == res.StatusCode {
+ return res, nil
+ }
+ }
+
+ // Read the body to be saved later.
+ errBodyBytes, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+ // Save the body.
+ errBodySeeker := bytes.NewReader(errBodyBytes)
+ res.Body = ioutil.NopCloser(errBodySeeker)
+
+ // For errors verify if its retryable otherwise fail quickly.
+ errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))
+ // Bucket region if set in error response, we can retry the
+ // request with the new region.
+ if errResponse.Region != "" {
+ c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
+ continue // Retry.
+ }
+
+ // Verify if error response code is retryable.
+ if isS3CodeRetryable(errResponse.Code) {
+ continue // Retry.
+ }
+
+ // Verify if http status code is retryable.
+ if isHTTPStatusRetryable(res.StatusCode) {
+ continue // Retry.
+ }
+
+ // Save the body back again.
+ errBodySeeker.Seek(0, 0) // Seek back to starting point.
+ res.Body = ioutil.NopCloser(errBodySeeker)
+
+ // For all other cases break out of the retry loop.
+ break
+ }
+ return res, err
+}
+
// newRequest - instantiate a new HTTP request for a given method.
func (c Client) newRequest(method string, metadata requestMetadata) (req *http.Request, err error) {
// If no method is supplied default to 'POST'.
if method == "" {
method = "POST"
}
-
// Gather location only if bucketName is present.
location := "us-east-1" // Default all other requests to "us-east-1".
if metadata.bucketName != "" {
@@ -385,10 +546,13 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
// Set content body if available.
if metadata.contentBody != nil {
- req.Body = metadata.contentBody
+ req.Body = ioutil.NopCloser(metadata.contentBody)
}
- // set UserAgent for the request.
+ // set 'Expect' header for the request.
+ req.Header.Set("Expect", "100-continue")
+
+ // set 'User-Agent' header for the request.
c.setUserAgent(req)
// Set all headers.
@@ -415,7 +579,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
// set md5Sum for content protection.
if metadata.contentMD5Bytes != nil {
- req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes))
+ req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes))
}
// Sign the request for all authenticated requests.
@@ -478,7 +642,7 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que
}
// If there are any query values, add them to the end.
if len(queryValues) > 0 {
- urlStr = urlStr + "?" + queryValues.Encode()
+ urlStr = urlStr + "?" + queryEncode(queryValues)
}
u, err := url.Parse(urlStr)
if err != nil {
@@ -487,46 +651,3 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que
return u, nil
}
-
-// CloudStorageClient - Cloud Storage Client interface.
-type CloudStorageClient interface {
- // Bucket Read/Write/Stat operations.
- MakeBucket(bucketName string, cannedACL BucketACL, location string) error
- BucketExists(bucketName string) error
- RemoveBucket(bucketName string) error
- SetBucketACL(bucketName string, cannedACL BucketACL) error
- GetBucketACL(bucketName string) (BucketACL, error)
-
- ListBuckets() ([]BucketInfo, error)
- ListObjects(bucket, prefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo
- ListIncompleteUploads(bucket, prefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo
-
- // Object Read/Write/Stat operations.
- GetObject(bucketName, objectName string) (reader *Object, err error)
- PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error)
- StatObject(bucketName, objectName string) (ObjectInfo, error)
- RemoveObject(bucketName, objectName string) error
- RemoveIncompleteUpload(bucketName, objectName string) error
-
- // File to Object API.
- FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error)
- FGetObject(bucketName, objectName, filePath string) error
-
- // PutObjectWithProgress for progress.
- PutObjectWithProgress(bucketName, objectName string, reader io.Reader, contentType string, progress io.Reader) (n int64, err error)
-
- // Presigned operations.
- PresignedGetObject(bucketName, objectName string, expires time.Duration) (presignedURL string, err error)
- PresignedPutObject(bucketName, objectName string, expires time.Duration) (presignedURL string, err error)
- PresignedPostPolicy(*PostPolicy) (formData map[string]string, err error)
-
- // Application info.
- SetAppInfo(appName, appVersion string)
-
- // Set custom transport.
- SetCustomTransport(customTransport http.RoundTripper)
-
- // HTTP tracing methods.
- TraceOn(traceOutput io.Writer)
- TraceOff()
-}
diff --git a/vendor/src/github.com/minio/minio-go/api_functional_v2_test.go b/vendor/src/github.com/minio/minio-go/api_functional_v2_test.go
index 990e02810..7fbd97c31 100644
--- a/vendor/src/github.com/minio/minio-go/api_functional_v2_test.go
+++ b/vendor/src/github.com/minio/minio-go/api_functional_v2_test.go
@@ -24,6 +24,7 @@ import (
"io/ioutil"
"math/rand"
"net/http"
+ "net/url"
"os"
"testing"
"time"
@@ -61,10 +62,10 @@ func TestMakeBucketErrorV2(t *testing.T) {
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket in 'eu-west-1'.
- if err = c.MakeBucket(bucketName, "private", "eu-west-1"); err != nil {
+ if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil {
t.Fatal("Error:", err, bucketName)
}
- if err = c.MakeBucket(bucketName, "private", "eu-west-1"); err == nil {
+ if err = c.MakeBucket(bucketName, "eu-west-1"); err == nil {
t.Fatal("Error: make bucket should should fail for", bucketName)
}
// Verify valid error response from server.
@@ -107,7 +108,7 @@ func TestGetObjectClosedTwiceV2(t *testing.T) {
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
- err = c.MakeBucket(bucketName, "private", "us-east-1")
+ err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
@@ -192,7 +193,7 @@ func TestRemovePartiallyUploadedV2(t *testing.T) {
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// make a new bucket.
- err = c.MakeBucket(bucketName, "private", "us-east-1")
+ err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
@@ -229,7 +230,7 @@ func TestRemovePartiallyUploadedV2(t *testing.T) {
}
// Tests resumable put object cloud to cloud.
-func TestResumbalePutObjectV2(t *testing.T) {
+func TestResumablePutObjectV2(t *testing.T) {
// By passing 'go test -short' skips these tests.
if testing.Short() {
t.Skip("skipping functional tests for the short runs")
@@ -259,7 +260,7 @@ func TestResumbalePutObjectV2(t *testing.T) {
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
- err = c.MakeBucket(bucketName, "private", "us-east-1")
+ err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
@@ -340,6 +341,154 @@ func TestResumbalePutObjectV2(t *testing.T) {
}
+// Tests FPutObject hidden contentType setting
+func TestFPutObjectV2(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV2(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ false,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Make a temp file with 11*1024*1024 bytes of data.
+ file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ n, err := io.CopyN(file, crand.Reader, 11*1024*1024)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(11*1024*1024) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+ }
+
+ // Close the file pro-actively for windows.
+ err = file.Close()
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Set base object name
+ objectName := bucketName + "FPutObject"
+
+ // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+ n, err = c.FPutObject(bucketName, objectName+"-standard", file.Name(), "application/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(11*1024*1024) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+ }
+
+ // Perform FPutObject with no contentType provided (Expecting application/octet-stream)
+ n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), "")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(11*1024*1024) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+ }
+
+ // Add extension to temp file name
+ fileName := file.Name()
+ err = os.Rename(file.Name(), fileName+".gtar")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Perform FPutObject with no contentType provided (Expecting application/x-gtar)
+ n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", "")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(11*1024*1024) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+ }
+
+ // Check headers
+ rStandard, err := c.StatObject(bucketName, objectName+"-standard")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName+"-standard")
+ }
+ if rStandard.ContentType != "application/octet-stream" {
+ t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
+ "application/octet-stream", rStandard.ContentType)
+ }
+
+ rOctet, err := c.StatObject(bucketName, objectName+"-Octet")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName+"-Octet")
+ }
+ if rOctet.ContentType != "application/octet-stream" {
+ t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
+ "application/octet-stream", rStandard.ContentType)
+ }
+
+ rGTar, err := c.StatObject(bucketName, objectName+"-GTar")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName+"-GTar")
+ }
+ if rGTar.ContentType != "application/x-gtar" {
+ t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
+ "application/x-gtar", rStandard.ContentType)
+ }
+
+ // Remove all objects and bucket and temp file
+ err = c.RemoveObject(bucketName, objectName+"-standard")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ err = c.RemoveObject(bucketName, objectName+"-Octet")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ err = c.RemoveObject(bucketName, objectName+"-GTar")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ err = os.Remove(fileName + ".gtar")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+}
+
// Tests resumable file based put object multipart upload.
func TestResumableFPutObjectV2(t *testing.T) {
if testing.Short() {
@@ -370,7 +519,7 @@ func TestResumableFPutObjectV2(t *testing.T) {
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// make a new bucket.
- err = c.MakeBucket(bucketName, "private", "us-east-1")
+ err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
@@ -447,7 +596,7 @@ func TestMakeBucketRegionsV2(t *testing.T) {
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket in 'eu-central-1'.
- if err = c.MakeBucket(bucketName, "private", "eu-west-1"); err != nil {
+ if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil {
t.Fatal("Error:", err, bucketName)
}
@@ -458,7 +607,7 @@ func TestMakeBucketRegionsV2(t *testing.T) {
// Make a new bucket with '.' in its name, in 'us-west-2'. This
// request is internally staged into a path style instead of
// virtual host style.
- if err = c.MakeBucket(bucketName+".withperiod", "private", "us-west-2"); err != nil {
+ if err = c.MakeBucket(bucketName+".withperiod", "us-west-2"); err != nil {
t.Fatal("Error:", err, bucketName+".withperiod")
}
@@ -468,70 +617,6 @@ func TestMakeBucketRegionsV2(t *testing.T) {
}
}
-// Tests resumable put object multipart upload.
-func TestResumablePutObjectV2(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV2(
- "s3.amazonaws.com",
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- false,
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
-
- // make a new bucket.
- err = c.MakeBucket(bucketName, "private", "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // generate 11MB
- buf := make([]byte, 11*1024*1024)
-
- _, err = io.ReadFull(crand.Reader, buf)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- objectName := bucketName + "-resumable"
- reader := bytes.NewReader(buf)
- n, err := c.PutObject(bucketName, objectName, reader, "application/octet-stream")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
- }
-
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
// Tests get object ReaderSeeker interface methods.
func TestGetObjectReadSeekFunctionalV2(t *testing.T) {
if testing.Short() {
@@ -562,7 +647,7 @@ func TestGetObjectReadSeekFunctionalV2(t *testing.T) {
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
- err = c.MakeBucket(bucketName, "private", "us-east-1")
+ err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
@@ -629,13 +714,37 @@ func TestGetObjectReadSeekFunctionalV2(t *testing.T) {
if n != 0 {
t.Fatalf("Error: number of bytes seeked back does not match, want 0, got %v\n", n)
}
- var buffer bytes.Buffer
- if _, err = io.CopyN(&buffer, r, st.Size); err != nil {
- t.Fatal("Error:", err)
+
+ var buffer1 bytes.Buffer
+ if n, err = io.CopyN(&buffer1, r, st.Size); err != nil {
+ if err != io.EOF {
+ t.Fatal("Error:", err)
+ }
}
- if !bytes.Equal(buf, buffer.Bytes()) {
+ if !bytes.Equal(buf, buffer1.Bytes()) {
t.Fatal("Error: Incorrect read bytes v/s original buffer.")
}
+
+ // Seek again and read again.
+ n, err = r.Seek(offset-1, 0)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != (offset - 1) {
+ t.Fatalf("Error: number of bytes seeked back does not match, want %v, got %v\n", offset-1, n)
+ }
+
+ var buffer2 bytes.Buffer
+ if _, err = io.CopyN(&buffer2, r, st.Size); err != nil {
+ if err != io.EOF {
+ t.Fatal("Error:", err)
+ }
+ }
+ // Verify now lesser bytes.
+ if !bytes.Equal(buf[2047:], buffer2.Bytes()) {
+ t.Fatal("Error: Incorrect read bytes v/s original buffer.")
+ }
+
err = c.RemoveObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
@@ -676,7 +785,7 @@ func TestGetObjectReadAtFunctionalV2(t *testing.T) {
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
- err = c.MakeBucket(bucketName, "private", "us-east-1")
+ err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
@@ -788,6 +897,132 @@ func TestGetObjectReadAtFunctionalV2(t *testing.T) {
}
}
+// Tests copy object
+func TestCopyObjectV2(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping functional tests for short runs")
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := minio.NewV2(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ false,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
+
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Make a new bucket in 'us-east-1' (destination bucket).
+ err = c.MakeBucket(bucketName+"-copy", "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName+"-copy")
+ }
+
+ // Generate data more than 32K
+ buf := make([]byte, rand.Intn(1<<20)+32*1024)
+
+ _, err = io.ReadFull(crand.Reader, buf)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ if n != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes does not match want %v, got %v",
+ len(buf), n)
+ }
+
+ // Set copy conditions.
+ copyConds := minio.NewCopyConditions()
+ err = copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Copy source.
+ copySource := bucketName + "/" + objectName
+
+ // Perform the Copy
+ err = c.CopyObject(bucketName+"-copy", objectName+"-copy", copySource, copyConds)
+ if err != nil {
+ t.Fatal("Error:", err, bucketName+"-copy", objectName+"-copy")
+ }
+
+ // Source object
+ reader, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ // Destination object
+ readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ // Check the various fields of source object against destination object.
+ objInfo, err := reader.Stat()
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ objInfoCopy, err := readerCopy.Stat()
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if objInfo.Size != objInfoCopy.Size {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n",
+ objInfo.Size, objInfoCopy.Size)
+ }
+ if objInfo.ETag != objInfoCopy.ETag {
+ t.Fatalf("Error: ETags do not match, want %v, got %v\n",
+ objInfoCopy.ETag, objInfo.ETag)
+ }
+
+ // Remove all objects and buckets
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ err = c.RemoveObject(bucketName+"-copy", objectName+"-copy")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ err = c.RemoveBucket(bucketName + "-copy")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+}
+
// Tests comprehensive list of all methods.
func TestFunctionalV2(t *testing.T) {
if testing.Short() {
@@ -817,7 +1052,7 @@ func TestFunctionalV2(t *testing.T) {
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
- err = c.MakeBucket(bucketName, "private", "us-east-1")
+ err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
@@ -844,22 +1079,11 @@ func TestFunctionalV2(t *testing.T) {
}
// Make the bucket 'public read/write'.
- err = c.SetBucketACL(bucketName, "public-read-write")
+ err = c.SetBucketPolicy(bucketName, "", minio.BucketPolicyReadWrite)
if err != nil {
t.Fatal("Error:", err)
}
- // Get the previously set acl.
- acl, err := c.GetBucketACL(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // ACL must be 'public read/write'.
- if acl != minio.BucketACL("public-read-write") {
- t.Fatal("Error:", acl)
- }
-
// List all buckets.
buckets, err := c.ListBuckets()
if len(buckets) == 0 {
@@ -954,11 +1178,12 @@ func TestFunctionalV2(t *testing.T) {
t.Fatal("Error: ", err)
}
- presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second)
+ // Generate presigned GET object url.
+ presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
if err != nil {
t.Fatal("Error: ", err)
}
-
+ // Verify if presigned url works.
resp, err := http.Get(presignedGetURL)
if err != nil {
t.Fatal("Error: ", err)
@@ -974,6 +1199,34 @@ func TestFunctionalV2(t *testing.T) {
t.Fatal("Error: bytes mismatch.")
}
+ // Set request parameters.
+ reqParams := make(url.Values)
+ reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
+ // Generate presigned GET object url.
+ presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ // Verify if presigned url works.
+ resp, err = http.Get(presignedGetURL)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ if resp.StatusCode != http.StatusOK {
+ t.Fatal("Error: ", resp.Status)
+ }
+ newPresignedBytes, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ if !bytes.Equal(newPresignedBytes, buf) {
+ t.Fatal("Error: bytes mismatch for presigned GET url.")
+ }
+ // Verify content disposition.
+ if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
+ t.Fatalf("Error: wrong Content-Disposition received %s", resp.Header.Get("Content-Disposition"))
+ }
+
presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
if err != nil {
t.Fatal("Error: ", err)
@@ -987,7 +1240,13 @@ func TestFunctionalV2(t *testing.T) {
if err != nil {
t.Fatal("Error: ", err)
}
- httpClient := &http.Client{}
+ httpClient := &http.Client{
+ // Setting a sensible time out of 30secs to wait for response
+ // headers. Request is pro-actively cancelled after 30secs
+ // with no response.
+ Timeout: 30 * time.Second,
+ Transport: http.DefaultTransport,
+ }
resp, err = httpClient.Do(req)
if err != nil {
t.Fatal("Error: ", err)
diff --git a/vendor/src/github.com/minio/minio-go/api_functional_v4_test.go b/vendor/src/github.com/minio/minio-go/api_functional_v4_test.go
index 5e88c6124..7126dd7a9 100644
--- a/vendor/src/github.com/minio/minio-go/api_functional_v4_test.go
+++ b/vendor/src/github.com/minio/minio-go/api_functional_v4_test.go
@@ -24,6 +24,7 @@ import (
"io/ioutil"
"math/rand"
"net/http"
+ "net/url"
"os"
"testing"
"time"
@@ -85,10 +86,10 @@ func TestMakeBucketError(t *testing.T) {
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket in 'eu-central-1'.
- if err = c.MakeBucket(bucketName, "private", "eu-central-1"); err != nil {
+ if err = c.MakeBucket(bucketName, "eu-central-1"); err != nil {
t.Fatal("Error:", err, bucketName)
}
- if err = c.MakeBucket(bucketName, "private", "eu-central-1"); err == nil {
+ if err = c.MakeBucket(bucketName, "eu-central-1"); err == nil {
t.Fatal("Error: make bucket should should fail for", bucketName)
}
// Verify valid error response from server.
@@ -131,7 +132,7 @@ func TestMakeBucketRegions(t *testing.T) {
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket in 'eu-central-1'.
- if err = c.MakeBucket(bucketName, "private", "eu-central-1"); err != nil {
+ if err = c.MakeBucket(bucketName, "eu-central-1"); err != nil {
t.Fatal("Error:", err, bucketName)
}
@@ -142,7 +143,7 @@ func TestMakeBucketRegions(t *testing.T) {
// Make a new bucket with '.' in its name, in 'us-west-2'. This
// request is internally staged into a path style instead of
// virtual host style.
- if err = c.MakeBucket(bucketName+".withperiod", "private", "us-west-2"); err != nil {
+ if err = c.MakeBucket(bucketName+".withperiod", "us-west-2"); err != nil {
t.Fatal("Error:", err, bucketName+".withperiod")
}
@@ -182,7 +183,7 @@ func TestGetObjectClosedTwice(t *testing.T) {
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
- err = c.MakeBucket(bucketName, "private", "us-east-1")
+ err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
@@ -267,7 +268,7 @@ func TestRemovePartiallyUploaded(t *testing.T) {
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
- err = c.MakeBucket(bucketName, "private", "us-east-1")
+ err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
@@ -307,7 +308,7 @@ func TestRemovePartiallyUploaded(t *testing.T) {
}
// Tests resumable put object cloud to cloud.
-func TestResumbalePutObject(t *testing.T) {
+func TestResumablePutObject(t *testing.T) {
// By passing 'go test -short' skips these tests.
if testing.Short() {
t.Skip("skipping functional tests for the short runs")
@@ -337,7 +338,7 @@ func TestResumbalePutObject(t *testing.T) {
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
- err = c.MakeBucket(bucketName, "private", "us-east-1")
+ err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
@@ -447,7 +448,7 @@ func TestResumableFPutObject(t *testing.T) {
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
- err = c.MakeBucket(bucketName, "private", "us-east-1")
+ err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
@@ -497,10 +498,10 @@ func TestResumableFPutObject(t *testing.T) {
}
}
-// Tests resumable put object multipart upload.
-func TestResumablePutObject(t *testing.T) {
+// Tests FPutObject hidden contentType setting
+func TestFPutObject(t *testing.T) {
if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
+ t.Skip("skipping functional tests for short runs")
}
// Seed random based on current time.
@@ -527,30 +528,108 @@ func TestResumablePutObject(t *testing.T) {
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
- err = c.MakeBucket(bucketName, "private", "us-east-1")
+ err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
- // Generate 11MB
- buf := make([]byte, 11*1024*1024)
-
- _, err = io.ReadFull(crand.Reader, buf)
+ // Make a temp file with 11*1024*1024 bytes of data.
+ file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
if err != nil {
t.Fatal("Error:", err)
}
- objectName := bucketName + "-resumable"
- reader := bytes.NewReader(buf)
- n, err := c.PutObject(bucketName, objectName, reader, "application/octet-stream")
+ n, err := io.CopyN(file, crand.Reader, 11*1024*1024)
if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
+ t.Fatal("Error:", err)
}
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ if n != int64(11*1024*1024) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
}
- err = c.RemoveObject(bucketName, objectName)
+ // Close the file pro-actively for windows.
+ err = file.Close()
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Set base object name
+ objectName := bucketName + "FPutObject"
+
+ // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+ n, err = c.FPutObject(bucketName, objectName+"-standard", file.Name(), "application/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(11*1024*1024) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+ }
+
+ // Perform FPutObject with no contentType provided (Expecting application/octet-stream)
+ n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), "")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(11*1024*1024) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+ }
+
+ // Add extension to temp file name
+ fileName := file.Name()
+ err = os.Rename(file.Name(), fileName+".gtar")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Perform FPutObject with no contentType provided (Expecting application/x-gtar)
+ n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", "")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != int64(11*1024*1024) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
+ }
+
+ // Check headers
+ rStandard, err := c.StatObject(bucketName, objectName+"-standard")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName+"-standard")
+ }
+ if rStandard.ContentType != "application/octet-stream" {
+ t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
+ "application/octet-stream", rStandard.ContentType)
+ }
+
+ rOctet, err := c.StatObject(bucketName, objectName+"-Octet")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName+"-Octet")
+ }
+ if rOctet.ContentType != "application/octet-stream" {
+ t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
+ "application/octet-stream", rOctet.ContentType)
+ }
+
+ rGTar, err := c.StatObject(bucketName, objectName+"-GTar")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName+"-GTar")
+ }
+ if rGTar.ContentType != "application/x-gtar" {
+ t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
+ "application/x-gtar", rGTar.ContentType)
+ }
+
+ // Remove all objects and bucket and temp file
+ err = c.RemoveObject(bucketName, objectName+"-standard")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ err = c.RemoveObject(bucketName, objectName+"-Octet")
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ err = c.RemoveObject(bucketName, objectName+"-GTar")
if err != nil {
t.Fatal("Error: ", err)
}
@@ -559,6 +638,12 @@ func TestResumablePutObject(t *testing.T) {
if err != nil {
t.Fatal("Error:", err)
}
+
+ err = os.Remove(fileName + ".gtar")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
}
// Tests get object ReaderSeeker interface methods.
@@ -591,7 +676,7 @@ func TestGetObjectReadSeekFunctional(t *testing.T) {
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
- err = c.MakeBucket(bucketName, "private", "us-east-1")
+ err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
@@ -658,13 +743,37 @@ func TestGetObjectReadSeekFunctional(t *testing.T) {
if n != 0 {
t.Fatalf("Error: number of bytes seeked back does not match, want 0, got %v\n", n)
}
- var buffer bytes.Buffer
- if _, err = io.CopyN(&buffer, r, st.Size); err != nil {
- t.Fatal("Error:", err)
+
+ var buffer1 bytes.Buffer
+ if n, err = io.CopyN(&buffer1, r, st.Size); err != nil {
+ if err != io.EOF {
+ t.Fatal("Error:", err)
+ }
}
- if !bytes.Equal(buf, buffer.Bytes()) {
+ if !bytes.Equal(buf, buffer1.Bytes()) {
t.Fatal("Error: Incorrect read bytes v/s original buffer.")
}
+
+ // Seek again and read again.
+ n, err = r.Seek(offset-1, 0)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if n != (offset - 1) {
+ t.Fatalf("Error: number of bytes seeked back does not match, want %v, got %v\n", offset-1, n)
+ }
+
+ var buffer2 bytes.Buffer
+ if _, err = io.CopyN(&buffer2, r, st.Size); err != nil {
+ if err != io.EOF {
+ t.Fatal("Error:", err)
+ }
+ }
+ // Verify now lesser bytes.
+ if !bytes.Equal(buf[2047:], buffer2.Bytes()) {
+ t.Fatal("Error: Incorrect read bytes v/s original buffer.")
+ }
+
err = c.RemoveObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
@@ -705,7 +814,7 @@ func TestGetObjectReadAtFunctional(t *testing.T) {
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
- err = c.MakeBucket(bucketName, "private", "us-east-1")
+ err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
@@ -817,6 +926,132 @@ func TestGetObjectReadAtFunctional(t *testing.T) {
}
}
+// Tests copy object
+func TestCopyObject(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping functional tests for short runs")
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := minio.NewV4(
+ "s3.amazonaws.com",
+ os.Getenv("ACCESS_KEY"),
+ os.Getenv("SECRET_KEY"),
+ false,
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
+
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Make a new bucket in 'us-east-1' (destination bucket).
+ err = c.MakeBucket(bucketName+"-copy", "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName+"-copy")
+ }
+
+ // Generate data more than 32K
+ buf := make([]byte, rand.Intn(1<<20)+32*1024)
+
+ _, err = io.ReadFull(crand.Reader, buf)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ if n != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes does not match want %v, got %v",
+ len(buf), n)
+ }
+
+ // Set copy conditions.
+ copyConds := minio.NewCopyConditions()
+ err = copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Copy source.
+ copySource := bucketName + "/" + objectName
+
+ // Perform the Copy
+ err = c.CopyObject(bucketName+"-copy", objectName+"-copy", copySource, copyConds)
+ if err != nil {
+ t.Fatal("Error:", err, bucketName+"-copy", objectName+"-copy")
+ }
+
+ // Source object
+ reader, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ // Destination object
+ readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ // Check the various fields of source object against destination object.
+ objInfo, err := reader.Stat()
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ objInfoCopy, err := readerCopy.Stat()
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if objInfo.Size != objInfoCopy.Size {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n",
+ objInfo.Size, objInfoCopy.Size)
+ }
+ if objInfo.ETag != objInfoCopy.ETag {
+ t.Fatalf("Error: ETags do not match, want %v, got %v\n",
+ objInfoCopy.ETag, objInfo.ETag)
+ }
+
+ // Remove all objects and buckets
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ err = c.RemoveObject(bucketName+"-copy", objectName+"-copy")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ err = c.RemoveBucket(bucketName + "-copy")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+}
+
// Tests comprehensive list of all methods.
func TestFunctional(t *testing.T) {
if testing.Short() {
@@ -846,7 +1081,7 @@ func TestFunctional(t *testing.T) {
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
- err = c.MakeBucket(bucketName, "private", "us-east-1")
+ err = c.MakeBucket(bucketName, "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
@@ -872,23 +1107,54 @@ func TestFunctional(t *testing.T) {
t.Fatal("Error:", err, bucketName)
}
+ // Asserting the default bucket policy.
+ policy, err := c.GetBucketPolicy(bucketName, "")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if policy != "none" {
+ t.Fatalf("Default bucket policy incorrect")
+ }
+ // Set the bucket policy to 'public readonly'.
+ err = c.SetBucketPolicy(bucketName, "", minio.BucketPolicyReadOnly)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ // should return policy `readonly`.
+ policy, err = c.GetBucketPolicy(bucketName, "")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if policy != "readonly" {
+ t.Fatalf("Expected bucket policy to be readonly")
+ }
+
+ // Make the bucket 'public writeonly'.
+ err = c.SetBucketPolicy(bucketName, "", minio.BucketPolicyWriteOnly)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ // should return policy `writeonly`.
+ policy, err = c.GetBucketPolicy(bucketName, "")
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+ if policy != "writeonly" {
+ t.Fatalf("Expected bucket policy to be writeonly")
+ }
// Make the bucket 'public read/write'.
- err = c.SetBucketACL(bucketName, "public-read-write")
+ err = c.SetBucketPolicy(bucketName, "", minio.BucketPolicyReadWrite)
if err != nil {
t.Fatal("Error:", err)
}
-
- // Get the previously set acl.
- acl, err := c.GetBucketACL(bucketName)
+ // should return policy `readwrite`.
+ policy, err = c.GetBucketPolicy(bucketName, "")
if err != nil {
t.Fatal("Error:", err)
}
-
- // ACL must be 'public read/write'.
- if acl != minio.BucketACL("public-read-write") {
- t.Fatal("Error:", acl)
+ if policy != "readwrite" {
+ t.Fatalf("Expected bucket policy to be readwrite")
}
-
// List all buckets.
buckets, err := c.ListBuckets()
if len(buckets) == 0 {
@@ -983,11 +1249,13 @@ func TestFunctional(t *testing.T) {
t.Fatal("Error: ", err)
}
- presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second)
+ // Generate presigned GET object url.
+ presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
if err != nil {
t.Fatal("Error: ", err)
}
+ // Verify if presigned url works.
resp, err := http.Get(presignedGetURL)
if err != nil {
t.Fatal("Error: ", err)
@@ -1003,6 +1271,32 @@ func TestFunctional(t *testing.T) {
t.Fatal("Error: bytes mismatch.")
}
+ // Set request parameters.
+ reqParams := make(url.Values)
+ reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
+ presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ // Verify if presigned url works.
+ resp, err = http.Get(presignedGetURL)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ if resp.StatusCode != http.StatusOK {
+ t.Fatal("Error: ", resp.Status)
+ }
+ newPresignedBytes, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+ if !bytes.Equal(newPresignedBytes, buf) {
+ t.Fatal("Error: bytes mismatch for presigned GET URL.")
+ }
+ if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
+ t.Fatalf("Error: wrong Content-Disposition received %s", resp.Header.Get("Content-Disposition"))
+ }
+
presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
if err != nil {
t.Fatal("Error: ", err)
@@ -1016,7 +1310,13 @@ func TestFunctional(t *testing.T) {
if err != nil {
t.Fatal("Error: ", err)
}
- httpClient := &http.Client{}
+ httpClient := &http.Client{
+ // Setting a sensible time out of 30secs to wait for response
+ // headers. Request is pro-actively cancelled after 30secs
+ // with no response.
+ Timeout: 30 * time.Second,
+ Transport: http.DefaultTransport,
+ }
resp, err = httpClient.Do(req)
if err != nil {
t.Fatal("Error: ", err)
diff --git a/vendor/src/github.com/minio/minio-go/api_unit_test.go b/vendor/src/github.com/minio/minio-go/api_unit_test.go
index 2afc666d8..4fb1978d1 100644
--- a/vendor/src/github.com/minio/minio-go/api_unit_test.go
+++ b/vendor/src/github.com/minio/minio-go/api_unit_test.go
@@ -160,31 +160,6 @@ func TestValidBucketLocation(t *testing.T) {
}
}
-// Tests valid bucket names.
-func TestBucketNames(t *testing.T) {
- buckets := []struct {
- name string
- valid error
- }{
- {".mybucket", ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")},
- {"mybucket.", ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")},
- {"mybucket-", ErrInvalidBucketName("Bucket name contains invalid characters.")},
- {"my", ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters.")},
- {"", ErrInvalidBucketName("Bucket name cannot be empty.")},
- {"my..bucket", ErrInvalidBucketName("Bucket name cannot have successive periods.")},
- {"my.bucket.com", nil},
- {"my-bucket", nil},
- {"123my-bucket", nil},
- }
-
- for _, b := range buckets {
- err := isValidBucketName(b.name)
- if err != b.valid {
- t.Fatal("Error:", err)
- }
- }
-}
-
// Tests temp file.
func TestTempFile(t *testing.T) {
tmpFile, err := newTempFile("testing")
@@ -340,17 +315,17 @@ func TestSignatureType(t *testing.T) {
}
}
-// Tests bucket acl types.
-func TestBucketACLTypes(t *testing.T) {
+// Tests bucket policy types.
+func TestBucketPolicyTypes(t *testing.T) {
want := map[string]bool{
- "private": true,
- "public-read": true,
- "public-read-write": true,
- "authenticated-read": true,
- "invalid": false,
+ "none": true,
+ "readonly": true,
+ "writeonly": true,
+ "readwrite": true,
+ "invalid": false,
}
- for acl, ok := range want {
- if BucketACL(acl).isValidBucketACL() != ok {
+ for bucketPolicy, ok := range want {
+ if BucketPolicy(bucketPolicy).isValidBucketPolicy() != ok {
t.Fatal("Error")
}
}
@@ -396,188 +371,3 @@ func TestPartSize(t *testing.T) {
t.Fatalf("Error: expecting last part size of 241172480: got %v instead", lastPartSize)
}
}
-
-// Tests url encoding.
-func TestURLEncoding(t *testing.T) {
- type urlStrings struct {
- name string
- encodedName string
- }
-
- want := []urlStrings{
- {
- name: "bigfile-1._%",
- encodedName: "bigfile-1._%25",
- },
- {
- name: "本語",
- encodedName: "%E6%9C%AC%E8%AA%9E",
- },
- {
- name: "本語.1",
- encodedName: "%E6%9C%AC%E8%AA%9E.1",
- },
- {
- name: ">123>3123123",
- encodedName: "%3E123%3E3123123",
- },
- {
- name: "test 1 2.txt",
- encodedName: "test%201%202.txt",
- },
- {
- name: "test++ 1.txt",
- encodedName: "test%2B%2B%201.txt",
- },
- }
-
- for _, u := range want {
- if u.encodedName != urlEncodePath(u.name) {
- t.Fatal("Error")
- }
- }
-}
-
-// Tests constructing valid endpoint url.
-func TestGetEndpointURL(t *testing.T) {
- if _, err := getEndpointURL("s3.amazonaws.com", false); err != nil {
- t.Fatal("Error:", err)
- }
- if _, err := getEndpointURL("192.168.1.1", false); err != nil {
- t.Fatal("Error:", err)
- }
- if _, err := getEndpointURL("13333.123123.-", false); err == nil {
- t.Fatal("Error")
- }
- if _, err := getEndpointURL("s3.aamzza.-", false); err == nil {
- t.Fatal("Error")
- }
- if _, err := getEndpointURL("s3.amazonaws.com:443", false); err == nil {
- t.Fatal("Error")
- }
-}
-
-// Tests valid ip address.
-func TestValidIPAddr(t *testing.T) {
- type validIP struct {
- ip string
- valid bool
- }
-
- want := []validIP{
- {
- ip: "192.168.1.1",
- valid: true,
- },
- {
- ip: "192.1.8",
- valid: false,
- },
- {
- ip: "..192.",
- valid: false,
- },
- {
- ip: "192.168.1.1.1",
- valid: false,
- },
- }
- for _, w := range want {
- valid := isValidIP(w.ip)
- if valid != w.valid {
- t.Fatal("Error")
- }
- }
-}
-
-// Tests valid endpoint domain.
-func TestValidEndpointDomain(t *testing.T) {
- type validEndpoint struct {
- endpointDomain string
- valid bool
- }
-
- want := []validEndpoint{
- {
- endpointDomain: "s3.amazonaws.com",
- valid: true,
- },
- {
- endpointDomain: "s3.amazonaws.com_",
- valid: false,
- },
- {
- endpointDomain: "%$$$",
- valid: false,
- },
- {
- endpointDomain: "s3.amz.test.com",
- valid: true,
- },
- {
- endpointDomain: "s3.%%",
- valid: false,
- },
- {
- endpointDomain: "localhost",
- valid: true,
- },
- {
- endpointDomain: "-localhost",
- valid: false,
- },
- {
- endpointDomain: "",
- valid: false,
- },
- {
- endpointDomain: "\n \t",
- valid: false,
- },
- {
- endpointDomain: " ",
- valid: false,
- },
- }
- for _, w := range want {
- valid := isValidDomain(w.endpointDomain)
- if valid != w.valid {
- t.Fatal("Error:", w.endpointDomain)
- }
- }
-}
-
-// Tests valid endpoint url.
-func TestValidEndpointURL(t *testing.T) {
- type validURL struct {
- url string
- valid bool
- }
- want := []validURL{
- {
- url: "https://s3.amazonaws.com",
- valid: true,
- },
- {
- url: "https://s3.amazonaws.com/bucket/object",
- valid: false,
- },
- {
- url: "192.168.1.1",
- valid: false,
- },
- }
- for _, w := range want {
- u, err := url.Parse(w.url)
- if err != nil {
- t.Fatal("Error:", err)
- }
- valid := false
- if err := isValidEndpointURL(u); err == nil {
- valid = true
- }
- if valid != w.valid {
- t.Fatal("Error")
- }
- }
-}
diff --git a/vendor/src/github.com/minio/minio-go/appveyor.yml b/vendor/src/github.com/minio/minio-go/appveyor.yml
index 5b8824d45..a5dc2b226 100644
--- a/vendor/src/github.com/minio/minio-go/appveyor.yml
+++ b/vendor/src/github.com/minio/minio-go/appveyor.yml
@@ -17,8 +17,8 @@ install:
- go version
- go env
- go get -u github.com/golang/lint/golint
- - go get -u golang.org/x/tools/cmd/vet
- go get -u github.com/remyoudompheng/go-misc/deadcode
+ - go get -u github.com/gordonklaus/ineffassign
# to run your custom scripts instead of automatic MSBuild
build_script:
@@ -26,6 +26,7 @@ build_script:
- gofmt -s -l .
- golint github.com/minio/minio-go...
- deadcode
+ - ineffassign .
- go test -short -v
- go test -short -race -v
diff --git a/vendor/src/github.com/minio/minio-go/bucket-acl.go b/vendor/src/github.com/minio/minio-go/bucket-acl.go
deleted file mode 100644
index d8eda0f54..000000000
--- a/vendor/src/github.com/minio/minio-go/bucket-acl.go
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-// BucketACL - Bucket level access control.
-type BucketACL string
-
-// Different types of ACL's currently supported for buckets.
-const (
- bucketPrivate = BucketACL("private")
- bucketReadOnly = BucketACL("public-read")
- bucketPublic = BucketACL("public-read-write")
- bucketAuthenticated = BucketACL("authenticated-read")
-)
-
-// Stringify acl.
-func (b BucketACL) String() string {
- if string(b) == "" {
- return "private"
- }
- return string(b)
-}
-
-// isValidBucketACL - Is provided acl string supported.
-func (b BucketACL) isValidBucketACL() bool {
- switch true {
- case b.isPrivate():
- fallthrough
- case b.isReadOnly():
- fallthrough
- case b.isPublic():
- fallthrough
- case b.isAuthenticated():
- return true
- case b.String() == "private":
- // By default its "private"
- return true
- default:
- return false
- }
-}
-
-// isPrivate - Is acl Private.
-func (b BucketACL) isPrivate() bool {
- return b == bucketPrivate
-}
-
-// isPublicRead - Is acl PublicRead.
-func (b BucketACL) isReadOnly() bool {
- return b == bucketReadOnly
-}
-
-// isPublicReadWrite - Is acl PublicReadWrite.
-func (b BucketACL) isPublic() bool {
- return b == bucketPublic
-}
-
-// isAuthenticated - Is acl AuthenticatedRead.
-func (b BucketACL) isAuthenticated() bool {
- return b == bucketAuthenticated
-}
diff --git a/vendor/src/github.com/minio/minio-go/bucket-cache.go b/vendor/src/github.com/minio/minio-go/bucket-cache.go
index c0b4f0cb8..50679a380 100644
--- a/vendor/src/github.com/minio/minio-go/bucket-cache.go
+++ b/vendor/src/github.com/minio/minio-go/bucket-cache.go
@@ -20,7 +20,8 @@ import (
"encoding/hex"
"net/http"
"net/url"
- "path/filepath"
+ "path"
+ "strings"
"sync"
)
@@ -67,11 +68,6 @@ func (r *bucketLocationCache) Delete(bucketName string) {
// getBucketLocation - Get location for the bucketName from location map cache.
func (c Client) getBucketLocation(bucketName string) (string, error) {
- // For anonymous requests, default to "us-east-1" and let other calls
- // move forward.
- if c.anonymous {
- return "us-east-1", nil
- }
if location, ok := c.bucketLocCache.Get(bucketName); ok {
return location, nil
}
@@ -90,7 +86,15 @@ func (c Client) getBucketLocation(bucketName string) (string, error) {
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
- return "", httpRespToErrorResponse(resp, bucketName, "")
+ err = httpRespToErrorResponse(resp, bucketName, "")
+ errResp := ToErrorResponse(err)
+ // For access denied error, it could be an anonymous
+ // request. Move forward and let the top level callers
+ // succeed if possible based on their policy.
+ if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
+ return "us-east-1", nil
+ }
+ return "", err
}
}
@@ -127,7 +131,7 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
// Set get bucket location always as path style.
targetURL := c.endpointURL
- targetURL.Path = filepath.Join(bucketName, "") + "/"
+ targetURL.Path = path.Join(bucketName, "") + "/"
targetURL.RawQuery = urlValues.Encode()
// Get a new HTTP request for the method.
diff --git a/vendor/src/github.com/minio/minio-go/bucket-policy.go b/vendor/src/github.com/minio/minio-go/bucket-policy.go
new file mode 100644
index 000000000..57e3f2d02
--- /dev/null
+++ b/vendor/src/github.com/minio/minio-go/bucket-policy.go
@@ -0,0 +1,488 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+)
+
+// maximum supported access policy size.
+const maxAccessPolicySize = 20 * 1024 * 1024 // 20MiB.
+
+// Resource prefix for all aws resources.
+const awsResourcePrefix = "arn:aws:s3:::"
+
+// BucketPolicy - Bucket level policy.
+type BucketPolicy string
+
+// Different types of Policies currently supported for buckets.
+const (
+ BucketPolicyNone BucketPolicy = "none"
+ BucketPolicyReadOnly = "readonly"
+ BucketPolicyReadWrite = "readwrite"
+ BucketPolicyWriteOnly = "writeonly"
+)
+
+// isValidBucketPolicy - Is provided policy value supported.
+func (p BucketPolicy) isValidBucketPolicy() bool {
+ switch p {
+ case BucketPolicyNone, BucketPolicyReadOnly, BucketPolicyReadWrite, BucketPolicyWriteOnly:
+ return true
+ }
+ return false
+}
+
+// User - canonical users list.
+type User struct {
+ AWS []string
+}
+
+// Statement - minio policy statement
+type Statement struct {
+ Sid string
+ Effect string
+ Principal User `json:"Principal"`
+ Actions []string `json:"Action"`
+ Resources []string `json:"Resource"`
+ Conditions map[string]map[string]string `json:"Condition,omitempty"`
+}
+
+// BucketAccessPolicy - minio policy collection
+type BucketAccessPolicy struct {
+ Version string // date in 0000-00-00 format
+ Statements []Statement `json:"Statement"`
+}
+
+// Read write actions.
+var (
+ readWriteBucketActions = []string{
+ "s3:GetBucketLocation",
+ "s3:ListBucket",
+ "s3:ListBucketMultipartUploads",
+ // Add more bucket level read-write actions here.
+ }
+ readWriteObjectActions = []string{
+ "s3:AbortMultipartUpload",
+ "s3:DeleteObject",
+ "s3:GetObject",
+ "s3:ListMultipartUploadParts",
+ "s3:PutObject",
+ // Add more object level read-write actions here.
+ }
+)
+
+// Write only actions.
+var (
+ writeOnlyBucketActions = []string{
+ "s3:GetBucketLocation",
+ "s3:ListBucketMultipartUploads",
+ // Add more bucket level write actions here.
+ }
+ writeOnlyObjectActions = []string{
+ "s3:AbortMultipartUpload",
+ "s3:DeleteObject",
+ "s3:ListMultipartUploadParts",
+ "s3:PutObject",
+ // Add more object level write actions here.
+ }
+)
+
+// Read only actions.
+var (
+ readOnlyBucketActions = []string{
+ "s3:GetBucketLocation",
+ "s3:ListBucket",
+ // Add more bucket level read actions here.
+ }
+ readOnlyObjectActions = []string{
+ "s3:GetObject",
+ // Add more object level read actions here.
+ }
+)
+
+// subsetActions returns true if the first array is completely
+// contained in the second array. There must be at least
+// the same number of duplicate values in second as there
+// are in first.
+func subsetActions(first, second []string) bool {
+ set := make(map[string]int)
+ for _, value := range second {
+ set[value]++
+ }
+ for _, value := range first {
+ if count, found := set[value]; !found {
+ return false
+ } else if count < 1 {
+ return false
+ } else {
+ set[value] = count - 1
+ }
+ }
+ return true
+}
+
+// Verifies if we have read/write policy set at bucketName, objectPrefix.
+func isBucketPolicyReadWrite(statements []Statement, bucketName string, objectPrefix string) bool {
+ var commonActions, readWrite bool
+ sort.Strings(readWriteBucketActions)
+ sort.Strings(readWriteObjectActions)
+ for _, statement := range statements {
+ for _, resource := range statement.Resources {
+ if resource == awsResourcePrefix+bucketName {
+ if subsetActions(readWriteBucketActions, statement.Actions) {
+ commonActions = true
+ continue
+ }
+ } else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
+ if subsetActions(readWriteObjectActions, statement.Actions) {
+ readWrite = true
+ }
+ }
+ }
+ }
+ return commonActions && readWrite
+}
+
+// Verifies if we have write only policy set at bucketName, objectPrefix.
+func isBucketPolicyWriteOnly(statements []Statement, bucketName string, objectPrefix string) bool {
+ var commonActions, writeOnly bool
+ sort.Strings(writeOnlyBucketActions)
+ sort.Strings(writeOnlyObjectActions)
+ for _, statement := range statements {
+ for _, resource := range statement.Resources {
+ if resource == awsResourcePrefix+bucketName {
+ if subsetActions(writeOnlyBucketActions, statement.Actions) {
+ commonActions = true
+ continue
+ }
+ } else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
+ if subsetActions(writeOnlyObjectActions, statement.Actions) {
+ writeOnly = true
+ }
+ }
+ }
+ }
+ return commonActions && writeOnly
+}
+
+// Verifies if we have read only policy set at bucketName, objectPrefix.
+func isBucketPolicyReadOnly(statements []Statement, bucketName string, objectPrefix string) bool {
+ var commonActions, readOnly bool
+ sort.Strings(readOnlyBucketActions)
+ sort.Strings(readOnlyObjectActions)
+ for _, statement := range statements {
+ for _, resource := range statement.Resources {
+ if resource == awsResourcePrefix+bucketName {
+ if subsetActions(readOnlyBucketActions, statement.Actions) {
+ commonActions = true
+ continue
+ }
+ } else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
+ if subsetActions(readOnlyObjectActions, statement.Actions) {
+ readOnly = true
+ break
+ }
+ }
+ }
+ }
+ return commonActions && readOnly
+}
+
+// Removes read write bucket policy if found.
+func removeBucketPolicyStatementReadWrite(statements []Statement, bucketName string, objectPrefix string) []Statement {
+ var newStatements []Statement
+ for _, statement := range statements {
+ for _, resource := range statement.Resources {
+ if resource == awsResourcePrefix+bucketName {
+ var newActions []string
+ for _, action := range statement.Actions {
+ switch action {
+ case "s3:GetBucketLocation", "s3:ListBucket", "s3:ListBucketMultipartUploads":
+ continue
+ }
+ newActions = append(newActions, action)
+ }
+ statement.Actions = newActions
+ } else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
+ var newActions []string
+ for _, action := range statement.Actions {
+ switch action {
+ case "s3:PutObject", "s3:AbortMultipartUpload", "s3:ListMultipartUploadParts", "s3:DeleteObject", "s3:GetObject":
+ continue
+ }
+ newActions = append(newActions, action)
+ }
+ statement.Actions = newActions
+ }
+ }
+ if len(statement.Actions) != 0 {
+ newStatements = append(newStatements, statement)
+ }
+ }
+ return newStatements
+}
+
+// Removes write only bucket policy if found.
+func removeBucketPolicyStatementWriteOnly(statements []Statement, bucketName string, objectPrefix string) []Statement {
+ var newStatements []Statement
+ for _, statement := range statements {
+ for _, resource := range statement.Resources {
+ if resource == awsResourcePrefix+bucketName {
+ var newActions []string
+ for _, action := range statement.Actions {
+ switch action {
+ case "s3:GetBucketLocation", "s3:ListBucketMultipartUploads":
+ continue
+ }
+ newActions = append(newActions, action)
+ }
+ statement.Actions = newActions
+ } else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
+ var newActions []string
+ for _, action := range statement.Actions {
+ switch action {
+ case "s3:PutObject", "s3:AbortMultipartUpload", "s3:ListMultipartUploadParts", "s3:DeleteObject":
+ continue
+ }
+ newActions = append(newActions, action)
+ }
+ statement.Actions = newActions
+ }
+ }
+ if len(statement.Actions) != 0 {
+ newStatements = append(newStatements, statement)
+ }
+ }
+ return newStatements
+}
+
+// Removes read only bucket policy if found.
+func removeBucketPolicyStatementReadOnly(statements []Statement, bucketName string, objectPrefix string) []Statement {
+ var newStatements []Statement
+ for _, statement := range statements {
+ for _, resource := range statement.Resources {
+ if resource == awsResourcePrefix+bucketName {
+ var newActions []string
+ for _, action := range statement.Actions {
+ switch action {
+ case "s3:GetBucketLocation", "s3:ListBucket":
+ continue
+ }
+ newActions = append(newActions, action)
+ }
+ statement.Actions = newActions
+ } else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
+ var newActions []string
+ for _, action := range statement.Actions {
+ if action == "s3:GetObject" {
+ continue
+ }
+ newActions = append(newActions, action)
+ }
+ statement.Actions = newActions
+ }
+ }
+ if len(statement.Actions) != 0 {
+ newStatements = append(newStatements, statement)
+ }
+ }
+ return newStatements
+}
+
+// Remove bucket policies based on the type.
+func removeBucketPolicyStatement(statements []Statement, bucketName string, objectPrefix string) []Statement {
+ // Verify type of policy to be removed.
+ if isBucketPolicyReadWrite(statements, bucketName, objectPrefix) {
+ statements = removeBucketPolicyStatementReadWrite(statements, bucketName, objectPrefix)
+ } else if isBucketPolicyWriteOnly(statements, bucketName, objectPrefix) {
+ statements = removeBucketPolicyStatementWriteOnly(statements, bucketName, objectPrefix)
+ } else if isBucketPolicyReadOnly(statements, bucketName, objectPrefix) {
+ statements = removeBucketPolicyStatementReadOnly(statements, bucketName, objectPrefix)
+ }
+ return statements
+}
+
+// Unmarshals bucket policy byte array into a structured bucket access policy.
+func unMarshalBucketPolicy(bucketPolicyBuf []byte) (BucketAccessPolicy, error) {
+ // Untyped lazy JSON struct.
+ type bucketAccessPolicyUntyped struct {
+ Version string
+ Statement []struct {
+ Sid string
+ Effect string
+ Principal struct {
+ AWS json.RawMessage
+ }
+ Action json.RawMessage
+ Resource json.RawMessage
+ Condition map[string]map[string]string
+ }
+ }
+ var policyUntyped = bucketAccessPolicyUntyped{}
+ // Unmarshal incoming policy into an untyped structure, to be
+ // evaluated lazily later.
+ err := json.Unmarshal(bucketPolicyBuf, &policyUntyped)
+ if err != nil {
+ return BucketAccessPolicy{}, err
+ }
+ var policy = BucketAccessPolicy{}
+ policy.Version = policyUntyped.Version
+ for _, stmtUntyped := range policyUntyped.Statement {
+ statement := Statement{}
+ // These are properly typed messages.
+ statement.Sid = stmtUntyped.Sid
+ statement.Effect = stmtUntyped.Effect
+ statement.Conditions = stmtUntyped.Condition
+
+ // AWS user can have two different types, either as []string
+ // or as a regular 'string'. We fall back to doing this
+ // since there is no other easier way to fix this.
+ err = json.Unmarshal(stmtUntyped.Principal.AWS, &statement.Principal.AWS)
+ if err != nil {
+ var awsUser string
+ err = json.Unmarshal(stmtUntyped.Principal.AWS, &awsUser)
+ if err != nil {
+ return BucketAccessPolicy{}, err
+ }
+ statement.Principal.AWS = []string{awsUser}
+ }
+ // Actions can have two different types, either as []string
+ // or as a regular 'string'. We fall back to doing this
+ // since there is no other easier way to fix this.
+ err = json.Unmarshal(stmtUntyped.Action, &statement.Actions)
+ if err != nil {
+ var action string
+ err = json.Unmarshal(stmtUntyped.Action, &action)
+ if err != nil {
+ return BucketAccessPolicy{}, err
+ }
+ statement.Actions = []string{action}
+ }
+ // Resources can have two different types, either as []string
+ // or as a regular 'string'. We fall back to doing this
+ // since there is no other easier way to fix this.
+ err = json.Unmarshal(stmtUntyped.Resource, &statement.Resources)
+ if err != nil {
+ var resource string
+ err = json.Unmarshal(stmtUntyped.Resource, &resource)
+ if err != nil {
+ return BucketAccessPolicy{}, err
+ }
+ statement.Resources = []string{resource}
+ }
+ // Append the typed policy.
+ policy.Statements = append(policy.Statements, statement)
+ }
+ return policy, nil
+}
+
+// Identifies the policy type from policy Statements.
+func identifyPolicyType(policy BucketAccessPolicy, bucketName, objectPrefix string) (bucketPolicy BucketPolicy) {
+ if policy.Statements == nil {
+ return BucketPolicyNone
+ }
+ if isBucketPolicyReadWrite(policy.Statements, bucketName, objectPrefix) {
+ return BucketPolicyReadWrite
+ } else if isBucketPolicyWriteOnly(policy.Statements, bucketName, objectPrefix) {
+ return BucketPolicyWriteOnly
+ } else if isBucketPolicyReadOnly(policy.Statements, bucketName, objectPrefix) {
+ return BucketPolicyReadOnly
+ }
+ return BucketPolicyNone
+}
+
+// Generate policy statements for various bucket policies.
+// refer to http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
+// for more details about statement fields.
+func generatePolicyStatement(bucketPolicy BucketPolicy, bucketName, objectPrefix string) ([]Statement, error) {
+ if !bucketPolicy.isValidBucketPolicy() {
+ return []Statement{}, ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", bucketPolicy))
+ }
+ var statements []Statement
+ if bucketPolicy == BucketPolicyNone {
+ return []Statement{}, nil
+ } else if bucketPolicy == BucketPolicyReadWrite {
+ // Get read-write policy.
+ statements = setReadWriteStatement(bucketName, objectPrefix)
+ } else if bucketPolicy == BucketPolicyReadOnly {
+ // Get read only policy.
+ statements = setReadOnlyStatement(bucketName, objectPrefix)
+ } else if bucketPolicy == BucketPolicyWriteOnly {
+ // Return Write only policy.
+ statements = setWriteOnlyStatement(bucketName, objectPrefix)
+ }
+ return statements, nil
+}
+
+// Obtain statements for read-write BucketPolicy.
+func setReadWriteStatement(bucketName, objectPrefix string) []Statement {
+ bucketResourceStatement := Statement{}
+ objectResourceStatement := Statement{}
+ statements := []Statement{}
+
+ bucketResourceStatement.Effect = "Allow"
+ bucketResourceStatement.Principal.AWS = []string{"*"}
+ bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
+ bucketResourceStatement.Actions = readWriteBucketActions
+ objectResourceStatement.Effect = "Allow"
+ objectResourceStatement.Principal.AWS = []string{"*"}
+ objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
+ objectResourceStatement.Actions = readWriteObjectActions
+ // Save the read write policy.
+ statements = append(statements, bucketResourceStatement, objectResourceStatement)
+ return statements
+}
+
+// Obtain statements for read only BucketPolicy.
+func setReadOnlyStatement(bucketName, objectPrefix string) []Statement {
+ bucketResourceStatement := Statement{}
+ objectResourceStatement := Statement{}
+ statements := []Statement{}
+
+ bucketResourceStatement.Effect = "Allow"
+ bucketResourceStatement.Principal.AWS = []string{"*"}
+ bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
+ bucketResourceStatement.Actions = readOnlyBucketActions
+ objectResourceStatement.Effect = "Allow"
+ objectResourceStatement.Principal.AWS = []string{"*"}
+ objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
+ objectResourceStatement.Actions = readOnlyObjectActions
+ // Save the read only policy.
+ statements = append(statements, bucketResourceStatement, objectResourceStatement)
+ return statements
+}
+
+// Obtain statements for write only BucketPolicy.
+func setWriteOnlyStatement(bucketName, objectPrefix string) []Statement {
+ bucketResourceStatement := Statement{}
+ objectResourceStatement := Statement{}
+ statements := []Statement{}
+ // Write only policy.
+ bucketResourceStatement.Effect = "Allow"
+ bucketResourceStatement.Principal.AWS = []string{"*"}
+ bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
+ bucketResourceStatement.Actions = writeOnlyBucketActions
+ objectResourceStatement.Effect = "Allow"
+ objectResourceStatement.Principal.AWS = []string{"*"}
+ objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
+ objectResourceStatement.Actions = writeOnlyObjectActions
+ // Save the write only policy.
+ statements = append(statements, bucketResourceStatement, objectResourceStatement)
+ return statements
+}
diff --git a/vendor/src/github.com/minio/minio-go/bucket-policy_test.go b/vendor/src/github.com/minio/minio-go/bucket-policy_test.go
new file mode 100644
index 000000000..483da597a
--- /dev/null
+++ b/vendor/src/github.com/minio/minio-go/bucket-policy_test.go
@@ -0,0 +1,515 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "testing"
+)
+
+// Validates bucket policy string.
+func TestIsValidBucketPolicy(t *testing.T) {
+ testCases := []struct {
+ inputPolicy BucketPolicy
+ expectedResult bool
+ }{
+ // valid inputs.
+ {BucketPolicy("none"), true},
+ {BucketPolicy("readonly"), true},
+ {BucketPolicy("readwrite"), true},
+ {BucketPolicy("writeonly"), true},
+ // invalid input.
+ {BucketPolicy("readwriteonly"), false},
+ {BucketPolicy("writeread"), false},
+ }
+
+ for i, testCase := range testCases {
+ actualResult := testCase.inputPolicy.isValidBucketPolicy()
+ if testCase.expectedResult != actualResult {
+ t.Errorf("Test %d: Expected IsValidBucket policy to be '%v' for policy \"%s\", but instead found it to be '%v'", i+1, testCase.expectedResult, testCase.inputPolicy, actualResult)
+ }
+ }
+}
+
+// Tests whether first array is completely contained in second array.
+func TestSubsetActions(t *testing.T) {
+ testCases := []struct {
+ firstArray []string
+ secondArray []string
+
+ expectedResult bool
+ }{
+ {[]string{"aaa", "bbb"}, []string{"ccc", "bbb"}, false},
+ {[]string{"aaa", "bbb"}, []string{"aaa", "ccc"}, false},
+ {[]string{"aaa", "bbb"}, []string{"aaa", "bbb"}, true},
+ {[]string{"aaa", "bbb"}, []string{"aaa", "bbb", "ccc"}, true},
+ {[]string{"aaa", "bbb", "aaa"}, []string{"aaa", "bbb", "ccc"}, false},
+ {[]string{"aaa", "bbb", "aaa"}, []string{"aaa", "bbb", "bbb", "aaa"}, true},
+ {[]string{"aaa", "bbb", "aaa"}, []string{"aaa", "bbb"}, false},
+ {[]string{"aaa", "bbb", "aaa"}, []string{"aaa", "bbb", "aaa", "bbb", "ccc"}, true},
+ }
+ for i, testCase := range testCases {
+ actualResult := subsetActions(testCase.firstArray, testCase.secondArray)
+ if testCase.expectedResult != actualResult {
+ t.Errorf("Test %d: First array '%v' is not contained in second array '%v'", i+1, testCase.firstArray, testCase.secondArray)
+ }
+ }
+
+}
+
+// Tests validate Bucket Policy type identifier.
+func TestIdentifyPolicyType(t *testing.T) {
+ testCases := []struct {
+ inputPolicy BucketAccessPolicy
+ bucketName string
+ objName string
+
+ expectedPolicy BucketPolicy
+ }{
+ {BucketAccessPolicy{Version: "2012-10-17"}, "my-bucket", "", BucketPolicyNone},
+ }
+ for i, testCase := range testCases {
+ actualBucketPolicy := identifyPolicyType(testCase.inputPolicy, testCase.bucketName, testCase.objName)
+ if testCase.expectedPolicy != actualBucketPolicy {
+ t.Errorf("Test %d: Expected bucket policy to be '%v', but instead got '%v'", i+1, testCase.expectedPolicy, actualBucketPolicy)
+ }
+ }
+}
+
+// Test validate Resource Statement Generator.
+func TestGeneratePolicyStatement(t *testing.T) {
+
+ testCases := []struct {
+ bucketPolicy BucketPolicy
+ bucketName string
+ objectPrefix string
+ expectedStatements []Statement
+
+ shouldPass bool
+ err error
+ }{
+ {BucketPolicy("my-policy"), "my-bucket", "", []Statement{}, false, ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", BucketPolicy("my-policy")))},
+ {BucketPolicyNone, "my-bucket", "", []Statement{}, true, nil},
+ {BucketPolicyReadOnly, "read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), true, nil},
+ {BucketPolicyWriteOnly, "write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), true, nil},
+ {BucketPolicyReadWrite, "read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), true, nil},
+ }
+ for i, testCase := range testCases {
+ actualStatements, err := generatePolicyStatement(testCase.bucketPolicy, testCase.bucketName, testCase.objectPrefix)
+
+ if err != nil && testCase.shouldPass {
+ t.Errorf("Test %d: Expected to pass, but failed with: %s", i+1, err.Error())
+ }
+
+ if err == nil && !testCase.shouldPass {
+ t.Errorf("Test %d: Expected to fail with \"%s\", but passed instead", i+1, testCase.err.Error())
+ }
+ // Failed as expected, but does it fail for the expected reason.
+ if err != nil && !testCase.shouldPass {
+ if err.Error() != testCase.err.Error() {
+ t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
+ }
+ }
+ // Test passes as expected, but the output values are verified for correctness here.
+ if err == nil && testCase.shouldPass {
+ if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
+ t.Errorf("Test %d: The expected statements from resource statement generator doesn't match the actual statements", i+1)
+ }
+ }
+ }
+}
+
+// Tests validating read only statement generator.
+func TestsetReadOnlyStatement(t *testing.T) {
+
+ expectedReadOnlyStatement := func(bucketName, objectPrefix string) []Statement {
+ bucketResourceStatement := &Statement{}
+ objectResourceStatement := &Statement{}
+ statements := []Statement{}
+
+ bucketResourceStatement.Effect = "Allow"
+ bucketResourceStatement.Principal.AWS = []string{"*"}
+ bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
+ bucketResourceStatement.Actions = readOnlyBucketActions
+ objectResourceStatement.Effect = "Allow"
+ objectResourceStatement.Principal.AWS = []string{"*"}
+ objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
+ objectResourceStatement.Actions = readOnlyObjectActions
+ // Save the read only policy.
+ statements = append(statements, *bucketResourceStatement, *objectResourceStatement)
+ return statements
+ }
+
+ testCases := []struct {
+ // inputs.
+ bucketName string
+ objectPrefix string
+ // expected result.
+ expectedStatements []Statement
+ }{
+ {"my-bucket", "", expectedReadOnlyStatement("my-bucket", "")},
+ {"my-bucket", "Asia/", expectedReadOnlyStatement("my-bucket", "Asia/")},
+ {"my-bucket", "Asia/India", expectedReadOnlyStatement("my-bucket", "Asia/India")},
+ }
+
+ for i, testCase := range testCases {
+ actualStaments := setReadOnlyStatement(testCase.bucketName, testCase.objectPrefix)
+ if !reflect.DeepEqual(testCase.expectedStatements, actualStaments) {
+ t.Errorf("Test %d: The expected statements from resource statement generator doesn't match the actual statements", i+1)
+ }
+ }
+}
+
+// Tests validating write only statement generator.
+func TestsetWriteOnlyStatement(t *testing.T) {
+
+ expectedWriteOnlyStatement := func(bucketName, objectPrefix string) []Statement {
+ bucketResourceStatement := &Statement{}
+ objectResourceStatement := &Statement{}
+ statements := []Statement{}
+ // Write only policy.
+ bucketResourceStatement.Effect = "Allow"
+ bucketResourceStatement.Principal.AWS = []string{"*"}
+ bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
+ bucketResourceStatement.Actions = writeOnlyBucketActions
+ objectResourceStatement.Effect = "Allow"
+ objectResourceStatement.Principal.AWS = []string{"*"}
+ objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
+ objectResourceStatement.Actions = writeOnlyObjectActions
+ // Save the write only policy.
+ statements = append(statements, *bucketResourceStatement, *objectResourceStatement)
+ return statements
+ }
+ testCases := []struct {
+ // inputs.
+ bucketName string
+ objectPrefix string
+ // expected result.
+ expectedStatements []Statement
+ }{
+ {"my-bucket", "", expectedWriteOnlyStatement("my-bucket", "")},
+ {"my-bucket", "Asia/", expectedWriteOnlyStatement("my-bucket", "Asia/")},
+ {"my-bucket", "Asia/India", expectedWriteOnlyStatement("my-bucket", "Asia/India")},
+ }
+
+ for i, testCase := range testCases {
+ actualStaments := setWriteOnlyStatement(testCase.bucketName, testCase.objectPrefix)
+ if !reflect.DeepEqual(testCase.expectedStatements, actualStaments) {
+ t.Errorf("Test %d: The expected statements from resource statement generator doesn't match the actual statements", i+1)
+ }
+ }
+}
+
+// Tests validating read-write statement generator.
+func TestsetReadWriteStatement(t *testing.T) {
+ // Obtain statements for read-write BucketPolicy.
+ expectedReadWriteStatement := func(bucketName, objectPrefix string) []Statement {
+ bucketResourceStatement := &Statement{}
+ objectResourceStatement := &Statement{}
+ statements := []Statement{}
+
+ bucketResourceStatement.Effect = "Allow"
+ bucketResourceStatement.Principal.AWS = []string{"*"}
+ bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
+ bucketResourceStatement.Actions = readWriteBucketActions
+ objectResourceStatement.Effect = "Allow"
+ objectResourceStatement.Principal.AWS = []string{"*"}
+ objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
+ objectResourceStatement.Actions = readWriteObjectActions
+ // Save the read write policy.
+ statements = append(statements, *bucketResourceStatement, *objectResourceStatement)
+ return statements
+ }
+
+ testCases := []struct {
+ // inputs.
+ bucketName string
+ objectPrefix string
+ // expected result.
+ expectedStatements []Statement
+ }{
+ {"my-bucket", "", expectedReadWriteStatement("my-bucket", "")},
+ {"my-bucket", "Asia/", expectedReadWriteStatement("my-bucket", "Asia/")},
+ {"my-bucket", "Asia/India", expectedReadWriteStatement("my-bucket", "Asia/India")},
+ }
+
+ for i, testCase := range testCases {
+ actualStaments := setReadWriteStatement(testCase.bucketName, testCase.objectPrefix)
+ if !reflect.DeepEqual(testCase.expectedStatements, actualStaments) {
+ t.Errorf("Test %d: The expected statements from resource statement generator doesn't match the actual statements", i+1)
+ }
+ }
+}
+
+// Tests validate Unmarshalling of BucketAccessPolicy.
+func TestUnMarshalBucketPolicy(t *testing.T) {
+
+ bucketAccesPolicies := []BucketAccessPolicy{
+ {Version: "1.0"},
+ {Version: "1.0", Statements: setReadOnlyStatement("minio-bucket", "")},
+ {Version: "1.0", Statements: setReadWriteStatement("minio-bucket", "Asia/")},
+ {Version: "1.0", Statements: setWriteOnlyStatement("minio-bucket", "Asia/India/")},
+ }
+
+ testCases := []struct {
+ inputPolicy BucketAccessPolicy
+ // expected results.
+ expectedPolicy BucketAccessPolicy
+ err error
+ // Flag indicating whether the test should pass.
+ shouldPass bool
+ }{
+ {bucketAccesPolicies[0], bucketAccesPolicies[0], nil, true},
+ {bucketAccesPolicies[1], bucketAccesPolicies[1], nil, true},
+ {bucketAccesPolicies[2], bucketAccesPolicies[2], nil, true},
+ {bucketAccesPolicies[3], bucketAccesPolicies[3], nil, true},
+ }
+ for i, testCase := range testCases {
+ inputPolicyBytes, e := json.Marshal(testCase.inputPolicy)
+ if e != nil {
+ t.Fatalf("Test %d: Couldn't Marshal bucket policy", i+1)
+ }
+ actualAccessPolicy, err := unMarshalBucketPolicy(inputPolicyBytes)
+ if err != nil && testCase.shouldPass {
+ t.Errorf("Test %d: Expected to pass, but failed with: %s", i+1, err.Error())
+ }
+
+ if err == nil && !testCase.shouldPass {
+ t.Errorf("Test %d: Expected to fail with \"%s\", but passed instead", i+1, testCase.err.Error())
+ }
+ // Failed as expected, but does it fail for the expected reason.
+ if err != nil && !testCase.shouldPass {
+ if err.Error() != testCase.err.Error() {
+ t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
+ }
+ }
+ // Test passes as expected, but the output values are verified for correctness here.
+ if err == nil && testCase.shouldPass {
+ if !reflect.DeepEqual(testCase.expectedPolicy, actualAccessPolicy) {
+ t.Errorf("Test %d: The expected statements from resource statement generator doesn't match the actual statements", i+1)
+ }
+ }
+ }
+}
+
+// Statement.Action, Statement.Resource, Statement.Principal.AWS fields could be just string also.
+// Setting these values to just a string and testing the unMarshalBucketPolicy
+func TestUnMarshalBucketPolicyUntyped(t *testing.T) {
+ obtainRaw := func(v interface{}, t *testing.T) []byte {
+ rawData, e := json.Marshal(v)
+ if e != nil {
+ t.Fatal(e.Error())
+ }
+ return rawData
+ }
+
+ type untypedStatement struct {
+ Sid string
+ Effect string
+ Principal struct {
+ AWS json.RawMessage
+ }
+ Action json.RawMessage
+ Resource json.RawMessage
+ Condition map[string]map[string]string
+ }
+
+ type bucketAccessPolicyUntyped struct {
+ Version string
+ Statement []untypedStatement
+ }
+
+ statements := setReadOnlyStatement("my-bucket", "Asia/")
+ expectedBucketPolicy := BucketAccessPolicy{Statements: statements}
+ accessPolicyUntyped := bucketAccessPolicyUntyped{}
+ accessPolicyUntyped.Statement = make([]untypedStatement, 2)
+
+ accessPolicyUntyped.Statement[0].Effect = statements[0].Effect
+ accessPolicyUntyped.Statement[0].Principal.AWS = obtainRaw(statements[0].Principal.AWS, t)
+ accessPolicyUntyped.Statement[0].Action = obtainRaw(statements[0].Actions, t)
+ accessPolicyUntyped.Statement[0].Resource = obtainRaw(statements[0].Resources, t)
+
+	// Setting the values as plain strings (instead of lists) for statement 1.
+ accessPolicyUntyped.Statement[1].Effect = statements[1].Effect
+ accessPolicyUntyped.Statement[1].Principal.AWS = obtainRaw(statements[1].Principal.AWS[0], t)
+ accessPolicyUntyped.Statement[1].Action = obtainRaw(statements[1].Actions[0], t)
+ accessPolicyUntyped.Statement[1].Resource = obtainRaw(statements[1].Resources[0], t)
+
+ inputPolicyBytes := obtainRaw(accessPolicyUntyped, t)
+ actualAccessPolicy, err := unMarshalBucketPolicy(inputPolicyBytes)
+ if err != nil {
+ t.Fatal("Unmarshalling bucket policy from untyped statements failed")
+ }
+ if !reflect.DeepEqual(expectedBucketPolicy, actualAccessPolicy) {
+ t.Errorf("Expected BucketPolicy after unmarshalling untyped statements doesn't match the actual one")
+ }
+}
+
+// Tests validate removal of policy statement from the list of statements.
+func TestRemoveBucketPolicyStatement(t *testing.T) {
+ testCases := []struct {
+ bucketName string
+ objectPrefix string
+ inputStatements []Statement
+ }{
+ {"my-bucket", "", []Statement{}},
+ {"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", "")},
+ {"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", "")},
+ {"read-write-bucket", "", setReadWriteStatement("read-write-bucket", "")},
+ }
+ for i, testCase := range testCases {
+ actualStatements := removeBucketPolicyStatement(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
+ // empty statement is expected after the invocation of removeBucketPolicyStatement().
+ if len(actualStatements) != 0 {
+ t.Errorf("Test %d: The expected statements from resource statement generator doesn't match the actual statements", i+1)
+ }
+ }
+}
+
+// Tests validate removing of read only bucket statement.
+func TestRemoveBucketPolicyStatementReadOnly(t *testing.T) {
+ var emptyStatement []Statement
+ testCases := []struct {
+ bucketName string
+ objectPrefix string
+ inputStatements []Statement
+ expectedStatements []Statement
+ }{
+ {"my-bucket", "", []Statement{}, emptyStatement},
+ {"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), emptyStatement},
+ }
+ for i, testCase := range testCases {
+ actualStatements := removeBucketPolicyStatementReadOnly(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
+		// Validate that the returned statements match the expected ones.
+ if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
+ t.Errorf("Test %d: Expected policy statements doesn't match the actual one", i+1)
+ }
+ }
+}
+
+// Tests validate removing of write only bucket statement.
+func TestRemoveBucketPolicyStatementWriteOnly(t *testing.T) {
+ var emptyStatement []Statement
+ testCases := []struct {
+ bucketName string
+ objectPrefix string
+ inputStatements []Statement
+ expectedStatements []Statement
+ }{
+ {"my-bucket", "", []Statement{}, emptyStatement},
+ {"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), emptyStatement},
+ }
+ for i, testCase := range testCases {
+ actualStatements := removeBucketPolicyStatementWriteOnly(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
+		// Validate that the returned statements match the expected ones.
+ if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
+ t.Errorf("Test %d: Expected policy statements doesn't match the actual one", i+1)
+ }
+ }
+}
+
+// Tests validate removing of read-write bucket statement.
+func TestRemoveBucketPolicyStatementReadWrite(t *testing.T) {
+ var emptyStatement []Statement
+ testCases := []struct {
+ bucketName string
+ objectPrefix string
+ inputStatements []Statement
+ expectedStatements []Statement
+ }{
+ {"my-bucket", "", []Statement{}, emptyStatement},
+ {"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), emptyStatement},
+ }
+ for i, testCase := range testCases {
+ actualStatements := removeBucketPolicyStatementReadWrite(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
+		// Validate that the returned statements match the expected ones.
+ if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
+ t.Errorf("Test %d: Expected policy statements doesn't match the actual one", i+1)
+ }
+ }
+}
+
+// Tests validate whether the bucket policy is read only.
+func TestIsBucketPolicyReadOnly(t *testing.T) {
+ testCases := []struct {
+ bucketName string
+ objectPrefix string
+ inputStatements []Statement
+ // expected result.
+ expectedResult bool
+ }{
+ {"my-bucket", "", []Statement{}, false},
+ {"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), true},
+ {"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), false},
+ {"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), true},
+ }
+ for i, testCase := range testCases {
+ actualResult := isBucketPolicyReadOnly(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
+		// Validate the computed read-only flag against the expected result.
+ if testCase.expectedResult != actualResult {
+ t.Errorf("Test %d: Expected isBucketPolicyReadonly to '%v', but instead found '%v'", i+1, testCase.expectedResult, actualResult)
+ }
+ }
+}
+
+// Tests validate whether the bucket policy is read-write.
+func TestIsBucketPolicyReadWrite(t *testing.T) {
+ testCases := []struct {
+ bucketName string
+ objectPrefix string
+ inputStatements []Statement
+ // expected result.
+ expectedResult bool
+ }{
+ {"my-bucket", "", []Statement{}, false},
+ {"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), false},
+ {"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), false},
+ {"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), true},
+ }
+ for i, testCase := range testCases {
+ actualResult := isBucketPolicyReadWrite(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
+		// Validate the computed read-write flag against the expected result.
+ if testCase.expectedResult != actualResult {
+ t.Errorf("Test %d: Expected isBucketPolicyReadonly to '%v', but instead found '%v'", i+1, testCase.expectedResult, actualResult)
+ }
+ }
+}
+
+// Tests validate whether the bucket policy is read only.
+func TestIsBucketPolicyWriteOnly(t *testing.T) {
+ testCases := []struct {
+ bucketName string
+ objectPrefix string
+ inputStatements []Statement
+ // expected result.
+ expectedResult bool
+ }{
+ {"my-bucket", "", []Statement{}, false},
+ {"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), false},
+ {"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), true},
+ {"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), true},
+ }
+ for i, testCase := range testCases {
+ actualResult := isBucketPolicyWriteOnly(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
+		// Validate the computed write-only flag against the expected result.
+ if testCase.expectedResult != actualResult {
+ t.Errorf("Test %d: Expected isBucketPolicyReadonly to '%v', but instead found '%v'", i+1, testCase.expectedResult, actualResult)
+ }
+ }
+}
diff --git a/vendor/src/github.com/minio/minio-go/copy-conditions.go b/vendor/src/github.com/minio/minio-go/copy-conditions.go
new file mode 100644
index 000000000..9dd63f65e
--- /dev/null
+++ b/vendor/src/github.com/minio/minio-go/copy-conditions.go
@@ -0,0 +1,97 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "net/http"
+ "time"
+)
+
+// copyCondition explanation:
+// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html
+//
+// Example:
+//
+// copyCondition {
+// key: "x-amz-copy-if-modified-since",
+// value: "Tue, 15 Nov 1994 12:45:26 GMT",
+// }
+//
+type copyCondition struct {
+ key string
+ value string
+}
+
+// CopyConditions - copy conditions.
+type CopyConditions struct {
+ conditions []copyCondition
+}
+
+// NewCopyConditions - Instantiate new list of conditions.
+func NewCopyConditions() CopyConditions {
+ return CopyConditions{
+ conditions: make([]copyCondition, 0),
+ }
+}
+
+// SetMatchETag - set match etag.
+func (c CopyConditions) SetMatchETag(etag string) error {
+ if etag == "" {
+ return ErrInvalidArgument("ETag cannot be empty.")
+ }
+ c.conditions = append(c.conditions, copyCondition{
+ key: "x-amz-copy-source-if-match",
+ value: etag,
+ })
+ return nil
+}
+
+// SetMatchETagExcept - set match etag except.
+func (c CopyConditions) SetMatchETagExcept(etag string) error {
+ if etag == "" {
+ return ErrInvalidArgument("ETag cannot be empty.")
+ }
+ c.conditions = append(c.conditions, copyCondition{
+ key: "x-amz-copy-source-if-none-match",
+ value: etag,
+ })
+ return nil
+}
+
+// SetUnmodified - set unmodified time since.
+func (c CopyConditions) SetUnmodified(modTime time.Time) error {
+ if modTime.IsZero() {
+ return ErrInvalidArgument("Modified since cannot be empty.")
+ }
+ c.conditions = append(c.conditions, copyCondition{
+ key: "x-amz-copy-source-if-unmodified-since",
+ value: modTime.Format(http.TimeFormat),
+ })
+ return nil
+}
+
+// SetModified - set modified time since.
+func (c CopyConditions) SetModified(modTime time.Time) error {
+ if modTime.IsZero() {
+ return ErrInvalidArgument("Modified since cannot be empty.")
+ }
+ c.conditions = append(c.conditions, copyCondition{
+ key: "x-amz-copy-source-if-modified-since",
+ value: modTime.Format(http.TimeFormat),
+ })
+ return nil
+}
diff --git a/vendor/src/github.com/minio/minio-go/examples/play/bucketexists.go b/vendor/src/github.com/minio/minio-go/examples/play/bucketexists.go
deleted file mode 100644
index 0629d0a2d..000000000
--- a/vendor/src/github.com/minio/minio-go/examples/play/bucketexists.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
- "log"
-
- "github.com/minio/minio-go"
-)
-
-func main() {
- // Note: my-bucketname is a dummy value, please replace them with original value.
-
- // Requests are always secure by default. set inSecure=true to enable insecure access.
- // inSecure boolean is the last argument for New().
-
- // New provides a client object backend by automatically detected signature type based
- // on the provider.
- s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
- if err != nil {
- log.Fatalln(err)
- }
-
- err = s3Client.BucketExists("my-bucketname")
- if err != nil {
- log.Fatalln(err)
- }
-
- log.Println("Success")
-}
diff --git a/vendor/src/github.com/minio/minio-go/examples/play/fgetobject.go b/vendor/src/github.com/minio/minio-go/examples/play/fgetobject.go
deleted file mode 100644
index 57856a578..000000000
--- a/vendor/src/github.com/minio/minio-go/examples/play/fgetobject.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
- "log"
-
- "github.com/minio/minio-go"
-)
-
-func main() {
- // Note: my-bucketname, my-objectname and my-filename.csv are dummy values, please replace them with original values.
-
- // Requests are always secure by default. set inSecure=true to enable insecure access.
- // inSecure boolean is the last argument for New().
-
- // New provides a client object backend by automatically detected signature type based
- // on the provider.
- s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
- if err != nil {
- log.Fatalln(err)
- }
-
- if err := s3Client.FGetObject("bucket-name", "objectName", "fileName.csv"); err != nil {
- log.Fatalln(err)
- }
- log.Println("Successfully saved fileName.csv")
-}
diff --git a/vendor/src/github.com/minio/minio-go/examples/play/fputobject.go b/vendor/src/github.com/minio/minio-go/examples/play/fputobject.go
deleted file mode 100644
index 5f85b5c07..000000000
--- a/vendor/src/github.com/minio/minio-go/examples/play/fputobject.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
- "log"
-
- "github.com/minio/minio-go"
-)
-
-func main() {
- // Note: my-bucketname, my-objectname and my-filename.csv are dummy values, please replace them with original values.
-
- // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
- // This boolean value is the last argument for New().
-
- // New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically
- // determined based on the Endpoint value.
- s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
- if err != nil {
- log.Fatalln(err)
- }
-
- if _, err := s3Client.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", "application/csv"); err != nil {
- log.Fatalln(err)
- }
- log.Println("Successfully uploaded my-filename.csv")
-}
diff --git a/vendor/src/github.com/minio/minio-go/examples/play/getbucketacl.go b/vendor/src/github.com/minio/minio-go/examples/play/getbucketacl.go
deleted file mode 100644
index 202baa3a3..000000000
--- a/vendor/src/github.com/minio/minio-go/examples/play/getbucketacl.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
- "log"
-
- "github.com/minio/minio-go"
-)
-
-func main() {
- // Note: my-bucketname is a dummy value, please replace them with original value.
-
- // Requests are always secure by default. set inSecure=true to enable insecure access.
- // inSecure boolean is the last argument for New().
-
- // New provides a client object backend by automatically detected signature type based
- // on the provider.
- s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
- if err != nil {
- log.Fatalln(err)
- }
-
- acl, err := s3Client.GetBucketACL("my-bucketname")
- if err != nil {
- log.Fatalln(err)
- }
- log.Println(acl)
-
-}
diff --git a/vendor/src/github.com/minio/minio-go/examples/play/listbuckets.go b/vendor/src/github.com/minio/minio-go/examples/play/listbuckets.go
deleted file mode 100644
index b5e505ccc..000000000
--- a/vendor/src/github.com/minio/minio-go/examples/play/listbuckets.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
- "log"
-
- "github.com/minio/minio-go"
-)
-
-func main() {
- // Requests are always secure by default. set inSecure=true to enable insecure access.
- // inSecure boolean is the last argument for New().
-
- // New provides a client object backend by automatically detected signature type based
- // on the provider.
- s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
- if err != nil {
- log.Fatalln(err)
- }
-
- buckets, err := s3Client.ListBuckets()
- if err != nil {
- log.Fatalln(err)
- }
- for _, bucket := range buckets {
- log.Println(bucket)
- }
-}
diff --git a/vendor/src/github.com/minio/minio-go/examples/play/listincompleteuploads.go b/vendor/src/github.com/minio/minio-go/examples/play/listincompleteuploads.go
deleted file mode 100644
index 23219f21c..000000000
--- a/vendor/src/github.com/minio/minio-go/examples/play/listincompleteuploads.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
- "fmt"
- "log"
-
- "github.com/minio/minio-go"
-)
-
-func main() {
- // Note: my-bucketname and my-prefixname are dummy values, please replace them with original values.
-
- // Requests are always secure by default. set inSecure=true to enable insecure access.
- // inSecure boolean is the last argument for New().
-
- // New provides a client object backend by automatically detected signature type based
- // on the provider.
- s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
- if err != nil {
- log.Fatalln(err)
- }
-
- // Create a done channel to control 'ListObjects' go routine.
- doneCh := make(chan struct{})
-
- // Indicate to our routine to exit cleanly upon return.
- defer close(doneCh)
-
- // List all multipart uploads from a bucket-name with a matching prefix.
- for multipartObject := range s3Client.ListIncompleteUploads("my-bucketname", "my-prefixname", true, doneCh) {
- if multipartObject.Err != nil {
- fmt.Println(multipartObject.Err)
- return
- }
- fmt.Println(multipartObject)
- }
- return
-}
diff --git a/vendor/src/github.com/minio/minio-go/examples/play/listobjects.go b/vendor/src/github.com/minio/minio-go/examples/play/listobjects.go
deleted file mode 100644
index eaa57e9e1..000000000
--- a/vendor/src/github.com/minio/minio-go/examples/play/listobjects.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
- "fmt"
- "log"
-
- "github.com/minio/minio-go"
-)
-
-func main() {
- // Note: my-bucketname and my-prefixname are dummy values, please replace them with original values.
-
- // Requests are always secure by default. set inSecure=true to enable insecure access.
- // inSecure boolean is the last argument for New().
-
- // New provides a client object backend by automatically detected signature type based
- // on the provider.
- s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
- if err != nil {
- log.Fatalln(err)
- }
-
- // Create a done channel to control 'ListObjects' go routine.
- doneCh := make(chan struct{})
-
- // Indicate to our routine to exit cleanly upon return.
- defer close(doneCh)
-
- // List all objects from a bucket-name with a matching prefix.
- for object := range s3Client.ListObjects("my-bucketname", "my-prefixname", true, doneCh) {
- if object.Err != nil {
- fmt.Println(object.Err)
- return
- }
- fmt.Println(object)
- }
- return
-}
diff --git a/vendor/src/github.com/minio/minio-go/examples/play/makebucket.go b/vendor/src/github.com/minio/minio-go/examples/play/makebucket.go
deleted file mode 100644
index 52bebf1a5..000000000
--- a/vendor/src/github.com/minio/minio-go/examples/play/makebucket.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
- "log"
-
- "github.com/minio/minio-go"
-)
-
-func main() {
- // Note: my-bucketname is a dummy value, please replace them with original value.
-
- // Requests are always secure by default. set inSecure=true to enable insecure access.
- // inSecure boolean is the last argument for New().
-
- // New provides a client object backend by automatically detected signature type based
- // on the provider.
- s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
- if err != nil {
- log.Fatalln(err)
- }
-
- err = s3Client.MakeBucket("my-bucketname", minio.BucketACL("private"), "us-east-1")
- if err != nil {
- log.Fatalln(err)
- }
- log.Println("Success")
-}
diff --git a/vendor/src/github.com/minio/minio-go/examples/play/presignedgetobject.go b/vendor/src/github.com/minio/minio-go/examples/play/presignedgetobject.go
deleted file mode 100644
index 2ba878a97..000000000
--- a/vendor/src/github.com/minio/minio-go/examples/play/presignedgetobject.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
- "log"
- "time"
-
- "github.com/minio/minio-go"
-)
-
-func main() {
- // Note: my-bucketname and my-objectname are dummy values, please replace them with original values.
-
- // Requests are always secure by default. set inSecure=true to enable insecure access.
- // inSecure boolean is the last argument for New().
-
- // New provides a client object backend by automatically detected signature type based
- // on the provider.
- s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
- if err != nil {
- log.Fatalln(err)
- }
-
- presignedURL, err := s3Client.PresignedGetObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second)
- if err != nil {
- log.Fatalln(err)
- }
- log.Println(presignedURL)
-}
diff --git a/vendor/src/github.com/minio/minio-go/examples/play/presignedpostpolicy.go b/vendor/src/github.com/minio/minio-go/examples/play/presignedpostpolicy.go
deleted file mode 100644
index 65fa66ddf..000000000
--- a/vendor/src/github.com/minio/minio-go/examples/play/presignedpostpolicy.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
- "fmt"
- "log"
- "time"
-
- "github.com/minio/minio-go"
-)
-
-func main() {
- // Note: my-bucketname and my-objectname are dummy values, please replace them with original values.
-
- // Requests are always secure by default. set inSecure=true to enable insecure access.
- // inSecure boolean is the last argument for New().
-
- // New provides a client object backend by automatically detected signature type based
- // on the provider.
- s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
- if err != nil {
- log.Fatalln(err)
- }
-
- policy := minio.NewPostPolicy()
- policy.SetBucket("my-bucketname")
- policy.SetKey("my-objectname")
- policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
- m, err := s3Client.PresignedPostPolicy(policy)
- if err != nil {
- log.Fatalln(err)
- }
- fmt.Printf("curl ")
- for k, v := range m {
- fmt.Printf("-F %s=%s ", k, v)
- }
- fmt.Printf("-F file=@/etc/bashrc ")
- fmt.Printf("https://play.minio.io:9002/my-bucketname\n")
-}
diff --git a/vendor/src/github.com/minio/minio-go/examples/play/presignedputobject.go b/vendor/src/github.com/minio/minio-go/examples/play/presignedputobject.go
deleted file mode 100644
index b55f721f7..000000000
--- a/vendor/src/github.com/minio/minio-go/examples/play/presignedputobject.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
- "log"
- "time"
-
- "github.com/minio/minio-go"
-)
-
-func main() {
- // Note: my-bucketname and my-objectname are dummy values, please replace them with original values.
-
- // Requests are always secure by default. set inSecure=true to enable insecure access.
- // inSecure boolean is the last argument for New().
-
- // New provides a client object backend by automatically detected signature type based
- // on the provider.
- s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
- if err != nil {
- log.Fatalln(err)
- }
-
- presignedURL, err := s3Client.PresignedPutObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second)
- if err != nil {
- log.Fatalln(err)
- }
- log.Println(presignedURL)
-}
diff --git a/vendor/src/github.com/minio/minio-go/examples/play/putobject.go b/vendor/src/github.com/minio/minio-go/examples/play/putobject.go
deleted file mode 100644
index 073f75870..000000000
--- a/vendor/src/github.com/minio/minio-go/examples/play/putobject.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
- "log"
- "os"
-
- "github.com/minio/minio-go"
-)
-
-func main() {
- // Note: my-bucketname, my-objectname and my-testfile are dummy values, please replace them with original values.
-
- // Requests are always secure by default. set inSecure=true to enable insecure access.
- // inSecure boolean is the last argument for New().
-
- // New provides a client object backend by automatically detected signature type based
- // on the provider.
- s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
- if err != nil {
- log.Fatalln(err)
- }
-
- object, err := os.Open("my-testfile")
- if err != nil {
- log.Fatalln(err)
- }
- defer object.Close()
-
- n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, "application/octet-stream")
- if err != nil {
- log.Fatalln(err)
- }
- log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.")
-}
diff --git a/vendor/src/github.com/minio/minio-go/examples/play/removebucket.go b/vendor/src/github.com/minio/minio-go/examples/play/removebucket.go
deleted file mode 100644
index 1d2d03ba3..000000000
--- a/vendor/src/github.com/minio/minio-go/examples/play/removebucket.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
- "log"
-
- "github.com/minio/minio-go"
-)
-
-func main() {
- // Note: my-bucketname is a dummy value, please replace them with original value.
-
- // Requests are always secure by default. set inSecure=true to enable insecure access.
- // inSecure boolean is the last argument for New().
-
- // New provides a client object backend by automatically detected signature type based
- // on the provider.
- s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
- if err != nil {
- log.Fatalln(err)
- }
- // This operation will only work if your bucket is empty.
- err = s3Client.RemoveBucket("my-bucketname")
- if err != nil {
- log.Fatalln(err)
- }
- log.Println("Success")
-
-}
diff --git a/vendor/src/github.com/minio/minio-go/examples/play/removeincompleteupload.go b/vendor/src/github.com/minio/minio-go/examples/play/removeincompleteupload.go
deleted file mode 100644
index 458a4c450..000000000
--- a/vendor/src/github.com/minio/minio-go/examples/play/removeincompleteupload.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
- "log"
-
- "github.com/minio/minio-go"
-)
-
-func main() {
- // Note: my-bucketname and my-objectname are dummy values, please replace them with original values.
-
- // Requests are always secure by default. set inSecure=true to enable insecure access.
- // inSecure boolean is the last argument for New().
-
- // New provides a client object backend by automatically detected signature type based
- // on the provider.
- s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
- if err != nil {
- log.Fatalln(err)
- }
-
- for err := range s3Client.RemoveIncompleteUpload("my-bucketname", "my-objectname") {
- if err != nil {
- log.Fatalln(err)
- }
- }
- log.Println("Success")
-}
diff --git a/vendor/src/github.com/minio/minio-go/examples/play/removeobject.go b/vendor/src/github.com/minio/minio-go/examples/play/removeobject.go
deleted file mode 100644
index 2301a77de..000000000
--- a/vendor/src/github.com/minio/minio-go/examples/play/removeobject.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
- "log"
-
- "github.com/minio/minio-go"
-)
-
-func main() {
- // Note: my-bucketname and my-objectname are dummy values, please replace them with original values.
-
- // Requests are always secure by default. set inSecure=true to enable insecure access.
- // inSecure boolean is the last argument for New().
-
- // New provides a client object backend by automatically detected signature type based
- // on the provider.
- s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
- if err != nil {
- log.Fatalln(err)
- }
- err = s3Client.RemoveObject("my-bucketname", "my-objectname")
- if err != nil {
- log.Fatalln(err)
- }
- log.Println("Success")
-}
diff --git a/vendor/src/github.com/minio/minio-go/examples/play/setbucketacl.go b/vendor/src/github.com/minio/minio-go/examples/play/setbucketacl.go
deleted file mode 100644
index 7893018f7..000000000
--- a/vendor/src/github.com/minio/minio-go/examples/play/setbucketacl.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
- "log"
-
- "github.com/minio/minio-go"
-)
-
-func main() {
- // Note: my-bucketname is a dummy value, please replace them with original value.
-
- // Requests are always secure by default. set inSecure=true to enable insecure access.
- // inSecure boolean is the last argument for New().
-
- // New provides a client object backend by automatically detected signature type based
- // on the provider.
- s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
- if err != nil {
- log.Fatalln(err)
- }
-
- err = s3Client.SetBucketACL("my-bucketname", minio.BucketACL("public-read-write"))
- if err != nil {
- log.Fatalln(err)
- }
-
-}
diff --git a/vendor/src/github.com/minio/minio-go/examples/play/statobject.go b/vendor/src/github.com/minio/minio-go/examples/play/statobject.go
deleted file mode 100644
index 8f24460ab..000000000
--- a/vendor/src/github.com/minio/minio-go/examples/play/statobject.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
- "log"
-
- "github.com/minio/minio-go"
-)
-
-func main() {
- // Note: my-bucketname and my-objectname are dummy values, please replace them with original values.
-
- // Requests are always secure by default. set inSecure=true to enable insecure access.
- // inSecure boolean is the last argument for New().
-
- // New provides a client object backend by automatically detected signature type based
- // on the provider.
- s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
- if err != nil {
- log.Fatalln(err)
- }
- stat, err := s3Client.StatObject("my-bucketname", "my-objectname")
- if err != nil {
- log.Fatalln(err)
- }
- log.Println(stat)
-}
diff --git a/vendor/src/github.com/minio/minio-go/examples/s3/copyobject.go b/vendor/src/github.com/minio/minio-go/examples/s3/copyobject.go
new file mode 100644
index 000000000..5517c2e98
--- /dev/null
+++ b/vendor/src/github.com/minio/minio-go/examples/s3/copyobject.go
@@ -0,0 +1,67 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+ "time"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
+ // my-objectname are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // Enable trace.
+ // s3Client.TraceOn(os.Stderr)
+
+ // All following conditions are allowed and can be combined together.
+
+ // Set copy conditions.
+ var copyConds = minio.NewCopyConditions()
+ // Set modified condition, copy object modified since 2014 April.
+ copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+
+ // Set unmodified condition, copy object unmodified since 2014 April.
+ // copyConds.SetUnmodified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+
+ // Set matching ETag condition, copy object which matches the following ETag.
+ // copyConds.SetMatchETag("31624deb84149d2f8ef9c385918b653a")
+
+ // Set matching ETag except condition, copy object which does not match the following ETag.
+ // copyConds.SetMatchETagExcept("31624deb84149d2f8ef9c385918b653a")
+
+ // Initiate copy object.
+ err = s3Client.CopyObject("my-bucketname", "my-objectname", "/my-sourcebucketname/my-sourceobjectname", copyConds)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ log.Println("Copied source object /my-sourcebucketname/my-sourceobjectname to destination /my-bucketname/my-objectname Successfully.")
+}
diff --git a/vendor/src/github.com/minio/minio-go/examples/play/getobject.go b/vendor/src/github.com/minio/minio-go/examples/s3/getbucketpolicy.go
similarity index 61%
rename from vendor/src/github.com/minio/minio-go/examples/play/getobject.go
rename to vendor/src/github.com/minio/minio-go/examples/s3/getbucketpolicy.go
index 96b584253..843954f49 100644
--- a/vendor/src/github.com/minio/minio-go/examples/play/getobject.go
+++ b/vendor/src/github.com/minio/minio-go/examples/s3/getbucketpolicy.go
@@ -1,7 +1,7 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,44 +19,35 @@
package main
import (
- "io"
"log"
- "os"
"github.com/minio/minio-go"
)
func main() {
- // Note: my-bucketname, my-objectname and my-testfile are dummy values, please replace them with original values.
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
+ // dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically
// determined based on the Endpoint value.
- s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
- reader, err := s3Client.GetObject("my-bucketname", "my-objectname")
+ // s3Client.TraceOn(os.Stderr)
+
+ policy, err := s3Client.GetBucketPolicy("my-bucketname", "my-objectprefix")
if err != nil {
log.Fatalln(err)
}
- defer reader.Close()
-
- localFile, err := os.Create("my-testfile")
- if err != nil {
- log.Fatalln(err)
- }
- defer localfile.Close()
-
- stat, err := reader.Stat()
- if err != nil {
- log.Fatalln(err)
- }
-
- if _, err := io.CopyN(localFile, reader, stat.Size); err != nil {
- log.Fatalln(err)
- }
+ // Description of policy output.
+ // "none" - The specified bucket does not have a bucket policy.
+ // "readonly" - Read only operations are allowed.
+ // "writeonly" - Write only operations are allowed.
+ // "readwrite" - both read and write operations are allowed, the bucket is public.
+ log.Println("Success - ", policy)
}
diff --git a/vendor/src/github.com/minio/minio-go/examples/s3/getobject.go b/vendor/src/github.com/minio/minio-go/examples/s3/getobject.go
index 9413dc5e5..f49c71223 100644
--- a/vendor/src/github.com/minio/minio-go/examples/s3/getobject.go
+++ b/vendor/src/github.com/minio/minio-go/examples/s3/getobject.go
@@ -50,7 +50,7 @@ func main() {
if err != nil {
log.Fatalln(err)
}
- defer localfile.Close()
+ defer localFile.Close()
stat, err := reader.Stat()
if err != nil {
diff --git a/vendor/src/github.com/minio/minio-go/examples/s3/makebucket.go b/vendor/src/github.com/minio/minio-go/examples/s3/makebucket.go
index 22f9e18f2..02a09e553 100644
--- a/vendor/src/github.com/minio/minio-go/examples/s3/makebucket.go
+++ b/vendor/src/github.com/minio/minio-go/examples/s3/makebucket.go
@@ -38,7 +38,7 @@ func main() {
log.Fatalln(err)
}
- err = s3Client.MakeBucket("my-bucketname", minio.BucketACL("private"), "us-east-1")
+ err = s3Client.MakeBucket("my-bucketname", "us-east-1")
if err != nil {
log.Fatalln(err)
}
diff --git a/vendor/src/github.com/minio/minio-go/examples/s3/presignedgetobject.go b/vendor/src/github.com/minio/minio-go/examples/s3/presignedgetobject.go
index 08929cdc0..117f2ec23 100644
--- a/vendor/src/github.com/minio/minio-go/examples/s3/presignedgetobject.go
+++ b/vendor/src/github.com/minio/minio-go/examples/s3/presignedgetobject.go
@@ -20,6 +20,7 @@ package main
import (
"log"
+ "net/url"
"time"
"github.com/minio/minio-go"
@@ -39,7 +40,12 @@ func main() {
log.Fatalln(err)
}
- presignedURL, err := s3Client.PresignedGetObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second)
+ // Set request parameters
+ reqParams := make(url.Values)
+ reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"")
+
+ // Generate presigned get object url.
+ presignedURL, err := s3Client.PresignedGetObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second, reqParams)
if err != nil {
log.Fatalln(err)
}
diff --git a/vendor/src/github.com/minio/minio-go/examples/s3/getbucketacl.go b/vendor/src/github.com/minio/minio-go/examples/s3/removebucketpolicy.go
similarity index 91%
rename from vendor/src/github.com/minio/minio-go/examples/s3/getbucketacl.go
rename to vendor/src/github.com/minio/minio-go/examples/s3/removebucketpolicy.go
index 24991df0c..141f3c678 100644
--- a/vendor/src/github.com/minio/minio-go/examples/s3/getbucketacl.go
+++ b/vendor/src/github.com/minio/minio-go/examples/s3/removebucketpolicy.go
@@ -1,7 +1,7 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -38,10 +38,9 @@ func main() {
log.Fatalln(err)
}
- acl, err := s3Client.GetBucketACL("my-bucketname")
+ err = s3Client.RemoveBucketPolicy("my-bucketname", "my-objectprefix")
if err != nil {
log.Fatalln(err)
}
- log.Println(acl)
-
+ log.Println("Success")
}
diff --git a/vendor/src/github.com/minio/minio-go/examples/s3/removeincompleteupload.go b/vendor/src/github.com/minio/minio-go/examples/s3/removeincompleteupload.go
index 8b7533472..0990f662c 100644
--- a/vendor/src/github.com/minio/minio-go/examples/s3/removeincompleteupload.go
+++ b/vendor/src/github.com/minio/minio-go/examples/s3/removeincompleteupload.go
@@ -38,10 +38,9 @@ func main() {
log.Fatalln(err)
}
- for err := range s3Client.RemoveIncompleteUpload("my-bucketname", "my-objectname") {
- if err != nil {
- log.Fatalln(err)
- }
+ err = s3Client.RemoveIncompleteUpload("my-bucketname", "my-objectname")
+ if err != nil {
+ log.Fatalln(err)
}
log.Println("Success")
}
diff --git a/vendor/src/github.com/minio/minio-go/examples/s3/setbucketacl.go b/vendor/src/github.com/minio/minio-go/examples/s3/setbucketpolicy.go
similarity index 85%
rename from vendor/src/github.com/minio/minio-go/examples/s3/setbucketacl.go
rename to vendor/src/github.com/minio/minio-go/examples/s3/setbucketpolicy.go
index 59fb10ef7..8f29db13c 100644
--- a/vendor/src/github.com/minio/minio-go/examples/s3/setbucketacl.go
+++ b/vendor/src/github.com/minio/minio-go/examples/s3/setbucketpolicy.go
@@ -1,7 +1,7 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -25,8 +25,8 @@ import (
)
func main() {
- // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname
- // are dummy values, please replace them with original values.
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
+ // dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
@@ -38,9 +38,9 @@ func main() {
log.Fatalln(err)
}
- err = s3Client.SetBucketACL("my-bucketname", minio.BucketACL("public-read-write"))
+ err = s3Client.SetBucketPolicy("my-bucketname", "my-objectprefix", minio.BucketPolicyReadWrite)
if err != nil {
log.Fatalln(err)
}
-
+ log.Println("Success")
}
diff --git a/vendor/src/github.com/minio/minio-go/hook-reader.go b/vendor/src/github.com/minio/minio-go/hook-reader.go
index 043425f23..bc9ece049 100644
--- a/vendor/src/github.com/minio/minio-go/hook-reader.go
+++ b/vendor/src/github.com/minio/minio-go/hook-reader.go
@@ -1,5 +1,5 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -27,6 +27,22 @@ type hookReader struct {
hook io.Reader
}
+// Seek implements io.Seeker. Seeks source first, and if necessary
+// seeks hook if Seek method is appropriately found.
+func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
+ // Verify for source has embedded Seeker, use it.
+ sourceSeeker, ok := hr.source.(io.Seeker)
+ if ok {
+ return sourceSeeker.Seek(offset, whence)
+ }
+ // Verify if hook has embedded Seeker, use it.
+ hookSeeker, ok := hr.hook.(io.Seeker)
+ if ok {
+ return hookSeeker.Seek(offset, whence)
+ }
+ return n, nil
+}
+
// Read implements io.Reader. Always reads from the source, the return
// value 'n' number of bytes are reported through the hook. Returns
// error for all non io.EOF conditions.
@@ -44,7 +60,7 @@ func (hr *hookReader) Read(b []byte) (n int, err error) {
return n, err
}
-// newHook returns a io.Reader which implements hookReader that
+// newHook returns a io.ReadSeeker which implements hookReader that
// reports the data read from the source to the hook.
func newHook(source, hook io.Reader) io.Reader {
if hook == nil {
diff --git a/vendor/src/github.com/minio/minio-go/request-signature-v2.go b/vendor/src/github.com/minio/minio-go/request-signature-v2.go
index aa0fc9f91..c14ce2aab 100644
--- a/vendor/src/github.com/minio/minio-go/request-signature-v2.go
+++ b/vendor/src/github.com/minio/minio-go/request-signature-v2.go
@@ -73,6 +73,11 @@ func preSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in
// Get encoded URL path.
path := encodeURL2Path(req.URL)
+ if len(req.URL.Query()) > 0 {
+ // Keep the usual queries unescaped for string to sign.
+ query, _ := url.QueryUnescape(queryEncode(req.URL.Query()))
+ path = path + "?" + query
+ }
// Find epoch expires when the request will expire.
epochExpires := d.Unix() + expires
@@ -93,12 +98,16 @@ func preSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in
query.Set("AWSAccessKeyId", accessKeyID)
}
- // Fill in Expires and Signature for presigned query.
+ // Fill in Expires for presigned query.
query.Set("Expires", strconv.FormatInt(epochExpires, 10))
- query.Set("Signature", signature)
// Encode query and save.
- req.URL.RawQuery = query.Encode()
+ req.URL.RawQuery = queryEncode(query)
+
+ // Save signature finally.
+ req.URL.RawQuery += "&Signature=" + urlEncodePath(signature)
+
+ // Return.
return &req
}
@@ -115,7 +124,7 @@ func postPresignSignatureV2(policyBase64, secretAccessKey string) string {
// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) );
//
// StringToSign = HTTP-Verb + "\n" +
-// Content-MD5 + "\n" +
+// Content-Md5 + "\n" +
// Content-Type + "\n" +
// Date + "\n" +
// CanonicalizedProtocolHeaders +
@@ -163,7 +172,7 @@ func signV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request
// From the Amazon docs:
//
// StringToSign = HTTP-Verb + "\n" +
-// Content-MD5 + "\n" +
+// Content-Md5 + "\n" +
// Content-Type + "\n" +
// Date + "\n" +
// CanonicalizedProtocolHeaders +
@@ -183,7 +192,7 @@ func getStringToSignV2(req http.Request) string {
func writeDefaultHeaders(buf *bytes.Buffer, req http.Request) {
buf.WriteString(req.Method)
buf.WriteByte('\n')
- buf.WriteString(req.Header.Get("Content-MD5"))
+ buf.WriteString(req.Header.Get("Content-Md5"))
buf.WriteByte('\n')
buf.WriteString(req.Header.Get("Content-Type"))
buf.WriteByte('\n')
@@ -226,7 +235,8 @@ func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
}
}
-// Must be sorted:
+// The following list is already sorted and should always be, otherwise we could
+// have signature-related issues
var resourceList = []string{
"acl",
"location",
@@ -234,13 +244,13 @@ var resourceList = []string{
"notification",
"partNumber",
"policy",
- "response-content-type",
- "response-content-language",
- "response-expires",
+ "requestPayment",
"response-cache-control",
"response-content-disposition",
"response-content-encoding",
- "requestPayment",
+ "response-content-language",
+ "response-content-type",
+ "response-expires",
"torrent",
"uploadId",
"uploads",
@@ -262,7 +272,6 @@ func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) {
path := encodeURL2Path(requestURL)
buf.WriteString(path)
- sort.Strings(resourceList)
if requestURL.RawQuery != "" {
var n int
vals, _ := url.ParseQuery(requestURL.RawQuery)
@@ -283,7 +292,7 @@ func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) {
// Request parameters
if len(vv[0]) > 0 {
buf.WriteByte('=')
- buf.WriteString(url.QueryEscape(vv[0]))
+ buf.WriteString(strings.Replace(url.QueryEscape(vv[0]), "+", "%20", -1))
}
}
}
diff --git a/vendor/src/github.com/minio/minio-go/request-signature-v2_test.go b/vendor/src/github.com/minio/minio-go/request-signature-v2_test.go
new file mode 100644
index 000000000..6d861fb81
--- /dev/null
+++ b/vendor/src/github.com/minio/minio-go/request-signature-v2_test.go
@@ -0,0 +1,35 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "sort"
+ "testing"
+)
+
+// Tests for 'func TestResourceListSorting(t *testing.T)'.
+func TestResourceListSorting(t *testing.T) {
+ sortedResourceList := make([]string, len(resourceList))
+ copy(sortedResourceList, resourceList)
+ sort.Strings(sortedResourceList)
+ for i := 0; i < len(resourceList); i++ {
+ if resourceList[i] != sortedResourceList[i] {
+ t.Errorf("Expected resourceList[%d] = \"%s\", resourceList is not correctly sorted.", i, sortedResourceList[i])
+ break
+ }
+ }
+}
diff --git a/vendor/src/github.com/minio/minio-go/retry.go b/vendor/src/github.com/minio/minio-go/retry.go
new file mode 100644
index 000000000..d9fbe12f5
--- /dev/null
+++ b/vendor/src/github.com/minio/minio-go/retry.go
@@ -0,0 +1,121 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// MaxRetry is the maximum number of retries before stopping.
+var MaxRetry = 5
+
+// MaxJitter will randomize over the full exponential backoff time
+const MaxJitter = 1.0
+
+// NoJitter disables the use of jitter for randomizing the exponential backoff time
+const NoJitter = 0.0
+
+// newRetryTimer creates a timer with exponentially increasing delays
+// until the maximum retry attempts are reached.
+func (c Client) newRetryTimer(maxRetry int, unit time.Duration, cap time.Duration, jitter float64) <-chan int {
+ attemptCh := make(chan int)
+
+ // computes the exponential backoff duration according to
+ // https://www.awsarchitectureblog.com/2015/03/backoff.html
+ exponentialBackoffWait := func(attempt int) time.Duration {
+ // normalize jitter to the range [0, 1.0]
+ if jitter < NoJitter {
+ jitter = NoJitter
+ }
+ if jitter > MaxJitter {
+ jitter = MaxJitter
+ }
+
+ //sleep = random_between(0, min(cap, base * 2 ** attempt))
+ sleep := unit * time.Duration(1<<uint(attempt))
+ if sleep > cap {
+ sleep = cap
+ }
+ if jitter != NoJitter {
+ sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
+ }
+ return sleep
+ }
+
+ go func() {
+ defer close(attemptCh)
+ for i := 0; i < maxRetry; i++ {
+ attemptCh <- i + 1 // Attempts start from 1.
+ time.Sleep(exponentialBackoffWait(i))
+ }
+ }()
+ return attemptCh
+}
+
+// isNetErrorRetryable - is network error retryable.
+func isNetErrorRetryable(err error) bool {
+ switch err.(type) {
+ case *net.DNSError, *net.OpError, net.UnknownNetworkError:
+ return true
+ case *url.Error:
+ // For a URL error, where it replies back "connection closed"
+ // retry again.
+ if strings.Contains(err.Error(), "Connection closed by foreign host") {
+ return true
+ }
+ }
+ return false
+}
+
+// List of AWS S3 error codes which are retryable.
+var retryableS3Codes = map[string]struct{}{
+ "RequestError": {},
+ "RequestTimeout": {},
+ "Throttling": {},
+ "ThrottlingException": {},
+ "RequestLimitExceeded": {},
+ "RequestThrottled": {},
+ "InternalError": {},
+ "ExpiredToken": {},
+ "ExpiredTokenException": {},
+ // Add more AWS S3 codes here.
+}
+
+// isS3CodeRetryable - is s3 error code retryable.
+func isS3CodeRetryable(s3Code string) (ok bool) {
+ _, ok = retryableS3Codes[s3Code]
+ return ok
+}
+
+// List of HTTP status codes which are retryable.
+var retryableHTTPStatusCodes = map[int]struct{}{
+ 429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet
+ http.StatusInternalServerError: {},
+ http.StatusBadGateway: {},
+ http.StatusServiceUnavailable: {},
+ // Add more HTTP status codes here.
+}
+
+// isHTTPStatusRetryable - is HTTP error code retryable.
+func isHTTPStatusRetryable(httpStatusCode int) (ok bool) {
+ _, ok = retryableHTTPStatusCodes[httpStatusCode]
+ return ok
+}
diff --git a/vendor/src/github.com/minio/minio-go/s3-endpoints.go b/vendor/src/github.com/minio/minio-go/s3-endpoints.go
index 8c9ff5e88..a46b5e335 100644
--- a/vendor/src/github.com/minio/minio-go/s3-endpoints.go
+++ b/vendor/src/github.com/minio/minio-go/s3-endpoints.go
@@ -17,6 +17,7 @@
package minio
// awsS3EndpointMap Amazon S3 endpoint map.
+// "cn-north-1" adds support for AWS China.
var awsS3EndpointMap = map[string]string{
"us-east-1": "s3.amazonaws.com",
"us-west-2": "s3-us-west-2.amazonaws.com",
@@ -27,6 +28,7 @@ var awsS3EndpointMap = map[string]string{
"ap-northeast-1": "s3-ap-northeast-1.amazonaws.com",
"ap-northeast-2": "s3-ap-northeast-2.amazonaws.com",
"sa-east-1": "s3-sa-east-1.amazonaws.com",
+ "cn-north-1": "s3.cn-north-1.amazonaws.com.cn",
}
// getS3Endpoint get Amazon S3 endpoint based on the bucket location.
diff --git a/vendor/src/github.com/minio/minio-go/utils.go b/vendor/src/github.com/minio/minio-go/utils.go
index 63966c30c..104665849 100644
--- a/vendor/src/github.com/minio/minio-go/utils.go
+++ b/vendor/src/github.com/minio/minio-go/utils.go
@@ -17,7 +17,9 @@
package minio
import (
+ "bytes"
"crypto/hmac"
+ "crypto/md5"
"crypto/sha256"
"encoding/hex"
"encoding/xml"
@@ -27,6 +29,7 @@ import (
"net/http"
"net/url"
"regexp"
+ "sort"
"strings"
"time"
"unicode/utf8"
@@ -45,6 +48,13 @@ func sum256(data []byte) []byte {
return hash.Sum(nil)
}
+// sumMD5 calculates the MD5 checksum of an input byte array.
+func sumMD5(data []byte) []byte {
+ hash := md5.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
// sumHMAC calculate hmac between two input byte array.
func sumHMAC(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
@@ -163,6 +173,23 @@ func isAmazonEndpoint(endpointURL *url.URL) bool {
if endpointURL.Host == "s3.amazonaws.com" {
return true
}
+ if isAmazonChinaEndpoint(endpointURL) {
+ return true
+ }
+ return false
+}
+
+// Match if it is exactly Amazon S3 China endpoint.
+// Customers who wish to use the new Beijing Region are required to sign up for a separate set of account credentials unique to the China (Beijing) Region.
+// Customers with existing AWS credentials will not be able to access resources in the new Region, and vice versa.
+// For more info https://aws.amazon.com/about-aws/whats-new/2013/12/18/announcing-the-aws-china-beijing-region/
+func isAmazonChinaEndpoint(endpointURL *url.URL) bool {
+ if endpointURL == nil {
+ return false
+ }
+ if endpointURL.Host == "s3.cn-north-1.amazonaws.com.cn" {
+ return true
+ }
return false
}
@@ -183,7 +210,7 @@ func isValidEndpointURL(endpointURL *url.URL) error {
return ErrInvalidArgument("Endpoint url cannot be empty.")
}
if endpointURL.Path != "/" && endpointURL.Path != "" {
- return ErrInvalidArgument("Endpoing url cannot have fully qualified paths.")
+ return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
}
if strings.Contains(endpointURL.Host, ".amazonaws.com") {
if !isAmazonEndpoint(endpointURL) {
@@ -229,7 +256,7 @@ func isValidBucketName(bucketName string) error {
if bucketName[0] == '.' || bucketName[len(bucketName)-1] == '.' {
return ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")
}
- if match, _ := regexp.MatchString("\\.\\.", bucketName); match == true {
+ if match, _ := regexp.MatchString("\\.\\.", bucketName); match {
return ErrInvalidBucketName("Bucket name cannot have successive periods.")
}
if !validBucketName.MatchString(bucketName) {
@@ -264,6 +291,31 @@ func isValidObjectPrefix(objectPrefix string) error {
return nil
}
+// queryEncode - encodes query values in their URL encoded form.
+func queryEncode(v url.Values) string {
+ if v == nil {
+ return ""
+ }
+ var buf bytes.Buffer
+ keys := make([]string, 0, len(v))
+ for k := range v {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ vs := v[k]
+ prefix := urlEncodePath(k) + "="
+ for _, v := range vs {
+ if buf.Len() > 0 {
+ buf.WriteByte('&')
+ }
+ buf.WriteString(prefix)
+ buf.WriteString(urlEncodePath(v))
+ }
+ }
+ return buf.String()
+}
+
// urlEncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences
//
// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8
diff --git a/vendor/src/github.com/minio/minio-go/utils_test.go b/vendor/src/github.com/minio/minio-go/utils_test.go
new file mode 100644
index 000000000..045ec85da
--- /dev/null
+++ b/vendor/src/github.com/minio/minio-go/utils_test.go
@@ -0,0 +1,430 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package minio
+
+import (
+ "fmt"
+ "net/url"
+ "testing"
+ "time"
+)
+
+// Tests for 'getEndpointURL(endpoint string, inSecure bool)'.
+func TestGetEndpointURL(t *testing.T) {
+ testCases := []struct {
+ // Inputs.
+ endPoint string
+ inSecure bool
+
+ // Expected result.
+ result string
+ err error
+ // Flag indicating whether the test is expected to pass or not.
+ shouldPass bool
+ }{
+ {"s3.amazonaws.com", false, "https://s3.amazonaws.com", nil, true},
+ {"s3.cn-north-1.amazonaws.com.cn", false, "https://s3.cn-north-1.amazonaws.com.cn", nil, true},
+ {"s3.amazonaws.com", true, "http://s3.amazonaws.com", nil, true},
+ {"s3.cn-north-1.amazonaws.com.cn", true, "http://s3.cn-north-1.amazonaws.com.cn", nil, true},
+ {"192.168.1.1:9000", true, "http://192.168.1.1:9000", nil, true},
+ {"192.168.1.1:9000", false, "https://192.168.1.1:9000", nil, true},
+ {"192.168.1.1::9000", false, "", fmt.Errorf("too many colons in address %s", "192.168.1.1::9000"), false},
+ {"13333.123123.-", false, "", fmt.Errorf("Endpoint: %s does not follow ip address or domain name standards.", "13333.123123.-"), false},
+ {"13333.123123.-", false, "", fmt.Errorf("Endpoint: %s does not follow ip address or domain name standards.", "13333.123123.-"), false},
+ {"s3.amazonaws.com:443", false, "", fmt.Errorf("Amazon S3 endpoint should be 's3.amazonaws.com'."), false},
+ {"storage.googleapis.com:4000", false, "", fmt.Errorf("Google Cloud Storage endpoint should be 'storage.googleapis.com'."), false},
+ {"s3.aamzza.-", false, "", fmt.Errorf("Endpoint: %s does not follow ip address or domain name standards.", "s3.aamzza.-"), false},
+ {"", false, "", fmt.Errorf("Endpoint: does not follow ip address or domain name standards."), false},
+ }
+
+ for i, testCase := range testCases {
+ result, err := getEndpointURL(testCase.endPoint, testCase.inSecure)
+ if err != nil && testCase.shouldPass {
+ t.Errorf("Test %d: Expected to pass, but failed with: %s", i+1, err.Error())
+ }
+ if err == nil && !testCase.shouldPass {
+ t.Errorf("Test %d: Expected to fail with \"%s\", but passed instead", i+1, testCase.err.Error())
+ }
+ // Failed as expected, but does it fail for the expected reason.
+ if err != nil && !testCase.shouldPass {
+ if err.Error() != testCase.err.Error() {
+ t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
+ }
+ }
+
+ // Test passes as expected, but the output values are verified for correctness here.
+ if err == nil && testCase.shouldPass {
+ if testCase.result != result.String() {
+ t.Errorf("Test %d: Expected the result Url to be \"%s\", but found \"%s\" instead", i+1, testCase.result, result.String())
+ }
+ }
+ }
+}
+
+// Tests for 'isValidDomain(host string) bool'.
+func TestIsValidDomain(t *testing.T) {
+ testCases := []struct {
+ // Input.
+ host string
+ // Expected result.
+ result bool
+ }{
+ {"s3.amazonaws.com", true},
+ {"s3.cn-north-1.amazonaws.com.cn", true},
+ {"s3.amazonaws.com_", false},
+ {"%$$$", false},
+ {"s3.amz.test.com", true},
+ {"s3.%%", false},
+ {"localhost", true},
+ {"-localhost", false},
+ {"", false},
+ {"\n \t", false},
+ {" ", false},
+ }
+
+ for i, testCase := range testCases {
+ result := isValidDomain(testCase.host)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected isValidDomain test to be '%v', but found '%v' instead", i+1, testCase.result, result)
+ }
+ }
+}
+
+// Tests validate end point validator.
+func TestIsValidEndpointURL(t *testing.T) {
+ testCases := []struct {
+ url string
+ err error
+ // Flag indicating whether the test is expected to pass or not.
+ shouldPass bool
+ }{
+ {"", nil, true},
+ {"/", nil, true},
+ {"https://s3.amazonaws.com", nil, true},
+ {"https://s3.cn-north-1.amazonaws.com.cn", nil, true},
+ {"https://s3.amazonaws.com/", nil, true},
+ {"https://storage.googleapis.com/", nil, true},
+ {"192.168.1.1", fmt.Errorf("Endpoint url cannot have fully qualified paths."), false},
+ {"https://amazon.googleapis.com/", fmt.Errorf("Google Cloud Storage endpoint should be 'storage.googleapis.com'."), false},
+ {"https://storage.googleapis.com/bucket/", fmt.Errorf("Endpoint url cannot have fully qualified paths."), false},
+ {"https://z3.amazonaws.com", fmt.Errorf("Amazon S3 endpoint should be 's3.amazonaws.com'."), false},
+ {"https://s3.amazonaws.com/bucket/object", fmt.Errorf("Endpoint url cannot have fully qualified paths."), false},
+ }
+
+ for i, testCase := range testCases {
+ endPoint, e := url.Parse(testCase.url)
+ if e != nil {
+ t.Fatalf("Test %d: Fatal err \"%s\"", i+1, e.Error())
+ }
+ err := isValidEndpointURL(endPoint)
+ if err != nil && testCase.shouldPass {
+ t.Errorf("Test %d: Expected to pass, but failed with: %s", i+1, err.Error())
+ }
+ if err == nil && !testCase.shouldPass {
+ t.Errorf("Test %d: Expected to fail with \"%s\", but passed instead", i+1, testCase.err.Error())
+ }
+ // Failed as expected, but does it fail for the expected reason.
+ if err != nil && !testCase.shouldPass {
+ if err.Error() != testCase.err.Error() {
+ t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
+ }
+ }
+
+ }
+}
+
+// Tests validate IP address validator.
+func TestIsValidIP(t *testing.T) {
+ testCases := []struct {
+ // Input.
+ ip string
+ // Expected result.
+ result bool
+ }{
+ {"192.168.1.1", true},
+ {"192.168.1", false},
+ {"192.168.1.1.1", false},
+ {"-192.168.1.1", false},
+ {"260.192.1.1", false},
+ }
+
+ for i, testCase := range testCases {
+ result := isValidIP(testCase.ip)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected isValidIP to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.ip, result)
+ }
+ }
+
+}
+
+// Tests validate virtual host validator.
+func TestIsVirtualHostSupported(t *testing.T) {
+ testCases := []struct {
+ url string
+ bucket string
+ // Expected result.
+ result bool
+ }{
+ {"https://s3.amazonaws.com", "my-bucket", true},
+ {"https://s3.cn-north-1.amazonaws.com.cn", "my-bucket", true},
+ {"https://s3.amazonaws.com", "my-bucket.", false},
+ {"https://amazons3.amazonaws.com", "my-bucket.", false},
+ {"https://storage.googleapis.com/", "my-bucket", true},
+ {"https://mystorage.googleapis.com/", "my-bucket", false},
+ }
+
+ for i, testCase := range testCases {
+ endPoint, e := url.Parse(testCase.url)
+ if e != nil {
+ t.Fatalf("Test %d: Fatal err \"%s\"", i+1, e.Error())
+ }
+ result := isVirtualHostSupported(endPoint, testCase.bucket)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected isVirtualHostSupported to be '%v' for input url \"%s\" and bucket \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, testCase.bucket, result)
+ }
+ }
+}
+
+// Tests validate Amazon endpoint validator.
+func TestIsAmazonEndpoint(t *testing.T) {
+ testCases := []struct {
+ url string
+ // Expected result.
+ result bool
+ }{
+ {"https://192.168.1.1", false},
+ {"192.168.1.1", false},
+ {"http://storage.googleapis.com", false},
+ {"https://storage.googleapis.com", false},
+ {"storage.googleapis.com", false},
+ {"s3.amazonaws.com", false},
+ {"https://amazons3.amazonaws.com", false},
+ {"-192.168.1.1", false},
+ {"260.192.1.1", false},
+ // valid inputs.
+ {"https://s3.amazonaws.com", true},
+ {"https://s3.cn-north-1.amazonaws.com.cn", true},
+ }
+
+ for i, testCase := range testCases {
+ endPoint, e := url.Parse(testCase.url)
+ if e != nil {
+ t.Fatalf("Test %d: Fatal err \"%s\"", i+1, e.Error())
+ }
+ result := isAmazonEndpoint(endPoint)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected isAmazonEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
+ }
+ }
+
+}
+
+// Tests validate Amazon S3 China endpoint validator.
+func TestIsAmazonChinaEndpoint(t *testing.T) {
+ testCases := []struct {
+ url string
+ // Expected result.
+ result bool
+ }{
+ {"https://192.168.1.1", false},
+ {"192.168.1.1", false},
+ {"http://storage.googleapis.com", false},
+ {"https://storage.googleapis.com", false},
+ {"storage.googleapis.com", false},
+ {"s3.amazonaws.com", false},
+ {"https://amazons3.amazonaws.com", false},
+ {"-192.168.1.1", false},
+ {"260.192.1.1", false},
+ // s3.amazonaws.com is not a valid Amazon S3 China end point.
+ {"https://s3.amazonaws.com", false},
+ // valid input.
+ {"https://s3.cn-north-1.amazonaws.com.cn", true},
+ }
+
+ for i, testCase := range testCases {
+ endPoint, e := url.Parse(testCase.url)
+ if e != nil {
+ t.Fatalf("Test %d: Fatal err \"%s\"", i+1, e.Error())
+ }
+ result := isAmazonChinaEndpoint(endPoint)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected isAmazonChinaEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
+ }
+ }
+
+}
+
+// Tests validate Google Cloud end point validator.
+func TestIsGoogleEndpoint(t *testing.T) {
+ testCases := []struct {
+ url string
+ // Expected result.
+ result bool
+ }{
+ {"192.168.1.1", false},
+ {"https://192.168.1.1", false},
+ {"s3.amazonaws.com", false},
+ {"http://s3.amazonaws.com", false},
+ {"https://s3.amazonaws.com", false},
+ {"https://s3.cn-north-1.amazonaws.com.cn", false},
+ {"-192.168.1.1", false},
+ {"260.192.1.1", false},
+ // valid inputs.
+ {"http://storage.googleapis.com", true},
+ {"https://storage.googleapis.com", true},
+ }
+
+ for i, testCase := range testCases {
+ endPoint, e := url.Parse(testCase.url)
+ if e != nil {
+ t.Fatalf("Test %d: Fatal err \"%s\"", i+1, e.Error())
+ }
+ result := isGoogleEndpoint(endPoint)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected isGoogleEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
+ }
+ }
+
+}
+
+// Tests validate the expiry time validator.
+func TestIsValidExpiry(t *testing.T) {
+ testCases := []struct {
+ // Input.
+ duration time.Duration
+ // Expected result.
+ err error
+ // Flag to indicate whether the test should pass.
+ shouldPass bool
+ }{
+ {100 * time.Millisecond, fmt.Errorf("Expires cannot be lesser than 1 second."), false},
+ {604801 * time.Second, fmt.Errorf("Expires cannot be greater than 7 days."), false},
+ {0 * time.Second, fmt.Errorf("Expires cannot be lesser than 1 second."), false},
+ {1 * time.Second, nil, true},
+ {10000 * time.Second, nil, true},
+ {999 * time.Second, nil, true},
+ }
+
+ for i, testCase := range testCases {
+ err := isValidExpiry(testCase.duration)
+ if err != nil && testCase.shouldPass {
+ t.Errorf("Test %d: Expected to pass, but failed with: %s", i+1, err.Error())
+ }
+ if err == nil && !testCase.shouldPass {
+ t.Errorf("Test %d: Expected to fail with \"%s\", but passed instead", i+1, testCase.err.Error())
+ }
+ // Failed as expected, but does it fail for the expected reason.
+ if err != nil && !testCase.shouldPass {
+ if err.Error() != testCase.err.Error() {
+ t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
+ }
+ }
+
+ }
+}
+
+// Tests validate the bucket name validator.
+func TestIsValidBucketName(t *testing.T) {
+ testCases := []struct {
+ // Input.
+ bucketName string
+ // Expected result.
+ err error
+ // Flag to indicate whether test should Pass.
+ shouldPass bool
+ }{
+ {".mybucket", ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot."), false},
+ {"mybucket.", ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot."), false},
+ {"mybucket-", ErrInvalidBucketName("Bucket name contains invalid characters."), false},
+ {"my", ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters."), false},
+ {"", ErrInvalidBucketName("Bucket name cannot be empty."), false},
+ {"my..bucket", ErrInvalidBucketName("Bucket name cannot have successive periods."), false},
+ {"my.bucket.com", nil, true},
+ {"my-bucket", nil, true},
+ {"123my-bucket", nil, true},
+ }
+
+ for i, testCase := range testCases {
+ err := isValidBucketName(testCase.bucketName)
+ if err != nil && testCase.shouldPass {
+ t.Errorf("Test %d: Expected to pass, but failed with: %s", i+1, err.Error())
+ }
+ if err == nil && !testCase.shouldPass {
+ t.Errorf("Test %d: Expected to fail with \"%s\", but passed instead", i+1, testCase.err.Error())
+ }
+ // Failed as expected, but does it fail for the expected reason.
+ if err != nil && !testCase.shouldPass {
+ if err.Error() != testCase.err.Error() {
+ t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
+ }
+ }
+
+ }
+
+}
+
+// Tests validate the query encoder.
+func TestQueryEncode(t *testing.T) {
+ testCases := []struct {
+ queryKey string
+ valueToEncode []string
+ // Expected result.
+ result string
+ }{
+ {"prefix", []string{"test@123", "test@456"}, "prefix=test%40123&prefix=test%40456"},
+ {"@prefix", []string{"test@123"}, "%40prefix=test%40123"},
+ {"prefix", []string{"test#123"}, "prefix=test%23123"},
+ {"prefix#", []string{"test#123"}, "prefix%23=test%23123"},
+ {"prefix", []string{"test123"}, "prefix=test123"},
+ {"prefix", []string{"test本語123", "test123"}, "prefix=test%E6%9C%AC%E8%AA%9E123&prefix=test123"},
+ }
+
+ for i, testCase := range testCases {
+ urlValues := make(url.Values)
+ for _, valueToEncode := range testCase.valueToEncode {
+ urlValues.Add(testCase.queryKey, valueToEncode)
+ }
+ result := queryEncode(urlValues)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected queryEncode result to be \"%s\", but found it to be \"%s\" instead", i+1, testCase.result, result)
+ }
+ }
+}
+
+// Tests validate the URL path encoder.
+func TestUrlEncodePath(t *testing.T) {
+ testCases := []struct {
+ // Input.
+ inputStr string
+ // Expected result.
+ result string
+ }{
+ {"thisisthe%url", "thisisthe%25url"},
+ {"本語", "%E6%9C%AC%E8%AA%9E"},
+ {"本語.1", "%E6%9C%AC%E8%AA%9E.1"},
+ {">123", "%3E123"},
+ {"myurl#link", "myurl%23link"},
+ {"space in url", "space%20in%20url"},
+ {"url+path", "url%2Bpath"},
+ }
+
+ for i, testCase := range testCases {
+ result := urlEncodePath(testCase.inputStr)
+ if testCase.result != result {
+ t.Errorf("Test %d: Expected urlEncodePath result to be \"%s\", but found it to be \"%s\" instead", i+1, testCase.result, result)
+ }
+ }
+}